i965: Delete bogus assertion in emit_gs_input_load().
[mesa.git] / src / mesa / drivers / dri / i965 / brw_fs_nir.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "compiler/glsl/ir.h"
25 #include "brw_fs.h"
26 #include "brw_fs_surface_builder.h"
27 #include "brw_nir.h"
28 #include "brw_program.h"
29
30 using namespace brw;
31 using namespace brw::surface_access;
32
33 void
34 fs_visitor::emit_nir_code()
35 {
36 /* emit the arrays used for inputs and outputs - load/store intrinsics will
37 * be converted to reads/writes of these arrays
38 */
39 nir_setup_inputs();
40 nir_setup_outputs();
41 nir_setup_uniforms();
42 nir_emit_system_values();
43
44 /* get the main function and emit it */
45 nir_foreach_function(function, nir) {
46 assert(strcmp(function->name, "main") == 0);
47 assert(function->impl);
48 nir_emit_impl(function->impl);
49 }
50 }
51
52 void
53 fs_visitor::nir_setup_inputs()
54 {
55 if (stage != MESA_SHADER_FRAGMENT)
56 return;
57
58 nir_inputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_inputs);
59
60 nir_foreach_variable(var, &nir->inputs) {
61 fs_reg input = offset(nir_inputs, bld, var->data.driver_location);
62
63 fs_reg reg;
64 if (var->data.location == VARYING_SLOT_POS) {
65 reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
66 var->data.origin_upper_left);
67 emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
68 input, reg), 0xF);
69 } else if (var->data.location == VARYING_SLOT_LAYER) {
70 struct brw_reg reg = suboffset(interp_reg(VARYING_SLOT_LAYER, 1), 3);
71 reg.type = BRW_REGISTER_TYPE_D;
72 bld.emit(FS_OPCODE_CINTERP, retype(input, BRW_REGISTER_TYPE_D), reg);
73 } else if (var->data.location == VARYING_SLOT_VIEWPORT) {
74 struct brw_reg reg = suboffset(interp_reg(VARYING_SLOT_VIEWPORT, 2), 3);
75 reg.type = BRW_REGISTER_TYPE_D;
76 bld.emit(FS_OPCODE_CINTERP, retype(input, BRW_REGISTER_TYPE_D), reg);
77 } else {
78 int location = var->data.location;
79 emit_general_interpolation(&input, var->name, var->type,
80 (glsl_interp_qualifier) var->data.interpolation,
81 &location, var->data.centroid,
82 var->data.sample);
83 }
84 }
85 }
86
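/**
* Recursively walk the type of an output varying and assign each
* scalar/vector leaf its own varying slot: record the register and
* component count for the slot, advance the register by one vec4 and
* bump the location.
*/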
87 void
88 fs_visitor::nir_setup_single_output_varying(fs_reg *reg,
89 const glsl_type *type,
90 unsigned *location)
91 {
92 if (type->is_array() || type->is_matrix()) {
93 const struct glsl_type *elem_type = glsl_get_array_element(type);
94 const unsigned length = glsl_get_length(type);
95
96 for (unsigned i = 0; i < length; i++) {
97 nir_setup_single_output_varying(reg, elem_type, location);
98 }
99 } else if (type->is_record()) {
100 for (unsigned i = 0; i < type->length; i++) {
101 const struct glsl_type *field_type = type->fields.structure[i].type;
102 nir_setup_single_output_varying(reg, field_type, location);
103 }
104 } else {
105 assert(type->is_scalar() || type->is_vector());
106 this->outputs[*location] = *reg;
107 this->output_components[*location] = type->vector_elements;
108 *reg = offset(*reg, bld, 4);
109 (*location)++;
110 }
111 }
112
113 void
114 fs_visitor::nir_setup_outputs()
115 {
116 if (stage == MESA_SHADER_TESS_CTRL)
117 return;
118
119 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
120
121 nir_outputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_outputs);
122
123 nir_foreach_variable(var, &nir->outputs) {
124 fs_reg reg = offset(nir_outputs, bld, var->data.driver_location);
125
126 switch (stage) {
127 case MESA_SHADER_VERTEX:
128 case MESA_SHADER_TESS_EVAL:
129 case MESA_SHADER_GEOMETRY: {
130 unsigned location = var->data.location;
131 nir_setup_single_output_varying(&reg, var->type, &location);
132 break;
133 }
134 case MESA_SHADER_FRAGMENT:
135 if (key->force_dual_color_blend &&
136 var->data.location == FRAG_RESULT_DATA1) {
137 this->dual_src_output = reg;
138 this->do_dual_src = true;
139 } else if (var->data.index > 0) {
140 assert(var->data.location == FRAG_RESULT_DATA0);
141 assert(var->data.index == 1);
142 this->dual_src_output = reg;
143 this->do_dual_src = true;
144 } else if (var->data.location == FRAG_RESULT_COLOR) {
145 /* Writing gl_FragColor outputs to all color regions. */
146 for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
147 this->outputs[i] = reg;
148 this->output_components[i] = 4;
149 }
150 } else if (var->data.location == FRAG_RESULT_DEPTH) {
151 this->frag_depth = reg;
152 } else if (var->data.location == FRAG_RESULT_STENCIL) {
153 this->frag_stencil = reg;
154 } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
155 this->sample_mask = reg;
156 } else {
157 int vector_elements = var->type->without_array()->vector_elements;
158
159 /* gl_FragData or a user-defined FS output */
160 assert(var->data.location >= FRAG_RESULT_DATA0 &&
161 var->data.location < FRAG_RESULT_DATA0+BRW_MAX_DRAW_BUFFERS);
162
163 /* General color output. */
164 for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
165 int output = var->data.location - FRAG_RESULT_DATA0 + i;
166 this->outputs[output] = offset(reg, bld, vector_elements * i);
167 this->output_components[output] = vector_elements;
168 }
169 }
170 break;
171 default:
172 unreachable("unhandled shader stage");
173 }
174 }
175 }
176
177 void
178 fs_visitor::nir_setup_uniforms()
179 {
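/* The SIMD8 program is compiled first and decides the uniform layout;
* wider dispatch widths reuse it, so only count uniforms once.
*/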
180 if (dispatch_width != 8)
181 return;
182
183 uniforms = nir->num_uniforms / 4;
184 }
185
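/**
* Scan a block for system-value intrinsics and lazily allocate a register
* for each system value the first time it is seen, so that the later
* intrinsic lowering can simply read nir_system_values[].
*/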
186 static bool
187 emit_system_values_block(nir_block *block, fs_visitor *v)
188 {
189 fs_reg *reg;
190
191 nir_foreach_instr(instr, block) {
192 if (instr->type != nir_instr_type_intrinsic)
193 continue;
194
195 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
196 switch (intrin->intrinsic) {
197 case nir_intrinsic_load_vertex_id:
198 unreachable("should be lowered by lower_vertex_id().");
199
200 case nir_intrinsic_load_vertex_id_zero_base:
201 assert(v->stage == MESA_SHADER_VERTEX);
202 reg = &v->nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
203 if (reg->file == BAD_FILE)
204 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
205 break;
206
207 case nir_intrinsic_load_base_vertex:
208 assert(v->stage == MESA_SHADER_VERTEX);
209 reg = &v->nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
210 if (reg->file == BAD_FILE)
211 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_BASE_VERTEX);
212 break;
213
214 case nir_intrinsic_load_instance_id:
215 assert(v->stage == MESA_SHADER_VERTEX);
216 reg = &v->nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
217 if (reg->file == BAD_FILE)
218 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_INSTANCE_ID);
219 break;
220
221 case nir_intrinsic_load_base_instance:
222 assert(v->stage == MESA_SHADER_VERTEX);
223 reg = &v->nir_system_values[SYSTEM_VALUE_BASE_INSTANCE];
224 if (reg->file == BAD_FILE)
225 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_BASE_INSTANCE);
226 break;
227
228 case nir_intrinsic_load_draw_id:
229 assert(v->stage == MESA_SHADER_VERTEX);
230 reg = &v->nir_system_values[SYSTEM_VALUE_DRAW_ID];
231 if (reg->file == BAD_FILE)
232 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_DRAW_ID);
233 break;
234
235 case nir_intrinsic_load_invocation_id:
236 if (v->stage == MESA_SHADER_TESS_CTRL)
237 break;
238 assert(v->stage == MESA_SHADER_GEOMETRY);
239 reg = &v->nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
240 if (reg->file == BAD_FILE) {
241 const fs_builder abld = v->bld.annotate("gl_InvocationID", NULL);
242 fs_reg g1(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
243 fs_reg iid = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
244 abld.SHR(iid, g1, brw_imm_ud(27u));
245 *reg = iid;
246 }
247 break;
248
249 case nir_intrinsic_load_sample_pos:
250 assert(v->stage == MESA_SHADER_FRAGMENT);
251 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
252 if (reg->file == BAD_FILE)
253 *reg = *v->emit_samplepos_setup();
254 break;
255
256 case nir_intrinsic_load_sample_id:
257 assert(v->stage == MESA_SHADER_FRAGMENT);
258 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
259 if (reg->file == BAD_FILE)
260 *reg = *v->emit_sampleid_setup();
261 break;
262
263 case nir_intrinsic_load_sample_mask_in:
264 assert(v->stage == MESA_SHADER_FRAGMENT);
265 assert(v->devinfo->gen >= 7);
266 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
267 if (reg->file == BAD_FILE)
268 *reg = *v->emit_samplemaskin_setup();
269 break;
270
271 case nir_intrinsic_load_local_invocation_id:
272 assert(v->stage == MESA_SHADER_COMPUTE);
273 reg = &v->nir_system_values[SYSTEM_VALUE_LOCAL_INVOCATION_ID];
274 if (reg->file == BAD_FILE)
275 *reg = *v->emit_cs_local_invocation_id_setup();
276 break;
277
278 case nir_intrinsic_load_work_group_id:
279 assert(v->stage == MESA_SHADER_COMPUTE);
280 reg = &v->nir_system_values[SYSTEM_VALUE_WORK_GROUP_ID];
281 if (reg->file == BAD_FILE)
282 *reg = *v->emit_cs_work_group_id_setup();
283 break;
284
285 case nir_intrinsic_load_helper_invocation:
286 assert(v->stage == MESA_SHADER_FRAGMENT);
287 reg = &v->nir_system_values[SYSTEM_VALUE_HELPER_INVOCATION];
288 if (reg->file == BAD_FILE) {
289 const fs_builder abld =
290 v->bld.annotate("gl_HelperInvocation", NULL);
291
292 /* On Gen6+ (gl_HelperInvocation is only exposed on Gen7+) the
293 * pixel mask is in g1.7 of the thread payload.
294 *
295 * We move the per-channel pixel enable bit to the low bit of each
296 * channel by shifting the byte containing the pixel mask by the
297 * vector immediate 0x76543210UV.
298 *
299 * The region of <1,8,0> reads only 1 byte (the pixel masks for
300 * subspans 0 and 1) in SIMD8 and an additional byte (the pixel
301 * masks for 2 and 3) in SIMD16.
302 */
303 fs_reg shifted = abld.vgrf(BRW_REGISTER_TYPE_UW, 1);
304 abld.SHR(shifted,
305 stride(byte_offset(retype(brw_vec1_grf(1, 0),
306 BRW_REGISTER_TYPE_UB), 28),
307 1, 8, 0),
308 brw_imm_uv(0x76543210));
309
310 /* A set bit in the pixel mask means the channel is enabled, but
311 * that is the opposite of gl_HelperInvocation so we need to invert
312 * the mask.
313 *
314 * The negate source-modifier bit of logical instructions on Gen8+
315 * performs 1's complement negation, so we can use that instead of
316 * a NOT instruction.
317 */
318 fs_reg inverted = negate(shifted);
319 if (v->devinfo->gen < 8) {
320 inverted = abld.vgrf(BRW_REGISTER_TYPE_UW);
321 abld.NOT(inverted, shifted);
322 }
323
324 /* We then resolve the 0/1 result to 0/~0 boolean values by ANDing
325 * with 1 and negating.
326 */
327 fs_reg anded = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
328 abld.AND(anded, inverted, brw_imm_uw(1));
329
330 fs_reg dst = abld.vgrf(BRW_REGISTER_TYPE_D, 1);
331 abld.MOV(dst, negate(retype(anded, BRW_REGISTER_TYPE_D)));
332 *reg = dst;
333 }
334 break;
335
336 default:
337 break;
338 }
339 }
340
341 return true;
342 }
343
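/**
* Walk every block of the shader and pre-generate the registers holding
* any system values it uses, before the main translation pass runs.
*/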
344 void
345 fs_visitor::nir_emit_system_values()
346 {
347 nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
348 for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
349 nir_system_values[i] = fs_reg();
350 }
351
352 nir_foreach_function(function, nir) {
353 assert(strcmp(function->name, "main") == 0);
354 assert(function->impl);
355 nir_foreach_block(block, function->impl) {
356 emit_system_values_block(block, this);
357 }
358 }
359 }
360
361 void
362 fs_visitor::nir_emit_impl(nir_function_impl *impl)
363 {
364 nir_locals = ralloc_array(mem_ctx, fs_reg, impl->reg_alloc);
365 for (unsigned i = 0; i < impl->reg_alloc; i++) {
366 nir_locals[i] = fs_reg();
367 }
368
369 foreach_list_typed(nir_register, reg, node, &impl->registers) {
370 unsigned array_elems =
371 reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
372 unsigned size = array_elems * reg->num_components;
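/* Allocate 32-bit registers as float and 64-bit registers as double;
* individual instructions retype their operands as needed.
*/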
373 const brw_reg_type reg_type =
374 reg->bit_size == 32 ? BRW_REGISTER_TYPE_F : BRW_REGISTER_TYPE_DF;
375 nir_locals[reg->index] = bld.vgrf(reg_type, size);
376 }
377
378 nir_ssa_values = reralloc(mem_ctx, nir_ssa_values, fs_reg,
379 impl->ssa_alloc);
380
381 nir_emit_cf_list(&impl->body);
382 }
383
384 void
385 fs_visitor::nir_emit_cf_list(exec_list *list)
386 {
387 exec_list_validate(list);
388 foreach_list_typed(nir_cf_node, node, node, list) {
389 switch (node->type) {
390 case nir_cf_node_if:
391 nir_emit_if(nir_cf_node_as_if(node));
392 break;
393
394 case nir_cf_node_loop:
395 nir_emit_loop(nir_cf_node_as_loop(node));
396 break;
397
398 case nir_cf_node_block:
399 nir_emit_block(nir_cf_node_as_block(node));
400 break;
401
402 default:
403 unreachable("Invalid CFG node block");
404 }
405 }
406 }
407
408 void
409 fs_visitor::nir_emit_if(nir_if *if_stmt)
410 {
411 /* first, put the condition into f0 */
412 fs_inst *inst = bld.MOV(bld.null_reg_d(),
413 retype(get_nir_src(if_stmt->condition),
414 BRW_REGISTER_TYPE_D));
415 inst->conditional_mod = BRW_CONDITIONAL_NZ;
416
417 bld.IF(BRW_PREDICATE_NORMAL);
418
419 nir_emit_cf_list(&if_stmt->then_list);
420
421 /* note: if the else is empty, dead CF elimination will remove it */
422 bld.emit(BRW_OPCODE_ELSE);
423
424 nir_emit_cf_list(&if_stmt->else_list);
425
426 bld.emit(BRW_OPCODE_ENDIF);
427 }
428
429 void
430 fs_visitor::nir_emit_loop(nir_loop *loop)
431 {
432 bld.emit(BRW_OPCODE_DO);
433
434 nir_emit_cf_list(&loop->body);
435
436 bld.emit(BRW_OPCODE_WHILE);
437 }
438
439 void
440 fs_visitor::nir_emit_block(nir_block *block)
441 {
442 nir_foreach_instr(instr, block) {
443 nir_emit_instr(instr);
444 }
445 }
446
447 void
448 fs_visitor::nir_emit_instr(nir_instr *instr)
449 {
450 const fs_builder abld = bld.annotate(NULL, instr);
451
452 switch (instr->type) {
453 case nir_instr_type_alu:
454 nir_emit_alu(abld, nir_instr_as_alu(instr));
455 break;
456
457 case nir_instr_type_intrinsic:
458 switch (stage) {
459 case MESA_SHADER_VERTEX:
460 nir_emit_vs_intrinsic(abld, nir_instr_as_intrinsic(instr));
461 break;
462 case MESA_SHADER_TESS_CTRL:
463 nir_emit_tcs_intrinsic(abld, nir_instr_as_intrinsic(instr));
464 break;
465 case MESA_SHADER_TESS_EVAL:
466 nir_emit_tes_intrinsic(abld, nir_instr_as_intrinsic(instr));
467 break;
468 case MESA_SHADER_GEOMETRY:
469 nir_emit_gs_intrinsic(abld, nir_instr_as_intrinsic(instr));
470 break;
471 case MESA_SHADER_FRAGMENT:
472 nir_emit_fs_intrinsic(abld, nir_instr_as_intrinsic(instr));
473 break;
474 case MESA_SHADER_COMPUTE:
475 nir_emit_cs_intrinsic(abld, nir_instr_as_intrinsic(instr));
476 break;
477 default:
478 unreachable("unsupported shader stage");
479 }
480 break;
481
482 case nir_instr_type_tex:
483 nir_emit_texture(abld, nir_instr_as_tex(instr));
484 break;
485
486 case nir_instr_type_load_const:
487 nir_emit_load_const(abld, nir_instr_as_load_const(instr));
488 break;
489
490 case nir_instr_type_ssa_undef:
491 nir_emit_undef(abld, nir_instr_as_ssa_undef(instr));
492 break;
493
494 case nir_instr_type_jump:
495 nir_emit_jump(abld, nir_instr_as_jump(instr));
496 break;
497
498 default:
499 unreachable("unknown instruction type");
500 }
501 }
502
503 /**
504 * Recognizes a parent instruction of nir_op_extract_* and changes the type to
505 * match instr.
506 */
507 bool
508 fs_visitor::optimize_extract_to_float(nir_alu_instr *instr,
509 const fs_reg &result)
510 {
511 if (!instr->src[0].src.is_ssa ||
512 !instr->src[0].src.ssa->parent_instr)
513 return false;
514
515 if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
516 return false;
517
518 nir_alu_instr *src0 =
519 nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
520
521 if (src0->op != nir_op_extract_u8 && src0->op != nir_op_extract_u16 &&
522 src0->op != nir_op_extract_i8 && src0->op != nir_op_extract_i16)
523 return false;
524
525 nir_const_value *element = nir_src_as_const_value(src0->src[1].src);
526 assert(element != NULL);
527
528 enum opcode extract_op;
529 if (src0->op == nir_op_extract_u16 || src0->op == nir_op_extract_i16) {
530 assert(element->u32[0] <= 1);
531 extract_op = SHADER_OPCODE_EXTRACT_WORD;
532 } else {
533 assert(element->u32[0] <= 3);
534 extract_op = SHADER_OPCODE_EXTRACT_BYTE;
535 }
536
537 fs_reg op0 = get_nir_src(src0->src[0].src);
538 op0.type = brw_type_for_nir_type(
539 (nir_alu_type)(nir_op_infos[src0->op].input_types[0] |
540 nir_src_bit_size(src0->src[0].src)));
541 op0 = offset(op0, bld, src0->src[0].swizzle[0]);
542
543 set_saturate(instr->dest.saturate,
544 bld.emit(extract_op, result, op0, brw_imm_ud(element->u32[0])));
545 return true;
546 }
547
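/**
* Recognize (gl_FrontFacing ? +/-1.0 : -/+1.0) and turn it into an OR/AND
* pair on the front-facing bit of the thread payload instead of a full
* compare-and-select.
*/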
548 bool
549 fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
550 const fs_reg &result)
551 {
552 if (!instr->src[0].src.is_ssa ||
553 instr->src[0].src.ssa->parent_instr->type != nir_instr_type_intrinsic)
554 return false;
555
556 nir_intrinsic_instr *src0 =
557 nir_instr_as_intrinsic(instr->src[0].src.ssa->parent_instr);
558
559 if (src0->intrinsic != nir_intrinsic_load_front_face)
560 return false;
561
562 nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
563 if (!value1 || fabsf(value1->f32[0]) != 1.0f)
564 return false;
565
566 nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
567 if (!value2 || fabsf(value2->f32[0]) != 1.0f)
568 return false;
569
570 fs_reg tmp = vgrf(glsl_type::int_type);
571
572 if (devinfo->gen >= 6) {
573 /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
574 fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
575
576 /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
577 *
578 * or(8) tmp.1<2>W g0.0<0,1,0>W 0x00003f80W
579 * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
580 *
581 * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
582 *
583 * This negation looks like it's safe in practice, because bits 0:4 will
584 * surely be TRIANGLES
585 */
586
587 if (value1->f32[0] == -1.0f) {
588 g0.negate = true;
589 }
590
591 tmp.type = BRW_REGISTER_TYPE_W;
592 tmp.subreg_offset = 2;
593 tmp.stride = 2;
594
595 bld.OR(tmp, g0, brw_imm_uw(0x3f80));
596
597 tmp.type = BRW_REGISTER_TYPE_D;
598 tmp.subreg_offset = 0;
599 tmp.stride = 1;
600 } else {
601 /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
602 fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
603
604 /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
605 *
606 * or(8) tmp<1>D g1.6<0,1,0>D 0x3f800000D
607 * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
608 *
609 * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
610 *
611 * This negation looks like it's safe in practice, because bits 0:4 will
612 * surely be TRIANGLES
613 */
614
615 if (value1->f32[0] == -1.0f) {
616 g1_6.negate = true;
617 }
618
619 bld.OR(tmp, g1_6, brw_imm_d(0x3f800000));
620 }
621 bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, brw_imm_d(0xbf800000));
622
623 return true;
624 }
625
626 void
627 fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
628 {
629 struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
630 fs_inst *inst;
631
632 fs_reg result = get_nir_dest(instr->dest.dest);
633 result.type = brw_type_for_nir_type(
634 (nir_alu_type)(nir_op_infos[instr->op].output_type |
635 nir_dest_bit_size(instr->dest.dest)));
636
637 fs_reg op[4];
638 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
639 op[i] = get_nir_src(instr->src[i].src);
640 op[i].type = brw_type_for_nir_type(
641 (nir_alu_type)(nir_op_infos[instr->op].input_types[i] |
642 nir_src_bit_size(instr->src[i].src)));
643 op[i].abs = instr->src[i].abs;
644 op[i].negate = instr->src[i].negate;
645 }
646
647 /* We get a bunch of mov's out of the from_ssa pass and they may still
648 * be vectorized. We'll handle them as a special case. We'll also
649 * handle vecN here because it's basically the same thing.
650 */
651 switch (instr->op) {
652 case nir_op_imov:
653 case nir_op_fmov:
654 case nir_op_vec2:
655 case nir_op_vec3:
656 case nir_op_vec4: {
657 fs_reg temp = result;
658 bool need_extra_copy = false;
659 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
660 if (!instr->src[i].src.is_ssa &&
661 instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
662 need_extra_copy = true;
663 temp = bld.vgrf(result.type, 4);
664 break;
665 }
666 }
667
668 for (unsigned i = 0; i < 4; i++) {
669 if (!(instr->dest.write_mask & (1 << i)))
670 continue;
671
672 if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
673 inst = bld.MOV(offset(temp, bld, i),
674 offset(op[0], bld, instr->src[0].swizzle[i]));
675 } else {
676 inst = bld.MOV(offset(temp, bld, i),
677 offset(op[i], bld, instr->src[i].swizzle[0]));
678 }
679 inst->saturate = instr->dest.saturate;
680 }
681
682 /* In this case the source and destination registers were the same,
683 * so we need to insert an extra set of moves in order to deal with
684 * any swizzling.
685 */
686 if (need_extra_copy) {
687 for (unsigned i = 0; i < 4; i++) {
688 if (!(instr->dest.write_mask & (1 << i)))
689 continue;
690
691 bld.MOV(offset(result, bld, i), offset(temp, bld, i));
692 }
693 }
694 return;
695 }
696 default:
697 break;
698 }
699
700 /* At this point, we have dealt with any instruction that operates on
701 * more than a single channel. Therefore, we can just adjust the source
702 * and destination registers for that channel and emit the instruction.
703 */
704 unsigned channel = 0;
705 if (nir_op_infos[instr->op].output_size == 0) {
706 /* Since NIR is doing the scalarizing for us, we should only ever see
707 * vectorized operations with a single channel.
708 */
709 assert(_mesa_bitcount(instr->dest.write_mask) == 1);
710 channel = ffs(instr->dest.write_mask) - 1;
711
712 result = offset(result, bld, channel);
713 }
714
715 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
716 assert(nir_op_infos[instr->op].input_sizes[i] < 2);
717 op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
718 }
719
720 switch (instr->op) {
721 case nir_op_i2f:
722 case nir_op_u2f:
723 if (optimize_extract_to_float(instr, result))
724 return;
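/* Fall through: the MOV below also handles the int-to-float case. */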
725
726 case nir_op_f2d:
727 case nir_op_i2d:
728 case nir_op_u2d:
729 case nir_op_d2f:
730 case nir_op_d2i:
731 case nir_op_d2u:
732 inst = bld.MOV(result, op[0]);
733 inst->saturate = instr->dest.saturate;
734 break;
735
736 case nir_op_f2i:
737 case nir_op_f2u:
738 bld.MOV(result, op[0]);
739 break;
740
741 case nir_op_fsign: {
742 if (type_sz(op[0].type) < 8) {
743 /* AND(val, 0x80000000) gives the sign bit.
744 *
745 * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
746 * zero.
747 */
748 bld.CMP(bld.null_reg_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ);
749
750 fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
751 op[0].type = BRW_REGISTER_TYPE_UD;
752 result.type = BRW_REGISTER_TYPE_UD;
753 bld.AND(result_int, op[0], brw_imm_ud(0x80000000u));
754
755 inst = bld.OR(result_int, result_int, brw_imm_ud(0x3f800000u));
756 inst->predicate = BRW_PREDICATE_NORMAL;
757 if (instr->dest.saturate) {
758 inst = bld.MOV(result, result);
759 inst->saturate = true;
760 }
761 } else {
762 /* For doubles we do the same but we need to consider:
763 *
764 * - 2-src instructions can't operate with 64-bit immediates
765 * - The sign is encoded in the high 32-bit of each DF
766 * - CMP with DF requires special handling in SIMD16
767 * - We need to produce a DF result.
768 */
769
770 /* 2-src instructions can't have 64-bit immediates, so put 0.0 in
771 * a register and compare with that.
772 */
773 fs_reg tmp = vgrf(glsl_type::double_type);
774 bld.MOV(tmp, brw_imm_df(0.0));
775
776 /* A direct DF CMP using the flag register (null dst) won't work in
777 * SIMD16 because the CMP will be split in two by lower_simd_width,
778 * resulting in two CMP instructions with the same dst (NULL),
779 * leading to dead code elimination of the first one. In SIMD8,
780 * however, there is no need to split the CMP and we can save some
781 * work.
782 */
783 fs_reg dst_tmp = vgrf(glsl_type::double_type);
784 bld.CMP(dst_tmp, op[0], tmp, BRW_CONDITIONAL_NZ);
785
786 /* In SIMD16 we want to avoid using a NULL dst register with DF CMP,
787 * so we store the result of the comparison in a vgrf instead and
788 * then we generate a UD comparison from that that won't have to
789 * be split by lower_simd_width. This is what NIR does to handle
790 * double comparisons in the general case.
791 */
792 if (bld.dispatch_width() == 16) {
793 fs_reg dst_tmp_ud = retype(dst_tmp, BRW_REGISTER_TYPE_UD);
794 bld.MOV(dst_tmp_ud, subscript(dst_tmp, BRW_REGISTER_TYPE_UD, 0));
795 bld.CMP(bld.null_reg_ud(),
796 dst_tmp_ud, brw_imm_ud(0), BRW_CONDITIONAL_NZ);
797 }
798
799 /* Get the high 32-bit of each double component where the sign is */
800 fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
801 bld.MOV(result_int, subscript(op[0], BRW_REGISTER_TYPE_UD, 1));
802
803 /* Get the sign bit */
804 bld.AND(result_int, result_int, brw_imm_ud(0x80000000u));
805
806 /* Add 1.0 to the sign, predicated to skip the case of op[0] == 0.0 */
807 inst = bld.OR(result_int, result_int, brw_imm_ud(0x3f800000u));
808 inst->predicate = BRW_PREDICATE_NORMAL;
809
810 /* Convert from 32-bit float to 64-bit double */
811 result.type = BRW_REGISTER_TYPE_DF;
812 inst = bld.MOV(result, retype(result_int, BRW_REGISTER_TYPE_F));
813
814 if (instr->dest.saturate) {
815 inst = bld.MOV(result, result);
816 inst->saturate = true;
817 }
818 }
819 break;
820 }
821
822 case nir_op_isign:
823 /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
824 * -> non-negative val generates 0x00000000.
825 * Predicated OR sets 1 if val is positive.
826 */
827 assert(nir_dest_bit_size(instr->dest.dest) < 64);
828 bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_G);
829 bld.ASR(result, op[0], brw_imm_d(31));
830 inst = bld.OR(result, result, brw_imm_d(1));
831 inst->predicate = BRW_PREDICATE_NORMAL;
832 break;
833
834 case nir_op_frcp:
835 inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
836 inst->saturate = instr->dest.saturate;
837 break;
838
839 case nir_op_fexp2:
840 inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
841 inst->saturate = instr->dest.saturate;
842 break;
843
844 case nir_op_flog2:
845 inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
846 inst->saturate = instr->dest.saturate;
847 break;
848
849 case nir_op_fsin:
850 inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
851 inst->saturate = instr->dest.saturate;
852 break;
853
854 case nir_op_fcos:
855 inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
856 inst->saturate = instr->dest.saturate;
857 break;
858
859 case nir_op_fddx:
860 if (fs_key->high_quality_derivatives) {
861 inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
862 } else {
863 inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
864 }
865 inst->saturate = instr->dest.saturate;
866 break;
867 case nir_op_fddx_fine:
868 inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
869 inst->saturate = instr->dest.saturate;
870 break;
871 case nir_op_fddx_coarse:
872 inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
873 inst->saturate = instr->dest.saturate;
874 break;
875 case nir_op_fddy:
876 if (fs_key->high_quality_derivatives) {
877 inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
878 brw_imm_d(fs_key->render_to_fbo));
879 } else {
880 inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
881 brw_imm_d(fs_key->render_to_fbo));
882 }
883 inst->saturate = instr->dest.saturate;
884 break;
885 case nir_op_fddy_fine:
886 inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
887 brw_imm_d(fs_key->render_to_fbo));
888 inst->saturate = instr->dest.saturate;
889 break;
890 case nir_op_fddy_coarse:
891 inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
892 brw_imm_d(fs_key->render_to_fbo));
893 inst->saturate = instr->dest.saturate;
894 break;
895
896 case nir_op_iadd:
897 assert(nir_dest_bit_size(instr->dest.dest) < 64);
898 case nir_op_fadd:
899 inst = bld.ADD(result, op[0], op[1]);
900 inst->saturate = instr->dest.saturate;
901 break;
902
903 case nir_op_fmul:
904 inst = bld.MUL(result, op[0], op[1]);
905 inst->saturate = instr->dest.saturate;
906 break;
907
908 case nir_op_imul:
909 assert(nir_dest_bit_size(instr->dest.dest) < 64);
910 bld.MUL(result, op[0], op[1]);
911 break;
912
913 case nir_op_imul_high:
914 case nir_op_umul_high:
915 assert(nir_dest_bit_size(instr->dest.dest) < 64);
916 bld.emit(SHADER_OPCODE_MULH, result, op[0], op[1]);
917 break;
918
919 case nir_op_idiv:
920 case nir_op_udiv:
921 assert(nir_dest_bit_size(instr->dest.dest) < 64);
922 bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
923 break;
924
925 case nir_op_uadd_carry:
926 unreachable("Should have been lowered by carry_to_arith().");
927
928 case nir_op_usub_borrow:
929 unreachable("Should have been lowered by borrow_to_arith().");
930
931 case nir_op_umod:
932 case nir_op_irem:
933 /* According to the sign table for INT DIV in the Ivy Bridge PRM, it
934 * appears that our hardware just does the right thing for signed
935 * remainder.
936 */
937 assert(nir_dest_bit_size(instr->dest.dest) < 64);
938 bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
939 break;
940
941 case nir_op_imod: {
942 /* Get a regular C-style remainder. If a % b != 0, set the predicate. */
943 bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
944
945 /* Math instructions don't support conditional mod */
946 inst = bld.MOV(bld.null_reg_d(), result);
947 inst->conditional_mod = BRW_CONDITIONAL_NZ;
948
949 /* Now, we need to determine if signs of the sources are different.
950 * When we XOR the sources, the top bit is 0 if they are the same and 1
951 * if they are different. We can then use a conditional modifier to
952 * turn that into a predicate. This leads us to an XOR.l instruction.
953 *
954 * Technically, according to the PRM, you're not allowed to use .l on an
955 * XOR instruction. However, empirical experiments and Curro's reading
956 * of the simulator source both indicate that it's safe.
957 */
958 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_D);
959 inst = bld.XOR(tmp, op[0], op[1]);
960 inst->predicate = BRW_PREDICATE_NORMAL;
961 inst->conditional_mod = BRW_CONDITIONAL_L;
962
963 /* If the result of the initial remainder operation is non-zero and the
964 * two sources have different signs, add in a copy of op[1] to get the
965 * final integer modulus value.
966 */
967 inst = bld.ADD(result, result, op[1]);
968 inst->predicate = BRW_PREDICATE_NORMAL;
969 break;
970 }
971
972 case nir_op_flt:
973 case nir_op_fge:
974 case nir_op_feq:
975 case nir_op_fne: {
976 fs_reg dest = result;
977 if (nir_src_bit_size(instr->src[0].src) > 32) {
978 dest = bld.vgrf(BRW_REGISTER_TYPE_DF, 1);
979 }
980 brw_conditional_mod cond;
981 switch (instr->op) {
982 case nir_op_flt:
983 cond = BRW_CONDITIONAL_L;
984 break;
985 case nir_op_fge:
986 cond = BRW_CONDITIONAL_GE;
987 break;
988 case nir_op_feq:
989 cond = BRW_CONDITIONAL_Z;
990 break;
991 case nir_op_fne:
992 cond = BRW_CONDITIONAL_NZ;
993 break;
994 default:
995 unreachable("bad opcode");
996 }
997 bld.CMP(dest, op[0], op[1], cond);
998 if (nir_src_bit_size(instr->src[0].src) > 32) {
999 bld.MOV(result, subscript(dest, BRW_REGISTER_TYPE_UD, 0));
1000 }
1001 break;
1002 }
1003
1004 case nir_op_ilt:
1005 case nir_op_ult:
1006 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1007 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_L);
1008 break;
1009
1010 case nir_op_ige:
1011 case nir_op_uge:
1012 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1013 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_GE);
1014 break;
1015
1016 case nir_op_ieq:
1017 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1018 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_Z);
1019 break;
1020
1021 case nir_op_ine:
1022 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1023 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ);
1024 break;
1025
1026 case nir_op_inot:
1027 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1028 if (devinfo->gen >= 8) {
1029 op[0] = resolve_source_modifiers(op[0]);
1030 }
1031 bld.NOT(result, op[0]);
1032 break;
1033 case nir_op_ixor:
1034 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1035 if (devinfo->gen >= 8) {
1036 op[0] = resolve_source_modifiers(op[0]);
1037 op[1] = resolve_source_modifiers(op[1]);
1038 }
1039 bld.XOR(result, op[0], op[1]);
1040 break;
1041 case nir_op_ior:
1042 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1043 if (devinfo->gen >= 8) {
1044 op[0] = resolve_source_modifiers(op[0]);
1045 op[1] = resolve_source_modifiers(op[1]);
1046 }
1047 bld.OR(result, op[0], op[1]);
1048 break;
1049 case nir_op_iand:
1050 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1051 if (devinfo->gen >= 8) {
1052 op[0] = resolve_source_modifiers(op[0]);
1053 op[1] = resolve_source_modifiers(op[1]);
1054 }
1055 bld.AND(result, op[0], op[1]);
1056 break;
1057
1058 case nir_op_fdot2:
1059 case nir_op_fdot3:
1060 case nir_op_fdot4:
1061 case nir_op_ball_fequal2:
1062 case nir_op_ball_iequal2:
1063 case nir_op_ball_fequal3:
1064 case nir_op_ball_iequal3:
1065 case nir_op_ball_fequal4:
1066 case nir_op_ball_iequal4:
1067 case nir_op_bany_fnequal2:
1068 case nir_op_bany_inequal2:
1069 case nir_op_bany_fnequal3:
1070 case nir_op_bany_inequal3:
1071 case nir_op_bany_fnequal4:
1072 case nir_op_bany_inequal4:
1073 unreachable("Lowered by nir_lower_alu_reductions");
1074
1075 case nir_op_fnoise1_1:
1076 case nir_op_fnoise1_2:
1077 case nir_op_fnoise1_3:
1078 case nir_op_fnoise1_4:
1079 case nir_op_fnoise2_1:
1080 case nir_op_fnoise2_2:
1081 case nir_op_fnoise2_3:
1082 case nir_op_fnoise2_4:
1083 case nir_op_fnoise3_1:
1084 case nir_op_fnoise3_2:
1085 case nir_op_fnoise3_3:
1086 case nir_op_fnoise3_4:
1087 case nir_op_fnoise4_1:
1088 case nir_op_fnoise4_2:
1089 case nir_op_fnoise4_3:
1090 case nir_op_fnoise4_4:
1091 unreachable("not reached: should be handled by lower_noise");
1092
1093 case nir_op_ldexp:
1094 unreachable("not reached: should be handled by ldexp_to_arith()");
1095
1096 case nir_op_fsqrt:
1097 inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
1098 inst->saturate = instr->dest.saturate;
1099 break;
1100
1101 case nir_op_frsq:
1102 inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
1103 inst->saturate = instr->dest.saturate;
1104 break;
1105
1106 case nir_op_b2i:
1107 case nir_op_b2f:
1108 bld.MOV(result, negate(op[0]));
1109 break;
1110
1111 case nir_op_f2b:
1112 bld.CMP(result, op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ);
1113 break;
1114 case nir_op_d2b: {
1115 /* two-argument instructions can't take 64-bit immediates */
1116 fs_reg zero = vgrf(glsl_type::double_type);
1117 bld.MOV(zero, brw_imm_df(0.0));
1118 /* A SIMD16 execution needs to be split in two instructions, so use
1119 * a vgrf instead of the flag register as dst so instruction splitting
1120 * works
1121 */
1122 fs_reg tmp = vgrf(glsl_type::double_type);
1123 bld.CMP(tmp, op[0], zero, BRW_CONDITIONAL_NZ);
1124 bld.MOV(result, subscript(tmp, BRW_REGISTER_TYPE_UD, 0));
1125 break;
1126 }
1127 case nir_op_i2b:
1128 bld.CMP(result, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ);
1129 break;
1130
1131 case nir_op_ftrunc:
1132 inst = bld.RNDZ(result, op[0]);
1133 inst->saturate = instr->dest.saturate;
1134 break;
1135
1136 case nir_op_fceil: {
1137 op[0].negate = !op[0].negate;
1138 fs_reg temp = vgrf(glsl_type::float_type);
1139 bld.RNDD(temp, op[0]);
1140 temp.negate = true;
1141 inst = bld.MOV(result, temp);
1142 inst->saturate = instr->dest.saturate;
1143 break;
1144 }
1145 case nir_op_ffloor:
1146 inst = bld.RNDD(result, op[0]);
1147 inst->saturate = instr->dest.saturate;
1148 break;
1149 case nir_op_ffract:
1150 inst = bld.FRC(result, op[0]);
1151 inst->saturate = instr->dest.saturate;
1152 break;
1153 case nir_op_fround_even:
1154 inst = bld.RNDE(result, op[0]);
1155 inst->saturate = instr->dest.saturate;
1156 break;
1157
1158 case nir_op_fquantize2f16: {
1159 fs_reg tmp16 = bld.vgrf(BRW_REGISTER_TYPE_D);
1160 fs_reg tmp32 = bld.vgrf(BRW_REGISTER_TYPE_F);
1161 fs_reg zero = bld.vgrf(BRW_REGISTER_TYPE_F);
1162
1163 /* The destination stride must be at least as big as the source stride. */
1164 tmp16.type = BRW_REGISTER_TYPE_W;
1165 tmp16.stride = 2;
1166
1167 /* Check for denormal */
1168 fs_reg abs_src0 = op[0];
1169 abs_src0.abs = true;
1170 bld.CMP(bld.null_reg_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
1171 BRW_CONDITIONAL_L);
1172 /* Get the appropriately signed zero */
1173 bld.AND(retype(zero, BRW_REGISTER_TYPE_UD),
1174 retype(op[0], BRW_REGISTER_TYPE_UD),
1175 brw_imm_ud(0x80000000));
1176 /* Do the actual F32 -> F16 -> F32 conversion */
1177 bld.emit(BRW_OPCODE_F32TO16, tmp16, op[0]);
1178 bld.emit(BRW_OPCODE_F16TO32, tmp32, tmp16);
1179 /* Select that or zero based on normal status */
1180 inst = bld.SEL(result, zero, tmp32);
1181 inst->predicate = BRW_PREDICATE_NORMAL;
1182 inst->saturate = instr->dest.saturate;
1183 break;
1184 }
1185
1186 case nir_op_imin:
1187 case nir_op_umin:
1188 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1189 case nir_op_fmin:
1190 inst = bld.emit_minmax(result, op[0], op[1], BRW_CONDITIONAL_L);
1191 inst->saturate = instr->dest.saturate;
1192 break;
1193
1194 case nir_op_imax:
1195 case nir_op_umax:
1196 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1197 case nir_op_fmax:
1198 inst = bld.emit_minmax(result, op[0], op[1], BRW_CONDITIONAL_GE);
1199 inst->saturate = instr->dest.saturate;
1200 break;
1201
1202 case nir_op_pack_snorm_2x16:
1203 case nir_op_pack_snorm_4x8:
1204 case nir_op_pack_unorm_2x16:
1205 case nir_op_pack_unorm_4x8:
1206 case nir_op_unpack_snorm_2x16:
1207 case nir_op_unpack_snorm_4x8:
1208 case nir_op_unpack_unorm_2x16:
1209 case nir_op_unpack_unorm_4x8:
1210 case nir_op_unpack_half_2x16:
1211 case nir_op_pack_half_2x16:
1212 unreachable("not reached: should be handled by lower_packing_builtins");
1213
1214 case nir_op_unpack_half_2x16_split_x:
1215 inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
1216 inst->saturate = instr->dest.saturate;
1217 break;
1218 case nir_op_unpack_half_2x16_split_y:
1219 inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
1220 inst->saturate = instr->dest.saturate;
1221 break;
1222
1223 case nir_op_pack_double_2x32_split:
1224 /* Optimize the common case where we are re-packing a double with
1225 * the result of a previous double unpack. In this case we can take the
1226 * 32-bit value to use in the re-pack from the original double and bypass
1227 * the unpack operation.
1228 */
1229 for (int i = 0; i < 2; i++) {
1230 if (!instr->src[i].src.is_ssa)
1231 continue;
1232
1233 const nir_instr *parent_instr = instr->src[i].src.ssa->parent_instr;
1234 if (parent_instr->type != nir_instr_type_alu)
1235 continue;
1236
1237 const nir_alu_instr *alu_parent = nir_instr_as_alu(parent_instr);
1238 if (alu_parent->op != nir_op_unpack_double_2x32_split_x &&
1239 alu_parent->op != nir_op_unpack_double_2x32_split_y)
1240 continue;
1241
1242 if (!alu_parent->src[0].src.is_ssa)
1243 continue;
1244
1245 op[i] = get_nir_src(alu_parent->src[0].src);
1246 op[i] = offset(retype(op[i], BRW_REGISTER_TYPE_DF), bld,
1247 alu_parent->src[0].swizzle[channel]);
1248 if (alu_parent->op == nir_op_unpack_double_2x32_split_y)
1249 op[i] = subscript(op[i], BRW_REGISTER_TYPE_UD, 1);
1250 else
1251 op[i] = subscript(op[i], BRW_REGISTER_TYPE_UD, 0);
1252 }
1253 bld.emit(FS_OPCODE_PACK, result, op[0], op[1]);
1254 break;
1255
1256 case nir_op_unpack_double_2x32_split_x:
1257 case nir_op_unpack_double_2x32_split_y: {
1258 /* Optimize the common case where we are unpacking from a double we have
1259 * previously packed. In this case we can just bypass the pack operation
1260 * and source directly from its arguments.
1261 */
1262 unsigned index = (instr->op == nir_op_unpack_double_2x32_split_x) ? 0 : 1;
1263 if (instr->src[0].src.is_ssa) {
1264 nir_instr *parent_instr = instr->src[0].src.ssa->parent_instr;
1265 if (parent_instr->type == nir_instr_type_alu) {
1266 nir_alu_instr *alu_parent = nir_instr_as_alu(parent_instr);
1267 if (alu_parent->op == nir_op_pack_double_2x32_split &&
1268 alu_parent->src[index].src.is_ssa) {
1269 op[0] = retype(get_nir_src(alu_parent->src[index].src),
1270 BRW_REGISTER_TYPE_UD);
1271 op[0] =
1272 offset(op[0], bld, alu_parent->src[index].swizzle[channel]);
1273 bld.MOV(result, op[0]);
1274 break;
1275 }
1276 }
1277 }
1278
1279 if (instr->op == nir_op_unpack_double_2x32_split_x)
1280 bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UD, 0));
1281 else
1282 bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UD, 1));
1283 break;
1284 }
1285
1286 case nir_op_fpow:
1287 inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
1288 inst->saturate = instr->dest.saturate;
1289 break;
1290
1291 case nir_op_bitfield_reverse:
1292 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1293 bld.BFREV(result, op[0]);
1294 break;
1295
1296 case nir_op_bit_count:
1297 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1298 bld.CBIT(result, op[0]);
1299 break;
1300
1301 case nir_op_ufind_msb:
1302 case nir_op_ifind_msb: {
1303 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1304 bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);
1305
1306 /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
1307 * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
1308 * subtract the result from 31 to convert the MSB count into an LSB count.
1309 */
1310 bld.CMP(bld.null_reg_d(), result, brw_imm_d(-1), BRW_CONDITIONAL_NZ);
1311
1312 inst = bld.ADD(result, result, brw_imm_d(31));
1313 inst->predicate = BRW_PREDICATE_NORMAL;
1314 inst->src[0].negate = true;
1315 break;
1316 }
1317
1318 case nir_op_find_lsb:
1319 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1320 bld.FBL(result, op[0]);
1321 break;
1322
1323 case nir_op_ubitfield_extract:
1324 case nir_op_ibitfield_extract:
1325 unreachable("should have been lowered");
1326 case nir_op_ubfe:
1327 case nir_op_ibfe:
1328 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1329 bld.BFE(result, op[2], op[1], op[0]);
1330 break;
1331 case nir_op_bfm:
1332 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1333 bld.BFI1(result, op[0], op[1]);
1334 break;
1335 case nir_op_bfi:
1336 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1337 bld.BFI2(result, op[0], op[1], op[2]);
1338 break;
1339
1340 case nir_op_bitfield_insert:
1341 unreachable("not reached: should have been lowered");
1342
1343 case nir_op_ishl:
1344 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1345 bld.SHL(result, op[0], op[1]);
1346 break;
1347 case nir_op_ishr:
1348 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1349 bld.ASR(result, op[0], op[1]);
1350 break;
1351 case nir_op_ushr:
1352 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1353 bld.SHR(result, op[0], op[1]);
1354 break;
1355
1356 case nir_op_pack_half_2x16_split:
1357 bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
1358 break;
1359
1360 case nir_op_ffma:
1361 inst = bld.MAD(result, op[2], op[1], op[0]);
1362 inst->saturate = instr->dest.saturate;
1363 break;
1364
1365 case nir_op_flrp:
1366 inst = bld.LRP(result, op[0], op[1], op[2]);
1367 inst->saturate = instr->dest.saturate;
1368 break;
1369
1370 case nir_op_bcsel:
1371 if (optimize_frontfacing_ternary(instr, result))
1372 return;
1373
1374 bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ);
1375 inst = bld.SEL(result, op[1], op[2]);
1376 inst->predicate = BRW_PREDICATE_NORMAL;
1377 break;
1378
1379 case nir_op_extract_u8:
1380 case nir_op_extract_i8: {
1381 nir_const_value *byte = nir_src_as_const_value(instr->src[1].src);
1382 bld.emit(SHADER_OPCODE_EXTRACT_BYTE,
1383 result, op[0], brw_imm_ud(byte->u32[0]));
1384 break;
1385 }
1386
1387 case nir_op_extract_u16:
1388 case nir_op_extract_i16: {
1389 nir_const_value *word = nir_src_as_const_value(instr->src[1].src);
1390 bld.emit(SHADER_OPCODE_EXTRACT_WORD,
1391 result, op[0], brw_imm_ud(word->u32[0]));
1392 break;
1393 }
1394
1395 default:
1396 unreachable("unhandled instruction");
1397 }
1398
1399 /* If we need to do a boolean resolve, replace the result with -(x & 1)
1400 * to sign extend the low bit to 0/~0
1401 */
1402 if (devinfo->gen <= 5 &&
1403 (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
1404 fs_reg masked = vgrf(glsl_type::int_type);
1405 bld.AND(masked, result, brw_imm_d(1));
1406 masked.negate = true;
1407 bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
1408 }
1409 }
1410
1411 void
1412 fs_visitor::nir_emit_load_const(const fs_builder &bld,
1413 nir_load_const_instr *instr)
1414 {
1415 const brw_reg_type reg_type =
1416 instr->def.bit_size == 32 ? BRW_REGISTER_TYPE_D : BRW_REGISTER_TYPE_DF;
1417 fs_reg reg = bld.vgrf(reg_type, instr->def.num_components);
1418
1419 switch (instr->def.bit_size) {
1420 case 32:
1421 for (unsigned i = 0; i < instr->def.num_components; i++)
1422 bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value.i32[i]));
1423 break;
1424
1425 case 64:
1426 for (unsigned i = 0; i < instr->def.num_components; i++)
1427 bld.MOV(offset(reg, bld, i), brw_imm_df(instr->value.f64[i]));
1428 break;
1429
1430 default:
1431 unreachable("Invalid bit size");
1432 }
1433
1434 nir_ssa_values[instr->def.index] = reg;
1435 }
1436
1437 void
1438 fs_visitor::nir_emit_undef(const fs_builder &bld, nir_ssa_undef_instr *instr)
1439 {
1440 const brw_reg_type reg_type =
1441 instr->def.bit_size == 32 ? BRW_REGISTER_TYPE_D : BRW_REGISTER_TYPE_DF;
1442 nir_ssa_values[instr->def.index] =
1443 bld.vgrf(reg_type, instr->def.num_components);
1444 }
1445
1446 fs_reg
1447 fs_visitor::get_nir_src(nir_src src)
1448 {
1449 fs_reg reg;
1450 if (src.is_ssa) {
1451 reg = nir_ssa_values[src.ssa->index];
1452 } else {
1453 /* We don't handle indirects on locals */
1454 assert(src.reg.indirect == NULL);
1455 reg = offset(nir_locals[src.reg.reg->index], bld,
1456 src.reg.base_offset * src.reg.reg->num_components);
1457 }
1458
1459 /* to avoid floating-point denorm flushing problems, set the type by
1460 * default to D - instructions that need floating point semantics will set
1461 * this to F if they need to
1462 */
1463 return retype(reg, BRW_REGISTER_TYPE_D);
1464 }
1465
1466 fs_reg
1467 fs_visitor::get_nir_dest(nir_dest dest)
1468 {
1469 if (dest.is_ssa) {
1470 const brw_reg_type reg_type =
1471 dest.ssa.bit_size == 32 ? BRW_REGISTER_TYPE_F : BRW_REGISTER_TYPE_DF;
1472 nir_ssa_values[dest.ssa.index] =
1473 bld.vgrf(reg_type, dest.ssa.num_components);
1474 return nir_ssa_values[dest.ssa.index];
1475 } else {
1476 /* We don't handle indirects on locals */
1477 assert(dest.reg.indirect == NULL);
1478 return offset(nir_locals[dest.reg.reg->index], bld,
1479 dest.reg.base_offset * dest.reg.reg->num_components);
1480 }
1481 }
1482
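/**
* Resolve an image variable dereference to the location of its image
* uniform, emitting clamped indirect addressing when the deref chain
* contains indirect array indices.
*/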
1483 fs_reg
1484 fs_visitor::get_nir_image_deref(const nir_deref_var *deref)
1485 {
1486 fs_reg image(UNIFORM, deref->var->data.driver_location / 4,
1487 BRW_REGISTER_TYPE_UD);
1488 fs_reg indirect;
1489 unsigned indirect_max = 0;
1490
1491 for (const nir_deref *tail = &deref->deref; tail->child;
1492 tail = tail->child) {
1493 const nir_deref_array *deref_array = nir_deref_as_array(tail->child);
1494 assert(tail->child->deref_type == nir_deref_type_array);
1495 const unsigned size = glsl_get_length(tail->type);
1496 const unsigned element_size = type_size_scalar(deref_array->deref.type);
1497 const unsigned base = MIN2(deref_array->base_offset, size - 1);
1498 image = offset(image, bld, base * element_size);
1499
1500 if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
1501 fs_reg tmp = vgrf(glsl_type::uint_type);
1502
1503 /* Accessing an invalid surface index with the dataport can result
1504 * in a hang. According to the spec "if the index used to
1505 * select an individual element is negative or greater than or
1506 * equal to the size of the array, the results of the operation
1507 * are undefined but may not lead to termination" -- which is one
1508 * of the possible outcomes of the hang. Clamp the index to
1509 * prevent access outside of the array bounds.
1510 */
1511 bld.emit_minmax(tmp, retype(get_nir_src(deref_array->indirect),
1512 BRW_REGISTER_TYPE_UD),
1513 brw_imm_ud(size - base - 1), BRW_CONDITIONAL_L);
1514
1515 indirect_max += element_size * (tail->type->length - 1);
1516
1517 bld.MUL(tmp, tmp, brw_imm_ud(element_size * 4));
1518 if (indirect.file == BAD_FILE) {
1519 indirect = tmp;
1520 } else {
1521 bld.ADD(indirect, indirect, tmp);
1522 }
1523 }
1524 }
1525
1526 if (indirect.file == BAD_FILE) {
1527 return image;
1528 } else {
1529 /* Emit a pile of MOVs to load the uniform into a temporary. The
1530 * dead-code elimination pass will get rid of what we don't use.
1531 */
1532 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, BRW_IMAGE_PARAM_SIZE);
1533 for (unsigned j = 0; j < BRW_IMAGE_PARAM_SIZE; j++) {
1534 bld.emit(SHADER_OPCODE_MOV_INDIRECT,
1535 offset(tmp, bld, j), offset(image, bld, j),
1536 indirect, brw_imm_ud((indirect_max + 1) * 4));
1537 }
1538 return tmp;
1539 }
1540 }
1541
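/**
* Emit one copy of the given instruction per component enabled in
* wr_mask, offsetting the destination and any VGRF sources to the
* matching component.
*/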
1542 void
1543 fs_visitor::emit_percomp(const fs_builder &bld, const fs_inst &inst,
1544 unsigned wr_mask)
1545 {
1546 for (unsigned i = 0; i < 4; i++) {
1547 if (!((wr_mask >> i) & 1))
1548 continue;
1549
1550 fs_inst *new_inst = new(mem_ctx) fs_inst(inst);
1551 new_inst->dst = offset(new_inst->dst, bld, i);
1552 for (unsigned j = 0; j < new_inst->sources; j++)
1553 if (new_inst->src[j].file == VGRF)
1554 new_inst->src[j] = offset(new_inst->src[j], bld, i);
1555
1556 bld.emit(new_inst);
1557 }
1558 }
1559
1560 /**
1561 * Get the matching channel register datatype for an image intrinsic of the
1562 * specified GLSL image type.
1563 */
1564 static brw_reg_type
1565 get_image_base_type(const glsl_type *type)
1566 {
1567 switch ((glsl_base_type)type->sampled_type) {
1568 case GLSL_TYPE_UINT:
1569 return BRW_REGISTER_TYPE_UD;
1570 case GLSL_TYPE_INT:
1571 return BRW_REGISTER_TYPE_D;
1572 case GLSL_TYPE_FLOAT:
1573 return BRW_REGISTER_TYPE_F;
1574 default:
1575 unreachable("Not reached.");
1576 }
1577 }
1578
1579 /**
1580 * Get the appropriate atomic op for an image atomic intrinsic.
1581 */
1582 static unsigned
1583 get_image_atomic_op(nir_intrinsic_op op, const glsl_type *type)
1584 {
1585 switch (op) {
1586 case nir_intrinsic_image_atomic_add:
1587 return BRW_AOP_ADD;
1588 case nir_intrinsic_image_atomic_min:
1589 return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
1590 BRW_AOP_IMIN : BRW_AOP_UMIN);
1591 case nir_intrinsic_image_atomic_max:
1592 return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
1593 BRW_AOP_IMAX : BRW_AOP_UMAX);
1594 case nir_intrinsic_image_atomic_and:
1595 return BRW_AOP_AND;
1596 case nir_intrinsic_image_atomic_or:
1597 return BRW_AOP_OR;
1598 case nir_intrinsic_image_atomic_xor:
1599 return BRW_AOP_XOR;
1600 case nir_intrinsic_image_atomic_exchange:
1601 return BRW_AOP_MOV;
1602 case nir_intrinsic_image_atomic_comp_swap:
1603 return BRW_AOP_CMPWR;
1604 default:
1605 unreachable("Not reachable.");
1606 }
1607 }
1608
1609 static fs_inst *
1610 emit_pixel_interpolater_send(const fs_builder &bld,
1611 enum opcode opcode,
1612 const fs_reg &dst,
1613 const fs_reg &src,
1614 const fs_reg &desc,
1615 glsl_interp_qualifier interpolation)
1616 {
1617 fs_inst *inst;
1618 fs_reg payload;
1619 int mlen;
1620
1621 if (src.file == BAD_FILE) {
1622 /* Dummy payload */
1623 payload = bld.vgrf(BRW_REGISTER_TYPE_F, 1);
1624 mlen = 1;
1625 } else {
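/* The payload holds two offset components per channel, i.e. two
* registers per SIMD8 group.
*/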
1626 payload = src;
1627 mlen = 2 * bld.dispatch_width() / 8;
1628 }
1629
1630 inst = bld.emit(opcode, dst, payload, desc);
1631 inst->mlen = mlen;
1632 /* 2 floats per slot returned */
1633 inst->regs_written = 2 * bld.dispatch_width() / 8;
1634 inst->pi_noperspective = interpolation == INTERP_QUALIFIER_NOPERSPECTIVE;
1635
1636 return inst;
1637 }
1638
1639 /**
1640 * Computes 1 << x, given a D/UD register containing some value x.
1641 */
1642 static fs_reg
1643 intexp2(const fs_builder &bld, const fs_reg &x)
1644 {
1645 assert(x.type == BRW_REGISTER_TYPE_UD || x.type == BRW_REGISTER_TYPE_D);
1646
1647 fs_reg result = bld.vgrf(x.type, 1);
1648 fs_reg one = bld.vgrf(x.type, 1);
1649
1650 bld.MOV(one, retype(brw_imm_d(1), one.type));
1651 bld.SHL(result, one, x);
1652 return result;
1653 }
1654
1655 void
1656 fs_visitor::emit_gs_end_primitive(const nir_src &vertex_count_nir_src)
1657 {
1658 assert(stage == MESA_SHADER_GEOMETRY);
1659
1660 struct brw_gs_prog_data *gs_prog_data =
1661 (struct brw_gs_prog_data *) prog_data;
1662
1663 /* We can only do EndPrimitive() functionality when the control data
1664 * consists of cut bits. Fortunately, the only time it isn't is when the
1665 * output type is points, in which case EndPrimitive() is a no-op.
1666 */
1667 if (gs_prog_data->control_data_format !=
1668 GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
1669 return;
1670 }
1671
1672 /* Cut bits use one bit per vertex. */
1673 assert(gs_compile->control_data_bits_per_vertex == 1);
1674
1675 fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
1676 vertex_count.type = BRW_REGISTER_TYPE_UD;
1677
1678 /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
1679 * vertex n, 0 otherwise. So all we need to do here is mark bit
1680 * (vertex_count - 1) % 32 in the cut_bits register to indicate that
1681 * EndPrimitive() was called after emitting vertex (vertex_count - 1);
1682 * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
1683 *
1684 * Note that if EndPrimitive() is called before emitting any vertices, this
1685 * will cause us to set bit 31 of the control_data_bits register to 1.
1686 * That's fine because:
1687 *
1688 * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
1689 * output, so the hardware will ignore cut bit 31.
1690 *
1691 * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
1692 * last vertex, so setting cut bit 31 has no effect (since the primitive
1693 * is automatically ended when the GS terminates).
1694 *
1695 * - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
1696 * control_data_bits register to 0 when the first vertex is emitted.
1697 */
1698
1699 const fs_builder abld = bld.annotate("end primitive");
1700
1701 /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
1702 fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1703 abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
1704 fs_reg mask = intexp2(abld, prev_count);
1705 /* Note: we're relying on the fact that the GEN SHL instruction only pays
1706 * attention to the lower 5 bits of its second source argument, so on this
1707 * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
1708 * ((vertex_count - 1) % 32).
1709 */
1710 abld.OR(this->control_data_bits, this->control_data_bits, mask);
1711 }
1712
1713 void
1714 fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count)
1715 {
1716 assert(stage == MESA_SHADER_GEOMETRY);
1717 assert(gs_compile->control_data_bits_per_vertex != 0);
1718
1719 struct brw_gs_prog_data *gs_prog_data =
1720 (struct brw_gs_prog_data *) prog_data;
1721
1722 const fs_builder abld = bld.annotate("emit control data bits");
1723 const fs_builder fwa_bld = bld.exec_all();
1724
1725 /* We use a single UD register to accumulate control data bits (32 bits
1726 * for each of the SIMD8 channels). So we need to write a DWord (32 bits)
1727 * at a time.
1728 *
1729 * Unfortunately, the URB_WRITE_SIMD8 message uses 128-bit (OWord) offsets.
1730 * We have to select a 128-bit group via the Global and Per-Slot Offsets, then
1731 * use the Channel Mask phase to enable/disable which DWord within that
1732 * group to write. (Remember, different SIMD8 channels may have emitted
1733 * different numbers of vertices, so we may need per-slot offsets.)
1734 *
1735 * Channel masking presents an annoying problem: we may have to replicate
1736 * the data up to 4 times:
1737 *
1738 * Msg = Handles, Per-Slot Offsets, Channel Masks, Data, Data, Data, Data.
1739 *
1740 * To avoid penalizing shaders that emit a small number of vertices, we
1741 * can avoid these sometimes: if the size of the control data header is
1742 * <= 128 bits, then there is only 1 OWord. All SIMD8 channels will land
1743 * in the same 128-bit group, so we can skip per-slot offsets.
1744 *
1745 * Similarly, if the control data header is <= 32 bits, there is only one
1746 * DWord, so we can skip channel masks.
1747 */
1748 enum opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;
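   /* Start with the plain SIMD8 URB write; the checks below switch to the
    * masked and per-slot variants when the control data header is large
    * enough to need channel masks or per-slot offsets.
    */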
1749
1750 fs_reg channel_mask, per_slot_offset;
1751
1752 if (gs_compile->control_data_header_size_bits > 32) {
1753 opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
1754 channel_mask = vgrf(glsl_type::uint_type);
1755 }
1756
1757 if (gs_compile->control_data_header_size_bits > 128) {
1758 opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT;
1759 per_slot_offset = vgrf(glsl_type::uint_type);
1760 }
1761
1762 /* Figure out which DWord we're trying to write to using the formula:
1763 *
1764 * dword_index = (vertex_count - 1) * bits_per_vertex / 32
1765 *
1766 * Since bits_per_vertex is a power of two, and is known at compile
1767 * time, this can be optimized to:
1768 *
1769    * dword_index = (vertex_count - 1) >> (5 - log2(bits_per_vertex))
1770 */
1771 if (opcode != SHADER_OPCODE_URB_WRITE_SIMD8) {
1772 fs_reg dword_index = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1773 fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1774 abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
1775 unsigned log2_bits_per_vertex =
1776 _mesa_fls(gs_compile->control_data_bits_per_vertex);
1777 abld.SHR(dword_index, prev_count, brw_imm_ud(6u - log2_bits_per_vertex));
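      /* Note that _mesa_fls(x) == log2(x) + 1 for the power-of-two values used
       * here (1 or 2), so 6 - _mesa_fls(bits_per_vertex) is the
       * 5 - log2(bits_per_vertex) shift from the formula above.
       */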
1778
1779 if (per_slot_offset.file != BAD_FILE) {
1780 /* Set the per-slot offset to dword_index / 4, so that we'll write to
1781 * the appropriate OWord within the control data header.
1782 */
1783 abld.SHR(per_slot_offset, dword_index, brw_imm_ud(2u));
1784 }
1785
1786 /* Set the channel masks to 1 << (dword_index % 4), so that we'll
1787 * write to the appropriate DWORD within the OWORD.
1788 */
1789 fs_reg channel = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1790 fwa_bld.AND(channel, dword_index, brw_imm_ud(3u));
1791 channel_mask = intexp2(fwa_bld, channel);
1792 /* Then the channel masks need to be in bits 23:16. */
1793 fwa_bld.SHL(channel_mask, channel_mask, brw_imm_ud(16u));
1794 }
1795
1796 /* Store the control data bits in the message payload and send it. */
1797 int mlen = 2;
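   /* Base message: one GRF of URB handles plus one GRF of data. */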
1798 if (channel_mask.file != BAD_FILE)
1799 mlen += 4; /* channel masks, plus 3 extra copies of the data */
1800 if (per_slot_offset.file != BAD_FILE)
1801 mlen++;
1802
1803 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
1804 fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen);
1805 int i = 0;
1806 sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
1807 if (per_slot_offset.file != BAD_FILE)
1808 sources[i++] = per_slot_offset;
1809 if (channel_mask.file != BAD_FILE)
1810 sources[i++] = channel_mask;
1811 while (i < mlen) {
1812 sources[i++] = this->control_data_bits;
1813 }
1814
1815 abld.LOAD_PAYLOAD(payload, sources, mlen, mlen);
1816 fs_inst *inst = abld.emit(opcode, reg_undef, payload);
1817 inst->mlen = mlen;
1818    /* We need to increment Global Offset by 256 bits to make room for
1819 * Broadwell's extra "Vertex Count" payload at the beginning of the
1820 * URB entry. Since this is an OWord message, Global Offset is counted
1821 * in 128-bit units, so we must set it to 2.
1822 */
1823 if (gs_prog_data->static_vertex_count == -1)
1824 inst->offset = 2;
1825 }
1826
1827 void
1828 fs_visitor::set_gs_stream_control_data_bits(const fs_reg &vertex_count,
1829 unsigned stream_id)
1830 {
1831 /* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */
1832
1833 /* Note: we are calling this *before* increasing vertex_count, so
1834 * this->vertex_count == vertex_count - 1 in the formula above.
1835 */
1836
1837 /* Stream mode uses 2 bits per vertex */
1838 assert(gs_compile->control_data_bits_per_vertex == 2);
1839
1840 /* Must be a valid stream */
1841 assert(stream_id >= 0 && stream_id < MAX_VERTEX_STREAMS);
1842
1843 /* Control data bits are initialized to 0 so we don't have to set any
1844 * bits when sending vertices to stream 0.
1845 */
1846 if (stream_id == 0)
1847 return;
1848
1849 const fs_builder abld = bld.annotate("set stream control data bits", NULL);
1850
1851 /* reg::sid = stream_id */
1852 fs_reg sid = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1853 abld.MOV(sid, brw_imm_ud(stream_id));
1854
1855 /* reg:shift_count = 2 * (vertex_count - 1) */
1856 fs_reg shift_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1857 abld.SHL(shift_count, vertex_count, brw_imm_ud(1u));
1858
1859 /* Note: we're relying on the fact that the GEN SHL instruction only pays
1860 * attention to the lower 5 bits of its second source argument, so on this
1861 * architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
1862 * stream_id << ((2 * (vertex_count - 1)) % 32).
1863 */
1864 fs_reg mask = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1865 abld.SHL(mask, sid, shift_count);
1866 abld.OR(this->control_data_bits, this->control_data_bits, mask);
1867 }
1868
1869 void
1870 fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src,
1871 unsigned stream_id)
1872 {
1873 assert(stage == MESA_SHADER_GEOMETRY);
1874
1875 struct brw_gs_prog_data *gs_prog_data =
1876 (struct brw_gs_prog_data *) prog_data;
1877
1878 fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
1879 vertex_count.type = BRW_REGISTER_TYPE_UD;
1880
1881 /* Haswell and later hardware ignores the "Render Stream Select" bits
1882 * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
1883 * and instead sends all primitives down the pipeline for rasterization.
1884 * If the SOL stage is enabled, "Render Stream Select" is honored and
1885 * primitives bound to non-zero streams are discarded after stream output.
1886 *
1887    * Since the only purpose of primitives sent to non-zero streams is to
1888 * be recorded by transform feedback, we can simply discard all geometry
1889 * bound to these streams when transform feedback is disabled.
1890 */
1891 if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
1892 return;
1893
1894 /* If we're outputting 32 control data bits or less, then we can wait
1895 * until the shader is over to output them all. Otherwise we need to
1896 * output them as we go. Now is the time to do it, since we're about to
1897 * output the vertex_count'th vertex, so it's guaranteed that the
1898 * control data bits associated with the (vertex_count - 1)th vertex are
1899 * correct.
1900 */
1901 if (gs_compile->control_data_header_size_bits > 32) {
1902 const fs_builder abld =
1903 bld.annotate("emit vertex: emit control data bits");
1904
1905 /* Only emit control data bits if we've finished accumulating a batch
1906 * of 32 bits. This is the case when:
1907 *
1908 * (vertex_count * bits_per_vertex) % 32 == 0
1909 *
1910 * (in other words, when the last 5 bits of vertex_count *
1911 * bits_per_vertex are 0). Assuming bits_per_vertex == 2^n for some
1912 * integer n (which is always the case, since bits_per_vertex is
1913 * always 1 or 2), this is equivalent to requiring that the last 5-n
1914 * bits of vertex_count are 0:
1915 *
1916 * vertex_count & (2^(5-n) - 1) == 0
1917 *
1918 * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
1919 * equivalent to:
1920 *
1921 * vertex_count & (32 / bits_per_vertex - 1) == 0
1922 *
1923 * TODO: If vertex_count is an immediate, we could do some of this math
1924 * at compile time...
1925 */
1926 fs_inst *inst =
1927 abld.AND(bld.null_reg_d(), vertex_count,
1928 brw_imm_ud(32u / gs_compile->control_data_bits_per_vertex - 1u));
1929 inst->conditional_mod = BRW_CONDITIONAL_Z;
1930
1931 abld.IF(BRW_PREDICATE_NORMAL);
1932 /* If vertex_count is 0, then no control data bits have been
1933 * accumulated yet, so we can skip emitting them.
1934 */
1935 abld.CMP(bld.null_reg_d(), vertex_count, brw_imm_ud(0u),
1936 BRW_CONDITIONAL_NEQ);
1937 abld.IF(BRW_PREDICATE_NORMAL);
1938 emit_gs_control_data_bits(vertex_count);
1939 abld.emit(BRW_OPCODE_ENDIF);
1940
1941 /* Reset control_data_bits to 0 so we can start accumulating a new
1942 * batch.
1943 *
1944 * Note: in the case where vertex_count == 0, this neutralizes the
1945 * effect of any call to EndPrimitive() that the shader may have
1946 * made before outputting its first vertex.
1947 */
1948 inst = abld.MOV(this->control_data_bits, brw_imm_ud(0u));
1949 inst->force_writemask_all = true;
1950 abld.emit(BRW_OPCODE_ENDIF);
1951 }
1952
1953 emit_urb_writes(vertex_count);
1954
1955 /* In stream mode we have to set control data bits for all vertices
1956    * unless we have disabled control data bits completely (which we do for
1957    * GL_POINTS outputs that don't use streams).
1958 */
1959 if (gs_compile->control_data_header_size_bits > 0 &&
1960 gs_prog_data->control_data_format ==
1961 GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
1962 set_gs_stream_control_data_bits(vertex_count, stream_id);
1963 }
1964 }
1965
1966 void
1967 fs_visitor::emit_gs_input_load(const fs_reg &dst,
1968 const nir_src &vertex_src,
1969 unsigned base_offset,
1970 const nir_src &offset_src,
1971 unsigned num_components)
1972 {
1973 struct brw_gs_prog_data *gs_prog_data = (struct brw_gs_prog_data *) prog_data;
1974
1975 nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
1976 nir_const_value *offset_const = nir_src_as_const_value(offset_src);
1977 const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8;
1978
1979 /* Offset 0 is the VUE header, which contains VARYING_SLOT_LAYER [.y],
1980 * VARYING_SLOT_VIEWPORT [.z], and VARYING_SLOT_PSIZ [.w]. Only
1981 * gl_PointSize is available as a GS input, however, so it must be that.
1982 */
1983 const bool is_point_size = (base_offset == 0);
1984
1985 /* TODO: figure out push input layout for invocations == 1 */
1986 if (gs_prog_data->invocations == 1 &&
1987 offset_const != NULL && vertex_const != NULL &&
1988 4 * (base_offset + offset_const->u32[0]) < push_reg_count) {
1989 int imm_offset = (base_offset + offset_const->u32[0]) * 4 +
1990 vertex_const->u32[0] * push_reg_count;
1991 /* This input was pushed into registers. */
1992 if (is_point_size) {
1993 /* gl_PointSize comes in .w */
1994 bld.MOV(dst, fs_reg(ATTR, imm_offset + 3, dst.type));
1995 } else {
1996 for (unsigned i = 0; i < num_components; i++) {
1997 bld.MOV(offset(dst, bld, i),
1998 fs_reg(ATTR, imm_offset + i, dst.type));
1999 }
2000 }
2001 return;
2002 }
2003
2004 /* Resort to the pull model. Ensure the VUE handles are provided. */
2005 gs_prog_data->base.include_vue_handles = true;
2006
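   /* The incoming vertex (ICP) URB handles start at g2; when the primitive ID
    * is delivered to the thread it occupies g2 and the handles start at g3.
    */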
2007 unsigned first_icp_handle = gs_prog_data->include_primitive_id ? 3 : 2;
2008 fs_reg icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2009
2010 if (gs_prog_data->invocations == 1) {
2011 if (vertex_const) {
2012 /* The vertex index is constant; just select the proper URB handle. */
2013 icp_handle =
2014 retype(brw_vec8_grf(first_icp_handle + vertex_const->i32[0], 0),
2015 BRW_REGISTER_TYPE_UD);
2016 } else {
2017 /* The vertex index is non-constant. We need to use indirect
2018 * addressing to fetch the proper URB handle.
2019 *
2020 * First, we start with the sequence <7, 6, 5, 4, 3, 2, 1, 0>
2021 * indicating that channel <n> should read the handle from
2022 * DWord <n>. We convert that to bytes by multiplying by 4.
2023 *
2024 * Next, we convert the vertex index to bytes by multiplying
2025 * by 32 (shifting by 5), and add the two together. This is
2026 * the final indirect byte offset.
2027 */
2028 fs_reg sequence = bld.vgrf(BRW_REGISTER_TYPE_W, 1);
2029 fs_reg channel_offsets = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2030 fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2031 fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2032
2033 /* sequence = <7, 6, 5, 4, 3, 2, 1, 0> */
2034 bld.MOV(sequence, fs_reg(brw_imm_v(0x76543210)));
2035 /* channel_offsets = 4 * sequence = <28, 24, 20, 16, 12, 8, 4, 0> */
2036 bld.SHL(channel_offsets, sequence, brw_imm_ud(2u));
2037 /* Convert vertex_index to bytes (multiply by 32) */
2038 bld.SHL(vertex_offset_bytes,
2039 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2040 brw_imm_ud(5u));
2041 bld.ADD(icp_offset_bytes, vertex_offset_bytes, channel_offsets);
2042
2043 /* Use first_icp_handle as the base offset. There is one register
2044 * of URB handles per vertex, so inform the register allocator that
2045 * we might read up to nir->info.gs.vertices_in registers.
2046 */
2047 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2048 fs_reg(brw_vec8_grf(first_icp_handle, 0)),
2049 fs_reg(icp_offset_bytes),
2050 brw_imm_ud(nir->info.gs.vertices_in * REG_SIZE));
2051 }
2052 } else {
2053 assert(gs_prog_data->invocations > 1);
2054
2055 if (vertex_const) {
2056 assert(devinfo->gen >= 9 || vertex_const->i32[0] <= 5);
2057 bld.MOV(icp_handle,
2058 retype(brw_vec1_grf(first_icp_handle +
2059 vertex_const->i32[0] / 8,
2060 vertex_const->i32[0] % 8),
2061 BRW_REGISTER_TYPE_UD));
2062 } else {
2063 /* The vertex index is non-constant. We need to use indirect
2064 * addressing to fetch the proper URB handle.
2065 *
2066 */
2067 fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2068
2069 /* Convert vertex_index to bytes (multiply by 4) */
2070 bld.SHL(icp_offset_bytes,
2071 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2072 brw_imm_ud(2u));
2073
2074 /* Use first_icp_handle as the base offset. There is one DWord
2075 * of URB handles per vertex, so inform the register allocator that
2076 * we might read up to ceil(nir->info.gs.vertices_in / 8) registers.
2077 */
2078 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2079 fs_reg(brw_vec8_grf(first_icp_handle, 0)),
2080 fs_reg(icp_offset_bytes),
2081 brw_imm_ud(DIV_ROUND_UP(nir->info.gs.vertices_in, 8) *
2082 REG_SIZE));
2083 }
2084 }
2085
2086 fs_inst *inst;
2087 if (offset_const) {
2088 /* Constant indexing - use global offset. */
2089 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
2090 inst->offset = base_offset + offset_const->u32[0];
2091 inst->base_mrf = -1;
2092 inst->mlen = 1;
2093 inst->regs_written = num_components;
2094 } else {
2095 /* Indirect indexing - use per-slot offsets as well. */
2096 const fs_reg srcs[] = { icp_handle, get_nir_src(offset_src) };
2097 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2098 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2099
2100 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload);
2101 inst->offset = base_offset;
2102 inst->base_mrf = -1;
2103 inst->mlen = 2;
2104 inst->regs_written = num_components;
2105 }
2106
2107 if (is_point_size) {
2108 /* Read the whole VUE header (because of alignment) and read .w. */
2109 fs_reg tmp = bld.vgrf(dst.type, 4);
2110 inst->dst = tmp;
2111 inst->regs_written = 4;
2112 bld.MOV(dst, offset(tmp, bld, 3));
2113 }
2114 }
2115
2116 fs_reg
2117 fs_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
2118 {
2119 nir_src *offset_src = nir_get_io_offset_src(instr);
2120 nir_const_value *const_value = nir_src_as_const_value(*offset_src);
2121
2122 if (const_value) {
2123 /* The only constant offset we should find is 0. brw_nir.c's
2124 * add_const_offset_to_base() will fold other constant offsets
2125 * into instr->const_index[0].
2126 */
2127 assert(const_value->u32[0] == 0);
2128 return fs_reg();
2129 }
2130
2131 return get_nir_src(*offset_src);
2132 }
2133
2134 void
2135 fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld,
2136 nir_intrinsic_instr *instr)
2137 {
2138 assert(stage == MESA_SHADER_VERTEX);
2139
2140 fs_reg dest;
2141 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2142 dest = get_nir_dest(instr->dest);
2143
2144 switch (instr->intrinsic) {
2145 case nir_intrinsic_load_vertex_id:
2146 unreachable("should be lowered by lower_vertex_id()");
2147
2148 case nir_intrinsic_load_vertex_id_zero_base:
2149 case nir_intrinsic_load_base_vertex:
2150 case nir_intrinsic_load_instance_id:
2151 case nir_intrinsic_load_base_instance:
2152 case nir_intrinsic_load_draw_id: {
2153 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
2154 fs_reg val = nir_system_values[sv];
2155 assert(val.file != BAD_FILE);
2156 dest.type = val.type;
2157 bld.MOV(dest, val);
2158 break;
2159 }
2160
2161 default:
2162 nir_emit_intrinsic(bld, instr);
2163 break;
2164 }
2165 }
2166
2167 void
2168 fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
2169 nir_intrinsic_instr *instr)
2170 {
2171 assert(stage == MESA_SHADER_TESS_CTRL);
2172 struct brw_tcs_prog_key *tcs_key = (struct brw_tcs_prog_key *) key;
2173 struct brw_tcs_prog_data *tcs_prog_data =
2174 (struct brw_tcs_prog_data *) prog_data;
2175
2176 fs_reg dst;
2177 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2178 dst = get_nir_dest(instr->dest);
2179
2180 switch (instr->intrinsic) {
2181 case nir_intrinsic_load_primitive_id:
2182 bld.MOV(dst, fs_reg(brw_vec1_grf(0, 1)));
2183 break;
2184 case nir_intrinsic_load_invocation_id:
2185 bld.MOV(retype(dst, invocation_id.type), invocation_id);
2186 break;
2187 case nir_intrinsic_load_patch_vertices_in:
2188 bld.MOV(retype(dst, BRW_REGISTER_TYPE_D),
2189 brw_imm_d(tcs_key->input_vertices));
2190 break;
2191
2192 case nir_intrinsic_barrier: {
2193 if (tcs_prog_data->instances == 1)
2194 break;
2195
2196 fs_reg m0 = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2197 fs_reg m0_2 = byte_offset(m0, 2 * sizeof(uint32_t));
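      /* m0_2 aliases DWord 2 of the message header (m0.2), which holds the
       * barrier ID and barrier count fields filled in below.
       */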
2198
2199 const fs_builder fwa_bld = bld.exec_all();
2200
2201 /* Zero the message header */
2202 fwa_bld.MOV(m0, brw_imm_ud(0u));
2203
2204 /* Copy "Barrier ID" from r0.2, bits 16:13 */
2205 fwa_bld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
2206 brw_imm_ud(INTEL_MASK(16, 13)));
2207
2208 /* Shift it up to bits 27:24. */
2209 fwa_bld.SHL(m0_2, m0_2, brw_imm_ud(11));
2210
2211 /* Set the Barrier Count and the enable bit */
2212 fwa_bld.OR(m0_2, m0_2,
2213 brw_imm_ud(tcs_prog_data->instances << 8 | (1 << 15)));
2214
2215 bld.emit(SHADER_OPCODE_BARRIER, bld.null_reg_ud(), m0);
2216 break;
2217 }
2218
2219 case nir_intrinsic_load_input:
2220 unreachable("nir_lower_io should never give us these.");
2221 break;
2222
2223 case nir_intrinsic_load_per_vertex_input: {
2224 fs_reg indirect_offset = get_indirect_offset(instr);
2225 unsigned imm_offset = instr->const_index[0];
2226
2227 const nir_src &vertex_src = instr->src[0];
2228 nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
2229
2230 fs_inst *inst;
2231
2232 fs_reg icp_handle;
2233
2234 if (vertex_const) {
2235 /* Emit a MOV to resolve <0,1,0> regioning. */
2236 icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2237 bld.MOV(icp_handle,
2238 retype(brw_vec1_grf(1 + (vertex_const->i32[0] >> 3),
2239 vertex_const->i32[0] & 7),
2240 BRW_REGISTER_TYPE_UD));
2241 } else if (tcs_prog_data->instances == 1 &&
2242 vertex_src.is_ssa &&
2243 vertex_src.ssa->parent_instr->type == nir_instr_type_intrinsic &&
2244 nir_instr_as_intrinsic(vertex_src.ssa->parent_instr)->intrinsic == nir_intrinsic_load_invocation_id) {
2245 /* For the common case of only 1 instance, an array index of
2246 * gl_InvocationID means reading g1. Skip all the indirect work.
2247 */
2248 icp_handle = retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD);
2249 } else {
2250 /* The vertex index is non-constant. We need to use indirect
2251 * addressing to fetch the proper URB handle.
2252 */
2253 icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2254
2255 /* Each ICP handle is a single DWord (4 bytes) */
2256 fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2257 bld.SHL(vertex_offset_bytes,
2258 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2259 brw_imm_ud(2u));
2260
2261 /* Start at g1. We might read up to 4 registers. */
2262 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2263 fs_reg(brw_vec8_grf(1, 0)), vertex_offset_bytes,
2264 brw_imm_ud(4 * REG_SIZE));
2265 }
2266
2267 if (indirect_offset.file == BAD_FILE) {
2268 /* Constant indexing - use global offset. */
2269 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
2270 inst->offset = imm_offset;
2271 inst->mlen = 1;
2272 inst->base_mrf = -1;
2273 inst->regs_written = instr->num_components;
2274 } else {
2275 /* Indirect indexing - use per-slot offsets as well. */
2276 const fs_reg srcs[] = { icp_handle, indirect_offset };
2277 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2278 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2279
2280 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload);
2281 inst->offset = imm_offset;
2282 inst->base_mrf = -1;
2283 inst->mlen = 2;
2284 inst->regs_written = instr->num_components;
2285 }
2286
2287 /* Copy the temporary to the destination to deal with writemasking.
2288 *
2289 * Also attempt to deal with gl_PointSize being in the .w component.
2290 */
2291 if (inst->offset == 0 && indirect_offset.file == BAD_FILE) {
2292 inst->dst = bld.vgrf(dst.type, 4);
2293 inst->regs_written = 4;
2294 bld.MOV(dst, offset(inst->dst, bld, 3));
2295 }
2296 break;
2297 }
2298
2299 case nir_intrinsic_load_output:
2300 case nir_intrinsic_load_per_vertex_output: {
2301 fs_reg indirect_offset = get_indirect_offset(instr);
2302 unsigned imm_offset = instr->const_index[0];
2303
2304 fs_inst *inst;
2305 if (indirect_offset.file == BAD_FILE) {
2306 /* Replicate the patch handle to all enabled channels */
2307 fs_reg patch_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2308 bld.MOV(patch_handle,
2309 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD));
2310
2311 if (imm_offset == 0) {
2312 /* This is a read of gl_TessLevelInner[], which lives in the
2313 * Patch URB header. The layout depends on the domain.
2314 */
2315 dst.type = BRW_REGISTER_TYPE_F;
2316 switch (tcs_key->tes_primitive_mode) {
2317 case GL_QUADS: {
2318 /* DWords 3-2 (reversed) */
2319 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
2320
2321 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp, patch_handle);
2322 inst->offset = 0;
2323 inst->mlen = 1;
2324 inst->base_mrf = -1;
2325 inst->regs_written = 4;
2326
2327 /* dst.xy = tmp.wz */
2328 bld.MOV(dst, offset(tmp, bld, 3));
2329 bld.MOV(offset(dst, bld, 1), offset(tmp, bld, 2));
2330 break;
2331 }
2332 case GL_TRIANGLES:
2333 /* DWord 4; hardcode offset = 1 and regs_written = 1 */
2334 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, patch_handle);
2335 inst->offset = 1;
2336 inst->mlen = 1;
2337 inst->base_mrf = -1;
2338 inst->regs_written = 1;
2339 break;
2340 case GL_ISOLINES:
2341 /* All channels are undefined. */
2342 break;
2343 default:
2344 unreachable("Bogus tessellation domain");
2345 }
2346 } else if (imm_offset == 1) {
2347 /* This is a read of gl_TessLevelOuter[], which lives in the
2348 * Patch URB header. The layout depends on the domain.
2349 */
2350 dst.type = BRW_REGISTER_TYPE_F;
2351
2352 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
2353 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp, patch_handle);
2354 inst->offset = 1;
2355 inst->mlen = 1;
2356 inst->base_mrf = -1;
2357 inst->regs_written = 4;
2358
2359 /* Reswizzle: WZYX */
2360 fs_reg srcs[4] = {
2361 offset(tmp, bld, 3),
2362 offset(tmp, bld, 2),
2363 offset(tmp, bld, 1),
2364 offset(tmp, bld, 0),
2365 };
2366
2367 unsigned num_components;
2368 switch (tcs_key->tes_primitive_mode) {
2369 case GL_QUADS:
2370 num_components = 4;
2371 break;
2372 case GL_TRIANGLES:
2373 num_components = 3;
2374 break;
2375 case GL_ISOLINES:
2376 /* Isolines are not reversed; swizzle .zw -> .xy */
2377 srcs[0] = offset(tmp, bld, 2);
2378 srcs[1] = offset(tmp, bld, 3);
2379 num_components = 2;
2380 break;
2381 default:
2382 unreachable("Bogus tessellation domain");
2383 }
2384 bld.LOAD_PAYLOAD(dst, srcs, num_components, 0);
2385 } else {
2386 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, patch_handle);
2387 inst->offset = imm_offset;
2388 inst->mlen = 1;
2389 inst->base_mrf = -1;
2390 inst->regs_written = instr->num_components;
2391 }
2392 } else {
2393 /* Indirect indexing - use per-slot offsets as well. */
2394 const fs_reg srcs[] = {
2395 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD),
2396 indirect_offset
2397 };
2398 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2399 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2400
2401 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload);
2402 inst->offset = imm_offset;
2403 inst->mlen = 2;
2404 inst->base_mrf = -1;
2405 inst->regs_written = instr->num_components;
2406 }
2407 break;
2408 }
2409
2410 case nir_intrinsic_store_output:
2411 case nir_intrinsic_store_per_vertex_output: {
2412 fs_reg value = get_nir_src(instr->src[0]);
2413 fs_reg indirect_offset = get_indirect_offset(instr);
2414 unsigned imm_offset = instr->const_index[0];
2415 unsigned swiz = BRW_SWIZZLE_XYZW;
2416 unsigned mask = instr->const_index[1];
2417 unsigned header_regs = 0;
2418 fs_reg srcs[7];
2419 srcs[header_regs++] = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD);
2420
2421 if (indirect_offset.file != BAD_FILE) {
2422 srcs[header_regs++] = indirect_offset;
2423 } else if (!is_passthrough_shader) {
2424 if (imm_offset == 0) {
2425 value.type = BRW_REGISTER_TYPE_F;
2426
2427 mask &= (1 << tesslevel_inner_components(tcs_key->tes_primitive_mode)) - 1;
2428
2429 /* This is a write to gl_TessLevelInner[], which lives in the
2430 * Patch URB header. The layout depends on the domain.
2431 */
2432 switch (tcs_key->tes_primitive_mode) {
2433 case GL_QUADS:
2434 /* gl_TessLevelInner[].xy lives at DWords 3-2 (reversed).
2435             * We use an XXYX swizzle, which puts .xy (reversed) into the .wz
2436 * channels, and use a .zw writemask.
2437 */
2438 mask = writemask_for_backwards_vector(mask);
2439 swiz = BRW_SWIZZLE4(0, 0, 1, 0);
2440 break;
2441 case GL_TRIANGLES:
2442 /* gl_TessLevelInner[].x lives at DWord 4, so we set the
2443 * writemask to X and bump the URB offset by 1.
2444 */
2445 imm_offset = 1;
2446 break;
2447 case GL_ISOLINES:
2448 /* Skip; gl_TessLevelInner[] doesn't exist for isolines. */
2449 return;
2450 default:
2451 unreachable("Bogus tessellation domain");
2452 }
2453 } else if (imm_offset == 1) {
2454 /* This is a write to gl_TessLevelOuter[] which lives in the
2455 * Patch URB Header at DWords 4-7. However, it's reversed, so
2456 * instead of .xyzw we have .wzyx.
2457 */
2458 value.type = BRW_REGISTER_TYPE_F;
2459
2460 mask &= (1 << tesslevel_outer_components(tcs_key->tes_primitive_mode)) - 1;
2461
2462 if (tcs_key->tes_primitive_mode == GL_ISOLINES) {
2463 /* Isolines .xy should be stored in .zw, in order. */
2464 swiz = BRW_SWIZZLE4(0, 0, 0, 1);
2465 mask <<= 2;
2466 } else {
2467 /* Other domains are reversed; store .wzyx instead of .xyzw */
2468 swiz = BRW_SWIZZLE_WZYX;
2469 mask = writemask_for_backwards_vector(mask);
2470 }
2471 }
2472 }
2473
2474 if (mask == 0)
2475 break;
2476
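      /* _mesa_fls() returns the 1-based index of the highest bit set in the
       * writemask, i.e. how many components we must send to cover it.
       */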
2477 unsigned num_components = _mesa_fls(mask);
2478 enum opcode opcode;
2479
2480 if (mask != WRITEMASK_XYZW) {
2481 srcs[header_regs++] = brw_imm_ud(mask << 16);
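         /* The channel mask lives in bits 23:16 of the message header DWord,
          * hence the shift by 16.
          */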
2482 opcode = indirect_offset.file != BAD_FILE ?
2483 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
2484 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
2485 } else {
2486 opcode = indirect_offset.file != BAD_FILE ?
2487 SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT :
2488 SHADER_OPCODE_URB_WRITE_SIMD8;
2489 }
2490
2491 for (unsigned i = 0; i < num_components; i++) {
2492 if (mask & (1 << i))
2493 srcs[header_regs + i] = offset(value, bld, BRW_GET_SWZ(swiz, i));
2494 }
2495
2496 unsigned mlen = header_regs + num_components;
2497
2498 fs_reg payload =
2499 bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
2500 bld.LOAD_PAYLOAD(payload, srcs, mlen, header_regs);
2501
2502 fs_inst *inst = bld.emit(opcode, bld.null_reg_ud(), payload);
2503 inst->offset = imm_offset;
2504 inst->mlen = mlen;
2505 inst->base_mrf = -1;
2506 break;
2507 }
2508
2509 default:
2510 nir_emit_intrinsic(bld, instr);
2511 break;
2512 }
2513 }
2514
2515 void
2516 fs_visitor::nir_emit_tes_intrinsic(const fs_builder &bld,
2517 nir_intrinsic_instr *instr)
2518 {
2519 assert(stage == MESA_SHADER_TESS_EVAL);
2520 struct brw_tes_prog_data *tes_prog_data = (struct brw_tes_prog_data *) prog_data;
2521
2522 fs_reg dest;
2523 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2524 dest = get_nir_dest(instr->dest);
2525
2526 switch (instr->intrinsic) {
2527 case nir_intrinsic_load_primitive_id:
2528 bld.MOV(dest, fs_reg(brw_vec1_grf(0, 1)));
2529 break;
2530 case nir_intrinsic_load_tess_coord:
2531 /* gl_TessCoord is part of the payload in g1-3 */
2532 for (unsigned i = 0; i < 3; i++) {
2533 bld.MOV(offset(dest, bld, i), fs_reg(brw_vec8_grf(1 + i, 0)));
2534 }
2535 break;
2536
2537 case nir_intrinsic_load_tess_level_outer:
2538 /* When the TES reads gl_TessLevelOuter, we ensure that the patch header
2539 * appears as a push-model input. So, we can simply use the ATTR file
2540 * rather than issuing URB read messages. The data is stored in the
2541 * high DWords in reverse order - DWord 7 contains .x, DWord 6 contains
2542 * .y, and so on.
2543 */
2544 switch (tes_prog_data->domain) {
2545 case BRW_TESS_DOMAIN_QUAD:
2546 for (unsigned i = 0; i < 4; i++)
2547 bld.MOV(offset(dest, bld, i), component(fs_reg(ATTR, 0), 7 - i));
2548 break;
2549 case BRW_TESS_DOMAIN_TRI:
2550 for (unsigned i = 0; i < 3; i++)
2551 bld.MOV(offset(dest, bld, i), component(fs_reg(ATTR, 0), 7 - i));
2552 break;
2553 case BRW_TESS_DOMAIN_ISOLINE:
2554 for (unsigned i = 0; i < 2; i++)
2555 bld.MOV(offset(dest, bld, i), component(fs_reg(ATTR, 0), 7 - i));
2556 break;
2557 }
2558 break;
2559
2560 case nir_intrinsic_load_tess_level_inner:
2561 /* When the TES reads gl_TessLevelInner, we ensure that the patch header
2562 * appears as a push-model input. So, we can simply use the ATTR file
2563 * rather than issuing URB read messages.
2564 */
2565 switch (tes_prog_data->domain) {
2566 case BRW_TESS_DOMAIN_QUAD:
2567 bld.MOV(dest, component(fs_reg(ATTR, 0), 3));
2568 bld.MOV(offset(dest, bld, 1), component(fs_reg(ATTR, 0), 2));
2569 break;
2570 case BRW_TESS_DOMAIN_TRI:
2571 bld.MOV(dest, component(fs_reg(ATTR, 0), 4));
2572 break;
2573 case BRW_TESS_DOMAIN_ISOLINE:
2574 /* ignore - value is undefined */
2575 break;
2576 }
2577 break;
2578
2579 case nir_intrinsic_load_input:
2580 case nir_intrinsic_load_per_vertex_input: {
2581 fs_reg indirect_offset = get_indirect_offset(instr);
2582 unsigned imm_offset = instr->const_index[0];
2583
2584 fs_inst *inst;
2585 if (indirect_offset.file == BAD_FILE) {
2586 /* Arbitrarily only push up to 32 vec4 slots worth of data,
2587 * which is 16 registers (since each holds 2 vec4 slots).
2588 */
2589 const unsigned max_push_slots = 32;
2590 if (imm_offset < max_push_slots) {
2591 fs_reg src = fs_reg(ATTR, imm_offset / 2, dest.type);
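            /* Each push register holds two vec4 slots, so imm_offset / 2 picks
             * the register and odd slots start at component 4 within it.
             */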
2592 for (int i = 0; i < instr->num_components; i++) {
2593 bld.MOV(offset(dest, bld, i),
2594 component(src, 4 * (imm_offset % 2) + i));
2595 }
2596 tes_prog_data->base.urb_read_length =
2597 MAX2(tes_prog_data->base.urb_read_length,
2598 DIV_ROUND_UP(imm_offset + 1, 2));
2599 } else {
2600 /* Replicate the patch handle to all enabled channels */
2601 const fs_reg srcs[] = {
2602 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)
2603 };
2604 fs_reg patch_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2605 bld.LOAD_PAYLOAD(patch_handle, srcs, ARRAY_SIZE(srcs), 0);
2606
2607 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dest, patch_handle);
2608 inst->mlen = 1;
2609 inst->offset = imm_offset;
2610 inst->base_mrf = -1;
2611 inst->regs_written = instr->num_components;
2612 }
2613 } else {
2614 /* Indirect indexing - use per-slot offsets as well. */
2615 const fs_reg srcs[] = {
2616 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD),
2617 indirect_offset
2618 };
2619 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2620 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2621
2622 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dest, payload);
2623 inst->mlen = 2;
2624 inst->offset = imm_offset;
2625 inst->base_mrf = -1;
2626 inst->regs_written = instr->num_components;
2627 }
2628 break;
2629 }
2630 default:
2631 nir_emit_intrinsic(bld, instr);
2632 break;
2633 }
2634 }
2635
2636 void
2637 fs_visitor::nir_emit_gs_intrinsic(const fs_builder &bld,
2638 nir_intrinsic_instr *instr)
2639 {
2640 assert(stage == MESA_SHADER_GEOMETRY);
2641 fs_reg indirect_offset;
2642
2643 fs_reg dest;
2644 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2645 dest = get_nir_dest(instr->dest);
2646
2647 switch (instr->intrinsic) {
2648 case nir_intrinsic_load_primitive_id:
2649 assert(stage == MESA_SHADER_GEOMETRY);
2650 assert(((struct brw_gs_prog_data *)prog_data)->include_primitive_id);
2651 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
2652 retype(fs_reg(brw_vec8_grf(2, 0)), BRW_REGISTER_TYPE_UD));
2653 break;
2654
2655 case nir_intrinsic_load_input:
2656 unreachable("load_input intrinsics are invalid for the GS stage");
2657
2658 case nir_intrinsic_load_per_vertex_input:
2659 emit_gs_input_load(dest, instr->src[0], instr->const_index[0],
2660 instr->src[1], instr->num_components);
2661 break;
2662
2663 case nir_intrinsic_emit_vertex_with_counter:
2664 emit_gs_vertex(instr->src[0], instr->const_index[0]);
2665 break;
2666
2667 case nir_intrinsic_end_primitive_with_counter:
2668 emit_gs_end_primitive(instr->src[0]);
2669 break;
2670
2671 case nir_intrinsic_set_vertex_count:
2672 bld.MOV(this->final_gs_vertex_count, get_nir_src(instr->src[0]));
2673 break;
2674
2675 case nir_intrinsic_load_invocation_id: {
2676 fs_reg val = nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
2677 assert(val.file != BAD_FILE);
2678 dest.type = val.type;
2679 bld.MOV(dest, val);
2680 break;
2681 }
2682
2683 default:
2684 nir_emit_intrinsic(bld, instr);
2685 break;
2686 }
2687 }
2688
2689 void
2690 fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
2691 nir_intrinsic_instr *instr)
2692 {
2693 assert(stage == MESA_SHADER_FRAGMENT);
2694 struct brw_wm_prog_data *wm_prog_data =
2695 (struct brw_wm_prog_data *) prog_data;
2696 const struct brw_wm_prog_key *wm_key = (const struct brw_wm_prog_key *) key;
2697
2698 fs_reg dest;
2699 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2700 dest = get_nir_dest(instr->dest);
2701
2702 switch (instr->intrinsic) {
2703 case nir_intrinsic_load_front_face:
2704 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
2705 *emit_frontfacing_interpolation());
2706 break;
2707
2708 case nir_intrinsic_load_sample_pos: {
2709 fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
2710 assert(sample_pos.file != BAD_FILE);
2711 dest.type = sample_pos.type;
2712 bld.MOV(dest, sample_pos);
2713 bld.MOV(offset(dest, bld, 1), offset(sample_pos, bld, 1));
2714 break;
2715 }
2716
2717 case nir_intrinsic_load_helper_invocation:
2718 case nir_intrinsic_load_sample_mask_in:
2719 case nir_intrinsic_load_sample_id: {
2720 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
2721 fs_reg val = nir_system_values[sv];
2722 assert(val.file != BAD_FILE);
2723 dest.type = val.type;
2724 bld.MOV(dest, val);
2725 break;
2726 }
2727
2728 case nir_intrinsic_discard:
2729 case nir_intrinsic_discard_if: {
2730 /* We track our discarded pixels in f0.1. By predicating on it, we can
2731 * update just the flag bits that aren't yet discarded. If there's no
2732 * condition, we emit a CMP of g0 != g0, so all currently executing
2733 * channels will get turned off.
2734 */
2735 fs_inst *cmp;
2736 if (instr->intrinsic == nir_intrinsic_discard_if) {
2737 cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
2738 brw_imm_d(0), BRW_CONDITIONAL_Z);
2739 } else {
2740 fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
2741 BRW_REGISTER_TYPE_UW));
2742 cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
2743 }
2744 cmp->predicate = BRW_PREDICATE_NORMAL;
2745 cmp->flag_subreg = 1;
2746
2747 if (devinfo->gen >= 6) {
2748 emit_discard_jump();
2749 }
2750 break;
2751 }
2752
2753 case nir_intrinsic_interp_var_at_centroid:
2754 case nir_intrinsic_interp_var_at_sample:
2755 case nir_intrinsic_interp_var_at_offset: {
2756 /* Handle ARB_gpu_shader5 interpolation intrinsics
2757 *
2758 * It's worth a quick word of explanation as to why we handle the full
2759 * variable-based interpolation intrinsic rather than a lowered version
2760    * like we do for other inputs. We have to do that because the way
2761 * we set up inputs doesn't allow us to use the already setup inputs for
2762 * interpolation. At the beginning of the shader, we go through all of
2763 * the input variables and do the initial interpolation and put it in
2764 * the nir_inputs array based on its location as determined in
2765 * nir_lower_io. If the input isn't used, dead code cleans up and
2766 * everything works fine. However, when we get to the ARB_gpu_shader5
2767 * interpolation intrinsics, we need to reinterpolate the input
2768 * differently. If we used an intrinsic that just had an index it would
2769 * only give us the offset into the nir_inputs array. However, this is
2770 * useless because that value is post-interpolation and we need
2771 * pre-interpolation. In order to get the actual location of the bits
2772 * we get from the vertex fetching hardware, we need the variable.
2773 */
2774 wm_prog_data->pulls_bary = true;
2775
2776 fs_reg dst_xy = bld.vgrf(BRW_REGISTER_TYPE_F, 2);
2777 const glsl_interp_qualifier interpolation =
2778 (glsl_interp_qualifier) instr->variables[0]->var->data.interpolation;
2779
2780 switch (instr->intrinsic) {
2781 case nir_intrinsic_interp_var_at_centroid:
2782 emit_pixel_interpolater_send(bld,
2783 FS_OPCODE_INTERPOLATE_AT_CENTROID,
2784 dst_xy,
2785 fs_reg(), /* src */
2786 brw_imm_ud(0u),
2787 interpolation);
2788 break;
2789
2790 case nir_intrinsic_interp_var_at_sample: {
2791 if (!wm_key->multisample_fbo) {
2792 /* From the ARB_gpu_shader5 specification:
2793 * "If multisample buffers are not available, the input varying
2794 * will be evaluated at the center of the pixel."
2795 */
2796 emit_pixel_interpolater_send(bld,
2797 FS_OPCODE_INTERPOLATE_AT_CENTROID,
2798 dst_xy,
2799 fs_reg(), /* src */
2800 brw_imm_ud(0u),
2801 interpolation);
2802 break;
2803 }
2804
2805 nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
2806
2807 if (const_sample) {
2808 unsigned msg_data = const_sample->i32[0] << 4;
2809
2810 emit_pixel_interpolater_send(bld,
2811 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
2812 dst_xy,
2813 fs_reg(), /* src */
2814 brw_imm_ud(msg_data),
2815 interpolation);
2816 } else {
2817 const fs_reg sample_src = retype(get_nir_src(instr->src[0]),
2818 BRW_REGISTER_TYPE_UD);
2819
2820 if (nir_src_is_dynamically_uniform(instr->src[0])) {
2821 const fs_reg sample_id = bld.emit_uniformize(sample_src);
2822 const fs_reg msg_data = vgrf(glsl_type::uint_type);
2823 bld.exec_all().group(1, 0)
2824 .SHL(msg_data, sample_id, brw_imm_ud(4u));
2825 emit_pixel_interpolater_send(bld,
2826 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
2827 dst_xy,
2828 fs_reg(), /* src */
2829 msg_data,
2830 interpolation);
2831 } else {
2832 /* Make a loop that sends a message to the pixel interpolater
2833 * for the sample number in each live channel. If there are
2834 * multiple channels with the same sample number then these
2835    * will be handled simultaneously with a single iteration of
2836 * the loop.
2837 */
2838 bld.emit(BRW_OPCODE_DO);
2839
2840    /* Get the next live sample number into sample_id */
2841 const fs_reg sample_id = bld.emit_uniformize(sample_src);
2842
2843 /* Set the flag register so that we can perform the send
2844 * message on all channels that have the same sample number
2845 */
2846 bld.CMP(bld.null_reg_ud(),
2847 sample_src, sample_id,
2848 BRW_CONDITIONAL_EQ);
2849 const fs_reg msg_data = vgrf(glsl_type::uint_type);
2850 bld.exec_all().group(1, 0)
2851 .SHL(msg_data, sample_id, brw_imm_ud(4u));
2852 fs_inst *inst =
2853 emit_pixel_interpolater_send(bld,
2854 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
2855 dst_xy,
2856 fs_reg(), /* src */
2857 msg_data,
2858 interpolation);
2859 set_predicate(BRW_PREDICATE_NORMAL, inst);
2860
2861 /* Continue the loop if there are any live channels left */
2862 set_predicate_inv(BRW_PREDICATE_NORMAL,
2863 true, /* inverse */
2864 bld.emit(BRW_OPCODE_WHILE));
2865 }
2866 }
2867
2868 break;
2869 }
2870
2871 case nir_intrinsic_interp_var_at_offset: {
2872 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
2873
2874 if (const_offset) {
2875 unsigned off_x = MIN2((int)(const_offset->f32[0] * 16), 7) & 0xf;
2876 unsigned off_y = MIN2((int)(const_offset->f32[1] * 16), 7) & 0xf;
2877
2878 emit_pixel_interpolater_send(bld,
2879 FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
2880 dst_xy,
2881 fs_reg(), /* src */
2882 brw_imm_ud(off_x | (off_y << 4)),
2883 interpolation);
2884 } else {
2885 fs_reg src = vgrf(glsl_type::ivec2_type);
2886 fs_reg offset_src = retype(get_nir_src(instr->src[0]),
2887 BRW_REGISTER_TYPE_F);
2888 for (int i = 0; i < 2; i++) {
2889 fs_reg temp = vgrf(glsl_type::float_type);
2890 bld.MUL(temp, offset(offset_src, bld, i), brw_imm_f(16.0f));
2891 fs_reg itemp = vgrf(glsl_type::int_type);
2892 bld.MOV(itemp, temp); /* float to int */
2893
2894 /* Clamp the upper end of the range to +7/16.
2895 * ARB_gpu_shader5 requires that we support a maximum offset
2896 * of +0.5, which isn't representable in a S0.4 value -- if
2897 * we didn't clamp it, we'd end up with -8/16, which is the
2898 * opposite of what the shader author wanted.
2899 *
2900 * This is legal due to ARB_gpu_shader5's quantization
2901 * rules:
2902 *
2903 * "Not all values of <offset> may be supported; x and y
2904 * offsets may be rounded to fixed-point values with the
2905 * number of fraction bits given by the
2906 * implementation-dependent constant
2907 * FRAGMENT_INTERPOLATION_OFFSET_BITS"
2908 */
2909 set_condmod(BRW_CONDITIONAL_L,
2910 bld.SEL(offset(src, bld, i), itemp, brw_imm_d(7)));
2911 }
2912
2913 const enum opcode opcode = FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET;
2914 emit_pixel_interpolater_send(bld,
2915 opcode,
2916 dst_xy,
2917 src,
2918 brw_imm_ud(0u),
2919 interpolation);
2920 }
2921 break;
2922 }
2923
2924 default:
2925 unreachable("Invalid intrinsic");
2926 }
2927
2928 for (unsigned j = 0; j < instr->num_components; j++) {
2929 fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
2930 src.type = dest.type;
2931
2932 bld.emit(FS_OPCODE_LINTERP, dest, dst_xy, src);
2933 dest = offset(dest, bld, 1);
2934 }
2935 break;
2936 }
2937 default:
2938 nir_emit_intrinsic(bld, instr);
2939 break;
2940 }
2941 }
2942
2943 void
2944 fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
2945 nir_intrinsic_instr *instr)
2946 {
2947 assert(stage == MESA_SHADER_COMPUTE);
2948 struct brw_cs_prog_data *cs_prog_data =
2949 (struct brw_cs_prog_data *) prog_data;
2950
2951 fs_reg dest;
2952 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2953 dest = get_nir_dest(instr->dest);
2954
2955 switch (instr->intrinsic) {
2956 case nir_intrinsic_barrier:
2957 emit_barrier();
2958 cs_prog_data->uses_barrier = true;
2959 break;
2960
2961 case nir_intrinsic_load_local_invocation_id:
2962 case nir_intrinsic_load_work_group_id: {
2963 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
2964 fs_reg val = nir_system_values[sv];
2965 assert(val.file != BAD_FILE);
2966 dest.type = val.type;
2967 for (unsigned i = 0; i < 3; i++)
2968 bld.MOV(offset(dest, bld, i), offset(val, bld, i));
2969 break;
2970 }
2971
2972 case nir_intrinsic_load_num_work_groups: {
2973 const unsigned surface =
2974 cs_prog_data->binding_table.work_groups_start;
2975
2976 cs_prog_data->uses_num_work_groups = true;
2977
2978 fs_reg surf_index = brw_imm_ud(surface);
2979 brw_mark_surface_used(prog_data, surface);
2980
2981 /* Read the 3 GLuint components of gl_NumWorkGroups */
2982 for (unsigned i = 0; i < 3; i++) {
2983 fs_reg read_result =
2984 emit_untyped_read(bld, surf_index,
2985 brw_imm_ud(i << 2),
2986 1 /* dims */, 1 /* size */,
2987 BRW_PREDICATE_NONE);
2988 read_result.type = dest.type;
2989 bld.MOV(dest, read_result);
2990 dest = offset(dest, bld, 1);
2991 }
2992 break;
2993 }
2994
2995 case nir_intrinsic_shared_atomic_add:
2996 nir_emit_shared_atomic(bld, BRW_AOP_ADD, instr);
2997 break;
2998 case nir_intrinsic_shared_atomic_imin:
2999 nir_emit_shared_atomic(bld, BRW_AOP_IMIN, instr);
3000 break;
3001 case nir_intrinsic_shared_atomic_umin:
3002 nir_emit_shared_atomic(bld, BRW_AOP_UMIN, instr);
3003 break;
3004 case nir_intrinsic_shared_atomic_imax:
3005 nir_emit_shared_atomic(bld, BRW_AOP_IMAX, instr);
3006 break;
3007 case nir_intrinsic_shared_atomic_umax:
3008 nir_emit_shared_atomic(bld, BRW_AOP_UMAX, instr);
3009 break;
3010 case nir_intrinsic_shared_atomic_and:
3011 nir_emit_shared_atomic(bld, BRW_AOP_AND, instr);
3012 break;
3013 case nir_intrinsic_shared_atomic_or:
3014 nir_emit_shared_atomic(bld, BRW_AOP_OR, instr);
3015 break;
3016 case nir_intrinsic_shared_atomic_xor:
3017 nir_emit_shared_atomic(bld, BRW_AOP_XOR, instr);
3018 break;
3019 case nir_intrinsic_shared_atomic_exchange:
3020 nir_emit_shared_atomic(bld, BRW_AOP_MOV, instr);
3021 break;
3022 case nir_intrinsic_shared_atomic_comp_swap:
3023 nir_emit_shared_atomic(bld, BRW_AOP_CMPWR, instr);
3024 break;
3025
3026 case nir_intrinsic_load_shared: {
3027 assert(devinfo->gen >= 7);
3028
3029 fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM);
3030
3031 /* Get the offset to read from */
3032 fs_reg offset_reg;
3033 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
3034 if (const_offset) {
3035 offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u32[0]);
3036 } else {
3037 offset_reg = vgrf(glsl_type::uint_type);
3038 bld.ADD(offset_reg,
3039 retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
3040 brw_imm_ud(instr->const_index[0]));
3041 }
3042
3043 /* Read the vector */
3044 fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
3045 1 /* dims */,
3046 instr->num_components,
3047 BRW_PREDICATE_NONE);
3048 read_result.type = dest.type;
3049 for (int i = 0; i < instr->num_components; i++)
3050 bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
3051
3052 break;
3053 }
3054
3055 case nir_intrinsic_store_shared: {
3056 assert(devinfo->gen >= 7);
3057
3058 /* Block index */
3059 fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM);
3060
3061 /* Value */
3062 fs_reg val_reg = get_nir_src(instr->src[0]);
3063
3064 /* Writemask */
3065 unsigned writemask = instr->const_index[1];
3066
3067 /* Combine groups of consecutive enabled channels in one write
3068 * message. We use ffs to find the first enabled channel and then ffs on
3069 * the bit-inverse, down-shifted writemask to determine the length of
3070 * the block of enabled bits.
3071 */
3072 while (writemask) {
3073 unsigned first_component = ffs(writemask) - 1;
3074 unsigned length = ffs(~(writemask >> first_component)) - 1;
3075 fs_reg offset_reg;
3076
3077 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
3078 if (const_offset) {
3079 offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u32[0] +
3080 4 * first_component);
3081 } else {
3082 offset_reg = vgrf(glsl_type::uint_type);
3083 bld.ADD(offset_reg,
3084 retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_UD),
3085 brw_imm_ud(instr->const_index[0] + 4 * first_component));
3086 }
3087
3088 emit_untyped_write(bld, surf_index, offset_reg,
3089 offset(val_reg, bld, first_component),
3090 1 /* dims */, length,
3091 BRW_PREDICATE_NONE);
3092
3093 /* Clear the bits in the writemask that we just wrote, then try
3094 * again to see if more channels are left.
3095 */
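         /* (15 << n) keeps at most the four channels above the block we just
          * wrote; everything at or below it is cleared.
          */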
3096 writemask &= (15 << (first_component + length));
3097 }
3098
3099 break;
3100 }
3101
3102 default:
3103 nir_emit_intrinsic(bld, instr);
3104 break;
3105 }
3106 }
3107
3108 void
3109 fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
3110 {
3111 fs_reg dest;
3112 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3113 dest = get_nir_dest(instr->dest);
3114
3115 switch (instr->intrinsic) {
3116 case nir_intrinsic_atomic_counter_inc:
3117 case nir_intrinsic_atomic_counter_dec:
3118 case nir_intrinsic_atomic_counter_read: {
3119 /* Get the arguments of the atomic intrinsic. */
3120 const fs_reg offset = get_nir_src(instr->src[0]);
3121 const unsigned surface = (stage_prog_data->binding_table.abo_start +
3122 instr->const_index[0]);
3123 fs_reg tmp;
3124
3125 /* Emit a surface read or atomic op. */
3126 switch (instr->intrinsic) {
3127 case nir_intrinsic_atomic_counter_read:
3128 tmp = emit_untyped_read(bld, brw_imm_ud(surface), offset, 1, 1);
3129 break;
3130
3131 case nir_intrinsic_atomic_counter_inc:
3132 tmp = emit_untyped_atomic(bld, brw_imm_ud(surface), offset, fs_reg(),
3133 fs_reg(), 1, 1, BRW_AOP_INC);
3134 break;
3135
3136 case nir_intrinsic_atomic_counter_dec:
3137 tmp = emit_untyped_atomic(bld, brw_imm_ud(surface), offset, fs_reg(),
3138 fs_reg(), 1, 1, BRW_AOP_PREDEC);
3139 break;
3140
3141 default:
3142 unreachable("Unreachable");
3143 }
3144
3145 /* Assign the result. */
3146 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD), tmp);
3147
3148 /* Mark the surface as used. */
3149 brw_mark_surface_used(stage_prog_data, surface);
3150 break;
3151 }
3152
3153 case nir_intrinsic_image_load:
3154 case nir_intrinsic_image_store:
3155 case nir_intrinsic_image_atomic_add:
3156 case nir_intrinsic_image_atomic_min:
3157 case nir_intrinsic_image_atomic_max:
3158 case nir_intrinsic_image_atomic_and:
3159 case nir_intrinsic_image_atomic_or:
3160 case nir_intrinsic_image_atomic_xor:
3161 case nir_intrinsic_image_atomic_exchange:
3162 case nir_intrinsic_image_atomic_comp_swap: {
3163 using namespace image_access;
3164
3165 /* Get the referenced image variable and type. */
3166 const nir_variable *var = instr->variables[0]->var;
3167 const glsl_type *type = var->type->without_array();
3168 const brw_reg_type base_type = get_image_base_type(type);
3169
3170 /* Get some metadata from the image intrinsic. */
3171 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
3172 const unsigned arr_dims = type->sampler_array ? 1 : 0;
3173 const unsigned surf_dims = type->coordinate_components() - arr_dims;
3174 const unsigned format = var->data.image.format;
3175
3176 /* Get the arguments of the image intrinsic. */
3177 const fs_reg image = get_nir_image_deref(instr->variables[0]);
3178 const fs_reg addr = retype(get_nir_src(instr->src[0]),
3179 BRW_REGISTER_TYPE_UD);
3180 const fs_reg src0 = (info->num_srcs >= 3 ?
3181 retype(get_nir_src(instr->src[2]), base_type) :
3182 fs_reg());
3183 const fs_reg src1 = (info->num_srcs >= 4 ?
3184 retype(get_nir_src(instr->src[3]), base_type) :
3185 fs_reg());
3186 fs_reg tmp;
3187
3188 /* Emit an image load, store or atomic op. */
3189 if (instr->intrinsic == nir_intrinsic_image_load)
3190 tmp = emit_image_load(bld, image, addr, surf_dims, arr_dims, format);
3191
3192 else if (instr->intrinsic == nir_intrinsic_image_store)
3193 emit_image_store(bld, image, addr, src0, surf_dims, arr_dims,
3194 var->data.image.write_only ? GL_NONE : format);
3195
3196 else
3197 tmp = emit_image_atomic(bld, image, addr, src0, src1,
3198 surf_dims, arr_dims, info->dest_components,
3199 get_image_atomic_op(instr->intrinsic, type));
3200
3201 /* Assign the result. */
3202 for (unsigned c = 0; c < info->dest_components; ++c)
3203 bld.MOV(offset(retype(dest, base_type), bld, c),
3204 offset(tmp, bld, c));
3205 break;
3206 }
3207
3208 case nir_intrinsic_memory_barrier_atomic_counter:
3209 case nir_intrinsic_memory_barrier_buffer:
3210 case nir_intrinsic_memory_barrier_image:
3211 case nir_intrinsic_memory_barrier: {
3212 const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 16 / dispatch_width);
3213 bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
3214 ->regs_written = 2;
3215 break;
3216 }
3217
3218 case nir_intrinsic_group_memory_barrier:
3219 case nir_intrinsic_memory_barrier_shared:
3220 /* We treat these workgroup-level barriers as no-ops. This should be
3221 * safe at present and as long as:
3222 *
3223 * - Memory access instructions are not subsequently reordered by the
3224 * compiler back-end.
3225 *
3226 * - All threads from a given compute shader workgroup fit within a
3227 * single subslice and therefore talk to the same HDC shared unit
3228    * which supposedly guarantees ordering and coherency between threads
3229 * from the same workgroup. This may change in the future when we
3230 * start splitting workgroups across multiple subslices.
3231 *
3232 * - The context is not in fault-and-stream mode, which could cause
3233 * memory transactions (including to SLM) prior to the barrier to be
3234 * replayed after the barrier if a pagefault occurs. This shouldn't
3235 * be a problem up to and including SKL because fault-and-stream is
3236 * not usable due to hardware issues, but that's likely to change in
3237 * the future.
3238 */
3239 break;
3240
3241 case nir_intrinsic_shader_clock: {
3242 /* We cannot do anything if there is an event, so ignore it for now */
3243 fs_reg shader_clock = get_timestamp(bld);
3244 const fs_reg srcs[] = { shader_clock.set_smear(0), shader_clock.set_smear(1) };
3245
3246 bld.LOAD_PAYLOAD(dest, srcs, ARRAY_SIZE(srcs), 0);
3247 break;
3248 }
3249
3250 case nir_intrinsic_image_size: {
3251 /* Get the referenced image variable and type. */
3252 const nir_variable *var = instr->variables[0]->var;
3253 const glsl_type *type = var->type->without_array();
3254
3255 /* Get the size of the image. */
3256 const fs_reg image = get_nir_image_deref(instr->variables[0]);
3257 const fs_reg size = offset(image, bld, BRW_IMAGE_PARAM_SIZE_OFFSET);
3258
3259 /* For 1DArray image types, the array index is stored in the Z component.
3260 * Fix this by swizzling the Z component to the Y component.
3261 */
3262 const bool is_1d_array_image =
3263 type->sampler_dimensionality == GLSL_SAMPLER_DIM_1D &&
3264 type->sampler_array;
3265
3266 /* For CubeArray images, we should count the number of cubes instead
3267 * of the number of faces. Fix it by dividing the (Z component) by 6.
3268 */
3269 const bool is_cube_array_image =
3270 type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE &&
3271 type->sampler_array;
3272
3273 /* Copy all the components. */
3274 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
3275 for (unsigned c = 0; c < info->dest_components; ++c) {
3276 if ((int)c >= type->coordinate_components()) {
3277 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
3278 brw_imm_d(1));
3279 } else if (c == 1 && is_1d_array_image) {
3280 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
3281 offset(size, bld, 2));
3282 } else if (c == 2 && is_cube_array_image) {
3283 bld.emit(SHADER_OPCODE_INT_QUOTIENT,
3284 offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
3285 offset(size, bld, c), brw_imm_d(6));
3286 } else {
3287 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
3288 offset(size, bld, c));
3289 }
3290 }
3291
3292 break;
3293 }
3294
3295 case nir_intrinsic_image_samples:
3296 /* The driver does not support multi-sampled images. */
3297 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), brw_imm_d(1));
3298 break;
3299
3300 case nir_intrinsic_load_uniform: {
3301 /* Offsets are in bytes but they should always be multiples of 4 */
3302 assert(instr->const_index[0] % 4 == 0);
3303
3304 fs_reg src(UNIFORM, instr->const_index[0] / 4, dest.type);
3305
3306 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
3307 if (const_offset) {
3308 /* Offsets are in bytes but they should always be multiples of 4 */
3309 assert(const_offset->u32[0] % 4 == 0);
3310 src.reg_offset = const_offset->u32[0] / 4;
3311
3312 for (unsigned j = 0; j < instr->num_components; j++) {
3313 bld.MOV(offset(dest, bld, j), offset(src, bld, j));
3314 }
3315 } else {
3316 fs_reg indirect = retype(get_nir_src(instr->src[0]),
3317 BRW_REGISTER_TYPE_UD);
3318
3319 /* We need to pass a size to the MOV_INDIRECT but we don't want it to
3320 * go past the end of the uniform. In order to keep the n'th
3321 * component from running past, we subtract off the size of all but
3322 * one component of the vector.
3323 */
3324 assert(instr->const_index[1] >=
3325 instr->num_components * (int) type_sz(dest.type));
3326 unsigned read_size = instr->const_index[1] -
3327 (instr->num_components - 1) * type_sz(dest.type);
3328
3329 for (unsigned j = 0; j < instr->num_components; j++) {
3330 bld.emit(SHADER_OPCODE_MOV_INDIRECT,
3331 offset(dest, bld, j), offset(src, bld, j),
3332 indirect, brw_imm_ud(read_size));
3333 }
3334 }
3335 break;
3336 }
3337
3338 case nir_intrinsic_load_ubo: {
3339 nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
3340 fs_reg surf_index;
3341
3342 if (const_index) {
3343 const unsigned index = stage_prog_data->binding_table.ubo_start +
3344 const_index->u32[0];
3345 surf_index = brw_imm_ud(index);
3346 brw_mark_surface_used(prog_data, index);
3347 } else {
3348 /* The block index is not a constant. Evaluate the index expression
3349 * per-channel and add the base UBO index; we have to select a value
3350 * from any live channel.
3351 */
3352 surf_index = vgrf(glsl_type::uint_type);
3353 bld.ADD(surf_index, get_nir_src(instr->src[0]),
3354 brw_imm_ud(stage_prog_data->binding_table.ubo_start));
3355 surf_index = bld.emit_uniformize(surf_index);
3356
3357 /* Assume this may touch any UBO. It would be nice to provide
3358 * a tighter bound, but the array information is already lowered away.
3359 */
3360 brw_mark_surface_used(prog_data,
3361 stage_prog_data->binding_table.ubo_start +
3362 nir->info.num_ubos - 1);
3363 }
3364
3365 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
3366 if (const_offset == NULL) {
3367 fs_reg base_offset = retype(get_nir_src(instr->src[1]),
3368 BRW_REGISTER_TYPE_UD);
3369
3370 for (int i = 0; i < instr->num_components; i++)
3371 VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
3372 base_offset, i * 4);
3373 } else {
3374 fs_reg packed_consts = vgrf(glsl_type::float_type);
3375 packed_consts.type = dest.type;
3376
3377 struct brw_reg const_offset_reg = brw_imm_ud(const_offset->u32[0] & ~15);
3378 bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
3379 surf_index, const_offset_reg);
3380
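/* Worked example (illustrative offset): a vec2 at byte offset 20 loads the
 * 16-byte-aligned block at offset 16 (20 & ~15) above, then smears
 * components 20 % 16 / 4 + i, i.e. components 1 and 2 of packed_consts.
 */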
3381 for (unsigned i = 0; i < instr->num_components; i++) {
3382 packed_consts.set_smear(const_offset->u32[0] % 16 / 4 + i);
3383
3384 /* The std140 packing rules don't allow vectors to cross 16-byte
3385 * boundaries, and a reg is 32 bytes.
3386 */
3387 assert(packed_consts.subreg_offset < 32);
3388
3389 bld.MOV(dest, packed_consts);
3390 dest = offset(dest, bld, 1);
3391 }
3392 }
3393 break;
3394 }
3395
3396 case nir_intrinsic_load_ssbo: {
3397 assert(devinfo->gen >= 7);
3398
3399 nir_const_value *const_uniform_block =
3400 nir_src_as_const_value(instr->src[0]);
3401
3402 fs_reg surf_index;
3403 if (const_uniform_block) {
3404 unsigned index = stage_prog_data->binding_table.ssbo_start +
3405 const_uniform_block->u32[0];
3406 surf_index = brw_imm_ud(index);
3407 brw_mark_surface_used(prog_data, index);
3408 } else {
3409 surf_index = vgrf(glsl_type::uint_type);
3410 bld.ADD(surf_index, get_nir_src(instr->src[0]),
3411 brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
3412
3413 /* Assume this may touch any SSBO. It would be nice to provide
3414 * a tighter bound, but the array information is already lowered away.
3415 */
3416 brw_mark_surface_used(prog_data,
3417 stage_prog_data->binding_table.ssbo_start +
3418 nir->info.num_ssbos - 1);
3419 }
3420
3421 fs_reg offset_reg;
3422 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
3423 if (const_offset) {
3424 offset_reg = brw_imm_ud(const_offset->u32[0]);
3425 } else {
3426 offset_reg = get_nir_src(instr->src[1]);
3427 }
3428
3429 /* Read the vector */
3430 fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
3431 1 /* dims */,
3432 instr->num_components,
3433 BRW_PREDICATE_NONE);
3434 read_result.type = dest.type;
3435 for (int i = 0; i < instr->num_components; i++)
3436 bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
3437
3438 break;
3439 }
3440
3441 case nir_intrinsic_load_input: {
3442 fs_reg src;
3443 if (stage == MESA_SHADER_VERTEX) {
3444 src = fs_reg(ATTR, instr->const_index[0], dest.type);
3445 } else {
3446 src = offset(retype(nir_inputs, dest.type), bld,
3447 instr->const_index[0]);
3448 }
3449
3450 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
3451 assert(const_offset && "Indirect input loads not allowed");
3452 src = offset(src, bld, const_offset->u32[0]);
3453
3454 for (unsigned j = 0; j < instr->num_components; j++) {
3455 bld.MOV(offset(dest, bld, j), offset(src, bld, j));
3456 }
3457 break;
3458 }
3459
3460 case nir_intrinsic_store_ssbo: {
3461 assert(devinfo->gen >= 7);
3462
3463 /* Block index */
3464 fs_reg surf_index;
3465 nir_const_value *const_uniform_block =
3466 nir_src_as_const_value(instr->src[1]);
3467 if (const_uniform_block) {
3468 unsigned index = stage_prog_data->binding_table.ssbo_start +
3469 const_uniform_block->u32[0];
3470 surf_index = brw_imm_ud(index);
3471 brw_mark_surface_used(prog_data, index);
3472 } else {
3473 surf_index = vgrf(glsl_type::uint_type);
3474 bld.ADD(surf_index, get_nir_src(instr->src[1]),
3475 brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
3476
3477 brw_mark_surface_used(prog_data,
3478 stage_prog_data->binding_table.ssbo_start +
3479 nir->info.num_ssbos - 1);
3480 }
3481
3482 /* Value */
3483 fs_reg val_reg = get_nir_src(instr->src[0]);
3484
3485 /* Writemask */
3486 unsigned writemask = instr->const_index[0];
3487
3488 /* Combine groups of consecutive enabled channels in one write
3489 * message. We use ffs to find the first enabled channel and then ffs on
3490 * the bit-inverse, down-shifted writemask to determine the length of
3491 * the block of enabled bits.
3492 */
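/* Worked example (illustrative mask): writemask 0b1011 first emits a write
 * of length 2 starting at component 0; after the mask below clears those
 * bits, a second write of length 1 covers component 3.
 */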
3493 while (writemask) {
3494 unsigned first_component = ffs(writemask) - 1;
3495 unsigned length = ffs(~(writemask >> first_component)) - 1;
3496
3497 fs_reg offset_reg;
3498 nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
3499 if (const_offset) {
3500 offset_reg = brw_imm_ud(const_offset->u32[0] + 4 * first_component);
3501 } else {
3502 offset_reg = vgrf(glsl_type::uint_type);
3503 bld.ADD(offset_reg,
3504 retype(get_nir_src(instr->src[2]), BRW_REGISTER_TYPE_UD),
3505 brw_imm_ud(4 * first_component));
3506 }
3507
3508 emit_untyped_write(bld, surf_index, offset_reg,
3509 offset(val_reg, bld, first_component),
3510 1 /* dims */, length,
3511 BRW_PREDICATE_NONE);
3512
3513 /* Clear the bits in the writemask that we just wrote, then try
3514 * again to see if more channels are left.
3515 */
3516 writemask &= (15 << (first_component + length));
3517 }
3518 break;
3519 }
3520
3521 case nir_intrinsic_store_output: {
3522 fs_reg src = get_nir_src(instr->src[0]);
3523 fs_reg new_dest = offset(retype(nir_outputs, src.type), bld,
3524 instr->const_index[0]);
3525
3526 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
3527 assert(const_offset && "Indirect output stores not allowed");
3528 new_dest = offset(new_dest, bld, const_offset->u32[0]);
3529
3530 for (unsigned j = 0; j < instr->num_components; j++) {
3531 bld.MOV(offset(new_dest, bld, j), offset(src, bld, j));
3532 }
3533 break;
3534 }
3535
3536 case nir_intrinsic_ssbo_atomic_add:
3537 nir_emit_ssbo_atomic(bld, BRW_AOP_ADD, instr);
3538 break;
3539 case nir_intrinsic_ssbo_atomic_imin:
3540 nir_emit_ssbo_atomic(bld, BRW_AOP_IMIN, instr);
3541 break;
3542 case nir_intrinsic_ssbo_atomic_umin:
3543 nir_emit_ssbo_atomic(bld, BRW_AOP_UMIN, instr);
3544 break;
3545 case nir_intrinsic_ssbo_atomic_imax:
3546 nir_emit_ssbo_atomic(bld, BRW_AOP_IMAX, instr);
3547 break;
3548 case nir_intrinsic_ssbo_atomic_umax:
3549 nir_emit_ssbo_atomic(bld, BRW_AOP_UMAX, instr);
3550 break;
3551 case nir_intrinsic_ssbo_atomic_and:
3552 nir_emit_ssbo_atomic(bld, BRW_AOP_AND, instr);
3553 break;
3554 case nir_intrinsic_ssbo_atomic_or:
3555 nir_emit_ssbo_atomic(bld, BRW_AOP_OR, instr);
3556 break;
3557 case nir_intrinsic_ssbo_atomic_xor:
3558 nir_emit_ssbo_atomic(bld, BRW_AOP_XOR, instr);
3559 break;
3560 case nir_intrinsic_ssbo_atomic_exchange:
3561 nir_emit_ssbo_atomic(bld, BRW_AOP_MOV, instr);
3562 break;
3563 case nir_intrinsic_ssbo_atomic_comp_swap:
3564 nir_emit_ssbo_atomic(bld, BRW_AOP_CMPWR, instr);
3565 break;
3566
3567 case nir_intrinsic_get_buffer_size: {
3568 nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
3569 unsigned ssbo_index = const_uniform_block ? const_uniform_block->u32[0] : 0;
3570 int reg_width = dispatch_width / 8;
3571
3572 /* Set LOD = 0 */
3573 fs_reg source = brw_imm_d(0);
3574
3575 int mlen = 1 * reg_width;
3576
3577 /* A resinfo sampler message is used to get the buffer size.
3578 * The SIMD8 writeback message consists of four registers and the
3579 * SIMD16 writeback message of eight destination registers
3580 * (two per component), although we are only interested in the
3581 * first component, where resinfo returns the buffer size for
3582 * SURFTYPE_BUFFER.
3583 */
3584 int regs_written = 4 * mlen;
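/* For example: in SIMD8 reg_width is 1, so mlen = 1 and regs_written = 4;
 * in SIMD16 reg_width is 2, so mlen = 2 and regs_written = 8, matching the
 * writeback sizes described above.
 */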
3585 fs_reg src_payload = fs_reg(VGRF, alloc.allocate(mlen),
3586 BRW_REGISTER_TYPE_UD);
3587 bld.LOAD_PAYLOAD(src_payload, &source, 1, 0);
3588 fs_reg buffer_size = fs_reg(VGRF, alloc.allocate(regs_written),
3589 BRW_REGISTER_TYPE_UD);
3590 const unsigned index = prog_data->binding_table.ssbo_start + ssbo_index;
3591 fs_inst *inst = bld.emit(FS_OPCODE_GET_BUFFER_SIZE, buffer_size,
3592 src_payload, brw_imm_ud(index));
3593 inst->header_size = 0;
3594 inst->mlen = mlen;
3595 inst->regs_written = regs_written;
3597 bld.MOV(retype(dest, buffer_size.type), buffer_size);
3598
3599 brw_mark_surface_used(prog_data, index);
3600 break;
3601 }
3602
3603 default:
3604 unreachable("unknown intrinsic");
3605 }
3606 }
3607
3608 void
3609 fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
3610 int op, nir_intrinsic_instr *instr)
3611 {
3612 fs_reg dest;
3613 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3614 dest = get_nir_dest(instr->dest);
3615
3616 fs_reg surface;
3617 nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
3618 if (const_surface) {
3619 unsigned surf_index = stage_prog_data->binding_table.ssbo_start +
3620 const_surface->u32[0];
3621 surface = brw_imm_ud(surf_index);
3622 brw_mark_surface_used(prog_data, surf_index);
3623 } else {
3624 surface = vgrf(glsl_type::uint_type);
3625 bld.ADD(surface, get_nir_src(instr->src[0]),
3626 brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
3627
3628 /* Assume this may touch any SSBO. This is the same as we do for
3629 * other UBO/SSBO accesses with a non-constant surface index.
3630 */
3631 brw_mark_surface_used(prog_data,
3632 stage_prog_data->binding_table.ssbo_start +
3633 nir->info.num_ssbos - 1);
3634 }
3635
3636 fs_reg offset = get_nir_src(instr->src[1]);
3637 fs_reg data1 = get_nir_src(instr->src[2]);
3638 fs_reg data2;
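/* Only compare-and-swap needs a second data operand; every other atomic op
 * leaves data2 as BAD_FILE and sends a single value.
 */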
3639 if (op == BRW_AOP_CMPWR)
3640 data2 = get_nir_src(instr->src[3]);
3641
3642 /* Emit the actual atomic operation */
3643
3644 fs_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
3645 data1, data2,
3646 1 /* dims */, 1 /* rsize */,
3647 op,
3648 BRW_PREDICATE_NONE);
3649 dest.type = atomic_result.type;
3650 bld.MOV(dest, atomic_result);
3651 }
3652
3653 void
3654 fs_visitor::nir_emit_shared_atomic(const fs_builder &bld,
3655 int op, nir_intrinsic_instr *instr)
3656 {
3657 fs_reg dest;
3658 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3659 dest = get_nir_dest(instr->dest);
3660
3661 fs_reg surface = brw_imm_ud(GEN7_BTI_SLM);
3662 fs_reg offset = get_nir_src(instr->src[0]);
3663 fs_reg data1 = get_nir_src(instr->src[1]);
3664 fs_reg data2;
3665 if (op == BRW_AOP_CMPWR)
3666 data2 = get_nir_src(instr->src[2]);
3667
3668 /* Emit the actual atomic operation */
3669
3670 fs_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
3671 data1, data2,
3672 1 /* dims */, 1 /* rsize */,
3673 op,
3674 BRW_PREDICATE_NONE);
3675 dest.type = atomic_result.type;
3676 bld.MOV(dest, atomic_result);
3677 }
3678
3679 void
3680 fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
3681 {
3682 unsigned texture = instr->texture_index;
3683 unsigned sampler = instr->sampler_index;
3684
3685 fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
3686
3687 srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(texture);
3688 srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_ud(sampler);
3689
3690 int lod_components = 0;
3691
3692 /* The hardware requires a LOD for buffer textures */
3693 if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
3694 srcs[TEX_LOGICAL_SRC_LOD] = brw_imm_d(0);
3695
3696 for (unsigned i = 0; i < instr->num_srcs; i++) {
3697 fs_reg src = get_nir_src(instr->src[i].src);
3698 switch (instr->src[i].src_type) {
3699 case nir_tex_src_bias:
3700 srcs[TEX_LOGICAL_SRC_LOD] = retype(src, BRW_REGISTER_TYPE_F);
3701 break;
3702 case nir_tex_src_comparitor:
3703 srcs[TEX_LOGICAL_SRC_SHADOW_C] = retype(src, BRW_REGISTER_TYPE_F);
3704 break;
3705 case nir_tex_src_coord:
3706 switch (instr->op) {
3707 case nir_texop_txf:
3708 case nir_texop_txf_ms:
3709 case nir_texop_samples_identical:
3710 srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_REGISTER_TYPE_D);
3711 break;
3712 default:
3713 srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_REGISTER_TYPE_F);
3714 break;
3715 }
3716 break;
3717 case nir_tex_src_ddx:
3718 srcs[TEX_LOGICAL_SRC_LOD] = retype(src, BRW_REGISTER_TYPE_F);
3719 lod_components = nir_tex_instr_src_size(instr, i);
3720 break;
3721 case nir_tex_src_ddy:
3722 srcs[TEX_LOGICAL_SRC_LOD2] = retype(src, BRW_REGISTER_TYPE_F);
3723 break;
3724 case nir_tex_src_lod:
3725 switch (instr->op) {
3726 case nir_texop_txs:
3727 srcs[TEX_LOGICAL_SRC_LOD] = retype(src, BRW_REGISTER_TYPE_UD);
3728 break;
3729 case nir_texop_txf:
3730 srcs[TEX_LOGICAL_SRC_LOD] = retype(src, BRW_REGISTER_TYPE_D);
3731 break;
3732 default:
3733 srcs[TEX_LOGICAL_SRC_LOD] = retype(src, BRW_REGISTER_TYPE_F);
3734 break;
3735 }
3736 break;
3737 case nir_tex_src_ms_index:
3738 srcs[TEX_LOGICAL_SRC_SAMPLE_INDEX] = retype(src, BRW_REGISTER_TYPE_UD);
3739 break;
3740
3741 case nir_tex_src_offset: {
3742 nir_const_value *const_offset =
3743 nir_src_as_const_value(instr->src[i].src);
3744 if (const_offset) {
3745 unsigned header_bits = brw_texture_offset(const_offset->i32, 3);
3746 if (header_bits != 0)
3747 srcs[TEX_LOGICAL_SRC_OFFSET_VALUE] = brw_imm_ud(header_bits);
3748 } else {
3749 srcs[TEX_LOGICAL_SRC_OFFSET_VALUE] =
3750 retype(src, BRW_REGISTER_TYPE_D);
3751 }
3752 break;
3753 }
3754
3755 case nir_tex_src_projector:
3756 unreachable("should be lowered");
3757
3758 case nir_tex_src_texture_offset: {
3759 /* Figure out the highest possible texture index and mark it as used */
3760 uint32_t max_used = texture + instr->texture_array_size - 1;
3761 if (instr->op == nir_texop_tg4 && devinfo->gen < 8) {
3762 max_used += stage_prog_data->binding_table.gather_texture_start;
3763 } else {
3764 max_used += stage_prog_data->binding_table.texture_start;
3765 }
3766 brw_mark_surface_used(prog_data, max_used);
3767
3768 /* Emit code to evaluate the actual indexing expression */
3769 fs_reg tmp = vgrf(glsl_type::uint_type);
3770 bld.ADD(tmp, src, brw_imm_ud(texture));
3771 srcs[TEX_LOGICAL_SRC_SURFACE] = bld.emit_uniformize(tmp);
3772 break;
3773 }
3774
3775 case nir_tex_src_sampler_offset: {
3776 /* Emit code to evaluate the actual indexing expression */
3777 fs_reg tmp = vgrf(glsl_type::uint_type);
3778 bld.ADD(tmp, src, brw_imm_ud(sampler));
3779 srcs[TEX_LOGICAL_SRC_SAMPLER] = bld.emit_uniformize(tmp);
3780 break;
3781 }
3782
3783 default:
3784 unreachable("unknown texture source");
3785 }
3786 }
3787
3788 if (instr->op == nir_texop_txf_ms ||
3789 instr->op == nir_texop_samples_identical) {
3790 if (devinfo->gen >= 7 &&
3791 key_tex->compressed_multisample_layout_mask & (1 << texture)) {
3792 srcs[TEX_LOGICAL_SRC_MCS] =
3793 emit_mcs_fetch(srcs[TEX_LOGICAL_SRC_COORDINATE],
3794 instr->coord_components,
3795 srcs[TEX_LOGICAL_SRC_SURFACE]);
3796 } else {
3797 srcs[TEX_LOGICAL_SRC_MCS] = brw_imm_ud(0u);
3798 }
3799 }
3800
3801 srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(instr->coord_components);
3802 srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(lod_components);
3803
3804 if (instr->op == nir_texop_query_levels) {
3805 /* textureQueryLevels() is implemented in terms of TXS so we need to
3806 * pass a valid LOD argument.
3807 */
3808 assert(srcs[TEX_LOGICAL_SRC_LOD].file == BAD_FILE);
3809 srcs[TEX_LOGICAL_SRC_LOD] = brw_imm_ud(0u);
3810 }
3811
3812 enum opcode opcode;
3813 switch (instr->op) {
3814 case nir_texop_tex:
3815 opcode = SHADER_OPCODE_TEX_LOGICAL;
3816 break;
3817 case nir_texop_txb:
3818 opcode = FS_OPCODE_TXB_LOGICAL;
3819 break;
3820 case nir_texop_txl:
3821 opcode = SHADER_OPCODE_TXL_LOGICAL;
3822 break;
3823 case nir_texop_txd:
3824 opcode = SHADER_OPCODE_TXD_LOGICAL;
3825 break;
3826 case nir_texop_txf:
3827 opcode = SHADER_OPCODE_TXF_LOGICAL;
3828 break;
3829 case nir_texop_txf_ms:
3830 if ((key_tex->msaa_16 & (1 << sampler)))
3831 opcode = SHADER_OPCODE_TXF_CMS_W_LOGICAL;
3832 else
3833 opcode = SHADER_OPCODE_TXF_CMS_LOGICAL;
3834 break;
3835 case nir_texop_query_levels:
3836 case nir_texop_txs:
3837 opcode = SHADER_OPCODE_TXS_LOGICAL;
3838 break;
3839 case nir_texop_lod:
3840 opcode = SHADER_OPCODE_LOD_LOGICAL;
3841 break;
3842 case nir_texop_tg4:
3843 if (srcs[TEX_LOGICAL_SRC_OFFSET_VALUE].file != BAD_FILE &&
3844 srcs[TEX_LOGICAL_SRC_OFFSET_VALUE].file != IMM)
3845 opcode = SHADER_OPCODE_TG4_OFFSET_LOGICAL;
3846 else
3847 opcode = SHADER_OPCODE_TG4_LOGICAL;
3848 break;
3849 case nir_texop_texture_samples: {
3850 fs_reg dst = retype(get_nir_dest(instr->dest), BRW_REGISTER_TYPE_D);
3851
3852 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_D, 4);
3853 fs_inst *inst = bld.emit(SHADER_OPCODE_SAMPLEINFO, tmp,
3854 bld.vgrf(BRW_REGISTER_TYPE_D, 1),
3855 srcs[TEX_LOGICAL_SRC_SURFACE],
3856 srcs[TEX_LOGICAL_SRC_SURFACE]);
3857 inst->mlen = 1;
3858 inst->header_size = 1;
3859 inst->base_mrf = -1;
3860 inst->regs_written = 4 * (dispatch_width / 8);
3861
3862 /* Pick off the one component we care about */
3863 bld.MOV(dst, tmp);
3864 return;
3865 }
3866 case nir_texop_samples_identical: {
3867 fs_reg dst = retype(get_nir_dest(instr->dest), BRW_REGISTER_TYPE_D);
3868
3869 /* If mcs is an immediate value, it means there is no MCS. In that case
3870 * just return false.
3871 */
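/* With 16x MSAA the MCS value spans two components, so both halves are
 * ORed together before the comparison against zero.
 */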
3872 if (srcs[TEX_LOGICAL_SRC_MCS].file == BRW_IMMEDIATE_VALUE) {
3873 bld.MOV(dst, brw_imm_ud(0u));
3874 } else if ((key_tex->msaa_16 & (1 << sampler))) {
3875 fs_reg tmp = vgrf(glsl_type::uint_type);
3876 bld.OR(tmp, srcs[TEX_LOGICAL_SRC_MCS],
3877 offset(srcs[TEX_LOGICAL_SRC_MCS], bld, 1));
3878 bld.CMP(dst, tmp, brw_imm_ud(0u), BRW_CONDITIONAL_EQ);
3879 } else {
3880 bld.CMP(dst, srcs[TEX_LOGICAL_SRC_MCS], brw_imm_ud(0u),
3881 BRW_CONDITIONAL_EQ);
3882 }
3883 return;
3884 }
3885 default:
3886 unreachable("unknown texture opcode");
3887 }
3888
3889 fs_reg dst = bld.vgrf(brw_type_for_nir_type(instr->dest_type), 4);
3890 fs_inst *inst = bld.emit(opcode, dst, srcs, ARRAY_SIZE(srcs));
3891
3892 const unsigned dest_size = nir_tex_instr_dest_size(instr);
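/* On Gen9+ the sampler can return fewer than four channels, so regs_written
 * is sized from the highest component actually read (per the SSA use mask)
 * instead of always assuming a full vec4 response.
 */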
3893 if (devinfo->gen >= 9 &&
3894 instr->op != nir_texop_tg4 && instr->op != nir_texop_query_levels) {
3895 unsigned write_mask = instr->dest.is_ssa ?
3896 nir_ssa_def_components_read(&instr->dest.ssa):
3897 (1 << dest_size) - 1;
3898 assert(write_mask != 0); /* dead code should have been eliminated */
3899 inst->regs_written = _mesa_fls(write_mask) * dispatch_width / 8;
3900 } else {
3901 inst->regs_written = 4 * dispatch_width / 8;
3902 }
3903
3904 if (srcs[TEX_LOGICAL_SRC_SHADOW_C].file != BAD_FILE)
3905 inst->shadow_compare = true;
3906
3907 if (srcs[TEX_LOGICAL_SRC_OFFSET_VALUE].file == IMM)
3908 inst->offset = srcs[TEX_LOGICAL_SRC_OFFSET_VALUE].ud;
3909
3910 if (instr->op == nir_texop_tg4) {
3911 if (instr->component == 1 &&
3912 key_tex->gather_channel_quirk_mask & (1 << texture)) {
3913 /* gather4 sampler is broken for green channel on RG32F --
3914 * we must ask for blue instead.
3915 */
3916 inst->offset |= 2 << 16;
3917 } else {
3918 inst->offset |= instr->component << 16;
3919 }
3920
3921 if (devinfo->gen == 6)
3922 emit_gen6_gather_wa(key_tex->gen6_gather_wa[texture], dst);
3923 }
3924
3925 fs_reg nir_dest[4];
3926 for (unsigned i = 0; i < dest_size; i++)
3927 nir_dest[i] = offset(dst, bld, i);
3928
3929 bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
3930 instr->is_array;
3931
3932 if (instr->op == nir_texop_query_levels) {
3933 /* # levels is in .w */
3934 nir_dest[0] = offset(dst, bld, 3);
3935 } else if (instr->op == nir_texop_txs && dest_size >= 3 &&
3936 (devinfo->gen < 7 || is_cube_array)) {
3937 fs_reg depth = offset(dst, bld, 2);
3938 fs_reg fixed_depth = vgrf(glsl_type::int_type);
3939
3940 if (is_cube_array) {
3941 /* fixup #layers for cube map arrays */
3942 bld.emit(SHADER_OPCODE_INT_QUOTIENT, fixed_depth, depth, brw_imm_d(6));
3943 } else if (devinfo->gen < 7) {
3944 /* Gen4-6 return 0 instead of 1 for single layer surfaces. */
3945 bld.emit_minmax(fixed_depth, depth, brw_imm_d(1), BRW_CONDITIONAL_GE);
3946 }
3947
3948 nir_dest[2] = fixed_depth;
3949 }
3950
3951 bld.LOAD_PAYLOAD(get_nir_dest(instr->dest), nir_dest, dest_size, 0);
3952 }
3953
3954 void
3955 fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
3956 {
3957 switch (instr->type) {
3958 case nir_jump_break:
3959 bld.emit(BRW_OPCODE_BREAK);
3960 break;
3961 case nir_jump_continue:
3962 bld.emit(BRW_OPCODE_CONTINUE);
3963 break;
3964 case nir_jump_return:
3965 default:
3966 unreachable("unknown jump");
3967 }
3968 }