[mesa.git] / src / intel / compiler / brw_fs_nir.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "compiler/glsl/ir.h"
25 #include "brw_fs.h"
26 #include "brw_nir.h"
27 #include "nir_search_helpers.h"
28 #include "util/u_math.h"
29 #include "util/bitscan.h"
30
31 using namespace brw;
32
33 void
34 fs_visitor::emit_nir_code()
35 {
36 /* emit the arrays used for inputs and outputs - load/store intrinsics will
37 * be converted to reads/writes of these arrays
38 */
39 nir_setup_outputs();
40 nir_setup_uniforms();
41 nir_emit_system_values();
42
43 nir_emit_impl(nir_shader_get_entrypoint((nir_shader *)nir));
44 }
45
46 void
47 fs_visitor::nir_setup_outputs()
48 {
49 if (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_FRAGMENT)
50 return;
51
52 unsigned vec4s[VARYING_SLOT_TESS_MAX] = { 0, };
53
54 /* Calculate the size of output registers in a separate pass, before
55 * allocating them. With ARB_enhanced_layouts, multiple output variables
56 * may occupy the same slot, but have different type sizes.
57 */
58 nir_foreach_variable(var, &nir->outputs) {
59 const int loc = var->data.driver_location;
60 const unsigned var_vec4s =
61 var->data.compact ? DIV_ROUND_UP(glsl_get_length(var->type), 4)
62 : type_size_vec4(var->type, true);
63 vec4s[loc] = MAX2(vec4s[loc], var_vec4s);
64 }
65
66 for (unsigned loc = 0; loc < ARRAY_SIZE(vec4s);) {
67 if (vec4s[loc] == 0) {
68 loc++;
69 continue;
70 }
71
72 unsigned reg_size = vec4s[loc];
73
74 /* Check if there are any ranges that start within this range and extend
75 * past it. If so, include them in this allocation.
76 */
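/* For example (illustrative values): if vec4s[] were { 2, 3, 0, 0, ... },
 * the range at loc 0 covers 2 vec4s but the range at loc 1 covers locs
 * 1..3, so the loop below grows reg_size to MAX2(vec4s[1] + 1, 2) == 4
 * and locs 0..3 end up sharing a single VGRF allocation.
 */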
77 for (unsigned i = 1; i < reg_size; i++)
78 reg_size = MAX2(vec4s[i + loc] + i, reg_size);
79
80 fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_F, 4 * reg_size);
81 for (unsigned i = 0; i < reg_size; i++)
82 outputs[loc + i] = offset(reg, bld, 4 * i);
83
84 loc += reg_size;
85 }
86 }
87
88 void
89 fs_visitor::nir_setup_uniforms()
90 {
91 /* Only the first compile gets to set up uniforms. */
92 if (push_constant_loc) {
93 assert(pull_constant_loc);
94 return;
95 }
96
97 uniforms = nir->num_uniforms / 4;
98
99 if (stage == MESA_SHADER_COMPUTE) {
100 /* Add a uniform for the thread local id. It must be the last uniform
101 * on the list.
102 */
103 assert(uniforms == prog_data->nr_params);
104 uint32_t *param = brw_stage_prog_data_add_params(prog_data, 1);
105 *param = BRW_PARAM_BUILTIN_SUBGROUP_ID;
106 subgroup_id = fs_reg(UNIFORM, uniforms++, BRW_REGISTER_TYPE_UD);
107 }
108 }
109
110 static bool
111 emit_system_values_block(nir_block *block, fs_visitor *v)
112 {
113 fs_reg *reg;
114
115 nir_foreach_instr(instr, block) {
116 if (instr->type != nir_instr_type_intrinsic)
117 continue;
118
119 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
120 switch (intrin->intrinsic) {
121 case nir_intrinsic_load_vertex_id:
122 case nir_intrinsic_load_base_vertex:
123 unreachable("should be lowered by nir_lower_system_values().");
124
125 case nir_intrinsic_load_vertex_id_zero_base:
126 case nir_intrinsic_load_is_indexed_draw:
127 case nir_intrinsic_load_first_vertex:
128 case nir_intrinsic_load_instance_id:
129 case nir_intrinsic_load_base_instance:
130 case nir_intrinsic_load_draw_id:
131 unreachable("should be lowered by brw_nir_lower_vs_inputs().");
132
133 case nir_intrinsic_load_invocation_id:
134 if (v->stage == MESA_SHADER_TESS_CTRL)
135 break;
136 assert(v->stage == MESA_SHADER_GEOMETRY);
137 reg = &v->nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
138 if (reg->file == BAD_FILE) {
139 const fs_builder abld = v->bld.annotate("gl_InvocationID", NULL);
140 fs_reg g1(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
141 fs_reg iid = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
142 abld.SHR(iid, g1, brw_imm_ud(27u));
143 *reg = iid;
144 }
145 break;
146
147 case nir_intrinsic_load_sample_pos:
148 assert(v->stage == MESA_SHADER_FRAGMENT);
149 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
150 if (reg->file == BAD_FILE)
151 *reg = *v->emit_samplepos_setup();
152 break;
153
154 case nir_intrinsic_load_sample_id:
155 assert(v->stage == MESA_SHADER_FRAGMENT);
156 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
157 if (reg->file == BAD_FILE)
158 *reg = *v->emit_sampleid_setup();
159 break;
160
161 case nir_intrinsic_load_sample_mask_in:
162 assert(v->stage == MESA_SHADER_FRAGMENT);
163 assert(v->devinfo->gen >= 7);
164 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
165 if (reg->file == BAD_FILE)
166 *reg = *v->emit_samplemaskin_setup();
167 break;
168
169 case nir_intrinsic_load_work_group_id:
170 assert(v->stage == MESA_SHADER_COMPUTE);
171 reg = &v->nir_system_values[SYSTEM_VALUE_WORK_GROUP_ID];
172 if (reg->file == BAD_FILE)
173 *reg = *v->emit_cs_work_group_id_setup();
174 break;
175
176 case nir_intrinsic_load_helper_invocation:
177 assert(v->stage == MESA_SHADER_FRAGMENT);
178 reg = &v->nir_system_values[SYSTEM_VALUE_HELPER_INVOCATION];
179 if (reg->file == BAD_FILE) {
180 const fs_builder abld =
181 v->bld.annotate("gl_HelperInvocation", NULL);
182
183 /* On Gen6+ (gl_HelperInvocation is only exposed on Gen7+) the
184 * pixel mask is in g1.7 of the thread payload.
185 *
186 * We move the per-channel pixel enable bit to the low bit of each
187 * channel by shifting the byte containing the pixel mask by the
188 * vector immediate 0x76543210UV.
189 *
190 * The region of <1,8,0> reads only 1 byte (the pixel masks for
191 * subspans 0 and 1) in SIMD8 and an additional byte (the pixel
192 * masks for 2 and 3) in SIMD16.
193 */
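/* Worked example (illustrative): with a SIMD8 pixel mask of 0xb5
 * (0b10110101), channel 3 computes 0xb5 >> 3 == 0x16, whose low bit is
 * 0 because pixel 3 is disabled.  After the inversion and AND below,
 * that channel holds 1, and the final negate yields ~0, i.e.
 * gl_HelperInvocation == true for that helper channel.  An enabled
 * channel such as channel 0 keeps a low bit of 1, which inverts to 0
 * and produces false.
 */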
194 fs_reg shifted = abld.vgrf(BRW_REGISTER_TYPE_UW, 1);
195
196 for (unsigned i = 0; i < DIV_ROUND_UP(v->dispatch_width, 16); i++) {
197 const fs_builder hbld = abld.group(MIN2(16, v->dispatch_width), i);
198 hbld.SHR(offset(shifted, hbld, i),
199 stride(retype(brw_vec1_grf(1 + i, 7),
200 BRW_REGISTER_TYPE_UB),
201 1, 8, 0),
202 brw_imm_v(0x76543210));
203 }
204
205 /* A set bit in the pixel mask means the channel is enabled, but
206 * that is the opposite of gl_HelperInvocation so we need to invert
207 * the mask.
208 *
209 * The negate source-modifier bit of logical instructions on Gen8+
210 * performs 1's complement negation, so we can use that instead of
211 * a NOT instruction.
212 */
213 fs_reg inverted = negate(shifted);
214 if (v->devinfo->gen < 8) {
215 inverted = abld.vgrf(BRW_REGISTER_TYPE_UW);
216 abld.NOT(inverted, shifted);
217 }
218
219 /* We then resolve the 0/1 result to 0/~0 boolean values by ANDing
220 * with 1 and negating.
221 */
222 fs_reg anded = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
223 abld.AND(anded, inverted, brw_imm_uw(1));
224
225 fs_reg dst = abld.vgrf(BRW_REGISTER_TYPE_D, 1);
226 abld.MOV(dst, negate(retype(anded, BRW_REGISTER_TYPE_D)));
227 *reg = dst;
228 }
229 break;
230
231 default:
232 break;
233 }
234 }
235
236 return true;
237 }
238
239 void
240 fs_visitor::nir_emit_system_values()
241 {
242 nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
243 for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
244 nir_system_values[i] = fs_reg();
245 }
246
247 /* Always emit SUBGROUP_INVOCATION. Dead code will clean it up if we
248 * never end up using it.
249 */
250 {
251 const fs_builder abld = bld.annotate("gl_SubgroupInvocation", NULL);
252 fs_reg &reg = nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION];
253 reg = abld.vgrf(BRW_REGISTER_TYPE_UW);
254
255 const fs_builder allbld8 = abld.group(8, 0).exec_all();
256 allbld8.MOV(reg, brw_imm_v(0x76543210));
257 if (dispatch_width > 8)
258 allbld8.ADD(byte_offset(reg, 16), reg, brw_imm_uw(8u));
259 if (dispatch_width > 16) {
260 const fs_builder allbld16 = abld.group(16, 0).exec_all();
261 allbld16.ADD(byte_offset(reg, 32), reg, brw_imm_uw(16u));
262 }
263 }
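/* Note (illustrative): brw_imm_v(0x76543210) is a packed vector immediate
 * holding the eight 4-bit values 0 through 7, so the MOV above writes
 * lane indices 0-7 into the first eight UW channels.  The ADDs then
 * derive lanes 8-15 (and 16-31 in SIMD32) by adding 8 and 16 to the
 * halves already written.
 */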
264
265 nir_function_impl *impl = nir_shader_get_entrypoint((nir_shader *)nir);
266 nir_foreach_block(block, impl)
267 emit_system_values_block(block, this);
268 }
269
270 /*
271 * Returns a type based on a reference_type (word, float, half-float) and a
272 * given bit_size.
273 *
274 * Valid reference BRW_REGISTER_TYPEs are HF, F, DF, B, W, D, Q, UB, UW, UD and UQ.
275 *
276 * @FIXME: 64-bit return types are always DF on integer types to maintain
277 * compatibility with uses of DF prior to the introduction of int64
278 * support.
279 */
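/* A minimal usage sketch (assumed, matching the switch below):
 *
 *    brw_reg_type_from_bit_size(16, BRW_REGISTER_TYPE_F)  == BRW_REGISTER_TYPE_HF
 *    brw_reg_type_from_bit_size(64, BRW_REGISTER_TYPE_UD) == BRW_REGISTER_TYPE_UQ
 *    brw_reg_type_from_bit_size(8,  BRW_REGISTER_TYPE_D)  == BRW_REGISTER_TYPE_B
 */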
280 static brw_reg_type
281 brw_reg_type_from_bit_size(const unsigned bit_size,
282 const brw_reg_type reference_type)
283 {
284 switch(reference_type) {
285 case BRW_REGISTER_TYPE_HF:
286 case BRW_REGISTER_TYPE_F:
287 case BRW_REGISTER_TYPE_DF:
288 switch(bit_size) {
289 case 16:
290 return BRW_REGISTER_TYPE_HF;
291 case 32:
292 return BRW_REGISTER_TYPE_F;
293 case 64:
294 return BRW_REGISTER_TYPE_DF;
295 default:
296 unreachable("Invalid bit size");
297 }
298 case BRW_REGISTER_TYPE_B:
299 case BRW_REGISTER_TYPE_W:
300 case BRW_REGISTER_TYPE_D:
301 case BRW_REGISTER_TYPE_Q:
302 switch(bit_size) {
303 case 8:
304 return BRW_REGISTER_TYPE_B;
305 case 16:
306 return BRW_REGISTER_TYPE_W;
307 case 32:
308 return BRW_REGISTER_TYPE_D;
309 case 64:
310 return BRW_REGISTER_TYPE_Q;
311 default:
312 unreachable("Invalid bit size");
313 }
314 case BRW_REGISTER_TYPE_UB:
315 case BRW_REGISTER_TYPE_UW:
316 case BRW_REGISTER_TYPE_UD:
317 case BRW_REGISTER_TYPE_UQ:
318 switch(bit_size) {
319 case 8:
320 return BRW_REGISTER_TYPE_UB;
321 case 16:
322 return BRW_REGISTER_TYPE_UW;
323 case 32:
324 return BRW_REGISTER_TYPE_UD;
325 case 64:
326 return BRW_REGISTER_TYPE_UQ;
327 default:
328 unreachable("Invalid bit size");
329 }
330 default:
331 unreachable("Unknown type");
332 }
333 }
334
335 void
336 fs_visitor::nir_emit_impl(nir_function_impl *impl)
337 {
338 nir_locals = ralloc_array(mem_ctx, fs_reg, impl->reg_alloc);
339 for (unsigned i = 0; i < impl->reg_alloc; i++) {
340 nir_locals[i] = fs_reg();
341 }
342
343 foreach_list_typed(nir_register, reg, node, &impl->registers) {
344 unsigned array_elems =
345 reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
346 unsigned size = array_elems * reg->num_components;
347 const brw_reg_type reg_type = reg->bit_size == 8 ? BRW_REGISTER_TYPE_B :
348 brw_reg_type_from_bit_size(reg->bit_size, BRW_REGISTER_TYPE_F);
349 nir_locals[reg->index] = bld.vgrf(reg_type, size);
350 }
351
352 nir_ssa_values = reralloc(mem_ctx, nir_ssa_values, fs_reg,
353 impl->ssa_alloc);
354
355 nir_emit_cf_list(&impl->body);
356 }
357
358 void
359 fs_visitor::nir_emit_cf_list(exec_list *list)
360 {
361 exec_list_validate(list);
362 foreach_list_typed(nir_cf_node, node, node, list) {
363 switch (node->type) {
364 case nir_cf_node_if:
365 nir_emit_if(nir_cf_node_as_if(node));
366 break;
367
368 case nir_cf_node_loop:
369 nir_emit_loop(nir_cf_node_as_loop(node));
370 break;
371
372 case nir_cf_node_block:
373 nir_emit_block(nir_cf_node_as_block(node));
374 break;
375
376 default:
377 unreachable("Invalid CFG node block");
378 }
379 }
380 }
381
382 void
383 fs_visitor::nir_emit_if(nir_if *if_stmt)
384 {
385 bool invert;
386 fs_reg cond_reg;
387
388 /* If the condition has the form !other_condition, use other_condition as
389 * the source, but invert the predicate on the if instruction.
390 */
391 nir_alu_instr *cond = nir_src_as_alu_instr(if_stmt->condition);
392 if (cond != NULL && cond->op == nir_op_inot) {
393 assert(!cond->src[0].negate);
394 assert(!cond->src[0].abs);
395
396 invert = true;
397 cond_reg = get_nir_src(cond->src[0].src);
398 } else {
399 invert = false;
400 cond_reg = get_nir_src(if_stmt->condition);
401 }
402
403 /* first, put the condition into f0 */
404 fs_inst *inst = bld.MOV(bld.null_reg_d(),
405 retype(cond_reg, BRW_REGISTER_TYPE_D));
406 inst->conditional_mod = BRW_CONDITIONAL_NZ;
407
408 bld.IF(BRW_PREDICATE_NORMAL)->predicate_inverse = invert;
409
410 nir_emit_cf_list(&if_stmt->then_list);
411
412 if (!nir_cf_list_is_empty_block(&if_stmt->else_list)) {
413 bld.emit(BRW_OPCODE_ELSE);
414 nir_emit_cf_list(&if_stmt->else_list);
415 }
416
417 bld.emit(BRW_OPCODE_ENDIF);
418
419 if (devinfo->gen < 7)
420 limit_dispatch_width(16, "Non-uniform control flow unsupported "
421 "in SIMD32 mode.");
422 }
423
424 void
425 fs_visitor::nir_emit_loop(nir_loop *loop)
426 {
427 bld.emit(BRW_OPCODE_DO);
428
429 nir_emit_cf_list(&loop->body);
430
431 bld.emit(BRW_OPCODE_WHILE);
432
433 if (devinfo->gen < 7)
434 limit_dispatch_width(16, "Non-uniform control flow unsupported "
435 "in SIMD32 mode.");
436 }
437
438 void
439 fs_visitor::nir_emit_block(nir_block *block)
440 {
441 nir_foreach_instr(instr, block) {
442 nir_emit_instr(instr);
443 }
444 }
445
446 void
447 fs_visitor::nir_emit_instr(nir_instr *instr)
448 {
449 const fs_builder abld = bld.annotate(NULL, instr);
450
451 switch (instr->type) {
452 case nir_instr_type_alu:
453 nir_emit_alu(abld, nir_instr_as_alu(instr), true);
454 break;
455
456 case nir_instr_type_deref:
457 unreachable("All derefs should've been lowered");
458 break;
459
460 case nir_instr_type_intrinsic:
461 switch (stage) {
462 case MESA_SHADER_VERTEX:
463 nir_emit_vs_intrinsic(abld, nir_instr_as_intrinsic(instr));
464 break;
465 case MESA_SHADER_TESS_CTRL:
466 nir_emit_tcs_intrinsic(abld, nir_instr_as_intrinsic(instr));
467 break;
468 case MESA_SHADER_TESS_EVAL:
469 nir_emit_tes_intrinsic(abld, nir_instr_as_intrinsic(instr));
470 break;
471 case MESA_SHADER_GEOMETRY:
472 nir_emit_gs_intrinsic(abld, nir_instr_as_intrinsic(instr));
473 break;
474 case MESA_SHADER_FRAGMENT:
475 nir_emit_fs_intrinsic(abld, nir_instr_as_intrinsic(instr));
476 break;
477 case MESA_SHADER_COMPUTE:
478 nir_emit_cs_intrinsic(abld, nir_instr_as_intrinsic(instr));
479 break;
480 default:
481 unreachable("unsupported shader stage");
482 }
483 break;
484
485 case nir_instr_type_tex:
486 nir_emit_texture(abld, nir_instr_as_tex(instr));
487 break;
488
489 case nir_instr_type_load_const:
490 nir_emit_load_const(abld, nir_instr_as_load_const(instr));
491 break;
492
493 case nir_instr_type_ssa_undef:
494 /* We create a new VGRF for undefs on every use (by handling
495 * them in get_nir_src()), rather than for each definition.
496 * This helps register coalescing eliminate MOVs from undef.
497 */
498 break;
499
500 case nir_instr_type_jump:
501 nir_emit_jump(abld, nir_instr_as_jump(instr));
502 break;
503
504 default:
505 unreachable("unknown instruction type");
506 }
507 }
508
509 /**
510 * Recognizes a parent instruction of nir_op_extract_* and changes the type to
511 * match instr.
512 */
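/* For instance (illustrative), u2f32(extract_u8(x, 2)) would normally need
 * an extract MOV followed by a conversion MOV; recognizing the parent
 * extract here turns it into a single MOV from subscript(x, UB, 2)
 * straight into the float destination.
 */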
513 bool
514 fs_visitor::optimize_extract_to_float(nir_alu_instr *instr,
515 const fs_reg &result)
516 {
517 if (!instr->src[0].src.is_ssa ||
518 !instr->src[0].src.ssa->parent_instr)
519 return false;
520
521 if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
522 return false;
523
524 nir_alu_instr *src0 =
525 nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
526
527 if (src0->op != nir_op_extract_u8 && src0->op != nir_op_extract_u16 &&
528 src0->op != nir_op_extract_i8 && src0->op != nir_op_extract_i16)
529 return false;
530
531 /* If either opcode has source modifiers, bail.
532 *
533 * TODO: We can potentially handle source modifiers if both of the opcodes
534 * we're combining are signed integers.
535 */
536 if (instr->src[0].abs || instr->src[0].negate ||
537 src0->src[0].abs || src0->src[0].negate)
538 return false;
539
540 unsigned element = nir_src_as_uint(src0->src[1].src);
541
542 /* Element type to extract. */
543 const brw_reg_type type = brw_int_type(
544 src0->op == nir_op_extract_u16 || src0->op == nir_op_extract_i16 ? 2 : 1,
545 src0->op == nir_op_extract_i16 || src0->op == nir_op_extract_i8);
546
547 fs_reg op0 = get_nir_src(src0->src[0].src);
548 op0.type = brw_type_for_nir_type(devinfo,
549 (nir_alu_type)(nir_op_infos[src0->op].input_types[0] |
550 nir_src_bit_size(src0->src[0].src)));
551 op0 = offset(op0, bld, src0->src[0].swizzle[0]);
552
553 set_saturate(instr->dest.saturate,
554 bld.MOV(result, subscript(op0, type, element)));
555 return true;
556 }
557
558 bool
559 fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
560 const fs_reg &result)
561 {
562 nir_intrinsic_instr *src0 = nir_src_as_intrinsic(instr->src[0].src);
563 if (src0 == NULL || src0->intrinsic != nir_intrinsic_load_front_face)
564 return false;
565
566 if (!nir_src_is_const(instr->src[1].src) ||
567 !nir_src_is_const(instr->src[2].src))
568 return false;
569
570 const float value1 = nir_src_as_float(instr->src[1].src);
571 const float value2 = nir_src_as_float(instr->src[2].src);
572 if (fabsf(value1) != 1.0f || fabsf(value2) != 1.0f)
573 return false;
574
575 /* nir_opt_algebraic should have gotten rid of bcsel(b, a, a) */
576 assert(value1 == -value2);
577
578 fs_reg tmp = vgrf(glsl_type::int_type);
579
580 if (devinfo->gen >= 6) {
581 /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
582 fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
583
584 /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
585 *
586 * or(8) tmp.1<2>W g0.0<0,1,0>W 0x00003f80W
587 * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
588 *
589 * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
590 *
591 * This negation looks like it's safe in practice, because bits 0:4 will
592 * surely be TRIANGLES
593 */
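/* Worked example (illustrative): 0x3f80 in the high word of a dword is
 * 0x3f800000 == 1.0f.  If bit 15 of g0.0 is set (back facing), the OR
 * leaves it set and the final AND with 0xbf800000 produces
 * 0xbf800000 == -1.0f; if bit 15 is clear, the result is
 * 0x3f800000 == 1.0f.
 */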
594
595 if (value1 == -1.0f) {
596 g0.negate = true;
597 }
598
599 bld.OR(subscript(tmp, BRW_REGISTER_TYPE_W, 1),
600 g0, brw_imm_uw(0x3f80));
601 } else {
602 /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
603 fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
604
605 /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
606 *
607 * or(8) tmp<1>D g1.6<0,1,0>D 0x3f800000D
608 * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
609 *
610 * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
611 *
612 * This negation looks like it's safe in practice, because bits 0:4 will
613 * surely be TRIANGLES
614 */
615
616 if (value1 == -1.0f) {
617 g1_6.negate = true;
618 }
619
620 bld.OR(tmp, g1_6, brw_imm_d(0x3f800000));
621 }
622 bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, brw_imm_d(0xbf800000));
623
624 return true;
625 }
626
627 static void
628 emit_find_msb_using_lzd(const fs_builder &bld,
629 const fs_reg &result,
630 const fs_reg &src,
631 bool is_signed)
632 {
633 fs_inst *inst;
634 fs_reg temp = src;
635
636 if (is_signed) {
637 /* LZD of an absolute value source almost always does the right
638 * thing. There are a few problem values:
639 *
640 * * 0x80000000. Since abs(0x80000000) == 0x80000000, LZD returns
641 * 0. However, findMSB(int(0x80000000)) == 30.
642 *
643 * * 0xffffffff. Since abs(0xffffffff) == 1, LZD returns
644 * 31. Section 8.8 (Integer Functions) of the GLSL 4.50 spec says:
645 *
646 * For a value of zero or negative one, -1 will be returned.
647 *
648 * * Negative powers of two. LZD(abs(-(1<<x))) returns x, but
649 * findMSB(-(1<<x)) should return x-1.
650 *
651 * For all negative number cases, including 0x80000000 and
652 * 0xffffffff, the correct value is obtained from LZD if instead of
653 * negating the (already negative) value the logical-not is used. A
654 * conditional logical-not can be achieved in two instructions.
655 */
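/* Worked example (illustrative): for src == -8 (0xfffffff8), the ASR
 * gives 0xffffffff and the XOR gives 0x00000007, so LZD returns 29 and
 * the final 31 - 29 == 2 == findMSB(-8).  For src == 12, the ASR gives
 * 0 and the XOR leaves 12, so LZD returns 28 and 31 - 28 == 3 ==
 * findMSB(12).
 */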
656 temp = bld.vgrf(BRW_REGISTER_TYPE_D);
657
658 bld.ASR(temp, src, brw_imm_d(31));
659 bld.XOR(temp, temp, src);
660 }
661
662 bld.LZD(retype(result, BRW_REGISTER_TYPE_UD),
663 retype(temp, BRW_REGISTER_TYPE_UD));
664
665 /* LZD counts from the MSB side, while GLSL's findMSB() wants the count
666 * from the LSB side. Subtract the result from 31 to convert the MSB
667 * count into an LSB count. If no bits are set, LZD will return 32.
668 * 31-32 = -1, which is exactly what findMSB() is supposed to return.
669 */
670 inst = bld.ADD(result, retype(result, BRW_REGISTER_TYPE_D), brw_imm_d(31));
671 inst->src[0].negate = true;
672 }
673
674 static brw_rnd_mode
675 brw_rnd_mode_from_nir_op(const nir_op op) {
676 switch (op) {
677 case nir_op_f2f16_rtz:
678 return BRW_RND_MODE_RTZ;
679 case nir_op_f2f16_rtne:
680 return BRW_RND_MODE_RTNE;
681 default:
682 unreachable("Operation doesn't support rounding mode");
683 }
684 }
685
686 fs_reg
687 fs_visitor::prepare_alu_destination_and_sources(const fs_builder &bld,
688 nir_alu_instr *instr,
689 fs_reg *op,
690 bool need_dest)
691 {
692 fs_reg result =
693 need_dest ? get_nir_dest(instr->dest.dest) : bld.null_reg_ud();
694
695 result.type = brw_type_for_nir_type(devinfo,
696 (nir_alu_type)(nir_op_infos[instr->op].output_type |
697 nir_dest_bit_size(instr->dest.dest)));
698
699 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
700 op[i] = get_nir_src(instr->src[i].src);
701 op[i].type = brw_type_for_nir_type(devinfo,
702 (nir_alu_type)(nir_op_infos[instr->op].input_types[i] |
703 nir_src_bit_size(instr->src[i].src)));
704 op[i].abs = instr->src[i].abs;
705 op[i].negate = instr->src[i].negate;
706 }
707
708 /* Move and vecN instructions may still be vectorized. Return the raw,
709 * vectorized source and destination so that fs_visitor::nir_emit_alu can
710 * handle it. Other callers should not have to handle these kinds of
711 * instructions.
712 */
713 switch (instr->op) {
714 case nir_op_mov:
715 case nir_op_vec2:
716 case nir_op_vec3:
717 case nir_op_vec4:
718 return result;
719 default:
720 break;
721 }
722
723 /* At this point, we have dealt with any instruction that operates on
724 * more than a single channel. Therefore, we can just adjust the source
725 * and destination registers for that channel and emit the instruction.
726 */
727 unsigned channel = 0;
728 if (nir_op_infos[instr->op].output_size == 0) {
729 /* Since NIR is doing the scalarizing for us, we should only ever see
730 * vectorized operations with a single channel.
731 */
732 assert(util_bitcount(instr->dest.write_mask) == 1);
733 channel = ffs(instr->dest.write_mask) - 1;
734
735 result = offset(result, bld, channel);
736 }
737
738 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
739 assert(nir_op_infos[instr->op].input_sizes[i] < 2);
740 op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
741 }
742
743 return result;
744 }
745
746 void
747 fs_visitor::resolve_inot_sources(const fs_builder &bld, nir_alu_instr *instr,
748 fs_reg *op)
749 {
750 for (unsigned i = 0; i < 2; i++) {
751 nir_alu_instr *inot_instr = nir_src_as_alu_instr(instr->src[i].src);
752
753 if (inot_instr != NULL && inot_instr->op == nir_op_inot &&
754 !inot_instr->src[0].abs && !inot_instr->src[0].negate) {
755 /* The source of the inot is now the source of instr. */
756 prepare_alu_destination_and_sources(bld, inot_instr, &op[i], false);
757
758 assert(!op[i].negate);
759 op[i].negate = true;
760 } else {
761 op[i] = resolve_source_modifiers(op[i]);
762 }
763 }
764 }
765
766 bool
767 fs_visitor::try_emit_b2fi_of_inot(const fs_builder &bld,
768 fs_reg result,
769 nir_alu_instr *instr)
770 {
771 if (devinfo->gen < 6 || devinfo->gen >= 12)
772 return false;
773
774 nir_alu_instr *inot_instr = nir_src_as_alu_instr(instr->src[0].src);
775
776 if (inot_instr == NULL || inot_instr->op != nir_op_inot)
777 return false;
778
779 /* HF is also possible as a destination on BDW+. For nir_op_b2i, the set
780 * of valid size-changing combinations is a bit more complex.
781 *
782 * The source restriction is just because I was lazy about generating the
783 * constant below.
784 */
785 if (nir_dest_bit_size(instr->dest.dest) != 32 ||
786 nir_src_bit_size(inot_instr->src[0].src) != 32)
787 return false;
788
789 /* b2[fi](inot(a)) maps a=0 => 1, a=-1 => 0. Since a can only be 0 or -1,
790 * this is float(1 + a).
791 */
792 fs_reg op;
793
794 prepare_alu_destination_and_sources(bld, inot_instr, &op, false);
795
796 /* Ignore the saturate modifier, if there is one. The result of the
797 * arithmetic can only be 0 or 1, so the clamping will do nothing anyway.
798 */
799 bld.ADD(result, op, brw_imm_d(1));
800
801 return true;
802 }
803
804 /**
805 * Emit code for nir_op_fsign possibly fused with a nir_op_fmul
806 *
807 * If \c instr is not the \c nir_op_fsign, then \c fsign_src is the index of
808 * the source of \c instr that is a \c nir_op_fsign.
809 */
810 void
811 fs_visitor::emit_fsign(const fs_builder &bld, const nir_alu_instr *instr,
812 fs_reg result, fs_reg *op, unsigned fsign_src)
813 {
814 fs_inst *inst;
815
816 assert(instr->op == nir_op_fsign || instr->op == nir_op_fmul);
817 assert(fsign_src < nir_op_infos[instr->op].num_inputs);
818
819 if (instr->op != nir_op_fsign) {
820 const nir_alu_instr *const fsign_instr =
821 nir_src_as_alu_instr(instr->src[fsign_src].src);
822
823 assert(!fsign_instr->dest.saturate);
824
825 /* op[fsign_src] has the nominal result of the fsign, and op[1 -
826 * fsign_src] has the other multiply source. This must be rearranged so
827 * that op[0] is the source of the fsign and op[1] is the other multiply
828 * source.
829 */
830 if (fsign_src != 0)
831 op[1] = op[0];
832
833 op[0] = get_nir_src(fsign_instr->src[0].src);
834
835 const nir_alu_type t =
836 (nir_alu_type)(nir_op_infos[instr->op].input_types[0] |
837 nir_src_bit_size(fsign_instr->src[0].src));
838
839 op[0].type = brw_type_for_nir_type(devinfo, t);
840 op[0].abs = fsign_instr->src[0].abs;
841 op[0].negate = fsign_instr->src[0].negate;
842
843 unsigned channel = 0;
844 if (nir_op_infos[instr->op].output_size == 0) {
845 /* Since NIR is doing the scalarizing for us, we should only ever see
846 * vectorized operations with a single channel.
847 */
848 assert(util_bitcount(instr->dest.write_mask) == 1);
849 channel = ffs(instr->dest.write_mask) - 1;
850 }
851
852 op[0] = offset(op[0], bld, fsign_instr->src[0].swizzle[channel]);
853 } else {
854 assert(!instr->dest.saturate);
855 }
856
857 if (op[0].abs) {
858 /* Straightforward since the source can be assumed to be either strictly
859 * >= 0 or strictly <= 0 depending on the setting of the negate flag.
860 */
861 set_condmod(BRW_CONDITIONAL_NZ, bld.MOV(result, op[0]));
862
863 if (instr->op == nir_op_fsign) {
864 inst = (op[0].negate)
865 ? bld.MOV(result, brw_imm_f(-1.0f))
866 : bld.MOV(result, brw_imm_f(1.0f));
867 } else {
868 op[1].negate = (op[0].negate != op[1].negate);
869 inst = bld.MOV(result, op[1]);
870 }
871
872 set_predicate(BRW_PREDICATE_NORMAL, inst);
873 } else if (type_sz(op[0].type) == 2) {
874 /* AND(val, 0x8000) gives the sign bit.
875 *
876 * Predicated OR ORs 1.0 (0x3c00) with the sign bit if val is not zero.
877 */
878 fs_reg zero = retype(brw_imm_uw(0), BRW_REGISTER_TYPE_HF);
879 bld.CMP(bld.null_reg_f(), op[0], zero, BRW_CONDITIONAL_NZ);
880
881 op[0].type = BRW_REGISTER_TYPE_UW;
882 result.type = BRW_REGISTER_TYPE_UW;
883 bld.AND(result, op[0], brw_imm_uw(0x8000u));
884
885 if (instr->op == nir_op_fsign)
886 inst = bld.OR(result, result, brw_imm_uw(0x3c00u));
887 else {
888 /* Use XOR here to get the result sign correct. */
889 inst = bld.XOR(result, result, retype(op[1], BRW_REGISTER_TYPE_UW));
890 }
891
892 inst->predicate = BRW_PREDICATE_NORMAL;
893 } else if (type_sz(op[0].type) == 4) {
894 /* AND(val, 0x80000000) gives the sign bit.
895 *
896 * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
897 * zero.
898 */
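/* Worked example (illustrative): for op[0] == -2.5f (0xc0200000), the
 * AND keeps 0x80000000 and the predicated OR yields 0xbf800000 ==
 * -1.0f.  For op[0] == 0.0f, the CMP fails, the OR is skipped, and the
 * result stays at the AND's 0x00000000 == 0.0f.
 */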
899 bld.CMP(bld.null_reg_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ);
900
901 op[0].type = BRW_REGISTER_TYPE_UD;
902 result.type = BRW_REGISTER_TYPE_UD;
903 bld.AND(result, op[0], brw_imm_ud(0x80000000u));
904
905 if (instr->op == nir_op_fsign)
906 inst = bld.OR(result, result, brw_imm_ud(0x3f800000u));
907 else {
908 /* Use XOR here to get the result sign correct. */
909 inst = bld.XOR(result, result, retype(op[1], BRW_REGISTER_TYPE_UD));
910 }
911
912 inst->predicate = BRW_PREDICATE_NORMAL;
913 } else {
914 /* For doubles we do the same but we need to consider:
915 *
916 * - 2-src instructions can't operate with 64-bit immediates
917 * - The sign is encoded in the high 32 bits of each DF
918 * - We need to produce a DF result.
919 */
920
921 fs_reg zero = vgrf(glsl_type::double_type);
922 bld.MOV(zero, setup_imm_df(bld, 0.0));
923 bld.CMP(bld.null_reg_df(), op[0], zero, BRW_CONDITIONAL_NZ);
924
925 bld.MOV(result, zero);
926
927 fs_reg r = subscript(result, BRW_REGISTER_TYPE_UD, 1);
928 bld.AND(r, subscript(op[0], BRW_REGISTER_TYPE_UD, 1),
929 brw_imm_ud(0x80000000u));
930
931 if (instr->op == nir_op_fsign) {
932 set_predicate(BRW_PREDICATE_NORMAL,
933 bld.OR(r, r, brw_imm_ud(0x3ff00000u)));
934 } else {
935 /* This could be done better in some cases. If the scale is an
936 * immediate with the low 32-bits all 0, emitting a separate XOR and
937 * OR would allow an algebraic optimization to remove the OR. There
938 * are currently zero instances of fsign(double(x))*IMM in shader-db
939 * or any test suite, so it is hard to care at this time.
940 */
941 fs_reg result_int64 = retype(result, BRW_REGISTER_TYPE_UQ);
942 inst = bld.XOR(result_int64, result_int64,
943 retype(op[1], BRW_REGISTER_TYPE_UQ));
944 }
945 }
946 }
947
948 /**
949 * Determine whether sources of a nir_op_fmul can be fused with a nir_op_fsign
950 *
951 * Checks the operands of a \c nir_op_fmul to determine whether or not
952 * \c emit_fsign could fuse the multiplication with the \c sign() calculation.
953 *
954 * \param instr The multiplication instruction
955 *
956 * \param fsign_src The source of \c instr that may or may not be a
957 * \c nir_op_fsign
958 */
959 static bool
960 can_fuse_fmul_fsign(nir_alu_instr *instr, unsigned fsign_src)
961 {
962 assert(instr->op == nir_op_fmul);
963
964 nir_alu_instr *const fsign_instr =
965 nir_src_as_alu_instr(instr->src[fsign_src].src);
966
967 /* Rules:
968 *
969 * 1. instr->src[fsign_src] must be a nir_op_fsign.
970 * 2. The nir_op_fsign can only be used by this multiplication.
971 * 3. The source that is the nir_op_fsign does not have source modifiers.
972 * \c emit_fsign only examines the source modifiers of the source of the
973 * \c nir_op_fsign.
974 *
975 * The nir_op_fsign must also not have the saturate modifier, but steps
976 * have already been taken (in nir_opt_algebraic) to ensure that.
977 */
978 return fsign_instr != NULL && fsign_instr->op == nir_op_fsign &&
979 is_used_once(fsign_instr) &&
980 !instr->src[fsign_src].abs && !instr->src[fsign_src].negate;
981 }
982
983 void
984 fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
985 bool need_dest)
986 {
987 struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
988 fs_inst *inst;
989
990 fs_reg op[4];
991 fs_reg result = prepare_alu_destination_and_sources(bld, instr, op, need_dest);
992
993 switch (instr->op) {
994 case nir_op_mov:
995 case nir_op_vec2:
996 case nir_op_vec3:
997 case nir_op_vec4: {
998 fs_reg temp = result;
999 bool need_extra_copy = false;
1000 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
1001 if (!instr->src[i].src.is_ssa &&
1002 instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
1003 need_extra_copy = true;
1004 temp = bld.vgrf(result.type, 4);
1005 break;
1006 }
1007 }
1008
1009 for (unsigned i = 0; i < 4; i++) {
1010 if (!(instr->dest.write_mask & (1 << i)))
1011 continue;
1012
1013 if (instr->op == nir_op_mov) {
1014 inst = bld.MOV(offset(temp, bld, i),
1015 offset(op[0], bld, instr->src[0].swizzle[i]));
1016 } else {
1017 inst = bld.MOV(offset(temp, bld, i),
1018 offset(op[i], bld, instr->src[i].swizzle[0]));
1019 }
1020 inst->saturate = instr->dest.saturate;
1021 }
1022
1023 /* In this case the source and destination registers were the same,
1024 * so we need to insert an extra set of moves in order to deal with
1025 * any swizzling.
1026 */
1027 if (need_extra_copy) {
1028 for (unsigned i = 0; i < 4; i++) {
1029 if (!(instr->dest.write_mask & (1 << i)))
1030 continue;
1031
1032 bld.MOV(offset(result, bld, i), offset(temp, bld, i));
1033 }
1034 }
1035 return;
1036 }
1037
1038 case nir_op_i2f32:
1039 case nir_op_u2f32:
1040 if (optimize_extract_to_float(instr, result))
1041 return;
1042 inst = bld.MOV(result, op[0]);
1043 inst->saturate = instr->dest.saturate;
1044 break;
1045
1046 case nir_op_f2f16_rtne:
1047 case nir_op_f2f16_rtz:
1048 bld.emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
1049 brw_imm_d(brw_rnd_mode_from_nir_op(instr->op)));
1050 /* fallthrough */
1051 case nir_op_f2f16:
1052 /* In theory, it would be better to use BRW_OPCODE_F32TO16. Depending
1053 * on the HW gen, it is a special hw opcode or just a MOV, and
1054 * brw_F32TO16 (at brw_eu_emit) would do the work to choose.
1055 *
1056 * But if we want to use that opcode, we need to provide support on
1057 * different optimizations and lowerings. Since HF support is currently
1058 * limited to gen8+, it is better to use the MOV directly, and to switch
1059 * to BRW_OPCODE_F32TO16 when/if we add HF support on gen7.
1060 */
1061 assert(type_sz(op[0].type) < 8); /* brw_nir_lower_conversions */
1062 inst = bld.MOV(result, op[0]);
1063 inst->saturate = instr->dest.saturate;
1064 break;
1065
1066 case nir_op_b2i8:
1067 case nir_op_b2i16:
1068 case nir_op_b2i32:
1069 case nir_op_b2i64:
1070 case nir_op_b2f16:
1071 case nir_op_b2f32:
1072 case nir_op_b2f64:
1073 if (try_emit_b2fi_of_inot(bld, result, instr))
1074 break;
1075 op[0].type = BRW_REGISTER_TYPE_D;
1076 op[0].negate = !op[0].negate;
1077 /* fallthrough */
1078 case nir_op_i2f64:
1079 case nir_op_i2i64:
1080 case nir_op_u2f64:
1081 case nir_op_u2u64:
1082 case nir_op_f2f64:
1083 case nir_op_f2i64:
1084 case nir_op_f2u64:
1085 case nir_op_i2i32:
1086 case nir_op_u2u32:
1087 case nir_op_f2f32:
1088 case nir_op_f2i32:
1089 case nir_op_f2u32:
1090 case nir_op_i2f16:
1091 case nir_op_i2i16:
1092 case nir_op_u2f16:
1093 case nir_op_u2u16:
1094 case nir_op_f2i16:
1095 case nir_op_f2u16:
1096 case nir_op_i2i8:
1097 case nir_op_u2u8:
1098 case nir_op_f2i8:
1099 case nir_op_f2u8:
1100 if (result.type == BRW_REGISTER_TYPE_B ||
1101 result.type == BRW_REGISTER_TYPE_UB ||
1102 result.type == BRW_REGISTER_TYPE_HF)
1103 assert(type_sz(op[0].type) < 8); /* brw_nir_lower_conversions */
1104
1105 if (op[0].type == BRW_REGISTER_TYPE_B ||
1106 op[0].type == BRW_REGISTER_TYPE_UB ||
1107 op[0].type == BRW_REGISTER_TYPE_HF)
1108 assert(type_sz(result.type) < 8); /* brw_nir_lower_conversions */
1109
1110 inst = bld.MOV(result, op[0]);
1111 inst->saturate = instr->dest.saturate;
1112 break;
1113
1114 case nir_op_fsat:
1115 inst = bld.MOV(result, op[0]);
1116 inst->saturate = true;
1117 break;
1118
1119 case nir_op_fneg:
1120 case nir_op_ineg:
1121 op[0].negate = true;
1122 inst = bld.MOV(result, op[0]);
1123 if (instr->op == nir_op_fneg)
1124 inst->saturate = instr->dest.saturate;
1125 break;
1126
1127 case nir_op_fabs:
1128 case nir_op_iabs:
1129 op[0].negate = false;
1130 op[0].abs = true;
1131 inst = bld.MOV(result, op[0]);
1132 if (instr->op == nir_op_fabs)
1133 inst->saturate = instr->dest.saturate;
1134 break;
1135
1136 case nir_op_fsign:
1137 emit_fsign(bld, instr, result, op, 0);
1138 break;
1139
1140 case nir_op_frcp:
1141 inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
1142 inst->saturate = instr->dest.saturate;
1143 break;
1144
1145 case nir_op_fexp2:
1146 inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
1147 inst->saturate = instr->dest.saturate;
1148 break;
1149
1150 case nir_op_flog2:
1151 inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
1152 inst->saturate = instr->dest.saturate;
1153 break;
1154
1155 case nir_op_fsin:
1156 inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
1157 inst->saturate = instr->dest.saturate;
1158 break;
1159
1160 case nir_op_fcos:
1161 inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
1162 inst->saturate = instr->dest.saturate;
1163 break;
1164
1165 case nir_op_fddx:
1166 if (fs_key->high_quality_derivatives) {
1167 inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
1168 } else {
1169 inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
1170 }
1171 inst->saturate = instr->dest.saturate;
1172 break;
1173 case nir_op_fddx_fine:
1174 inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
1175 inst->saturate = instr->dest.saturate;
1176 break;
1177 case nir_op_fddx_coarse:
1178 inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
1179 inst->saturate = instr->dest.saturate;
1180 break;
1181 case nir_op_fddy:
1182 if (fs_key->high_quality_derivatives) {
1183 inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0]);
1184 } else {
1185 inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0]);
1186 }
1187 inst->saturate = instr->dest.saturate;
1188 break;
1189 case nir_op_fddy_fine:
1190 inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0]);
1191 inst->saturate = instr->dest.saturate;
1192 break;
1193 case nir_op_fddy_coarse:
1194 inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0]);
1195 inst->saturate = instr->dest.saturate;
1196 break;
1197
1198 case nir_op_iadd:
1199 case nir_op_fadd:
1200 inst = bld.ADD(result, op[0], op[1]);
1201 inst->saturate = instr->dest.saturate;
1202 break;
1203
1204 case nir_op_uadd_sat:
1205 inst = bld.ADD(result, op[0], op[1]);
1206 inst->saturate = true;
1207 break;
1208
1209 case nir_op_fmul:
1210 for (unsigned i = 0; i < 2; i++) {
1211 if (can_fuse_fmul_fsign(instr, i)) {
1212 emit_fsign(bld, instr, result, op, i);
1213 return;
1214 }
1215 }
1216
1217 inst = bld.MUL(result, op[0], op[1]);
1218 inst->saturate = instr->dest.saturate;
1219 break;
1220
1221 case nir_op_imul_2x32_64:
1222 case nir_op_umul_2x32_64:
1223 bld.MUL(result, op[0], op[1]);
1224 break;
1225
1226 case nir_op_imul:
1227 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1228 bld.MUL(result, op[0], op[1]);
1229 break;
1230
1231 case nir_op_imul_high:
1232 case nir_op_umul_high:
1233 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1234 bld.emit(SHADER_OPCODE_MULH, result, op[0], op[1]);
1235 break;
1236
1237 case nir_op_idiv:
1238 case nir_op_udiv:
1239 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1240 bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
1241 break;
1242
1243 case nir_op_uadd_carry:
1244 unreachable("Should have been lowered by carry_to_arith().");
1245
1246 case nir_op_usub_borrow:
1247 unreachable("Should have been lowered by borrow_to_arith().");
1248
1249 case nir_op_umod:
1250 case nir_op_irem:
1251 /* According to the sign table for INT DIV in the Ivy Bridge PRM, it
1252 * appears that our hardware just does the right thing for signed
1253 * remainder.
1254 */
1255 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1256 bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
1257 break;
1258
1259 case nir_op_imod: {
1260 /* Get a regular C-style remainder. If a % b == 0, set the predicate. */
1261 bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
1262
1263 /* Math instructions don't support conditional mod */
1264 inst = bld.MOV(bld.null_reg_d(), result);
1265 inst->conditional_mod = BRW_CONDITIONAL_NZ;
1266
1267 /* Now, we need to determine if signs of the sources are different.
1268 * When we XOR the sources, the top bit is 0 if they are the same and 1
1269 * if they are different. We can then use a conditional modifier to
1270 * turn that into a predicate. This leads us to an XOR.l instruction.
1271 *
1272 * Technically, according to the PRM, you're not allowed to use .l on a
1273 * XOR instruction. However, empirical experiments and Curro's reading
1274 * of the simulator source both indicate that it's safe.
1275 */
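/* Worked example (illustrative): imod(7, -3).  The C-style remainder
 * above is 1 (non-zero), and 7 ^ -3 has its sign bit set, so the
 * predicated ADD fires and the result becomes 1 + (-3) == -2, matching
 * GLSL's floor-based modulus.
 */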
1276 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_D);
1277 inst = bld.XOR(tmp, op[0], op[1]);
1278 inst->predicate = BRW_PREDICATE_NORMAL;
1279 inst->conditional_mod = BRW_CONDITIONAL_L;
1280
1281 /* If the result of the initial remainder operation is non-zero and the
1282 * two sources have different signs, add in a copy of op[1] to get the
1283 * final integer modulus value.
1284 */
1285 inst = bld.ADD(result, result, op[1]);
1286 inst->predicate = BRW_PREDICATE_NORMAL;
1287 break;
1288 }
1289
1290 case nir_op_flt32:
1291 case nir_op_fge32:
1292 case nir_op_feq32:
1293 case nir_op_fne32: {
1294 fs_reg dest = result;
1295
1296 const uint32_t bit_size = nir_src_bit_size(instr->src[0].src);
1297 if (bit_size != 32)
1298 dest = bld.vgrf(op[0].type, 1);
1299
1300 brw_conditional_mod cond;
1301 switch (instr->op) {
1302 case nir_op_flt32:
1303 cond = BRW_CONDITIONAL_L;
1304 break;
1305 case nir_op_fge32:
1306 cond = BRW_CONDITIONAL_GE;
1307 break;
1308 case nir_op_feq32:
1309 cond = BRW_CONDITIONAL_Z;
1310 break;
1311 case nir_op_fne32:
1312 cond = BRW_CONDITIONAL_NZ;
1313 break;
1314 default:
1315 unreachable("bad opcode");
1316 }
1317
1318 bld.CMP(dest, op[0], op[1], cond);
1319
1320 if (bit_size > 32) {
1321 bld.MOV(result, subscript(dest, BRW_REGISTER_TYPE_UD, 0));
1322 } else if (bit_size < 32) {
1323 /* When we convert the result to 32-bit we need to be careful and do
1324 * it as a signed conversion to get sign extension (for 32-bit true)
1325 */
1326 const brw_reg_type src_type =
1327 brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_D);
1328
1329 bld.MOV(retype(result, BRW_REGISTER_TYPE_D), retype(dest, src_type));
1330 }
1331 break;
1332 }
1333
1334 case nir_op_ilt32:
1335 case nir_op_ult32:
1336 case nir_op_ige32:
1337 case nir_op_uge32:
1338 case nir_op_ieq32:
1339 case nir_op_ine32: {
1340 fs_reg dest = result;
1341
1342 const uint32_t bit_size = nir_src_bit_size(instr->src[0].src);
1343 if (bit_size != 32)
1344 dest = bld.vgrf(op[0].type, 1);
1345
1346 brw_conditional_mod cond;
1347 switch (instr->op) {
1348 case nir_op_ilt32:
1349 case nir_op_ult32:
1350 cond = BRW_CONDITIONAL_L;
1351 break;
1352 case nir_op_ige32:
1353 case nir_op_uge32:
1354 cond = BRW_CONDITIONAL_GE;
1355 break;
1356 case nir_op_ieq32:
1357 cond = BRW_CONDITIONAL_Z;
1358 break;
1359 case nir_op_ine32:
1360 cond = BRW_CONDITIONAL_NZ;
1361 break;
1362 default:
1363 unreachable("bad opcode");
1364 }
1365 bld.CMP(dest, op[0], op[1], cond);
1366
1367 if (bit_size > 32) {
1368 bld.MOV(result, subscript(dest, BRW_REGISTER_TYPE_UD, 0));
1369 } else if (bit_size < 32) {
1370 /* When we convert the result to 32-bit we need to be careful and do
1371 * it as a signed conversion to get sign extension (for 32-bit true)
1372 */
1373 const brw_reg_type src_type =
1374 brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_D);
1375
1376 bld.MOV(retype(result, BRW_REGISTER_TYPE_D), retype(dest, src_type));
1377 }
1378 break;
1379 }
1380
1381 case nir_op_inot:
1382 if (devinfo->gen >= 8) {
1383 nir_alu_instr *inot_src_instr = nir_src_as_alu_instr(instr->src[0].src);
1384
1385 if (inot_src_instr != NULL &&
1386 (inot_src_instr->op == nir_op_ior ||
1387 inot_src_instr->op == nir_op_ixor ||
1388 inot_src_instr->op == nir_op_iand) &&
1389 !inot_src_instr->src[0].abs &&
1390 !inot_src_instr->src[0].negate &&
1391 !inot_src_instr->src[1].abs &&
1392 !inot_src_instr->src[1].negate) {
1393 /* The sources of the source logical instruction are now the
1394 * sources of the instruction that will be generated.
1395 */
1396 prepare_alu_destination_and_sources(bld, inot_src_instr, op, false);
1397 resolve_inot_sources(bld, inot_src_instr, op);
1398
1399 /* Smash all of the sources and destination to be signed. This
1400 * doesn't matter for the operation of the instruction, but cmod
1401 * propagation fails on unsigned sources with negation (due to
1402 * fs_inst::can_do_cmod returning false).
1403 */
1404 result.type =
1405 brw_type_for_nir_type(devinfo,
1406 (nir_alu_type)(nir_type_int |
1407 nir_dest_bit_size(instr->dest.dest)));
1408 op[0].type =
1409 brw_type_for_nir_type(devinfo,
1410 (nir_alu_type)(nir_type_int |
1411 nir_src_bit_size(inot_src_instr->src[0].src)));
1412 op[1].type =
1413 brw_type_for_nir_type(devinfo,
1414 (nir_alu_type)(nir_type_int |
1415 nir_src_bit_size(inot_src_instr->src[1].src)));
1416
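/* The emission below is just De Morgan: ~(a | b) == ~a & ~b and
 * ~(a & b) == ~a | ~b, while for XOR inverting a single source is
 * enough because ~(a ^ b) == ~a ^ b.
 */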
1417 /* For XOR, only invert one of the sources. Arbitrarily choose
1418 * the first source.
1419 */
1420 op[0].negate = !op[0].negate;
1421 if (inot_src_instr->op != nir_op_ixor)
1422 op[1].negate = !op[1].negate;
1423
1424 switch (inot_src_instr->op) {
1425 case nir_op_ior:
1426 bld.AND(result, op[0], op[1]);
1427 return;
1428
1429 case nir_op_iand:
1430 bld.OR(result, op[0], op[1]);
1431 return;
1432
1433 case nir_op_ixor:
1434 bld.XOR(result, op[0], op[1]);
1435 return;
1436
1437 default:
1438 unreachable("impossible opcode");
1439 }
1440 }
1441 op[0] = resolve_source_modifiers(op[0]);
1442 }
1443 bld.NOT(result, op[0]);
1444 break;
1445 case nir_op_ixor:
1446 if (devinfo->gen >= 8) {
1447 resolve_inot_sources(bld, instr, op);
1448 }
1449 bld.XOR(result, op[0], op[1]);
1450 break;
1451 case nir_op_ior:
1452 if (devinfo->gen >= 8) {
1453 resolve_inot_sources(bld, instr, op);
1454 }
1455 bld.OR(result, op[0], op[1]);
1456 break;
1457 case nir_op_iand:
1458 if (devinfo->gen >= 8) {
1459 resolve_inot_sources(bld, instr, op);
1460 }
1461 bld.AND(result, op[0], op[1]);
1462 break;
1463
1464 case nir_op_fdot2:
1465 case nir_op_fdot3:
1466 case nir_op_fdot4:
1467 case nir_op_b32all_fequal2:
1468 case nir_op_b32all_iequal2:
1469 case nir_op_b32all_fequal3:
1470 case nir_op_b32all_iequal3:
1471 case nir_op_b32all_fequal4:
1472 case nir_op_b32all_iequal4:
1473 case nir_op_b32any_fnequal2:
1474 case nir_op_b32any_inequal2:
1475 case nir_op_b32any_fnequal3:
1476 case nir_op_b32any_inequal3:
1477 case nir_op_b32any_fnequal4:
1478 case nir_op_b32any_inequal4:
1479 unreachable("Lowered by nir_lower_alu_reductions");
1480
1481 case nir_op_fnoise1_1:
1482 case nir_op_fnoise1_2:
1483 case nir_op_fnoise1_3:
1484 case nir_op_fnoise1_4:
1485 case nir_op_fnoise2_1:
1486 case nir_op_fnoise2_2:
1487 case nir_op_fnoise2_3:
1488 case nir_op_fnoise2_4:
1489 case nir_op_fnoise3_1:
1490 case nir_op_fnoise3_2:
1491 case nir_op_fnoise3_3:
1492 case nir_op_fnoise3_4:
1493 case nir_op_fnoise4_1:
1494 case nir_op_fnoise4_2:
1495 case nir_op_fnoise4_3:
1496 case nir_op_fnoise4_4:
1497 unreachable("not reached: should be handled by lower_noise");
1498
1499 case nir_op_ldexp:
1500 unreachable("not reached: should be handled by ldexp_to_arith()");
1501
1502 case nir_op_fsqrt:
1503 inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
1504 inst->saturate = instr->dest.saturate;
1505 break;
1506
1507 case nir_op_frsq:
1508 inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
1509 inst->saturate = instr->dest.saturate;
1510 break;
1511
1512 case nir_op_i2b32:
1513 case nir_op_f2b32: {
1514 uint32_t bit_size = nir_src_bit_size(instr->src[0].src);
1515 if (bit_size == 64) {
1516 /* two-argument instructions can't take 64-bit immediates */
1517 fs_reg zero;
1518 fs_reg tmp;
1519
1520 if (instr->op == nir_op_f2b32) {
1521 zero = vgrf(glsl_type::double_type);
1522 tmp = vgrf(glsl_type::double_type);
1523 bld.MOV(zero, setup_imm_df(bld, 0.0));
1524 } else {
1525 zero = vgrf(glsl_type::int64_t_type);
1526 tmp = vgrf(glsl_type::int64_t_type);
1527 bld.MOV(zero, brw_imm_q(0));
1528 }
1529
1530 /* A SIMD16 execution needs to be split into two instructions, so use
1531 * a vgrf instead of the flag register as dst so instruction splitting
1532 * works.
1533 */
1534 bld.CMP(tmp, op[0], zero, BRW_CONDITIONAL_NZ);
1535 bld.MOV(result, subscript(tmp, BRW_REGISTER_TYPE_UD, 0));
1536 } else {
1537 fs_reg zero;
1538 if (bit_size == 32) {
1539 zero = instr->op == nir_op_f2b32 ? brw_imm_f(0.0f) : brw_imm_d(0);
1540 } else {
1541 assert(bit_size == 16);
1542 zero = instr->op == nir_op_f2b32 ?
1543 retype(brw_imm_w(0), BRW_REGISTER_TYPE_HF) : brw_imm_w(0);
1544 }
1545 bld.CMP(result, op[0], zero, BRW_CONDITIONAL_NZ);
1546 }
1547 break;
1548 }
1549
1550 case nir_op_ftrunc:
1551 inst = bld.RNDZ(result, op[0]);
1552 inst->saturate = instr->dest.saturate;
1553 break;
1554
1555 case nir_op_fceil: {
1556 op[0].negate = !op[0].negate;
1557 fs_reg temp = vgrf(glsl_type::float_type);
1558 bld.RNDD(temp, op[0]);
1559 temp.negate = true;
1560 inst = bld.MOV(result, temp);
1561 inst->saturate = instr->dest.saturate;
1562 break;
1563 }
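/* The fceil case above relies on ceil(x) == -floor(-x); e.g.
 * (illustrative) x == 1.3 gives RNDD(-1.3) == -2.0, negated back to
 * 2.0 == ceil(1.3).
 */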
1564 case nir_op_ffloor:
1565 inst = bld.RNDD(result, op[0]);
1566 inst->saturate = instr->dest.saturate;
1567 break;
1568 case nir_op_ffract:
1569 inst = bld.FRC(result, op[0]);
1570 inst->saturate = instr->dest.saturate;
1571 break;
1572 case nir_op_fround_even:
1573 inst = bld.RNDE(result, op[0]);
1574 inst->saturate = instr->dest.saturate;
1575 break;
1576
1577 case nir_op_fquantize2f16: {
1578 fs_reg tmp16 = bld.vgrf(BRW_REGISTER_TYPE_D);
1579 fs_reg tmp32 = bld.vgrf(BRW_REGISTER_TYPE_F);
1580 fs_reg zero = bld.vgrf(BRW_REGISTER_TYPE_F);
1581
1582 /* The destination stride must be at least as big as the source stride. */
1583 tmp16.type = BRW_REGISTER_TYPE_W;
1584 tmp16.stride = 2;
1585
1586 /* Check for denormal */
1587 fs_reg abs_src0 = op[0];
1588 abs_src0.abs = true;
1589 bld.CMP(bld.null_reg_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
1590 BRW_CONDITIONAL_L);
1591 /* Get the appropriately signed zero */
1592 bld.AND(retype(zero, BRW_REGISTER_TYPE_UD),
1593 retype(op[0], BRW_REGISTER_TYPE_UD),
1594 brw_imm_ud(0x80000000));
1595 /* Do the actual F32 -> F16 -> F32 conversion */
1596 bld.emit(BRW_OPCODE_F32TO16, tmp16, op[0]);
1597 bld.emit(BRW_OPCODE_F16TO32, tmp32, tmp16);
1598 /* Select that or zero based on normal status */
1599 inst = bld.SEL(result, zero, tmp32);
1600 inst->predicate = BRW_PREDICATE_NORMAL;
1601 inst->saturate = instr->dest.saturate;
1602 break;
1603 }
1604
1605 case nir_op_imin:
1606 case nir_op_umin:
1607 case nir_op_fmin:
1608 inst = bld.emit_minmax(result, op[0], op[1], BRW_CONDITIONAL_L);
1609 inst->saturate = instr->dest.saturate;
1610 break;
1611
1612 case nir_op_imax:
1613 case nir_op_umax:
1614 case nir_op_fmax:
1615 inst = bld.emit_minmax(result, op[0], op[1], BRW_CONDITIONAL_GE);
1616 inst->saturate = instr->dest.saturate;
1617 break;
1618
1619 case nir_op_pack_snorm_2x16:
1620 case nir_op_pack_snorm_4x8:
1621 case nir_op_pack_unorm_2x16:
1622 case nir_op_pack_unorm_4x8:
1623 case nir_op_unpack_snorm_2x16:
1624 case nir_op_unpack_snorm_4x8:
1625 case nir_op_unpack_unorm_2x16:
1626 case nir_op_unpack_unorm_4x8:
1627 case nir_op_unpack_half_2x16:
1628 case nir_op_pack_half_2x16:
1629 unreachable("not reached: should be handled by lower_packing_builtins");
1630
1631 case nir_op_unpack_half_2x16_split_x:
1632 inst = bld.emit(BRW_OPCODE_F16TO32, result,
1633 subscript(op[0], BRW_REGISTER_TYPE_UW, 0));
1634 inst->saturate = instr->dest.saturate;
1635 break;
1636 case nir_op_unpack_half_2x16_split_y:
1637 inst = bld.emit(BRW_OPCODE_F16TO32, result,
1638 subscript(op[0], BRW_REGISTER_TYPE_UW, 1));
1639 inst->saturate = instr->dest.saturate;
1640 break;
1641
1642 case nir_op_pack_64_2x32_split:
1643 case nir_op_pack_32_2x16_split:
1644 bld.emit(FS_OPCODE_PACK, result, op[0], op[1]);
1645 break;
1646
1647 case nir_op_unpack_64_2x32_split_x:
1648 case nir_op_unpack_64_2x32_split_y: {
1649 if (instr->op == nir_op_unpack_64_2x32_split_x)
1650 bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UD, 0));
1651 else
1652 bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UD, 1));
1653 break;
1654 }
1655
1656 case nir_op_unpack_32_2x16_split_x:
1657 case nir_op_unpack_32_2x16_split_y: {
1658 if (instr->op == nir_op_unpack_32_2x16_split_x)
1659 bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UW, 0));
1660 else
1661 bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UW, 1));
1662 break;
1663 }
1664
1665 case nir_op_fpow:
1666 inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
1667 inst->saturate = instr->dest.saturate;
1668 break;
1669
1670 case nir_op_bitfield_reverse:
1671 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1672 bld.BFREV(result, op[0]);
1673 break;
1674
1675 case nir_op_bit_count:
1676 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1677 bld.CBIT(result, op[0]);
1678 break;
1679
1680 case nir_op_ufind_msb: {
1681 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1682 emit_find_msb_using_lzd(bld, result, op[0], false);
1683 break;
1684 }
1685
1686 case nir_op_ifind_msb: {
1687 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1688
1689 if (devinfo->gen < 7) {
1690 emit_find_msb_using_lzd(bld, result, op[0], true);
1691 } else {
1692 bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);
1693
1694 /* FBH counts from the MSB side, while GLSL's findMSB() wants the
1695 * count from the LSB side. If FBH didn't return an error
1696 * (0xFFFFFFFF), then subtract the result from 31 to convert the MSB
1697 * count into an LSB count.
1698 */
1699 bld.CMP(bld.null_reg_d(), result, brw_imm_d(-1), BRW_CONDITIONAL_NZ);
1700
1701 inst = bld.ADD(result, result, brw_imm_d(31));
1702 inst->predicate = BRW_PREDICATE_NORMAL;
1703 inst->src[0].negate = true;
1704 }
1705 break;
1706 }
1707
1708 case nir_op_find_lsb:
1709 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1710
1711 if (devinfo->gen < 7) {
1712 fs_reg temp = vgrf(glsl_type::int_type);
1713
1714 /* (x & -x) generates a value that consists of only the LSB of x.
1715 * For all powers of 2, findMSB(y) == findLSB(y).
1716 */
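/* E.g. (illustrative) x == 12 (0b1100): x & -x == 4, keeping only the
 * lowest set bit, and findMSB(4) == 2 == findLSB(12).
 */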
1717 fs_reg src = retype(op[0], BRW_REGISTER_TYPE_D);
1718 fs_reg negated_src = src;
1719
1720 /* One must be negated, and the other must be non-negated. It
1721 * doesn't matter which is which.
1722 */
1723 negated_src.negate = true;
1724 src.negate = false;
1725
1726 bld.AND(temp, src, negated_src);
1727 emit_find_msb_using_lzd(bld, result, temp, false);
1728 } else {
1729 bld.FBL(result, op[0]);
1730 }
1731 break;
1732
1733 case nir_op_ubitfield_extract:
1734 case nir_op_ibitfield_extract:
1735 unreachable("should have been lowered");
1736 case nir_op_ubfe:
1737 case nir_op_ibfe:
1738 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1739 bld.BFE(result, op[2], op[1], op[0]);
1740 break;
1741 case nir_op_bfm:
1742 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1743 bld.BFI1(result, op[0], op[1]);
1744 break;
1745 case nir_op_bfi:
1746 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1747 bld.BFI2(result, op[0], op[1], op[2]);
1748 break;
1749
1750 case nir_op_bitfield_insert:
1751 unreachable("not reached: should have been lowered");
1752
1753 case nir_op_ishl:
1754 bld.SHL(result, op[0], op[1]);
1755 break;
1756 case nir_op_ishr:
1757 bld.ASR(result, op[0], op[1]);
1758 break;
1759 case nir_op_ushr:
1760 bld.SHR(result, op[0], op[1]);
1761 break;
1762
1763 case nir_op_pack_half_2x16_split:
1764 bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
1765 break;
1766
1767 case nir_op_ffma:
1768 inst = bld.MAD(result, op[2], op[1], op[0]);
1769 inst->saturate = instr->dest.saturate;
1770 break;
1771
1772 case nir_op_flrp:
1773 inst = bld.LRP(result, op[0], op[1], op[2]);
1774 inst->saturate = instr->dest.saturate;
1775 break;
1776
1777 case nir_op_b32csel:
1778 if (optimize_frontfacing_ternary(instr, result))
1779 return;
1780
1781 bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ);
1782 inst = bld.SEL(result, op[1], op[2]);
1783 inst->predicate = BRW_PREDICATE_NORMAL;
1784 break;
1785
1786 case nir_op_extract_u8:
1787 case nir_op_extract_i8: {
1788 unsigned byte = nir_src_as_uint(instr->src[1].src);
1789
1790 /* The PRMs say:
1791 *
1792 * BDW+
1793 * There is no direct conversion from B/UB to Q/UQ or Q/UQ to B/UB.
1794 * Use two instructions and a word or DWord intermediate integer type.
1795 */
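/* E.g. (illustrative) with a 64-bit destination, extract_u8(x, 3) reads
 * the high byte of UW word 1 and becomes SHR(word1, 8), extract_u8(x, 2)
 * becomes AND(word1, 0xff), and extract_i8 goes through a signed W
 * temporary so the value sign-extends correctly to 64 bits.
 */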
1796 if (nir_dest_bit_size(instr->dest.dest) == 64) {
1797 const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8);
1798
1799 if (instr->op == nir_op_extract_i8) {
1800 /* If we need to sign extend, extract to a word first */
1801 fs_reg w_temp = bld.vgrf(BRW_REGISTER_TYPE_W);
1802 bld.MOV(w_temp, subscript(op[0], type, byte));
1803 bld.MOV(result, w_temp);
1804 } else if (byte & 1) {
1805 /* Extract the high byte from the word containing the desired byte
1806 * offset.
1807 */
1808 bld.SHR(result,
1809 subscript(op[0], BRW_REGISTER_TYPE_UW, byte / 2),
1810 brw_imm_uw(8));
1811 } else {
1812 /* Otherwise use an AND with 0xff and a word type */
1813 bld.AND(result,
1814 subscript(op[0], BRW_REGISTER_TYPE_UW, byte / 2),
1815 brw_imm_uw(0xff));
1816 }
1817 } else {
1818 const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8);
1819 bld.MOV(result, subscript(op[0], type, byte));
1820 }
1821 break;
1822 }
1823
1824 case nir_op_extract_u16:
1825 case nir_op_extract_i16: {
1826 const brw_reg_type type = brw_int_type(2, instr->op == nir_op_extract_i16);
1827 unsigned word = nir_src_as_uint(instr->src[1].src);
1828 bld.MOV(result, subscript(op[0], type, word));
1829 break;
1830 }
1831
1832 default:
1833 unreachable("unhandled instruction");
1834 }
1835
1836 /* If we need to do a boolean resolve, replace the result with -(x & 1)
1837 * to sign extend the low bit to 0/~0
1838 */
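   /* e.g. a "true" result of 1 becomes (1 & 1) = 1, negated to -1 = ~0. */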
1839 if (devinfo->gen <= 5 &&
1840 !result.is_null() &&
1841 (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
1842 fs_reg masked = vgrf(glsl_type::int_type);
1843 bld.AND(masked, result, brw_imm_d(1));
1844 masked.negate = true;
1845 bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
1846 }
1847 }
1848
1849 void
1850 fs_visitor::nir_emit_load_const(const fs_builder &bld,
1851 nir_load_const_instr *instr)
1852 {
1853 const brw_reg_type reg_type =
1854 brw_reg_type_from_bit_size(instr->def.bit_size, BRW_REGISTER_TYPE_D);
1855 fs_reg reg = bld.vgrf(reg_type, instr->def.num_components);
1856
1857 switch (instr->def.bit_size) {
1858 case 8:
1859 for (unsigned i = 0; i < instr->def.num_components; i++)
1860 bld.MOV(offset(reg, bld, i), setup_imm_b(bld, instr->value[i].i8));
1861 break;
1862
1863 case 16:
1864 for (unsigned i = 0; i < instr->def.num_components; i++)
1865 bld.MOV(offset(reg, bld, i), brw_imm_w(instr->value[i].i16));
1866 break;
1867
1868 case 32:
1869 for (unsigned i = 0; i < instr->def.num_components; i++)
1870 bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value[i].i32));
1871 break;
1872
1873 case 64:
1874 assert(devinfo->gen >= 7);
1875 if (devinfo->gen == 7) {
1876 /* We don't get 64-bit integer types until gen8 */
1877 for (unsigned i = 0; i < instr->def.num_components; i++) {
1878 bld.MOV(retype(offset(reg, bld, i), BRW_REGISTER_TYPE_DF),
1879 setup_imm_df(bld, instr->value[i].f64));
1880 }
1881 } else {
1882 for (unsigned i = 0; i < instr->def.num_components; i++)
1883 bld.MOV(offset(reg, bld, i), brw_imm_q(instr->value[i].i64));
1884 }
1885 break;
1886
1887 default:
1888 unreachable("Invalid bit size");
1889 }
1890
1891 nir_ssa_values[instr->def.index] = reg;
1892 }
1893
1894 fs_reg
1895 fs_visitor::get_nir_src(const nir_src &src)
1896 {
1897 fs_reg reg;
1898 if (src.is_ssa) {
1899 if (src.ssa->parent_instr->type == nir_instr_type_ssa_undef) {
1900 const brw_reg_type reg_type =
1901 brw_reg_type_from_bit_size(src.ssa->bit_size, BRW_REGISTER_TYPE_D);
1902 reg = bld.vgrf(reg_type, src.ssa->num_components);
1903 } else {
1904 reg = nir_ssa_values[src.ssa->index];
1905 }
1906 } else {
1907 /* We don't handle indirects on locals */
1908 assert(src.reg.indirect == NULL);
1909 reg = offset(nir_locals[src.reg.reg->index], bld,
1910 src.reg.base_offset * src.reg.reg->num_components);
1911 }
1912
1913 if (nir_src_bit_size(src) == 64 && devinfo->gen == 7) {
1914 /* The only 64-bit type available on gen7 is DF, so use that. */
1915 reg.type = BRW_REGISTER_TYPE_DF;
1916 } else {
1917 /* To avoid floating-point denorm flushing problems, set the type by
1918 * default to an integer type - instructions that need floating point
1919 * semantics will set this to F if they need to
1920 */
1921 reg.type = brw_reg_type_from_bit_size(nir_src_bit_size(src),
1922 BRW_REGISTER_TYPE_D);
1923 }
1924
1925 return reg;
1926 }
1927
1928 /**
1929 * Return an IMM for constants; otherwise call get_nir_src() as normal.
1930 *
1931 * This function should not be called on any value which may be 64 bits.
1932 * We could theoretically support 64-bit on gen8+ but we choose not to
1933 * because it wouldn't work in general (no gen7 support) and there are
1934 * enough restrictions in 64-bit immediates that you can't take the return
1935 * value and treat it the same as the result of get_nir_src().
1936 */
1937 fs_reg
1938 fs_visitor::get_nir_src_imm(const nir_src &src)
1939 {
1940 assert(nir_src_bit_size(src) == 32);
1941 return nir_src_is_const(src) ?
1942 fs_reg(brw_imm_d(nir_src_as_int(src))) : get_nir_src(src);
1943 }
1944
1945 fs_reg
1946 fs_visitor::get_nir_dest(const nir_dest &dest)
1947 {
1948 if (dest.is_ssa) {
1949 const brw_reg_type reg_type =
1950 brw_reg_type_from_bit_size(dest.ssa.bit_size,
1951 dest.ssa.bit_size == 8 ?
1952 BRW_REGISTER_TYPE_D :
1953 BRW_REGISTER_TYPE_F);
1954 nir_ssa_values[dest.ssa.index] =
1955 bld.vgrf(reg_type, dest.ssa.num_components);
1956 bld.UNDEF(nir_ssa_values[dest.ssa.index]);
1957 return nir_ssa_values[dest.ssa.index];
1958 } else {
1959 /* We don't handle indirects on locals */
1960 assert(dest.reg.indirect == NULL);
1961 return offset(nir_locals[dest.reg.reg->index], bld,
1962 dest.reg.base_offset * dest.reg.reg->num_components);
1963 }
1964 }
1965
1966 void
1967 fs_visitor::emit_percomp(const fs_builder &bld, const fs_inst &inst,
1968 unsigned wr_mask)
1969 {
1970 for (unsigned i = 0; i < 4; i++) {
1971 if (!((wr_mask >> i) & 1))
1972 continue;
1973
1974 fs_inst *new_inst = new(mem_ctx) fs_inst(inst);
1975 new_inst->dst = offset(new_inst->dst, bld, i);
1976 for (unsigned j = 0; j < new_inst->sources; j++)
1977 if (new_inst->src[j].file == VGRF)
1978 new_inst->src[j] = offset(new_inst->src[j], bld, i);
1979
1980 bld.emit(new_inst);
1981 }
1982 }
1983
1984 static fs_inst *
1985 emit_pixel_interpolater_send(const fs_builder &bld,
1986 enum opcode opcode,
1987 const fs_reg &dst,
1988 const fs_reg &src,
1989 const fs_reg &desc,
1990 glsl_interp_mode interpolation)
1991 {
1992 struct brw_wm_prog_data *wm_prog_data =
1993 brw_wm_prog_data(bld.shader->stage_prog_data);
1994
1995 fs_inst *inst = bld.emit(opcode, dst, src, desc);
1996 /* 2 floats per slot returned */
1997 inst->size_written = 2 * dst.component_size(inst->exec_size);
1998 inst->pi_noperspective = interpolation == INTERP_MODE_NOPERSPECTIVE;
1999
2000 wm_prog_data->pulls_bary = true;
2001
2002 return inst;
2003 }
2004
2005 /**
2006 * Computes 1 << x, given a D/UD register containing some value x.
2007 */
2008 static fs_reg
2009 intexp2(const fs_builder &bld, const fs_reg &x)
2010 {
2011 assert(x.type == BRW_REGISTER_TYPE_UD || x.type == BRW_REGISTER_TYPE_D);
2012
2013 fs_reg result = bld.vgrf(x.type, 1);
2014 fs_reg one = bld.vgrf(x.type, 1);
2015
2016 bld.MOV(one, retype(brw_imm_d(1), one.type));
2017 bld.SHL(result, one, x);
2018 return result;
2019 }
2020
2021 void
2022 fs_visitor::emit_gs_end_primitive(const nir_src &vertex_count_nir_src)
2023 {
2024 assert(stage == MESA_SHADER_GEOMETRY);
2025
2026 struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
2027
2028 if (gs_compile->control_data_header_size_bits == 0)
2029 return;
2030
2031 /* We can only do EndPrimitive() functionality when the control data
2032 * consists of cut bits. Fortunately, the only time it isn't is when the
2033 * output type is points, in which case EndPrimitive() is a no-op.
2034 */
2035 if (gs_prog_data->control_data_format !=
2036 GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
2037 return;
2038 }
2039
2040 /* Cut bits use one bit per vertex. */
2041 assert(gs_compile->control_data_bits_per_vertex == 1);
2042
2043 fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
2044 vertex_count.type = BRW_REGISTER_TYPE_UD;
2045
2046 /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
2047 * vertex n, 0 otherwise. So all we need to do here is mark bit
2048 * (vertex_count - 1) % 32 in the cut_bits register to indicate that
2049 * EndPrimitive() was called after emitting vertex (vertex_count - 1);
2050 * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
2051 *
2052 * Note that if EndPrimitive() is called before emitting any vertices, this
2053 * will cause us to set bit 31 of the control_data_bits register to 1.
2054 * That's fine because:
2055 *
2056 * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
2057 * output, so the hardware will ignore cut bit 31.
2058 *
2059 * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
2060 * last vertex, so setting cut bit 31 has no effect (since the primitive
2061 * is automatically ended when the GS terminates).
2062 *
2063 * - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
2064 * control_data_bits register to 0 when the first vertex is emitted.
2065 */
2066
2067 const fs_builder abld = bld.annotate("end primitive");
2068
2069 /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
2070 fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2071 abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
2072 fs_reg mask = intexp2(abld, prev_count);
2073 /* Note: we're relying on the fact that the GEN SHL instruction only pays
2074 * attention to the lower 5 bits of its second source argument, so on this
2075 * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
2076 * ((vertex_count - 1) % 32).
2077 */
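   /* e.g. vertex_count == 35: prev_count == 34 and mask == 1 << (34 % 32),
    * i.e. bit 2 of the current 32-bit batch of cut bits.
    */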
2078 abld.OR(this->control_data_bits, this->control_data_bits, mask);
2079 }
2080
2081 void
2082 fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count)
2083 {
2084 assert(stage == MESA_SHADER_GEOMETRY);
2085 assert(gs_compile->control_data_bits_per_vertex != 0);
2086
2087 struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
2088
2089 const fs_builder abld = bld.annotate("emit control data bits");
2090 const fs_builder fwa_bld = bld.exec_all();
2091
2092 /* We use a single UD register to accumulate control data bits (32 bits
2093 * for each of the SIMD8 channels). So we need to write a DWord (32 bits)
2094 * at a time.
2095 *
2096 * Unfortunately, the URB_WRITE_SIMD8 message uses 128-bit (OWord) offsets.
2097      * We have to select a 128-bit group via the Global and Per-Slot Offsets, then
2098 * use the Channel Mask phase to enable/disable which DWord within that
2099 * group to write. (Remember, different SIMD8 channels may have emitted
2100 * different numbers of vertices, so we may need per-slot offsets.)
2101 *
2102 * Channel masking presents an annoying problem: we may have to replicate
2103 * the data up to 4 times:
2104 *
2105 * Msg = Handles, Per-Slot Offsets, Channel Masks, Data, Data, Data, Data.
2106 *
2107 * To avoid penalizing shaders that emit a small number of vertices, we
2108 * can avoid these sometimes: if the size of the control data header is
2109 * <= 128 bits, then there is only 1 OWord. All SIMD8 channels will land
2110      * in the same 128-bit group, so we can skip per-slot offsets.
2111 *
2112 * Similarly, if the control data header is <= 32 bits, there is only one
2113 * DWord, so we can skip channel masks.
2114 */
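   /* For example, a 96-bit control data header needs channel masks (> 32
    * bits) but can still skip per-slot offsets (<= 128 bits).
    */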
2115 enum opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;
2116
2117 fs_reg channel_mask, per_slot_offset;
2118
2119 if (gs_compile->control_data_header_size_bits > 32) {
2120 opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
2121 channel_mask = vgrf(glsl_type::uint_type);
2122 }
2123
2124 if (gs_compile->control_data_header_size_bits > 128) {
2125 opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT;
2126 per_slot_offset = vgrf(glsl_type::uint_type);
2127 }
2128
2129 /* Figure out which DWord we're trying to write to using the formula:
2130 *
2131 * dword_index = (vertex_count - 1) * bits_per_vertex / 32
2132 *
2133 * Since bits_per_vertex is a power of two, and is known at compile
2134 * time, this can be optimized to:
2135 *
2136      *    dword_index = (vertex_count - 1) >> (5 - log2(bits_per_vertex))
2137 */
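   /* The code below computes the shift as 6 - util_last_bit(bits_per_vertex),
    * which equals 5 - log2(bits_per_vertex) for powers of two.  For example,
    * with bits_per_vertex == 2 and vertex_count == 40:
    * (40 - 1) * 2 / 32 == 2, and 39 >> 4 == 2.
    */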
2138 if (opcode != SHADER_OPCODE_URB_WRITE_SIMD8) {
2139 fs_reg dword_index = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2140 fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2141 abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
2142 unsigned log2_bits_per_vertex =
2143 util_last_bit(gs_compile->control_data_bits_per_vertex);
2144 abld.SHR(dword_index, prev_count, brw_imm_ud(6u - log2_bits_per_vertex));
2145
2146 if (per_slot_offset.file != BAD_FILE) {
2147 /* Set the per-slot offset to dword_index / 4, so that we'll write to
2148 * the appropriate OWord within the control data header.
2149 */
2150 abld.SHR(per_slot_offset, dword_index, brw_imm_ud(2u));
2151 }
2152
2153 /* Set the channel masks to 1 << (dword_index % 4), so that we'll
2154 * write to the appropriate DWORD within the OWORD.
2155 */
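      /* e.g. dword_index == 2: channel == 2, so channel_mask == (1 << 2) << 16
       * == 0x40000, enabling DWord 2 of the selected OWord.
       */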
2156 fs_reg channel = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2157 fwa_bld.AND(channel, dword_index, brw_imm_ud(3u));
2158 channel_mask = intexp2(fwa_bld, channel);
2159 /* Then the channel masks need to be in bits 23:16. */
2160 fwa_bld.SHL(channel_mask, channel_mask, brw_imm_ud(16u));
2161 }
2162
2163 /* Store the control data bits in the message payload and send it. */
2164 unsigned mlen = 2;
2165 if (channel_mask.file != BAD_FILE)
2166 mlen += 4; /* channel masks, plus 3 extra copies of the data */
2167 if (per_slot_offset.file != BAD_FILE)
2168 mlen++;
2169
2170 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
2171 fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen);
2172 unsigned i = 0;
2173 sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
2174 if (per_slot_offset.file != BAD_FILE)
2175 sources[i++] = per_slot_offset;
2176 if (channel_mask.file != BAD_FILE)
2177 sources[i++] = channel_mask;
2178 while (i < mlen) {
2179 sources[i++] = this->control_data_bits;
2180 }
2181
2182 abld.LOAD_PAYLOAD(payload, sources, mlen, mlen);
2183 fs_inst *inst = abld.emit(opcode, reg_undef, payload);
2184 inst->mlen = mlen;
2185 /* We need to increment Global Offset by 256-bits to make room for
2186 * Broadwell's extra "Vertex Count" payload at the beginning of the
2187 * URB entry. Since this is an OWord message, Global Offset is counted
2188 * in 128-bit units, so we must set it to 2.
2189 */
2190 if (gs_prog_data->static_vertex_count == -1)
2191 inst->offset = 2;
2192 }
2193
2194 void
2195 fs_visitor::set_gs_stream_control_data_bits(const fs_reg &vertex_count,
2196 unsigned stream_id)
2197 {
2198 /* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */
2199
2200 /* Note: we are calling this *before* increasing vertex_count, so
2201       * the vertex_count we are passed equals (vertex_count - 1) in the formula above.
2202 */
2203
2204 /* Stream mode uses 2 bits per vertex */
2205 assert(gs_compile->control_data_bits_per_vertex == 2);
2206
2207 /* Must be a valid stream */
2208 assert(stream_id < MAX_VERTEX_STREAMS);
2209
2210 /* Control data bits are initialized to 0 so we don't have to set any
2211 * bits when sending vertices to stream 0.
2212 */
2213 if (stream_id == 0)
2214 return;
2215
2216 const fs_builder abld = bld.annotate("set stream control data bits", NULL);
2217
2218 /* reg::sid = stream_id */
2219 fs_reg sid = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2220 abld.MOV(sid, brw_imm_ud(stream_id));
2221
2222 /* reg:shift_count = 2 * (vertex_count - 1) */
2223 fs_reg shift_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2224 abld.SHL(shift_count, vertex_count, brw_imm_ud(1u));
2225
2226 /* Note: we're relying on the fact that the GEN SHL instruction only pays
2227 * attention to the lower 5 bits of its second source argument, so on this
2228 * architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
2229 * stream_id << ((2 * (vertex_count - 1)) % 32).
2230 */
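   /* e.g. stream_id == 2 with an incoming vertex_count of 3: shift_count == 6,
    * so mask == 2 << 6 == 0x80, setting bits 7:6 of control_data_bits to 0b10.
    */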
2231 fs_reg mask = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2232 abld.SHL(mask, sid, shift_count);
2233 abld.OR(this->control_data_bits, this->control_data_bits, mask);
2234 }
2235
2236 void
2237 fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src,
2238 unsigned stream_id)
2239 {
2240 assert(stage == MESA_SHADER_GEOMETRY);
2241
2242 struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
2243
2244 fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
2245 vertex_count.type = BRW_REGISTER_TYPE_UD;
2246
2247 /* Haswell and later hardware ignores the "Render Stream Select" bits
2248 * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
2249 * and instead sends all primitives down the pipeline for rasterization.
2250 * If the SOL stage is enabled, "Render Stream Select" is honored and
2251 * primitives bound to non-zero streams are discarded after stream output.
2252 *
2253      * Since the only purpose of primitives sent to non-zero streams is to
2254 * be recorded by transform feedback, we can simply discard all geometry
2255 * bound to these streams when transform feedback is disabled.
2256 */
2257 if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
2258 return;
2259
2260 /* If we're outputting 32 control data bits or less, then we can wait
2261 * until the shader is over to output them all. Otherwise we need to
2262 * output them as we go. Now is the time to do it, since we're about to
2263 * output the vertex_count'th vertex, so it's guaranteed that the
2264 * control data bits associated with the (vertex_count - 1)th vertex are
2265 * correct.
2266 */
2267 if (gs_compile->control_data_header_size_bits > 32) {
2268 const fs_builder abld =
2269 bld.annotate("emit vertex: emit control data bits");
2270
2271 /* Only emit control data bits if we've finished accumulating a batch
2272 * of 32 bits. This is the case when:
2273 *
2274 * (vertex_count * bits_per_vertex) % 32 == 0
2275 *
2276 * (in other words, when the last 5 bits of vertex_count *
2277 * bits_per_vertex are 0). Assuming bits_per_vertex == 2^n for some
2278 * integer n (which is always the case, since bits_per_vertex is
2279 * always 1 or 2), this is equivalent to requiring that the last 5-n
2280 * bits of vertex_count are 0:
2281 *
2282 * vertex_count & (2^(5-n) - 1) == 0
2283 *
2284 * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
2285 * equivalent to:
2286 *
2287 * vertex_count & (32 / bits_per_vertex - 1) == 0
2288 *
2289 * TODO: If vertex_count is an immediate, we could do some of this math
2290 * at compile time...
2291 */
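      /* e.g. with bits_per_vertex == 2 this checks vertex_count & 15 == 0,
       * i.e. we flush the control data bits after every 16 vertices
       * (16 * 2 == 32 accumulated bits).
       */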
2292 fs_inst *inst =
2293 abld.AND(bld.null_reg_d(), vertex_count,
2294 brw_imm_ud(32u / gs_compile->control_data_bits_per_vertex - 1u));
2295 inst->conditional_mod = BRW_CONDITIONAL_Z;
2296
2297 abld.IF(BRW_PREDICATE_NORMAL);
2298 /* If vertex_count is 0, then no control data bits have been
2299 * accumulated yet, so we can skip emitting them.
2300 */
2301 abld.CMP(bld.null_reg_d(), vertex_count, brw_imm_ud(0u),
2302 BRW_CONDITIONAL_NEQ);
2303 abld.IF(BRW_PREDICATE_NORMAL);
2304 emit_gs_control_data_bits(vertex_count);
2305 abld.emit(BRW_OPCODE_ENDIF);
2306
2307 /* Reset control_data_bits to 0 so we can start accumulating a new
2308 * batch.
2309 *
2310 * Note: in the case where vertex_count == 0, this neutralizes the
2311 * effect of any call to EndPrimitive() that the shader may have
2312 * made before outputting its first vertex.
2313 */
2314 inst = abld.MOV(this->control_data_bits, brw_imm_ud(0u));
2315 inst->force_writemask_all = true;
2316 abld.emit(BRW_OPCODE_ENDIF);
2317 }
2318
2319 emit_urb_writes(vertex_count);
2320
2321 /* In stream mode we have to set control data bits for all vertices
2322 * unless we have disabled control data bits completely (which we do
2323      * for GL_POINTS outputs that don't use streams).
2324 */
2325 if (gs_compile->control_data_header_size_bits > 0 &&
2326 gs_prog_data->control_data_format ==
2327 GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
2328 set_gs_stream_control_data_bits(vertex_count, stream_id);
2329 }
2330 }
2331
2332 void
2333 fs_visitor::emit_gs_input_load(const fs_reg &dst,
2334 const nir_src &vertex_src,
2335 unsigned base_offset,
2336 const nir_src &offset_src,
2337 unsigned num_components,
2338 unsigned first_component)
2339 {
2340 struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
2341 const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8;
2342
2343 /* TODO: figure out push input layout for invocations == 1 */
2344 /* TODO: make this work with 64-bit inputs */
2345 if (gs_prog_data->invocations == 1 &&
2346 type_sz(dst.type) <= 4 &&
2347 nir_src_is_const(offset_src) && nir_src_is_const(vertex_src) &&
2348 4 * (base_offset + nir_src_as_uint(offset_src)) < push_reg_count) {
2349 int imm_offset = (base_offset + nir_src_as_uint(offset_src)) * 4 +
2350 nir_src_as_uint(vertex_src) * push_reg_count;
2351 for (unsigned i = 0; i < num_components; i++) {
2352 bld.MOV(offset(dst, bld, i),
2353 fs_reg(ATTR, imm_offset + i + first_component, dst.type));
2354 }
2355 return;
2356 }
2357
2358 /* Resort to the pull model. Ensure the VUE handles are provided. */
2359 assert(gs_prog_data->base.include_vue_handles);
2360
2361 unsigned first_icp_handle = gs_prog_data->include_primitive_id ? 3 : 2;
2362 fs_reg icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2363
2364 if (gs_prog_data->invocations == 1) {
2365 if (nir_src_is_const(vertex_src)) {
2366 /* The vertex index is constant; just select the proper URB handle. */
2367 icp_handle =
2368 retype(brw_vec8_grf(first_icp_handle + nir_src_as_uint(vertex_src), 0),
2369 BRW_REGISTER_TYPE_UD);
2370 } else {
2371 /* The vertex index is non-constant. We need to use indirect
2372 * addressing to fetch the proper URB handle.
2373 *
2374 * First, we start with the sequence <7, 6, 5, 4, 3, 2, 1, 0>
2375 * indicating that channel <n> should read the handle from
2376 * DWord <n>. We convert that to bytes by multiplying by 4.
2377 *
2378 * Next, we convert the vertex index to bytes by multiplying
2379 * by 32 (shifting by 5), and add the two together. This is
2380 * the final indirect byte offset.
2381 */
2382 fs_reg sequence = bld.vgrf(BRW_REGISTER_TYPE_UW, 1);
2383 fs_reg channel_offsets = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2384 fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2385 fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2386
2387 /* sequence = <7, 6, 5, 4, 3, 2, 1, 0> */
2388 bld.MOV(sequence, fs_reg(brw_imm_v(0x76543210)));
2389 /* channel_offsets = 4 * sequence = <28, 24, 20, 16, 12, 8, 4, 0> */
2390 bld.SHL(channel_offsets, sequence, brw_imm_ud(2u));
2391 /* Convert vertex_index to bytes (multiply by 32) */
2392 bld.SHL(vertex_offset_bytes,
2393 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2394 brw_imm_ud(5u));
2395 bld.ADD(icp_offset_bytes, vertex_offset_bytes, channel_offsets);
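            /* e.g. channel 3 reading vertex 2: icp_offset_bytes == 2 * 32 +
             * 3 * 4 == 76, i.e. DWord 3 of GRF (first_icp_handle + 2).
             */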
2396
2397 /* Use first_icp_handle as the base offset. There is one register
2398 * of URB handles per vertex, so inform the register allocator that
2399 * we might read up to nir->info.gs.vertices_in registers.
2400 */
2401 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2402 retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
2403 fs_reg(icp_offset_bytes),
2404 brw_imm_ud(nir->info.gs.vertices_in * REG_SIZE));
2405 }
2406 } else {
2407 assert(gs_prog_data->invocations > 1);
2408
2409 if (nir_src_is_const(vertex_src)) {
2410 unsigned vertex = nir_src_as_uint(vertex_src);
2411 assert(devinfo->gen >= 9 || vertex <= 5);
2412 bld.MOV(icp_handle,
2413 retype(brw_vec1_grf(first_icp_handle + vertex / 8, vertex % 8),
2414 BRW_REGISTER_TYPE_UD));
2415 } else {
2416 /* The vertex index is non-constant. We need to use indirect
2417 * addressing to fetch the proper URB handle.
2418 *
2419 */
2420 fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2421
2422 /* Convert vertex_index to bytes (multiply by 4) */
2423 bld.SHL(icp_offset_bytes,
2424 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2425 brw_imm_ud(2u));
2426
2427 /* Use first_icp_handle as the base offset. There is one DWord
2428 * of URB handles per vertex, so inform the register allocator that
2429 * we might read up to ceil(nir->info.gs.vertices_in / 8) registers.
2430 */
2431 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2432 retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
2433 fs_reg(icp_offset_bytes),
2434 brw_imm_ud(DIV_ROUND_UP(nir->info.gs.vertices_in, 8) *
2435 REG_SIZE));
2436 }
2437 }
2438
2439 fs_inst *inst;
2440
2441 fs_reg tmp_dst = dst;
2442 fs_reg indirect_offset = get_nir_src(offset_src);
2443 unsigned num_iterations = 1;
2444 unsigned orig_num_components = num_components;
2445
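   /* 64-bit components are read as pairs of 32-bit halves into a temporary
    * and shuffled into the real destination after each read; more than two
    * 64-bit components require a second read message.
    */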
2446 if (type_sz(dst.type) == 8) {
2447 if (num_components > 2) {
2448 num_iterations = 2;
2449 num_components = 2;
2450 }
2451 fs_reg tmp = fs_reg(VGRF, alloc.allocate(4), dst.type);
2452 tmp_dst = tmp;
2453 first_component = first_component / 2;
2454 }
2455
2456 for (unsigned iter = 0; iter < num_iterations; iter++) {
2457 if (nir_src_is_const(offset_src)) {
2458 /* Constant indexing - use global offset. */
2459 if (first_component != 0) {
2460 unsigned read_components = num_components + first_component;
2461 fs_reg tmp = bld.vgrf(dst.type, read_components);
2462 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp, icp_handle);
2463 inst->size_written = read_components *
2464 tmp.component_size(inst->exec_size);
2465 for (unsigned i = 0; i < num_components; i++) {
2466 bld.MOV(offset(tmp_dst, bld, i),
2467 offset(tmp, bld, i + first_component));
2468 }
2469 } else {
2470 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp_dst,
2471 icp_handle);
2472 inst->size_written = num_components *
2473 tmp_dst.component_size(inst->exec_size);
2474 }
2475 inst->offset = base_offset + nir_src_as_uint(offset_src);
2476 inst->mlen = 1;
2477 } else {
2478 /* Indirect indexing - use per-slot offsets as well. */
2479 const fs_reg srcs[] = { icp_handle, indirect_offset };
2480 unsigned read_components = num_components + first_component;
2481 fs_reg tmp = bld.vgrf(dst.type, read_components);
2482 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2483 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2484 if (first_component != 0) {
2485 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
2486 payload);
2487 inst->size_written = read_components *
2488 tmp.component_size(inst->exec_size);
2489 for (unsigned i = 0; i < num_components; i++) {
2490 bld.MOV(offset(tmp_dst, bld, i),
2491 offset(tmp, bld, i + first_component));
2492 }
2493 } else {
2494 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp_dst,
2495 payload);
2496 inst->size_written = num_components *
2497 tmp_dst.component_size(inst->exec_size);
2498 }
2499 inst->offset = base_offset;
2500 inst->mlen = 2;
2501 }
2502
2503 if (type_sz(dst.type) == 8) {
2504 shuffle_from_32bit_read(bld,
2505 offset(dst, bld, iter * 2),
2506 retype(tmp_dst, BRW_REGISTER_TYPE_D),
2507 0,
2508 num_components);
2509 }
2510
2511 if (num_iterations > 1) {
2512 num_components = orig_num_components - 2;
2513          if (nir_src_is_const(offset_src)) {
2514 base_offset++;
2515 } else {
2516 fs_reg new_indirect = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2517 bld.ADD(new_indirect, indirect_offset, brw_imm_ud(1u));
2518 indirect_offset = new_indirect;
2519 }
2520 }
2521 }
2522 }
2523
2524 fs_reg
2525 fs_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
2526 {
2527 nir_src *offset_src = nir_get_io_offset_src(instr);
2528
2529 if (nir_src_is_const(*offset_src)) {
2530 /* The only constant offset we should find is 0. brw_nir.c's
2531 * add_const_offset_to_base() will fold other constant offsets
2532 * into instr->const_index[0].
2533 */
2534 assert(nir_src_as_uint(*offset_src) == 0);
2535 return fs_reg();
2536 }
2537
2538 return get_nir_src(*offset_src);
2539 }
2540
2541 void
2542 fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld,
2543 nir_intrinsic_instr *instr)
2544 {
2545 assert(stage == MESA_SHADER_VERTEX);
2546
2547 fs_reg dest;
2548 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2549 dest = get_nir_dest(instr->dest);
2550
2551 switch (instr->intrinsic) {
2552 case nir_intrinsic_load_vertex_id:
2553 case nir_intrinsic_load_base_vertex:
2554 unreachable("should be lowered by nir_lower_system_values()");
2555
2556 case nir_intrinsic_load_input: {
2557 fs_reg src = fs_reg(ATTR, nir_intrinsic_base(instr) * 4, dest.type);
2558 unsigned first_component = nir_intrinsic_component(instr);
2559 unsigned num_components = instr->num_components;
2560
2561 src = offset(src, bld, nir_src_as_uint(instr->src[0]));
2562
2563 if (type_sz(dest.type) == 8)
2564 first_component /= 2;
2565
2566       /* For 16-bit support, a temporary may be needed to copy from
2567        * the ATTR file.
2568 */
2569 shuffle_from_32bit_read(bld, dest, retype(src, BRW_REGISTER_TYPE_D),
2570 first_component, num_components);
2571 break;
2572 }
2573
2574 case nir_intrinsic_load_vertex_id_zero_base:
2575 case nir_intrinsic_load_instance_id:
2576 case nir_intrinsic_load_base_instance:
2577 case nir_intrinsic_load_draw_id:
2578 case nir_intrinsic_load_first_vertex:
2579 case nir_intrinsic_load_is_indexed_draw:
2580 unreachable("lowered by brw_nir_lower_vs_inputs");
2581
2582 default:
2583 nir_emit_intrinsic(bld, instr);
2584 break;
2585 }
2586 }
2587
2588 fs_reg
2589 fs_visitor::get_tcs_single_patch_icp_handle(const fs_builder &bld,
2590 nir_intrinsic_instr *instr)
2591 {
2592 struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
2593 const nir_src &vertex_src = instr->src[0];
2594 nir_intrinsic_instr *vertex_intrin = nir_src_as_intrinsic(vertex_src);
2595 fs_reg icp_handle;
2596
2597 if (nir_src_is_const(vertex_src)) {
2598 /* Emit a MOV to resolve <0,1,0> regioning. */
2599 icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2600 unsigned vertex = nir_src_as_uint(vertex_src);
2601 bld.MOV(icp_handle,
2602 retype(brw_vec1_grf(1 + (vertex >> 3), vertex & 7),
2603 BRW_REGISTER_TYPE_UD));
2604 } else if (tcs_prog_data->instances == 1 && vertex_intrin &&
2605 vertex_intrin->intrinsic == nir_intrinsic_load_invocation_id) {
2606 /* For the common case of only 1 instance, an array index of
2607 * gl_InvocationID means reading g1. Skip all the indirect work.
2608 */
2609 icp_handle = retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD);
2610 } else {
2611 /* The vertex index is non-constant. We need to use indirect
2612 * addressing to fetch the proper URB handle.
2613 */
2614 icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2615
2616 /* Each ICP handle is a single DWord (4 bytes) */
2617 fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2618 bld.SHL(vertex_offset_bytes,
2619 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2620 brw_imm_ud(2u));
2621
2622 /* Start at g1. We might read up to 4 registers. */
2623 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2624 retype(brw_vec8_grf(1, 0), icp_handle.type), vertex_offset_bytes,
2625 brw_imm_ud(4 * REG_SIZE));
2626 }
2627
2628 return icp_handle;
2629 }
2630
2631 fs_reg
2632 fs_visitor::get_tcs_eight_patch_icp_handle(const fs_builder &bld,
2633 nir_intrinsic_instr *instr)
2634 {
2635 struct brw_tcs_prog_key *tcs_key = (struct brw_tcs_prog_key *) key;
2636 struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
2637 const nir_src &vertex_src = instr->src[0];
2638
2639 unsigned first_icp_handle = tcs_prog_data->include_primitive_id ? 3 : 2;
2640
2641 if (nir_src_is_const(vertex_src)) {
2642 return fs_reg(retype(brw_vec8_grf(first_icp_handle +
2643 nir_src_as_uint(vertex_src), 0),
2644 BRW_REGISTER_TYPE_UD));
2645 }
2646
2647 /* The vertex index is non-constant. We need to use indirect
2648 * addressing to fetch the proper URB handle.
2649 *
2650 * First, we start with the sequence <7, 6, 5, 4, 3, 2, 1, 0>
2651 * indicating that channel <n> should read the handle from
2652 * DWord <n>. We convert that to bytes by multiplying by 4.
2653 *
2654 * Next, we convert the vertex index to bytes by multiplying
2655 * by 32 (shifting by 5), and add the two together. This is
2656 * the final indirect byte offset.
2657 */
2658 fs_reg icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2659 fs_reg sequence = bld.vgrf(BRW_REGISTER_TYPE_UW, 1);
2660 fs_reg channel_offsets = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2661 fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2662 fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2663
2664 /* sequence = <7, 6, 5, 4, 3, 2, 1, 0> */
2665 bld.MOV(sequence, fs_reg(brw_imm_v(0x76543210)));
2666 /* channel_offsets = 4 * sequence = <28, 24, 20, 16, 12, 8, 4, 0> */
2667 bld.SHL(channel_offsets, sequence, brw_imm_ud(2u));
2668 /* Convert vertex_index to bytes (multiply by 32) */
2669 bld.SHL(vertex_offset_bytes,
2670 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2671 brw_imm_ud(5u));
2672 bld.ADD(icp_offset_bytes, vertex_offset_bytes, channel_offsets);
2673
2674 /* Use first_icp_handle as the base offset. There is one register
2675 * of URB handles per vertex, so inform the register allocator that
2676      * we might read up to tcs_key->input_vertices registers.
2677 */
2678 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2679 retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
2680 icp_offset_bytes, brw_imm_ud(tcs_key->input_vertices * REG_SIZE));
2681
2682 return icp_handle;
2683 }
2684
2685 struct brw_reg
2686 fs_visitor::get_tcs_output_urb_handle()
2687 {
2688 struct brw_vue_prog_data *vue_prog_data = brw_vue_prog_data(prog_data);
2689
2690 if (vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_SINGLE_PATCH) {
2691 return retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD);
2692 } else {
2693 assert(vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_8_PATCH);
2694 return retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD);
2695 }
2696 }
2697
2698 void
2699 fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
2700 nir_intrinsic_instr *instr)
2701 {
2702 assert(stage == MESA_SHADER_TESS_CTRL);
2703 struct brw_tcs_prog_key *tcs_key = (struct brw_tcs_prog_key *) key;
2704 struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
2705 struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
2706
2707 bool eight_patch =
2708 vue_prog_data->dispatch_mode == DISPATCH_MODE_TCS_8_PATCH;
2709
2710 fs_reg dst;
2711 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2712 dst = get_nir_dest(instr->dest);
2713
2714 switch (instr->intrinsic) {
2715 case nir_intrinsic_load_primitive_id:
2716 bld.MOV(dst, fs_reg(eight_patch ? brw_vec8_grf(2, 0)
2717 : brw_vec1_grf(0, 1)));
2718 break;
2719 case nir_intrinsic_load_invocation_id:
2720 bld.MOV(retype(dst, invocation_id.type), invocation_id);
2721 break;
2722 case nir_intrinsic_load_patch_vertices_in:
2723 bld.MOV(retype(dst, BRW_REGISTER_TYPE_D),
2724 brw_imm_d(tcs_key->input_vertices));
2725 break;
2726
2727 case nir_intrinsic_barrier: {
2728 if (tcs_prog_data->instances == 1)
2729 break;
2730
2731 fs_reg m0 = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2732 fs_reg m0_2 = component(m0, 2);
2733
2734 const fs_builder chanbld = bld.exec_all().group(1, 0);
2735
2736 /* Zero the message header */
2737 bld.exec_all().MOV(m0, brw_imm_ud(0u));
2738
2739 if (devinfo->gen < 11) {
2740 /* Copy "Barrier ID" from r0.2, bits 16:13 */
2741 chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
2742 brw_imm_ud(INTEL_MASK(16, 13)));
2743
2744 /* Shift it up to bits 27:24. */
2745 chanbld.SHL(m0_2, m0_2, brw_imm_ud(11));
2746 } else {
2747 chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
2748 brw_imm_ud(INTEL_MASK(30, 24)));
2749 }
2750
2751 /* Set the Barrier Count and the enable bit */
2752 if (devinfo->gen < 11) {
2753 chanbld.OR(m0_2, m0_2,
2754 brw_imm_ud(tcs_prog_data->instances << 9 | (1 << 15)));
2755 } else {
2756 chanbld.OR(m0_2, m0_2,
2757 brw_imm_ud(tcs_prog_data->instances << 8 | (1 << 15)));
2758 }
2759
2760 bld.emit(SHADER_OPCODE_BARRIER, bld.null_reg_ud(), m0);
2761 break;
2762 }
2763
2764 case nir_intrinsic_load_input:
2765 unreachable("nir_lower_io should never give us these.");
2766 break;
2767
2768 case nir_intrinsic_load_per_vertex_input: {
2769 fs_reg indirect_offset = get_indirect_offset(instr);
2770 unsigned imm_offset = instr->const_index[0];
2771 fs_inst *inst;
2772
2773 fs_reg icp_handle =
2774 eight_patch ? get_tcs_eight_patch_icp_handle(bld, instr)
2775 : get_tcs_single_patch_icp_handle(bld, instr);
2776
2777 /* We can only read two double components with each URB read, so
2778 * we send two read messages in that case, each one loading up to
2779 * two double components.
2780 */
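      /* e.g. a dvec4 input is fetched with two messages of two doubles each. */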
2781 unsigned num_iterations = 1;
2782 unsigned num_components = instr->num_components;
2783 unsigned first_component = nir_intrinsic_component(instr);
2784 fs_reg orig_dst = dst;
2785 if (type_sz(dst.type) == 8) {
2786 first_component = first_component / 2;
2787 if (instr->num_components > 2) {
2788 num_iterations = 2;
2789 num_components = 2;
2790 }
2791
2792 fs_reg tmp = fs_reg(VGRF, alloc.allocate(4), dst.type);
2793 dst = tmp;
2794 }
2795
2796 for (unsigned iter = 0; iter < num_iterations; iter++) {
2797 if (indirect_offset.file == BAD_FILE) {
2798 /* Constant indexing - use global offset. */
2799 if (first_component != 0) {
2800 unsigned read_components = num_components + first_component;
2801 fs_reg tmp = bld.vgrf(dst.type, read_components);
2802 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp, icp_handle);
2803 for (unsigned i = 0; i < num_components; i++) {
2804 bld.MOV(offset(dst, bld, i),
2805 offset(tmp, bld, i + first_component));
2806 }
2807 } else {
2808 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
2809 }
2810 inst->offset = imm_offset;
2811 inst->mlen = 1;
2812 } else {
2813 /* Indirect indexing - use per-slot offsets as well. */
2814 const fs_reg srcs[] = { icp_handle, indirect_offset };
2815 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2816 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2817 if (first_component != 0) {
2818 unsigned read_components = num_components + first_component;
2819 fs_reg tmp = bld.vgrf(dst.type, read_components);
2820 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
2821 payload);
2822 for (unsigned i = 0; i < num_components; i++) {
2823 bld.MOV(offset(dst, bld, i),
2824 offset(tmp, bld, i + first_component));
2825 }
2826 } else {
2827 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst,
2828 payload);
2829 }
2830 inst->offset = imm_offset;
2831 inst->mlen = 2;
2832 }
2833 inst->size_written = (num_components + first_component) *
2834 inst->dst.component_size(inst->exec_size);
2835
2836          /* If we are reading 64-bit data using 32-bit read messages we need to
2837           * build proper 64-bit data elements by shuffling the low and high
2838 * 32-bit components around like we do for other things like UBOs
2839 * or SSBOs.
2840 */
2841 if (type_sz(dst.type) == 8) {
2842 shuffle_from_32bit_read(bld,
2843 offset(orig_dst, bld, iter * 2),
2844 retype(dst, BRW_REGISTER_TYPE_D),
2845 0, num_components);
2846 }
2847
2848 /* Copy the temporary to the destination to deal with writemasking.
2849 *
2850 * Also attempt to deal with gl_PointSize being in the .w component.
2851 */
2852 if (inst->offset == 0 && indirect_offset.file == BAD_FILE) {
2853 assert(type_sz(dst.type) < 8);
2854 inst->dst = bld.vgrf(dst.type, 4);
2855 inst->size_written = 4 * REG_SIZE;
2856 bld.MOV(dst, offset(inst->dst, bld, 3));
2857 }
2858
2859 /* If we are loading double data and we need a second read message
2860          /* If we are loading double data and need a second read message,
2861           * adjust the write offset
2862 if (num_iterations > 1) {
2863 num_components = instr->num_components - 2;
2864 imm_offset++;
2865 }
2866 }
2867 break;
2868 }
2869
2870 case nir_intrinsic_load_output:
2871 case nir_intrinsic_load_per_vertex_output: {
2872 fs_reg indirect_offset = get_indirect_offset(instr);
2873 unsigned imm_offset = instr->const_index[0];
2874 unsigned first_component = nir_intrinsic_component(instr);
2875
2876 struct brw_reg output_handles = get_tcs_output_urb_handle();
2877
2878 fs_inst *inst;
2879 if (indirect_offset.file == BAD_FILE) {
2880 /* This MOV replicates the output handle to all enabled channels
2881           * in SINGLE_PATCH mode.
2882 */
2883 fs_reg patch_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2884 bld.MOV(patch_handle, output_handles);
2885
2886 {
2887 if (first_component != 0) {
2888 unsigned read_components =
2889 instr->num_components + first_component;
2890 fs_reg tmp = bld.vgrf(dst.type, read_components);
2891 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp,
2892 patch_handle);
2893 inst->size_written = read_components * REG_SIZE;
2894 for (unsigned i = 0; i < instr->num_components; i++) {
2895 bld.MOV(offset(dst, bld, i),
2896 offset(tmp, bld, i + first_component));
2897 }
2898 } else {
2899 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst,
2900 patch_handle);
2901 inst->size_written = instr->num_components * REG_SIZE;
2902 }
2903 inst->offset = imm_offset;
2904 inst->mlen = 1;
2905 }
2906 } else {
2907 /* Indirect indexing - use per-slot offsets as well. */
2908 const fs_reg srcs[] = { output_handles, indirect_offset };
2909 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2910 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2911 if (first_component != 0) {
2912 unsigned read_components =
2913 instr->num_components + first_component;
2914 fs_reg tmp = bld.vgrf(dst.type, read_components);
2915 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
2916 payload);
2917 inst->size_written = read_components * REG_SIZE;
2918 for (unsigned i = 0; i < instr->num_components; i++) {
2919 bld.MOV(offset(dst, bld, i),
2920 offset(tmp, bld, i + first_component));
2921 }
2922 } else {
2923 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst,
2924 payload);
2925 inst->size_written = instr->num_components * REG_SIZE;
2926 }
2927 inst->offset = imm_offset;
2928 inst->mlen = 2;
2929 }
2930 break;
2931 }
2932
2933 case nir_intrinsic_store_output:
2934 case nir_intrinsic_store_per_vertex_output: {
2935 fs_reg value = get_nir_src(instr->src[0]);
2936 bool is_64bit = (instr->src[0].is_ssa ?
2937 instr->src[0].ssa->bit_size : instr->src[0].reg.reg->bit_size) == 64;
2938 fs_reg indirect_offset = get_indirect_offset(instr);
2939 unsigned imm_offset = instr->const_index[0];
2940 unsigned mask = instr->const_index[1];
2941 unsigned header_regs = 0;
2942 struct brw_reg output_handles = get_tcs_output_urb_handle();
2943
2944 fs_reg srcs[7];
2945 srcs[header_regs++] = output_handles;
2946
2947 if (indirect_offset.file != BAD_FILE) {
2948 srcs[header_regs++] = indirect_offset;
2949 }
2950
2951 if (mask == 0)
2952 break;
2953
2954 unsigned num_components = util_last_bit(mask);
2955 enum opcode opcode;
2956
2957 /* We can only pack two 64-bit components in a single message, so send
2958 * 2 messages if we have more components
2959 */
2960 unsigned num_iterations = 1;
2961 unsigned iter_components = num_components;
2962 unsigned first_component = nir_intrinsic_component(instr);
2963 if (is_64bit) {
2964 first_component = first_component / 2;
2965 if (instr->num_components > 2) {
2966 num_iterations = 2;
2967 iter_components = 2;
2968 }
2969 }
2970
2971 mask = mask << first_component;
2972
2973 for (unsigned iter = 0; iter < num_iterations; iter++) {
2974 if (!is_64bit && mask != WRITEMASK_XYZW) {
2975 srcs[header_regs++] = brw_imm_ud(mask << 16);
2976 opcode = indirect_offset.file != BAD_FILE ?
2977 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
2978 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
2979 } else if (is_64bit && ((mask & WRITEMASK_XY) != WRITEMASK_XY)) {
2980 /* Expand the 64-bit mask to 32-bit channels. We only handle
2981 * two channels in each iteration, so we only care about X/Y.
2982 */
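               /* e.g. a write of only the X double channel (mask == X):
                * mask32 == XY, covering the two 32-bit halves of that double.
                */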
2983 unsigned mask32 = 0;
2984 if (mask & WRITEMASK_X)
2985 mask32 |= WRITEMASK_XY;
2986 if (mask & WRITEMASK_Y)
2987 mask32 |= WRITEMASK_ZW;
2988
2989 /* If the mask does not include any of the channels X or Y there
2990 * is nothing to do in this iteration. Move on to the next couple
2991 * of 64-bit channels.
2992 */
2993 if (!mask32) {
2994 mask >>= 2;
2995 imm_offset++;
2996 continue;
2997 }
2998
2999 srcs[header_regs++] = brw_imm_ud(mask32 << 16);
3000 opcode = indirect_offset.file != BAD_FILE ?
3001 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
3002 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
3003 } else {
3004 opcode = indirect_offset.file != BAD_FILE ?
3005 SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT :
3006 SHADER_OPCODE_URB_WRITE_SIMD8;
3007 }
3008
3009 for (unsigned i = 0; i < iter_components; i++) {
3010 if (!(mask & (1 << (i + first_component))))
3011 continue;
3012
3013 if (!is_64bit) {
3014 srcs[header_regs + i + first_component] = offset(value, bld, i);
3015 } else {
3016 /* We need to shuffle the 64-bit data to match the layout
3017 * expected by our 32-bit URB write messages. We use a temporary
3018 * for that.
3019 */
3020 unsigned channel = iter * 2 + i;
3021 fs_reg dest = shuffle_for_32bit_write(bld, value, channel, 1);
3022
3023 srcs[header_regs + (i + first_component) * 2] = dest;
3024 srcs[header_regs + (i + first_component) * 2 + 1] =
3025 offset(dest, bld, 1);
3026 }
3027 }
3028
3029 unsigned mlen =
3030 header_regs + (is_64bit ? 2 * iter_components : iter_components) +
3031 (is_64bit ? 2 * first_component : first_component);
3032 fs_reg payload =
3033 bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
3034 bld.LOAD_PAYLOAD(payload, srcs, mlen, header_regs);
3035
3036 fs_inst *inst = bld.emit(opcode, bld.null_reg_ud(), payload);
3037 inst->offset = imm_offset;
3038 inst->mlen = mlen;
3039
3040 /* If this is a 64-bit attribute, select the next two 64-bit channels
3041 * to be handled in the next iteration.
3042 */
3043 if (is_64bit) {
3044 mask >>= 2;
3045 imm_offset++;
3046 }
3047 }
3048 break;
3049 }
3050
3051 default:
3052 nir_emit_intrinsic(bld, instr);
3053 break;
3054 }
3055 }
3056
3057 void
3058 fs_visitor::nir_emit_tes_intrinsic(const fs_builder &bld,
3059 nir_intrinsic_instr *instr)
3060 {
3061 assert(stage == MESA_SHADER_TESS_EVAL);
3062 struct brw_tes_prog_data *tes_prog_data = brw_tes_prog_data(prog_data);
3063
3064 fs_reg dest;
3065 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3066 dest = get_nir_dest(instr->dest);
3067
3068 switch (instr->intrinsic) {
3069 case nir_intrinsic_load_primitive_id:
3070 bld.MOV(dest, fs_reg(brw_vec1_grf(0, 1)));
3071 break;
3072 case nir_intrinsic_load_tess_coord:
3073 /* gl_TessCoord is part of the payload in g1-3 */
3074 for (unsigned i = 0; i < 3; i++) {
3075 bld.MOV(offset(dest, bld, i), fs_reg(brw_vec8_grf(1 + i, 0)));
3076 }
3077 break;
3078
3079 case nir_intrinsic_load_input:
3080 case nir_intrinsic_load_per_vertex_input: {
3081 fs_reg indirect_offset = get_indirect_offset(instr);
3082 unsigned imm_offset = instr->const_index[0];
3083 unsigned first_component = nir_intrinsic_component(instr);
3084
3085 if (type_sz(dest.type) == 8) {
3086 first_component = first_component / 2;
3087 }
3088
3089 fs_inst *inst;
3090 if (indirect_offset.file == BAD_FILE) {
3091 /* Arbitrarily only push up to 32 vec4 slots worth of data,
3092 * which is 16 registers (since each holds 2 vec4 slots).
3093 */
3094 unsigned slot_count = 1;
3095 if (type_sz(dest.type) == 8 && instr->num_components > 2)
3096 slot_count++;
3097
3098 const unsigned max_push_slots = 32;
3099 if (imm_offset + slot_count <= max_push_slots) {
3100 fs_reg src = fs_reg(ATTR, imm_offset / 2, dest.type);
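               /* Each push register holds two vec4 slots, so slot N lives in
                * ATTR N / 2 and an odd slot starts at component 16 / type_sz
                * (e.g. component 4 for 32-bit data).
                */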
3101 for (int i = 0; i < instr->num_components; i++) {
3102 unsigned comp = 16 / type_sz(dest.type) * (imm_offset % 2) +
3103 i + first_component;
3104 bld.MOV(offset(dest, bld, i), component(src, comp));
3105 }
3106
3107 tes_prog_data->base.urb_read_length =
3108 MAX2(tes_prog_data->base.urb_read_length,
3109 DIV_ROUND_UP(imm_offset + slot_count, 2));
3110 } else {
3111 /* Replicate the patch handle to all enabled channels */
3112 const fs_reg srcs[] = {
3113 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)
3114 };
3115 fs_reg patch_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
3116 bld.LOAD_PAYLOAD(patch_handle, srcs, ARRAY_SIZE(srcs), 0);
3117
3118 if (first_component != 0) {
3119 unsigned read_components =
3120 instr->num_components + first_component;
3121 fs_reg tmp = bld.vgrf(dest.type, read_components);
3122 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp,
3123 patch_handle);
3124 inst->size_written = read_components * REG_SIZE;
3125 for (unsigned i = 0; i < instr->num_components; i++) {
3126 bld.MOV(offset(dest, bld, i),
3127 offset(tmp, bld, i + first_component));
3128 }
3129 } else {
3130 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dest,
3131 patch_handle);
3132 inst->size_written = instr->num_components * REG_SIZE;
3133 }
3134 inst->mlen = 1;
3135 inst->offset = imm_offset;
3136 }
3137 } else {
3138 /* Indirect indexing - use per-slot offsets as well. */
3139
3140 /* We can only read two double components with each URB read, so
3141 * we send two read messages in that case, each one loading up to
3142 * two double components.
3143 */
3144 unsigned num_iterations = 1;
3145 unsigned num_components = instr->num_components;
3146 fs_reg orig_dest = dest;
3147 if (type_sz(dest.type) == 8) {
3148 if (instr->num_components > 2) {
3149 num_iterations = 2;
3150 num_components = 2;
3151 }
3152 fs_reg tmp = fs_reg(VGRF, alloc.allocate(4), dest.type);
3153 dest = tmp;
3154 }
3155
3156 for (unsigned iter = 0; iter < num_iterations; iter++) {
3157 const fs_reg srcs[] = {
3158 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD),
3159 indirect_offset
3160 };
3161 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
3162 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
3163
3164 if (first_component != 0) {
3165 unsigned read_components =
3166 num_components + first_component;
3167 fs_reg tmp = bld.vgrf(dest.type, read_components);
3168 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
3169 payload);
3170 for (unsigned i = 0; i < num_components; i++) {
3171 bld.MOV(offset(dest, bld, i),
3172 offset(tmp, bld, i + first_component));
3173 }
3174 } else {
3175 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dest,
3176 payload);
3177 }
3178 inst->mlen = 2;
3179 inst->offset = imm_offset;
3180 inst->size_written = (num_components + first_component) *
3181 inst->dst.component_size(inst->exec_size);
3182
3183                /* If we are reading 64-bit data using 32-bit read messages we need to
3184                 * build proper 64-bit data elements by shuffling the low and high
3185 * 32-bit components around like we do for other things like UBOs
3186 * or SSBOs.
3187 */
3188 if (type_sz(dest.type) == 8) {
3189 shuffle_from_32bit_read(bld,
3190 offset(orig_dest, bld, iter * 2),
3191 retype(dest, BRW_REGISTER_TYPE_D),
3192 0, num_components);
3193 }
3194
3195                /* If we are loading double data and need a second read message,
3196                 * adjust the offset
3197 */
3198 if (num_iterations > 1) {
3199 num_components = instr->num_components - 2;
3200 imm_offset++;
3201 }
3202 }
3203 }
3204 break;
3205 }
3206 default:
3207 nir_emit_intrinsic(bld, instr);
3208 break;
3209 }
3210 }
3211
3212 void
3213 fs_visitor::nir_emit_gs_intrinsic(const fs_builder &bld,
3214 nir_intrinsic_instr *instr)
3215 {
3216 assert(stage == MESA_SHADER_GEOMETRY);
3217 fs_reg indirect_offset;
3218
3219 fs_reg dest;
3220 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3221 dest = get_nir_dest(instr->dest);
3222
3223 switch (instr->intrinsic) {
3224 case nir_intrinsic_load_primitive_id:
3225 assert(stage == MESA_SHADER_GEOMETRY);
3226 assert(brw_gs_prog_data(prog_data)->include_primitive_id);
3227 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
3228 retype(fs_reg(brw_vec8_grf(2, 0)), BRW_REGISTER_TYPE_UD));
3229 break;
3230
3231 case nir_intrinsic_load_input:
3232 unreachable("load_input intrinsics are invalid for the GS stage");
3233
3234 case nir_intrinsic_load_per_vertex_input:
3235 emit_gs_input_load(dest, instr->src[0], instr->const_index[0],
3236 instr->src[1], instr->num_components,
3237 nir_intrinsic_component(instr));
3238 break;
3239
3240 case nir_intrinsic_emit_vertex_with_counter:
3241 emit_gs_vertex(instr->src[0], instr->const_index[0]);
3242 break;
3243
3244 case nir_intrinsic_end_primitive_with_counter:
3245 emit_gs_end_primitive(instr->src[0]);
3246 break;
3247
3248 case nir_intrinsic_set_vertex_count:
3249 bld.MOV(this->final_gs_vertex_count, get_nir_src(instr->src[0]));
3250 break;
3251
3252 case nir_intrinsic_load_invocation_id: {
3253 fs_reg val = nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
3254 assert(val.file != BAD_FILE);
3255 dest.type = val.type;
3256 bld.MOV(dest, val);
3257 break;
3258 }
3259
3260 default:
3261 nir_emit_intrinsic(bld, instr);
3262 break;
3263 }
3264 }
3265
3266 /**
3267 * Fetch the current render target layer index.
3268 */
3269 static fs_reg
3270 fetch_render_target_array_index(const fs_builder &bld)
3271 {
3272 if (bld.shader->devinfo->gen >= 6) {
3273 /* The render target array index is provided in the thread payload as
3274 * bits 26:16 of r0.0.
3275 */
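      /* Reading r0.0 as a UW at subregister 1 yields bits 31:16; masking with
       * 0x7ff keeps the low 11 of those, i.e. bits 26:16.
       */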
3276 const fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_UD);
3277 bld.AND(idx, brw_uw1_reg(BRW_GENERAL_REGISTER_FILE, 0, 1),
3278 brw_imm_uw(0x7ff));
3279 return idx;
3280 } else {
3281 /* Pre-SNB we only ever render into the first layer of the framebuffer
3282 * since layered rendering is not implemented.
3283 */
3284 return brw_imm_ud(0);
3285 }
3286 }
3287
3288 /**
3289 * Fake non-coherent framebuffer read implemented using TXF to fetch from the
3290 * framebuffer at the current fragment coordinates and sample index.
3291 */
3292 fs_inst *
3293 fs_visitor::emit_non_coherent_fb_read(const fs_builder &bld, const fs_reg &dst,
3294 unsigned target)
3295 {
3296 const struct gen_device_info *devinfo = bld.shader->devinfo;
3297
3298 assert(bld.shader->stage == MESA_SHADER_FRAGMENT);
3299 const brw_wm_prog_key *wm_key =
3300 reinterpret_cast<const brw_wm_prog_key *>(key);
3301 assert(!wm_key->coherent_fb_fetch);
3302 const struct brw_wm_prog_data *wm_prog_data =
3303 brw_wm_prog_data(stage_prog_data);
3304
3305 /* Calculate the surface index relative to the start of the texture binding
3306 * table block, since that's what the texturing messages expect.
3307 */
3308 const unsigned surface = target +
3309 wm_prog_data->binding_table.render_target_read_start -
3310 wm_prog_data->base.binding_table.texture_start;
3311
3312 /* Calculate the fragment coordinates. */
3313 const fs_reg coords = bld.vgrf(BRW_REGISTER_TYPE_UD, 3);
3314 bld.MOV(offset(coords, bld, 0), pixel_x);
3315 bld.MOV(offset(coords, bld, 1), pixel_y);
3316 bld.MOV(offset(coords, bld, 2), fetch_render_target_array_index(bld));
3317
3318 /* Calculate the sample index and MCS payload when multisampling. Luckily
3319 * the MCS fetch message behaves deterministically for UMS surfaces, so it
3320 * shouldn't be necessary to recompile based on whether the framebuffer is
3321 * CMS or UMS.
3322 */
3323 if (wm_key->multisample_fbo &&
3324 nir_system_values[SYSTEM_VALUE_SAMPLE_ID].file == BAD_FILE)
3325 nir_system_values[SYSTEM_VALUE_SAMPLE_ID] = *emit_sampleid_setup();
3326
3327 const fs_reg sample = nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
3328 const fs_reg mcs = wm_key->multisample_fbo ?
3329 emit_mcs_fetch(coords, 3, brw_imm_ud(surface), fs_reg()) : fs_reg();
3330
3331 /* Use either a normal or a CMS texel fetch message depending on whether
3332 * the framebuffer is single or multisample. On SKL+ use the wide CMS
3333      * message just in case the framebuffer uses 16x multisampling; it should
3334 * be equivalent to the normal CMS fetch for lower multisampling modes.
3335 */
3336 const opcode op = !wm_key->multisample_fbo ? SHADER_OPCODE_TXF_LOGICAL :
3337 devinfo->gen >= 9 ? SHADER_OPCODE_TXF_CMS_W_LOGICAL :
3338 SHADER_OPCODE_TXF_CMS_LOGICAL;
3339
3340 /* Emit the instruction. */
3341 fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
3342 srcs[TEX_LOGICAL_SRC_COORDINATE] = coords;
3343 srcs[TEX_LOGICAL_SRC_LOD] = brw_imm_ud(0);
3344 srcs[TEX_LOGICAL_SRC_SAMPLE_INDEX] = sample;
3345 srcs[TEX_LOGICAL_SRC_MCS] = mcs;
3346 srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(surface);
3347 srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_ud(0);
3348 srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_ud(3);
3349 srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_ud(0);
3350
3351 fs_inst *inst = bld.emit(op, dst, srcs, ARRAY_SIZE(srcs));
3352 inst->size_written = 4 * inst->dst.component_size(inst->exec_size);
3353
3354 return inst;
3355 }
3356
3357 /**
3358 * Actual coherent framebuffer read implemented using the native render target
3359 * read message. Requires SKL+.
3360 */
3361 static fs_inst *
3362 emit_coherent_fb_read(const fs_builder &bld, const fs_reg &dst, unsigned target)
3363 {
3364 assert(bld.shader->devinfo->gen >= 9);
3365 fs_inst *inst = bld.emit(FS_OPCODE_FB_READ_LOGICAL, dst);
3366 inst->target = target;
3367 inst->size_written = 4 * inst->dst.component_size(inst->exec_size);
3368
3369 return inst;
3370 }
3371
3372 static fs_reg
3373 alloc_temporary(const fs_builder &bld, unsigned size, fs_reg *regs, unsigned n)
3374 {
3375 if (n && regs[0].file != BAD_FILE) {
3376 return regs[0];
3377
3378 } else {
3379 const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, size);
3380
3381 for (unsigned i = 0; i < n; i++)
3382 regs[i] = tmp;
3383
3384 return tmp;
3385 }
3386 }
3387
3388 static fs_reg
3389 alloc_frag_output(fs_visitor *v, unsigned location)
3390 {
3391 assert(v->stage == MESA_SHADER_FRAGMENT);
3392 const brw_wm_prog_key *const key =
3393 reinterpret_cast<const brw_wm_prog_key *>(v->key);
3394 const unsigned l = GET_FIELD(location, BRW_NIR_FRAG_OUTPUT_LOCATION);
3395 const unsigned i = GET_FIELD(location, BRW_NIR_FRAG_OUTPUT_INDEX);
3396
3397 if (i > 0 || (key->force_dual_color_blend && l == FRAG_RESULT_DATA1))
3398 return alloc_temporary(v->bld, 4, &v->dual_src_output, 1);
3399
3400 else if (l == FRAG_RESULT_COLOR)
3401 return alloc_temporary(v->bld, 4, v->outputs,
3402 MAX2(key->nr_color_regions, 1));
3403
3404 else if (l == FRAG_RESULT_DEPTH)
3405 return alloc_temporary(v->bld, 1, &v->frag_depth, 1);
3406
3407 else if (l == FRAG_RESULT_STENCIL)
3408 return alloc_temporary(v->bld, 1, &v->frag_stencil, 1);
3409
3410 else if (l == FRAG_RESULT_SAMPLE_MASK)
3411 return alloc_temporary(v->bld, 1, &v->sample_mask, 1);
3412
3413 else if (l >= FRAG_RESULT_DATA0 &&
3414 l < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS)
3415 return alloc_temporary(v->bld, 4,
3416 &v->outputs[l - FRAG_RESULT_DATA0], 1);
3417
3418 else
3419 unreachable("Invalid location");
3420 }
3421
3422 void
3423 fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
3424 nir_intrinsic_instr *instr)
3425 {
3426 assert(stage == MESA_SHADER_FRAGMENT);
3427
3428 fs_reg dest;
3429 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3430 dest = get_nir_dest(instr->dest);
3431
3432 switch (instr->intrinsic) {
3433 case nir_intrinsic_load_front_face:
3434 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
3435 *emit_frontfacing_interpolation());
3436 break;
3437
3438 case nir_intrinsic_load_sample_pos: {
3439 fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
3440 assert(sample_pos.file != BAD_FILE);
3441 dest.type = sample_pos.type;
3442 bld.MOV(dest, sample_pos);
3443 bld.MOV(offset(dest, bld, 1), offset(sample_pos, bld, 1));
3444 break;
3445 }
3446
3447 case nir_intrinsic_load_layer_id:
3448 dest.type = BRW_REGISTER_TYPE_UD;
3449 bld.MOV(dest, fetch_render_target_array_index(bld));
3450 break;
3451
3452 case nir_intrinsic_load_helper_invocation:
3453 case nir_intrinsic_load_sample_mask_in:
3454 case nir_intrinsic_load_sample_id: {
3455 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
3456 fs_reg val = nir_system_values[sv];
3457 assert(val.file != BAD_FILE);
3458 dest.type = val.type;
3459 bld.MOV(dest, val);
3460 break;
3461 }
3462
3463 case nir_intrinsic_store_output: {
3464 const fs_reg src = get_nir_src(instr->src[0]);
3465 const unsigned store_offset = nir_src_as_uint(instr->src[1]);
3466 const unsigned location = nir_intrinsic_base(instr) +
3467 SET_FIELD(store_offset, BRW_NIR_FRAG_OUTPUT_LOCATION);
3468 const fs_reg new_dest = retype(alloc_frag_output(this, location),
3469 src.type);
3470
3471 for (unsigned j = 0; j < instr->num_components; j++)
3472 bld.MOV(offset(new_dest, bld, nir_intrinsic_component(instr) + j),
3473 offset(src, bld, j));
3474
3475 break;
3476 }
3477
3478 case nir_intrinsic_load_output: {
3479 const unsigned l = GET_FIELD(nir_intrinsic_base(instr),
3480 BRW_NIR_FRAG_OUTPUT_LOCATION);
3481 assert(l >= FRAG_RESULT_DATA0);
3482 const unsigned load_offset = nir_src_as_uint(instr->src[0]);
3483 const unsigned target = l - FRAG_RESULT_DATA0 + load_offset;
3484 const fs_reg tmp = bld.vgrf(dest.type, 4);
3485
3486 if (reinterpret_cast<const brw_wm_prog_key *>(key)->coherent_fb_fetch)
3487 emit_coherent_fb_read(bld, tmp, target);
3488 else
3489 emit_non_coherent_fb_read(bld, tmp, target);
3490
3491 for (unsigned j = 0; j < instr->num_components; j++) {
3492 bld.MOV(offset(dest, bld, j),
3493 offset(tmp, bld, nir_intrinsic_component(instr) + j));
3494 }
3495
3496 break;
3497 }
3498
3499 case nir_intrinsic_discard:
3500 case nir_intrinsic_discard_if: {
3501 /* We track our discarded pixels in f0.1. By predicating on it, we can
3502 * update just the flag bits that aren't yet discarded. If there's no
3503 * condition, we emit a CMP of g0 != g0, so all currently executing
3504 * channels will get turned off.
3505 */
3506 fs_inst *cmp;
3507 if (instr->intrinsic == nir_intrinsic_discard_if) {
3508 cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
3509 brw_imm_d(0), BRW_CONDITIONAL_Z);
3510 } else {
3511 fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
3512 BRW_REGISTER_TYPE_UW));
3513 cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
3514 }
3515 cmp->predicate = BRW_PREDICATE_NORMAL;
3516 cmp->flag_subreg = 1;
3517
3518 if (devinfo->gen >= 6) {
3519 emit_discard_jump();
3520 }
3521
3522 limit_dispatch_width(16, "Fragment discard not implemented in SIMD32 mode.");
3523 break;
3524 }
3525
3526 case nir_intrinsic_load_input: {
3527 /* load_input is only used for flat inputs */
3528 unsigned base = nir_intrinsic_base(instr);
3529 unsigned comp = nir_intrinsic_component(instr);
3530 unsigned num_components = instr->num_components;
3531 fs_reg orig_dest = dest;
3532 enum brw_reg_type type = dest.type;
3533
3534 /* Special case fields in the VUE header */
3535 if (base == VARYING_SLOT_LAYER)
3536 comp = 1;
3537 else if (base == VARYING_SLOT_VIEWPORT)
3538 comp = 2;
3539
3540 if (nir_dest_bit_size(instr->dest) == 64) {
3541          /* const_index is in 32-bit units, which may not be aligned for
3542           * DF. We need to read the double vector as if it were a float
3543 * vector of twice the number of components to fetch the right data.
3544 */
3545 type = BRW_REGISTER_TYPE_F;
3546 num_components *= 2;
3547 dest = bld.vgrf(type, num_components);
3548 }
3549
3550 for (unsigned int i = 0; i < num_components; i++) {
3551 bld.MOV(offset(retype(dest, type), bld, i),
3552 retype(component(interp_reg(base, comp + i), 3), type));
3553 }
3554
3555 if (nir_dest_bit_size(instr->dest) == 64) {
3556 shuffle_from_32bit_read(bld, orig_dest, dest, 0,
3557 instr->num_components);
3558 }
3559 break;
3560 }
3561
3562 case nir_intrinsic_load_barycentric_pixel:
3563 case nir_intrinsic_load_barycentric_centroid:
3564 case nir_intrinsic_load_barycentric_sample:
3565 /* Do nothing - load_interpolated_input handling will handle it later. */
3566 break;
3567
3568 case nir_intrinsic_load_barycentric_at_sample: {
3569 const glsl_interp_mode interpolation =
3570 (enum glsl_interp_mode) nir_intrinsic_interp_mode(instr);
3571
3572 if (nir_src_is_const(instr->src[0])) {
3573 unsigned msg_data = nir_src_as_uint(instr->src[0]) << 4;
3574
3575 emit_pixel_interpolater_send(bld,
3576 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
3577 dest,
3578 fs_reg(), /* src */
3579 brw_imm_ud(msg_data),
3580 interpolation);
3581 } else {
3582 const fs_reg sample_src = retype(get_nir_src(instr->src[0]),
3583 BRW_REGISTER_TYPE_UD);
3584
3585 if (nir_src_is_dynamically_uniform(instr->src[0])) {
3586 const fs_reg sample_id = bld.emit_uniformize(sample_src);
3587 const fs_reg msg_data = vgrf(glsl_type::uint_type);
3588 bld.exec_all().group(1, 0)
3589 .SHL(msg_data, sample_id, brw_imm_ud(4u));
3590 emit_pixel_interpolater_send(bld,
3591 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
3592 dest,
3593 fs_reg(), /* src */
3594 msg_data,
3595 interpolation);
3596 } else {
3597 /* Make a loop that sends a message to the pixel interpolater
3598 * for the sample number in each live channel. If there are
3599 * multiple channels with the same sample number then these
3600             * will be handled simultaneously with a single iteration of
3601 * the loop.
3602 */
3603 bld.emit(BRW_OPCODE_DO);
3604
3605             /* Get the next live sample number into sample_id */
3606 const fs_reg sample_id = bld.emit_uniformize(sample_src);
3607
3608 /* Set the flag register so that we can perform the send
3609 * message on all channels that have the same sample number
3610 */
3611 bld.CMP(bld.null_reg_ud(),
3612 sample_src, sample_id,
3613 BRW_CONDITIONAL_EQ);
3614 const fs_reg msg_data = vgrf(glsl_type::uint_type);
3615 bld.exec_all().group(1, 0)
3616 .SHL(msg_data, sample_id, brw_imm_ud(4u));
3617 fs_inst *inst =
3618 emit_pixel_interpolater_send(bld,
3619 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
3620 dest,
3621 fs_reg(), /* src */
3622 component(msg_data, 0),
3623 interpolation);
3624 set_predicate(BRW_PREDICATE_NORMAL, inst);
3625
3626 /* Continue the loop if there are any live channels left */
3627 set_predicate_inv(BRW_PREDICATE_NORMAL,
3628 true, /* inverse */
3629 bld.emit(BRW_OPCODE_WHILE));
3630 }
3631 }
3632 break;
3633 }
3634
3635 case nir_intrinsic_load_barycentric_at_offset: {
3636 const glsl_interp_mode interpolation =
3637 (enum glsl_interp_mode) nir_intrinsic_interp_mode(instr);
3638
3639 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
3640
3641 if (const_offset) {
3642 assert(nir_src_bit_size(instr->src[0]) == 32);
3643 unsigned off_x = MIN2((int)(const_offset[0].f32 * 16), 7) & 0xf;
3644 unsigned off_y = MIN2((int)(const_offset[1].f32 * 16), 7) & 0xf;
3645
3646 emit_pixel_interpolater_send(bld,
3647 FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
3648 dest,
3649 fs_reg(), /* src */
3650 brw_imm_ud(off_x | (off_y << 4)),
3651 interpolation);
3652 } else {
3653 fs_reg src = vgrf(glsl_type::ivec2_type);
3654 fs_reg offset_src = retype(get_nir_src(instr->src[0]),
3655 BRW_REGISTER_TYPE_F);
3656 for (int i = 0; i < 2; i++) {
3657 fs_reg temp = vgrf(glsl_type::float_type);
3658 bld.MUL(temp, offset(offset_src, bld, i), brw_imm_f(16.0f));
3659 fs_reg itemp = vgrf(glsl_type::int_type);
3660 /* float to int */
3661 bld.MOV(itemp, temp);
3662
3663 /* Clamp the upper end of the range to +7/16.
3664 * ARB_gpu_shader5 requires that we support a maximum offset
3665 * of +0.5, which isn't representable in a S0.4 value -- if
3666 * we didn't clamp it, we'd end up with -8/16, which is the
3667 * opposite of what the shader author wanted.
3668 *
3669 * This is legal due to ARB_gpu_shader5's quantization
3670 * rules:
3671 *
3672 * "Not all values of <offset> may be supported; x and y
3673 * offsets may be rounded to fixed-point values with the
3674 * number of fraction bits given by the
3675 * implementation-dependent constant
3676 * FRAGMENT_INTERPOLATION_OFFSET_BITS"
3677 */
3678 set_condmod(BRW_CONDITIONAL_L,
3679 bld.SEL(offset(src, bld, i), itemp, brw_imm_d(7)));
3680 }
3681
3682 const enum opcode opcode = FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET;
3683 emit_pixel_interpolater_send(bld,
3684 opcode,
3685 dest,
3686 src,
3687 brw_imm_ud(0u),
3688 interpolation);
3689 }
3690 break;
3691 }
3692
3693 case nir_intrinsic_load_interpolated_input: {
3694 if (nir_intrinsic_base(instr) == VARYING_SLOT_POS) {
3695 emit_fragcoord_interpolation(dest);
3696 break;
3697 }
3698
3699 assert(instr->src[0].ssa &&
3700 instr->src[0].ssa->parent_instr->type == nir_instr_type_intrinsic);
3701 nir_intrinsic_instr *bary_intrinsic =
3702 nir_instr_as_intrinsic(instr->src[0].ssa->parent_instr);
3703 nir_intrinsic_op bary_intrin = bary_intrinsic->intrinsic;
3704 enum glsl_interp_mode interp_mode =
3705 (enum glsl_interp_mode) nir_intrinsic_interp_mode(bary_intrinsic);
3706 fs_reg dst_xy;
3707
3708 if (bary_intrin == nir_intrinsic_load_barycentric_at_offset ||
3709 bary_intrin == nir_intrinsic_load_barycentric_at_sample) {
3710 /* Use the result of the PI message */
3711 dst_xy = retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_F);
3712 } else {
3713 /* Use the delta_xy values computed from the payload */
3714 enum brw_barycentric_mode bary =
3715 brw_barycentric_mode(interp_mode, bary_intrin);
3716
3717 dst_xy = this->delta_xy[bary];
3718 }
3719
3720 for (unsigned int i = 0; i < instr->num_components; i++) {
3721 fs_reg interp =
3722 interp_reg(nir_intrinsic_base(instr),
3723 nir_intrinsic_component(instr) + i);
3724 interp.type = BRW_REGISTER_TYPE_F;
3725 dest.type = BRW_REGISTER_TYPE_F;
3726
3727 if (devinfo->gen < 6 && interp_mode == INTERP_MODE_SMOOTH) {
3728 fs_reg tmp = vgrf(glsl_type::float_type);
3729 bld.emit(FS_OPCODE_LINTERP, tmp, dst_xy, interp);
3730 bld.MUL(offset(dest, bld, i), tmp, this->pixel_w);
3731 } else {
3732 bld.emit(FS_OPCODE_LINTERP, offset(dest, bld, i), dst_xy, interp);
3733 }
3734 }
3735 break;
3736 }
3737
3738 default:
3739 nir_emit_intrinsic(bld, instr);
3740 break;
3741 }
3742 }
3743
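/* If the value being atomically added is a compile-time constant +1 or -1,
 * use the dedicated INC/DEC atomic ops, which take no data payload.
 */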
3744 static int
3745 get_op_for_atomic_add(nir_intrinsic_instr *instr, unsigned src)
3746 {
3747 if (nir_src_is_const(instr->src[src])) {
3748 int64_t add_val = nir_src_as_int(instr->src[src]);
3749 if (add_val == 1)
3750 return BRW_AOP_INC;
3751 else if (add_val == -1)
3752 return BRW_AOP_DEC;
3753 }
3754
3755 return BRW_AOP_ADD;
3756 }
3757
3758 void
3759 fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
3760 nir_intrinsic_instr *instr)
3761 {
3762 assert(stage == MESA_SHADER_COMPUTE);
3763 struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(prog_data);
3764
3765 fs_reg dest;
3766 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3767 dest = get_nir_dest(instr->dest);
3768
3769 switch (instr->intrinsic) {
3770 case nir_intrinsic_barrier:
3771 emit_barrier();
3772 cs_prog_data->uses_barrier = true;
3773 break;
3774
3775 case nir_intrinsic_load_subgroup_id:
3776 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD), subgroup_id);
3777 break;
3778
3779 case nir_intrinsic_load_local_invocation_id:
3780 case nir_intrinsic_load_work_group_id: {
3781 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
3782 fs_reg val = nir_system_values[sv];
3783 assert(val.file != BAD_FILE);
3784 dest.type = val.type;
3785 for (unsigned i = 0; i < 3; i++)
3786 bld.MOV(offset(dest, bld, i), offset(val, bld, i));
3787 break;
3788 }
3789
3790 case nir_intrinsic_load_num_work_groups: {
3791 const unsigned surface =
3792 cs_prog_data->binding_table.work_groups_start;
3793
3794 cs_prog_data->uses_num_work_groups = true;
3795
3796 fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
3797 srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(surface);
3798 srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
3799 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(1); /* num components */
3800
3801 /* Read the 3 GLuint components of gl_NumWorkGroups */
3802 for (unsigned i = 0; i < 3; i++) {
3803 srcs[SURFACE_LOGICAL_SRC_ADDRESS] = brw_imm_ud(i << 2);
3804 bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
3805 offset(dest, bld, i), srcs, SURFACE_LOGICAL_NUM_SRCS);
3806 }
3807 break;
3808 }
3809
3810 case nir_intrinsic_shared_atomic_add:
3811 nir_emit_shared_atomic(bld, get_op_for_atomic_add(instr, 1), instr);
3812 break;
3813 case nir_intrinsic_shared_atomic_imin:
3814 nir_emit_shared_atomic(bld, BRW_AOP_IMIN, instr);
3815 break;
3816 case nir_intrinsic_shared_atomic_umin:
3817 nir_emit_shared_atomic(bld, BRW_AOP_UMIN, instr);
3818 break;
3819 case nir_intrinsic_shared_atomic_imax:
3820 nir_emit_shared_atomic(bld, BRW_AOP_IMAX, instr);
3821 break;
3822 case nir_intrinsic_shared_atomic_umax:
3823 nir_emit_shared_atomic(bld, BRW_AOP_UMAX, instr);
3824 break;
3825 case nir_intrinsic_shared_atomic_and:
3826 nir_emit_shared_atomic(bld, BRW_AOP_AND, instr);
3827 break;
3828 case nir_intrinsic_shared_atomic_or:
3829 nir_emit_shared_atomic(bld, BRW_AOP_OR, instr);
3830 break;
3831 case nir_intrinsic_shared_atomic_xor:
3832 nir_emit_shared_atomic(bld, BRW_AOP_XOR, instr);
3833 break;
3834 case nir_intrinsic_shared_atomic_exchange:
3835 nir_emit_shared_atomic(bld, BRW_AOP_MOV, instr);
3836 break;
3837 case nir_intrinsic_shared_atomic_comp_swap:
3838 nir_emit_shared_atomic(bld, BRW_AOP_CMPWR, instr);
3839 break;
3840 case nir_intrinsic_shared_atomic_fmin:
3841 nir_emit_shared_atomic_float(bld, BRW_AOP_FMIN, instr);
3842 break;
3843 case nir_intrinsic_shared_atomic_fmax:
3844 nir_emit_shared_atomic_float(bld, BRW_AOP_FMAX, instr);
3845 break;
3846 case nir_intrinsic_shared_atomic_fcomp_swap:
3847 nir_emit_shared_atomic_float(bld, BRW_AOP_FCMPWR, instr);
3848 break;
3849
3850 case nir_intrinsic_load_shared: {
3851 assert(devinfo->gen >= 7);
3852 assert(stage == MESA_SHADER_COMPUTE);
3853
3854 const unsigned bit_size = nir_dest_bit_size(instr->dest);
3855 fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
3856 srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GEN7_BTI_SLM);
3857 srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[0]);
3858 srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
3859
3860 /* Make dest unsigned because that's what the temporary will be */
3861 dest.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
3862
3863 /* Read the vector */
3864 if (nir_intrinsic_align(instr) >= 4) {
3865 assert(nir_dest_bit_size(instr->dest) == 32);
3866 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
3867 fs_inst *inst =
3868 bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
3869 dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
3870 inst->size_written = instr->num_components * dispatch_width * 4;
3871 } else {
3872 assert(nir_dest_bit_size(instr->dest) <= 32);
3873 assert(nir_dest_num_components(instr->dest) == 1);
3874 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
3875
3876 fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD);
3877 bld.emit(SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL,
3878 read_result, srcs, SURFACE_LOGICAL_NUM_SRCS);
3879 bld.MOV(dest, read_result);
3880 }
3881 break;
3882 }
3883
3884 case nir_intrinsic_store_shared: {
3885 assert(devinfo->gen >= 7);
3886 assert(stage == MESA_SHADER_COMPUTE);
3887
3888 const unsigned bit_size = nir_src_bit_size(instr->src[0]);
3889 fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
3890 srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GEN7_BTI_SLM);
3891 srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
3892 srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
3893
3894 fs_reg data = get_nir_src(instr->src[0]);
3895 data.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
3896
3897 assert(nir_intrinsic_write_mask(instr) ==
3898 (1u << instr->num_components) - 1);
3899 if (nir_intrinsic_align(instr) >= 4) {
3900 assert(nir_src_bit_size(instr->src[0]) == 32);
3901 assert(nir_src_num_components(instr->src[0]) <= 4);
3902 srcs[SURFACE_LOGICAL_SRC_DATA] = data;
3903 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
3904 bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,
3905 fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
3906 } else {
3907 assert(nir_src_bit_size(instr->src[0]) <= 32);
3908 assert(nir_src_num_components(instr->src[0]) == 1);
3909 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
3910
3911 srcs[SURFACE_LOGICAL_SRC_DATA] = bld.vgrf(BRW_REGISTER_TYPE_UD);
3912 bld.MOV(srcs[SURFACE_LOGICAL_SRC_DATA], data);
3913
3914 bld.emit(SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL,
3915 fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
3916 }
3917 break;
3918 }
3919
3920 default:
3921 nir_emit_intrinsic(bld, instr);
3922 break;
3923 }
3924 }
3925
3926 static fs_reg
3927 brw_nir_reduction_op_identity(const fs_builder &bld,
3928 nir_op op, brw_reg_type type)
3929 {
3930 nir_const_value value = nir_alu_binop_identity(op, type_sz(type) * 8);
3931 switch (type_sz(type)) {
3932 case 2:
3933 assert(type != BRW_REGISTER_TYPE_HF);
3934 return retype(brw_imm_uw(value.u16), type);
3935 case 4:
3936 return retype(brw_imm_ud(value.u32), type);
3937 case 8:
3938 if (type == BRW_REGISTER_TYPE_DF)
3939 return setup_imm_df(bld, value.f64);
3940 else
3941 return retype(brw_imm_u64(value.u64), type);
3942 default:
3943 unreachable("Invalid type size");
3944 }
3945 }
3946
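/* Map a NIR reduction op to the ALU opcode used for the scan. min/max have
 * no dedicated opcode; they become a SEL whose conditional modifier comes
 * from brw_cond_mod_for_nir_reduction_op() below.
 */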
3947 static opcode
3948 brw_op_for_nir_reduction_op(nir_op op)
3949 {
3950 switch (op) {
3951 case nir_op_iadd: return BRW_OPCODE_ADD;
3952 case nir_op_fadd: return BRW_OPCODE_ADD;
3953 case nir_op_imul: return BRW_OPCODE_MUL;
3954 case nir_op_fmul: return BRW_OPCODE_MUL;
3955 case nir_op_imin: return BRW_OPCODE_SEL;
3956 case nir_op_umin: return BRW_OPCODE_SEL;
3957 case nir_op_fmin: return BRW_OPCODE_SEL;
3958 case nir_op_imax: return BRW_OPCODE_SEL;
3959 case nir_op_umax: return BRW_OPCODE_SEL;
3960 case nir_op_fmax: return BRW_OPCODE_SEL;
3961 case nir_op_iand: return BRW_OPCODE_AND;
3962 case nir_op_ior: return BRW_OPCODE_OR;
3963 case nir_op_ixor: return BRW_OPCODE_XOR;
3964 default:
3965 unreachable("Invalid reduction operation");
3966 }
3967 }
3968
3969 static brw_conditional_mod
3970 brw_cond_mod_for_nir_reduction_op(nir_op op)
3971 {
3972 switch (op) {
3973 case nir_op_iadd: return BRW_CONDITIONAL_NONE;
3974 case nir_op_fadd: return BRW_CONDITIONAL_NONE;
3975 case nir_op_imul: return BRW_CONDITIONAL_NONE;
3976 case nir_op_fmul: return BRW_CONDITIONAL_NONE;
3977 case nir_op_imin: return BRW_CONDITIONAL_L;
3978 case nir_op_umin: return BRW_CONDITIONAL_L;
3979 case nir_op_fmin: return BRW_CONDITIONAL_L;
3980 case nir_op_imax: return BRW_CONDITIONAL_GE;
3981 case nir_op_umax: return BRW_CONDITIONAL_GE;
3982 case nir_op_fmax: return BRW_CONDITIONAL_GE;
3983 case nir_op_iand: return BRW_CONDITIONAL_NONE;
3984 case nir_op_ior: return BRW_CONDITIONAL_NONE;
3985 case nir_op_ixor: return BRW_CONDITIONAL_NONE;
3986 default:
3987 unreachable("Invalid reduction operation");
3988 }
3989 }
3990
3991 fs_reg
3992 fs_visitor::get_nir_image_intrinsic_image(const brw::fs_builder &bld,
3993 nir_intrinsic_instr *instr)
3994 {
3995 fs_reg image = retype(get_nir_src_imm(instr->src[0]), BRW_REGISTER_TYPE_UD);
3996
3997 if (stage_prog_data->binding_table.image_start > 0) {
3998 if (image.file == BRW_IMMEDIATE_VALUE) {
3999 image.d += stage_prog_data->binding_table.image_start;
4000 } else {
4001 bld.ADD(image, image,
4002 brw_imm_d(stage_prog_data->binding_table.image_start));
4003 }
4004 }
4005
4006 return bld.emit_uniformize(image);
4007 }
4008
4009 fs_reg
4010 fs_visitor::get_nir_ssbo_intrinsic_index(const brw::fs_builder &bld,
4011 nir_intrinsic_instr *instr)
4012 {
4013 /* SSBO stores are weird in that their index is in src[1] */
4014 const unsigned src = instr->intrinsic == nir_intrinsic_store_ssbo ? 1 : 0;
4015
4016 fs_reg surf_index;
4017 if (nir_src_is_const(instr->src[src])) {
4018 unsigned index = stage_prog_data->binding_table.ssbo_start +
4019 nir_src_as_uint(instr->src[src]);
4020 surf_index = brw_imm_ud(index);
4021 } else {
4022 surf_index = vgrf(glsl_type::uint_type);
4023 bld.ADD(surf_index, get_nir_src(instr->src[src]),
4024 brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
4025 }
4026
4027 return bld.emit_uniformize(surf_index);
4028 }
4029
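/* Number of coordinate components the surface message expects for a given
 * image dimensionality, counting the array index as an extra coordinate.
 */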
4030 static unsigned
4031 image_intrinsic_coord_components(nir_intrinsic_instr *instr)
4032 {
4033 switch (nir_intrinsic_image_dim(instr)) {
4034 case GLSL_SAMPLER_DIM_1D:
4035 return 1 + nir_intrinsic_image_array(instr);
4036 case GLSL_SAMPLER_DIM_2D:
4037 case GLSL_SAMPLER_DIM_RECT:
4038 return 2 + nir_intrinsic_image_array(instr);
4039 case GLSL_SAMPLER_DIM_3D:
4040 case GLSL_SAMPLER_DIM_CUBE:
4041 return 3;
4042 case GLSL_SAMPLER_DIM_BUF:
4043 return 1;
4044 case GLSL_SAMPLER_DIM_MS:
4045 return 2 + nir_intrinsic_image_array(instr);
4046 default:
4047 unreachable("Invalid image dimension");
4048 }
4049 }
4050
4051 void
4052 fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
4053 {
4054 fs_reg dest;
4055 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
4056 dest = get_nir_dest(instr->dest);
4057
4058 switch (instr->intrinsic) {
4059 case nir_intrinsic_image_load:
4060 case nir_intrinsic_image_store:
4061 case nir_intrinsic_image_atomic_add:
4062 case nir_intrinsic_image_atomic_min:
4063 case nir_intrinsic_image_atomic_max:
4064 case nir_intrinsic_image_atomic_and:
4065 case nir_intrinsic_image_atomic_or:
4066 case nir_intrinsic_image_atomic_xor:
4067 case nir_intrinsic_image_atomic_exchange:
4068 case nir_intrinsic_image_atomic_comp_swap:
4069 case nir_intrinsic_bindless_image_load:
4070 case nir_intrinsic_bindless_image_store:
4071 case nir_intrinsic_bindless_image_atomic_add:
4072 case nir_intrinsic_bindless_image_atomic_min:
4073 case nir_intrinsic_bindless_image_atomic_max:
4074 case nir_intrinsic_bindless_image_atomic_and:
4075 case nir_intrinsic_bindless_image_atomic_or:
4076 case nir_intrinsic_bindless_image_atomic_xor:
4077 case nir_intrinsic_bindless_image_atomic_exchange:
4078 case nir_intrinsic_bindless_image_atomic_comp_swap: {
4079 if (stage == MESA_SHADER_FRAGMENT &&
4080 instr->intrinsic != nir_intrinsic_image_load)
4081 brw_wm_prog_data(prog_data)->has_side_effects = true;
4082
4083 /* Get some metadata from the image intrinsic. */
4084 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
4085 const GLenum format = nir_intrinsic_format(instr);
4086
4087 fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
4088
4089 switch (instr->intrinsic) {
4090 case nir_intrinsic_image_load:
4091 case nir_intrinsic_image_store:
4092 case nir_intrinsic_image_atomic_add:
4093 case nir_intrinsic_image_atomic_min:
4094 case nir_intrinsic_image_atomic_max:
4095 case nir_intrinsic_image_atomic_and:
4096 case nir_intrinsic_image_atomic_or:
4097 case nir_intrinsic_image_atomic_xor:
4098 case nir_intrinsic_image_atomic_exchange:
4099 case nir_intrinsic_image_atomic_comp_swap:
4100 srcs[SURFACE_LOGICAL_SRC_SURFACE] =
4101 get_nir_image_intrinsic_image(bld, instr);
4102 break;
4103
4104 default:
4105 /* Bindless */
4106 srcs[SURFACE_LOGICAL_SRC_SURFACE_HANDLE] =
4107 bld.emit_uniformize(get_nir_src(instr->src[0]));
4108 break;
4109 }
4110
4111 srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
4112 srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] =
4113 brw_imm_ud(image_intrinsic_coord_components(instr));
4114
4115 /* Emit an image load, store or atomic op. */
4116 if (instr->intrinsic == nir_intrinsic_image_load ||
4117 instr->intrinsic == nir_intrinsic_bindless_image_load) {
4118 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
4119 fs_inst *inst =
4120 bld.emit(SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL,
4121 dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
4122 inst->size_written = instr->num_components * dispatch_width * 4;
4123 } else if (instr->intrinsic == nir_intrinsic_image_store ||
4124 instr->intrinsic == nir_intrinsic_bindless_image_store) {
4125 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
4126 srcs[SURFACE_LOGICAL_SRC_DATA] = get_nir_src(instr->src[3]);
4127 bld.emit(SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL,
4128 fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
4129 } else {
4130 int op;
4131 unsigned num_srcs = info->num_srcs;
4132
4133 switch (instr->intrinsic) {
4134 case nir_intrinsic_image_atomic_add:
4135 case nir_intrinsic_bindless_image_atomic_add:
4136 assert(num_srcs == 4);
4137
4138 op = get_op_for_atomic_add(instr, 3);
4139
4140 if (op != BRW_AOP_ADD)
4141 num_srcs = 3;
4142 break;
4143 case nir_intrinsic_image_atomic_min:
4144 case nir_intrinsic_bindless_image_atomic_min:
4145 assert(format == GL_R32UI || format == GL_R32I);
4146 op = (format == GL_R32I) ? BRW_AOP_IMIN : BRW_AOP_UMIN;
4147 break;
4148 case nir_intrinsic_image_atomic_max:
4149 case nir_intrinsic_bindless_image_atomic_max:
4150 assert(format == GL_R32UI || format == GL_R32I);
4151 op = (format == GL_R32I) ? BRW_AOP_IMAX : BRW_AOP_UMAX;
4152 break;
4153 case nir_intrinsic_image_atomic_and:
4154 case nir_intrinsic_bindless_image_atomic_and:
4155 op = BRW_AOP_AND;
4156 break;
4157 case nir_intrinsic_image_atomic_or:
4158 case nir_intrinsic_bindless_image_atomic_or:
4159 op = BRW_AOP_OR;
4160 break;
4161 case nir_intrinsic_image_atomic_xor:
4162 case nir_intrinsic_bindless_image_atomic_xor:
4163 op = BRW_AOP_XOR;
4164 break;
4165 case nir_intrinsic_image_atomic_exchange:
4166 case nir_intrinsic_bindless_image_atomic_exchange:
4167 op = BRW_AOP_MOV;
4168 break;
4169 case nir_intrinsic_image_atomic_comp_swap:
4170 case nir_intrinsic_bindless_image_atomic_comp_swap:
4171 op = BRW_AOP_CMPWR;
4172 break;
4173 default:
4174 unreachable("Not reachable.");
4175 }
4176
4177 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
4178
4179 fs_reg data;
4180 if (num_srcs >= 4)
4181 data = get_nir_src(instr->src[3]);
4182 if (num_srcs >= 5) {
4183 fs_reg tmp = bld.vgrf(data.type, 2);
4184 fs_reg sources[2] = { data, get_nir_src(instr->src[4]) };
4185 bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
4186 data = tmp;
4187 }
4188 srcs[SURFACE_LOGICAL_SRC_DATA] = data;
4189
4190 bld.emit(SHADER_OPCODE_TYPED_ATOMIC_LOGICAL,
4191 dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
4192 }
4193 break;
4194 }
4195
4196 case nir_intrinsic_image_size:
4197 case nir_intrinsic_bindless_image_size: {
4198 /* Unlike the [un]typed load and store opcodes, the TXS that this turns
4199     * into will handle the binding table index for us in the generator.
4200 * Incidentally, this means that we can handle bindless with exactly the
4201 * same code.
4202 */
4203 fs_reg image = retype(get_nir_src_imm(instr->src[0]),
4204 BRW_REGISTER_TYPE_UD);
4205 image = bld.emit_uniformize(image);
4206
4207 fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
4208 if (instr->intrinsic == nir_intrinsic_image_size)
4209 srcs[TEX_LOGICAL_SRC_SURFACE] = image;
4210 else
4211 srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE] = image;
4212 srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_d(0);
4213 srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(0);
4214 srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(0);
4215
4216 /* Since the image size is always uniform, we can just emit a SIMD8
4217 * query instruction and splat the result out.
4218 */
4219 const fs_builder ubld = bld.exec_all().group(8, 0);
4220
4221 fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 4);
4222 fs_inst *inst = ubld.emit(SHADER_OPCODE_IMAGE_SIZE_LOGICAL,
4223 tmp, srcs, ARRAY_SIZE(srcs));
4224 inst->size_written = 4 * REG_SIZE;
4225
4226 for (unsigned c = 0; c < instr->dest.ssa.num_components; ++c) {
4227 if (c == 2 && nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_CUBE) {
4228 bld.emit(SHADER_OPCODE_INT_QUOTIENT,
4229 offset(retype(dest, tmp.type), bld, c),
4230 component(offset(tmp, ubld, c), 0), brw_imm_ud(6));
4231 } else {
4232 bld.MOV(offset(retype(dest, tmp.type), bld, c),
4233 component(offset(tmp, ubld, c), 0));
4234 }
4235 }
4236 break;
4237 }
4238
4239 case nir_intrinsic_image_load_raw_intel: {
4240 fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
4241 srcs[SURFACE_LOGICAL_SRC_SURFACE] =
4242 get_nir_image_intrinsic_image(bld, instr);
4243 srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
4244 srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
4245 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
4246
4247 fs_inst *inst =
4248 bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
4249 dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
4250 inst->size_written = instr->num_components * dispatch_width * 4;
4251 break;
4252 }
4253
4254 case nir_intrinsic_image_store_raw_intel: {
4255 if (stage == MESA_SHADER_FRAGMENT)
4256 brw_wm_prog_data(prog_data)->has_side_effects = true;
4257
4258 fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
4259 srcs[SURFACE_LOGICAL_SRC_SURFACE] =
4260 get_nir_image_intrinsic_image(bld, instr);
4261 srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
4262 srcs[SURFACE_LOGICAL_SRC_DATA] = get_nir_src(instr->src[2]);
4263 srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
4264 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
4265
4266 bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,
4267 fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
4268 break;
4269 }
4270
4271 case nir_intrinsic_group_memory_barrier:
4272 case nir_intrinsic_memory_barrier_shared:
4273 case nir_intrinsic_memory_barrier_atomic_counter:
4274 case nir_intrinsic_memory_barrier_buffer:
4275 case nir_intrinsic_memory_barrier_image:
4276 case nir_intrinsic_memory_barrier: {
4277 const fs_builder ubld = bld.group(8, 0);
4278 const fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
4279 ubld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp,
4280 brw_vec8_grf(0, 0), brw_imm_ud(0))
4281 ->size_written = 2 * REG_SIZE;
4282 break;
4283 }
4284
4285 case nir_intrinsic_shader_clock: {
4286 /* We cannot do anything if there is an event, so ignore it for now */
4287 const fs_reg shader_clock = get_timestamp(bld);
4288 const fs_reg srcs[] = { component(shader_clock, 0),
4289 component(shader_clock, 1) };
4290 bld.LOAD_PAYLOAD(dest, srcs, ARRAY_SIZE(srcs), 0);
4291 break;
4292 }
4293
4294 case nir_intrinsic_image_samples:
4295 /* The driver does not support multi-sampled images. */
4296 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), brw_imm_d(1));
4297 break;
4298
4299 case nir_intrinsic_load_uniform: {
4300    /* Offsets are in bytes but they should always be aligned to
4301     * the type size.
4302 */
4303 assert(instr->const_index[0] % 4 == 0 ||
4304 instr->const_index[0] % type_sz(dest.type) == 0);
4305
4306 fs_reg src(UNIFORM, instr->const_index[0] / 4, dest.type);
4307
4308 if (nir_src_is_const(instr->src[0])) {
4309 unsigned load_offset = nir_src_as_uint(instr->src[0]);
4310 assert(load_offset % type_sz(dest.type) == 0);
4311       /* For 16-bit types we add the modulo of the const_index[0]
4312        * offset to access elements that are not 32-bit aligned.
4313 */
4314 src.offset = load_offset + instr->const_index[0] % 4;
4315
4316 for (unsigned j = 0; j < instr->num_components; j++) {
4317 bld.MOV(offset(dest, bld, j), offset(src, bld, j));
4318 }
4319 } else {
4320 fs_reg indirect = retype(get_nir_src(instr->src[0]),
4321 BRW_REGISTER_TYPE_UD);
4322
4323 /* We need to pass a size to the MOV_INDIRECT but we don't want it to
4324 * go past the end of the uniform. In order to keep the n'th
4325 * component from running past, we subtract off the size of all but
4326 * one component of the vector.
4327 */
4328 assert(instr->const_index[1] >=
4329 instr->num_components * (int) type_sz(dest.type));
4330 unsigned read_size = instr->const_index[1] -
4331 (instr->num_components - 1) * type_sz(dest.type);
4332
4333 bool supports_64bit_indirects =
4334 !devinfo->is_cherryview && !gen_device_info_is_9lp(devinfo);
4335
4336 if (type_sz(dest.type) != 8 || supports_64bit_indirects) {
4337 for (unsigned j = 0; j < instr->num_components; j++) {
4338 bld.emit(SHADER_OPCODE_MOV_INDIRECT,
4339 offset(dest, bld, j), offset(src, bld, j),
4340 indirect, brw_imm_ud(read_size));
4341 }
4342 } else {
4343 const unsigned num_mov_indirects =
4344 type_sz(dest.type) / type_sz(BRW_REGISTER_TYPE_UD);
4345 /* We read a little bit less per MOV INDIRECT, as they are now
4346           * 32-bit instead of 64-bit. Adjust read_size accordingly.
4347 */
4348 const unsigned read_size_32bit = read_size -
4349 (num_mov_indirects - 1) * type_sz(BRW_REGISTER_TYPE_UD);
4350 for (unsigned j = 0; j < instr->num_components; j++) {
4351 for (unsigned i = 0; i < num_mov_indirects; i++) {
4352 bld.emit(SHADER_OPCODE_MOV_INDIRECT,
4353 subscript(offset(dest, bld, j), BRW_REGISTER_TYPE_UD, i),
4354 subscript(offset(src, bld, j), BRW_REGISTER_TYPE_UD, i),
4355 indirect, brw_imm_ud(read_size_32bit));
4356 }
4357 }
4358 }
4359 }
4360 break;
4361 }
4362
4363 case nir_intrinsic_load_ubo: {
4364 fs_reg surf_index;
4365 if (nir_src_is_const(instr->src[0])) {
4366 const unsigned index = stage_prog_data->binding_table.ubo_start +
4367 nir_src_as_uint(instr->src[0]);
4368 surf_index = brw_imm_ud(index);
4369 } else {
4370 /* The block index is not a constant. Evaluate the index expression
4371 * per-channel and add the base UBO index; we have to select a value
4372 * from any live channel.
4373 */
4374 surf_index = vgrf(glsl_type::uint_type);
4375 bld.ADD(surf_index, get_nir_src(instr->src[0]),
4376 brw_imm_ud(stage_prog_data->binding_table.ubo_start));
4377 surf_index = bld.emit_uniformize(surf_index);
4378 }
4379
4380 if (!nir_src_is_const(instr->src[1])) {
4381 fs_reg base_offset = retype(get_nir_src(instr->src[1]),
4382 BRW_REGISTER_TYPE_UD);
4383
4384 for (int i = 0; i < instr->num_components; i++)
4385 VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
4386 base_offset, i * type_sz(dest.type));
4387 } else {
4388 /* Even if we are loading doubles, a pull constant load will load
4389        * a 32-bit vec4, so we should only reserve vgrf space for that. If we
4390 * need to load a full dvec4 we will have to emit 2 loads. This is
4391 * similar to demote_pull_constants(), except that in that case we
4392 * see individual accesses to each component of the vector and then
4393 * we let CSE deal with duplicate loads. Here we see a vector access
4394 * and we have to split it if necessary.
4395 */
4396 const unsigned type_size = type_sz(dest.type);
4397 const unsigned load_offset = nir_src_as_uint(instr->src[1]);
4398
4399 /* See if we've selected this as a push constant candidate */
4400 if (nir_src_is_const(instr->src[0])) {
4401 const unsigned ubo_block = nir_src_as_uint(instr->src[0]);
4402 const unsigned offset_256b = load_offset / 32;
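      /* prog_data->ubo_ranges is expressed in 32-byte (256-bit) units, hence
       * the division by 32 above and the "32 * range->start" below.
       */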
4403
4404 fs_reg push_reg;
4405 for (int i = 0; i < 4; i++) {
4406 const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
4407 if (range->block == ubo_block &&
4408 offset_256b >= range->start &&
4409 offset_256b < range->start + range->length) {
4410
4411 push_reg = fs_reg(UNIFORM, UBO_START + i, dest.type);
4412 push_reg.offset = load_offset - 32 * range->start;
4413 break;
4414 }
4415 }
4416
4417 if (push_reg.file != BAD_FILE) {
4418 for (unsigned i = 0; i < instr->num_components; i++) {
4419 bld.MOV(offset(dest, bld, i),
4420 byte_offset(push_reg, i * type_size));
4421 }
4422 break;
4423 }
4424 }
4425
4426 const unsigned block_sz = 64; /* Fetch one cacheline at a time. */
4427 const fs_builder ubld = bld.exec_all().group(block_sz / 4, 0);
4428 const fs_reg packed_consts = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4429
4430 for (unsigned c = 0; c < instr->num_components;) {
4431 const unsigned base = load_offset + c * type_size;
4432 /* Number of usable components in the next block-aligned load. */
4433 const unsigned count = MIN2(instr->num_components - c,
4434 (block_sz - base % block_sz) / type_size);
4435
4436 ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
4437 packed_consts, surf_index,
4438 brw_imm_ud(base & ~(block_sz - 1)));
4439
4440 const fs_reg consts =
4441 retype(byte_offset(packed_consts, base & (block_sz - 1)),
4442 dest.type);
4443
4444 for (unsigned d = 0; d < count; d++)
4445 bld.MOV(offset(dest, bld, c + d), component(consts, d));
4446
4447 c += count;
4448 }
4449 }
4450 break;
4451 }
4452
4453 case nir_intrinsic_load_global: {
4454 assert(devinfo->gen >= 8);
4455
4456 if (nir_intrinsic_align(instr) >= 4) {
4457 assert(nir_dest_bit_size(instr->dest) == 32);
4458 fs_inst *inst = bld.emit(SHADER_OPCODE_A64_UNTYPED_READ_LOGICAL,
4459 dest,
4460 get_nir_src(instr->src[0]), /* Address */
4461 fs_reg(), /* No source data */
4462 brw_imm_ud(instr->num_components));
4463 inst->size_written = instr->num_components *
4464 inst->dst.component_size(inst->exec_size);
4465 } else {
4466 const unsigned bit_size = nir_dest_bit_size(instr->dest);
4467 assert(bit_size <= 32);
4468 assert(nir_dest_num_components(instr->dest) == 1);
4469 brw_reg_type data_type =
4470 brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
4471 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
4472 bld.emit(SHADER_OPCODE_A64_BYTE_SCATTERED_READ_LOGICAL,
4473 tmp,
4474 get_nir_src(instr->src[0]), /* Address */
4475 fs_reg(), /* No source data */
4476 brw_imm_ud(bit_size));
4477 bld.MOV(retype(dest, data_type), tmp);
4478 }
4479 break;
4480 }
4481
4482 case nir_intrinsic_store_global:
4483 assert(devinfo->gen >= 8);
4484
4485 if (stage == MESA_SHADER_FRAGMENT)
4486 brw_wm_prog_data(prog_data)->has_side_effects = true;
4487
4488 if (nir_intrinsic_align(instr) >= 4) {
4489 assert(nir_src_bit_size(instr->src[0]) == 32);
4490 bld.emit(SHADER_OPCODE_A64_UNTYPED_WRITE_LOGICAL,
4491 fs_reg(),
4492 get_nir_src(instr->src[1]), /* Address */
4493 get_nir_src(instr->src[0]), /* Data */
4494 brw_imm_ud(instr->num_components));
4495 } else {
4496 const unsigned bit_size = nir_src_bit_size(instr->src[0]);
4497 assert(bit_size <= 32);
4498 assert(nir_src_num_components(instr->src[0]) == 1);
4499 brw_reg_type data_type =
4500 brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
4501 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
4502 bld.MOV(tmp, retype(get_nir_src(instr->src[0]), data_type));
4503 bld.emit(SHADER_OPCODE_A64_BYTE_SCATTERED_WRITE_LOGICAL,
4504 fs_reg(),
4505 get_nir_src(instr->src[1]), /* Address */
4506 tmp, /* Data */
4507 brw_imm_ud(nir_src_bit_size(instr->src[0])));
4508 }
4509 break;
4510
4511 case nir_intrinsic_global_atomic_add:
4512 nir_emit_global_atomic(bld, get_op_for_atomic_add(instr, 1), instr);
4513 break;
4514 case nir_intrinsic_global_atomic_imin:
4515 nir_emit_global_atomic(bld, BRW_AOP_IMIN, instr);
4516 break;
4517 case nir_intrinsic_global_atomic_umin:
4518 nir_emit_global_atomic(bld, BRW_AOP_UMIN, instr);
4519 break;
4520 case nir_intrinsic_global_atomic_imax:
4521 nir_emit_global_atomic(bld, BRW_AOP_IMAX, instr);
4522 break;
4523 case nir_intrinsic_global_atomic_umax:
4524 nir_emit_global_atomic(bld, BRW_AOP_UMAX, instr);
4525 break;
4526 case nir_intrinsic_global_atomic_and:
4527 nir_emit_global_atomic(bld, BRW_AOP_AND, instr);
4528 break;
4529 case nir_intrinsic_global_atomic_or:
4530 nir_emit_global_atomic(bld, BRW_AOP_OR, instr);
4531 break;
4532 case nir_intrinsic_global_atomic_xor:
4533 nir_emit_global_atomic(bld, BRW_AOP_XOR, instr);
4534 break;
4535 case nir_intrinsic_global_atomic_exchange:
4536 nir_emit_global_atomic(bld, BRW_AOP_MOV, instr);
4537 break;
4538 case nir_intrinsic_global_atomic_comp_swap:
4539 nir_emit_global_atomic(bld, BRW_AOP_CMPWR, instr);
4540 break;
4541 case nir_intrinsic_global_atomic_fmin:
4542 nir_emit_global_atomic_float(bld, BRW_AOP_FMIN, instr);
4543 break;
4544 case nir_intrinsic_global_atomic_fmax:
4545 nir_emit_global_atomic_float(bld, BRW_AOP_FMAX, instr);
4546 break;
4547 case nir_intrinsic_global_atomic_fcomp_swap:
4548 nir_emit_global_atomic_float(bld, BRW_AOP_FCMPWR, instr);
4549 break;
4550
4551 case nir_intrinsic_load_ssbo: {
4552 assert(devinfo->gen >= 7);
4553
4554 const unsigned bit_size = nir_dest_bit_size(instr->dest);
4555 fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
4556 srcs[SURFACE_LOGICAL_SRC_SURFACE] =
4557 get_nir_ssbo_intrinsic_index(bld, instr);
4558 srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
4559 srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
4560
4561 /* Make dest unsigned because that's what the temporary will be */
4562 dest.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
4563
4564 /* Read the vector */
4565 if (nir_intrinsic_align(instr) >= 4) {
4566 assert(nir_dest_bit_size(instr->dest) == 32);
4567 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
4568 fs_inst *inst =
4569 bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL,
4570 dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
4571 inst->size_written = instr->num_components * dispatch_width * 4;
4572 } else {
4573 assert(nir_dest_bit_size(instr->dest) <= 32);
4574 assert(nir_dest_num_components(instr->dest) == 1);
4575 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
4576
4577 fs_reg read_result = bld.vgrf(BRW_REGISTER_TYPE_UD);
4578 bld.emit(SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL,
4579 read_result, srcs, SURFACE_LOGICAL_NUM_SRCS);
4580 bld.MOV(dest, read_result);
4581 }
4582 break;
4583 }
4584
4585 case nir_intrinsic_store_ssbo: {
4586 assert(devinfo->gen >= 7);
4587
4588 if (stage == MESA_SHADER_FRAGMENT)
4589 brw_wm_prog_data(prog_data)->has_side_effects = true;
4590
4591 const unsigned bit_size = nir_src_bit_size(instr->src[0]);
4592 fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
4593 srcs[SURFACE_LOGICAL_SRC_SURFACE] =
4594 get_nir_ssbo_intrinsic_index(bld, instr);
4595 srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[2]);
4596 srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
4597
4598 fs_reg data = get_nir_src(instr->src[0]);
4599 data.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_UD);
4600
4601 assert(nir_intrinsic_write_mask(instr) ==
4602 (1u << instr->num_components) - 1);
4603 if (nir_intrinsic_align(instr) >= 4) {
4604 assert(nir_src_bit_size(instr->src[0]) == 32);
4605 assert(nir_src_num_components(instr->src[0]) <= 4);
4606 srcs[SURFACE_LOGICAL_SRC_DATA] = data;
4607 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(instr->num_components);
4608 bld.emit(SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL,
4609 fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
4610 } else {
4611 assert(nir_src_bit_size(instr->src[0]) <= 32);
4612 assert(nir_src_num_components(instr->src[0]) == 1);
4613 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(bit_size);
4614
4615 srcs[SURFACE_LOGICAL_SRC_DATA] = bld.vgrf(BRW_REGISTER_TYPE_UD);
4616 bld.MOV(srcs[SURFACE_LOGICAL_SRC_DATA], data);
4617
4618 bld.emit(SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL,
4619 fs_reg(), srcs, SURFACE_LOGICAL_NUM_SRCS);
4620 }
4621 break;
4622 }
4623
4624 case nir_intrinsic_store_output: {
4625 fs_reg src = get_nir_src(instr->src[0]);
4626
4627 unsigned store_offset = nir_src_as_uint(instr->src[1]);
4628 unsigned num_components = instr->num_components;
4629 unsigned first_component = nir_intrinsic_component(instr);
4630 if (nir_src_bit_size(instr->src[0]) == 64) {
4631 src = shuffle_for_32bit_write(bld, src, 0, num_components);
4632 num_components *= 2;
4633 }
4634
4635 fs_reg new_dest = retype(offset(outputs[instr->const_index[0]], bld,
4636 4 * store_offset), src.type);
4637 for (unsigned j = 0; j < num_components; j++) {
4638 bld.MOV(offset(new_dest, bld, j + first_component),
4639 offset(src, bld, j));
4640 }
4641 break;
4642 }
4643
4644 case nir_intrinsic_ssbo_atomic_add:
4645 nir_emit_ssbo_atomic(bld, get_op_for_atomic_add(instr, 2), instr);
4646 break;
4647 case nir_intrinsic_ssbo_atomic_imin:
4648 nir_emit_ssbo_atomic(bld, BRW_AOP_IMIN, instr);
4649 break;
4650 case nir_intrinsic_ssbo_atomic_umin:
4651 nir_emit_ssbo_atomic(bld, BRW_AOP_UMIN, instr);
4652 break;
4653 case nir_intrinsic_ssbo_atomic_imax:
4654 nir_emit_ssbo_atomic(bld, BRW_AOP_IMAX, instr);
4655 break;
4656 case nir_intrinsic_ssbo_atomic_umax:
4657 nir_emit_ssbo_atomic(bld, BRW_AOP_UMAX, instr);
4658 break;
4659 case nir_intrinsic_ssbo_atomic_and:
4660 nir_emit_ssbo_atomic(bld, BRW_AOP_AND, instr);
4661 break;
4662 case nir_intrinsic_ssbo_atomic_or:
4663 nir_emit_ssbo_atomic(bld, BRW_AOP_OR, instr);
4664 break;
4665 case nir_intrinsic_ssbo_atomic_xor:
4666 nir_emit_ssbo_atomic(bld, BRW_AOP_XOR, instr);
4667 break;
4668 case nir_intrinsic_ssbo_atomic_exchange:
4669 nir_emit_ssbo_atomic(bld, BRW_AOP_MOV, instr);
4670 break;
4671 case nir_intrinsic_ssbo_atomic_comp_swap:
4672 nir_emit_ssbo_atomic(bld, BRW_AOP_CMPWR, instr);
4673 break;
4674 case nir_intrinsic_ssbo_atomic_fmin:
4675 nir_emit_ssbo_atomic_float(bld, BRW_AOP_FMIN, instr);
4676 break;
4677 case nir_intrinsic_ssbo_atomic_fmax:
4678 nir_emit_ssbo_atomic_float(bld, BRW_AOP_FMAX, instr);
4679 break;
4680 case nir_intrinsic_ssbo_atomic_fcomp_swap:
4681 nir_emit_ssbo_atomic_float(bld, BRW_AOP_FCMPWR, instr);
4682 break;
4683
4684 case nir_intrinsic_get_buffer_size: {
4685 assert(nir_src_num_components(instr->src[0]) == 1);
4686 unsigned ssbo_index = nir_src_is_const(instr->src[0]) ?
4687 nir_src_as_uint(instr->src[0]) : 0;
4688
4689 /* A resinfo's sampler message is used to get the buffer size. The
4690 * SIMD8's writeback message consists of four registers and SIMD16's
4691       * writeback message consists of 8 destination registers (two per
4692       * component). Because we are only interested in the first channel of
4693 * the first returned component, where resinfo returns the buffer size
4694 * for SURFTYPE_BUFFER, we can just use the SIMD8 variant regardless of
4695 * the dispatch width.
4696 */
4697 const fs_builder ubld = bld.exec_all().group(8, 0);
4698 fs_reg src_payload = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4699 fs_reg ret_payload = ubld.vgrf(BRW_REGISTER_TYPE_UD, 4);
4700
4701 /* Set LOD = 0 */
4702 ubld.MOV(src_payload, brw_imm_d(0));
4703
4704 const unsigned index = prog_data->binding_table.ssbo_start + ssbo_index;
4705 fs_inst *inst = ubld.emit(SHADER_OPCODE_GET_BUFFER_SIZE, ret_payload,
4706 src_payload, brw_imm_ud(index));
4707 inst->header_size = 0;
4708 inst->mlen = 1;
4709 inst->size_written = 4 * REG_SIZE;
4710
4711 /* SKL PRM, vol07, 3D Media GPGPU Engine, Bounds Checking and Faulting:
4712 *
4713 * "Out-of-bounds checking is always performed at a DWord granularity. If
4714 * any part of the DWord is out-of-bounds then the whole DWord is
4715 * considered out-of-bounds."
4716 *
4717       * This implies that types smaller than 4 bytes need to be padded if
4718       * they don't complete the last dword of the buffer. But since we need
4719       * to return the original size so that the number of elements of an
4720       * unsized array can be computed correctly, we have to undo that
4721       * padding. The padding we applied is stored in the two low bits of
4722       * the surface size, so we can recover the original buffer_size by
4723       * reversing the surface_size calculation:
4724 *
4725       *       surface_size = isl_align(buffer_size, 4) +
4726       *                      (isl_align(buffer_size, 4) - buffer_size)
4727       *
4728       *       buffer_size = (surface_size & ~3) - (surface_size & 3)
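       *
       * For example, a 5-byte buffer gets surface_size = 8 + (8 - 5) = 11,
       * and we recover buffer_size = (11 & ~3) - (11 & 3) = 8 - 3 = 5.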
4729 */
4730
4731 fs_reg size_aligned4 = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4732 fs_reg size_padding = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4733 fs_reg buffer_size = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4734
4735 ubld.AND(size_padding, ret_payload, brw_imm_ud(3));
4736 ubld.AND(size_aligned4, ret_payload, brw_imm_ud(~3));
4737 ubld.ADD(buffer_size, size_aligned4, negate(size_padding));
4738
4739 bld.MOV(retype(dest, ret_payload.type), component(buffer_size, 0));
4740 break;
4741 }
4742
4743 case nir_intrinsic_load_subgroup_invocation:
4744 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
4745 nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION]);
4746 break;
4747
4748 case nir_intrinsic_load_subgroup_eq_mask:
4749 case nir_intrinsic_load_subgroup_ge_mask:
4750 case nir_intrinsic_load_subgroup_gt_mask:
4751 case nir_intrinsic_load_subgroup_le_mask:
4752 case nir_intrinsic_load_subgroup_lt_mask:
4753 unreachable("not reached");
4754
4755 case nir_intrinsic_vote_any: {
4756 const fs_builder ubld = bld.exec_all().group(1, 0);
4757
4758 /* The any/all predicates do not consider channel enables. To prevent
4759 * dead channels from affecting the result, we initialize the flag with
4760       * the identity value for the logical operation.
4761 */
4762 if (dispatch_width == 32) {
4763 /* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
4764 ubld.MOV(retype(brw_flag_reg(0, 0), BRW_REGISTER_TYPE_UD),
4765 brw_imm_ud(0));
4766 } else {
4767 ubld.MOV(brw_flag_reg(0, 0), brw_imm_uw(0));
4768 }
4769 bld.CMP(bld.null_reg_d(), get_nir_src(instr->src[0]), brw_imm_d(0), BRW_CONDITIONAL_NZ);
4770
4771 /* For some reason, the any/all predicates don't work properly with
4772 * SIMD32. In particular, it appears that a SEL with a QtrCtrl of 2H
4773 * doesn't read the correct subset of the flag register and you end up
4774 * getting garbage in the second half. Work around this by using a pair
4775 * of 1-wide MOVs and scattering the result.
4776 */
4777 fs_reg res1 = ubld.vgrf(BRW_REGISTER_TYPE_D);
4778 ubld.MOV(res1, brw_imm_d(0));
4779 set_predicate(dispatch_width == 8 ? BRW_PREDICATE_ALIGN1_ANY8H :
4780 dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ANY16H :
4781 BRW_PREDICATE_ALIGN1_ANY32H,
4782 ubld.MOV(res1, brw_imm_d(-1)));
4783
4784 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), component(res1, 0));
4785 break;
4786 }
4787 case nir_intrinsic_vote_all: {
4788 const fs_builder ubld = bld.exec_all().group(1, 0);
4789
4790 /* The any/all predicates do not consider channel enables. To prevent
4791 * dead channels from affecting the result, we initialize the flag with
4792       * the identity value for the logical operation.
4793 */
4794 if (dispatch_width == 32) {
4795 /* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
4796 ubld.MOV(retype(brw_flag_reg(0, 0), BRW_REGISTER_TYPE_UD),
4797 brw_imm_ud(0xffffffff));
4798 } else {
4799 ubld.MOV(brw_flag_reg(0, 0), brw_imm_uw(0xffff));
4800 }
4801 bld.CMP(bld.null_reg_d(), get_nir_src(instr->src[0]), brw_imm_d(0), BRW_CONDITIONAL_NZ);
4802
4803 /* For some reason, the any/all predicates don't work properly with
4804 * SIMD32. In particular, it appears that a SEL with a QtrCtrl of 2H
4805 * doesn't read the correct subset of the flag register and you end up
4806 * getting garbage in the second half. Work around this by using a pair
4807 * of 1-wide MOVs and scattering the result.
4808 */
4809 fs_reg res1 = ubld.vgrf(BRW_REGISTER_TYPE_D);
4810 ubld.MOV(res1, brw_imm_d(0));
4811 set_predicate(dispatch_width == 8 ? BRW_PREDICATE_ALIGN1_ALL8H :
4812 dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ALL16H :
4813 BRW_PREDICATE_ALIGN1_ALL32H,
4814 ubld.MOV(res1, brw_imm_d(-1)));
4815
4816 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), component(res1, 0));
4817 break;
4818 }
4819 case nir_intrinsic_vote_feq:
4820 case nir_intrinsic_vote_ieq: {
4821 fs_reg value = get_nir_src(instr->src[0]);
4822 if (instr->intrinsic == nir_intrinsic_vote_feq) {
4823 const unsigned bit_size = nir_src_bit_size(instr->src[0]);
4824 value.type = bit_size == 8 ? BRW_REGISTER_TYPE_B :
4825 brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_F);
4826 }
4827
4828 fs_reg uniformized = bld.emit_uniformize(value);
4829 const fs_builder ubld = bld.exec_all().group(1, 0);
4830
4831 /* The any/all predicates do not consider channel enables. To prevent
4832 * dead channels from affecting the result, we initialize the flag with
4833       * the identity value for the logical operation.
4834 */
4835 if (dispatch_width == 32) {
4836 /* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
4837 ubld.MOV(retype(brw_flag_reg(0, 0), BRW_REGISTER_TYPE_UD),
4838 brw_imm_ud(0xffffffff));
4839 } else {
4840 ubld.MOV(brw_flag_reg(0, 0), brw_imm_uw(0xffff));
4841 }
4842 bld.CMP(bld.null_reg_d(), value, uniformized, BRW_CONDITIONAL_Z);
4843
4844 /* For some reason, the any/all predicates don't work properly with
4845 * SIMD32. In particular, it appears that a SEL with a QtrCtrl of 2H
4846 * doesn't read the correct subset of the flag register and you end up
4847 * getting garbage in the second half. Work around this by using a pair
4848 * of 1-wide MOVs and scattering the result.
4849 */
4850 fs_reg res1 = ubld.vgrf(BRW_REGISTER_TYPE_D);
4851 ubld.MOV(res1, brw_imm_d(0));
4852 set_predicate(dispatch_width == 8 ? BRW_PREDICATE_ALIGN1_ALL8H :
4853 dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ALL16H :
4854 BRW_PREDICATE_ALIGN1_ALL32H,
4855 ubld.MOV(res1, brw_imm_d(-1)));
4856
4857 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), component(res1, 0));
4858 break;
4859 }
4860
4861 case nir_intrinsic_ballot: {
4862 const fs_reg value = retype(get_nir_src(instr->src[0]),
4863 BRW_REGISTER_TYPE_UD);
4864 struct brw_reg flag = brw_flag_reg(0, 0);
4865 /* FIXME: For SIMD32 programs, this causes us to stomp on f0.1 as well
4866 * as f0.0. This is a problem for fragment programs as we currently use
4867 * f0.1 for discards. Fortunately, we don't support SIMD32 fragment
4868 * programs yet so this isn't a problem. When we do, something will
4869 * have to change.
4870 */
4871 if (dispatch_width == 32)
4872 flag.type = BRW_REGISTER_TYPE_UD;
4873
4874 bld.exec_all().group(1, 0).MOV(flag, brw_imm_ud(0u));
4875 bld.CMP(bld.null_reg_ud(), value, brw_imm_ud(0u), BRW_CONDITIONAL_NZ);
4876
4877 if (instr->dest.ssa.bit_size > 32) {
4878 dest.type = BRW_REGISTER_TYPE_UQ;
4879 } else {
4880 dest.type = BRW_REGISTER_TYPE_UD;
4881 }
4882 bld.MOV(dest, flag);
4883 break;
4884 }
4885
4886 case nir_intrinsic_read_invocation: {
4887 const fs_reg value = get_nir_src(instr->src[0]);
4888 const fs_reg invocation = get_nir_src(instr->src[1]);
4889 fs_reg tmp = bld.vgrf(value.type);
4890
4891 bld.exec_all().emit(SHADER_OPCODE_BROADCAST, tmp, value,
4892 bld.emit_uniformize(invocation));
4893
4894 bld.MOV(retype(dest, value.type), fs_reg(component(tmp, 0)));
4895 break;
4896 }
4897
4898 case nir_intrinsic_read_first_invocation: {
4899 const fs_reg value = get_nir_src(instr->src[0]);
4900 bld.MOV(retype(dest, value.type), bld.emit_uniformize(value));
4901 break;
4902 }
4903
4904 case nir_intrinsic_shuffle: {
4905 const fs_reg value = get_nir_src(instr->src[0]);
4906 const fs_reg index = get_nir_src(instr->src[1]);
4907
4908 bld.emit(SHADER_OPCODE_SHUFFLE, retype(dest, value.type), value, index);
4909 break;
4910 }
4911
4912 case nir_intrinsic_first_invocation: {
4913 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
4914 bld.exec_all().emit(SHADER_OPCODE_FIND_LIVE_CHANNEL, tmp);
4915 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
4916 fs_reg(component(tmp, 0)));
4917 break;
4918 }
4919
4920 case nir_intrinsic_quad_broadcast: {
4921 const fs_reg value = get_nir_src(instr->src[0]);
4922 const unsigned index = nir_src_as_uint(instr->src[1]);
4923
4924 bld.emit(SHADER_OPCODE_CLUSTER_BROADCAST, retype(dest, value.type),
4925 value, brw_imm_ud(index), brw_imm_ud(4));
4926 break;
4927 }
4928
4929 case nir_intrinsic_quad_swap_horizontal: {
4930 const fs_reg value = get_nir_src(instr->src[0]);
4931 const fs_reg tmp = bld.vgrf(value.type);
4932 const fs_builder ubld = bld.exec_all().group(dispatch_width / 2, 0);
4933
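      /* Viewing both value and tmp with a stride of 2 splits them into their
       * even and odd channels; copying odd->even and even->odd then swaps
       * each adjacent pair of channels with just two half-width MOVs.
       */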
4934 const fs_reg src_left = horiz_stride(value, 2);
4935 const fs_reg src_right = horiz_stride(horiz_offset(value, 1), 2);
4936 const fs_reg tmp_left = horiz_stride(tmp, 2);
4937 const fs_reg tmp_right = horiz_stride(horiz_offset(tmp, 1), 2);
4938
4939 ubld.MOV(tmp_left, src_right);
4940 ubld.MOV(tmp_right, src_left);
4941
4942 bld.MOV(retype(dest, value.type), tmp);
4943 break;
4944 }
4945
4946 case nir_intrinsic_quad_swap_vertical: {
4947 const fs_reg value = get_nir_src(instr->src[0]);
4948 if (nir_src_bit_size(instr->src[0]) == 32) {
4949 /* For 32-bit, we can use a SIMD4x2 instruction to do this easily */
4950 const fs_reg tmp = bld.vgrf(value.type);
4951 const fs_builder ubld = bld.exec_all();
4952 ubld.emit(SHADER_OPCODE_QUAD_SWIZZLE, tmp, value,
4953 brw_imm_ud(BRW_SWIZZLE4(2,3,0,1)));
4954 bld.MOV(retype(dest, value.type), tmp);
4955 } else {
4956 /* For larger data types, we have to either emit dispatch_width many
4957 * MOVs or else fall back to doing indirects.
4958 */
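   /* Descriptive note: within a 2x2 quad, channels 0/1 form the top row and
    * 2/3 the bottom row, so XORing the subgroup invocation with 2 yields the
    * index of the vertically adjacent channel for the SHUFFLE to read.
    */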
4959 fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_W);
4960 bld.XOR(idx, nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION],
4961 brw_imm_w(0x2));
4962 bld.emit(SHADER_OPCODE_SHUFFLE, retype(dest, value.type), value, idx);
4963 }
4964 break;
4965 }
4966
4967 case nir_intrinsic_quad_swap_diagonal: {
4968 const fs_reg value = get_nir_src(instr->src[0]);
4969 if (nir_src_bit_size(instr->src[0]) == 32) {
4970 /* For 32-bit, we can use a SIMD4x2 instruction to do this easily */
4971 const fs_reg tmp = bld.vgrf(value.type);
4972 const fs_builder ubld = bld.exec_all();
4973 ubld.emit(SHADER_OPCODE_QUAD_SWIZZLE, tmp, value,
4974 brw_imm_ud(BRW_SWIZZLE4(3,2,1,0)));
4975 bld.MOV(retype(dest, value.type), tmp);
4976 } else {
4977 /* For larger data types, we have to either emit dispatch_width many
4978 * MOVs or else fall back to doing indirects.
4979 */
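   /* Descriptive note: XORing the invocation index with 3 picks the
    * diagonally opposite channel of each 2x2 quad.
    */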
4980 fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_W);
4981 bld.XOR(idx, nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION],
4982 brw_imm_w(0x3));
4983 bld.emit(SHADER_OPCODE_SHUFFLE, retype(dest, value.type), value, idx);
4984 }
4985 break;
4986 }
4987
4988 case nir_intrinsic_reduce: {
4989 fs_reg src = get_nir_src(instr->src[0]);
4990 nir_op redop = (nir_op)nir_intrinsic_reduction_op(instr);
4991 unsigned cluster_size = nir_intrinsic_cluster_size(instr);
4992 if (cluster_size == 0 || cluster_size > dispatch_width)
4993 cluster_size = dispatch_width;
4994
4995 /* Figure out the source type */
4996 src.type = brw_type_for_nir_type(devinfo,
4997 (nir_alu_type)(nir_op_infos[redop].input_types[0] |
4998 nir_src_bit_size(instr->src[0])));
4999
5000 fs_reg identity = brw_nir_reduction_op_identity(bld, redop, src.type);
5001 opcode brw_op = brw_op_for_nir_reduction_op(redop);
5002 brw_conditional_mod cond_mod = brw_cond_mod_for_nir_reduction_op(redop);
5003
5004 /* Set up a register for all of our scratching around and initialize it
5005 * to the reduction operation's identity value.
5006 */
5007 fs_reg scan = bld.vgrf(src.type);
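      /* SEL_EXEC picks src for channels that are enabled and the identity
       * value for channels that are not, so inactive channels do not affect
       * the scan.
       */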
5008 bld.exec_all().emit(SHADER_OPCODE_SEL_EXEC, scan, src, identity);
5009
5010 bld.emit_scan(brw_op, scan, cluster_size, cond_mod);
5011
5012 dest.type = src.type;
5013 if (cluster_size * type_sz(src.type) >= REG_SIZE * 2) {
5014 /* The CLUSTER_BROADCAST instruction isn't needed here because the
5015 * distance between clusters is at least 2 GRFs, so we can skip its
5016 * weird striding and read the per-cluster results with regular
5017 * MOVs.
5018 */
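         /* Worked example: a SIMD16 reduction on 64-bit data with
          * cluster_size == 8 gives groups == 2 and group_size == 8, so the
          * first half of the destination reads component 7 of the scan (the
          * end of cluster 0) and the second half reads component 15 (the end
          * of cluster 1).
          */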
5019 assert((cluster_size * type_sz(src.type)) % (REG_SIZE * 2) == 0);
5020 const unsigned groups =
5021 (dispatch_width * type_sz(src.type)) / (REG_SIZE * 2);
5022 const unsigned group_size = dispatch_width / groups;
5023 for (unsigned i = 0; i < groups; i++) {
5024 const unsigned cluster = (i * group_size) / cluster_size;
5025 const unsigned comp = cluster * cluster_size + (cluster_size - 1);
5026 bld.group(group_size, i).MOV(horiz_offset(dest, i * group_size),
5027 component(scan, comp));
5028 }
5029 } else {
5030 bld.emit(SHADER_OPCODE_CLUSTER_BROADCAST, dest, scan,
5031 brw_imm_ud(cluster_size - 1), brw_imm_ud(cluster_size));
5032 }
5033 break;
5034 }
5035
5036 case nir_intrinsic_inclusive_scan:
5037 case nir_intrinsic_exclusive_scan: {
5038 fs_reg src = get_nir_src(instr->src[0]);
5039 nir_op redop = (nir_op)nir_intrinsic_reduction_op(instr);
5040
5041 /* Figure out the source type */
5042 src.type = brw_type_for_nir_type(devinfo,
5043 (nir_alu_type)(nir_op_infos[redop].input_types[0] |
5044 nir_src_bit_size(instr->src[0])));
5045
5046 fs_reg identity = brw_nir_reduction_op_identity(bld, redop, src.type);
5047 opcode brw_op = brw_op_for_nir_reduction_op(redop);
5048 brw_conditional_mod cond_mod = brw_cond_mod_for_nir_reduction_op(redop);
5049
5050 /* Set up a register for all of our scratching around and initialize it
5051 * to the reduction operation's identity value.
5052 */
5053 fs_reg scan = bld.vgrf(src.type);
5054 const fs_builder allbld = bld.exec_all();
5055 allbld.emit(SHADER_OPCODE_SEL_EXEC, scan, src, identity);
5056
5057 if (instr->intrinsic == nir_intrinsic_exclusive_scan) {
5058 /* Exclusive scan is a bit harder because we have to do an annoying
5059 * shift of the contents before we can begin. To make things worse,
5060 * we can't do this with a normal stride; we have to use indirects.
5061 */
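         /* For example, in SIMD8 the computed indices are -1, 0, ..., 6, so
          * channel i reads channel i-1's value; whatever channel 0 picks up
          * from its out-of-range index is overwritten with the identity just
          * below.
          */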
5062 fs_reg shifted = bld.vgrf(src.type);
5063 fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_W);
5064 allbld.ADD(idx, nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION],
5065 brw_imm_w(-1));
5066 allbld.emit(SHADER_OPCODE_SHUFFLE, shifted, scan, idx);
5067 allbld.group(1, 0).MOV(component(shifted, 0), identity);
5068 scan = shifted;
5069 }
5070
5071 bld.emit_scan(brw_op, scan, dispatch_width, cond_mod);
5072
5073 bld.MOV(retype(dest, src.type), scan);
5074 break;
5075 }
5076
5077 case nir_intrinsic_begin_invocation_interlock: {
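      /* Hedged note: beginInvocationInterlock() is implemented here as a
       * single INTERLOCK send which, as far as this code is concerned,
       * stalls the thread until earlier overlapping fragment invocations
       * have finished their interlocked section; tmp just receives the
       * message's (unused) writeback.
       */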
5078 const fs_builder ubld = bld.group(8, 0);
5079 const fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
5080
5081 ubld.emit(SHADER_OPCODE_INTERLOCK, tmp, brw_vec8_grf(0, 0))
5082 ->size_written = 2 * REG_SIZE;
5083 break;
5084 }
5085
5086 case nir_intrinsic_end_invocation_interlock: {
5087 /* For endInvocationInterlock(), we need to insert a memory fence which
5088 * stalls in the shader until the memory transactions prior to that
5089 * fence are complete. This ensures that the shader does not end before
5090 * any writes from its critical section have landed. Otherwise, you can
5091 * end up with a case where the next invocation on that pixel properly
5092 * stalls for the previous FS invocation on its pixel to complete but
5093 * doesn't actually wait for the dataport memory transactions from that
5094 * thread to land before submitting its own.
5095 */
5096 const fs_builder ubld = bld.group(8, 0);
5097 const fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
5098 ubld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp,
5099 brw_vec8_grf(0, 0), brw_imm_ud(1))
5100 ->size_written = 2 * REG_SIZE;
5101 break;
5102 }
5103
5104 default:
5105 unreachable("unknown intrinsic");
5106 }
5107 }
5108
5109 void
5110 fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
5111 int op, nir_intrinsic_instr *instr)
5112 {
5113 if (stage == MESA_SHADER_FRAGMENT)
5114 brw_wm_prog_data(prog_data)->has_side_effects = true;
5115
5116 /* The BTI untyped atomic messages only support 32-bit atomics. If you just
5117 * look at the big table of messages in Vol 7 of the SKL PRM, 64-bit atomics
5118 * appear to exist. However, if you look at Vol 2a, there are no message
5119 * descriptors provided for Qword atomic ops except for A64 messages.
5120 */
5121 assert(nir_dest_bit_size(instr->dest) == 32);
5122
5123 fs_reg dest;
5124 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
5125 dest = get_nir_dest(instr->dest);
5126
5127 fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
5128 srcs[SURFACE_LOGICAL_SRC_SURFACE] = get_nir_ssbo_intrinsic_index(bld, instr);
5129 srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
5130 srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
5131 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
5132
5133 fs_reg data;
5134 if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
5135 data = get_nir_src(instr->src[2]);
5136
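   /* Compare-and-swap needs two data operands, but the logical atomic
    * message has a single data source, so pack both operands into one
    * two-component payload.
    */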
5137 if (op == BRW_AOP_CMPWR) {
5138 fs_reg tmp = bld.vgrf(data.type, 2);
5139 fs_reg sources[2] = { data, get_nir_src(instr->src[3]) };
5140 bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
5141 data = tmp;
5142 }
5143 srcs[SURFACE_LOGICAL_SRC_DATA] = data;
5144
5145 /* Emit the actual atomic operation */
5146
5147 bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL,
5148 dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
5149 }
5150
5151 void
5152 fs_visitor::nir_emit_ssbo_atomic_float(const fs_builder &bld,
5153 int op, nir_intrinsic_instr *instr)
5154 {
5155 if (stage == MESA_SHADER_FRAGMENT)
5156 brw_wm_prog_data(prog_data)->has_side_effects = true;
5157
5158 fs_reg dest;
5159 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
5160 dest = get_nir_dest(instr->dest);
5161
5162 fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
5163 srcs[SURFACE_LOGICAL_SRC_SURFACE] = get_nir_ssbo_intrinsic_index(bld, instr);
5164 srcs[SURFACE_LOGICAL_SRC_ADDRESS] = get_nir_src(instr->src[1]);
5165 srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
5166 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
5167
5168 fs_reg data = get_nir_src(instr->src[2]);
5169 if (op == BRW_AOP_FCMPWR) {
5170 fs_reg tmp = bld.vgrf(data.type, 2);
5171 fs_reg sources[2] = { data, get_nir_src(instr->src[3]) };
5172 bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
5173 data = tmp;
5174 }
5175 srcs[SURFACE_LOGICAL_SRC_DATA] = data;
5176
5177 /* Emit the actual atomic operation */
5178
5179 bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL,
5180 dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
5181 }
5182
5183 void
5184 fs_visitor::nir_emit_shared_atomic(const fs_builder &bld,
5185 int op, nir_intrinsic_instr *instr)
5186 {
5187 fs_reg dest;
5188 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
5189 dest = get_nir_dest(instr->dest);
5190
5191 fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
5192 srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GEN7_BTI_SLM);
5193 srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
5194 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
5195
5196 fs_reg data;
5197 if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
5198 data = get_nir_src(instr->src[1]);
5199 if (op == BRW_AOP_CMPWR) {
5200 fs_reg tmp = bld.vgrf(data.type, 2);
5201 fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
5202 bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
5203 data = tmp;
5204 }
5205 srcs[SURFACE_LOGICAL_SRC_DATA] = data;
5206
5207 /* Get the offset */
5208 if (nir_src_is_const(instr->src[0])) {
5209 srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
5210 brw_imm_ud(instr->const_index[0] + nir_src_as_uint(instr->src[0]));
5211 } else {
5212 srcs[SURFACE_LOGICAL_SRC_ADDRESS] = vgrf(glsl_type::uint_type);
5213 bld.ADD(srcs[SURFACE_LOGICAL_SRC_ADDRESS],
5214 retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
5215 brw_imm_ud(instr->const_index[0]));
5216 }
5217
5218 /* Emit the actual atomic operation */
5219
5220 bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL,
5221 dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
5222 }
5223
5224 void
5225 fs_visitor::nir_emit_shared_atomic_float(const fs_builder &bld,
5226 int op, nir_intrinsic_instr *instr)
5227 {
5228 fs_reg dest;
5229 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
5230 dest = get_nir_dest(instr->dest);
5231
5232 fs_reg srcs[SURFACE_LOGICAL_NUM_SRCS];
5233 srcs[SURFACE_LOGICAL_SRC_SURFACE] = brw_imm_ud(GEN7_BTI_SLM);
5234 srcs[SURFACE_LOGICAL_SRC_IMM_DIMS] = brw_imm_ud(1);
5235 srcs[SURFACE_LOGICAL_SRC_IMM_ARG] = brw_imm_ud(op);
5236
5237 fs_reg data = get_nir_src(instr->src[1]);
5238 if (op == BRW_AOP_FCMPWR) {
5239 fs_reg tmp = bld.vgrf(data.type, 2);
5240 fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
5241 bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
5242 data = tmp;
5243 }
5244 srcs[SURFACE_LOGICAL_SRC_DATA] = data;
5245
5246 /* Get the offset */
5247 if (nir_src_is_const(instr->src[0])) {
5248 srcs[SURFACE_LOGICAL_SRC_ADDRESS] =
5249 brw_imm_ud(instr->const_index[0] + nir_src_as_uint(instr->src[0]));
5250 } else {
5251 srcs[SURFACE_LOGICAL_SRC_ADDRESS] = vgrf(glsl_type::uint_type);
5252 bld.ADD(srcs[SURFACE_LOGICAL_SRC_ADDRESS],
5253 retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
5254 brw_imm_ud(instr->const_index[0]));
5255 }
5256
5257 /* Emit the actual atomic operation */
5258
5259 bld.emit(SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL,
5260 dest, srcs, SURFACE_LOGICAL_NUM_SRCS);
5261 }
5262
5263 void
5264 fs_visitor::nir_emit_global_atomic(const fs_builder &bld,
5265 int op, nir_intrinsic_instr *instr)
5266 {
5267 if (stage == MESA_SHADER_FRAGMENT)
5268 brw_wm_prog_data(prog_data)->has_side_effects = true;
5269
5270 fs_reg dest;
5271 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
5272 dest = get_nir_dest(instr->dest);
5273
5274 fs_reg addr = get_nir_src(instr->src[0]);
5275
5276 fs_reg data;
5277 if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
5278 data = get_nir_src(instr->src[1]);
5279
5280 if (op == BRW_AOP_CMPWR) {
5281 fs_reg tmp = bld.vgrf(data.type, 2);
5282 fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
5283 bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
5284 data = tmp;
5285 }
5286
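   /* Unlike the BTI untyped atomic messages, the A64 messages do provide
    * Qword atomics, so pick the 64-bit opcode when the destination is
    * 64 bits wide.
    */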
5287 if (nir_dest_bit_size(instr->dest) == 64) {
5288 bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_INT64_LOGICAL,
5289 dest, addr, data, brw_imm_ud(op));
5290 } else {
5291 assert(nir_dest_bit_size(instr->dest) == 32);
5292 bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL,
5293 dest, addr, data, brw_imm_ud(op));
5294 }
5295 }
5296
5297 void
5298 fs_visitor::nir_emit_global_atomic_float(const fs_builder &bld,
5299 int op, nir_intrinsic_instr *instr)
5300 {
5301 if (stage == MESA_SHADER_FRAGMENT)
5302 brw_wm_prog_data(prog_data)->has_side_effects = true;
5303
5304 assert(nir_intrinsic_infos[instr->intrinsic].has_dest);
5305 fs_reg dest = get_nir_dest(instr->dest);
5306
5307 fs_reg addr = get_nir_src(instr->src[0]);
5308
5309 assert(op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC);
5310 fs_reg data = get_nir_src(instr->src[1]);
5311
5312 if (op == BRW_AOP_FCMPWR) {
5313 fs_reg tmp = bld.vgrf(data.type, 2);
5314 fs_reg sources[2] = { data, get_nir_src(instr->src[2]) };
5315 bld.LOAD_PAYLOAD(tmp, sources, 2, 0);
5316 data = tmp;
5317 }
5318
5319 bld.emit(SHADER_OPCODE_A64_UNTYPED_ATOMIC_LOGICAL,
5320 dest, addr, data, brw_imm_ud(op));
5321 }
5322
5323 void
5324 fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
5325 {
5326 unsigned texture = instr->texture_index;
5327 unsigned sampler = instr->sampler_index;
5328
5329 fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
5330
5331 srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(texture);
5332 srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_ud(sampler);
5333
5334 int lod_components = 0;
5335
5336 /* The hardware requires a LOD for buffer textures */
5337 if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
5338 srcs[TEX_LOGICAL_SRC_LOD] = brw_imm_d(0);
5339
5340 uint32_t header_bits = 0;
5341 for (unsigned i = 0; i < instr->num_srcs; i++) {
5342 fs_reg src = get_nir_src(instr->src[i].src);
5343 switch (instr->src[i].src_type) {
5344 case nir_tex_src_bias:
5345 srcs[TEX_LOGICAL_SRC_LOD] =
5346 retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
5347 break;
5348 case nir_tex_src_comparator:
5349 srcs[TEX_LOGICAL_SRC_SHADOW_C] = retype(src, BRW_REGISTER_TYPE_F);
5350 break;
5351 case nir_tex_src_coord:
5352 switch (instr->op) {
5353 case nir_texop_txf:
5354 case nir_texop_txf_ms:
5355 case nir_texop_txf_ms_mcs:
5356 case nir_texop_samples_identical:
5357 srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_REGISTER_TYPE_D);
5358 break;
5359 default:
5360 srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_REGISTER_TYPE_F);
5361 break;
5362 }
5363 break;
5364 case nir_tex_src_ddx:
5365 srcs[TEX_LOGICAL_SRC_LOD] = retype(src, BRW_REGISTER_TYPE_F);
5366 lod_components = nir_tex_instr_src_size(instr, i);
5367 break;
5368 case nir_tex_src_ddy:
5369 srcs[TEX_LOGICAL_SRC_LOD2] = retype(src, BRW_REGISTER_TYPE_F);
5370 break;
5371 case nir_tex_src_lod:
5372 switch (instr->op) {
5373 case nir_texop_txs:
5374 srcs[TEX_LOGICAL_SRC_LOD] =
5375 retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_UD);
5376 break;
5377 case nir_texop_txf:
5378 srcs[TEX_LOGICAL_SRC_LOD] =
5379 retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_D);
5380 break;
5381 default:
5382 srcs[TEX_LOGICAL_SRC_LOD] =
5383 retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
5384 break;
5385 }
5386 break;
5387 case nir_tex_src_min_lod:
5388 srcs[TEX_LOGICAL_SRC_MIN_LOD] =
5389 retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
5390 break;
5391 case nir_tex_src_ms_index:
5392 srcs[TEX_LOGICAL_SRC_SAMPLE_INDEX] = retype(src, BRW_REGISTER_TYPE_UD);
5393 break;
5394
5395 case nir_tex_src_offset: {
5396 uint32_t offset_bits = 0;
5397 if (brw_texture_offset(instr, i, &offset_bits)) {
5398 header_bits |= offset_bits;
5399 } else {
5400 srcs[TEX_LOGICAL_SRC_TG4_OFFSET] =
5401 retype(src, BRW_REGISTER_TYPE_D);
5402 }
5403 break;
5404 }
5405
5406 case nir_tex_src_projector:
5407 unreachable("should be lowered");
5408
5409 case nir_tex_src_texture_offset: {
5410 /* Emit code to evaluate the actual indexing expression */
5411 fs_reg tmp = vgrf(glsl_type::uint_type);
5412 bld.ADD(tmp, src, brw_imm_ud(texture));
5413 srcs[TEX_LOGICAL_SRC_SURFACE] = bld.emit_uniformize(tmp);
5414 break;
5415 }
5416
5417 case nir_tex_src_sampler_offset: {
5418 /* Emit code to evaluate the actual indexing expression */
5419 fs_reg tmp = vgrf(glsl_type::uint_type);
5420 bld.ADD(tmp, src, brw_imm_ud(sampler));
5421 srcs[TEX_LOGICAL_SRC_SAMPLER] = bld.emit_uniformize(tmp);
5422 break;
5423 }
5424
5425 case nir_tex_src_texture_handle:
5426 assert(nir_tex_instr_src_index(instr, nir_tex_src_texture_offset) == -1);
5427 srcs[TEX_LOGICAL_SRC_SURFACE] = fs_reg();
5428 srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE] = bld.emit_uniformize(src);
5429 break;
5430
5431 case nir_tex_src_sampler_handle:
5432 assert(nir_tex_instr_src_index(instr, nir_tex_src_sampler_offset) == -1);
5433 srcs[TEX_LOGICAL_SRC_SAMPLER] = fs_reg();
5434 srcs[TEX_LOGICAL_SRC_SAMPLER_HANDLE] = bld.emit_uniformize(src);
5435 break;
5436
5437 case nir_tex_src_ms_mcs:
5438 assert(instr->op == nir_texop_txf_ms);
5439 srcs[TEX_LOGICAL_SRC_MCS] = retype(src, BRW_REGISTER_TYPE_D);
5440 break;
5441
5442 case nir_tex_src_plane: {
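         /* Hedged note: multi-planar (e.g. YUV) textures get one binding
          * table entry per plane; rebase the texture index from the regular
          * texture section of the binding table onto the plane_start section
          * for the requested plane.
          */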
5443 const uint32_t plane = nir_src_as_uint(instr->src[i].src);
5444 const uint32_t texture_index =
5445 instr->texture_index +
5446 stage_prog_data->binding_table.plane_start[plane] -
5447 stage_prog_data->binding_table.texture_start;
5448
5449 srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(texture_index);
5450 break;
5451 }
5452
5453 default:
5454 unreachable("unknown texture source");
5455 }
5456 }
5457
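   /* Descriptive note: txf_ms and samples_identical need an MCS value.  If
    * the surface uses a compressed multisample layout, fetch it with a
    * separate MCS fetch; otherwise an all-zero MCS stands in for the
    * uncompressed case.
    */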
5458 if (srcs[TEX_LOGICAL_SRC_MCS].file == BAD_FILE &&
5459 (instr->op == nir_texop_txf_ms ||
5460 instr->op == nir_texop_samples_identical)) {
5461 if (devinfo->gen >= 7 &&
5462 key_tex->compressed_multisample_layout_mask & (1 << texture)) {
5463 srcs[TEX_LOGICAL_SRC_MCS] =
5464 emit_mcs_fetch(srcs[TEX_LOGICAL_SRC_COORDINATE],
5465 instr->coord_components,
5466 srcs[TEX_LOGICAL_SRC_SURFACE],
5467 srcs[TEX_LOGICAL_SRC_SURFACE_HANDLE]);
5468 } else {
5469 srcs[TEX_LOGICAL_SRC_MCS] = brw_imm_ud(0u);
5470 }
5471 }
5472
5473 srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(instr->coord_components);
5474 srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(lod_components);
5475
5476 enum opcode opcode;
5477 switch (instr->op) {
5478 case nir_texop_tex:
5479 opcode = SHADER_OPCODE_TEX_LOGICAL;
5480 break;
5481 case nir_texop_txb:
5482 opcode = FS_OPCODE_TXB_LOGICAL;
5483 break;
5484 case nir_texop_txl:
5485 opcode = SHADER_OPCODE_TXL_LOGICAL;
5486 break;
5487 case nir_texop_txd:
5488 opcode = SHADER_OPCODE_TXD_LOGICAL;
5489 break;
5490 case nir_texop_txf:
5491 opcode = SHADER_OPCODE_TXF_LOGICAL;
5492 break;
5493 case nir_texop_txf_ms:
5494 if ((key_tex->msaa_16 & (1 << sampler)))
5495 opcode = SHADER_OPCODE_TXF_CMS_W_LOGICAL;
5496 else
5497 opcode = SHADER_OPCODE_TXF_CMS_LOGICAL;
5498 break;
5499 case nir_texop_txf_ms_mcs:
5500 opcode = SHADER_OPCODE_TXF_MCS_LOGICAL;
5501 break;
5502 case nir_texop_query_levels:
5503 case nir_texop_txs:
5504 opcode = SHADER_OPCODE_TXS_LOGICAL;
5505 break;
5506 case nir_texop_lod:
5507 opcode = SHADER_OPCODE_LOD_LOGICAL;
5508 break;
5509 case nir_texop_tg4:
5510 if (srcs[TEX_LOGICAL_SRC_TG4_OFFSET].file != BAD_FILE)
5511 opcode = SHADER_OPCODE_TG4_OFFSET_LOGICAL;
5512 else
5513 opcode = SHADER_OPCODE_TG4_LOGICAL;
5514 break;
5515 case nir_texop_texture_samples:
5516 opcode = SHADER_OPCODE_SAMPLEINFO_LOGICAL;
5517 break;
5518 case nir_texop_samples_identical: {
5519 fs_reg dst = retype(get_nir_dest(instr->dest), BRW_REGISTER_TYPE_D);
5520
5521 /* If mcs is an immediate value, it means there is no MCS. In that case
5522 * just return false.
5523 */
5524 if (srcs[TEX_LOGICAL_SRC_MCS].file == BRW_IMMEDIATE_VALUE) {
5525 bld.MOV(dst, brw_imm_ud(0u));
5526 } else if ((key_tex->msaa_16 & (1 << sampler))) {
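         /* With 16x MSAA the MCS value is 64 bits wide, so OR the two dwords
          * together before comparing the whole thing against zero.
          */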
5527 fs_reg tmp = vgrf(glsl_type::uint_type);
5528 bld.OR(tmp, srcs[TEX_LOGICAL_SRC_MCS],
5529 offset(srcs[TEX_LOGICAL_SRC_MCS], bld, 1));
5530 bld.CMP(dst, tmp, brw_imm_ud(0u), BRW_CONDITIONAL_EQ);
5531 } else {
5532 bld.CMP(dst, srcs[TEX_LOGICAL_SRC_MCS], brw_imm_ud(0u),
5533 BRW_CONDITIONAL_EQ);
5534 }
5535 return;
5536 }
5537 default:
5538 unreachable("unknown texture opcode");
5539 }
5540
5541 if (instr->op == nir_texop_tg4) {
5542 if (instr->component == 1 &&
5543 key_tex->gather_channel_quirk_mask & (1 << texture)) {
5544 /* gather4 sampler is broken for green channel on RG32F --
5545 * we must ask for blue instead.
5546 */
5547 header_bits |= 2 << 16;
5548 } else {
5549 header_bits |= instr->component << 16;
5550 }
5551 }
5552
5553 fs_reg dst = bld.vgrf(brw_type_for_nir_type(devinfo, instr->dest_type), 4);
5554 fs_inst *inst = bld.emit(opcode, dst, srcs, ARRAY_SIZE(srcs));
5555 inst->offset = header_bits;
5556
5557 const unsigned dest_size = nir_tex_instr_dest_size(instr);
5558 if (devinfo->gen >= 9 &&
5559 instr->op != nir_texop_tg4 && instr->op != nir_texop_query_levels) {
5560 unsigned write_mask = instr->dest.is_ssa ?
5561 nir_ssa_def_components_read(&instr->dest.ssa):
5562 (1 << dest_size) - 1;
5563 assert(write_mask != 0); /* dead code should have been eliminated */
5564 inst->size_written = util_last_bit(write_mask) *
5565 inst->dst.component_size(inst->exec_size);
5566 } else {
5567 inst->size_written = 4 * inst->dst.component_size(inst->exec_size);
5568 }
5569
5570 if (srcs[TEX_LOGICAL_SRC_SHADOW_C].file != BAD_FILE)
5571 inst->shadow_compare = true;
5572
5573 if (instr->op == nir_texop_tg4 && devinfo->gen == 6)
5574 emit_gen6_gather_wa(key_tex->gen6_gather_wa[texture], dst);
5575
5576 fs_reg nir_dest[4];
5577 for (unsigned i = 0; i < dest_size; i++)
5578 nir_dest[i] = offset(dst, bld, i);
5579
5580 if (instr->op == nir_texop_query_levels) {
5581 /* # levels is in .w */
5582 nir_dest[0] = offset(dst, bld, 3);
5583 } else if (instr->op == nir_texop_txs &&
5584 dest_size >= 3 && devinfo->gen < 7) {
5585 /* Gen4-6 return 0 instead of 1 for single layer surfaces. */
5586 fs_reg depth = offset(dst, bld, 2);
5587 nir_dest[2] = vgrf(glsl_type::int_type);
5588 bld.emit_minmax(nir_dest[2], depth, brw_imm_d(1), BRW_CONDITIONAL_GE);
5589 }
5590
5591 bld.LOAD_PAYLOAD(get_nir_dest(instr->dest), nir_dest, dest_size, 0);
5592 }
5593
5594 void
5595 fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
5596 {
5597 switch (instr->type) {
5598 case nir_jump_break:
5599 bld.emit(BRW_OPCODE_BREAK);
5600 break;
5601 case nir_jump_continue:
5602 bld.emit(BRW_OPCODE_CONTINUE);
5603 break;
5604 case nir_jump_return:
5605 default:
5606 unreachable("unknown jump");
5607 }
5608 }
5609
5610 /*
5611 * This helper takes a source register and un/shuffles it into the destination
5612 * register.
5613 *
5614 * If the source type size is smaller than the destination type size, the
5615 * operation needed is a component shuffle. The opposite case is an
5616 * unshuffle. If the source and destination type sizes are equal, the
5617 * shuffle is equivalent to a simple MOV.
5618 *
5619 * For example, suppose the source is a 16-bit type and the destination is
5620 * 32-bit. A 3-component .xyz 16-bit vector on SIMD8 would be:
5621 *
5622 * |x1|x2|x3|x4|x5|x6|x7|x8|y1|y2|y3|y4|y5|y6|y7|y8|
5623 * |z1|z2|z3|z4|z5|z6|z7|z8| | | | | | | | |
5624 *
5625 * This helper will return the following 2 32-bit components with the 16-bit
5626 * values shuffled:
5627 *
5628 * |x1 y1|x2 y2|x3 y3|x4 y4|x5 y5|x6 y6|x7 y7|x8 y8|
5629 * |z1 |z2 |z3 |z4 |z5 |z6 |z7 |z8 |
5630 *
5631 * For an unshuffle, the example would be the opposite: a 64-bit source
5632 * and a 32-bit destination. A 2-component .xy 64-bit vector on SIMD8
5633 * would be:
5634 *
5635 * | x1l x1h | x2l x2h | x3l x3h | x4l x4h |
5636 * | x5l x5h | x6l x6h | x7l x7h | x8l x8h |
5637 * | y1l y1h | y2l y2h | y3l y3h | y4l y4h |
5638 * | y5l y5h | y6l y6h | y7l y7h | y8l y8h |
5639 *
5640 * The returned result would be the following 4 32-bit components unshuffled:
5641 *
5642 * | x1l | x2l | x3l | x4l | x5l | x6l | x7l | x8l |
5643 * | x1h | x2h | x3h | x4h | x5h | x6h | x7h | x8h |
5644 * | y1l | y2l | y3l | y4l | y5l | y6l | y7l | y8l |
5645 * | y1h | y2h | y3h | y4h | y5h | y6h | y7h | y8h |
5646 *
5647 * - The source and destination registers must not overlap.
5648 * - Component units are measured in terms of the smaller of the source
5649 * and destination types, because we are un/shuffling the smaller
5650 * components from/into the bigger ones.
5651 * - The first_component parameter allows skipping source components.
5652 */
5653 void
5654 shuffle_src_to_dst(const fs_builder &bld,
5655 const fs_reg &dst,
5656 const fs_reg &src,
5657 uint32_t first_component,
5658 uint32_t components)
5659 {
5660 if (type_sz(src.type) == type_sz(dst.type)) {
5661 assert(!regions_overlap(dst,
5662 type_sz(dst.type) * bld.dispatch_width() * components,
5663 offset(src, bld, first_component),
5664 type_sz(src.type) * bld.dispatch_width() * components));
5665 for (unsigned i = 0; i < components; i++) {
5666 bld.MOV(retype(offset(dst, bld, i), src.type),
5667 offset(src, bld, i + first_component));
5668 }
5669 } else if (type_sz(src.type) < type_sz(dst.type)) {
5670 /* Source is shuffled into destination */
5671 unsigned size_ratio = type_sz(dst.type) / type_sz(src.type);
5672 assert(!regions_overlap(dst,
5673 type_sz(dst.type) * bld.dispatch_width() *
5674 DIV_ROUND_UP(components, size_ratio),
5675 offset(src, bld, first_component),
5676 type_sz(src.type) * bld.dispatch_width() * components));
5677
5678 brw_reg_type shuffle_type =
5679 brw_reg_type_from_bit_size(8 * type_sz(src.type),
5680 BRW_REGISTER_TYPE_D);
5681 for (unsigned i = 0; i < components; i++) {
5682 fs_reg shuffle_component_i =
5683 subscript(offset(dst, bld, i / size_ratio),
5684 shuffle_type, i % size_ratio);
5685 bld.MOV(shuffle_component_i,
5686 retype(offset(src, bld, i + first_component), shuffle_type));
5687 }
5688 } else {
5689 /* Source is unshuffled into destination */
5690 unsigned size_ratio = type_sz(src.type) / type_sz(dst.type);
5691 assert(!regions_overlap(dst,
5692 type_sz(dst.type) * bld.dispatch_width() * components,
5693 offset(src, bld, first_component / size_ratio),
5694 type_sz(src.type) * bld.dispatch_width() *
5695 DIV_ROUND_UP(components + (first_component % size_ratio),
5696 size_ratio)));
5697
5698 brw_reg_type shuffle_type =
5699 brw_reg_type_from_bit_size(8 * type_sz(dst.type),
5700 BRW_REGISTER_TYPE_D);
5701 for (unsigned i = 0; i < components; i++) {
5702 fs_reg shuffle_component_i =
5703 subscript(offset(src, bld, (first_component + i) / size_ratio),
5704 shuffle_type, (first_component + i) % size_ratio);
5705 bld.MOV(retype(offset(dst, bld, i), shuffle_type),
5706 shuffle_component_i);
5707 }
5708 }
5709 }
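
/* A minimal usage sketch (dst64 and src32 are hypothetical registers; dst64
 * is 64-bit-typed and src32 is 32-bit-typed):
 *
 *    shuffle_src_to_dst(bld, dst64, src32, 0, 4);
 *
 * packs four 32-bit components of src32 into the low/high halves of two
 * 64-bit components of dst64, as in the shuffle example above.
 */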
5710
5711 void
5712 shuffle_from_32bit_read(const fs_builder &bld,
5713 const fs_reg &dst,
5714 const fs_reg &src,
5715 uint32_t first_component,
5716 uint32_t components)
5717 {
5718 assert(type_sz(src.type) == 4);
5719
5720 /* This function takes components in units of the destination type while
5721 * shuffle_src_to_dst takes components in units of the smallest type.
5722 */
5723 if (type_sz(dst.type) > 4) {
5724 assert(type_sz(dst.type) == 8);
5725 first_component *= 2;
5726 components *= 2;
5727 }
5728
5729 shuffle_src_to_dst(bld, dst, src, first_component, components);
5730 }
5731
5732 fs_reg
5733 shuffle_for_32bit_write(const fs_builder &bld,
5734 const fs_reg &src,
5735 uint32_t first_component,
5736 uint32_t components)
5737 {
5738 fs_reg dst = bld.vgrf(BRW_REGISTER_TYPE_D,
5739 DIV_ROUND_UP(components * type_sz(src.type), 4));
5740 /* This function takes components in units of the source type while
5741 * shuffle_src_to_dst takes components in units of the smallest type
5742 */
5743 if (type_sz(src.type) > 4) {
5744 assert(type_sz(src.type) == 8);
5745 first_component *= 2;
5746 components *= 2;
5747 }
5748
5749 shuffle_src_to_dst(bld, dst, src, first_component, components);
5750
5751 return dst;
5752 }
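
/* A minimal usage sketch (src64 is a hypothetical 64-bit-typed register):
 *
 *    fs_reg packed = shuffle_for_32bit_write(bld, src64, 0, 2);
 *
 * allocates a 4-dword VGRF and fills it with the low/high dwords of the two
 * 64-bit components, ready for a 32-bit block write.
 */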
5753
5754 fs_reg
5755 setup_imm_df(const fs_builder &bld, double v)
5756 {
5757 const struct gen_device_info *devinfo = bld.shader->devinfo;
5758 assert(devinfo->gen >= 7);
5759
5760 if (devinfo->gen >= 8)
5761 return brw_imm_df(v);
5762
5763 /* gen7.5 does not support DF immediates directly, but the DIM
5764 * instruction allows setting a 64-bit immediate value.
5765 */
5766 if (devinfo->is_haswell) {
5767 const fs_builder ubld = bld.exec_all().group(1, 0);
5768 fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_DF, 1);
5769 ubld.DIM(dst, brw_imm_df(v));
5770 return component(dst, 0);
5771 }
5772
5773 /* gen7 does not support DF immediates, so we generate a 64-bit constant by
5774 * writing the low 32 bits of the constant to suboffset 0 of a VGRF and
5775 * the high 32 bits to suboffset 4, then applying a stride of 0.
5776 *
5777 * Alternatively, we could produce a normal VGRF (without stride 0) by
5778 * writing to all the channels in the VGRF. However, that would hit the
5779 * gen7 bug where we have to split writes that span more than one register
5780 * into instructions with a width of 4 (otherwise the write to the second
5781 * register written runs into an execmask hardware bug), which isn't
5782 * very nice.
5783 */
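   /* Worked example (on a little-endian host): v = 1.0 has the IEEE-754
    * pattern 0x3ff0000000000000, so di.i1 = 0x00000000 holds the low dword
    * and di.i2 = 0x3ff00000 holds the high dword.
    */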
5784 union {
5785 double d;
5786 struct {
5787 uint32_t i1;
5788 uint32_t i2;
5789 };
5790 } di;
5791
5792 di.d = v;
5793
5794 const fs_builder ubld = bld.exec_all().group(1, 0);
5795 const fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
5796 ubld.MOV(tmp, brw_imm_ud(di.i1));
5797 ubld.MOV(horiz_offset(tmp, 1), brw_imm_ud(di.i2));
5798
5799 return component(retype(tmp, BRW_REGISTER_TYPE_DF), 0);
5800 }
5801
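/* Hedged note: the Gen ISA has no byte-sized immediates, so setup_imm_b()
 * and setup_imm_ub() materialize the value by MOVing a word immediate into a
 * byte-typed VGRF.
 */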
5802 fs_reg
5803 setup_imm_b(const fs_builder &bld, int8_t v)
5804 {
5805 const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_B);
5806 bld.MOV(tmp, brw_imm_w(v));
5807 return tmp;
5808 }
5809
5810 fs_reg
5811 setup_imm_ub(const fs_builder &bld, uint8_t v)
5812 {
5813 const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UB);
5814 bld.MOV(tmp, brw_imm_uw(v));
5815 return tmp;
5816 }