1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "compiler/glsl/ir.h"
25 #include "brw_fs.h"
26 #include "brw_fs_surface_builder.h"
27 #include "brw_nir.h"
28
29 using namespace brw;
30 using namespace brw::surface_access;
31
32 void
33 fs_visitor::emit_nir_code()
34 {
35 /* emit the arrays used for inputs and outputs - load/store intrinsics will
36 * be converted to reads/writes of these arrays
37 */
38 nir_setup_outputs();
39 nir_setup_uniforms();
40 nir_emit_system_values();
41
42 /* get the main function and emit it */
43 nir_foreach_function(function, nir) {
44 assert(strcmp(function->name, "main") == 0);
45 assert(function->impl);
46 nir_emit_impl(function->impl);
47 }
48 }
49
50 void
51 fs_visitor::nir_setup_outputs()
52 {
53 if (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_FRAGMENT)
54 return;
55
56 unsigned vec4s[VARYING_SLOT_TESS_MAX] = { 0, };
57
58 /* Calculate the size of output registers in a separate pass, before
59 * allocating them. With ARB_enhanced_layouts, multiple output variables
60 * may occupy the same slot, but have different type sizes.
61 */
62 nir_foreach_variable(var, &nir->outputs) {
63 const int loc = var->data.driver_location;
64 const unsigned var_vec4s =
65 var->data.compact ? DIV_ROUND_UP(glsl_get_length(var->type), 4)
66 : type_size_vec4(var->type);
67 vec4s[loc] = MAX2(vec4s[loc], var_vec4s);
68 }
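/* Illustrative example (hypothetical values, added for clarity): with
 * ARB_enhanced_layouts two output variables may share driver_location 2,
 * say a 64-bit output occupying two vec4 slots and a 32-bit output
 * occupying one; the loop above then leaves vec4s[2] == 2, and the
 * allocation below backs the slot with a single 8-component VGRF.
 */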
69
70 nir_foreach_variable(var, &nir->outputs) {
71 const int loc = var->data.driver_location;
72 if (outputs[loc].file == BAD_FILE) {
73 fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_F, 4 * vec4s[loc]);
74 for (unsigned i = 0; i < vec4s[loc]; i++) {
75 outputs[loc + i] = offset(reg, bld, 4 * i);
76 }
77 }
78 }
79 }
80
81 void
82 fs_visitor::nir_setup_uniforms()
83 {
84 /* Only the first compile gets to set up uniforms. */
85 if (push_constant_loc) {
86 assert(pull_constant_loc);
87 return;
88 }
89
90 uniforms = nir->num_uniforms / 4;
91
92 if (stage == MESA_SHADER_COMPUTE) {
93 /* Add a uniform for the thread local id. It must be the last uniform
94 * on the list.
95 */
96 assert(uniforms == prog_data->nr_params);
97 uint32_t *param = brw_stage_prog_data_add_params(prog_data, 1);
98 *param = BRW_PARAM_BUILTIN_SUBGROUP_ID;
99 subgroup_id = fs_reg(UNIFORM, uniforms++, BRW_REGISTER_TYPE_UD);
100 }
101 }
102
103 static bool
104 emit_system_values_block(nir_block *block, fs_visitor *v)
105 {
106 fs_reg *reg;
107
108 nir_foreach_instr(instr, block) {
109 if (instr->type != nir_instr_type_intrinsic)
110 continue;
111
112 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
113 switch (intrin->intrinsic) {
114 case nir_intrinsic_load_vertex_id:
115 unreachable("should be lowered by lower_vertex_id().");
116
117 case nir_intrinsic_load_vertex_id_zero_base:
118 case nir_intrinsic_load_base_vertex:
119 case nir_intrinsic_load_instance_id:
120 case nir_intrinsic_load_base_instance:
121 case nir_intrinsic_load_draw_id:
122 unreachable("should be lowered by brw_nir_lower_vs_inputs().");
123
124 case nir_intrinsic_load_invocation_id:
125 if (v->stage == MESA_SHADER_TESS_CTRL)
126 break;
127 assert(v->stage == MESA_SHADER_GEOMETRY);
128 reg = &v->nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
129 if (reg->file == BAD_FILE) {
130 const fs_builder abld = v->bld.annotate("gl_InvocationID", NULL);
131 fs_reg g1(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
132 fs_reg iid = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
133 abld.SHR(iid, g1, brw_imm_ud(27u));
134 *reg = iid;
135 }
136 break;
137
138 case nir_intrinsic_load_sample_pos:
139 assert(v->stage == MESA_SHADER_FRAGMENT);
140 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
141 if (reg->file == BAD_FILE)
142 *reg = *v->emit_samplepos_setup();
143 break;
144
145 case nir_intrinsic_load_sample_id:
146 assert(v->stage == MESA_SHADER_FRAGMENT);
147 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
148 if (reg->file == BAD_FILE)
149 *reg = *v->emit_sampleid_setup();
150 break;
151
152 case nir_intrinsic_load_sample_mask_in:
153 assert(v->stage == MESA_SHADER_FRAGMENT);
154 assert(v->devinfo->gen >= 7);
155 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
156 if (reg->file == BAD_FILE)
157 *reg = *v->emit_samplemaskin_setup();
158 break;
159
160 case nir_intrinsic_load_work_group_id:
161 assert(v->stage == MESA_SHADER_COMPUTE);
162 reg = &v->nir_system_values[SYSTEM_VALUE_WORK_GROUP_ID];
163 if (reg->file == BAD_FILE)
164 *reg = *v->emit_cs_work_group_id_setup();
165 break;
166
167 case nir_intrinsic_load_helper_invocation:
168 assert(v->stage == MESA_SHADER_FRAGMENT);
169 reg = &v->nir_system_values[SYSTEM_VALUE_HELPER_INVOCATION];
170 if (reg->file == BAD_FILE) {
171 const fs_builder abld =
172 v->bld.annotate("gl_HelperInvocation", NULL);
173
174 /* On Gen6+ (gl_HelperInvocation is only exposed on Gen7+) the
175 * pixel mask is in g1.7 of the thread payload.
176 *
177 * We move the per-channel pixel enable bit to the low bit of each
178 * channel by shifting the byte containing the pixel mask by the
179 * vector immediate 0x76543210UV.
180 *
181 * The region of <1,8,0> reads only 1 byte (the pixel masks for
182 * subspans 0 and 1) in SIMD8 and an additional byte (the pixel
183 * masks for 2 and 3) in SIMD16.
184 */
185 fs_reg shifted = abld.vgrf(BRW_REGISTER_TYPE_UW, 1);
186 abld.SHR(shifted,
187 stride(byte_offset(retype(brw_vec1_grf(1, 0),
188 BRW_REGISTER_TYPE_UB), 28),
189 1, 8, 0),
190 brw_imm_v(0x76543210));
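/* Worked example (assumed mask value, illustration only): if the
 * pixel-mask byte is 0xB5 = 0b10110101, the vector immediate makes
 * channel i shift right by i, so shifted[i] = 0xB5 >> i and bit 0 of
 * each channel holds that channel's enable bit (here channels 0, 2, 4,
 * 5 and 7 are enabled).
 */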
191
192 /* A set bit in the pixel mask means the channel is enabled, but
193 * that is the opposite of gl_HelperInvocation so we need to invert
194 * the mask.
195 *
196 * The negate source-modifier bit of logical instructions on Gen8+
197 * performs 1's complement negation, so we can use that instead of
198 * a NOT instruction.
199 */
200 fs_reg inverted = negate(shifted);
201 if (v->devinfo->gen < 8) {
202 inverted = abld.vgrf(BRW_REGISTER_TYPE_UW);
203 abld.NOT(inverted, shifted);
204 }
205
206 /* We then resolve the 0/1 result to 0/~0 boolean values by ANDing
207 * with 1 and negating.
208 */
209 fs_reg anded = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
210 abld.AND(anded, inverted, brw_imm_uw(1));
211
212 fs_reg dst = abld.vgrf(BRW_REGISTER_TYPE_D, 1);
213 abld.MOV(dst, negate(retype(anded, BRW_REGISTER_TYPE_D)));
214 *reg = dst;
215 }
216 break;
217
218 default:
219 break;
220 }
221 }
222
223 return true;
224 }
225
226 void
227 fs_visitor::nir_emit_system_values()
228 {
229 nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
230 for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
231 nir_system_values[i] = fs_reg();
232 }
233
234 /* Always emit SUBGROUP_INVOCATION. Dead code will clean it up if we
235 * never end up using it.
236 */
237 {
238 const fs_builder abld = bld.annotate("gl_SubgroupInvocation", NULL);
239 fs_reg &reg = nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION];
240 reg = abld.vgrf(BRW_REGISTER_TYPE_W);
241
242 const fs_builder allbld8 = abld.group(8, 0).exec_all();
243 allbld8.MOV(reg, brw_imm_v(0x76543210));
244 if (dispatch_width > 8)
245 allbld8.ADD(byte_offset(reg, 16), reg, brw_imm_uw(8u));
246 if (dispatch_width > 16) {
247 const fs_builder allbld16 = abld.group(16, 0).exec_all();
248 allbld16.ADD(byte_offset(reg, 32), reg, brw_imm_uw(16u));
249 }
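/* Sketch of the resulting lane IDs (illustrative): the SIMD8 MOV of the
 * 0x76543210 vector immediate writes 0..7 into the first eight W-sized
 * channels; for SIMD16 the ADD at byte offset 16 (channel 8) writes
 * those values plus 8 into channels 8..15, and for SIMD32 the SIMD16
 * ADD at byte offset 32 writes 16..31 into channels 16..31.
 */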
250 }
251
252 nir_foreach_function(function, nir) {
253 assert(strcmp(function->name, "main") == 0);
254 assert(function->impl);
255 nir_foreach_block(block, function->impl) {
256 emit_system_values_block(block, this);
257 }
258 }
259 }
260
261 /*
262 * Returns a type based on a reference_type (word, float, half-float) and a
263 * given bit_size.
264 *
265 * Valid reference BRW_REGISTER_TYPEs are HF, F, DF, W, D, Q, UW, UD and UQ.
266 *
267 * @FIXME: 64-bit return types are always DF on integer types to maintain
268 * compatibility with uses of DF prior to the introduction of int64
269 * support.
270 */
271 static brw_reg_type
272 brw_reg_type_from_bit_size(const unsigned bit_size,
273 const brw_reg_type reference_type)
274 {
275 switch(reference_type) {
276 case BRW_REGISTER_TYPE_HF:
277 case BRW_REGISTER_TYPE_F:
278 case BRW_REGISTER_TYPE_DF:
279 switch(bit_size) {
280 case 16:
281 return BRW_REGISTER_TYPE_HF;
282 case 32:
283 return BRW_REGISTER_TYPE_F;
284 case 64:
285 return BRW_REGISTER_TYPE_DF;
286 default:
287 unreachable("Invalid bit size");
288 }
289 case BRW_REGISTER_TYPE_W:
290 case BRW_REGISTER_TYPE_D:
291 case BRW_REGISTER_TYPE_Q:
292 switch(bit_size) {
293 case 16:
294 return BRW_REGISTER_TYPE_W;
295 case 32:
296 return BRW_REGISTER_TYPE_D;
297 case 64:
298 return BRW_REGISTER_TYPE_Q;
299 default:
300 unreachable("Invalid bit size");
301 }
302 case BRW_REGISTER_TYPE_UW:
303 case BRW_REGISTER_TYPE_UD:
304 case BRW_REGISTER_TYPE_UQ:
305 switch(bit_size) {
306 case 16:
307 return BRW_REGISTER_TYPE_UW;
308 case 32:
309 return BRW_REGISTER_TYPE_UD;
310 case 64:
311 return BRW_REGISTER_TYPE_UQ;
312 default:
313 unreachable("Invalid bit size");
314 }
315 default:
316 unreachable("Unknown type");
317 }
318 }
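/* Usage sketch (illustrative, mirrors the switch above): a 64-bit value
 * with a float reference type maps to DF, while a 16-bit value with a
 * signed-integer reference type maps to W:
 *
 *    brw_reg_type_from_bit_size(64, BRW_REGISTER_TYPE_F) == BRW_REGISTER_TYPE_DF
 *    brw_reg_type_from_bit_size(16, BRW_REGISTER_TYPE_D) == BRW_REGISTER_TYPE_W
 */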
319
320 void
321 fs_visitor::nir_emit_impl(nir_function_impl *impl)
322 {
323 nir_locals = ralloc_array(mem_ctx, fs_reg, impl->reg_alloc);
324 for (unsigned i = 0; i < impl->reg_alloc; i++) {
325 nir_locals[i] = fs_reg();
326 }
327
328 foreach_list_typed(nir_register, reg, node, &impl->registers) {
329 unsigned array_elems =
330 reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
331 unsigned size = array_elems * reg->num_components;
332 const brw_reg_type reg_type =
333 brw_reg_type_from_bit_size(reg->bit_size, BRW_REGISTER_TYPE_F);
334 nir_locals[reg->index] = bld.vgrf(reg_type, size);
335 }
336
337 nir_ssa_values = reralloc(mem_ctx, nir_ssa_values, fs_reg,
338 impl->ssa_alloc);
339
340 nir_emit_cf_list(&impl->body);
341 }
342
343 void
344 fs_visitor::nir_emit_cf_list(exec_list *list)
345 {
346 exec_list_validate(list);
347 foreach_list_typed(nir_cf_node, node, node, list) {
348 switch (node->type) {
349 case nir_cf_node_if:
350 nir_emit_if(nir_cf_node_as_if(node));
351 break;
352
353 case nir_cf_node_loop:
354 nir_emit_loop(nir_cf_node_as_loop(node));
355 break;
356
357 case nir_cf_node_block:
358 nir_emit_block(nir_cf_node_as_block(node));
359 break;
360
361 default:
362 unreachable("Invalid CFG node block");
363 }
364 }
365 }
366
367 void
368 fs_visitor::nir_emit_if(nir_if *if_stmt)
369 {
370 /* first, put the condition into f0 */
371 fs_inst *inst = bld.MOV(bld.null_reg_d(),
372 retype(get_nir_src(if_stmt->condition),
373 BRW_REGISTER_TYPE_D));
374 inst->conditional_mod = BRW_CONDITIONAL_NZ;
375
376 bld.IF(BRW_PREDICATE_NORMAL);
377
378 nir_emit_cf_list(&if_stmt->then_list);
379
380 /* note: if the else is empty, dead CF elimination will remove it */
381 bld.emit(BRW_OPCODE_ELSE);
382
383 nir_emit_cf_list(&if_stmt->else_list);
384
385 bld.emit(BRW_OPCODE_ENDIF);
386 }
387
388 void
389 fs_visitor::nir_emit_loop(nir_loop *loop)
390 {
391 bld.emit(BRW_OPCODE_DO);
392
393 nir_emit_cf_list(&loop->body);
394
395 bld.emit(BRW_OPCODE_WHILE);
396 }
397
398 void
399 fs_visitor::nir_emit_block(nir_block *block)
400 {
401 nir_foreach_instr(instr, block) {
402 nir_emit_instr(instr);
403 }
404 }
405
406 void
407 fs_visitor::nir_emit_instr(nir_instr *instr)
408 {
409 const fs_builder abld = bld.annotate(NULL, instr);
410
411 switch (instr->type) {
412 case nir_instr_type_alu:
413 nir_emit_alu(abld, nir_instr_as_alu(instr));
414 break;
415
416 case nir_instr_type_intrinsic:
417 switch (stage) {
418 case MESA_SHADER_VERTEX:
419 nir_emit_vs_intrinsic(abld, nir_instr_as_intrinsic(instr));
420 break;
421 case MESA_SHADER_TESS_CTRL:
422 nir_emit_tcs_intrinsic(abld, nir_instr_as_intrinsic(instr));
423 break;
424 case MESA_SHADER_TESS_EVAL:
425 nir_emit_tes_intrinsic(abld, nir_instr_as_intrinsic(instr));
426 break;
427 case MESA_SHADER_GEOMETRY:
428 nir_emit_gs_intrinsic(abld, nir_instr_as_intrinsic(instr));
429 break;
430 case MESA_SHADER_FRAGMENT:
431 nir_emit_fs_intrinsic(abld, nir_instr_as_intrinsic(instr));
432 break;
433 case MESA_SHADER_COMPUTE:
434 nir_emit_cs_intrinsic(abld, nir_instr_as_intrinsic(instr));
435 break;
436 default:
437 unreachable("unsupported shader stage");
438 }
439 break;
440
441 case nir_instr_type_tex:
442 nir_emit_texture(abld, nir_instr_as_tex(instr));
443 break;
444
445 case nir_instr_type_load_const:
446 nir_emit_load_const(abld, nir_instr_as_load_const(instr));
447 break;
448
449 case nir_instr_type_ssa_undef:
450 /* We create a new VGRF for undefs on every use (by handling
451 * them in get_nir_src()), rather than for each definition.
452 * This helps register coalescing eliminate MOVs from undef.
453 */
454 break;
455
456 case nir_instr_type_jump:
457 nir_emit_jump(abld, nir_instr_as_jump(instr));
458 break;
459
460 default:
461 unreachable("unknown instruction type");
462 }
463 }
464
465 /**
466 * Recognizes a parent instruction of nir_op_extract_* and changes the type to
467 * match instr.
468 */
469 bool
470 fs_visitor::optimize_extract_to_float(nir_alu_instr *instr,
471 const fs_reg &result)
472 {
473 if (!instr->src[0].src.is_ssa ||
474 !instr->src[0].src.ssa->parent_instr)
475 return false;
476
477 if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
478 return false;
479
480 nir_alu_instr *src0 =
481 nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
482
483 if (src0->op != nir_op_extract_u8 && src0->op != nir_op_extract_u16 &&
484 src0->op != nir_op_extract_i8 && src0->op != nir_op_extract_i16)
485 return false;
486
487 nir_const_value *element = nir_src_as_const_value(src0->src[1].src);
488 assert(element != NULL);
489
490 /* Element type to extract. */
491 const brw_reg_type type = brw_int_type(
492 src0->op == nir_op_extract_u16 || src0->op == nir_op_extract_i16 ? 2 : 1,
493 src0->op == nir_op_extract_i16 || src0->op == nir_op_extract_i8);
494
495 fs_reg op0 = get_nir_src(src0->src[0].src);
496 op0.type = brw_type_for_nir_type(devinfo,
497 (nir_alu_type)(nir_op_infos[src0->op].input_types[0] |
498 nir_src_bit_size(src0->src[0].src)));
499 op0 = offset(op0, bld, src0->src[0].swizzle[0]);
500
501 set_saturate(instr->dest.saturate,
502 bld.MOV(result, subscript(op0, type, element->u32[0])));
503 return true;
504 }
505
506 bool
507 fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
508 const fs_reg &result)
509 {
510 if (!instr->src[0].src.is_ssa ||
511 instr->src[0].src.ssa->parent_instr->type != nir_instr_type_intrinsic)
512 return false;
513
514 nir_intrinsic_instr *src0 =
515 nir_instr_as_intrinsic(instr->src[0].src.ssa->parent_instr);
516
517 if (src0->intrinsic != nir_intrinsic_load_front_face)
518 return false;
519
520 nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
521 if (!value1 || fabsf(value1->f32[0]) != 1.0f)
522 return false;
523
524 nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
525 if (!value2 || fabsf(value2->f32[0]) != 1.0f)
526 return false;
527
528 fs_reg tmp = vgrf(glsl_type::int_type);
529
530 if (devinfo->gen >= 6) {
531 /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
532 fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
533
534 /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
535 *
536 * or(8) tmp.1<2>W g0.0<0,1,0>W 0x00003f80W
537 * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
538 *
539 * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
540 *
541 * This negation looks like it's safe in practice, because bits 0:4 will
542 * surely be TRIANGLES
543 */
544
545 if (value1->f32[0] == -1.0f) {
546 g0.negate = true;
547 }
548
549 bld.OR(subscript(tmp, BRW_REGISTER_TYPE_W, 1),
550 g0, brw_imm_uw(0x3f80));
551 } else {
552 /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
553 fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
554
555 /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
556 *
557 * or(8) tmp<1>D g1.6<0,1,0>D 0x3f800000D
558 * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
559 *
560 * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
561 *
562 * This negation looks like it's safe in practice, because bits 0:4 will
563 * surely be TRIANGLES
564 */
565
566 if (value1->f32[0] == -1.0f) {
567 g1_6.negate = true;
568 }
569
570 bld.OR(tmp, g1_6, brw_imm_d(0x3f800000));
571 }
572 bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, brw_imm_d(0xbf800000));
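/* Worked example for the gen6+ path (illustrative bit values): for a
 * front-facing polygon bit 15 of g0.0 is 0, so the OR leaves 0x3f80 in
 * the high word of tmp and the AND with 0xbf800000 yields 0x3f800000,
 * i.e. 1.0f; for a back-facing polygon bit 15 is 1, giving 0xbf80 in the
 * high word and a final value of 0xbf800000, i.e. -1.0f.
 */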
573
574 return true;
575 }
576
577 static void
578 emit_find_msb_using_lzd(const fs_builder &bld,
579 const fs_reg &result,
580 const fs_reg &src,
581 bool is_signed)
582 {
583 fs_inst *inst;
584 fs_reg temp = src;
585
586 if (is_signed) {
587 /* LZD of an absolute value source almost always does the right
588 * thing. There are two problem values:
589 *
590 * * 0x80000000. Since abs(0x80000000) == 0x80000000, LZD returns
591 * 0. However, findMSB(int(0x80000000)) == 30.
592 *
593 * * 0xffffffff. Since abs(0xffffffff) == 1, LZD returns
594 * 31. Section 8.8 (Integer Functions) of the GLSL 4.50 spec says:
595 *
596 * For a value of zero or negative one, -1 will be returned.
597 *
598 * * Negative powers of two. LZD(abs(-(1<<x))) returns x, but
599 * findMSB(-(1<<x)) should return x-1.
600 *
601 * For all negative number cases, including 0x80000000 and
602 * 0xffffffff, the correct value is obtained from LZD if instead of
603 * negating the (already negative) value the logical-not is used. A
604 * conditional logical-not can be achieved in two instructions.
605 */
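/* Worked example (illustrative): findMSB(-8) with is_signed. src is
 * 0xfffffff8; the ASR by 31 gives 0xffffffff and the XOR yields
 * 0x00000007. LZD(0x00000007) is 29, and the negated ADD below computes
 * 31 - 29 = 2, which matches findMSB(-8) == 2.
 */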
606 temp = bld.vgrf(BRW_REGISTER_TYPE_D);
607
608 bld.ASR(temp, src, brw_imm_d(31));
609 bld.XOR(temp, temp, src);
610 }
611
612 bld.LZD(retype(result, BRW_REGISTER_TYPE_UD),
613 retype(temp, BRW_REGISTER_TYPE_UD));
614
615 /* LZD counts from the MSB side, while GLSL's findMSB() wants the count
616 * from the LSB side. Subtract the result from 31 to convert the MSB
617 * count into an LSB count. If no bits are set, LZD will return 32.
618 * 31-32 = -1, which is exactly what findMSB() is supposed to return.
619 */
620 inst = bld.ADD(result, retype(result, BRW_REGISTER_TYPE_D), brw_imm_d(31));
621 inst->src[0].negate = true;
622 }
623
624 static brw_rnd_mode
625 brw_rnd_mode_from_nir_op (const nir_op op) {
626 switch (op) {
627 case nir_op_f2f16_rtz:
628 return BRW_RND_MODE_RTZ;
629 case nir_op_f2f16_rtne:
630 return BRW_RND_MODE_RTNE;
631 default:
632 unreachable("Operation doesn't support rounding mode");
633 }
634 }
635
636 void
637 fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
638 {
639 struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
640 fs_inst *inst;
641
642 fs_reg result = get_nir_dest(instr->dest.dest);
643 result.type = brw_type_for_nir_type(devinfo,
644 (nir_alu_type)(nir_op_infos[instr->op].output_type |
645 nir_dest_bit_size(instr->dest.dest)));
646
647 fs_reg op[4];
648 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
649 op[i] = get_nir_src(instr->src[i].src);
650 op[i].type = brw_type_for_nir_type(devinfo,
651 (nir_alu_type)(nir_op_infos[instr->op].input_types[i] |
652 nir_src_bit_size(instr->src[i].src)));
653 op[i].abs = instr->src[i].abs;
654 op[i].negate = instr->src[i].negate;
655 }
656
657 /* We get a bunch of mov's out of the from_ssa pass and they may still
658 * be vectorized. We'll handle them as a special-case. We'll also
659 * handle vecN here because it's basically the same thing.
660 */
661 switch (instr->op) {
662 case nir_op_imov:
663 case nir_op_fmov:
664 case nir_op_vec2:
665 case nir_op_vec3:
666 case nir_op_vec4: {
667 fs_reg temp = result;
668 bool need_extra_copy = false;
669 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
670 if (!instr->src[i].src.is_ssa &&
671 instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
672 need_extra_copy = true;
673 temp = bld.vgrf(result.type, 4);
674 break;
675 }
676 }
677
678 for (unsigned i = 0; i < 4; i++) {
679 if (!(instr->dest.write_mask & (1 << i)))
680 continue;
681
682 if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
683 inst = bld.MOV(offset(temp, bld, i),
684 offset(op[0], bld, instr->src[0].swizzle[i]));
685 } else {
686 inst = bld.MOV(offset(temp, bld, i),
687 offset(op[i], bld, instr->src[i].swizzle[0]));
688 }
689 inst->saturate = instr->dest.saturate;
690 }
691
692 /* In this case the source and destination registers were the same,
693 * so we need to insert an extra set of moves in order to deal with
694 * any swizzling.
695 */
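/* Hypothetical illustration: for something like "r0 = vec2 r0.y, r0.x",
 * a direct per-channel MOV into r0 would clobber r0.x before it is read,
 * so the channels are gathered into temp first and copied back here.
 */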
696 if (need_extra_copy) {
697 for (unsigned i = 0; i < 4; i++) {
698 if (!(instr->dest.write_mask & (1 << i)))
699 continue;
700
701 bld.MOV(offset(result, bld, i), offset(temp, bld, i));
702 }
703 }
704 return;
705 }
706 default:
707 break;
708 }
709
710 /* At this point, we have dealt with any instruction that operates on
711 * more than a single channel. Therefore, we can just adjust the source
712 * and destination registers for that channel and emit the instruction.
713 */
714 unsigned channel = 0;
715 if (nir_op_infos[instr->op].output_size == 0) {
716 /* Since NIR is doing the scalarizing for us, we should only ever see
717 * vectorized operations with a single channel.
718 */
719 assert(_mesa_bitcount(instr->dest.write_mask) == 1);
720 channel = ffs(instr->dest.write_mask) - 1;
721
722 result = offset(result, bld, channel);
723 }
724
725 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
726 assert(nir_op_infos[instr->op].input_sizes[i] < 2);
727 op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
728 }
729
730 switch (instr->op) {
731 case nir_op_i2f32:
732 case nir_op_u2f32:
733 if (optimize_extract_to_float(instr, result))
734 return;
735 inst = bld.MOV(result, op[0]);
736 inst->saturate = instr->dest.saturate;
737 break;
738
739 case nir_op_f2f16_rtne:
740 case nir_op_f2f16_rtz:
741 bld.emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
742 brw_imm_d(brw_rnd_mode_from_nir_op(instr->op)));
743 /* fallthrough */
744
745 /* In theory, it would be better to use BRW_OPCODE_F32TO16. Depending
746 * on the HW gen, it is either a special hw opcode or just a MOV, and
747 * brw_F32TO16 (at brw_eu_emit) does the work of choosing between them.
748 *
749 * But if we wanted to use that opcode, we would need to support it in
750 * the various optimizations and lowerings. Since HF support is currently
751 * gen8+ only, it is simpler to emit the MOV directly, and switch to
752 * BRW_OPCODE_F32TO16 when/if we add HF support on gen7.
753 */
754
755 case nir_op_f2f16_undef:
756 case nir_op_i2i16:
757 case nir_op_u2u16: {
758 /* TODO: Fixing alignment rules for conversions from 32-bit to
759 * 16-bit types should be moved to lower_conversions.
760 */
761 fs_reg tmp = bld.vgrf(op[0].type, 1);
762 tmp = subscript(tmp, result.type, 0);
763 inst = bld.MOV(tmp, op[0]);
764 inst->saturate = instr->dest.saturate;
765 inst = bld.MOV(result, tmp);
766 inst->saturate = instr->dest.saturate;
767 break;
768 }
769
770 case nir_op_f2f64:
771 case nir_op_f2i64:
772 case nir_op_f2u64:
773 case nir_op_i2f64:
774 case nir_op_i2i64:
775 case nir_op_u2f64:
776 case nir_op_u2u64:
777 /* CHV PRM, vol07, 3D Media GPGPU Engine, Register Region Restrictions:
778 *
779 * "When source or destination is 64b (...), regioning in Align1
780 * must follow these rules:
781 *
782 * 1. Source and destination horizontal stride must be aligned to
783 * the same qword.
784 * (...)"
785 *
786 * This means that 32-bit to 64-bit conversions need to have the 32-bit
787 * data elements aligned to 64-bit. This restriction does not apply to
788 * BDW and later.
789 */
790 if (nir_dest_bit_size(instr->dest.dest) == 64 &&
791 nir_src_bit_size(instr->src[0].src) == 32 &&
792 (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
793 fs_reg tmp = bld.vgrf(result.type, 1);
794 tmp = subscript(tmp, op[0].type, 0);
795 inst = bld.MOV(tmp, op[0]);
796 inst = bld.MOV(result, tmp);
797 inst->saturate = instr->dest.saturate;
798 break;
799 }
800 /* fallthrough */
801 case nir_op_f2f32:
802 case nir_op_f2i32:
803 case nir_op_f2u32:
804 case nir_op_i2i32:
805 case nir_op_u2u32:
806 inst = bld.MOV(result, op[0]);
807 inst->saturate = instr->dest.saturate;
808 break;
809
810 case nir_op_fsign: {
811 if (op[0].abs) {
812 /* Straightforward since the source can be assumed to be
813 * non-negative.
814 */
815 set_condmod(BRW_CONDITIONAL_NZ, bld.MOV(result, op[0]));
816 set_predicate(BRW_PREDICATE_NORMAL, bld.MOV(result, brw_imm_f(1.0f)));
817
818 } else if (type_sz(op[0].type) < 8) {
819 /* AND(val, 0x80000000) gives the sign bit.
820 *
821 * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
822 * zero.
823 */
824 bld.CMP(bld.null_reg_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ);
825
826 fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
827 op[0].type = BRW_REGISTER_TYPE_UD;
828 result.type = BRW_REGISTER_TYPE_UD;
829 bld.AND(result_int, op[0], brw_imm_ud(0x80000000u));
830
831 inst = bld.OR(result_int, result_int, brw_imm_ud(0x3f800000u));
832 inst->predicate = BRW_PREDICATE_NORMAL;
833 if (instr->dest.saturate) {
834 inst = bld.MOV(result, result);
835 inst->saturate = true;
836 }
837 } else {
838 /* For doubles we do the same but we need to consider:
839 *
840 * - 2-src instructions can't operate with 64-bit immediates
841 * - The sign is encoded in the high 32-bit of each DF
842 * - We need to produce a DF result.
843 */
844
845 fs_reg zero = vgrf(glsl_type::double_type);
846 bld.MOV(zero, setup_imm_df(bld, 0.0));
847 bld.CMP(bld.null_reg_df(), op[0], zero, BRW_CONDITIONAL_NZ);
848
849 bld.MOV(result, zero);
850
851 fs_reg r = subscript(result, BRW_REGISTER_TYPE_UD, 1);
852 bld.AND(r, subscript(op[0], BRW_REGISTER_TYPE_UD, 1),
853 brw_imm_ud(0x80000000u));
854
855 set_predicate(BRW_PREDICATE_NORMAL,
856 bld.OR(r, r, brw_imm_ud(0x3ff00000u)));
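/* Worked example (illustrative): op[0] = -2.5 has high dword 0xc0040000,
 * so the AND leaves 0x80000000 in r; since op[0] != 0 the predicated OR
 * produces 0xbff00000, which together with the zeroed low dword is the
 * double -1.0.
 */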
857
858 if (instr->dest.saturate) {
859 inst = bld.MOV(result, result);
860 inst->saturate = true;
861 }
862 }
863 break;
864 }
865
866 case nir_op_isign:
867 /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
868 * -> non-negative val generates 0x00000000.
869 * Predicated OR sets 1 if val is positive.
870 */
871 assert(nir_dest_bit_size(instr->dest.dest) < 64);
872 bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_G);
873 bld.ASR(result, op[0], brw_imm_d(31));
874 inst = bld.OR(result, result, brw_imm_d(1));
875 inst->predicate = BRW_PREDICATE_NORMAL;
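/* Illustrative traces: isign(7): CMP sets the flag (7 > 0), ASR gives 0,
 * the predicated OR sets 1. isign(-5): flag stays clear, ASR gives
 * 0xffffffff (-1), the OR is skipped. isign(0): flag clear, ASR gives 0,
 * result stays 0.
 */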
876 break;
877
878 case nir_op_frcp:
879 inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
880 inst->saturate = instr->dest.saturate;
881 break;
882
883 case nir_op_fexp2:
884 inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
885 inst->saturate = instr->dest.saturate;
886 break;
887
888 case nir_op_flog2:
889 inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
890 inst->saturate = instr->dest.saturate;
891 break;
892
893 case nir_op_fsin:
894 inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
895 inst->saturate = instr->dest.saturate;
896 break;
897
898 case nir_op_fcos:
899 inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
900 inst->saturate = instr->dest.saturate;
901 break;
902
903 case nir_op_fddx:
904 if (fs_key->high_quality_derivatives) {
905 inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
906 } else {
907 inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
908 }
909 inst->saturate = instr->dest.saturate;
910 break;
911 case nir_op_fddx_fine:
912 inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
913 inst->saturate = instr->dest.saturate;
914 break;
915 case nir_op_fddx_coarse:
916 inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
917 inst->saturate = instr->dest.saturate;
918 break;
919 case nir_op_fddy:
920 if (fs_key->high_quality_derivatives) {
921 inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0]);
922 } else {
923 inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0]);
924 }
925 inst->saturate = instr->dest.saturate;
926 break;
927 case nir_op_fddy_fine:
928 inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0]);
929 inst->saturate = instr->dest.saturate;
930 break;
931 case nir_op_fddy_coarse:
932 inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0]);
933 inst->saturate = instr->dest.saturate;
934 break;
935
936 case nir_op_iadd:
937 case nir_op_fadd:
938 inst = bld.ADD(result, op[0], op[1]);
939 inst->saturate = instr->dest.saturate;
940 break;
941
942 case nir_op_fmul:
943 inst = bld.MUL(result, op[0], op[1]);
944 inst->saturate = instr->dest.saturate;
945 break;
946
947 case nir_op_imul:
948 assert(nir_dest_bit_size(instr->dest.dest) < 64);
949 bld.MUL(result, op[0], op[1]);
950 break;
951
952 case nir_op_imul_high:
953 case nir_op_umul_high:
954 assert(nir_dest_bit_size(instr->dest.dest) < 64);
955 bld.emit(SHADER_OPCODE_MULH, result, op[0], op[1]);
956 break;
957
958 case nir_op_idiv:
959 case nir_op_udiv:
960 assert(nir_dest_bit_size(instr->dest.dest) < 64);
961 bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
962 break;
963
964 case nir_op_uadd_carry:
965 unreachable("Should have been lowered by carry_to_arith().");
966
967 case nir_op_usub_borrow:
968 unreachable("Should have been lowered by borrow_to_arith().");
969
970 case nir_op_umod:
971 case nir_op_irem:
972 /* According to the sign table for INT DIV in the Ivy Bridge PRM, it
973 * appears that our hardware just does the right thing for signed
974 * remainder.
975 */
976 assert(nir_dest_bit_size(instr->dest.dest) < 64);
977 bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
978 break;
979
980 case nir_op_imod: {
981 /* Get a regular C-style remainder. The predicate below is set when a % b != 0. */
982 bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
983
984 /* Math instructions don't support conditional mod */
985 inst = bld.MOV(bld.null_reg_d(), result);
986 inst->conditional_mod = BRW_CONDITIONAL_NZ;
987
988 /* Now, we need to determine if signs of the sources are different.
989 * When we XOR the sources, the top bit is 0 if they are the same and 1
990 * if they are different. We can then use a conditional modifier to
991 * turn that into a predicate. This leads us to an XOR.l instruction.
992 *
993 * Technically, according to the PRM, you're not allowed to use .l on an
994 * XOR instruction. However, empirical experiments and Curro's reading
995 * of the simulator source both indicate that it's safe.
996 */
997 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_D);
998 inst = bld.XOR(tmp, op[0], op[1]);
999 inst->predicate = BRW_PREDICATE_NORMAL;
1000 inst->conditional_mod = BRW_CONDITIONAL_L;
1001
1002 /* If the result of the initial remainder operation is non-zero and the
1003 * two sources have different signs, add in a copy of op[1] to get the
1004 * final integer modulus value.
1005 */
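/* Worked example (illustrative): imod(-7, 3). INT_REMAINDER gives the
 * C-style remainder -1; it is non-zero and the signs of -7 and 3 differ,
 * so the predicated ADD below adds 3, producing 2 -- the GLSL imod result.
 */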
1006 inst = bld.ADD(result, result, op[1]);
1007 inst->predicate = BRW_PREDICATE_NORMAL;
1008 break;
1009 }
1010
1011 case nir_op_flt:
1012 case nir_op_fge:
1013 case nir_op_feq:
1014 case nir_op_fne: {
1015 fs_reg dest = result;
1016 if (nir_src_bit_size(instr->src[0].src) > 32) {
1017 dest = bld.vgrf(BRW_REGISTER_TYPE_DF, 1);
1018 }
1019 brw_conditional_mod cond;
1020 switch (instr->op) {
1021 case nir_op_flt:
1022 cond = BRW_CONDITIONAL_L;
1023 break;
1024 case nir_op_fge:
1025 cond = BRW_CONDITIONAL_GE;
1026 break;
1027 case nir_op_feq:
1028 cond = BRW_CONDITIONAL_Z;
1029 break;
1030 case nir_op_fne:
1031 cond = BRW_CONDITIONAL_NZ;
1032 break;
1033 default:
1034 unreachable("bad opcode");
1035 }
1036 bld.CMP(dest, op[0], op[1], cond);
1037 if (nir_src_bit_size(instr->src[0].src) > 32) {
1038 bld.MOV(result, subscript(dest, BRW_REGISTER_TYPE_UD, 0));
1039 }
1040 break;
1041 }
1042
1043 case nir_op_ilt:
1044 case nir_op_ult:
1045 case nir_op_ige:
1046 case nir_op_uge:
1047 case nir_op_ieq:
1048 case nir_op_ine: {
1049 fs_reg dest = result;
1050 if (nir_src_bit_size(instr->src[0].src) > 32) {
1051 dest = bld.vgrf(BRW_REGISTER_TYPE_UQ, 1);
1052 }
1053
1054 brw_conditional_mod cond;
1055 switch (instr->op) {
1056 case nir_op_ilt:
1057 case nir_op_ult:
1058 cond = BRW_CONDITIONAL_L;
1059 break;
1060 case nir_op_ige:
1061 case nir_op_uge:
1062 cond = BRW_CONDITIONAL_GE;
1063 break;
1064 case nir_op_ieq:
1065 cond = BRW_CONDITIONAL_Z;
1066 break;
1067 case nir_op_ine:
1068 cond = BRW_CONDITIONAL_NZ;
1069 break;
1070 default:
1071 unreachable("bad opcode");
1072 }
1073 bld.CMP(dest, op[0], op[1], cond);
1074 if (nir_src_bit_size(instr->src[0].src) > 32) {
1075 bld.MOV(result, subscript(dest, BRW_REGISTER_TYPE_UD, 0));
1076 }
1077 break;
1078 }
1079
1080 case nir_op_inot:
1081 if (devinfo->gen >= 8) {
1082 op[0] = resolve_source_modifiers(op[0]);
1083 }
1084 bld.NOT(result, op[0]);
1085 break;
1086 case nir_op_ixor:
1087 if (devinfo->gen >= 8) {
1088 op[0] = resolve_source_modifiers(op[0]);
1089 op[1] = resolve_source_modifiers(op[1]);
1090 }
1091 bld.XOR(result, op[0], op[1]);
1092 break;
1093 case nir_op_ior:
1094 if (devinfo->gen >= 8) {
1095 op[0] = resolve_source_modifiers(op[0]);
1096 op[1] = resolve_source_modifiers(op[1]);
1097 }
1098 bld.OR(result, op[0], op[1]);
1099 break;
1100 case nir_op_iand:
1101 if (devinfo->gen >= 8) {
1102 op[0] = resolve_source_modifiers(op[0]);
1103 op[1] = resolve_source_modifiers(op[1]);
1104 }
1105 bld.AND(result, op[0], op[1]);
1106 break;
1107
1108 case nir_op_fdot2:
1109 case nir_op_fdot3:
1110 case nir_op_fdot4:
1111 case nir_op_ball_fequal2:
1112 case nir_op_ball_iequal2:
1113 case nir_op_ball_fequal3:
1114 case nir_op_ball_iequal3:
1115 case nir_op_ball_fequal4:
1116 case nir_op_ball_iequal4:
1117 case nir_op_bany_fnequal2:
1118 case nir_op_bany_inequal2:
1119 case nir_op_bany_fnequal3:
1120 case nir_op_bany_inequal3:
1121 case nir_op_bany_fnequal4:
1122 case nir_op_bany_inequal4:
1123 unreachable("Lowered by nir_lower_alu_reductions");
1124
1125 case nir_op_fnoise1_1:
1126 case nir_op_fnoise1_2:
1127 case nir_op_fnoise1_3:
1128 case nir_op_fnoise1_4:
1129 case nir_op_fnoise2_1:
1130 case nir_op_fnoise2_2:
1131 case nir_op_fnoise2_3:
1132 case nir_op_fnoise2_4:
1133 case nir_op_fnoise3_1:
1134 case nir_op_fnoise3_2:
1135 case nir_op_fnoise3_3:
1136 case nir_op_fnoise3_4:
1137 case nir_op_fnoise4_1:
1138 case nir_op_fnoise4_2:
1139 case nir_op_fnoise4_3:
1140 case nir_op_fnoise4_4:
1141 unreachable("not reached: should be handled by lower_noise");
1142
1143 case nir_op_ldexp:
1144 unreachable("not reached: should be handled by ldexp_to_arith()");
1145
1146 case nir_op_fsqrt:
1147 inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
1148 inst->saturate = instr->dest.saturate;
1149 break;
1150
1151 case nir_op_frsq:
1152 inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
1153 inst->saturate = instr->dest.saturate;
1154 break;
1155
1156 case nir_op_b2i:
1157 case nir_op_b2f:
1158 bld.MOV(result, negate(op[0]));
1159 break;
1160
1161 case nir_op_i2b:
1162 case nir_op_f2b:
1163 if (nir_src_bit_size(instr->src[0].src) == 64) {
1164 /* two-argument instructions can't take 64-bit immediates */
1165 fs_reg zero;
1166 fs_reg tmp;
1167
1168 if (instr->op == nir_op_f2b) {
1169 zero = vgrf(glsl_type::double_type);
1170 tmp = vgrf(glsl_type::double_type);
1171 bld.MOV(zero, setup_imm_df(bld, 0.0));
1172 } else {
1173 zero = vgrf(glsl_type::int64_t_type);
1174 tmp = vgrf(glsl_type::int64_t_type);
1175 bld.MOV(zero, brw_imm_q(0));
1176 }
1177
1178 /* A SIMD16 execution needs to be split into two instructions, so use
1179 * a vgrf instead of the flag register as dst so that instruction
1180 * splitting works.
1181 */
1182 bld.CMP(tmp, op[0], zero, BRW_CONDITIONAL_NZ);
1183 bld.MOV(result, subscript(tmp, BRW_REGISTER_TYPE_UD, 0));
1184 } else {
1185 if (instr->op == nir_op_f2b) {
1186 bld.CMP(result, op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ);
1187 } else {
1188 bld.CMP(result, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ);
1189 }
1190 }
1191 break;
1192
1193 case nir_op_ftrunc:
1194 inst = bld.RNDZ(result, op[0]);
1195 inst->saturate = instr->dest.saturate;
1196 break;
1197
1198 case nir_op_fceil: {
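/* This computes ceil(x) as -RNDD(-x), i.e. -floor(-x); e.g.
 * (illustrative) ceil(1.2) = -floor(-1.2) = -(-2) = 2.
 */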
1199 op[0].negate = !op[0].negate;
1200 fs_reg temp = vgrf(glsl_type::float_type);
1201 bld.RNDD(temp, op[0]);
1202 temp.negate = true;
1203 inst = bld.MOV(result, temp);
1204 inst->saturate = instr->dest.saturate;
1205 break;
1206 }
1207 case nir_op_ffloor:
1208 inst = bld.RNDD(result, op[0]);
1209 inst->saturate = instr->dest.saturate;
1210 break;
1211 case nir_op_ffract:
1212 inst = bld.FRC(result, op[0]);
1213 inst->saturate = instr->dest.saturate;
1214 break;
1215 case nir_op_fround_even:
1216 inst = bld.RNDE(result, op[0]);
1217 inst->saturate = instr->dest.saturate;
1218 break;
1219
1220 case nir_op_fquantize2f16: {
1221 fs_reg tmp16 = bld.vgrf(BRW_REGISTER_TYPE_D);
1222 fs_reg tmp32 = bld.vgrf(BRW_REGISTER_TYPE_F);
1223 fs_reg zero = bld.vgrf(BRW_REGISTER_TYPE_F);
1224
1225 /* The destination stride must be at least as big as the source stride. */
1226 tmp16.type = BRW_REGISTER_TYPE_W;
1227 tmp16.stride = 2;
1228
1229 /* Check for denormal */
1230 fs_reg abs_src0 = op[0];
1231 abs_src0.abs = true;
1232 bld.CMP(bld.null_reg_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
1233 BRW_CONDITIONAL_L);
1234 /* Get the appropriately signed zero */
1235 bld.AND(retype(zero, BRW_REGISTER_TYPE_UD),
1236 retype(op[0], BRW_REGISTER_TYPE_UD),
1237 brw_imm_ud(0x80000000));
1238 /* Do the actual F32 -> F16 -> F32 conversion */
1239 bld.emit(BRW_OPCODE_F32TO16, tmp16, op[0]);
1240 bld.emit(BRW_OPCODE_F16TO32, tmp32, tmp16);
1241 /* Select that or zero based on normal status */
1242 inst = bld.SEL(result, zero, tmp32);
1243 inst->predicate = BRW_PREDICATE_NORMAL;
1244 inst->saturate = instr->dest.saturate;
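/* Illustrative values (assumptions for clarity): the smallest normal
 * half-float is 2^-14 ~= 6.1e-5, so an input of 3.0e-5 takes the
 * predicated path and is flushed to a zero carrying the input's sign,
 * while 0.1 survives the F32->F16->F32 round trip as the nearest
 * representable half, ~0.0999756.
 */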
1245 break;
1246 }
1247
1248 case nir_op_imin:
1249 case nir_op_umin:
1250 case nir_op_fmin:
1251 inst = bld.emit_minmax(result, op[0], op[1], BRW_CONDITIONAL_L);
1252 inst->saturate = instr->dest.saturate;
1253 break;
1254
1255 case nir_op_imax:
1256 case nir_op_umax:
1257 case nir_op_fmax:
1258 inst = bld.emit_minmax(result, op[0], op[1], BRW_CONDITIONAL_GE);
1259 inst->saturate = instr->dest.saturate;
1260 break;
1261
1262 case nir_op_pack_snorm_2x16:
1263 case nir_op_pack_snorm_4x8:
1264 case nir_op_pack_unorm_2x16:
1265 case nir_op_pack_unorm_4x8:
1266 case nir_op_unpack_snorm_2x16:
1267 case nir_op_unpack_snorm_4x8:
1268 case nir_op_unpack_unorm_2x16:
1269 case nir_op_unpack_unorm_4x8:
1270 case nir_op_unpack_half_2x16:
1271 case nir_op_pack_half_2x16:
1272 unreachable("not reached: should be handled by lower_packing_builtins");
1273
1274 case nir_op_unpack_half_2x16_split_x:
1275 inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
1276 inst->saturate = instr->dest.saturate;
1277 break;
1278 case nir_op_unpack_half_2x16_split_y:
1279 inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
1280 inst->saturate = instr->dest.saturate;
1281 break;
1282
1283 case nir_op_pack_64_2x32_split:
1284 bld.emit(FS_OPCODE_PACK, result, op[0], op[1]);
1285 break;
1286
1287 case nir_op_unpack_64_2x32_split_x:
1288 case nir_op_unpack_64_2x32_split_y: {
1289 if (instr->op == nir_op_unpack_64_2x32_split_x)
1290 bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UD, 0));
1291 else
1292 bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UD, 1));
1293 break;
1294 }
1295
1296 case nir_op_fpow:
1297 inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
1298 inst->saturate = instr->dest.saturate;
1299 break;
1300
1301 case nir_op_bitfield_reverse:
1302 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1303 bld.BFREV(result, op[0]);
1304 break;
1305
1306 case nir_op_bit_count:
1307 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1308 bld.CBIT(result, op[0]);
1309 break;
1310
1311 case nir_op_ufind_msb: {
1312 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1313 emit_find_msb_using_lzd(bld, result, op[0], false);
1314 break;
1315 }
1316
1317 case nir_op_ifind_msb: {
1318 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1319
1320 if (devinfo->gen < 7) {
1321 emit_find_msb_using_lzd(bld, result, op[0], true);
1322 } else {
1323 bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);
1324
1325 /* FBH counts from the MSB side, while GLSL's findMSB() wants the
1326 * count from the LSB side. If FBH didn't return an error
1327 * (0xFFFFFFFF), then subtract the result from 31 to convert the MSB
1328 * count into an LSB count.
1329 */
1330 bld.CMP(bld.null_reg_d(), result, brw_imm_d(-1), BRW_CONDITIONAL_NZ);
1331
1332 inst = bld.ADD(result, result, brw_imm_d(31));
1333 inst->predicate = BRW_PREDICATE_NORMAL;
1334 inst->src[0].negate = true;
1335 }
1336 break;
1337 }
1338
1339 case nir_op_find_lsb:
1340 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1341
1342 if (devinfo->gen < 7) {
1343 fs_reg temp = vgrf(glsl_type::int_type);
1344
1345 /* (x & -x) generates a value that consists of only the LSB of x.
1346 * For all powers of 2, findMSB(y) == findLSB(y).
1347 */
1348 fs_reg src = retype(op[0], BRW_REGISTER_TYPE_D);
1349 fs_reg negated_src = src;
1350
1351 /* One must be negated, and the other must be non-negated. It
1352 * doesn't matter which is which.
1353 */
1354 negated_src.negate = true;
1355 src.negate = false;
1356
1357 bld.AND(temp, src, negated_src);
1358 emit_find_msb_using_lzd(bld, result, temp, false);
1359 } else {
1360 bld.FBL(result, op[0]);
1361 }
1362 break;
1363
1364 case nir_op_ubitfield_extract:
1365 case nir_op_ibitfield_extract:
1366 unreachable("should have been lowered");
1367 case nir_op_ubfe:
1368 case nir_op_ibfe:
1369 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1370 bld.BFE(result, op[2], op[1], op[0]);
1371 break;
1372 case nir_op_bfm:
1373 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1374 bld.BFI1(result, op[0], op[1]);
1375 break;
1376 case nir_op_bfi:
1377 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1378 bld.BFI2(result, op[0], op[1], op[2]);
1379 break;
1380
1381 case nir_op_bitfield_insert:
1382 unreachable("not reached: should have been lowered");
1383
1384 case nir_op_ishl:
1385 case nir_op_ishr:
1386 case nir_op_ushr: {
1387 fs_reg shift_count = op[1];
1388
1389 if (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo)) {
1390 if (op[1].file == VGRF &&
1391 (result.type == BRW_REGISTER_TYPE_Q ||
1392 result.type == BRW_REGISTER_TYPE_UQ)) {
1393 shift_count = fs_reg(VGRF, alloc.allocate(dispatch_width / 4),
1394 BRW_REGISTER_TYPE_UD);
1395 shift_count.stride = 2;
1396 bld.MOV(shift_count, op[1]);
1397 }
1398 }
1399
1400 switch (instr->op) {
1401 case nir_op_ishl:
1402 bld.SHL(result, op[0], shift_count);
1403 break;
1404 case nir_op_ishr:
1405 bld.ASR(result, op[0], shift_count);
1406 break;
1407 case nir_op_ushr:
1408 bld.SHR(result, op[0], shift_count);
1409 break;
1410 default:
1411 unreachable("not reached");
1412 }
1413 break;
1414 }
1415
1416 case nir_op_pack_half_2x16_split:
1417 bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
1418 break;
1419
1420 case nir_op_ffma:
1421 inst = bld.MAD(result, op[2], op[1], op[0]);
1422 inst->saturate = instr->dest.saturate;
1423 break;
1424
1425 case nir_op_flrp:
1426 inst = bld.LRP(result, op[0], op[1], op[2]);
1427 inst->saturate = instr->dest.saturate;
1428 break;
1429
1430 case nir_op_bcsel:
1431 if (optimize_frontfacing_ternary(instr, result))
1432 return;
1433
1434 bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ);
1435 inst = bld.SEL(result, op[1], op[2]);
1436 inst->predicate = BRW_PREDICATE_NORMAL;
1437 break;
1438
1439 case nir_op_extract_u8:
1440 case nir_op_extract_i8: {
1441 nir_const_value *byte = nir_src_as_const_value(instr->src[1].src);
1442 assert(byte != NULL);
1443
1444 /* The PRMs say:
1445 *
1446 * BDW+
1447 * There is no direct conversion from B/UB to Q/UQ or Q/UQ to B/UB.
1448 * Use two instructions and a word or DWord intermediate integer type.
1449 */
1450 if (nir_dest_bit_size(instr->dest.dest) == 64) {
1451 const brw_reg_type type = brw_int_type(2, instr->op == nir_op_extract_i8);
1452
1453 if (instr->op == nir_op_extract_i8) {
1454 /* If we need to sign extend, extract to a word first */
1455 fs_reg w_temp = bld.vgrf(BRW_REGISTER_TYPE_W);
1456 bld.MOV(w_temp, subscript(op[0], type, byte->u32[0]));
1457 bld.MOV(result, w_temp);
1458 } else {
1459 /* Otherwise use an AND with 0xff and a word type */
1460 bld.AND(result, subscript(op[0], type, byte->u32[0] / 2), brw_imm_uw(0xff));
1461 }
1462 } else {
1463 const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8);
1464 bld.MOV(result, subscript(op[0], type, byte->u32[0]));
1465 }
1466 break;
1467 }
1468
1469 case nir_op_extract_u16:
1470 case nir_op_extract_i16: {
1471 const brw_reg_type type = brw_int_type(2, instr->op == nir_op_extract_i16);
1472 nir_const_value *word = nir_src_as_const_value(instr->src[1].src);
1473 assert(word != NULL);
1474 bld.MOV(result, subscript(op[0], type, word->u32[0]));
1475 break;
1476 }
1477
1478 default:
1479 unreachable("unhandled instruction");
1480 }
1481
1482 /* If we need to do a boolean resolve, replace the result with -(x & 1)
1483 * to sign extend the low bit to 0/~0
1484 */
1485 if (devinfo->gen <= 5 &&
1486 (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
1487 fs_reg masked = vgrf(glsl_type::int_type);
1488 bld.AND(masked, result, brw_imm_d(1));
1489 masked.negate = true;
1490 bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
1491 }
1492 }
1493
1494 void
1495 fs_visitor::nir_emit_load_const(const fs_builder &bld,
1496 nir_load_const_instr *instr)
1497 {
1498 const brw_reg_type reg_type =
1499 brw_reg_type_from_bit_size(instr->def.bit_size, BRW_REGISTER_TYPE_D);
1500 fs_reg reg = bld.vgrf(reg_type, instr->def.num_components);
1501
1502 switch (instr->def.bit_size) {
1503 case 32:
1504 for (unsigned i = 0; i < instr->def.num_components; i++)
1505 bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value.i32[i]));
1506 break;
1507
1508 case 64:
1509 assert(devinfo->gen >= 7);
1510 if (devinfo->gen == 7) {
1511 /* We don't get 64-bit integer types until gen8 */
1512 for (unsigned i = 0; i < instr->def.num_components; i++) {
1513 bld.MOV(retype(offset(reg, bld, i), BRW_REGISTER_TYPE_DF),
1514 setup_imm_df(bld, instr->value.f64[i]));
1515 }
1516 } else {
1517 for (unsigned i = 0; i < instr->def.num_components; i++)
1518 bld.MOV(offset(reg, bld, i), brw_imm_q(instr->value.i64[i]));
1519 }
1520 break;
1521
1522 default:
1523 unreachable("Invalid bit size");
1524 }
1525
1526 nir_ssa_values[instr->def.index] = reg;
1527 }
1528
1529 fs_reg
1530 fs_visitor::get_nir_src(const nir_src &src)
1531 {
1532 fs_reg reg;
1533 if (src.is_ssa) {
1534 if (src.ssa->parent_instr->type == nir_instr_type_ssa_undef) {
1535 const brw_reg_type reg_type =
1536 brw_reg_type_from_bit_size(src.ssa->bit_size, BRW_REGISTER_TYPE_D);
1537 reg = bld.vgrf(reg_type, src.ssa->num_components);
1538 } else {
1539 reg = nir_ssa_values[src.ssa->index];
1540 }
1541 } else {
1542 /* We don't handle indirects on locals */
1543 assert(src.reg.indirect == NULL);
1544 reg = offset(nir_locals[src.reg.reg->index], bld,
1545 src.reg.base_offset * src.reg.reg->num_components);
1546 }
1547
1548 if (nir_src_bit_size(src) == 64 && devinfo->gen == 7) {
1549 /* The only 64-bit type available on gen7 is DF, so use that. */
1550 reg.type = BRW_REGISTER_TYPE_DF;
1551 } else {
1552 /* To avoid floating-point denorm flushing problems, set the type by
1553 * default to an integer type - instructions that need floating-point
1554 * semantics will set this to F if they need to.
1555 */
1556 reg.type = brw_reg_type_from_bit_size(nir_src_bit_size(src),
1557 BRW_REGISTER_TYPE_D);
1558 }
1559
1560 return reg;
1561 }
1562
1563 /**
1564 * Return an IMM for constants; otherwise call get_nir_src() as normal.
1565 *
1566 * This function should not be called on any value which may be 64 bits.
1567 * We could theoretically support 64-bit on gen8+ but we choose not to
1568 * because it wouldn't work in general (no gen7 support) and there are
1569 * enough restrictions in 64-bit immediates that you can't take the return
1570 * value and treat it the same as the result of get_nir_src().
1571 */
1572 fs_reg
1573 fs_visitor::get_nir_src_imm(const nir_src &src)
1574 {
1575 nir_const_value *val = nir_src_as_const_value(src);
1576 assert(nir_src_bit_size(src) == 32);
1577 return val ? fs_reg(brw_imm_d(val->i32[0])) : get_nir_src(src);
1578 }
1579
1580 fs_reg
1581 fs_visitor::get_nir_dest(const nir_dest &dest)
1582 {
1583 if (dest.is_ssa) {
1584 const brw_reg_type reg_type =
1585 brw_reg_type_from_bit_size(dest.ssa.bit_size, BRW_REGISTER_TYPE_F);
1586 nir_ssa_values[dest.ssa.index] =
1587 bld.vgrf(reg_type, dest.ssa.num_components);
1588 return nir_ssa_values[dest.ssa.index];
1589 } else {
1590 /* We don't handle indirects on locals */
1591 assert(dest.reg.indirect == NULL);
1592 return offset(nir_locals[dest.reg.reg->index], bld,
1593 dest.reg.base_offset * dest.reg.reg->num_components);
1594 }
1595 }
1596
1597 fs_reg
1598 fs_visitor::get_nir_image_deref(const nir_deref_var *deref)
1599 {
1600 fs_reg image(UNIFORM, deref->var->data.driver_location / 4,
1601 BRW_REGISTER_TYPE_UD);
1602 fs_reg indirect;
1603 unsigned indirect_max = 0;
1604
1605 for (const nir_deref *tail = &deref->deref; tail->child;
1606 tail = tail->child) {
1607 const nir_deref_array *deref_array = nir_deref_as_array(tail->child);
1608 assert(tail->child->deref_type == nir_deref_type_array);
1609 const unsigned size = glsl_get_length(tail->type);
1610 const unsigned element_size = type_size_scalar(deref_array->deref.type);
1611 const unsigned base = MIN2(deref_array->base_offset, size - 1);
1612 image = offset(image, bld, base * element_size);
1613
1614 if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
1615 fs_reg tmp = vgrf(glsl_type::uint_type);
1616
1617 /* Accessing an invalid surface index with the dataport can result
1618 * in a hang. According to the spec "if the index used to
1619 * select an individual element is negative or greater than or
1620 * equal to the size of the array, the results of the operation
1621 * are undefined but may not lead to termination" -- which is one
1622 * of the possible outcomes of the hang. Clamp the index to
1623 * prevent access outside of the array bounds.
1624 */
1625 bld.emit_minmax(tmp, retype(get_nir_src(deref_array->indirect),
1626 BRW_REGISTER_TYPE_UD),
1627 brw_imm_ud(size - base - 1), BRW_CONDITIONAL_L);
1628
1629 indirect_max += element_size * (tail->type->length - 1);
1630
1631 bld.MUL(tmp, tmp, brw_imm_ud(element_size * 4));
1632 if (indirect.file == BAD_FILE) {
1633 indirect = tmp;
1634 } else {
1635 bld.ADD(indirect, indirect, tmp);
1636 }
1637 }
1638 }
1639
1640 if (indirect.file == BAD_FILE) {
1641 return image;
1642 } else {
1643 /* Emit a pile of MOVs to load the uniform into a temporary. The
1644 * dead-code elimination pass will get rid of what we don't use.
1645 */
1646 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, BRW_IMAGE_PARAM_SIZE);
1647 for (unsigned j = 0; j < BRW_IMAGE_PARAM_SIZE; j++) {
1648 bld.emit(SHADER_OPCODE_MOV_INDIRECT,
1649 offset(tmp, bld, j), offset(image, bld, j),
1650 indirect, brw_imm_ud((indirect_max + 1) * 4));
1651 }
1652 return tmp;
1653 }
1654 }
1655
1656 void
1657 fs_visitor::emit_percomp(const fs_builder &bld, const fs_inst &inst,
1658 unsigned wr_mask)
1659 {
1660 for (unsigned i = 0; i < 4; i++) {
1661 if (!((wr_mask >> i) & 1))
1662 continue;
1663
1664 fs_inst *new_inst = new(mem_ctx) fs_inst(inst);
1665 new_inst->dst = offset(new_inst->dst, bld, i);
1666 for (unsigned j = 0; j < new_inst->sources; j++)
1667 if (new_inst->src[j].file == VGRF)
1668 new_inst->src[j] = offset(new_inst->src[j], bld, i);
1669
1670 bld.emit(new_inst);
1671 }
1672 }
1673
1674 /**
1675 * Get the matching channel register datatype for an image intrinsic of the
1676 * specified GLSL image type.
1677 */
1678 static brw_reg_type
1679 get_image_base_type(const glsl_type *type)
1680 {
1681 switch ((glsl_base_type)type->sampled_type) {
1682 case GLSL_TYPE_UINT:
1683 return BRW_REGISTER_TYPE_UD;
1684 case GLSL_TYPE_INT:
1685 return BRW_REGISTER_TYPE_D;
1686 case GLSL_TYPE_FLOAT:
1687 return BRW_REGISTER_TYPE_F;
1688 default:
1689 unreachable("Not reached.");
1690 }
1691 }
1692
1693 /**
1694 * Get the appropriate atomic op for an image atomic intrinsic.
1695 */
1696 static unsigned
1697 get_image_atomic_op(nir_intrinsic_op op, const glsl_type *type)
1698 {
1699 switch (op) {
1700 case nir_intrinsic_image_atomic_add:
1701 return BRW_AOP_ADD;
1702 case nir_intrinsic_image_atomic_min:
1703 return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
1704 BRW_AOP_IMIN : BRW_AOP_UMIN);
1705 case nir_intrinsic_image_atomic_max:
1706 return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
1707 BRW_AOP_IMAX : BRW_AOP_UMAX);
1708 case nir_intrinsic_image_atomic_and:
1709 return BRW_AOP_AND;
1710 case nir_intrinsic_image_atomic_or:
1711 return BRW_AOP_OR;
1712 case nir_intrinsic_image_atomic_xor:
1713 return BRW_AOP_XOR;
1714 case nir_intrinsic_image_atomic_exchange:
1715 return BRW_AOP_MOV;
1716 case nir_intrinsic_image_atomic_comp_swap:
1717 return BRW_AOP_CMPWR;
1718 default:
1719 unreachable("Not reachable.");
1720 }
1721 }
1722
1723 static fs_inst *
1724 emit_pixel_interpolater_send(const fs_builder &bld,
1725 enum opcode opcode,
1726 const fs_reg &dst,
1727 const fs_reg &src,
1728 const fs_reg &desc,
1729 glsl_interp_mode interpolation)
1730 {
1731 struct brw_wm_prog_data *wm_prog_data =
1732 brw_wm_prog_data(bld.shader->stage_prog_data);
1733 fs_inst *inst;
1734 fs_reg payload;
1735 int mlen;
1736
1737 if (src.file == BAD_FILE) {
1738 /* Dummy payload */
1739 payload = bld.vgrf(BRW_REGISTER_TYPE_F, 1);
1740 mlen = 1;
1741 } else {
1742 payload = src;
1743 mlen = 2 * bld.dispatch_width() / 8;
1744 }
1745
1746 inst = bld.emit(opcode, dst, payload, desc);
1747 inst->mlen = mlen;
1748 /* 2 floats per slot returned */
1749 inst->size_written = 2 * dst.component_size(inst->exec_size);
1750 inst->pi_noperspective = interpolation == INTERP_MODE_NOPERSPECTIVE;
1751
1752 wm_prog_data->pulls_bary = true;
1753
1754 return inst;
1755 }
1756
1757 /**
1758 * Computes 1 << x, given a D/UD register containing some value x.
1759 */
1760 static fs_reg
1761 intexp2(const fs_builder &bld, const fs_reg &x)
1762 {
1763 assert(x.type == BRW_REGISTER_TYPE_UD || x.type == BRW_REGISTER_TYPE_D);
1764
1765 fs_reg result = bld.vgrf(x.type, 1);
1766 fs_reg one = bld.vgrf(x.type, 1);
1767
1768 bld.MOV(one, retype(brw_imm_d(1), one.type));
1769 bld.SHL(result, one, x);
1770 return result;
1771 }
1772
1773 void
1774 fs_visitor::emit_gs_end_primitive(const nir_src &vertex_count_nir_src)
1775 {
1776 assert(stage == MESA_SHADER_GEOMETRY);
1777
1778 struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
1779
1780 if (gs_compile->control_data_header_size_bits == 0)
1781 return;
1782
1783 /* We can only do EndPrimitive() functionality when the control data
1784 * consists of cut bits. Fortunately, the only time it isn't is when the
1785 * output type is points, in which case EndPrimitive() is a no-op.
1786 */
1787 if (gs_prog_data->control_data_format !=
1788 GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
1789 return;
1790 }
1791
1792 /* Cut bits use one bit per vertex. */
1793 assert(gs_compile->control_data_bits_per_vertex == 1);
1794
1795 fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
1796 vertex_count.type = BRW_REGISTER_TYPE_UD;
1797
1798 /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
1799 * vertex n, 0 otherwise. So all we need to do here is mark bit
1800 * (vertex_count - 1) % 32 in the cut_bits register to indicate that
1801 * EndPrimitive() was called after emitting vertex (vertex_count - 1);
1802 * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
1803 *
1804 * Note that if EndPrimitive() is called before emitting any vertices, this
1805 * will cause us to set bit 31 of the control_data_bits register to 1.
1806 * That's fine because:
1807 *
1808 * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
1809 * output, so the hardware will ignore cut bit 31.
1810 *
1811 * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
1812 * last vertex, so setting cut bit 31 has no effect (since the primitive
1813 * is automatically ended when the GS terminates).
1814 *
1815 * - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
1816 * control_data_bits register to 0 when the first vertex is emitted.
1817 */
1818
1819 const fs_builder abld = bld.annotate("end primitive");
1820
1821 /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
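   /* For example, calling EndPrimitive() right after emitting the fifth
    * vertex (vertex_count == 5) yields prev_count == 4 and mask == 1 << 4,
    * i.e. cut bit 4 is set for vertex index 4.
    */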
1822 fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1823 abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
1824 fs_reg mask = intexp2(abld, prev_count);
1825 /* Note: we're relying on the fact that the GEN SHL instruction only pays
1826 * attention to the lower 5 bits of its second source argument, so on this
1827 * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
1828 * ((vertex_count - 1) % 32).
1829 */
1830 abld.OR(this->control_data_bits, this->control_data_bits, mask);
1831 }
1832
1833 void
1834 fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count)
1835 {
1836 assert(stage == MESA_SHADER_GEOMETRY);
1837 assert(gs_compile->control_data_bits_per_vertex != 0);
1838
1839 struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
1840
1841 const fs_builder abld = bld.annotate("emit control data bits");
1842 const fs_builder fwa_bld = bld.exec_all();
1843
1844 /* We use a single UD register to accumulate control data bits (32 bits
1845 * for each of the SIMD8 channels). So we need to write a DWord (32 bits)
1846 * at a time.
1847 *
1848 * Unfortunately, the URB_WRITE_SIMD8 message uses 128-bit (OWord) offsets.
1849    * We have to select a 128-bit group via the Global and Per-Slot Offsets, then
1850 * use the Channel Mask phase to enable/disable which DWord within that
1851 * group to write. (Remember, different SIMD8 channels may have emitted
1852 * different numbers of vertices, so we may need per-slot offsets.)
1853 *
1854 * Channel masking presents an annoying problem: we may have to replicate
1855 * the data up to 4 times:
1856 *
1857 * Msg = Handles, Per-Slot Offsets, Channel Masks, Data, Data, Data, Data.
1858 *
1859 * To avoid penalizing shaders that emit a small number of vertices, we
1860 * can avoid these sometimes: if the size of the control data header is
1861    * <= 128 bits, then there is only 1 OWord. All SIMD8 channels will
1862    * land in the same 128-bit group, so we can skip per-slot offsets.
1863 *
1864 * Similarly, if the control data header is <= 32 bits, there is only one
1865 * DWord, so we can skip channel masks.
1866 */
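   /* As a rough example (assuming control_data_header_size_bits is simply
    * max_vertices * bits_per_vertex): a GS using cut bits (1 bit per vertex)
    * with max_vertices == 16 has a 16-bit header that fits in a single DWord
    * of a single OWord, so the plain URB_WRITE_SIMD8 opcode below needs
    * neither channel masks nor per-slot offsets.
    */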
1867 enum opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;
1868
1869 fs_reg channel_mask, per_slot_offset;
1870
1871 if (gs_compile->control_data_header_size_bits > 32) {
1872 opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
1873 channel_mask = vgrf(glsl_type::uint_type);
1874 }
1875
1876 if (gs_compile->control_data_header_size_bits > 128) {
1877 opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT;
1878 per_slot_offset = vgrf(glsl_type::uint_type);
1879 }
1880
1881 /* Figure out which DWord we're trying to write to using the formula:
1882 *
1883 * dword_index = (vertex_count - 1) * bits_per_vertex / 32
1884 *
1885 * Since bits_per_vertex is a power of two, and is known at compile
1886 * time, this can be optimized to:
1887 *
1888 * dword_index = (vertex_count - 1) >> (6 - log2(bits_per_vertex))
1889 */
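   /* Worked example: with bits_per_vertex == 2 and vertex_count == 33,
    * prev_count == 32 and util_last_bit(2) == 2, so the shift is 6 - 2 == 4
    * and dword_index == 32 >> 4 == 2; per_slot_offset becomes 0 and the
    * channel mask selects DWord 2 within the first OWord.
    */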
1890 if (opcode != SHADER_OPCODE_URB_WRITE_SIMD8) {
1891 fs_reg dword_index = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1892 fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1893 abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
1894 unsigned log2_bits_per_vertex =
1895 util_last_bit(gs_compile->control_data_bits_per_vertex);
1896 abld.SHR(dword_index, prev_count, brw_imm_ud(6u - log2_bits_per_vertex));
1897
1898 if (per_slot_offset.file != BAD_FILE) {
1899 /* Set the per-slot offset to dword_index / 4, so that we'll write to
1900 * the appropriate OWord within the control data header.
1901 */
1902 abld.SHR(per_slot_offset, dword_index, brw_imm_ud(2u));
1903 }
1904
1905 /* Set the channel masks to 1 << (dword_index % 4), so that we'll
1906 * write to the appropriate DWORD within the OWORD.
1907 */
1908 fs_reg channel = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1909 fwa_bld.AND(channel, dword_index, brw_imm_ud(3u));
1910 channel_mask = intexp2(fwa_bld, channel);
1911 /* Then the channel masks need to be in bits 23:16. */
1912 fwa_bld.SHL(channel_mask, channel_mask, brw_imm_ud(16u));
1913 }
1914
1915 /* Store the control data bits in the message payload and send it. */
1916 int mlen = 2;
1917 if (channel_mask.file != BAD_FILE)
1918 mlen += 4; /* channel masks, plus 3 extra copies of the data */
1919 if (per_slot_offset.file != BAD_FILE)
1920 mlen++;
1921
1922 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
1923 fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen);
1924 int i = 0;
1925 sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
1926 if (per_slot_offset.file != BAD_FILE)
1927 sources[i++] = per_slot_offset;
1928 if (channel_mask.file != BAD_FILE)
1929 sources[i++] = channel_mask;
1930 while (i < mlen) {
1931 sources[i++] = this->control_data_bits;
1932 }
1933
1934 abld.LOAD_PAYLOAD(payload, sources, mlen, mlen);
1935 fs_inst *inst = abld.emit(opcode, reg_undef, payload);
1936 inst->mlen = mlen;
1937    /* We need to increment Global Offset by 256 bits to make room for
1938 * Broadwell's extra "Vertex Count" payload at the beginning of the
1939 * URB entry. Since this is an OWord message, Global Offset is counted
1940 * in 128-bit units, so we must set it to 2.
1941 */
1942 if (gs_prog_data->static_vertex_count == -1)
1943 inst->offset = 2;
1944 }
1945
1946 void
1947 fs_visitor::set_gs_stream_control_data_bits(const fs_reg &vertex_count,
1948 unsigned stream_id)
1949 {
1950 /* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */
1951
1952 /* Note: we are calling this *before* increasing vertex_count, so
1953 * this->vertex_count == vertex_count - 1 in the formula above.
1954 */
1955
1956 /* Stream mode uses 2 bits per vertex */
1957 assert(gs_compile->control_data_bits_per_vertex == 2);
1958
1959 /* Must be a valid stream */
1960 assert(stream_id < MAX_VERTEX_STREAMS);
1961
1962 /* Control data bits are initialized to 0 so we don't have to set any
1963 * bits when sending vertices to stream 0.
1964 */
1965 if (stream_id == 0)
1966 return;
1967
1968 const fs_builder abld = bld.annotate("set stream control data bits", NULL);
1969
1970 /* reg::sid = stream_id */
1971 fs_reg sid = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1972 abld.MOV(sid, brw_imm_ud(stream_id));
1973
1974 /* reg:shift_count = 2 * (vertex_count - 1) */
1975 fs_reg shift_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1976 abld.SHL(shift_count, vertex_count, brw_imm_ud(1u));
1977
1978 /* Note: we're relying on the fact that the GEN SHL instruction only pays
1979 * attention to the lower 5 bits of its second source argument, so on this
1980 * architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
1981 * stream_id << ((2 * (vertex_count - 1)) % 32).
1982 */
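   /* For example, with stream_id == 1 and a vertex_count argument of 2 (the
    * third vertex is being emitted), shift_count == 4 and mask == 1 << 4, so
    * bits 5:4 of control_data_bits record stream 1 for that vertex.
    */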
1983 fs_reg mask = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1984 abld.SHL(mask, sid, shift_count);
1985 abld.OR(this->control_data_bits, this->control_data_bits, mask);
1986 }
1987
1988 void
1989 fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src,
1990 unsigned stream_id)
1991 {
1992 assert(stage == MESA_SHADER_GEOMETRY);
1993
1994 struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
1995
1996 fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
1997 vertex_count.type = BRW_REGISTER_TYPE_UD;
1998
1999 /* Haswell and later hardware ignores the "Render Stream Select" bits
2000 * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
2001 * and instead sends all primitives down the pipeline for rasterization.
2002 * If the SOL stage is enabled, "Render Stream Select" is honored and
2003 * primitives bound to non-zero streams are discarded after stream output.
2004 *
2005    * Since the only purpose of primitives sent to non-zero streams is to
2006 * be recorded by transform feedback, we can simply discard all geometry
2007 * bound to these streams when transform feedback is disabled.
2008 */
2009 if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
2010 return;
2011
2012 /* If we're outputting 32 control data bits or less, then we can wait
2013 * until the shader is over to output them all. Otherwise we need to
2014 * output them as we go. Now is the time to do it, since we're about to
2015 * output the vertex_count'th vertex, so it's guaranteed that the
2016 * control data bits associated with the (vertex_count - 1)th vertex are
2017 * correct.
2018 */
2019 if (gs_compile->control_data_header_size_bits > 32) {
2020 const fs_builder abld =
2021 bld.annotate("emit vertex: emit control data bits");
2022
2023 /* Only emit control data bits if we've finished accumulating a batch
2024 * of 32 bits. This is the case when:
2025 *
2026 * (vertex_count * bits_per_vertex) % 32 == 0
2027 *
2028 * (in other words, when the last 5 bits of vertex_count *
2029 * bits_per_vertex are 0). Assuming bits_per_vertex == 2^n for some
2030 * integer n (which is always the case, since bits_per_vertex is
2031 * always 1 or 2), this is equivalent to requiring that the last 5-n
2032 * bits of vertex_count are 0:
2033 *
2034 * vertex_count & (2^(5-n) - 1) == 0
2035 *
2036 * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
2037 * equivalent to:
2038 *
2039 * vertex_count & (32 / bits_per_vertex - 1) == 0
2040 *
2041 * TODO: If vertex_count is an immediate, we could do some of this math
2042 * at compile time...
2043 */
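      /* With bits_per_vertex == 2, for example, the AND below tests
       * vertex_count & 15, so accumulated control data is flushed every 16
       * vertices, i.e. whenever a full 32 bits have been gathered.
       */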
2044 fs_inst *inst =
2045 abld.AND(bld.null_reg_d(), vertex_count,
2046 brw_imm_ud(32u / gs_compile->control_data_bits_per_vertex - 1u));
2047 inst->conditional_mod = BRW_CONDITIONAL_Z;
2048
2049 abld.IF(BRW_PREDICATE_NORMAL);
2050 /* If vertex_count is 0, then no control data bits have been
2051 * accumulated yet, so we can skip emitting them.
2052 */
2053 abld.CMP(bld.null_reg_d(), vertex_count, brw_imm_ud(0u),
2054 BRW_CONDITIONAL_NEQ);
2055 abld.IF(BRW_PREDICATE_NORMAL);
2056 emit_gs_control_data_bits(vertex_count);
2057 abld.emit(BRW_OPCODE_ENDIF);
2058
2059 /* Reset control_data_bits to 0 so we can start accumulating a new
2060 * batch.
2061 *
2062 * Note: in the case where vertex_count == 0, this neutralizes the
2063 * effect of any call to EndPrimitive() that the shader may have
2064 * made before outputting its first vertex.
2065 */
2066 inst = abld.MOV(this->control_data_bits, brw_imm_ud(0u));
2067 inst->force_writemask_all = true;
2068 abld.emit(BRW_OPCODE_ENDIF);
2069 }
2070
2071 emit_urb_writes(vertex_count);
2072
2073 /* In stream mode we have to set control data bits for all vertices
2074    * unless we have disabled control data bits completely (which we do
2075    * for GL_POINTS outputs that don't use streams).
2076 */
2077 if (gs_compile->control_data_header_size_bits > 0 &&
2078 gs_prog_data->control_data_format ==
2079 GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
2080 set_gs_stream_control_data_bits(vertex_count, stream_id);
2081 }
2082 }
2083
2084 void
2085 fs_visitor::emit_gs_input_load(const fs_reg &dst,
2086 const nir_src &vertex_src,
2087 unsigned base_offset,
2088 const nir_src &offset_src,
2089 unsigned num_components,
2090 unsigned first_component)
2091 {
2092 struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
2093
2094 nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
2095 nir_const_value *offset_const = nir_src_as_const_value(offset_src);
2096 const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8;
2097
2098 /* TODO: figure out push input layout for invocations == 1 */
2099 /* TODO: make this work with 64-bit inputs */
2100 if (gs_prog_data->invocations == 1 &&
2101 type_sz(dst.type) <= 4 &&
2102 offset_const != NULL && vertex_const != NULL &&
2103 4 * (base_offset + offset_const->u32[0]) < push_reg_count) {
2104 int imm_offset = (base_offset + offset_const->u32[0]) * 4 +
2105 vertex_const->u32[0] * push_reg_count;
2106 for (unsigned i = 0; i < num_components; i++) {
2107 bld.MOV(offset(dst, bld, i),
2108 fs_reg(ATTR, imm_offset + i + first_component, dst.type));
2109 }
2110 return;
2111 }
2112
2113 /* Resort to the pull model. Ensure the VUE handles are provided. */
2114 assert(gs_prog_data->base.include_vue_handles);
2115
2116 unsigned first_icp_handle = gs_prog_data->include_primitive_id ? 3 : 2;
2117 fs_reg icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2118
2119 if (gs_prog_data->invocations == 1) {
2120 if (vertex_const) {
2121 /* The vertex index is constant; just select the proper URB handle. */
2122 icp_handle =
2123 retype(brw_vec8_grf(first_icp_handle + vertex_const->i32[0], 0),
2124 BRW_REGISTER_TYPE_UD);
2125 } else {
2126 /* The vertex index is non-constant. We need to use indirect
2127 * addressing to fetch the proper URB handle.
2128 *
2129 * First, we start with the sequence <7, 6, 5, 4, 3, 2, 1, 0>
2130 * indicating that channel <n> should read the handle from
2131 * DWord <n>. We convert that to bytes by multiplying by 4.
2132 *
2133 * Next, we convert the vertex index to bytes by multiplying
2134 * by 32 (shifting by 5), and add the two together. This is
2135 * the final indirect byte offset.
2136 */
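         /* Concretely, vertex index 3 becomes a byte offset of 3 * 32 == 96,
          * and channel 2 adds its 8-byte channel offset, so it fetches its
          * handle from byte 104 of the block starting at first_icp_handle.
          */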
2137 fs_reg sequence = bld.vgrf(BRW_REGISTER_TYPE_W, 1);
2138 fs_reg channel_offsets = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2139 fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2140 fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2141
2142 /* sequence = <7, 6, 5, 4, 3, 2, 1, 0> */
2143 bld.MOV(sequence, fs_reg(brw_imm_v(0x76543210)));
2144 /* channel_offsets = 4 * sequence = <28, 24, 20, 16, 12, 8, 4, 0> */
2145 bld.SHL(channel_offsets, sequence, brw_imm_ud(2u));
2146 /* Convert vertex_index to bytes (multiply by 32) */
2147 bld.SHL(vertex_offset_bytes,
2148 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2149 brw_imm_ud(5u));
2150 bld.ADD(icp_offset_bytes, vertex_offset_bytes, channel_offsets);
2151
2152 /* Use first_icp_handle as the base offset. There is one register
2153 * of URB handles per vertex, so inform the register allocator that
2154 * we might read up to nir->info.gs.vertices_in registers.
2155 */
2156 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2157 retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
2158 fs_reg(icp_offset_bytes),
2159 brw_imm_ud(nir->info.gs.vertices_in * REG_SIZE));
2160 }
2161 } else {
2162 assert(gs_prog_data->invocations > 1);
2163
2164 if (vertex_const) {
2165 assert(devinfo->gen >= 9 || vertex_const->i32[0] <= 5);
2166 bld.MOV(icp_handle,
2167 retype(brw_vec1_grf(first_icp_handle +
2168 vertex_const->i32[0] / 8,
2169 vertex_const->i32[0] % 8),
2170 BRW_REGISTER_TYPE_UD));
2171 } else {
2172 /* The vertex index is non-constant. We need to use indirect
2173 * addressing to fetch the proper URB handle.
2174 *
2175 */
2176 fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2177
2178 /* Convert vertex_index to bytes (multiply by 4) */
2179 bld.SHL(icp_offset_bytes,
2180 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2181 brw_imm_ud(2u));
2182
2183 /* Use first_icp_handle as the base offset. There is one DWord
2184 * of URB handles per vertex, so inform the register allocator that
2185 * we might read up to ceil(nir->info.gs.vertices_in / 8) registers.
2186 */
2187 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2188 retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
2189 fs_reg(icp_offset_bytes),
2190 brw_imm_ud(DIV_ROUND_UP(nir->info.gs.vertices_in, 8) *
2191 REG_SIZE));
2192 }
2193 }
2194
2195 fs_inst *inst;
2196
2197 fs_reg tmp_dst = dst;
2198 fs_reg indirect_offset = get_nir_src(offset_src);
2199 unsigned num_iterations = 1;
2200 unsigned orig_num_components = num_components;
2201
2202 if (type_sz(dst.type) == 8) {
2203 if (num_components > 2) {
2204 num_iterations = 2;
2205 num_components = 2;
2206 }
2207 fs_reg tmp = fs_reg(VGRF, alloc.allocate(4), dst.type);
2208 tmp_dst = tmp;
2209 first_component = first_component / 2;
2210 }
2211
2212 for (unsigned iter = 0; iter < num_iterations; iter++) {
2213 if (offset_const) {
2214 /* Constant indexing - use global offset. */
2215 if (first_component != 0) {
2216 unsigned read_components = num_components + first_component;
2217 fs_reg tmp = bld.vgrf(dst.type, read_components);
2218 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp, icp_handle);
2219 inst->size_written = read_components *
2220 tmp.component_size(inst->exec_size);
2221 for (unsigned i = 0; i < num_components; i++) {
2222 bld.MOV(offset(tmp_dst, bld, i),
2223 offset(tmp, bld, i + first_component));
2224 }
2225 } else {
2226 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp_dst,
2227 icp_handle);
2228 inst->size_written = num_components *
2229 tmp_dst.component_size(inst->exec_size);
2230 }
2231 inst->offset = base_offset + offset_const->u32[0];
2232 inst->mlen = 1;
2233 } else {
2234 /* Indirect indexing - use per-slot offsets as well. */
2235 const fs_reg srcs[] = { icp_handle, indirect_offset };
2236 unsigned read_components = num_components + first_component;
2237 fs_reg tmp = bld.vgrf(dst.type, read_components);
2238 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2239 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2240 if (first_component != 0) {
2241 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
2242 payload);
2243 inst->size_written = read_components *
2244 tmp.component_size(inst->exec_size);
2245 for (unsigned i = 0; i < num_components; i++) {
2246 bld.MOV(offset(tmp_dst, bld, i),
2247 offset(tmp, bld, i + first_component));
2248 }
2249 } else {
2250 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp_dst,
2251 payload);
2252 inst->size_written = num_components *
2253 tmp_dst.component_size(inst->exec_size);
2254 }
2255 inst->offset = base_offset;
2256 inst->mlen = 2;
2257 }
2258
2259 if (type_sz(dst.type) == 8) {
2260 shuffle_32bit_load_result_to_64bit_data(
2261 bld, tmp_dst, retype(tmp_dst, BRW_REGISTER_TYPE_F), num_components);
2262
2263 for (unsigned c = 0; c < num_components; c++)
2264 bld.MOV(offset(dst, bld, iter * 2 + c), offset(tmp_dst, bld, c));
2265 }
2266
2267 if (num_iterations > 1) {
2268 num_components = orig_num_components - 2;
2269 if(offset_const) {
2270 base_offset++;
2271 } else {
2272 fs_reg new_indirect = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2273 bld.ADD(new_indirect, indirect_offset, brw_imm_ud(1u));
2274 indirect_offset = new_indirect;
2275 }
2276 }
2277 }
2278 }
2279
2280 fs_reg
2281 fs_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
2282 {
2283 nir_src *offset_src = nir_get_io_offset_src(instr);
2284 nir_const_value *const_value = nir_src_as_const_value(*offset_src);
2285
2286 if (const_value) {
2287 /* The only constant offset we should find is 0. brw_nir.c's
2288 * add_const_offset_to_base() will fold other constant offsets
2289 * into instr->const_index[0].
2290 */
2291 assert(const_value->u32[0] == 0);
2292 return fs_reg();
2293 }
2294
2295 return get_nir_src(*offset_src);
2296 }
2297
2298 static void
2299 do_untyped_vector_read(const fs_builder &bld,
2300 const fs_reg dest,
2301 const fs_reg surf_index,
2302 const fs_reg offset_reg,
2303 unsigned num_components)
2304 {
2305 if (type_sz(dest.type) <= 2) {
2306 fs_reg read_offset = bld.vgrf(BRW_REGISTER_TYPE_UD);
2307 bld.MOV(read_offset, offset_reg);
2308 for (unsigned i = 0; i < num_components; i++) {
2309 fs_reg read_reg =
2310 emit_byte_scattered_read(bld, surf_index, read_offset,
2311 1 /* dims */, 1,
2312 type_sz(dest.type) * 8 /* bit_size */,
2313 BRW_PREDICATE_NONE);
2314 bld.MOV(offset(dest, bld, i), subscript(read_reg, dest.type, 0));
2315 bld.ADD(read_offset, read_offset, brw_imm_ud(type_sz(dest.type)));
2316 }
2317 } else if (type_sz(dest.type) == 4) {
2318 fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
2319 1 /* dims */,
2320 num_components,
2321 BRW_PREDICATE_NONE);
2322 read_result.type = dest.type;
2323 for (unsigned i = 0; i < num_components; i++)
2324 bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
2325 } else if (type_sz(dest.type) == 8) {
2326 /* Reading a dvec, so we need to:
2327 *
2328 * 1. Multiply num_components by 2, to account for the fact that we
2329 * need to read 64-bit components.
2330 * 2. Shuffle the result of the load to form valid 64-bit elements
2331 * 3. Emit a second load (for components z/w) if needed.
2332 */
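      /* For a dvec3, for example, the loop below runs twice: the first
       * untyped read fetches 4 DWords (the x/y doubles), the second fetches
       * 2 DWords (z), and each result is shuffled into 64-bit form.
       */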
2333 fs_reg read_offset = bld.vgrf(BRW_REGISTER_TYPE_UD);
2334 bld.MOV(read_offset, offset_reg);
2335
2336 int iters = num_components <= 2 ? 1 : 2;
2337
2338       /* Load the dvec: the first iteration loads components x/y; the second
2339        * iteration, if needed, loads components z/w.
2340        */
2341 for (int it = 0; it < iters; it++) {
2342 /* Compute number of components to read in this iteration */
2343 int iter_components = MIN2(2, num_components);
2344 num_components -= iter_components;
2345
2346 /* Read. Since this message reads 32-bit components, we need to
2347 * read twice as many components.
2348 */
2349 fs_reg read_result = emit_untyped_read(bld, surf_index, read_offset,
2350 1 /* dims */,
2351 iter_components * 2,
2352 BRW_PREDICATE_NONE);
2353
2354 /* Shuffle the 32-bit load result into valid 64-bit data */
2355 const fs_reg packed_result = bld.vgrf(dest.type, iter_components);
2356 shuffle_32bit_load_result_to_64bit_data(
2357 bld, packed_result, read_result, iter_components);
2358
2359 /* Move each component to its destination */
2360 read_result = retype(read_result, BRW_REGISTER_TYPE_DF);
2361 for (int c = 0; c < iter_components; c++) {
2362 bld.MOV(offset(dest, bld, it * 2 + c),
2363 offset(packed_result, bld, c));
2364 }
2365
2366 bld.ADD(read_offset, read_offset, brw_imm_ud(16));
2367 }
2368 } else {
2369 unreachable("Unsupported type");
2370 }
2371 }
2372
2373 void
2374 fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld,
2375 nir_intrinsic_instr *instr)
2376 {
2377 assert(stage == MESA_SHADER_VERTEX);
2378
2379 fs_reg dest;
2380 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2381 dest = get_nir_dest(instr->dest);
2382
2383 switch (instr->intrinsic) {
2384 case nir_intrinsic_load_vertex_id:
2385 unreachable("should be lowered by lower_vertex_id()");
2386
2387 case nir_intrinsic_load_vertex_id_zero_base:
2388 case nir_intrinsic_load_base_vertex:
2389 case nir_intrinsic_load_instance_id:
2390 case nir_intrinsic_load_base_instance:
2391 case nir_intrinsic_load_draw_id: {
2392 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
2393 fs_reg val = nir_system_values[sv];
2394 assert(val.file != BAD_FILE);
2395 dest.type = val.type;
2396 bld.MOV(dest, val);
2397 break;
2398 }
2399
2400 case nir_intrinsic_load_input: {
2401 fs_reg src = fs_reg(ATTR, nir_intrinsic_base(instr) * 4, dest.type);
2402 unsigned first_component = nir_intrinsic_component(instr);
2403 unsigned num_components = instr->num_components;
2404 enum brw_reg_type type = dest.type;
2405
2406 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
2407 assert(const_offset && "Indirect input loads not allowed");
2408 src = offset(src, bld, const_offset->u32[0]);
2409
2410 for (unsigned j = 0; j < num_components; j++) {
2411 bld.MOV(offset(dest, bld, j), offset(src, bld, j + first_component));
2412 }
2413
2414 if (type == BRW_REGISTER_TYPE_DF) {
2415          /* Once the double vector is read, restore its original register
2416           * type so normal execution can continue.
2417 */
2418 src = retype(src, type);
2419 dest = retype(dest, type);
2420 }
2421
2422 if (type_sz(src.type) == 8) {
2423 shuffle_32bit_load_result_to_64bit_data(bld,
2424 dest,
2425 retype(dest, BRW_REGISTER_TYPE_F),
2426 instr->num_components);
2427 }
2428 break;
2429 }
2430
2431 default:
2432 nir_emit_intrinsic(bld, instr);
2433 break;
2434 }
2435 }
2436
2437 void
2438 fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
2439 nir_intrinsic_instr *instr)
2440 {
2441 assert(stage == MESA_SHADER_TESS_CTRL);
2442 struct brw_tcs_prog_key *tcs_key = (struct brw_tcs_prog_key *) key;
2443 struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
2444
2445 fs_reg dst;
2446 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2447 dst = get_nir_dest(instr->dest);
2448
2449 switch (instr->intrinsic) {
2450 case nir_intrinsic_load_primitive_id:
2451 bld.MOV(dst, fs_reg(brw_vec1_grf(0, 1)));
2452 break;
2453 case nir_intrinsic_load_invocation_id:
2454 bld.MOV(retype(dst, invocation_id.type), invocation_id);
2455 break;
2456 case nir_intrinsic_load_patch_vertices_in:
2457 bld.MOV(retype(dst, BRW_REGISTER_TYPE_D),
2458 brw_imm_d(tcs_key->input_vertices));
2459 break;
2460
2461 case nir_intrinsic_barrier: {
2462 if (tcs_prog_data->instances == 1)
2463 break;
2464
2465 fs_reg m0 = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2466 fs_reg m0_2 = component(m0, 2);
2467
2468 const fs_builder chanbld = bld.exec_all().group(1, 0);
2469
2470 /* Zero the message header */
2471 bld.exec_all().MOV(m0, brw_imm_ud(0u));
2472
2473 /* Copy "Barrier ID" from r0.2, bits 16:13 */
2474 chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
2475 brw_imm_ud(INTEL_MASK(16, 13)));
2476
2477 /* Shift it up to bits 27:24. */
2478 chanbld.SHL(m0_2, m0_2, brw_imm_ud(11));
2479
2480 /* Set the Barrier Count and the enable bit */
2481 chanbld.OR(m0_2, m0_2,
2482 brw_imm_ud(tcs_prog_data->instances << 9 | (1 << 15)));
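      /* With tcs_prog_data->instances == 8, for instance, this ORs in
       * 8 << 9 (the barrier count) and bit 15 (the barrier enable),
       * alongside the barrier ID already shifted into bits 27:24.
       */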
2483
2484 bld.emit(SHADER_OPCODE_BARRIER, bld.null_reg_ud(), m0);
2485 break;
2486 }
2487
2488 case nir_intrinsic_load_input:
2489 unreachable("nir_lower_io should never give us these.");
2490 break;
2491
2492 case nir_intrinsic_load_per_vertex_input: {
2493 fs_reg indirect_offset = get_indirect_offset(instr);
2494 unsigned imm_offset = instr->const_index[0];
2495
2496 const nir_src &vertex_src = instr->src[0];
2497 nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
2498
2499 fs_inst *inst;
2500
2501 fs_reg icp_handle;
2502
2503 if (vertex_const) {
2504 /* Emit a MOV to resolve <0,1,0> regioning. */
2505 icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2506 bld.MOV(icp_handle,
2507 retype(brw_vec1_grf(1 + (vertex_const->i32[0] >> 3),
2508 vertex_const->i32[0] & 7),
2509 BRW_REGISTER_TYPE_UD));
2510 } else if (tcs_prog_data->instances == 1 &&
2511 vertex_src.is_ssa &&
2512 vertex_src.ssa->parent_instr->type == nir_instr_type_intrinsic &&
2513 nir_instr_as_intrinsic(vertex_src.ssa->parent_instr)->intrinsic == nir_intrinsic_load_invocation_id) {
2514 /* For the common case of only 1 instance, an array index of
2515 * gl_InvocationID means reading g1. Skip all the indirect work.
2516 */
2517 icp_handle = retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD);
2518 } else {
2519 /* The vertex index is non-constant. We need to use indirect
2520 * addressing to fetch the proper URB handle.
2521 */
2522 icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2523
2524 /* Each ICP handle is a single DWord (4 bytes) */
2525 fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2526 bld.SHL(vertex_offset_bytes,
2527 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2528 brw_imm_ud(2u));
2529
2530 /* Start at g1. We might read up to 4 registers. */
2531 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2532 retype(brw_vec8_grf(1, 0), icp_handle.type), vertex_offset_bytes,
2533 brw_imm_ud(4 * REG_SIZE));
2534 }
2535
2536 /* We can only read two double components with each URB read, so
2537 * we send two read messages in that case, each one loading up to
2538 * two double components.
2539 */
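      /* A dvec4 per-vertex input, for example, takes two iterations of the
       * loop below, each reading two double components, with imm_offset
       * advanced by one slot for the second message.
       */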
2540 unsigned num_iterations = 1;
2541 unsigned num_components = instr->num_components;
2542 unsigned first_component = nir_intrinsic_component(instr);
2543 fs_reg orig_dst = dst;
2544 if (type_sz(dst.type) == 8) {
2545 first_component = first_component / 2;
2546 if (instr->num_components > 2) {
2547 num_iterations = 2;
2548 num_components = 2;
2549 }
2550
2551 fs_reg tmp = fs_reg(VGRF, alloc.allocate(4), dst.type);
2552 dst = tmp;
2553 }
2554
2555 for (unsigned iter = 0; iter < num_iterations; iter++) {
2556 if (indirect_offset.file == BAD_FILE) {
2557 /* Constant indexing - use global offset. */
2558 if (first_component != 0) {
2559 unsigned read_components = num_components + first_component;
2560 fs_reg tmp = bld.vgrf(dst.type, read_components);
2561 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp, icp_handle);
2562 for (unsigned i = 0; i < num_components; i++) {
2563 bld.MOV(offset(dst, bld, i),
2564 offset(tmp, bld, i + first_component));
2565 }
2566 } else {
2567 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
2568 }
2569 inst->offset = imm_offset;
2570 inst->mlen = 1;
2571 } else {
2572 /* Indirect indexing - use per-slot offsets as well. */
2573 const fs_reg srcs[] = { icp_handle, indirect_offset };
2574 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2575 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2576 if (first_component != 0) {
2577 unsigned read_components = num_components + first_component;
2578 fs_reg tmp = bld.vgrf(dst.type, read_components);
2579 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
2580 payload);
2581 for (unsigned i = 0; i < num_components; i++) {
2582 bld.MOV(offset(dst, bld, i),
2583 offset(tmp, bld, i + first_component));
2584 }
2585 } else {
2586 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst,
2587 payload);
2588 }
2589 inst->offset = imm_offset;
2590 inst->mlen = 2;
2591 }
2592 inst->size_written = (num_components + first_component) *
2593 inst->dst.component_size(inst->exec_size);
2594
2595 /* If we are reading 64-bit data using 32-bit read messages we need
2596           * to build proper 64-bit data elements by shuffling the low and high
2597 * 32-bit components around like we do for other things like UBOs
2598 * or SSBOs.
2599 */
2600 if (type_sz(dst.type) == 8) {
2601 shuffle_32bit_load_result_to_64bit_data(
2602 bld, dst, retype(dst, BRW_REGISTER_TYPE_F), num_components);
2603
2604 for (unsigned c = 0; c < num_components; c++) {
2605 bld.MOV(offset(orig_dst, bld, iter * 2 + c),
2606 offset(dst, bld, c));
2607 }
2608 }
2609
2610 /* Copy the temporary to the destination to deal with writemasking.
2611 *
2612 * Also attempt to deal with gl_PointSize being in the .w component.
2613 */
2614 if (inst->offset == 0 && indirect_offset.file == BAD_FILE) {
2615 assert(type_sz(dst.type) < 8);
2616 inst->dst = bld.vgrf(dst.type, 4);
2617 inst->size_written = 4 * REG_SIZE;
2618 bld.MOV(dst, offset(inst->dst, bld, 3));
2619 }
2620
2621          /* If we are loading double data and need a second read message,
2622           * adjust the write offset
2623 */
2624 if (num_iterations > 1) {
2625 num_components = instr->num_components - 2;
2626 imm_offset++;
2627 }
2628 }
2629 break;
2630 }
2631
2632 case nir_intrinsic_load_output:
2633 case nir_intrinsic_load_per_vertex_output: {
2634 fs_reg indirect_offset = get_indirect_offset(instr);
2635 unsigned imm_offset = instr->const_index[0];
2636 unsigned first_component = nir_intrinsic_component(instr);
2637
2638 fs_inst *inst;
2639 if (indirect_offset.file == BAD_FILE) {
2640 /* Replicate the patch handle to all enabled channels */
2641 fs_reg patch_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2642 bld.MOV(patch_handle,
2643 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD));
2644
2645 {
2646 if (first_component != 0) {
2647 unsigned read_components =
2648 instr->num_components + first_component;
2649 fs_reg tmp = bld.vgrf(dst.type, read_components);
2650 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp,
2651 patch_handle);
2652 inst->size_written = read_components * REG_SIZE;
2653 for (unsigned i = 0; i < instr->num_components; i++) {
2654 bld.MOV(offset(dst, bld, i),
2655 offset(tmp, bld, i + first_component));
2656 }
2657 } else {
2658 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst,
2659 patch_handle);
2660 inst->size_written = instr->num_components * REG_SIZE;
2661 }
2662 inst->offset = imm_offset;
2663 inst->mlen = 1;
2664 }
2665 } else {
2666 /* Indirect indexing - use per-slot offsets as well. */
2667 const fs_reg srcs[] = {
2668 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD),
2669 indirect_offset
2670 };
2671 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2672 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2673 if (first_component != 0) {
2674 unsigned read_components =
2675 instr->num_components + first_component;
2676 fs_reg tmp = bld.vgrf(dst.type, read_components);
2677 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
2678 payload);
2679 inst->size_written = read_components * REG_SIZE;
2680 for (unsigned i = 0; i < instr->num_components; i++) {
2681 bld.MOV(offset(dst, bld, i),
2682 offset(tmp, bld, i + first_component));
2683 }
2684 } else {
2685 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst,
2686 payload);
2687 inst->size_written = instr->num_components * REG_SIZE;
2688 }
2689 inst->offset = imm_offset;
2690 inst->mlen = 2;
2691 }
2692 break;
2693 }
2694
2695 case nir_intrinsic_store_output:
2696 case nir_intrinsic_store_per_vertex_output: {
2697 fs_reg value = get_nir_src(instr->src[0]);
2698 bool is_64bit = (instr->src[0].is_ssa ?
2699 instr->src[0].ssa->bit_size : instr->src[0].reg.reg->bit_size) == 64;
2700 fs_reg indirect_offset = get_indirect_offset(instr);
2701 unsigned imm_offset = instr->const_index[0];
2702 unsigned mask = instr->const_index[1];
2703 unsigned header_regs = 0;
2704 fs_reg srcs[7];
2705 srcs[header_regs++] = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD);
2706
2707 if (indirect_offset.file != BAD_FILE) {
2708 srcs[header_regs++] = indirect_offset;
2709 }
2710
2711 if (mask == 0)
2712 break;
2713
2714 unsigned num_components = util_last_bit(mask);
2715 enum opcode opcode;
2716
2717 /* We can only pack two 64-bit components in a single message, so send
2718 * 2 messages if we have more components
2719 */
2720 unsigned num_iterations = 1;
2721 unsigned iter_components = num_components;
2722 unsigned first_component = nir_intrinsic_component(instr);
2723 if (is_64bit) {
2724 first_component = first_component / 2;
2725 if (instr->num_components > 2) {
2726 num_iterations = 2;
2727 iter_components = 2;
2728 }
2729 }
2730
2731 mask = mask << first_component;
2732
2733 for (unsigned iter = 0; iter < num_iterations; iter++) {
2734 if (!is_64bit && mask != WRITEMASK_XYZW) {
2735 srcs[header_regs++] = brw_imm_ud(mask << 16);
2736 opcode = indirect_offset.file != BAD_FILE ?
2737 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
2738 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
2739 } else if (is_64bit && ((mask & WRITEMASK_XY) != WRITEMASK_XY)) {
2740 /* Expand the 64-bit mask to 32-bit channels. We only handle
2741 * two channels in each iteration, so we only care about X/Y.
2742 */
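            /* E.g. a write of the X/Y doubles (mask XY) expands to a 32-bit
             * mask of XYZW, since each double spans two 32-bit channels.
             */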
2743 unsigned mask32 = 0;
2744 if (mask & WRITEMASK_X)
2745 mask32 |= WRITEMASK_XY;
2746 if (mask & WRITEMASK_Y)
2747 mask32 |= WRITEMASK_ZW;
2748
2749 /* If the mask does not include any of the channels X or Y there
2750 * is nothing to do in this iteration. Move on to the next couple
2751 * of 64-bit channels.
2752 */
2753 if (!mask32) {
2754 mask >>= 2;
2755 imm_offset++;
2756 continue;
2757 }
2758
2759 srcs[header_regs++] = brw_imm_ud(mask32 << 16);
2760 opcode = indirect_offset.file != BAD_FILE ?
2761 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
2762 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
2763 } else {
2764 opcode = indirect_offset.file != BAD_FILE ?
2765 SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT :
2766 SHADER_OPCODE_URB_WRITE_SIMD8;
2767 }
2768
2769 for (unsigned i = 0; i < iter_components; i++) {
2770 if (!(mask & (1 << (i + first_component))))
2771 continue;
2772
2773 if (!is_64bit) {
2774 srcs[header_regs + i + first_component] = offset(value, bld, i);
2775 } else {
2776 /* We need to shuffle the 64-bit data to match the layout
2777 * expected by our 32-bit URB write messages. We use a temporary
2778 * for that.
2779 */
2780 unsigned channel = iter * 2 + i;
2781 fs_reg dest = shuffle_64bit_data_for_32bit_write(bld,
2782 offset(value, bld, channel), 1);
2783
2784 srcs[header_regs + (i + first_component) * 2] = dest;
2785 srcs[header_regs + (i + first_component) * 2 + 1] =
2786 offset(dest, bld, 1);
2787 }
2788 }
2789
2790 unsigned mlen =
2791 header_regs + (is_64bit ? 2 * iter_components : iter_components) +
2792 (is_64bit ? 2 * first_component : first_component);
2793 fs_reg payload =
2794 bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
2795 bld.LOAD_PAYLOAD(payload, srcs, mlen, header_regs);
2796
2797 fs_inst *inst = bld.emit(opcode, bld.null_reg_ud(), payload);
2798 inst->offset = imm_offset;
2799 inst->mlen = mlen;
2800
2801 /* If this is a 64-bit attribute, select the next two 64-bit channels
2802 * to be handled in the next iteration.
2803 */
2804 if (is_64bit) {
2805 mask >>= 2;
2806 imm_offset++;
2807 }
2808 }
2809 break;
2810 }
2811
2812 default:
2813 nir_emit_intrinsic(bld, instr);
2814 break;
2815 }
2816 }
2817
2818 void
2819 fs_visitor::nir_emit_tes_intrinsic(const fs_builder &bld,
2820 nir_intrinsic_instr *instr)
2821 {
2822 assert(stage == MESA_SHADER_TESS_EVAL);
2823 struct brw_tes_prog_data *tes_prog_data = brw_tes_prog_data(prog_data);
2824
2825 fs_reg dest;
2826 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2827 dest = get_nir_dest(instr->dest);
2828
2829 switch (instr->intrinsic) {
2830 case nir_intrinsic_load_primitive_id:
2831 bld.MOV(dest, fs_reg(brw_vec1_grf(0, 1)));
2832 break;
2833 case nir_intrinsic_load_tess_coord:
2834 /* gl_TessCoord is part of the payload in g1-3 */
2835 for (unsigned i = 0; i < 3; i++) {
2836 bld.MOV(offset(dest, bld, i), fs_reg(brw_vec8_grf(1 + i, 0)));
2837 }
2838 break;
2839
2840 case nir_intrinsic_load_input:
2841 case nir_intrinsic_load_per_vertex_input: {
2842 fs_reg indirect_offset = get_indirect_offset(instr);
2843 unsigned imm_offset = instr->const_index[0];
2844 unsigned first_component = nir_intrinsic_component(instr);
2845
2846 if (type_sz(dest.type) == 8) {
2847 first_component = first_component / 2;
2848 }
2849
2850 fs_inst *inst;
2851 if (indirect_offset.file == BAD_FILE) {
2852 /* Arbitrarily only push up to 32 vec4 slots worth of data,
2853 * which is 16 registers (since each holds 2 vec4 slots).
2854 */
2855 unsigned slot_count = 1;
2856 if (type_sz(dest.type) == 8 && instr->num_components > 2)
2857 slot_count++;
2858
2859 const unsigned max_push_slots = 32;
2860 if (imm_offset + slot_count <= max_push_slots) {
2861 fs_reg src = fs_reg(ATTR, imm_offset / 2, dest.type);
2862 for (int i = 0; i < instr->num_components; i++) {
2863 unsigned comp = 16 / type_sz(dest.type) * (imm_offset % 2) +
2864 i + first_component;
2865 bld.MOV(offset(dest, bld, i), component(src, comp));
2866 }
2867
2868 tes_prog_data->base.urb_read_length =
2869 MAX2(tes_prog_data->base.urb_read_length,
2870 DIV_ROUND_UP(imm_offset + slot_count, 2));
2871 } else {
2872 /* Replicate the patch handle to all enabled channels */
2873 const fs_reg srcs[] = {
2874 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)
2875 };
2876 fs_reg patch_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2877 bld.LOAD_PAYLOAD(patch_handle, srcs, ARRAY_SIZE(srcs), 0);
2878
2879 if (first_component != 0) {
2880 unsigned read_components =
2881 instr->num_components + first_component;
2882 fs_reg tmp = bld.vgrf(dest.type, read_components);
2883 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp,
2884 patch_handle);
2885 inst->size_written = read_components * REG_SIZE;
2886 for (unsigned i = 0; i < instr->num_components; i++) {
2887 bld.MOV(offset(dest, bld, i),
2888 offset(tmp, bld, i + first_component));
2889 }
2890 } else {
2891 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dest,
2892 patch_handle);
2893 inst->size_written = instr->num_components * REG_SIZE;
2894 }
2895 inst->mlen = 1;
2896 inst->offset = imm_offset;
2897 }
2898 } else {
2899 /* Indirect indexing - use per-slot offsets as well. */
2900
2901 /* We can only read two double components with each URB read, so
2902 * we send two read messages in that case, each one loading up to
2903 * two double components.
2904 */
2905 unsigned num_iterations = 1;
2906 unsigned num_components = instr->num_components;
2907 fs_reg orig_dest = dest;
2908 if (type_sz(dest.type) == 8) {
2909 if (instr->num_components > 2) {
2910 num_iterations = 2;
2911 num_components = 2;
2912 }
2913 fs_reg tmp = fs_reg(VGRF, alloc.allocate(4), dest.type);
2914 dest = tmp;
2915 }
2916
2917 for (unsigned iter = 0; iter < num_iterations; iter++) {
2918 const fs_reg srcs[] = {
2919 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD),
2920 indirect_offset
2921 };
2922 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2923 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2924
2925 if (first_component != 0) {
2926 unsigned read_components =
2927 num_components + first_component;
2928 fs_reg tmp = bld.vgrf(dest.type, read_components);
2929 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
2930 payload);
2931 for (unsigned i = 0; i < num_components; i++) {
2932 bld.MOV(offset(dest, bld, i),
2933 offset(tmp, bld, i + first_component));
2934 }
2935 } else {
2936 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dest,
2937 payload);
2938 }
2939 inst->mlen = 2;
2940 inst->offset = imm_offset;
2941 inst->size_written = (num_components + first_component) *
2942 inst->dst.component_size(inst->exec_size);
2943
2944 /* If we are reading 64-bit data using 32-bit read messages we need
2945          * to build proper 64-bit data elements by shuffling the low and high
2946 * 32-bit components around like we do for other things like UBOs
2947 * or SSBOs.
2948 */
2949 if (type_sz(dest.type) == 8) {
2950 shuffle_32bit_load_result_to_64bit_data(
2951 bld, dest, retype(dest, BRW_REGISTER_TYPE_F), num_components);
2952
2953 for (unsigned c = 0; c < num_components; c++) {
2954 bld.MOV(offset(orig_dest, bld, iter * 2 + c),
2955 offset(dest, bld, c));
2956 }
2957 }
2958
2959          /* If we are loading double data and need a second read message,
2960           * adjust the offset
2961 */
2962 if (num_iterations > 1) {
2963 num_components = instr->num_components - 2;
2964 imm_offset++;
2965 }
2966 }
2967 }
2968 break;
2969 }
2970 default:
2971 nir_emit_intrinsic(bld, instr);
2972 break;
2973 }
2974 }
2975
2976 void
2977 fs_visitor::nir_emit_gs_intrinsic(const fs_builder &bld,
2978 nir_intrinsic_instr *instr)
2979 {
2980 assert(stage == MESA_SHADER_GEOMETRY);
2981 fs_reg indirect_offset;
2982
2983 fs_reg dest;
2984 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2985 dest = get_nir_dest(instr->dest);
2986
2987 switch (instr->intrinsic) {
2988 case nir_intrinsic_load_primitive_id:
2989 assert(stage == MESA_SHADER_GEOMETRY);
2990 assert(brw_gs_prog_data(prog_data)->include_primitive_id);
2991 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
2992 retype(fs_reg(brw_vec8_grf(2, 0)), BRW_REGISTER_TYPE_UD));
2993 break;
2994
2995 case nir_intrinsic_load_input:
2996 unreachable("load_input intrinsics are invalid for the GS stage");
2997
2998 case nir_intrinsic_load_per_vertex_input:
2999 emit_gs_input_load(dest, instr->src[0], instr->const_index[0],
3000 instr->src[1], instr->num_components,
3001 nir_intrinsic_component(instr));
3002 break;
3003
3004 case nir_intrinsic_emit_vertex_with_counter:
3005 emit_gs_vertex(instr->src[0], instr->const_index[0]);
3006 break;
3007
3008 case nir_intrinsic_end_primitive_with_counter:
3009 emit_gs_end_primitive(instr->src[0]);
3010 break;
3011
3012 case nir_intrinsic_set_vertex_count:
3013 bld.MOV(this->final_gs_vertex_count, get_nir_src(instr->src[0]));
3014 break;
3015
3016 case nir_intrinsic_load_invocation_id: {
3017 fs_reg val = nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
3018 assert(val.file != BAD_FILE);
3019 dest.type = val.type;
3020 bld.MOV(dest, val);
3021 break;
3022 }
3023
3024 default:
3025 nir_emit_intrinsic(bld, instr);
3026 break;
3027 }
3028 }
3029
3030 /**
3031 * Fetch the current render target layer index.
3032 */
3033 static fs_reg
3034 fetch_render_target_array_index(const fs_builder &bld)
3035 {
3036 if (bld.shader->devinfo->gen >= 6) {
3037 /* The render target array index is provided in the thread payload as
3038 * bits 26:16 of r0.0.
3039 */
3040 const fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_UD);
3041 bld.AND(idx, brw_uw1_reg(BRW_GENERAL_REGISTER_FILE, 0, 1),
3042 brw_imm_uw(0x7ff));
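      /* The UW source at subregister 1 covers bits 31:16 of r0.0, and the
       * 0x7ff mask keeps the low 11 of those bits, i.e. bits 26:16.
       */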
3043 return idx;
3044 } else {
3045 /* Pre-SNB we only ever render into the first layer of the framebuffer
3046 * since layered rendering is not implemented.
3047 */
3048 return brw_imm_ud(0);
3049 }
3050 }
3051
3052 /**
3053 * Fake non-coherent framebuffer read implemented using TXF to fetch from the
3054 * framebuffer at the current fragment coordinates and sample index.
3055 */
3056 fs_inst *
3057 fs_visitor::emit_non_coherent_fb_read(const fs_builder &bld, const fs_reg &dst,
3058 unsigned target)
3059 {
3060 const struct gen_device_info *devinfo = bld.shader->devinfo;
3061
3062 assert(bld.shader->stage == MESA_SHADER_FRAGMENT);
3063 const brw_wm_prog_key *wm_key =
3064 reinterpret_cast<const brw_wm_prog_key *>(key);
3065 assert(!wm_key->coherent_fb_fetch);
3066 const struct brw_wm_prog_data *wm_prog_data =
3067 brw_wm_prog_data(stage_prog_data);
3068
3069 /* Calculate the surface index relative to the start of the texture binding
3070 * table block, since that's what the texturing messages expect.
3071 */
3072 const unsigned surface = target +
3073 wm_prog_data->binding_table.render_target_read_start -
3074 wm_prog_data->base.binding_table.texture_start;
3075
3076 brw_mark_surface_used(
3077 bld.shader->stage_prog_data,
3078 wm_prog_data->binding_table.render_target_read_start + target);
3079
3080 /* Calculate the fragment coordinates. */
3081 const fs_reg coords = bld.vgrf(BRW_REGISTER_TYPE_UD, 3);
3082 bld.MOV(offset(coords, bld, 0), pixel_x);
3083 bld.MOV(offset(coords, bld, 1), pixel_y);
3084 bld.MOV(offset(coords, bld, 2), fetch_render_target_array_index(bld));
3085
3086 /* Calculate the sample index and MCS payload when multisampling. Luckily
3087 * the MCS fetch message behaves deterministically for UMS surfaces, so it
3088 * shouldn't be necessary to recompile based on whether the framebuffer is
3089 * CMS or UMS.
3090 */
3091 if (wm_key->multisample_fbo &&
3092 nir_system_values[SYSTEM_VALUE_SAMPLE_ID].file == BAD_FILE)
3093 nir_system_values[SYSTEM_VALUE_SAMPLE_ID] = *emit_sampleid_setup();
3094
3095 const fs_reg sample = nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
3096 const fs_reg mcs = wm_key->multisample_fbo ?
3097 emit_mcs_fetch(coords, 3, brw_imm_ud(surface)) : fs_reg();
3098
3099 /* Use either a normal or a CMS texel fetch message depending on whether
3100 * the framebuffer is single or multisample. On SKL+ use the wide CMS
3101    * message just in case the framebuffer uses 16x multisampling; it should
3102 * be equivalent to the normal CMS fetch for lower multisampling modes.
3103 */
3104 const opcode op = !wm_key->multisample_fbo ? SHADER_OPCODE_TXF_LOGICAL :
3105 devinfo->gen >= 9 ? SHADER_OPCODE_TXF_CMS_W_LOGICAL :
3106 SHADER_OPCODE_TXF_CMS_LOGICAL;
3107
3108 /* Emit the instruction. */
3109 const fs_reg srcs[] = { coords, fs_reg(), brw_imm_ud(0), fs_reg(),
3110 sample, mcs,
3111 brw_imm_ud(surface), brw_imm_ud(0),
3112 fs_reg(), brw_imm_ud(3), brw_imm_ud(0) };
3113 STATIC_ASSERT(ARRAY_SIZE(srcs) == TEX_LOGICAL_NUM_SRCS);
3114
3115 fs_inst *inst = bld.emit(op, dst, srcs, ARRAY_SIZE(srcs));
3116 inst->size_written = 4 * inst->dst.component_size(inst->exec_size);
3117
3118 return inst;
3119 }
3120
3121 /**
3122 * Actual coherent framebuffer read implemented using the native render target
3123 * read message. Requires SKL+.
3124 */
3125 static fs_inst *
3126 emit_coherent_fb_read(const fs_builder &bld, const fs_reg &dst, unsigned target)
3127 {
3128 assert(bld.shader->devinfo->gen >= 9);
3129 fs_inst *inst = bld.emit(FS_OPCODE_FB_READ_LOGICAL, dst);
3130 inst->target = target;
3131 inst->size_written = 4 * inst->dst.component_size(inst->exec_size);
3132
3133 return inst;
3134 }
3135
3136 static fs_reg
3137 alloc_temporary(const fs_builder &bld, unsigned size, fs_reg *regs, unsigned n)
3138 {
3139 if (n && regs[0].file != BAD_FILE) {
3140 return regs[0];
3141
3142 } else {
3143 const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, size);
3144
3145 for (unsigned i = 0; i < n; i++)
3146 regs[i] = tmp;
3147
3148 return tmp;
3149 }
3150 }
3151
3152 static fs_reg
3153 alloc_frag_output(fs_visitor *v, unsigned location)
3154 {
3155 assert(v->stage == MESA_SHADER_FRAGMENT);
3156 const brw_wm_prog_key *const key =
3157 reinterpret_cast<const brw_wm_prog_key *>(v->key);
3158 const unsigned l = GET_FIELD(location, BRW_NIR_FRAG_OUTPUT_LOCATION);
3159 const unsigned i = GET_FIELD(location, BRW_NIR_FRAG_OUTPUT_INDEX);
3160
3161 if (i > 0 || (key->force_dual_color_blend && l == FRAG_RESULT_DATA1))
3162 return alloc_temporary(v->bld, 4, &v->dual_src_output, 1);
3163
3164 else if (l == FRAG_RESULT_COLOR)
3165 return alloc_temporary(v->bld, 4, v->outputs,
3166 MAX2(key->nr_color_regions, 1));
3167
3168 else if (l == FRAG_RESULT_DEPTH)
3169 return alloc_temporary(v->bld, 1, &v->frag_depth, 1);
3170
3171 else if (l == FRAG_RESULT_STENCIL)
3172 return alloc_temporary(v->bld, 1, &v->frag_stencil, 1);
3173
3174 else if (l == FRAG_RESULT_SAMPLE_MASK)
3175 return alloc_temporary(v->bld, 1, &v->sample_mask, 1);
3176
3177 else if (l >= FRAG_RESULT_DATA0 &&
3178 l < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS)
3179 return alloc_temporary(v->bld, 4,
3180 &v->outputs[l - FRAG_RESULT_DATA0], 1);
3181
3182 else
3183 unreachable("Invalid location");
3184 }
3185
3186 void
3187 fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
3188 nir_intrinsic_instr *instr)
3189 {
3190 assert(stage == MESA_SHADER_FRAGMENT);
3191
3192 fs_reg dest;
3193 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3194 dest = get_nir_dest(instr->dest);
3195
3196 switch (instr->intrinsic) {
3197 case nir_intrinsic_load_front_face:
3198 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
3199 *emit_frontfacing_interpolation());
3200 break;
3201
3202 case nir_intrinsic_load_sample_pos: {
3203 fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
3204 assert(sample_pos.file != BAD_FILE);
3205 dest.type = sample_pos.type;
3206 bld.MOV(dest, sample_pos);
3207 bld.MOV(offset(dest, bld, 1), offset(sample_pos, bld, 1));
3208 break;
3209 }
3210
3211 case nir_intrinsic_load_layer_id:
3212 dest.type = BRW_REGISTER_TYPE_UD;
3213 bld.MOV(dest, fetch_render_target_array_index(bld));
3214 break;
3215
3216 case nir_intrinsic_load_helper_invocation:
3217 case nir_intrinsic_load_sample_mask_in:
3218 case nir_intrinsic_load_sample_id: {
3219 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
3220 fs_reg val = nir_system_values[sv];
3221 assert(val.file != BAD_FILE);
3222 dest.type = val.type;
3223 bld.MOV(dest, val);
3224 break;
3225 }
3226
3227 case nir_intrinsic_store_output: {
3228 const fs_reg src = get_nir_src(instr->src[0]);
3229 const nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
3230 assert(const_offset && "Indirect output stores not allowed");
3231 const unsigned location = nir_intrinsic_base(instr) +
3232 SET_FIELD(const_offset->u32[0], BRW_NIR_FRAG_OUTPUT_LOCATION);
3233 const fs_reg new_dest = retype(alloc_frag_output(this, location),
3234 src.type);
3235
3236 for (unsigned j = 0; j < instr->num_components; j++)
3237 bld.MOV(offset(new_dest, bld, nir_intrinsic_component(instr) + j),
3238 offset(src, bld, j));
3239
3240 break;
3241 }
3242
3243 case nir_intrinsic_load_output: {
3244 const unsigned l = GET_FIELD(nir_intrinsic_base(instr),
3245 BRW_NIR_FRAG_OUTPUT_LOCATION);
3246 assert(l >= FRAG_RESULT_DATA0);
3247 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
3248 assert(const_offset && "Indirect output loads not allowed");
3249 const unsigned target = l - FRAG_RESULT_DATA0 + const_offset->u32[0];
3250 const fs_reg tmp = bld.vgrf(dest.type, 4);
3251
3252 if (reinterpret_cast<const brw_wm_prog_key *>(key)->coherent_fb_fetch)
3253 emit_coherent_fb_read(bld, tmp, target);
3254 else
3255 emit_non_coherent_fb_read(bld, tmp, target);
3256
3257 for (unsigned j = 0; j < instr->num_components; j++) {
3258 bld.MOV(offset(dest, bld, j),
3259 offset(tmp, bld, nir_intrinsic_component(instr) + j));
3260 }
3261
3262 break;
3263 }
3264
3265 case nir_intrinsic_discard:
3266 case nir_intrinsic_discard_if: {
3267 /* We track our discarded pixels in f0.1. By predicating on it, we can
3268 * update just the flag bits that aren't yet discarded. If there's no
3269 * condition, we emit a CMP of g0 != g0, so all currently executing
3270 * channels will get turned off.
3271 */
3272 fs_inst *cmp;
3273 if (instr->intrinsic == nir_intrinsic_discard_if) {
3274 cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
3275 brw_imm_d(0), BRW_CONDITIONAL_Z);
3276 } else {
3277 fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
3278 BRW_REGISTER_TYPE_UW));
3279 cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
3280 }
3281 cmp->predicate = BRW_PREDICATE_NORMAL;
3282 cmp->flag_subreg = 1;
3283
3284 if (devinfo->gen >= 6) {
3285 emit_discard_jump();
3286 }
3287 break;
3288 }
3289
3290 case nir_intrinsic_load_input: {
3291 /* load_input is only used for flat inputs */
3292 unsigned base = nir_intrinsic_base(instr);
3293 unsigned component = nir_intrinsic_component(instr);
3294 unsigned num_components = instr->num_components;
3295 enum brw_reg_type type = dest.type;
3296
3297 /* Special case fields in the VUE header */
3298 if (base == VARYING_SLOT_LAYER)
3299 component = 1;
3300 else if (base == VARYING_SLOT_VIEWPORT)
3301 component = 2;
3302
3303 if (nir_dest_bit_size(instr->dest) == 64) {
3304 /* const_index is in 32-bit type size units that could not be aligned
3305             * with DF. We need to read the double vector as if it were a float
3306 * vector of twice the number of components to fetch the right data.
3307 */
3308 type = BRW_REGISTER_TYPE_F;
3309 num_components *= 2;
3310 }
3311
3312 for (unsigned int i = 0; i < num_components; i++) {
3313 struct brw_reg interp = interp_reg(base, component + i);
3314 interp = suboffset(interp, 3);
3315 bld.emit(FS_OPCODE_CINTERP, offset(retype(dest, type), bld, i),
3316 retype(fs_reg(interp), type));
3317 }
3318
3319 if (nir_dest_bit_size(instr->dest) == 64) {
3320 shuffle_32bit_load_result_to_64bit_data(bld,
3321 dest,
3322 retype(dest, type),
3323 instr->num_components);
3324 }
3325 break;
3326 }
3327
3328 case nir_intrinsic_load_barycentric_pixel:
3329 case nir_intrinsic_load_barycentric_centroid:
3330 case nir_intrinsic_load_barycentric_sample:
3331      /* Do nothing - load_interpolated_input will handle it later. */
3332 break;
3333
3334 case nir_intrinsic_load_barycentric_at_sample: {
3335 const glsl_interp_mode interpolation =
3336 (enum glsl_interp_mode) nir_intrinsic_interp_mode(instr);
3337
3338 nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
3339
3340 if (const_sample) {
3341 unsigned msg_data = const_sample->i32[0] << 4;
3342
3343 emit_pixel_interpolater_send(bld,
3344 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
3345 dest,
3346 fs_reg(), /* src */
3347 brw_imm_ud(msg_data),
3348 interpolation);
3349 } else {
3350 const fs_reg sample_src = retype(get_nir_src(instr->src[0]),
3351 BRW_REGISTER_TYPE_UD);
3352
3353 if (nir_src_is_dynamically_uniform(instr->src[0])) {
3354 const fs_reg sample_id = bld.emit_uniformize(sample_src);
3355 const fs_reg msg_data = vgrf(glsl_type::uint_type);
3356 bld.exec_all().group(1, 0)
3357 .SHL(msg_data, sample_id, brw_imm_ud(4u));
3358 emit_pixel_interpolater_send(bld,
3359 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
3360 dest,
3361 fs_reg(), /* src */
3362 msg_data,
3363 interpolation);
3364 } else {
3365 /* Make a loop that sends a message to the pixel interpolater
3366 * for the sample number in each live channel. If there are
3367 * multiple channels with the same sample number then these
3368             * will be handled simultaneously with a single iteration of
3369 * the loop.
3370 */
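            /* Each pass through the loop uniformizes one sample number,
             * predicates the interpolater send on the channels that requested
             * it, and repeats while any channel remains unhandled.
             */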
3371 bld.emit(BRW_OPCODE_DO);
3372
3373             /* Get the next live sample number into sample_id */
3374 const fs_reg sample_id = bld.emit_uniformize(sample_src);
3375
3376 /* Set the flag register so that we can perform the send
3377 * message on all channels that have the same sample number
3378 */
3379 bld.CMP(bld.null_reg_ud(),
3380 sample_src, sample_id,
3381 BRW_CONDITIONAL_EQ);
3382 const fs_reg msg_data = vgrf(glsl_type::uint_type);
3383 bld.exec_all().group(1, 0)
3384 .SHL(msg_data, sample_id, brw_imm_ud(4u));
3385 fs_inst *inst =
3386 emit_pixel_interpolater_send(bld,
3387 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
3388 dest,
3389 fs_reg(), /* src */
3390 msg_data,
3391 interpolation);
3392 set_predicate(BRW_PREDICATE_NORMAL, inst);
3393
3394 /* Continue the loop if there are any live channels left */
3395 set_predicate_inv(BRW_PREDICATE_NORMAL,
3396 true, /* inverse */
3397 bld.emit(BRW_OPCODE_WHILE));
3398 }
3399 }
3400 break;
3401 }
3402
3403 case nir_intrinsic_load_barycentric_at_offset: {
3404 const glsl_interp_mode interpolation =
3405 (enum glsl_interp_mode) nir_intrinsic_interp_mode(instr);
3406
3407 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
3408
3409 if (const_offset) {
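         /* Convert the floating point offsets to S0.4 fixed point, clamping
          * the upper end to +7/16; see the rationale in the non-constant
          * path below.
          */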
3410 unsigned off_x = MIN2((int)(const_offset->f32[0] * 16), 7) & 0xf;
3411 unsigned off_y = MIN2((int)(const_offset->f32[1] * 16), 7) & 0xf;
3412
3413 emit_pixel_interpolater_send(bld,
3414 FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
3415 dest,
3416 fs_reg(), /* src */
3417 brw_imm_ud(off_x | (off_y << 4)),
3418 interpolation);
3419 } else {
3420 fs_reg src = vgrf(glsl_type::ivec2_type);
3421 fs_reg offset_src = retype(get_nir_src(instr->src[0]),
3422 BRW_REGISTER_TYPE_F);
3423 for (int i = 0; i < 2; i++) {
3424 fs_reg temp = vgrf(glsl_type::float_type);
3425 bld.MUL(temp, offset(offset_src, bld, i), brw_imm_f(16.0f));
3426 fs_reg itemp = vgrf(glsl_type::int_type);
3427 /* float to int */
3428 bld.MOV(itemp, temp);
3429
3430 /* Clamp the upper end of the range to +7/16.
3431 * ARB_gpu_shader5 requires that we support a maximum offset
3432             * of +0.5, which isn't representable in an S0.4 value -- if
3433 * we didn't clamp it, we'd end up with -8/16, which is the
3434 * opposite of what the shader author wanted.
3435 *
3436 * This is legal due to ARB_gpu_shader5's quantization
3437 * rules:
3438 *
3439 * "Not all values of <offset> may be supported; x and y
3440 * offsets may be rounded to fixed-point values with the
3441 * number of fraction bits given by the
3442 * implementation-dependent constant
3443 * FRAGMENT_INTERPOLATION_OFFSET_BITS"
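            *
            *   For example, an offset of +0.5 scales to 0.5 * 16 = 8, which
            *   would wrap around to -8/16 as an S0.4 value; clamping to 7
            *   keeps it at +7/16 instead.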
3444 */
3445 set_condmod(BRW_CONDITIONAL_L,
3446 bld.SEL(offset(src, bld, i), itemp, brw_imm_d(7)));
3447 }
3448
3449 const enum opcode opcode = FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET;
3450 emit_pixel_interpolater_send(bld,
3451 opcode,
3452 dest,
3453 src,
3454 brw_imm_ud(0u),
3455 interpolation);
3456 }
3457 break;
3458 }
3459
3460 case nir_intrinsic_load_interpolated_input: {
3461 if (nir_intrinsic_base(instr) == VARYING_SLOT_POS) {
3462 emit_fragcoord_interpolation(dest);
3463 break;
3464 }
3465
3466 assert(instr->src[0].ssa &&
3467 instr->src[0].ssa->parent_instr->type == nir_instr_type_intrinsic);
3468 nir_intrinsic_instr *bary_intrinsic =
3469 nir_instr_as_intrinsic(instr->src[0].ssa->parent_instr);
3470 nir_intrinsic_op bary_intrin = bary_intrinsic->intrinsic;
3471 enum glsl_interp_mode interp_mode =
3472 (enum glsl_interp_mode) nir_intrinsic_interp_mode(bary_intrinsic);
3473 fs_reg dst_xy;
3474
3475 if (bary_intrin == nir_intrinsic_load_barycentric_at_offset ||
3476 bary_intrin == nir_intrinsic_load_barycentric_at_sample) {
3477 /* Use the result of the PI message */
3478 dst_xy = retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_F);
3479 } else {
3480 /* Use the delta_xy values computed from the payload */
3481 enum brw_barycentric_mode bary =
3482 brw_barycentric_mode(interp_mode, bary_intrin);
3483
3484 dst_xy = this->delta_xy[bary];
3485 }
3486
3487 for (unsigned int i = 0; i < instr->num_components; i++) {
3488 fs_reg interp =
3489 fs_reg(interp_reg(nir_intrinsic_base(instr),
3490 nir_intrinsic_component(instr) + i));
3491 interp.type = BRW_REGISTER_TYPE_F;
3492 dest.type = BRW_REGISTER_TYPE_F;
3493
3494 if (devinfo->gen < 6 && interp_mode == INTERP_MODE_SMOOTH) {
3495 fs_reg tmp = vgrf(glsl_type::float_type);
3496 bld.emit(FS_OPCODE_LINTERP, tmp, dst_xy, interp);
3497 bld.MUL(offset(dest, bld, i), tmp, this->pixel_w);
3498 } else {
3499 bld.emit(FS_OPCODE_LINTERP, offset(dest, bld, i), dst_xy, interp);
3500 }
3501 }
3502 break;
3503 }
3504
3505 default:
3506 nir_emit_intrinsic(bld, instr);
3507 break;
3508 }
3509 }
3510
3511 void
3512 fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
3513 nir_intrinsic_instr *instr)
3514 {
3515 assert(stage == MESA_SHADER_COMPUTE);
3516 struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(prog_data);
3517
3518 fs_reg dest;
3519 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3520 dest = get_nir_dest(instr->dest);
3521
3522 switch (instr->intrinsic) {
3523 case nir_intrinsic_barrier:
3524 emit_barrier();
3525 cs_prog_data->uses_barrier = true;
3526 break;
3527
3528 case nir_intrinsic_load_subgroup_id:
3529 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD), subgroup_id);
3530 break;
3531
3532 case nir_intrinsic_load_local_invocation_id:
3533 case nir_intrinsic_load_work_group_id: {
3534 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
3535 fs_reg val = nir_system_values[sv];
3536 assert(val.file != BAD_FILE);
3537 dest.type = val.type;
3538 for (unsigned i = 0; i < 3; i++)
3539 bld.MOV(offset(dest, bld, i), offset(val, bld, i));
3540 break;
3541 }
3542
3543 case nir_intrinsic_load_num_work_groups: {
3544 const unsigned surface =
3545 cs_prog_data->binding_table.work_groups_start;
3546
3547 cs_prog_data->uses_num_work_groups = true;
3548
3549 fs_reg surf_index = brw_imm_ud(surface);
3550 brw_mark_surface_used(prog_data, surface);
3551
3552 /* Read the 3 GLuint components of gl_NumWorkGroups */
3553 for (unsigned i = 0; i < 3; i++) {
3554 fs_reg read_result =
3555 emit_untyped_read(bld, surf_index,
3556 brw_imm_ud(i << 2),
3557 1 /* dims */, 1 /* size */,
3558 BRW_PREDICATE_NONE);
3559 read_result.type = dest.type;
3560 bld.MOV(dest, read_result);
3561 dest = offset(dest, bld, 1);
3562 }
3563 break;
3564 }
3565
3566 case nir_intrinsic_shared_atomic_add:
3567 nir_emit_shared_atomic(bld, BRW_AOP_ADD, instr);
3568 break;
3569 case nir_intrinsic_shared_atomic_imin:
3570 nir_emit_shared_atomic(bld, BRW_AOP_IMIN, instr);
3571 break;
3572 case nir_intrinsic_shared_atomic_umin:
3573 nir_emit_shared_atomic(bld, BRW_AOP_UMIN, instr);
3574 break;
3575 case nir_intrinsic_shared_atomic_imax:
3576 nir_emit_shared_atomic(bld, BRW_AOP_IMAX, instr);
3577 break;
3578 case nir_intrinsic_shared_atomic_umax:
3579 nir_emit_shared_atomic(bld, BRW_AOP_UMAX, instr);
3580 break;
3581 case nir_intrinsic_shared_atomic_and:
3582 nir_emit_shared_atomic(bld, BRW_AOP_AND, instr);
3583 break;
3584 case nir_intrinsic_shared_atomic_or:
3585 nir_emit_shared_atomic(bld, BRW_AOP_OR, instr);
3586 break;
3587 case nir_intrinsic_shared_atomic_xor:
3588 nir_emit_shared_atomic(bld, BRW_AOP_XOR, instr);
3589 break;
3590 case nir_intrinsic_shared_atomic_exchange:
3591 nir_emit_shared_atomic(bld, BRW_AOP_MOV, instr);
3592 break;
3593 case nir_intrinsic_shared_atomic_comp_swap:
3594 nir_emit_shared_atomic(bld, BRW_AOP_CMPWR, instr);
3595 break;
3596
3597 case nir_intrinsic_load_shared: {
3598 assert(devinfo->gen >= 7);
3599
3600 fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM);
3601
3602 /* Get the offset to read from */
3603 fs_reg offset_reg;
3604 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
3605 if (const_offset) {
3606 offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u32[0]);
3607 } else {
3608 offset_reg = vgrf(glsl_type::uint_type);
3609 bld.ADD(offset_reg,
3610 retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
3611 brw_imm_ud(instr->const_index[0]));
3612 }
3613
3614 /* Read the vector */
3615 do_untyped_vector_read(bld, dest, surf_index, offset_reg,
3616 instr->num_components);
3617 break;
3618 }
3619
3620 case nir_intrinsic_store_shared: {
3621 assert(devinfo->gen >= 7);
3622
3623 /* Block index */
3624 fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM);
3625
3626 /* Value */
3627 fs_reg val_reg = get_nir_src(instr->src[0]);
3628
3629 /* Writemask */
3630 unsigned writemask = instr->const_index[1];
3631
3632 /* get_nir_src() retypes to integer. Be wary of 64-bit types though
3633 * since the untyped writes below operate in units of 32-bits, which
3634 * means that we need to write twice as many components each time.
3635       * Also, we have to shuffle 64-bit data into the layout
3636 * expected by our 32-bit write messages.
3637 */
3638 unsigned type_size = 4;
3639 if (nir_src_bit_size(instr->src[0]) == 64) {
3640 type_size = 8;
3641 val_reg = shuffle_64bit_data_for_32bit_write(bld,
3642 val_reg, instr->num_components);
3643 }
3644
3645 unsigned type_slots = type_size / 4;
3646
3647 /* Combine groups of consecutive enabled channels in one write
3648 * message. We use ffs to find the first enabled channel and then ffs on
3649 * the bit-inverse, down-shifted writemask to determine the length of
3650 * the block of enabled bits.
3651 */
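      /* For example, for a 32-bit value with writemask 0b1011 the first pass
       * finds first_component == 0 and length == 2, so one message writes
       * components {0,1}; the writemask is then reduced to 0b1000 and a
       * second message writes component 3.
       */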
3652 while (writemask) {
3653 unsigned first_component = ffs(writemask) - 1;
3654 unsigned length = ffs(~(writemask >> first_component)) - 1;
3655
3656 /* We can't write more than 2 64-bit components at once. Limit the
3657 * length of the write to what we can do and let the next iteration
3658          * handle the rest.
3659 */
3660 if (type_size > 4)
3661 length = MIN2(2, length);
3662
3663 fs_reg offset_reg;
3664 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
3665 if (const_offset) {
3666 offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u32[0] +
3667 type_size * first_component);
3668 } else {
3669 offset_reg = vgrf(glsl_type::uint_type);
3670 bld.ADD(offset_reg,
3671 retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_UD),
3672 brw_imm_ud(instr->const_index[0] + type_size * first_component));
3673 }
3674
3675 emit_untyped_write(bld, surf_index, offset_reg,
3676 offset(val_reg, bld, first_component * type_slots),
3677 1 /* dims */, length * type_slots,
3678 BRW_PREDICATE_NONE);
3679
3680 /* Clear the bits in the writemask that we just wrote, then try
3681 * again to see if more channels are left.
3682 */
3683 writemask &= (15 << (first_component + length));
3684 }
3685
3686 break;
3687 }
3688
3689 default:
3690 nir_emit_intrinsic(bld, instr);
3691 break;
3692 }
3693 }
3694
3695 void
3696 fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
3697 {
3698 fs_reg dest;
3699 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3700 dest = get_nir_dest(instr->dest);
3701
3702 switch (instr->intrinsic) {
3703 case nir_intrinsic_image_load:
3704 case nir_intrinsic_image_store:
3705 case nir_intrinsic_image_atomic_add:
3706 case nir_intrinsic_image_atomic_min:
3707 case nir_intrinsic_image_atomic_max:
3708 case nir_intrinsic_image_atomic_and:
3709 case nir_intrinsic_image_atomic_or:
3710 case nir_intrinsic_image_atomic_xor:
3711 case nir_intrinsic_image_atomic_exchange:
3712 case nir_intrinsic_image_atomic_comp_swap: {
3713 using namespace image_access;
3714
3715 if (stage == MESA_SHADER_FRAGMENT &&
3716 instr->intrinsic != nir_intrinsic_image_load)
3717 brw_wm_prog_data(prog_data)->has_side_effects = true;
3718
3719 /* Get the referenced image variable and type. */
3720 const nir_variable *var = instr->variables[0]->var;
3721 const glsl_type *type = var->type->without_array();
3722 const brw_reg_type base_type = get_image_base_type(type);
3723
3724 /* Get some metadata from the image intrinsic. */
3725 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
3726 const unsigned arr_dims = type->sampler_array ? 1 : 0;
3727 const unsigned surf_dims = type->coordinate_components() - arr_dims;
3728 const unsigned format = var->data.image.format;
3729
3730 /* Get the arguments of the image intrinsic. */
3731 const fs_reg image = get_nir_image_deref(instr->variables[0]);
3732 const fs_reg addr = retype(get_nir_src(instr->src[0]),
3733 BRW_REGISTER_TYPE_UD);
3734 const fs_reg src0 = (info->num_srcs >= 3 ?
3735 retype(get_nir_src(instr->src[2]), base_type) :
3736 fs_reg());
3737 const fs_reg src1 = (info->num_srcs >= 4 ?
3738 retype(get_nir_src(instr->src[3]), base_type) :
3739 fs_reg());
3740 fs_reg tmp;
3741
3742 /* Emit an image load, store or atomic op. */
3743 if (instr->intrinsic == nir_intrinsic_image_load)
3744 tmp = emit_image_load(bld, image, addr, surf_dims, arr_dims, format);
3745
3746 else if (instr->intrinsic == nir_intrinsic_image_store)
3747 emit_image_store(bld, image, addr, src0, surf_dims, arr_dims,
3748 var->data.image.write_only ? GL_NONE : format);
3749
3750 else
3751 tmp = emit_image_atomic(bld, image, addr, src0, src1,
3752 surf_dims, arr_dims, info->dest_components,
3753 get_image_atomic_op(instr->intrinsic, type));
3754
3755 /* Assign the result. */
3756 for (unsigned c = 0; c < info->dest_components; ++c)
3757 bld.MOV(offset(retype(dest, base_type), bld, c),
3758 offset(tmp, bld, c));
3759 break;
3760 }
3761
3762 case nir_intrinsic_memory_barrier_atomic_counter:
3763 case nir_intrinsic_memory_barrier_buffer:
3764 case nir_intrinsic_memory_barrier_image:
3765 case nir_intrinsic_memory_barrier: {
3766 const fs_builder ubld = bld.group(8, 0);
3767 const fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
3768 ubld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
3769 ->size_written = 2 * REG_SIZE;
3770 break;
3771 }
3772
3773 case nir_intrinsic_group_memory_barrier:
3774 case nir_intrinsic_memory_barrier_shared:
3775 /* We treat these workgroup-level barriers as no-ops. This should be
3776 * safe at present and as long as:
3777 *
3778 * - Memory access instructions are not subsequently reordered by the
3779 * compiler back-end.
3780 *
3781 * - All threads from a given compute shader workgroup fit within a
3782 * single subslice and therefore talk to the same HDC shared unit
3783       *   which supposedly guarantees ordering and coherency between threads
3784 * from the same workgroup. This may change in the future when we
3785 * start splitting workgroups across multiple subslices.
3786 *
3787 * - The context is not in fault-and-stream mode, which could cause
3788 * memory transactions (including to SLM) prior to the barrier to be
3789 * replayed after the barrier if a pagefault occurs. This shouldn't
3790 * be a problem up to and including SKL because fault-and-stream is
3791 * not usable due to hardware issues, but that's likely to change in
3792 * the future.
3793 */
3794 break;
3795
3796 case nir_intrinsic_shader_clock: {
3797 /* We cannot do anything if there is an event, so ignore it for now */
3798 const fs_reg shader_clock = get_timestamp(bld);
3799 const fs_reg srcs[] = { component(shader_clock, 0),
3800 component(shader_clock, 1) };
3801 bld.LOAD_PAYLOAD(dest, srcs, ARRAY_SIZE(srcs), 0);
3802 break;
3803 }
3804
3805 case nir_intrinsic_image_size: {
3806 /* Get the referenced image variable and type. */
3807 const nir_variable *var = instr->variables[0]->var;
3808 const glsl_type *type = var->type->without_array();
3809
3810 /* Get the size of the image. */
3811 const fs_reg image = get_nir_image_deref(instr->variables[0]);
3812 const fs_reg size = offset(image, bld, BRW_IMAGE_PARAM_SIZE_OFFSET);
3813
3814 /* For 1DArray image types, the array index is stored in the Z component.
3815 * Fix this by swizzling the Z component to the Y component.
3816 */
3817 const bool is_1d_array_image =
3818 type->sampler_dimensionality == GLSL_SAMPLER_DIM_1D &&
3819 type->sampler_array;
3820
3821 /* For CubeArray images, we should count the number of cubes instead
3822 * of the number of faces. Fix it by dividing the (Z component) by 6.
3823 */
3824 const bool is_cube_array_image =
3825 type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE &&
3826 type->sampler_array;
3827
3828 /* Copy all the components. */
3829 for (unsigned c = 0; c < instr->dest.ssa.num_components; ++c) {
3830 if ((int)c >= type->coordinate_components()) {
3831 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
3832 brw_imm_d(1));
3833 } else if (c == 1 && is_1d_array_image) {
3834 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
3835 offset(size, bld, 2));
3836 } else if (c == 2 && is_cube_array_image) {
3837 bld.emit(SHADER_OPCODE_INT_QUOTIENT,
3838 offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
3839 offset(size, bld, c), brw_imm_d(6));
3840 } else {
3841 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
3842 offset(size, bld, c));
3843 }
3844 }
3845
3846 break;
3847 }
3848
3849 case nir_intrinsic_image_samples:
3850 /* The driver does not support multi-sampled images. */
3851 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), brw_imm_d(1));
3852 break;
3853
3854 case nir_intrinsic_load_uniform: {
3855 /* Offsets are in bytes but they should always be multiples of 4 */
3856 assert(instr->const_index[0] % 4 == 0);
3857
3858 fs_reg src(UNIFORM, instr->const_index[0] / 4, dest.type);
3859
3860 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
3861 if (const_offset) {
3862 /* Offsets are in bytes but they should always be multiples of 4 */
3863 assert(const_offset->u32[0] % 4 == 0);
3864 src.offset = const_offset->u32[0];
3865
3866 for (unsigned j = 0; j < instr->num_components; j++) {
3867 bld.MOV(offset(dest, bld, j), offset(src, bld, j));
3868 }
3869 } else {
3870 fs_reg indirect = retype(get_nir_src(instr->src[0]),
3871 BRW_REGISTER_TYPE_UD);
3872
3873 /* We need to pass a size to the MOV_INDIRECT but we don't want it to
3874 * go past the end of the uniform. In order to keep the n'th
3875 * component from running past, we subtract off the size of all but
3876 * one component of the vector.
3877 */
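         /* For example, indirectly indexing a uniform array of four vec2s
          * gives const_index[1] == 32 and read_size == 32 - 1 * 4 == 28,
          * which keeps the read of the last component within the 32-byte
          * bound.
          */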
3878 assert(instr->const_index[1] >=
3879 instr->num_components * (int) type_sz(dest.type));
3880 unsigned read_size = instr->const_index[1] -
3881 (instr->num_components - 1) * type_sz(dest.type);
3882
3883 bool supports_64bit_indirects =
3884 !devinfo->is_cherryview && !gen_device_info_is_9lp(devinfo);
3885
3886 if (type_sz(dest.type) != 8 || supports_64bit_indirects) {
3887 for (unsigned j = 0; j < instr->num_components; j++) {
3888 bld.emit(SHADER_OPCODE_MOV_INDIRECT,
3889 offset(dest, bld, j), offset(src, bld, j),
3890 indirect, brw_imm_ud(read_size));
3891 }
3892 } else {
3893 const unsigned num_mov_indirects =
3894 type_sz(dest.type) / type_sz(BRW_REGISTER_TYPE_UD);
3895             /* We read a little bit less per MOV INDIRECT, as they are now
3896              * 32-bit ones instead of 64-bit ones. Adjust read_size accordingly.
3897 */
3898 const unsigned read_size_32bit = read_size -
3899 (num_mov_indirects - 1) * type_sz(BRW_REGISTER_TYPE_UD);
3900 for (unsigned j = 0; j < instr->num_components; j++) {
3901 for (unsigned i = 0; i < num_mov_indirects; i++) {
3902 bld.emit(SHADER_OPCODE_MOV_INDIRECT,
3903 subscript(offset(dest, bld, j), BRW_REGISTER_TYPE_UD, i),
3904 subscript(offset(src, bld, j), BRW_REGISTER_TYPE_UD, i),
3905 indirect, brw_imm_ud(read_size_32bit));
3906 }
3907 }
3908 }
3909 }
3910 break;
3911 }
3912
3913 case nir_intrinsic_load_ubo: {
3914 nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
3915 fs_reg surf_index;
3916
3917 if (const_index) {
3918 const unsigned index = stage_prog_data->binding_table.ubo_start +
3919 const_index->u32[0];
3920 surf_index = brw_imm_ud(index);
3921 brw_mark_surface_used(prog_data, index);
3922 } else {
3923 /* The block index is not a constant. Evaluate the index expression
3924 * per-channel and add the base UBO index; we have to select a value
3925 * from any live channel.
3926 */
3927 surf_index = vgrf(glsl_type::uint_type);
3928 bld.ADD(surf_index, get_nir_src(instr->src[0]),
3929 brw_imm_ud(stage_prog_data->binding_table.ubo_start));
3930 surf_index = bld.emit_uniformize(surf_index);
3931
3932 /* Assume this may touch any UBO. It would be nice to provide
3933 * a tighter bound, but the array information is already lowered away.
3934 */
3935 brw_mark_surface_used(prog_data,
3936 stage_prog_data->binding_table.ubo_start +
3937 nir->info.num_ubos - 1);
3938 }
3939
3940 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
3941 if (const_offset == NULL) {
3942 fs_reg base_offset = retype(get_nir_src(instr->src[1]),
3943 BRW_REGISTER_TYPE_UD);
3944
3945 for (int i = 0; i < instr->num_components; i++)
3946 VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
3947 base_offset, i * type_sz(dest.type));
3948 } else {
3949 /* Even if we are loading doubles, a pull constant load will load
3950        * a 32-bit vec4, so we should only reserve vgrf space for that. If we
3951 * need to load a full dvec4 we will have to emit 2 loads. This is
3952 * similar to demote_pull_constants(), except that in that case we
3953 * see individual accesses to each component of the vector and then
3954 * we let CSE deal with duplicate loads. Here we see a vector access
3955 * and we have to split it if necessary.
3956 */
3957 const unsigned type_size = type_sz(dest.type);
3958
3959 /* See if we've selected this as a push constant candidate */
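         /* The ubo_ranges[] entries are expressed in 32-byte (256-bit) blocks,
          * hence the division by 32. As a hypothetical example, if
          * ubo_ranges[1] covers {block == ubo_block, start == 2, length == 4},
          * a constant at byte offset 96 (offset_256b == 3) would be read from
          * push register UBO_START + 1 at byte offset 96 - 64 == 32.
          */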
3960 if (const_index) {
3961 const unsigned ubo_block = const_index->u32[0];
3962 const unsigned offset_256b = const_offset->u32[0] / 32;
3963
3964 fs_reg push_reg;
3965 for (int i = 0; i < 4; i++) {
3966 const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
3967 if (range->block == ubo_block &&
3968 offset_256b >= range->start &&
3969 offset_256b < range->start + range->length) {
3970
3971 push_reg = fs_reg(UNIFORM, UBO_START + i, dest.type);
3972 push_reg.offset = const_offset->u32[0] - 32 * range->start;
3973 break;
3974 }
3975 }
3976
3977 if (push_reg.file != BAD_FILE) {
3978 for (unsigned i = 0; i < instr->num_components; i++) {
3979 bld.MOV(offset(dest, bld, i),
3980 byte_offset(push_reg, i * type_size));
3981 }
3982 break;
3983 }
3984 }
3985
3986 const unsigned block_sz = 64; /* Fetch one cacheline at a time. */
3987 const fs_builder ubld = bld.exec_all().group(block_sz / 4, 0);
3988 const fs_reg packed_consts = ubld.vgrf(BRW_REGISTER_TYPE_UD);
3989
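         /* For example, a 4-component float load at byte offset 56 takes two
          * block loads: the first fetches bytes 0..63 and supplies components
          * 0 and 1 (bytes 56 and 60), the second fetches bytes 64..127 and
          * supplies components 2 and 3.
          */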
3990 for (unsigned c = 0; c < instr->num_components;) {
3991 const unsigned base = const_offset->u32[0] + c * type_size;
3992 /* Number of usable components in the next block-aligned load. */
3993 const unsigned count = MIN2(instr->num_components - c,
3994 (block_sz - base % block_sz) / type_size);
3995
3996 ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
3997 packed_consts, surf_index,
3998 brw_imm_ud(base & ~(block_sz - 1)));
3999
4000 const fs_reg consts =
4001 retype(byte_offset(packed_consts, base & (block_sz - 1)),
4002 dest.type);
4003
4004 for (unsigned d = 0; d < count; d++)
4005 bld.MOV(offset(dest, bld, c + d), component(consts, d));
4006
4007 c += count;
4008 }
4009 }
4010 break;
4011 }
4012
4013 case nir_intrinsic_load_ssbo: {
4014 assert(devinfo->gen >= 7);
4015
4016 nir_const_value *const_uniform_block =
4017 nir_src_as_const_value(instr->src[0]);
4018
4019 fs_reg surf_index;
4020 if (const_uniform_block) {
4021 unsigned index = stage_prog_data->binding_table.ssbo_start +
4022 const_uniform_block->u32[0];
4023 surf_index = brw_imm_ud(index);
4024 brw_mark_surface_used(prog_data, index);
4025 } else {
4026 surf_index = vgrf(glsl_type::uint_type);
4027 bld.ADD(surf_index, get_nir_src(instr->src[0]),
4028 brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
4029
4030          /* Assume this may touch any SSBO. It would be nice to provide
4031 * a tighter bound, but the array information is already lowered away.
4032 */
4033 brw_mark_surface_used(prog_data,
4034 stage_prog_data->binding_table.ssbo_start +
4035 nir->info.num_ssbos - 1);
4036 }
4037
4038 fs_reg offset_reg;
4039 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
4040 if (const_offset) {
4041 offset_reg = brw_imm_ud(const_offset->u32[0]);
4042 } else {
4043 offset_reg = get_nir_src(instr->src[1]);
4044 }
4045
4046 /* Read the vector */
4047 do_untyped_vector_read(bld, dest, surf_index, offset_reg,
4048 instr->num_components);
4049
4050 break;
4051 }
4052
4053 case nir_intrinsic_store_ssbo: {
4054 assert(devinfo->gen >= 7);
4055
4056 if (stage == MESA_SHADER_FRAGMENT)
4057 brw_wm_prog_data(prog_data)->has_side_effects = true;
4058
4059 /* Block index */
4060 fs_reg surf_index;
4061 nir_const_value *const_uniform_block =
4062 nir_src_as_const_value(instr->src[1]);
4063 if (const_uniform_block) {
4064 unsigned index = stage_prog_data->binding_table.ssbo_start +
4065 const_uniform_block->u32[0];
4066 surf_index = brw_imm_ud(index);
4067 brw_mark_surface_used(prog_data, index);
4068 } else {
4069 surf_index = vgrf(glsl_type::uint_type);
4070 bld.ADD(surf_index, get_nir_src(instr->src[1]),
4071 brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
4072
4073 brw_mark_surface_used(prog_data,
4074 stage_prog_data->binding_table.ssbo_start +
4075 nir->info.num_ssbos - 1);
4076 }
4077
4078 /* Value */
4079 fs_reg val_reg = get_nir_src(instr->src[0]);
4080
4081 /* Writemask */
4082 unsigned writemask = instr->const_index[0];
4083
4084 /* get_nir_src() retypes to integer. Be wary of 64-bit types though
4085 * since the untyped writes below operate in units of 32-bits, which
4086 * means that we need to write twice as many components each time.
4087       * Also, we have to shuffle 64-bit data into the layout
4088 * expected by our 32-bit write messages.
4089 */
4090 unsigned bit_size = nir_src_bit_size(instr->src[0]);
4091 unsigned type_size = bit_size / 8;
4092 if (bit_size == 64) {
4093 val_reg = shuffle_64bit_data_for_32bit_write(bld,
4094 val_reg, instr->num_components);
4095 }
4096
4097 /* Combine groups of consecutive enabled channels in one write
4098 * message. We use ffs to find the first enabled channel and then ffs on
4099 * the bit-inverse, down-shifted writemask to determine the num_components
4100 * of the block of enabled bits.
4101 */
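      /* For example, a dvec3 store with writemask 0b111 starts with
       * first_component == 0 and num_components == 3; the 64-bit limit below
       * reduces that to 2, so the first message writes 4 dwords (components
       * 0 and 1) and a second message writes the remaining component.
       */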
4102 while (writemask) {
4103 unsigned first_component = ffs(writemask) - 1;
4104 unsigned num_components = ffs(~(writemask >> first_component)) - 1;
4105
4106 if (type_size > 4) {
4107 /* We can't write more than 2 64-bit components at once. Limit
4108 * the num_components of the write to what we can do and let the next
4109 * iteration handle the rest.
4110 */
4111 num_components = MIN2(2, num_components);
4112 } else if (type_size < 4) {
4113          /* For 16-bit types we are using byte scattered writes, which can
4114           * only write one component per message. So we limit num_components
4115           * to one and let the write happen over several iterations.
4116 */
4117 num_components = 1;
4118 }
4119
4120 fs_reg offset_reg;
4121 nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
4122 if (const_offset) {
4123 offset_reg = brw_imm_ud(const_offset->u32[0] +
4124 type_size * first_component);
4125 } else {
4126 offset_reg = vgrf(glsl_type::uint_type);
4127 bld.ADD(offset_reg,
4128 retype(get_nir_src(instr->src[2]), BRW_REGISTER_TYPE_UD),
4129 brw_imm_ud(type_size * first_component));
4130 }
4131
4132 if (type_size < 4) {
4133          /* Untyped Surface messages have a fixed 32-bit size, so we need
4134           * to rely on byte scattered writes in order to write 16-bit elements.
4135           * The byte_scattered_write message requires every written 16-bit
4136           * element to be 32-bit aligned (stride=2).
4137           * Additionally, while on Untyped Surface messages the bits of the
4138           * execution mask are ANDed with the corresponding bits of the
4139           * Pixel/Sample Mask, that is not the case for byte scattered writes.
4140           * That masking is what prevents SSBO stores from writing on helper
4141           * invocations, so when it matters we load the sample mask and
4142           * predicate the send message.
4143 */
4144 brw_predicate pred = BRW_PREDICATE_NONE;
4145
4146 if (stage == MESA_SHADER_FRAGMENT) {
4147 bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
4148 pred = BRW_PREDICATE_NORMAL;
4149 }
4150
4151 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_D);
4152 bld.MOV(subscript(tmp, BRW_REGISTER_TYPE_W, 0),
4153 offset(val_reg, bld, first_component));
4154 emit_byte_scattered_write(bld, surf_index, offset_reg,
4155 tmp,
4156 1 /* dims */, 1,
4157 bit_size,
4158 pred);
4159 } else {
4160 assert(num_components * type_size <= 16);
4161 assert((num_components * type_size) % 4 == 0);
4162 assert((first_component * type_size) % 4 == 0);
4163 unsigned first_slot = (first_component * type_size) / 4;
4164 unsigned num_slots = (num_components * type_size) / 4;
4165 emit_untyped_write(bld, surf_index, offset_reg,
4166 offset(val_reg, bld, first_slot),
4167 1 /* dims */, num_slots,
4168 BRW_PREDICATE_NONE);
4169 }
4170
4171 /* Clear the bits in the writemask that we just wrote, then try
4172 * again to see if more channels are left.
4173 */
4174 writemask &= (15 << (first_component + num_components));
4175 }
4176 break;
4177 }
4178
4179 case nir_intrinsic_store_output: {
4180 fs_reg src = get_nir_src(instr->src[0]);
4181
4182 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
4183 assert(const_offset && "Indirect output stores not allowed");
4184
4185 unsigned num_components = instr->num_components;
4186 unsigned first_component = nir_intrinsic_component(instr);
4187 if (nir_src_bit_size(instr->src[0]) == 64) {
4188 src = shuffle_64bit_data_for_32bit_write(bld, src, num_components);
4189 num_components *= 2;
4190 }
4191
4192 fs_reg new_dest = retype(offset(outputs[instr->const_index[0]], bld,
4193 4 * const_offset->u32[0]), src.type);
4194 for (unsigned j = 0; j < num_components; j++) {
4195 bld.MOV(offset(new_dest, bld, j + first_component),
4196 offset(src, bld, j));
4197 }
4198 break;
4199 }
4200
4201 case nir_intrinsic_ssbo_atomic_add:
4202 nir_emit_ssbo_atomic(bld, BRW_AOP_ADD, instr);
4203 break;
4204 case nir_intrinsic_ssbo_atomic_imin:
4205 nir_emit_ssbo_atomic(bld, BRW_AOP_IMIN, instr);
4206 break;
4207 case nir_intrinsic_ssbo_atomic_umin:
4208 nir_emit_ssbo_atomic(bld, BRW_AOP_UMIN, instr);
4209 break;
4210 case nir_intrinsic_ssbo_atomic_imax:
4211 nir_emit_ssbo_atomic(bld, BRW_AOP_IMAX, instr);
4212 break;
4213 case nir_intrinsic_ssbo_atomic_umax:
4214 nir_emit_ssbo_atomic(bld, BRW_AOP_UMAX, instr);
4215 break;
4216 case nir_intrinsic_ssbo_atomic_and:
4217 nir_emit_ssbo_atomic(bld, BRW_AOP_AND, instr);
4218 break;
4219 case nir_intrinsic_ssbo_atomic_or:
4220 nir_emit_ssbo_atomic(bld, BRW_AOP_OR, instr);
4221 break;
4222 case nir_intrinsic_ssbo_atomic_xor:
4223 nir_emit_ssbo_atomic(bld, BRW_AOP_XOR, instr);
4224 break;
4225 case nir_intrinsic_ssbo_atomic_exchange:
4226 nir_emit_ssbo_atomic(bld, BRW_AOP_MOV, instr);
4227 break;
4228 case nir_intrinsic_ssbo_atomic_comp_swap:
4229 nir_emit_ssbo_atomic(bld, BRW_AOP_CMPWR, instr);
4230 break;
4231
4232 case nir_intrinsic_get_buffer_size: {
4233 nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
4234 unsigned ssbo_index = const_uniform_block ? const_uniform_block->u32[0] : 0;
4235
4236 /* A resinfo's sampler message is used to get the buffer size. The
4237 * SIMD8's writeback message consists of four registers and SIMD16's
4238 * writeback message consists of 8 destination registers (two per each
4239 * component). Because we are only interested on the first channel of
4240 * the first returned component, where resinfo returns the buffer size
4241 * for SURFTYPE_BUFFER, we can just use the SIMD8 variant regardless of
4242 * the dispatch width.
4243 */
4244 const fs_builder ubld = bld.exec_all().group(8, 0);
4245 fs_reg src_payload = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4246 fs_reg ret_payload = ubld.vgrf(BRW_REGISTER_TYPE_UD, 4);
4247
4248 /* Set LOD = 0 */
4249 ubld.MOV(src_payload, brw_imm_d(0));
4250
4251 const unsigned index = prog_data->binding_table.ssbo_start + ssbo_index;
4252 fs_inst *inst = ubld.emit(FS_OPCODE_GET_BUFFER_SIZE, ret_payload,
4253 src_payload, brw_imm_ud(index));
4254 inst->header_size = 0;
4255 inst->mlen = 1;
4256 inst->size_written = 4 * REG_SIZE;
4257
4258 bld.MOV(retype(dest, ret_payload.type), component(ret_payload, 0));
4259 brw_mark_surface_used(prog_data, index);
4260 break;
4261 }
4262
4263 case nir_intrinsic_load_subgroup_invocation:
4264 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
4265 nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION]);
4266 break;
4267
4268 case nir_intrinsic_load_subgroup_eq_mask:
4269 case nir_intrinsic_load_subgroup_ge_mask:
4270 case nir_intrinsic_load_subgroup_gt_mask:
4271 case nir_intrinsic_load_subgroup_le_mask:
4272 case nir_intrinsic_load_subgroup_lt_mask:
4273 unreachable("not reached");
4274
4275 case nir_intrinsic_vote_any: {
4276 const fs_builder ubld = bld.exec_all().group(1, 0);
4277
4278 /* The any/all predicates do not consider channel enables. To prevent
4279       * dead channels from affecting the result, we initialize the flag
4280       * with the identity value for the logical operation.
4281 */
4282 if (dispatch_width == 32) {
4283 /* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
4284 ubld.MOV(retype(brw_flag_reg(0, 0), BRW_REGISTER_TYPE_UD),
4285 brw_imm_ud(0));
4286 } else {
4287 ubld.MOV(brw_flag_reg(0, 0), brw_imm_uw(0));
4288 }
4289 bld.CMP(bld.null_reg_d(), get_nir_src(instr->src[0]), brw_imm_d(0), BRW_CONDITIONAL_NZ);
4290
4291 /* For some reason, the any/all predicates don't work properly with
4292 * SIMD32. In particular, it appears that a SEL with a QtrCtrl of 2H
4293 * doesn't read the correct subset of the flag register and you end up
4294 * getting garbage in the second half. Work around this by using a pair
4295 * of 1-wide MOVs and scattering the result.
4296 */
4297 fs_reg res1 = ubld.vgrf(BRW_REGISTER_TYPE_D);
4298 ubld.MOV(res1, brw_imm_d(0));
4299 set_predicate(dispatch_width == 8 ? BRW_PREDICATE_ALIGN1_ANY8H :
4300 dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ANY16H :
4301 BRW_PREDICATE_ALIGN1_ANY32H,
4302 ubld.MOV(res1, brw_imm_d(-1)));
4303
4304 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), component(res1, 0));
4305 break;
4306 }
4307 case nir_intrinsic_vote_all: {
4308 const fs_builder ubld = bld.exec_all().group(1, 0);
4309
4310 /* The any/all predicates do not consider channel enables. To prevent
4311       * dead channels from affecting the result, we initialize the flag
4312       * with the identity value for the logical operation.
4313 */
4314 if (dispatch_width == 32) {
4315 /* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
4316 ubld.MOV(retype(brw_flag_reg(0, 0), BRW_REGISTER_TYPE_UD),
4317 brw_imm_ud(0xffffffff));
4318 } else {
4319 ubld.MOV(brw_flag_reg(0, 0), brw_imm_uw(0xffff));
4320 }
4321 bld.CMP(bld.null_reg_d(), get_nir_src(instr->src[0]), brw_imm_d(0), BRW_CONDITIONAL_NZ);
4322
4323 /* For some reason, the any/all predicates don't work properly with
4324 * SIMD32. In particular, it appears that a SEL with a QtrCtrl of 2H
4325 * doesn't read the correct subset of the flag register and you end up
4326 * getting garbage in the second half. Work around this by using a pair
4327 * of 1-wide MOVs and scattering the result.
4328 */
4329 fs_reg res1 = ubld.vgrf(BRW_REGISTER_TYPE_D);
4330 ubld.MOV(res1, brw_imm_d(0));
4331 set_predicate(dispatch_width == 8 ? BRW_PREDICATE_ALIGN1_ALL8H :
4332 dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ALL16H :
4333 BRW_PREDICATE_ALIGN1_ALL32H,
4334 ubld.MOV(res1, brw_imm_d(-1)));
4335
4336 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), component(res1, 0));
4337 break;
4338 }
4339 case nir_intrinsic_vote_eq: {
4340 fs_reg value = get_nir_src(instr->src[0]);
4341 fs_reg uniformized = bld.emit_uniformize(value);
4342 const fs_builder ubld = bld.exec_all().group(1, 0);
4343
4344 /* The any/all predicates do not consider channel enables. To prevent
4345       * dead channels from affecting the result, we initialize the flag
4346       * with the identity value for the logical operation.
4347 */
4348 if (dispatch_width == 32) {
4349 /* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
4350 ubld.MOV(retype(brw_flag_reg(0, 0), BRW_REGISTER_TYPE_UD),
4351 brw_imm_ud(0xffffffff));
4352 } else {
4353 ubld.MOV(brw_flag_reg(0, 0), brw_imm_uw(0xffff));
4354 }
4355 bld.CMP(bld.null_reg_d(), value, uniformized, BRW_CONDITIONAL_Z);
4356
4357 /* For some reason, the any/all predicates don't work properly with
4358 * SIMD32. In particular, it appears that a SEL with a QtrCtrl of 2H
4359 * doesn't read the correct subset of the flag register and you end up
4360 * getting garbage in the second half. Work around this by using a pair
4361 * of 1-wide MOVs and scattering the result.
4362 */
4363 fs_reg res1 = ubld.vgrf(BRW_REGISTER_TYPE_D);
4364 ubld.MOV(res1, brw_imm_d(0));
4365 set_predicate(dispatch_width == 8 ? BRW_PREDICATE_ALIGN1_ALL8H :
4366 dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ALL16H :
4367 BRW_PREDICATE_ALIGN1_ALL32H,
4368 ubld.MOV(res1, brw_imm_d(-1)));
4369
4370 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), component(res1, 0));
4371 break;
4372 }
4373
4374 case nir_intrinsic_ballot: {
4375 const fs_reg value = retype(get_nir_src(instr->src[0]),
4376 BRW_REGISTER_TYPE_UD);
4377 struct brw_reg flag = brw_flag_reg(0, 0);
4378 /* FIXME: For SIMD32 programs, this causes us to stomp on f0.1 as well
4379 * as f0.0. This is a problem for fragment programs as we currently use
4380 * f0.1 for discards. Fortunately, we don't support SIMD32 fragment
4381 * programs yet so this isn't a problem. When we do, something will
4382 * have to change.
4383 */
4384 if (dispatch_width == 32)
4385 flag.type = BRW_REGISTER_TYPE_UD;
4386
4387 bld.exec_all().group(1, 0).MOV(flag, brw_imm_ud(0u));
4388 bld.CMP(bld.null_reg_ud(), value, brw_imm_ud(0u), BRW_CONDITIONAL_NZ);
4389
4390 if (instr->dest.ssa.bit_size > 32) {
4391 dest.type = BRW_REGISTER_TYPE_UQ;
4392 } else {
4393 dest.type = BRW_REGISTER_TYPE_UD;
4394 }
4395 bld.MOV(dest, flag);
4396 break;
4397 }
4398
4399 case nir_intrinsic_read_invocation: {
4400 const fs_reg value = get_nir_src(instr->src[0]);
4401 const fs_reg invocation = get_nir_src(instr->src[1]);
4402 fs_reg tmp = bld.vgrf(value.type);
4403
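      /* SHADER_OPCODE_BROADCAST picks the value of the channel selected by
       * the uniformized invocation index and leaves it in component 0 of tmp,
       * which is then copied to every channel of the destination below.
       */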
4404 bld.exec_all().emit(SHADER_OPCODE_BROADCAST, tmp, value,
4405 bld.emit_uniformize(invocation));
4406
4407 bld.MOV(retype(dest, value.type), fs_reg(component(tmp, 0)));
4408 break;
4409 }
4410
4411 case nir_intrinsic_read_first_invocation: {
4412 const fs_reg value = get_nir_src(instr->src[0]);
4413 bld.MOV(retype(dest, value.type), bld.emit_uniformize(value));
4414 break;
4415 }
4416
4417 default:
4418 unreachable("unknown intrinsic");
4419 }
4420 }
4421
4422 void
4423 fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
4424 int op, nir_intrinsic_instr *instr)
4425 {
4426 if (stage == MESA_SHADER_FRAGMENT)
4427 brw_wm_prog_data(prog_data)->has_side_effects = true;
4428
4429 fs_reg dest;
4430 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
4431 dest = get_nir_dest(instr->dest);
4432
4433 fs_reg surface;
4434 nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
4435 if (const_surface) {
4436 unsigned surf_index = stage_prog_data->binding_table.ssbo_start +
4437 const_surface->u32[0];
4438 surface = brw_imm_ud(surf_index);
4439 brw_mark_surface_used(prog_data, surf_index);
4440 } else {
4441 surface = vgrf(glsl_type::uint_type);
4442 bld.ADD(surface, get_nir_src(instr->src[0]),
4443 brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
4444
4445       /* Assume this may touch any SSBO. This is the same thing we do for
4446        * other UBO/SSBO accesses with a non-constant surface index.
4447 */
4448 brw_mark_surface_used(prog_data,
4449 stage_prog_data->binding_table.ssbo_start +
4450 nir->info.num_ssbos - 1);
4451 }
4452
4453 fs_reg offset = get_nir_src(instr->src[1]);
4454 fs_reg data1 = get_nir_src(instr->src[2]);
4455 fs_reg data2;
4456 if (op == BRW_AOP_CMPWR)
4457 data2 = get_nir_src(instr->src[3]);
4458
4459 /* Emit the actual atomic operation */
4460
4461 fs_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
4462 data1, data2,
4463 1 /* dims */, 1 /* rsize */,
4464 op,
4465 BRW_PREDICATE_NONE);
4466 dest.type = atomic_result.type;
4467 bld.MOV(dest, atomic_result);
4468 }
4469
4470 void
4471 fs_visitor::nir_emit_shared_atomic(const fs_builder &bld,
4472 int op, nir_intrinsic_instr *instr)
4473 {
4474 fs_reg dest;
4475 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
4476 dest = get_nir_dest(instr->dest);
4477
4478 fs_reg surface = brw_imm_ud(GEN7_BTI_SLM);
4479 fs_reg offset;
4480 fs_reg data1 = get_nir_src(instr->src[1]);
4481 fs_reg data2;
4482 if (op == BRW_AOP_CMPWR)
4483 data2 = get_nir_src(instr->src[2]);
4484
4485 /* Get the offset */
4486 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
4487 if (const_offset) {
4488 offset = brw_imm_ud(instr->const_index[0] + const_offset->u32[0]);
4489 } else {
4490 offset = vgrf(glsl_type::uint_type);
4491 bld.ADD(offset,
4492 retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
4493 brw_imm_ud(instr->const_index[0]));
4494 }
4495
4496    /* Emit the actual atomic operation */
4497
4498 fs_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
4499 data1, data2,
4500 1 /* dims */, 1 /* rsize */,
4501 op,
4502 BRW_PREDICATE_NONE);
4503 dest.type = atomic_result.type;
4504 bld.MOV(dest, atomic_result);
4505 }
4506
4507 void
4508 fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
4509 {
4510 unsigned texture = instr->texture_index;
4511 unsigned sampler = instr->sampler_index;
4512
4513 fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
4514
4515 srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(texture);
4516 srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_ud(sampler);
4517
4518 int lod_components = 0;
4519
4520 /* The hardware requires a LOD for buffer textures */
4521 if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
4522 srcs[TEX_LOGICAL_SRC_LOD] = brw_imm_d(0);
4523
4524 uint32_t header_bits = 0;
4525 for (unsigned i = 0; i < instr->num_srcs; i++) {
4526 fs_reg src = get_nir_src(instr->src[i].src);
4527 switch (instr->src[i].src_type) {
4528 case nir_tex_src_bias:
4529 srcs[TEX_LOGICAL_SRC_LOD] =
4530 retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
4531 break;
4532 case nir_tex_src_comparator:
4533 srcs[TEX_LOGICAL_SRC_SHADOW_C] = retype(src, BRW_REGISTER_TYPE_F);
4534 break;
4535 case nir_tex_src_coord:
4536 switch (instr->op) {
4537 case nir_texop_txf:
4538 case nir_texop_txf_ms:
4539 case nir_texop_txf_ms_mcs:
4540 case nir_texop_samples_identical:
4541 srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_REGISTER_TYPE_D);
4542 break;
4543 default:
4544 srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_REGISTER_TYPE_F);
4545 break;
4546 }
4547 break;
4548 case nir_tex_src_ddx:
4549 srcs[TEX_LOGICAL_SRC_LOD] = retype(src, BRW_REGISTER_TYPE_F);
4550 lod_components = nir_tex_instr_src_size(instr, i);
4551 break;
4552 case nir_tex_src_ddy:
4553 srcs[TEX_LOGICAL_SRC_LOD2] = retype(src, BRW_REGISTER_TYPE_F);
4554 break;
4555 case nir_tex_src_lod:
4556 switch (instr->op) {
4557 case nir_texop_txs:
4558 srcs[TEX_LOGICAL_SRC_LOD] =
4559 retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_UD);
4560 break;
4561 case nir_texop_txf:
4562 srcs[TEX_LOGICAL_SRC_LOD] =
4563 retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_D);
4564 break;
4565 default:
4566 srcs[TEX_LOGICAL_SRC_LOD] =
4567 retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
4568 break;
4569 }
4570 break;
4571 case nir_tex_src_ms_index:
4572 srcs[TEX_LOGICAL_SRC_SAMPLE_INDEX] = retype(src, BRW_REGISTER_TYPE_UD);
4573 break;
4574
4575 case nir_tex_src_offset: {
4576 nir_const_value *const_offset =
4577 nir_src_as_const_value(instr->src[i].src);
4578 unsigned offset_bits = 0;
4579 if (const_offset &&
4580 brw_texture_offset(const_offset->i32,
4581 nir_tex_instr_src_size(instr, i),
4582 &offset_bits)) {
4583 header_bits |= offset_bits;
4584 } else {
4585 srcs[TEX_LOGICAL_SRC_TG4_OFFSET] =
4586 retype(src, BRW_REGISTER_TYPE_D);
4587 }
4588 break;
4589 }
4590
4591 case nir_tex_src_projector:
4592 unreachable("should be lowered");
4593
4594 case nir_tex_src_texture_offset: {
4595 /* Figure out the highest possible texture index and mark it as used */
4596 uint32_t max_used = texture + instr->texture_array_size - 1;
4597 if (instr->op == nir_texop_tg4 && devinfo->gen < 8) {
4598 max_used += stage_prog_data->binding_table.gather_texture_start;
4599 } else {
4600 max_used += stage_prog_data->binding_table.texture_start;
4601 }
4602 brw_mark_surface_used(prog_data, max_used);
4603
4604 /* Emit code to evaluate the actual indexing expression */
4605 fs_reg tmp = vgrf(glsl_type::uint_type);
4606 bld.ADD(tmp, src, brw_imm_ud(texture));
4607 srcs[TEX_LOGICAL_SRC_SURFACE] = bld.emit_uniformize(tmp);
4608 break;
4609 }
4610
4611 case nir_tex_src_sampler_offset: {
4612 /* Emit code to evaluate the actual indexing expression */
4613 fs_reg tmp = vgrf(glsl_type::uint_type);
4614 bld.ADD(tmp, src, brw_imm_ud(sampler));
4615 srcs[TEX_LOGICAL_SRC_SAMPLER] = bld.emit_uniformize(tmp);
4616 break;
4617 }
4618
4619 case nir_tex_src_ms_mcs:
4620 assert(instr->op == nir_texop_txf_ms);
4621 srcs[TEX_LOGICAL_SRC_MCS] = retype(src, BRW_REGISTER_TYPE_D);
4622 break;
4623
4624 case nir_tex_src_plane: {
4625 nir_const_value *const_plane =
4626 nir_src_as_const_value(instr->src[i].src);
4627 const uint32_t plane = const_plane->u32[0];
4628 const uint32_t texture_index =
4629 instr->texture_index +
4630 stage_prog_data->binding_table.plane_start[plane] -
4631 stage_prog_data->binding_table.texture_start;
4632
4633 srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(texture_index);
4634 break;
4635 }
4636
4637 default:
4638 unreachable("unknown texture source");
4639 }
4640 }
4641
4642 if (srcs[TEX_LOGICAL_SRC_MCS].file == BAD_FILE &&
4643 (instr->op == nir_texop_txf_ms ||
4644 instr->op == nir_texop_samples_identical)) {
4645 if (devinfo->gen >= 7 &&
4646 key_tex->compressed_multisample_layout_mask & (1 << texture)) {
4647 srcs[TEX_LOGICAL_SRC_MCS] =
4648 emit_mcs_fetch(srcs[TEX_LOGICAL_SRC_COORDINATE],
4649 instr->coord_components,
4650 srcs[TEX_LOGICAL_SRC_SURFACE]);
4651 } else {
4652 srcs[TEX_LOGICAL_SRC_MCS] = brw_imm_ud(0u);
4653 }
4654 }
4655
4656 srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(instr->coord_components);
4657 srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(lod_components);
4658
4659 enum opcode opcode;
4660 switch (instr->op) {
4661 case nir_texop_tex:
4662 opcode = (stage == MESA_SHADER_FRAGMENT ? SHADER_OPCODE_TEX_LOGICAL :
4663 SHADER_OPCODE_TXL_LOGICAL);
4664 break;
4665 case nir_texop_txb:
4666 opcode = FS_OPCODE_TXB_LOGICAL;
4667 break;
4668 case nir_texop_txl:
4669 opcode = SHADER_OPCODE_TXL_LOGICAL;
4670 break;
4671 case nir_texop_txd:
4672 opcode = SHADER_OPCODE_TXD_LOGICAL;
4673 break;
4674 case nir_texop_txf:
4675 opcode = SHADER_OPCODE_TXF_LOGICAL;
4676 break;
4677 case nir_texop_txf_ms:
4678 if ((key_tex->msaa_16 & (1 << sampler)))
4679 opcode = SHADER_OPCODE_TXF_CMS_W_LOGICAL;
4680 else
4681 opcode = SHADER_OPCODE_TXF_CMS_LOGICAL;
4682 break;
4683 case nir_texop_txf_ms_mcs:
4684 opcode = SHADER_OPCODE_TXF_MCS_LOGICAL;
4685 break;
4686 case nir_texop_query_levels:
4687 case nir_texop_txs:
4688 opcode = SHADER_OPCODE_TXS_LOGICAL;
4689 break;
4690 case nir_texop_lod:
4691 opcode = SHADER_OPCODE_LOD_LOGICAL;
4692 break;
4693 case nir_texop_tg4:
4694 if (srcs[TEX_LOGICAL_SRC_TG4_OFFSET].file != BAD_FILE)
4695 opcode = SHADER_OPCODE_TG4_OFFSET_LOGICAL;
4696 else
4697 opcode = SHADER_OPCODE_TG4_LOGICAL;
4698 break;
4699 case nir_texop_texture_samples:
4700 opcode = SHADER_OPCODE_SAMPLEINFO_LOGICAL;
4701 break;
4702 case nir_texop_samples_identical: {
4703 fs_reg dst = retype(get_nir_dest(instr->dest), BRW_REGISTER_TYPE_D);
4704
4705 /* If mcs is an immediate value, it means there is no MCS. In that case
4706 * just return false.
4707 */
4708 if (srcs[TEX_LOGICAL_SRC_MCS].file == BRW_IMMEDIATE_VALUE) {
4709 bld.MOV(dst, brw_imm_ud(0u));
4710 } else if ((key_tex->msaa_16 & (1 << sampler))) {
4711 fs_reg tmp = vgrf(glsl_type::uint_type);
4712 bld.OR(tmp, srcs[TEX_LOGICAL_SRC_MCS],
4713 offset(srcs[TEX_LOGICAL_SRC_MCS], bld, 1));
4714 bld.CMP(dst, tmp, brw_imm_ud(0u), BRW_CONDITIONAL_EQ);
4715 } else {
4716 bld.CMP(dst, srcs[TEX_LOGICAL_SRC_MCS], brw_imm_ud(0u),
4717 BRW_CONDITIONAL_EQ);
4718 }
4719 return;
4720 }
4721 default:
4722 unreachable("unknown texture opcode");
4723 }
4724
4725 if (instr->op == nir_texop_tg4) {
4726 if (instr->component == 1 &&
4727 key_tex->gather_channel_quirk_mask & (1 << texture)) {
4728 /* gather4 sampler is broken for green channel on RG32F --
4729 * we must ask for blue instead.
4730 */
4731 header_bits |= 2 << 16;
4732 } else {
4733 header_bits |= instr->component << 16;
4734 }
4735 }
4736
4737 fs_reg dst = bld.vgrf(brw_type_for_nir_type(devinfo, instr->dest_type), 4);
4738 fs_inst *inst = bld.emit(opcode, dst, srcs, ARRAY_SIZE(srcs));
4739 inst->offset = header_bits;
4740
4741 const unsigned dest_size = nir_tex_instr_dest_size(instr);
4742 if (devinfo->gen >= 9 &&
4743 instr->op != nir_texop_tg4 && instr->op != nir_texop_query_levels) {
4744 unsigned write_mask = instr->dest.is_ssa ?
4745 nir_ssa_def_components_read(&instr->dest.ssa):
4746 (1 << dest_size) - 1;
4747 assert(write_mask != 0); /* dead code should have been eliminated */
4748 inst->size_written = util_last_bit(write_mask) *
4749 inst->dst.component_size(inst->exec_size);
4750 } else {
4751 inst->size_written = 4 * inst->dst.component_size(inst->exec_size);
4752 }
4753
4754 if (srcs[TEX_LOGICAL_SRC_SHADOW_C].file != BAD_FILE)
4755 inst->shadow_compare = true;
4756
4757 if (instr->op == nir_texop_tg4 && devinfo->gen == 6)
4758 emit_gen6_gather_wa(key_tex->gen6_gather_wa[texture], dst);
4759
4760 fs_reg nir_dest[4];
4761 for (unsigned i = 0; i < dest_size; i++)
4762 nir_dest[i] = offset(dst, bld, i);
4763
4764 if (instr->op == nir_texop_query_levels) {
4765 /* # levels is in .w */
4766 nir_dest[0] = offset(dst, bld, 3);
4767 } else if (instr->op == nir_texop_txs &&
4768 dest_size >= 3 && devinfo->gen < 7) {
4769 /* Gen4-6 return 0 instead of 1 for single layer surfaces. */
4770 fs_reg depth = offset(dst, bld, 2);
4771 nir_dest[2] = vgrf(glsl_type::int_type);
4772 bld.emit_minmax(nir_dest[2], depth, brw_imm_d(1), BRW_CONDITIONAL_GE);
4773 }
4774
4775 bld.LOAD_PAYLOAD(get_nir_dest(instr->dest), nir_dest, dest_size, 0);
4776 }
4777
4778 void
4779 fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
4780 {
4781 switch (instr->type) {
4782 case nir_jump_break:
4783 bld.emit(BRW_OPCODE_BREAK);
4784 break;
4785 case nir_jump_continue:
4786 bld.emit(BRW_OPCODE_CONTINUE);
4787 break;
4788 case nir_jump_return:
4789 default:
4790 unreachable("unknown jump");
4791 }
4792 }
4793
4794 /**
4795 * This helper takes the result of a load operation that reads 32-bit elements
4796 * in this format:
4797 *
4798 * x x x x x x x x
4799 * y y y y y y y y
4800 * z z z z z z z z
4801 * w w w w w w w w
4802 *
4803 * and shuffles the data to get this:
4804 *
4805 * x y x y x y x y
4806 * x y x y x y x y
4807 * z w z w z w z w
4808 * z w z w z w z w
4809 *
4810 * Which is exactly what we want if the load is reading 64-bit components
4811 * like doubles, where x represents the low 32-bit of the x double component
4812 * and y represents the high 32-bit of the x double component (likewise with
4813 * z and w for double component y). The parameter @components represents
4814 * the number of 64-bit components present in @src. This would typically be
4815 * 2 at most, since we can only fit 2 double elements in the result of a
4816 * vec4 load.
4817 *
4818 * Notice that @dst and @src can be the same register.
4819 */
4820 void
4821 shuffle_32bit_load_result_to_64bit_data(const fs_builder &bld,
4822 const fs_reg &dst,
4823 const fs_reg &src,
4824 uint32_t components)
4825 {
4826 assert(type_sz(src.type) == 4);
4827 assert(type_sz(dst.type) == 8);
4828
4829 /* A temporary that we will use to shuffle the 32-bit data of each
4830 * component in the vector into valid 64-bit data. We can't write directly
4831 * to dst because dst can be (and would usually be) the same as src
4832 * and in that case the first MOV in the loop below would overwrite the
4833 * data read in the second MOV.
4834 */
4835 fs_reg tmp = bld.vgrf(dst.type);
4836
4837 for (unsigned i = 0; i < components; i++) {
4838 const fs_reg component_i = offset(src, bld, 2 * i);
4839
4840 bld.MOV(subscript(tmp, src.type, 0), component_i);
4841 bld.MOV(subscript(tmp, src.type, 1), offset(component_i, bld, 1));
4842
4843 bld.MOV(offset(dst, bld, i), tmp);
4844 }
4845 }
4846
4847 /**
4848 * This helper does the inverse operation of
4849 * SHUFFLE_32BIT_LOAD_RESULT_TO_64BIT_DATA.
4850 *
4851  * We need to do this when we are going to use untyped write messages that
4852  * operate on 32-bit components, in order to arrange our 64-bit data in the
4853  * layout they expect.
4854 *
4855  * Notice that, unlike the inverse operation, this helper always allocates
4856  * a fresh destination register and returns it instead of shuffling in
4857  * place, since writing the shuffled data over @src would corrupt the
4858  * original 64-bit data before it has been fully read.
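 *
 * For @components == 1 (in SIMD8) this takes 64-bit data laid out as
 *
 *   x y x y x y x y
 *   x y x y x y x y
 *
 * (x/y being the low/high 32 bits of each 64-bit element) and produces
 *
 *   x x x x x x x x
 *   y y y y y y y y
 *
 * which is the layout the 32-bit write messages expect.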
4860 */
4861 fs_reg
4862 shuffle_64bit_data_for_32bit_write(const fs_builder &bld,
4863 const fs_reg &src,
4864 uint32_t components)
4865 {
4866 assert(type_sz(src.type) == 8);
4867
4868 fs_reg dst = bld.vgrf(BRW_REGISTER_TYPE_D, 2 * components);
4869
4870 for (unsigned i = 0; i < components; i++) {
4871 const fs_reg component_i = offset(src, bld, i);
4872 bld.MOV(offset(dst, bld, 2 * i), subscript(component_i, dst.type, 0));
4873 bld.MOV(offset(dst, bld, 2 * i + 1), subscript(component_i, dst.type, 1));
4874 }
4875
4876 return dst;
4877 }
4878
4879 fs_reg
4880 setup_imm_df(const fs_builder &bld, double v)
4881 {
4882 const struct gen_device_info *devinfo = bld.shader->devinfo;
4883 assert(devinfo->gen >= 7);
4884
4885 if (devinfo->gen >= 8)
4886 return brw_imm_df(v);
4887
4888    /* gen7.5 does not support DF immediates directly, but the DIM
4889     * instruction allows us to set a 64-bit immediate value.
4890 */
4891 if (devinfo->is_haswell) {
4892 const fs_builder ubld = bld.exec_all().group(1, 0);
4893 fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_DF, 1);
4894 ubld.DIM(dst, brw_imm_df(v));
4895 return component(dst, 0);
4896 }
4897
4898 /* gen7 does not support DF immediates, so we generate a 64-bit constant by
4899     * writing the low 32 bits of the constant to suboffset 0 of a VGRF and
4900     * the high 32 bits to suboffset 4, and then applying a stride of 0.
4901 *
4902 * Alternatively, we could also produce a normal VGRF (without stride 0)
4903 * by writing to all the channels in the VGRF, however, that would hit the
4904 * gen7 bug where we have to split writes that span more than 1 register
4905 * into instructions with a width of 4 (otherwise the write to the second
4906 * register written runs into an execmask hardware bug) which isn't very
4907 * nice.
4908 */
4909 union {
4910 double d;
4911 struct {
4912 uint32_t i1;
4913 uint32_t i2;
4914 };
4915 } di;
4916
4917 di.d = v;
4918
4919 const fs_builder ubld = bld.exec_all().group(1, 0);
4920 const fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
4921 ubld.MOV(tmp, brw_imm_ud(di.i1));
4922 ubld.MOV(horiz_offset(tmp, 1), brw_imm_ud(di.i2));
4923
4924 return component(retype(tmp, BRW_REGISTER_TYPE_DF), 0);
4925 }