/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "glsl/ir_optimization.h"
#include "glsl/nir/glsl_to_nir.h"
#include "main/shaderimage.h"
#include "program/prog_to_nir.h"
#include "brw_fs.h"
#include "brw_fs_surface_builder.h"
#include "brw_nir.h"
void
fs_visitor::emit_nir_code()
{
   nir_shader *nir = prog->nir;

   /* emit the arrays used for inputs and outputs - load/store intrinsics will
    * be converted to reads/writes of these arrays
    */
   nir_setup_inputs(nir);
   nir_setup_outputs(nir);
   nir_setup_uniforms(nir);
   nir_emit_system_values(nir);

   /* get the main function and emit it */
   nir_foreach_overload(nir, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_emit_impl(overload->impl);
   }
}
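/* Allocate one contiguous VGRF array for the shader's inputs and copy each
 * input variable into it at var->data.driver_location, so that NIR
 * load_input intrinsics can be lowered to plain reads of this array.
 */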
void
fs_visitor::nir_setup_inputs(nir_shader *shader)
{
   nir_inputs = bld.vgrf(BRW_REGISTER_TYPE_F, shader->num_inputs);

   foreach_list_typed(nir_variable, var, node, &shader->inputs) {
      enum brw_reg_type type = brw_type_for_base_type(var->type);
      fs_reg input = offset(nir_inputs, bld, var->data.driver_location);

      fs_reg reg;
      switch (stage) {
      case MESA_SHADER_VERTEX: {
         /* Our ATTR file is indexed by VERT_ATTRIB_*, which is the value
          * stored in nir_variable::location.
          *
          * However, NIR's load_input intrinsics use a different index - an
          * offset into a single contiguous array containing all inputs.
          * This index corresponds to the nir_variable::driver_location field.
          *
          * So, we need to copy from fs_reg(ATTR, var->location) to
          * offset(nir_inputs, var->data.driver_location).
          */
         const glsl_type *const t = var->type->without_array();
         const unsigned components = t->components();
         const unsigned cols = t->matrix_columns;
         const unsigned elts = t->vector_elements;
         unsigned array_length = var->type->is_array() ? var->type->length : 1;
         for (unsigned i = 0; i < array_length; i++) {
            for (unsigned j = 0; j < cols; j++) {
               for (unsigned k = 0; k < elts; k++) {
                  bld.MOV(offset(retype(input, type), bld,
                                 components * i + elts * j + k),
                          offset(fs_reg(ATTR, var->data.location + i, type),
                                 bld, 4 * j + k));
               }
            }
         }
         break;
      }
      case MESA_SHADER_GEOMETRY:
      case MESA_SHADER_COMPUTE:
      case MESA_SHADER_TESS_CTRL:
      case MESA_SHADER_TESS_EVAL:
         unreachable("fs_visitor not used for these stages yet.");
         break;
      case MESA_SHADER_FRAGMENT:
         if (var->data.location == VARYING_SLOT_POS) {
            reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
                                                var->data.origin_upper_left);
            emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
                                      input, reg), 0xF);
         } else {
            emit_general_interpolation(input, var->name, var->type,
                                       (glsl_interp_qualifier) var->data.interpolation,
                                       var->data.location, var->data.centroid,
                                       var->data.sample);
         }
         break;
      }
   }
}
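/* Counterpart of nir_setup_inputs() for outputs: allocate the nir_outputs
 * array and point the per-output bookkeeping (this->outputs[],
 * output_components[], frag_depth, sample_mask, dual_src_output) at slices
 * of it, keyed by the variable's location.
 */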
void
fs_visitor::nir_setup_outputs(nir_shader *shader)
{
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;

   nir_outputs = bld.vgrf(BRW_REGISTER_TYPE_F, shader->num_outputs);

   foreach_list_typed(nir_variable, var, node, &shader->outputs) {
      fs_reg reg = offset(nir_outputs, bld, var->data.driver_location);

      int vector_elements =
         var->type->is_array() ? var->type->fields.array->vector_elements
                               : var->type->vector_elements;

      switch (stage) {
      case MESA_SHADER_VERTEX:
         for (unsigned int i = 0; i < ALIGN(type_size_scalar(var->type), 4) / 4; i++) {
            int output = var->data.location + i;
            this->outputs[output] = offset(reg, bld, 4 * i);
            this->output_components[output] = vector_elements;
         }
         break;
      case MESA_SHADER_FRAGMENT:
         if (var->data.index > 0) {
            assert(var->data.location == FRAG_RESULT_DATA0);
            assert(var->data.index == 1);
            this->dual_src_output = reg;
            this->do_dual_src = true;
         } else if (var->data.location == FRAG_RESULT_COLOR) {
            /* Writing gl_FragColor outputs to all color regions. */
            for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
               this->outputs[i] = reg;
               this->output_components[i] = 4;
            }
         } else if (var->data.location == FRAG_RESULT_DEPTH) {
            this->frag_depth = reg;
         } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
            this->sample_mask = reg;
         } else {
            /* gl_FragData or a user-defined FS output */
            assert(var->data.location >= FRAG_RESULT_DATA0 &&
                   var->data.location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);

            /* General color output. */
            for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
               int output = var->data.location - FRAG_RESULT_DATA0 + i;
               this->outputs[output] = offset(reg, bld, vector_elements * i);
               this->output_components[output] = vector_elements;
            }
         }
         break;
      default:
         unreachable("unhandled shader stage");
      }
   }
}
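/* Fill stage_prog_data->param[] with pointers to the backing storage of every
 * uniform.  GLSL programs walk gl_uniform_storage via nir_setup_uniform() and
 * nir_setup_builtin_uniform(); ARB programs lowered by prog_to_nir just copy
 * the parameter list directly.
 */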
void
fs_visitor::nir_setup_uniforms(nir_shader *shader)
{
   if (dispatch_width != 8)
      return;

   uniforms = shader->num_uniforms;

   if (shader_prog) {
      foreach_list_typed(nir_variable, var, node, &shader->uniforms) {
         /* UBO's and atomics don't take up space in the uniform file */
         if (var->interface_type != NULL || var->type->contains_atomic())
            continue;

         if (strncmp(var->name, "gl_", 3) == 0)
            nir_setup_builtin_uniform(var);
         else
            nir_setup_uniform(var);

         if (type_size_scalar(var->type) > 0)
            param_size[var->data.driver_location] = type_size_scalar(var->type);
      }
   } else {
      /* prog_to_nir only creates a single giant uniform variable so we can
       * just set param up directly. */
      for (unsigned p = 0; p < prog->Parameters->NumParameters; p++) {
         for (unsigned int i = 0; i < 4; i++) {
            stage_prog_data->param[4 * p + i] =
               &prog->Parameters->ParameterValues[p][i];
         }
      }

      if (prog->Parameters->NumParameters > 0)
         param_size[0] = prog->Parameters->NumParameters * 4;
   }
}
void
fs_visitor::nir_setup_uniform(nir_variable *var)
{
   int namelen = strlen(var->name);

   /* The data for our (non-builtin) uniforms is stored in a series of
    * gl_uniform_driver_storage structs for each subcomponent that
    * glGetUniformLocation() could name. We know it's been set up in the
    * same order we'd walk the type, so walk the list of storage and find
    * anything with our name, or the prefix of a component that starts with
    * our name.
    */
   unsigned index = var->data.driver_location;
   for (unsigned u = 0; u < shader_prog->NumUniformStorage; u++) {
      struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];

      if (storage->builtin)
         continue;

      if (strncmp(var->name, storage->name, namelen) != 0 ||
          (storage->name[namelen] != 0 &&
           storage->name[namelen] != '.' &&
           storage->name[namelen] != '[')) {
         continue;
      }

      if (storage->type->is_image()) {
         setup_image_uniform_values(index, storage);
      } else {
         unsigned slots = storage->type->component_slots();
         if (storage->array_elements)
            slots *= storage->array_elements;

         for (unsigned i = 0; i < slots; i++) {
            stage_prog_data->param[index++] = &storage->storage[i];
         }
      }
   }
}
void
fs_visitor::nir_setup_builtin_uniform(nir_variable *var)
{
   const nir_state_slot *const slots = var->state_slots;
   assert(var->state_slots != NULL);

   unsigned uniform_index = var->data.driver_location;
   for (unsigned int i = 0; i < var->num_state_slots; i++) {
      /* This state reference has already been setup by ir_to_mesa, but we'll
       * get the same index back here.
       */
      int index = _mesa_add_state_reference(this->prog->Parameters,
                                            (gl_state_index *)slots[i].tokens);

      /* Add each of the unique swizzles of the element as a parameter.
       * This'll end up matching the expected layout of the
       * array/matrix/structure we're trying to fill in.
       */
      int last_swiz = -1;
      for (unsigned int j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);
         if (swiz == last_swiz)
            break;
         last_swiz = swiz;

         stage_prog_data->param[uniform_index++] =
            &prog->Parameters->ParameterValues[index][swiz];
      }
   }
}
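/* nir_foreach_block() callback: scan a block for system-value intrinsics and,
 * the first time each value is seen, emit the payload setup for it, caching
 * the resulting register in v->nir_system_values[].
 */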
static bool
emit_system_values_block(nir_block *block, void *void_visitor)
{
   fs_visitor *v = (fs_visitor *)void_visitor;
   fs_reg *reg;

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_vertex_id:
         unreachable("should be lowered by lower_vertex_id().");

      case nir_intrinsic_load_vertex_id_zero_base:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
         break;

      case nir_intrinsic_load_base_vertex:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_BASE_VERTEX);
         break;

      case nir_intrinsic_load_instance_id:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_INSTANCE_ID);
         break;

      case nir_intrinsic_load_sample_pos:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_samplepos_setup();
         break;

      case nir_intrinsic_load_sample_id:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_sampleid_setup();
         break;

      case nir_intrinsic_load_sample_mask_in:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         assert(v->devinfo->gen >= 7);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
         if (reg->file == BAD_FILE)
            *reg = fs_reg(retype(brw_vec8_grf(v->payload.sample_mask_in_reg, 0),
                                 BRW_REGISTER_TYPE_D));
         break;

      default:
         break;
      }
   }

   return true;
}
void
fs_visitor::nir_emit_system_values(nir_shader *shader)
{
   nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
   nir_foreach_overload(shader, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_foreach_block(overload->impl, emit_system_values_block, this);
   }
}
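/* Allocate VGRF storage for every NIR register and SSA value in the function
 * before walking its control-flow list.
 */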
void
fs_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = reralloc(mem_ctx, nir_locals, fs_reg, impl->reg_alloc);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      nir_locals[reg->index] = bld.vgrf(BRW_REGISTER_TYPE_F, size);
   }

   nir_ssa_values = reralloc(mem_ctx, nir_ssa_values, fs_reg,
                             impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}
void
fs_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}
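/* Structured control flow: move the condition into the flag register with a
 * nonzero conditional mod, then bracket the two branch bodies with
 * IF/ELSE/ENDIF.
 */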
void
fs_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* first, put the condition into f0 */
   fs_inst *inst = bld.MOV(bld.null_reg_d(),
                           retype(get_nir_src(if_stmt->condition),
                                  BRW_REGISTER_TYPE_D));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   bld.IF(BRW_PREDICATE_NORMAL);

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   bld.emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   bld.emit(BRW_OPCODE_ENDIF);
}
void
fs_visitor::nir_emit_loop(nir_loop *loop)
{
   bld.emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   bld.emit(BRW_OPCODE_WHILE);
}
void
fs_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(block, instr) {
      nir_emit_instr(instr);
   }
}
void
fs_visitor::nir_emit_instr(nir_instr *instr)
{
   const fs_builder abld = bld.annotate(NULL, instr);

   switch (instr->type) {
   case nir_instr_type_alu:
      nir_emit_alu(abld, nir_instr_as_alu(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(abld, nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(abld, nir_instr_as_tex(instr));
      break;

   case nir_instr_type_load_const:
      nir_emit_load_const(abld, nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_ssa_undef:
      nir_emit_undef(abld, nir_instr_as_ssa_undef(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(abld, nir_instr_as_jump(instr));
      break;

   default:
      unreachable("unknown instruction type");
   }
}
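/* Try to turn (gl_FrontFacing ? +/-1.0 : -/+1.0) into an OR/AND pair on the
 * front-facing bit of the payload instead of a CMP+SEL.  Returns true if the
 * bcsel was handled here.
 */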
bool
fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
                                         const fs_reg &result)
{
   if (!instr->src[0].src.is_ssa ||
       instr->src[0].src.ssa->parent_instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *src0 =
      nir_instr_as_intrinsic(instr->src[0].src.ssa->parent_instr);

   if (src0->intrinsic != nir_intrinsic_load_front_face)
      return false;

   nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
   if (!value1 || fabsf(value1->f[0]) != 1.0f)
      return false;

   nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
   if (!value2 || fabsf(value2->f[0]) != 1.0f)
      return false;

   fs_reg tmp = vgrf(glsl_type::int_type);

   if (devinfo->gen >= 6) {
      /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
      fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp.1<2>W  g0.0<0,1,0>W  0x00003f80W
       *    and(8) dst<1>D    tmp<8,8,1>D   0xbf800000D
       *
       * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
       *
       * This negation looks like it's safe in practice, because bits 0:4 will
       * surely be TRIANGLES
       */
      if (value1->f[0] == -1.0f) {
         g0.negate = true;
      }

      tmp.type = BRW_REGISTER_TYPE_W;
      tmp.subreg_offset = 2;

      fs_inst *or_inst = bld.OR(tmp, g0, fs_reg(0x3f80));
      or_inst->src[1].type = BRW_REGISTER_TYPE_UW;

      tmp.type = BRW_REGISTER_TYPE_D;
      tmp.subreg_offset = 0;
   } else {
      /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
      fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp<1>D  g1.6<0,1,0>D  0x3f800000D
       *    and(8) dst<1>D  tmp<8,8,1>D   0xbf800000D
       *
       * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
       *
       * This negation looks like it's safe in practice, because bits 0:4 will
       * surely be TRIANGLES
       */
      if (value1->f[0] == -1.0f) {
         g1_6.negate = true;
      }

      bld.OR(tmp, g1_6, fs_reg(0x3f800000));
   }
   bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, fs_reg(0xbf800000));

   return true;
}
void
fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
{
   struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
   fs_inst *inst;

   fs_reg result = get_nir_dest(instr->dest.dest);
   result.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);

   fs_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      op[i] = get_nir_src(instr->src[i].src);
      op[i].type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[i]);
      op[i].abs = instr->src[i].abs;
      op[i].negate = instr->src[i].negate;
   }
   /* We get a bunch of mov's out of the from_ssa pass and they may still
    * be vectorized.  We'll handle them as a special-case.  We'll also
    * handle vecN here because it's basically the same thing.
    */
   switch (instr->op) {
   case nir_op_imov:
   case nir_op_fmov:
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4: {
      fs_reg temp = result;
      bool need_extra_copy = false;
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         if (!instr->src[i].src.is_ssa &&
             instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
            need_extra_copy = true;
            temp = bld.vgrf(result.type, 4);
            break;
         }
      }

      for (unsigned i = 0; i < 4; i++) {
         if (!(instr->dest.write_mask & (1 << i)))
            continue;

         if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
            inst = bld.MOV(offset(temp, bld, i),
                           offset(op[0], bld, instr->src[0].swizzle[i]));
         } else {
            inst = bld.MOV(offset(temp, bld, i),
                           offset(op[i], bld, instr->src[i].swizzle[0]));
         }
         inst->saturate = instr->dest.saturate;
      }

      /* In this case the source and destination registers were the same,
       * so we need to insert an extra set of moves in order to deal with
       * any swizzling.
       */
      if (need_extra_copy) {
         for (unsigned i = 0; i < 4; i++) {
            if (!(instr->dest.write_mask & (1 << i)))
               continue;

            bld.MOV(offset(result, bld, i), offset(temp, bld, i));
         }
      }
      return;
   }
   default:
      break;
   }
   /* At this point, we have dealt with any instruction that operates on
    * more than a single channel.  Therefore, we can just adjust the source
    * and destination registers for that channel and emit the instruction.
    */
   unsigned channel = 0;
   if (nir_op_infos[instr->op].output_size == 0) {
      /* Since NIR is doing the scalarizing for us, we should only ever see
       * vectorized operations with a single channel.
       */
      assert(_mesa_bitcount(instr->dest.write_mask) == 1);
      channel = ffs(instr->dest.write_mask) - 1;

      result = offset(result, bld, channel);
   }

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      assert(nir_op_infos[instr->op].input_sizes[i] < 2);
      op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
   }

   switch (instr->op) {
   case nir_op_i2f:
   case nir_op_u2f:
      inst = bld.MOV(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_f2i:
   case nir_op_f2u:
      bld.MOV(result, op[0]);
      break;
   case nir_op_fsign: {
      /* AND(val, 0x80000000) gives the sign bit.
       *
       * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
       * zero.
       */
      bld.CMP(bld.null_reg_f(), op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);

      fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
      op[0].type = BRW_REGISTER_TYPE_UD;
      result.type = BRW_REGISTER_TYPE_UD;
      bld.AND(result_int, op[0], fs_reg(0x80000000u));

      inst = bld.OR(result_int, result_int, fs_reg(0x3f800000u));
      inst->predicate = BRW_PREDICATE_NORMAL;
      if (instr->dest.saturate) {
         inst = bld.MOV(result, result);
         inst->saturate = true;
      }
      break;
   }
   case nir_op_isign:
      /*  ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *               -> non-negative val generates 0x00000000.
       *  Predicated OR sets 1 if val is positive.
       */
      bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_G);
      bld.ASR(result, op[0], fs_reg(31));
      inst = bld.OR(result, result, fs_reg(1));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   case nir_op_frcp:
      inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fexp2:
      inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flog2:
      inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fsin:
      inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fcos:
      inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx:
      if (fs_key->high_quality_derivatives) {
         inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
      } else {
         inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx_fine:
      inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx_coarse:
      inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy:
      if (fs_key->high_quality_derivatives) {
         inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
                         fs_reg(fs_key->render_to_fbo));
      } else {
         inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
                         fs_reg(fs_key->render_to_fbo));
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy_fine:
      inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
                      fs_reg(fs_key->render_to_fbo));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy_coarse:
      inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
                      fs_reg(fs_key->render_to_fbo));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fadd:
   case nir_op_iadd:
      inst = bld.ADD(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmul:
      inst = bld.MUL(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_imul:
      bld.MUL(result, op[0], op[1]);
      break;

   case nir_op_imul_high:
   case nir_op_umul_high:
      bld.emit(SHADER_OPCODE_MULH, result, op[0], op[1]);
      break;

   case nir_op_idiv:
   case nir_op_udiv:
      bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
      break;

   case nir_op_uadd_carry:
      unreachable("Should have been lowered by carry_to_arith().");

   case nir_op_usub_borrow:
      unreachable("Should have been lowered by borrow_to_arith().");

   case nir_op_umod:
      bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
      break;
   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_L);
      break;

   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_GE);
      break;

   case nir_op_feq:
   case nir_op_ieq:
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_Z);
      break;

   case nir_op_fne:
   case nir_op_ine:
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ);
      break;
   case nir_op_inot:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
      }
      bld.NOT(result, op[0]);
      break;
   case nir_op_ixor:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      bld.XOR(result, op[0], op[1]);
      break;
   case nir_op_ior:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      bld.OR(result, op[0], op[1]);
      break;
   case nir_op_iand:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      bld.AND(result, op[0], op[1]);
      break;
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      unreachable("Lowered by nir_lower_alu_reductions");

   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_frsq:
      inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_b2i:
   case nir_op_b2f:
      bld.MOV(result, negate(op[0]));
      break;

   case nir_op_f2b:
      bld.CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);
      break;
   case nir_op_i2b:
      bld.CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
      break;

   case nir_op_ftrunc:
      inst = bld.RNDZ(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fceil: {
      op[0].negate = !op[0].negate;
      fs_reg temp = vgrf(glsl_type::float_type);
      bld.RNDD(temp, op[0]);
      temp.negate = true;
      inst = bld.MOV(result, temp);
      inst->saturate = instr->dest.saturate;
      break;
   }
   case nir_op_ffloor:
      inst = bld.RNDD(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_ffract:
      inst = bld.FRC(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fround_even:
      inst = bld.RNDE(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      if (devinfo->gen >= 6) {
         inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_L;
      } else {
         bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_L);
         inst = bld.SEL(result, op[0], op[1]);
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      if (devinfo->gen >= 6) {
         inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_GE;
      } else {
         bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_GE);
         inst = bld.SEL(result, op[0], op[1]);
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_snorm_4x8:
   case nir_op_pack_unorm_2x16:
   case nir_op_pack_unorm_4x8:
   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_snorm_4x8:
   case nir_op_unpack_unorm_2x16:
   case nir_op_unpack_unorm_4x8:
   case nir_op_unpack_half_2x16:
   case nir_op_pack_half_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_unpack_half_2x16_split_x:
      inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_unpack_half_2x16_split_y:
      inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fpow:
      inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_bitfield_reverse:
      bld.BFREV(result, op[0]);
      break;

   case nir_op_bit_count:
      bld.CBIT(result, op[0]);
      break;

   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);

      /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
       * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
       * subtract the result from 31 to convert the MSB count into an LSB count.
       */
      bld.CMP(bld.null_reg_d(), result, fs_reg(-1), BRW_CONDITIONAL_NZ);
      fs_reg neg_result(result);
      neg_result.negate = true;
      inst = bld.ADD(result, neg_result, fs_reg(31));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }

   case nir_op_find_lsb:
      bld.FBL(result, op[0]);
      break;

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      bld.BFE(result, op[2], op[1], op[0]);
      break;
   case nir_op_bfm:
      bld.BFI1(result, op[0], op[1]);
      break;
   case nir_op_bfi:
      bld.BFI2(result, op[0], op[1], op[2]);
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should be handled by "
                  "lower_instructions::bitfield_insert_to_bfm_bfi");
   case nir_op_ishl:
      bld.SHL(result, op[0], op[1]);
      break;
   case nir_op_ishr:
      bld.ASR(result, op[0], op[1]);
      break;
   case nir_op_ushr:
      bld.SHR(result, op[0], op[1]);
      break;

   case nir_op_pack_half_2x16_split:
      bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
      break;
   case nir_op_ffma:
      inst = bld.MAD(result, op[2], op[1], op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flrp:
      inst = bld.LRP(result, op[0], op[1], op[2]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bcsel:
      if (optimize_frontfacing_ternary(instr, result))
         return;

      bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
      inst = bld.SEL(result, op[1], op[2]);
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   default:
      unreachable("unhandled instruction");
   }
   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0
    */
   if (devinfo->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      fs_reg masked = vgrf(glsl_type::int_type);
      bld.AND(masked, result, fs_reg(1));
      masked.negate = true;
      bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
   }
}
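/* Constants are materialized as integer MOVs into a fresh VGRF, one component
 * at a time, and recorded in nir_ssa_values[] for later uses.
 */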
void
fs_visitor::nir_emit_load_const(const fs_builder &bld,
                                nir_load_const_instr *instr)
{
   fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_D, instr->def.num_components);

   for (unsigned i = 0; i < instr->def.num_components; i++)
      bld.MOV(offset(reg, bld, i), fs_reg(instr->value.i[i]));

   nir_ssa_values[instr->def.index] = reg;
}
void
fs_visitor::nir_emit_undef(const fs_builder &bld, nir_ssa_undef_instr *instr)
{
   nir_ssa_values[instr->def.index] = bld.vgrf(BRW_REGISTER_TYPE_D,
                                               instr->def.num_components);
}
static fs_reg
fs_reg_for_nir_reg(fs_visitor *v, nir_register *nir_reg,
                   unsigned base_offset, nir_src *indirect)
{
   fs_reg reg;

   assert(!nir_reg->is_global);

   reg = v->nir_locals[nir_reg->index];

   reg = offset(reg, v->bld, base_offset * nir_reg->num_components);
   if (indirect) {
      int multiplier = nir_reg->num_components * (v->dispatch_width / 8);

      reg.reladdr = new(v->mem_ctx) fs_reg(v->vgrf(glsl_type::int_type));
      v->bld.MUL(*reg.reladdr, v->get_nir_src(*indirect),
                 fs_reg(multiplier));
   }

   return reg;
}
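/* Resolve a NIR source to an fs_reg: SSA values come straight out of
 * nir_ssa_values[], while registers go through fs_reg_for_nir_reg() to handle
 * array offsets and indirect addressing.
 */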
fs_reg
fs_visitor::get_nir_src(nir_src src)
{
   fs_reg reg;
   if (src.is_ssa) {
      reg = nir_ssa_values[src.ssa->index];
   } else {
      reg = fs_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
                               src.reg.indirect);
   }

   /* to avoid floating-point denorm flushing problems, set the type by
    * default to D - instructions that need floating point semantics will set
    * this to F if they need to
    */
   return retype(reg, BRW_REGISTER_TYPE_D);
}
fs_reg
fs_visitor::get_nir_dest(nir_dest dest)
{
   if (dest.is_ssa) {
      nir_ssa_values[dest.ssa.index] = bld.vgrf(BRW_REGISTER_TYPE_F,
                                                dest.ssa.num_components);
      return nir_ssa_values[dest.ssa.index];
   } else {
      return fs_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
                                dest.reg.indirect);
   }
}
fs_reg
fs_visitor::get_nir_image_deref(const nir_deref_var *deref)
{
   fs_reg image(UNIFORM, deref->var->data.driver_location,
                BRW_REGISTER_TYPE_UD);

   if (deref->deref.child) {
      const nir_deref_array *deref_array =
         nir_deref_as_array(deref->deref.child);
      assert(deref->deref.child->deref_type == nir_deref_type_array &&
             deref_array->deref.child == NULL);
      const unsigned size = glsl_get_length(deref->var->type);
      const unsigned base = MIN2(deref_array->base_offset, size - 1);

      image = offset(image, bld, base * BRW_IMAGE_PARAM_SIZE);

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         fs_reg *tmp = new(mem_ctx) fs_reg(vgrf(glsl_type::int_type));

         if (devinfo->gen == 7 && !devinfo->is_haswell) {
            /* IVB hangs when trying to access an invalid surface index with
             * the dataport.  According to the spec "if the index used to
             * select an individual element is negative or greater than or
             * equal to the size of the array, the results of the operation
             * are undefined but may not lead to termination" -- which is one
             * of the possible outcomes of the hang.  Clamp the index to
             * prevent access outside of the array bounds.
             */
            bld.emit_minmax(*tmp, retype(get_nir_src(deref_array->indirect),
                                         BRW_REGISTER_TYPE_UD),
                            fs_reg(size - base - 1), BRW_CONDITIONAL_L);
         } else {
            bld.MOV(*tmp, get_nir_src(deref_array->indirect));
         }

         bld.MUL(*tmp, *tmp, fs_reg(BRW_IMAGE_PARAM_SIZE));
         image.reladdr = tmp;
      }
   }

   return image;
}
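/* Replay a template instruction once per component enabled in wr_mask,
 * offsetting the destination and any GRF sources by the component index.
 */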
void
fs_visitor::emit_percomp(const fs_builder &bld, const fs_inst &inst,
                         unsigned wr_mask)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_inst *new_inst = new(mem_ctx) fs_inst(inst);
      new_inst->dst = offset(new_inst->dst, bld, i);
      for (unsigned j = 0; j < new_inst->sources; j++)
         if (new_inst->src[j].file == GRF)
            new_inst->src[j] = offset(new_inst->src[j], bld, i);

      bld.emit(new_inst);
   }
}
/**
 * Get the matching channel register datatype for an image intrinsic of the
 * specified GLSL image type.
 */
static brw_reg_type
get_image_base_type(const glsl_type *type)
{
   switch ((glsl_base_type)type->sampler_type) {
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_INT:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   default:
      unreachable("Not reached.");
   }
}
/**
 * Get the appropriate atomic op for an image atomic intrinsic.
 */
static unsigned
get_image_atomic_op(nir_intrinsic_op op, const glsl_type *type)
{
   switch (op) {
   case nir_intrinsic_image_atomic_add:
      return BRW_AOP_ADD;
   case nir_intrinsic_image_atomic_min:
      return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
              BRW_AOP_IMIN : BRW_AOP_UMIN);
   case nir_intrinsic_image_atomic_max:
      return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
              BRW_AOP_IMAX : BRW_AOP_UMAX);
   case nir_intrinsic_image_atomic_and:
      return BRW_AOP_AND;
   case nir_intrinsic_image_atomic_or:
      return BRW_AOP_OR;
   case nir_intrinsic_image_atomic_xor:
      return BRW_AOP_XOR;
   case nir_intrinsic_image_atomic_exchange:
      return BRW_AOP_MOV;
   case nir_intrinsic_image_atomic_comp_swap:
      return BRW_AOP_CMPWR;
   default:
      unreachable("Not reachable.");
   }
}
void
fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
{
   fs_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   bool has_indirect = false;

   switch (instr->intrinsic) {
   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if: {
      /* We track our discarded pixels in f0.1.  By predicating on it, we can
       * update just the flag bits that aren't yet discarded.  If there's no
       * condition, we emit a CMP of g0 != g0, so all currently executing
       * channels will get turned off.
       */
      fs_inst *cmp;
      if (instr->intrinsic == nir_intrinsic_discard_if) {
         cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
                       fs_reg(0), BRW_CONDITIONAL_Z);
      } else {
         fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
                                         BRW_REGISTER_TYPE_UW));
         cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
      }
      cmp->predicate = BRW_PREDICATE_NORMAL;
      cmp->flag_subreg = 1;

      if (devinfo->gen >= 6) {
         emit_discard_jump();
      }
      break;
   }
   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_dec:
   case nir_intrinsic_atomic_counter_read: {
      using namespace surface_access;

      /* Get the arguments of the atomic intrinsic. */
      const fs_reg offset = get_nir_src(instr->src[0]);
      const unsigned surface = (stage_prog_data->binding_table.abo_start +
                                instr->const_index[0]);
      fs_reg tmp;

      /* Emit a surface read or atomic op. */
      switch (instr->intrinsic) {
      case nir_intrinsic_atomic_counter_read:
         tmp = emit_untyped_read(bld, fs_reg(surface), offset, 1, 1);
         break;

      case nir_intrinsic_atomic_counter_inc:
         tmp = emit_untyped_atomic(bld, fs_reg(surface), offset, fs_reg(),
                                   fs_reg(), 1, 1, BRW_AOP_INC);
         break;

      case nir_intrinsic_atomic_counter_dec:
         tmp = emit_untyped_atomic(bld, fs_reg(surface), offset, fs_reg(),
                                   fs_reg(), 1, 1, BRW_AOP_PREDEC);
         break;

      default:
         unreachable("Unreachable");
      }

      /* Assign the result. */
      bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD), tmp);

      /* Mark the surface as used. */
      brw_mark_surface_used(stage_prog_data, surface);
      break;
   }
   case nir_intrinsic_image_load:
   case nir_intrinsic_image_store:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_image_atomic_min:
   case nir_intrinsic_image_atomic_max:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_image_atomic_comp_swap: {
      using namespace image_access;

      /* Get the referenced image variable and type. */
      const nir_variable *var = instr->variables[0]->var;
      const glsl_type *type = var->type->without_array();
      const brw_reg_type base_type = get_image_base_type(type);

      /* Get some metadata from the image intrinsic. */
      const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
      const unsigned arr_dims = type->sampler_array ? 1 : 0;
      const unsigned surf_dims = type->coordinate_components() - arr_dims;
      const mesa_format format =
         (var->data.image.write_only ? MESA_FORMAT_NONE :
          _mesa_get_shader_image_format(var->data.image.format));

      /* Get the arguments of the image intrinsic. */
      const fs_reg image = get_nir_image_deref(instr->variables[0]);
      const fs_reg addr = retype(get_nir_src(instr->src[0]),
                                 BRW_REGISTER_TYPE_UD);
      const fs_reg src0 = (info->num_srcs >= 3 ?
                           retype(get_nir_src(instr->src[2]), base_type) :
                           fs_reg());
      const fs_reg src1 = (info->num_srcs >= 4 ?
                           retype(get_nir_src(instr->src[3]), base_type) :
                           fs_reg());
      fs_reg tmp;

      /* Emit an image load, store or atomic op. */
      if (instr->intrinsic == nir_intrinsic_image_load)
         tmp = emit_image_load(bld, image, addr, surf_dims, arr_dims, format);

      else if (instr->intrinsic == nir_intrinsic_image_store)
         emit_image_store(bld, image, addr, src0, surf_dims, arr_dims, format);

      else
         tmp = emit_image_atomic(bld, image, addr, src0, src1,
                                 surf_dims, arr_dims, info->dest_components,
                                 get_image_atomic_op(instr->intrinsic, type));

      /* Assign the result. */
      for (unsigned c = 0; c < info->dest_components; ++c)
         bld.MOV(offset(retype(dest, base_type), bld, c),
                 offset(tmp, bld, c));
      break;
   }
   case nir_intrinsic_memory_barrier: {
      const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 16 / dispatch_width);
      bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
         ->regs_written = 2;
      break;
   }
   case nir_intrinsic_image_size: {
      /* Get the referenced image variable and type. */
      const nir_variable *var = instr->variables[0]->var;
      const glsl_type *type = var->type->without_array();

      /* Get the size of the image. */
      const fs_reg image = get_nir_image_deref(instr->variables[0]);
      const fs_reg size = offset(image, bld, BRW_IMAGE_PARAM_SIZE_OFFSET);

      /* For 1DArray image types, the array index is stored in the Z component.
       * Fix this by swizzling the Z component to the Y component.
       */
      const bool is_1d_array_image =
         type->sampler_dimensionality == GLSL_SAMPLER_DIM_1D &&
         type->sampler_array;

      /* For CubeArray images, we should count the number of cubes instead
       * of the number of faces. Fix it by dividing the (Z component) by 6.
       */
      const bool is_cube_array_image =
         type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE &&
         type->sampler_array;

      /* Copy all the components. */
      const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
      for (unsigned c = 0; c < info->dest_components; ++c) {
         if ((int)c >= type->coordinate_components()) {
            bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
                    fs_reg(1));
         } else if (c == 1 && is_1d_array_image) {
            bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
                    offset(size, bld, 2));
         } else if (c == 2 && is_cube_array_image) {
            bld.emit(SHADER_OPCODE_INT_QUOTIENT,
                     offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
                     offset(size, bld, c), fs_reg(6));
         } else {
            bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
                    offset(size, bld, c));
         }
      }
      break;
   }
   case nir_intrinsic_image_samples:
      /* The driver does not support multi-sampled images. */
      bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), fs_reg(1));
      break;

   case nir_intrinsic_load_front_face:
      bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
              *emit_frontfacing_interpolation());
      break;

   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id()");
   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_sample_id: {
      gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
      fs_reg val = nir_system_values[sv];
      assert(val.file != BAD_FILE);
      dest.type = val.type;
      bld.MOV(dest, val);
      break;
   }

   case nir_intrinsic_load_sample_pos: {
      fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
      assert(sample_pos.file != BAD_FILE);
      dest.type = sample_pos.type;
      bld.MOV(dest, sample_pos);
      bld.MOV(offset(dest, bld, 1), offset(sample_pos, bld, 1));
      break;
   }
   case nir_intrinsic_load_uniform_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_uniform: {
      fs_reg uniform_reg(UNIFORM, instr->const_index[0]);
      uniform_reg.reg_offset = instr->const_index[1];

      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg src = offset(retype(uniform_reg, dest.type), bld, j);
         if (has_indirect)
            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));

         bld.MOV(dest, src);
         dest = offset(dest, bld, 1);
      }
      break;
   }
   case nir_intrinsic_load_ubo_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_ubo: {
      nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
      fs_reg surf_index;

      if (const_index) {
         surf_index = fs_reg(stage_prog_data->binding_table.ubo_start +
                             const_index->u[0]);
      } else {
         /* The block index is not a constant. Evaluate the index expression
          * per-channel and add the base UBO index; we have to select a value
          * from any live channel.
          */
         surf_index = vgrf(glsl_type::uint_type);
         bld.ADD(surf_index, get_nir_src(instr->src[0]),
                 fs_reg(stage_prog_data->binding_table.ubo_start));
         surf_index = bld.emit_uniformize(surf_index);

         /* Assume this may touch any UBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(prog_data,
                               stage_prog_data->binding_table.ubo_start +
                               shader_prog->NumUniformBlocks - 1);
      }

      if (has_indirect) {
         /* Turn the byte offset into a dword offset. */
         fs_reg base_offset = vgrf(glsl_type::int_type);
         bld.SHR(base_offset, retype(get_nir_src(instr->src[1]),
                                     BRW_REGISTER_TYPE_D),
                 fs_reg(2));

         unsigned vec4_offset = instr->const_index[0] / 4;
         for (int i = 0; i < instr->num_components; i++)
            VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
                                       base_offset, vec4_offset + i);
      } else {
         fs_reg packed_consts = vgrf(glsl_type::float_type);
         packed_consts.type = dest.type;

         fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
         bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
                  surf_index, const_offset_reg);

         for (unsigned i = 0; i < instr->num_components; i++) {
            packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i);

            /* The std140 packing rules don't allow vectors to cross 16-byte
             * boundaries, and a reg is 32 bytes.
             */
            assert(packed_consts.subreg_offset < 32);

            bld.MOV(dest, packed_consts);
            dest = offset(dest, bld, 1);
         }
      }
      break;
   }
   case nir_intrinsic_load_input_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_input: {
      unsigned index = 0;
      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg src = offset(retype(nir_inputs, dest.type), bld,
                             instr->const_index[0] + index);
         if (has_indirect)
            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
         index++;

         bld.MOV(dest, src);
         dest = offset(dest, bld, 1);
      }
      break;
   }
   /* Handle ARB_gpu_shader5 interpolation intrinsics
    *
    * It's worth a quick word of explanation as to why we handle the full
    * variable-based interpolation intrinsic rather than a lowered version
    * with like we do for other inputs.  We have to do that because the way
    * we set up inputs doesn't allow us to use the already setup inputs for
    * interpolation.  At the beginning of the shader, we go through all of
    * the input variables and do the initial interpolation and put it in
    * the nir_inputs array based on its location as determined in
    * nir_lower_io.  If the input isn't used, dead code cleans up and
    * everything works fine.  However, when we get to the ARB_gpu_shader5
    * interpolation intrinsics, we need to reinterpolate the input
    * differently.  If we used an intrinsic that just had an index it would
    * only give us the offset into the nir_inputs array.  However, this is
    * useless because that value is post-interpolation and we need
    * pre-interpolation.  In order to get the actual location of the bits
    * we get from the vertex fetching hardware, we need the variable.
    */
   case nir_intrinsic_interp_var_at_centroid:
   case nir_intrinsic_interp_var_at_sample:
   case nir_intrinsic_interp_var_at_offset: {
      assert(stage == MESA_SHADER_FRAGMENT);

      ((struct brw_wm_prog_data *) prog_data)->pulls_bary = true;

      fs_reg dst_xy = bld.vgrf(BRW_REGISTER_TYPE_F, 2);

      /* For most messages, we need one reg of ignored data; the hardware
       * requires mlen==1 even when there is no payload. in the per-slot
       * offset case, we'll replace this with the proper source data.
       */
      fs_reg src = vgrf(glsl_type::float_type);
      int mlen = 1;     /* one reg unless overriden */
      fs_inst *inst;

      switch (instr->intrinsic) {
      case nir_intrinsic_interp_var_at_centroid:
         inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_CENTROID,
                         dst_xy, src, fs_reg(0u));
         break;

      case nir_intrinsic_interp_var_at_sample: {
         /* XXX: We should probably handle non-constant sample id's */
         nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
         assert(const_sample);
         unsigned msg_data = const_sample ? const_sample->i[0] << 4 : 0;
         inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_SAMPLE, dst_xy, src,
                         fs_reg(msg_data));
         break;
      }

      case nir_intrinsic_interp_var_at_offset: {
         nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);

         if (const_offset) {
            unsigned off_x = MIN2((int)(const_offset->f[0] * 16), 7) & 0xf;
            unsigned off_y = MIN2((int)(const_offset->f[1] * 16), 7) & 0xf;

            inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, dst_xy, src,
                            fs_reg(off_x | (off_y << 4)));
         } else {
            src = vgrf(glsl_type::ivec2_type);
            fs_reg offset_src = retype(get_nir_src(instr->src[0]),
                                       BRW_REGISTER_TYPE_F);
            for (int i = 0; i < 2; i++) {
               fs_reg temp = vgrf(glsl_type::float_type);
               bld.MUL(temp, offset(offset_src, bld, i), fs_reg(16.0f));
               fs_reg itemp = vgrf(glsl_type::int_type);
               bld.MOV(itemp, temp);  /* float to int */

               /* Clamp the upper end of the range to +7/16.
                * ARB_gpu_shader5 requires that we support a maximum offset
                * of +0.5, which isn't representable in a S0.4 value -- if
                * we didn't clamp it, we'd end up with -8/16, which is the
                * opposite of what the shader author wanted.
                *
                * This is legal due to ARB_gpu_shader5's quantization
                * rules:
                *
                * "Not all values of <offset> may be supported; x and y
                * offsets may be rounded to fixed-point values with the
                * number of fraction bits given by the
                * implementation-dependent constant
                * FRAGMENT_INTERPOLATION_OFFSET_BITS"
                */
               set_condmod(BRW_CONDITIONAL_L,
                           bld.SEL(offset(src, bld, i), itemp, fs_reg(7)));
            }

            mlen = 2 * dispatch_width / 8;
            inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET, dst_xy, src,
                            fs_reg(0u));
         }
         break;
      }

      default:
         unreachable("Invalid intrinsic");
      }

      inst->mlen = mlen;
      /* 2 floats per slot returned */
      inst->regs_written = 2 * dispatch_width / 8;
      inst->pi_noperspective = instr->variables[0]->var->data.interpolation ==
                               INTERP_QUALIFIER_NOPERSPECTIVE;

      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
         src.type = dest.type;

         bld.emit(FS_OPCODE_LINTERP, dest, dst_xy, src);
         dest = offset(dest, bld, 1);
      }
      break;
   }
   case nir_intrinsic_store_output_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_store_output: {
      fs_reg src = get_nir_src(instr->src[0]);
      unsigned index = 0;
      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg new_dest = offset(retype(nir_outputs, src.type), bld,
                                  instr->const_index[0] + index);
         if (has_indirect)
            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
         index++;
         bld.MOV(new_dest, src);
         src = offset(src, bld, 1);
      }
      break;
   }
   case nir_intrinsic_barrier:
      emit_barrier();
      if (stage == MESA_SHADER_COMPUTE)
         ((struct brw_cs_prog_data *) prog_data)->uses_barrier = true;
      break;

   default:
      unreachable("unknown intrinsic");
   }
}
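/* Translate a nir_tex_instr into the shared emit_texture() helper: gather the
 * sources into typed fs_regs, map the NIR texop onto ir_texture_opcode, and
 * copy the result into the NIR destination one component at a time.
 */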
void
fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
{
   unsigned sampler = instr->sampler_index;
   fs_reg sampler_reg(sampler);

   /* FINISHME: We're failing to recompile our programs when the sampler is
    * updated.  This only matters for the texture rectangle scale parameters
    * (pre-gen6, or gen6+ with GL_CLAMP).
    */
   int texunit = prog->SamplerUnits[sampler];

   int gather_component = instr->component;

   bool is_rect = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;

   bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
                        instr->is_array;

   int lod_components = 0;
   int UNUSED offset_components = 0;

   fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, tex_offset;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      fs_reg src = get_nir_src(instr->src[i].src);
      switch (instr->src[i].src_type) {
      case nir_tex_src_bias:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_comparitor:
         shadow_comparitor = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_coord:
         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
            coordinate = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            coordinate = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ddx:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         lod_components = nir_tex_instr_src_size(instr, i);
         break;
      case nir_tex_src_ddy:
         lod2 = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
            lod = retype(src, BRW_REGISTER_TYPE_UD);
            break;
         case nir_texop_txf:
            lod = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            lod = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ms_index:
         sample_index = retype(src, BRW_REGISTER_TYPE_UD);
         break;
      case nir_tex_src_offset:
         tex_offset = retype(src, BRW_REGISTER_TYPE_D);
         if (instr->is_array)
            offset_components = instr->coord_components - 1;
         else
            offset_components = instr->coord_components;
         break;
      case nir_tex_src_projector:
         unreachable("should be lowered");

      case nir_tex_src_sampler_offset: {
         /* Figure out the highest possible sampler index and mark it as used */
         uint32_t max_used = sampler + instr->sampler_array_size - 1;
         if (instr->op == nir_texop_tg4 && devinfo->gen < 8) {
            max_used += stage_prog_data->binding_table.gather_texture_start;
         } else {
            max_used += stage_prog_data->binding_table.texture_start;
         }
         brw_mark_surface_used(prog_data, max_used);

         /* Emit code to evaluate the actual indexing expression */
         sampler_reg = vgrf(glsl_type::uint_type);
         bld.ADD(sampler_reg, src, fs_reg(sampler));
         sampler_reg = bld.emit_uniformize(sampler_reg);
         break;
      }

      default:
         unreachable("unknown texture source");
      }
   }
   if (instr->op == nir_texop_txf_ms) {
      if (devinfo->gen >= 7 &&
          key_tex->compressed_multisample_layout_mask & (1 << sampler)) {
         mcs = emit_mcs_fetch(coordinate, instr->coord_components, sampler_reg);
      } else {
         mcs = fs_reg(0u);
      }
   }

   for (unsigned i = 0; i < 3; i++) {
      if (instr->const_offset[i] != 0) {
         assert(offset_components == 0);
         tex_offset = fs_reg(brw_texture_offset(instr->const_offset, 3));
         break;
      }
   }
   enum glsl_base_type dest_base_type =
     brw_glsl_base_type_for_nir_type (instr->dest_type);

   const glsl_type *dest_type =
      glsl_type::get_instance(dest_base_type, nir_tex_instr_dest_size(instr),
                              1);

   ir_texture_opcode op;
   switch (instr->op) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   case nir_texop_texture_samples: {
      fs_reg dst = retype(get_nir_dest(instr->dest), BRW_REGISTER_TYPE_D);
      fs_inst *inst = bld.emit(SHADER_OPCODE_SAMPLEINFO, dst,
                               bld.vgrf(BRW_REGISTER_TYPE_D, 1),
                               sampler_reg);
      inst->mlen = 1;
      inst->header_size = 1;
      inst->base_mrf = -1;
      return;
   }
   default:
      unreachable("unknown texture opcode");
   }
   emit_texture(op, dest_type, coordinate, instr->coord_components,
                shadow_comparitor, lod, lod2, lod_components, sample_index,
                tex_offset, mcs, gather_component,
                is_cube_array, is_rect, sampler, sampler_reg, texunit);

   fs_reg dest = get_nir_dest(instr->dest);
   dest.type = this->result.type;
   unsigned num_components = nir_tex_instr_dest_size(instr);
   emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
                             dest, this->result),
                (1 << num_components) - 1);
}
void
fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      bld.emit(BRW_OPCODE_BREAK);
      break;
   case nir_jump_continue:
      bld.emit(BRW_OPCODE_CONTINUE);
      break;
   case nir_jump_return:
   default:
      unreachable("unknown jump");
   }
}