/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "glsl/ir_optimization.h"
#include "glsl/nir/glsl_to_nir.h"
#include "program/prog_to_nir.h"
void
fs_visitor::emit_nir_code()
{
   nir_shader *nir = prog->nir;

   /* emit the arrays used for inputs and outputs - load/store intrinsics will
    * be converted to reads/writes of these arrays
    */
   if (nir->num_inputs > 0) {
      nir_inputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_inputs);
      nir_setup_inputs(nir);
   }

   if (nir->num_outputs > 0) {
      nir_outputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_outputs);
      nir_setup_outputs(nir);
   }

   if (nir->num_uniforms > 0) {
      nir_setup_uniforms(nir);
   }

   nir_emit_system_values(nir);

   nir_globals = ralloc_array(mem_ctx, fs_reg, nir->reg_alloc);
   foreach_list_typed(nir_register, reg, node, &nir->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      nir_globals[reg->index] = bld.vgrf(BRW_REGISTER_TYPE_F, size);
   }

   /* get the main function and emit it */
   nir_foreach_overload(nir, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_emit_impl(overload->impl);
   }
}
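/* Illustrative note (not from the original source): once nir_setup_inputs
 * has filled the nir_inputs array, a NIR load_input intrinsic with
 * const_index[0] == 3 becomes a plain MOV from offset(nir_inputs, bld, 3)
 * in nir_emit_intrinsic() below; the arrays set up here are the only
 * indirection between the load/store intrinsics and the interpolated or
 * fetched data.
 */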
void
fs_visitor::nir_setup_inputs(nir_shader *shader)
{
   foreach_list_typed(nir_variable, var, node, &shader->inputs) {
      enum brw_reg_type type = brw_type_for_base_type(var->type);
      fs_reg input = offset(nir_inputs, bld, var->data.driver_location);

      fs_reg reg;
      switch (stage) {
      case MESA_SHADER_VERTEX: {
         /* Our ATTR file is indexed by VERT_ATTRIB_*, which is the value
          * stored in nir_variable::location.
          *
          * However, NIR's load_input intrinsics use a different index - an
          * offset into a single contiguous array containing all inputs.
          * This index corresponds to the nir_variable::driver_location field.
          *
          * So, we need to copy from fs_reg(ATTR, var->location) to
          * offset(nir_inputs, var->data.driver_location).
          */
         const glsl_type *const t = var->type->without_array();
         const unsigned components = t->components();
         const unsigned cols = t->matrix_columns;
         const unsigned elts = t->vector_elements;
         unsigned array_length = var->type->is_array() ? var->type->length : 1;
         for (unsigned i = 0; i < array_length; i++) {
            for (unsigned j = 0; j < cols; j++) {
               for (unsigned k = 0; k < elts; k++) {
                  bld.MOV(offset(retype(input, type), bld,
                                 components * i + elts * j + k),
                          offset(fs_reg(ATTR, var->data.location + i, type),
                                 bld, 4 * j + k));
               }
            }
         }
         break;
      }
      case MESA_SHADER_GEOMETRY:
      case MESA_SHADER_COMPUTE:
         unreachable("fs_visitor not used for these stages yet.");
         break;
      case MESA_SHADER_FRAGMENT:
         if (var->data.location == VARYING_SLOT_POS) {
            reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
                                                var->data.origin_upper_left);
            emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
                                      input, reg), 0xF);
         } else {
            emit_general_interpolation(input, var->name, var->type,
                                       (glsl_interp_qualifier) var->data.interpolation,
                                       var->data.location, var->data.centroid,
                                       var->data.sample);
         }
         break;
      }
   }
}
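/* Worked example for the VS copy loop above (illustrative, not from the
 * original source): for a vec3[2] input at driver_location d, t = vec3, so
 * components = 3, cols = 1, elts = 3 and array_length = 2.  Element i,
 * component k is copied from component k of ATTR register
 * var->data.location + i into offset(nir_inputs, bld, d + 3*i + k).
 */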
void
fs_visitor::nir_setup_outputs(nir_shader *shader)
{
   brw_wm_prog_key *key = (brw_wm_prog_key *) this->key;

   foreach_list_typed(nir_variable, var, node, &shader->outputs) {
      fs_reg reg = offset(nir_outputs, bld, var->data.driver_location);

      int vector_elements =
         var->type->is_array() ? var->type->fields.array->vector_elements
                               : var->type->vector_elements;

      switch (stage) {
      case MESA_SHADER_VERTEX:
         for (int i = 0; i < ALIGN(type_size(var->type), 4) / 4; i++) {
            int output = var->data.location + i;
            this->outputs[output] = offset(reg, bld, 4 * i);
            this->output_components[output] = vector_elements;
         }
         break;
      case MESA_SHADER_FRAGMENT:
         if (var->data.index > 0) {
            assert(var->data.location == FRAG_RESULT_DATA0);
            assert(var->data.index == 1);
            this->dual_src_output = reg;
            this->do_dual_src = true;
         } else if (var->data.location == FRAG_RESULT_COLOR) {
            /* Writing gl_FragColor outputs to all color regions. */
            for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
               this->outputs[i] = reg;
               this->output_components[i] = 4;
            }
         } else if (var->data.location == FRAG_RESULT_DEPTH) {
            this->frag_depth = reg;
         } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
            this->sample_mask = reg;
         } else {
            /* gl_FragData or a user-defined FS output */
            assert(var->data.location >= FRAG_RESULT_DATA0 &&
                   var->data.location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);

            /* General color output. */
            for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
               int output = var->data.location - FRAG_RESULT_DATA0 + i;
               this->outputs[output] = offset(reg, bld, vector_elements * i);
               this->output_components[output] = vector_elements;
            }
         }
         break;
      default:
         unreachable("unhandled shader stage");
      }
   }
}
void
fs_visitor::nir_setup_uniforms(nir_shader *shader)
{
   uniforms = shader->num_uniforms;
   num_direct_uniforms = shader->num_direct_uniforms;

   /* We split the uniform register file in half.  The first half is
    * entirely direct uniforms.  The second half is indirect.
    */
   param_size[0] = num_direct_uniforms;
   if (shader->num_uniforms > num_direct_uniforms)
      param_size[num_direct_uniforms] = shader->num_uniforms - num_direct_uniforms;

   if (dispatch_width != 8)
      return;

   if (shader_prog) {
      foreach_list_typed(nir_variable, var, node, &shader->uniforms) {
         /* UBO's and atomics don't take up space in the uniform file */
         if (var->interface_type != NULL || var->type->contains_atomic())
            continue;

         if (strncmp(var->name, "gl_", 3) == 0)
            nir_setup_builtin_uniform(var);
         else
            nir_setup_uniform(var);
      }
   } else {
      /* prog_to_nir doesn't create uniform variables; set param up directly. */
      for (unsigned p = 0; p < prog->Parameters->NumParameters; p++) {
         for (unsigned int i = 0; i < 4; i++) {
            stage_prog_data->param[4 * p + i] =
               &prog->Parameters->ParameterValues[p][i];
         }
      }
   }
}
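/* Illustrative example of the split above (not from the original source):
 * with num_direct_uniforms = 8 and shader->num_uniforms = 12,
 * param_size[0] = 8 describes the directly-addressed half and
 * param_size[8] = 4 the indirectly-addressed remainder, so reladdr
 * accesses only ever index within the second region.
 */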
void
fs_visitor::nir_setup_uniform(nir_variable *var)
{
   int namelen = strlen(var->name);

   /* The data for our (non-builtin) uniforms is stored in a series of
    * gl_uniform_driver_storage structs for each subcomponent that
    * glGetUniformLocation() could name.  We know it's been set up in the
    * same order we'd walk the type, so walk the list of storage and find
    * anything with our name, or the prefix of a component that starts with
    * our name.
    */
   unsigned index = var->data.driver_location;
   for (unsigned u = 0; u < shader_prog->NumUniformStorage; u++) {
      struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];

      if (storage->builtin)
         continue;

      if (strncmp(var->name, storage->name, namelen) != 0 ||
          (storage->name[namelen] != 0 &&
           storage->name[namelen] != '.' &&
           storage->name[namelen] != '[')) {
         continue;
      }

      unsigned slots = storage->type->component_slots();
      if (storage->array_elements)
         slots *= storage->array_elements;

      for (unsigned i = 0; i < slots; i++) {
         stage_prog_data->param[index++] = &storage->storage[i];
      }
   }

   /* Make sure we actually initialized the right amount of stuff here. */
   assert(var->data.driver_location + var->type->component_slots() == index);
}
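/* Illustrative example of the name match above (not from the original
 * source): for a uniform named "s", the loop accepts storage entries "s",
 * "s.field" and "s[0]" (the character after the prefix is '\0', '.' or
 * '['), but rejects "stride", whose next character 't' shows it merely
 * shares a prefix.
 */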
void
fs_visitor::nir_setup_builtin_uniform(nir_variable *var)
{
   const nir_state_slot *const slots = var->state_slots;
   assert(var->state_slots != NULL);

   unsigned uniform_index = var->data.driver_location;
   for (unsigned int i = 0; i < var->num_state_slots; i++) {
      /* This state reference has already been setup by ir_to_mesa, but we'll
       * get the same index back here.
       */
      int index = _mesa_add_state_reference(this->prog->Parameters,
                                            (gl_state_index *)slots[i].tokens);

      /* Add each of the unique swizzles of the element as a parameter.
       * This'll end up matching the expected layout of the
       * array/matrix/structure we're trying to fill in.
       */
      int last_swiz = -1;
      for (unsigned int j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);
         if (swiz == last_swiz)
            break;
         last_swiz = swiz;

         stage_prog_data->param[uniform_index++] =
            &prog->Parameters->ParameterValues[index][swiz];
      }
   }
}
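/* Illustrative example of the swizzle walk above (not from the original
 * source): a vec4 state value uses swizzle XYZW and contributes four
 * params, while a scalar one is stored as XXXX, so the loop sees X repeat
 * at j == 1 and stops after a single param -- matching the layout the
 * type walk expects.
 */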
static bool
emit_system_values_block(nir_block *block, void *void_visitor)
{
   fs_visitor *v = (fs_visitor *)void_visitor;
   fs_reg *reg;

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_vertex_id:
         unreachable("should be lowered by lower_vertex_id().");

      case nir_intrinsic_load_vertex_id_zero_base:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
         break;

      case nir_intrinsic_load_base_vertex:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_BASE_VERTEX);
         break;

      case nir_intrinsic_load_instance_id:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_INSTANCE_ID);
         break;

      case nir_intrinsic_load_sample_pos:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_samplepos_setup();
         break;

      case nir_intrinsic_load_sample_id:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_sampleid_setup();
         break;

      case nir_intrinsic_load_sample_mask_in:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         assert(v->devinfo->gen >= 7);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
         if (reg->file == BAD_FILE)
            *reg = fs_reg(retype(brw_vec8_grf(v->payload.sample_mask_in_reg, 0),
                                 BRW_REGISTER_TYPE_D));
         break;

      default:
         break;
      }
   }

   return true;
}
void
fs_visitor::nir_emit_system_values(nir_shader *shader)
{
   nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
   nir_foreach_overload(shader, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_foreach_block(overload->impl, emit_system_values_block, this);
   }
}
void
fs_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = reralloc(mem_ctx, nir_locals, fs_reg, impl->reg_alloc);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      nir_locals[reg->index] = bld.vgrf(BRW_REGISTER_TYPE_F, size);
   }

   nir_ssa_values = reralloc(mem_ctx, nir_ssa_values, fs_reg,
                             impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}
void
fs_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}
void
fs_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* first, put the condition into f0 */
   fs_inst *inst = bld.MOV(bld.null_reg_d(),
                           retype(get_nir_src(if_stmt->condition),
                                  BRW_REGISTER_TYPE_D));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   bld.IF(BRW_PREDICATE_NORMAL);

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   bld.emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   bld.emit(BRW_OPCODE_ENDIF);

   try_replace_with_sel();
}
void
fs_visitor::nir_emit_loop(nir_loop *loop)
{
   bld.emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   bld.emit(BRW_OPCODE_WHILE);
}
void
fs_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(block, instr) {
      nir_emit_instr(instr);
   }
}
void
fs_visitor::nir_emit_instr(nir_instr *instr)
{
   const fs_builder abld = bld.annotate(NULL, instr);

   switch (instr->type) {
   case nir_instr_type_alu:
      nir_emit_alu(abld, nir_instr_as_alu(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(abld, nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(abld, nir_instr_as_tex(instr));
      break;

   case nir_instr_type_load_const:
      nir_emit_load_const(abld, nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_ssa_undef:
      nir_emit_undef(abld, nir_instr_as_ssa_undef(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(abld, nir_instr_as_jump(instr));
      break;

   default:
      unreachable("unknown instruction type");
   }
}
static brw_reg_type
brw_type_for_nir_type(nir_alu_type type)
{
   switch (type) {
   case nir_type_unsigned:
      return BRW_REGISTER_TYPE_UD;
   case nir_type_bool:
   case nir_type_int:
      return BRW_REGISTER_TYPE_D;
   case nir_type_float:
      return BRW_REGISTER_TYPE_F;
   default:
      unreachable("unknown type");
   }

   return BRW_REGISTER_TYPE_F;
}
bool
fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
                                         const fs_reg &result)
{
   if (!instr->src[0].src.is_ssa ||
       instr->src[0].src.ssa->parent_instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *src0 =
      nir_instr_as_intrinsic(instr->src[0].src.ssa->parent_instr);

   if (src0->intrinsic != nir_intrinsic_load_front_face)
      return false;

   nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
   if (!value1 || fabsf(value1->f[0]) != 1.0f)
      return false;

   nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
   if (!value2 || fabsf(value2->f[0]) != 1.0f)
      return false;

   fs_reg tmp = vgrf(glsl_type::int_type);

   if (devinfo->gen >= 6) {
      /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
      fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp.1<2>W  g0.0<0,1,0>W  0x00003f80W
       *    and(8) dst<1>D    tmp<8,8,1>D   0xbf800000D
       *
       * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
       *
       * This negation looks like it's safe in practice, because bits 0:4 will
       * surely be TRIANGLES
       */

      if (value1->f[0] == -1.0f) {
         g0.negate = true;
      }

      tmp.type = BRW_REGISTER_TYPE_W;
      tmp.subreg_offset = 2;
      tmp.stride = 2;

      fs_inst *or_inst = bld.OR(tmp, g0, fs_reg(0x3f80));
      or_inst->src[1].type = BRW_REGISTER_TYPE_UW;

      tmp.type = BRW_REGISTER_TYPE_D;
      tmp.subreg_offset = 0;
      tmp.stride = 1;
   } else {
      /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
      fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp<1>D   g1.6<0,1,0>D  0x3f800000D
       *    and(8) dst<1>D   tmp<8,8,1>D   0xbf800000D
       *
       * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
       *
       * This negation looks like it's safe in practice, because bits 0:4 will
       * surely be TRIANGLES
       */

      if (value1->f[0] == -1.0f) {
         g1_6.negate = true;
      }

      bld.OR(tmp, g1_6, fs_reg(0x3f800000));
   }
   bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, fs_reg(0xbf800000));

   return true;
}
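/* Worked example for the gen6+ path above (illustrative, not from the
 * original source): the OR copies g0.0's word, with 0x3f80 merged into
 * bits 7-13, into the high word of each dword of tmp.  After the final
 * AND with 0xbf800000, a front-facing polygon (bit 15 of g0.0 clear)
 * yields 0x3f800000 = 1.0f and a back-facing one (bit 15 set) yields
 * 0xbf800000 = -1.0f; negating g0 swaps the two outcomes.
 */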
void
fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
{
   struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
   fs_inst *inst;

   fs_reg result = get_nir_dest(instr->dest.dest);
   result.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);

   fs_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      op[i] = get_nir_src(instr->src[i].src);
      op[i].type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[i]);
      op[i].abs = instr->src[i].abs;
      op[i].negate = instr->src[i].negate;
   }

   /* We get a bunch of mov's out of the from_ssa pass and they may still
    * be vectorized.  We'll handle them as a special-case.  We'll also
    * handle vecN here because it's basically the same thing.
    */
   switch (instr->op) {
   case nir_op_imov:
   case nir_op_fmov:
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4: {
      fs_reg temp = result;
      bool need_extra_copy = false;
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         if (!instr->src[i].src.is_ssa &&
             instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
            need_extra_copy = true;
            temp = bld.vgrf(result.type, 4);
            break;
         }
      }

      for (unsigned i = 0; i < 4; i++) {
         if (!(instr->dest.write_mask & (1 << i)))
            continue;

         if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
            inst = bld.MOV(offset(temp, bld, i),
                           offset(op[0], bld, instr->src[0].swizzle[i]));
         } else {
            inst = bld.MOV(offset(temp, bld, i),
                           offset(op[i], bld, instr->src[i].swizzle[0]));
         }
         inst->saturate = instr->dest.saturate;
      }

      /* In this case the source and destination registers were the same,
       * so we need to insert an extra set of moves in order to deal with
       * any swizzling of the source.
       */
      if (need_extra_copy) {
         for (unsigned i = 0; i < 4; i++) {
            if (!(instr->dest.write_mask & (1 << i)))
               continue;

            bld.MOV(offset(result, bld, i), offset(temp, bld, i));
         }
      }
      return;
   }
   default:
      break;
   }

   /* At this point, we have dealt with any instruction that operates on
    * more than a single channel.  Therefore, we can just adjust the source
    * and destination registers for that channel and emit the instruction.
    */
   unsigned channel = 0;
   if (nir_op_infos[instr->op].output_size == 0) {
      /* Since NIR is doing the scalarizing for us, we should only ever see
       * vectorized operations with a single channel.
       */
      assert(_mesa_bitcount(instr->dest.write_mask) == 1);
      channel = ffs(instr->dest.write_mask) - 1;

      result = offset(result, bld, channel);
   }

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      assert(nir_op_infos[instr->op].input_sizes[i] < 2);
      op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
   }

   switch (instr->op) {
   case nir_op_i2f:
   case nir_op_u2f:
      inst = bld.MOV(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_f2i:
   case nir_op_f2u:
      bld.MOV(result, op[0]);
      break;

   case nir_op_fsign: {
      /* AND(val, 0x80000000) gives the sign bit.
       *
       * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
       * zero.
       */
      bld.CMP(bld.null_reg_f(), op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);

      fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
      op[0].type = BRW_REGISTER_TYPE_UD;
      result.type = BRW_REGISTER_TYPE_UD;
      bld.AND(result_int, op[0], fs_reg(0x80000000u));

      inst = bld.OR(result_int, result_int, fs_reg(0x3f800000u));
      inst->predicate = BRW_PREDICATE_NORMAL;
      if (instr->dest.saturate) {
         inst = bld.MOV(result, result);
         inst->saturate = true;
      }
      break;
   }
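   /* Worked example for the fsign sequence above (illustrative, not from
    * the original source): for op[0] = -2.5f (0xc0200000) the AND keeps
    * the sign bit, giving 0x80000000, and the predicated OR merges in
    * 1.0f to produce 0xbf800000 = -1.0f.  For op[0] = 0.0f the CMP clears
    * the predicate, the OR is skipped, and the result stays
    * 0x00000000 = 0.0f.
    */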
   case nir_op_isign:
      /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *              -> non-negative val generates 0x00000000.
       * Predicated OR sets 1 if val is positive.
       */
      bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_G);
      bld.ASR(result, op[0], fs_reg(31));
      inst = bld.OR(result, result, fs_reg(1));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   case nir_op_frcp:
      inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fexp2:
      inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flog2:
      inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fsin:
      inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fcos:
      inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fddx:
      if (fs_key->high_quality_derivatives) {
         inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
      } else {
         inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx_fine:
      inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx_coarse:
      inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy:
      if (fs_key->high_quality_derivatives) {
         inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
                         fs_reg(fs_key->render_to_fbo));
      } else {
         inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
                         fs_reg(fs_key->render_to_fbo));
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy_fine:
      inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
                      fs_reg(fs_key->render_to_fbo));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy_coarse:
      inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
                      fs_reg(fs_key->render_to_fbo));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fadd:
   case nir_op_iadd:
      inst = bld.ADD(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmul:
      inst = bld.MUL(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_imul:
      bld.MUL(result, op[0], op[1]);
      break;

   case nir_op_imul_high:
   case nir_op_umul_high: {
      if (devinfo->gen >= 7)
         no16("SIMD16 explicit accumulator operands unsupported\n");

      struct brw_reg acc = retype(brw_acc_reg(dispatch_width), result.type);

      fs_inst *mul = bld.MUL(acc, op[0], op[1]);
      bld.MACH(result, op[0], op[1]);

      /* Until Gen8, integer multiplies read 32-bits from one source, and
       * 16-bits from the other, and rely on the MACH instruction to
       * generate the high bits of the result.
       *
       * On Gen8, the multiply instruction does a full 32x32-bit multiply,
       * but in order to do a 64x64-bit multiply we have to simulate the
       * previous behavior and then use a MACH instruction.
       *
       * FINISHME: Don't use source modifiers on src1.
       */
      if (devinfo->gen >= 8) {
         assert(mul->src[1].type == BRW_REGISTER_TYPE_D ||
                mul->src[1].type == BRW_REGISTER_TYPE_UD);
         if (mul->src[1].type == BRW_REGISTER_TYPE_D) {
            mul->src[1].type = BRW_REGISTER_TYPE_W;
            mul->src[1].stride = 2;
         } else {
            mul->src[1].type = BRW_REGISTER_TYPE_UW;
            mul->src[1].stride = 2;
         }
      }
      break;
   }
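   /* Note on the sequence above (illustrative): the MUL writes the full
    * product into the accumulator and MACH then reads back the high 32
    * bits, so the pair implements [iu]mul_high.  On Gen8+ src1 is narrowed
    * to a 16-bit type with stride 2 so the MUL matches the pre-Gen8
    * 32x16-bit behavior that MACH expects.
    */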
   case nir_op_idiv:
   case nir_op_udiv:
      bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
      break;

   case nir_op_uadd_carry:
      unreachable("Should have been lowered by carry_to_arith().");

   case nir_op_usub_borrow:
      unreachable("Should have been lowered by borrow_to_arith().");

   case nir_op_umod:
      bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
      break;

   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_L);
      break;

   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_GE);
      break;

   case nir_op_feq:
   case nir_op_ieq:
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_Z);
      break;

   case nir_op_fne:
   case nir_op_ine:
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ);
      break;

   case nir_op_inot:
      if (devinfo->gen >= 8) {
         resolve_source_modifiers(&op[0]);
      }
      bld.NOT(result, op[0]);
      break;
   case nir_op_ixor:
      if (devinfo->gen >= 8) {
         resolve_source_modifiers(&op[0]);
         resolve_source_modifiers(&op[1]);
      }
      bld.XOR(result, op[0], op[1]);
      break;
   case nir_op_ior:
      if (devinfo->gen >= 8) {
         resolve_source_modifiers(&op[0]);
         resolve_source_modifiers(&op[1]);
      }
      bld.OR(result, op[0], op[1]);
      break;
   case nir_op_iand:
      if (devinfo->gen >= 8) {
         resolve_source_modifiers(&op[0]);
         resolve_source_modifiers(&op[1]);
      }
      bld.AND(result, op[0], op[1]);
      break;

   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      unreachable("Lowered by nir_lower_alu_reductions");

   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_frsq:
      inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_b2i:
   case nir_op_b2f:
      bld.MOV(result, negate(op[0]));
      break;

   case nir_op_f2b:
      bld.CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);
      break;
   case nir_op_i2b:
      bld.CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
      break;

   case nir_op_ftrunc:
      inst = bld.RNDZ(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fceil: {
      op[0].negate = !op[0].negate;
      fs_reg temp = vgrf(glsl_type::float_type);
      bld.RNDD(temp, op[0]);
      temp.negate = true;
      inst = bld.MOV(result, temp);
      inst->saturate = instr->dest.saturate;
      break;
   }
   case nir_op_ffloor:
      inst = bld.RNDD(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_ffract:
      inst = bld.FRC(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fround_even:
      inst = bld.RNDE(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      if (devinfo->gen >= 6) {
         inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_L;
      } else {
         bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_L);
         inst = bld.SEL(result, op[0], op[1]);
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      if (devinfo->gen >= 6) {
         inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_GE;
      } else {
         bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_GE);
         inst = bld.SEL(result, op[0], op[1]);
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_pack_snorm_2x16:
   case nir_op_pack_snorm_4x8:
   case nir_op_pack_unorm_2x16:
   case nir_op_pack_unorm_4x8:
   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_snorm_4x8:
   case nir_op_unpack_unorm_2x16:
   case nir_op_unpack_unorm_4x8:
   case nir_op_unpack_half_2x16:
   case nir_op_pack_half_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_unpack_half_2x16_split_x:
      inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_unpack_half_2x16_split_y:
      inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fpow:
      inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bitfield_reverse:
      bld.BFREV(result, op[0]);
      break;

   case nir_op_bit_count:
      bld.CBIT(result, op[0]);
      break;

   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);

      /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
       * from the LSB side.  If FBH didn't return an error (0xFFFFFFFF), then
       * subtract the result from 31 to convert the MSB count into an LSB count.
       */

      bld.CMP(bld.null_reg_d(), result, fs_reg(-1), BRW_CONDITIONAL_NZ);
      fs_reg neg_result(result);
      neg_result.negate = true;
      inst = bld.ADD(result, neg_result, fs_reg(31));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }
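   /* Worked example for the findMSB conversion above (illustrative, not
    * from the original source): for op[0] = 0x00000100, FBH counts 23
    * leading zeros from the MSB side; the predicated ADD computes
    * 31 - 23 = 8, the LSB-side bit index GLSL expects.  For op[0] = 0,
    * FBH returns 0xFFFFFFFF, the CMP fails, and the error value is passed
    * through unchanged.
    */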
   case nir_op_find_lsb:
      bld.FBL(result, op[0]);
      break;

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      bld.BFE(result, op[2], op[1], op[0]);
      break;
   case nir_op_bfm:
      bld.BFI1(result, op[0], op[1]);
      break;
   case nir_op_bfi:
      bld.BFI2(result, op[0], op[1], op[2]);
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should be handled by "
                  "lower_instructions::bitfield_insert_to_bfm_bfi");

   case nir_op_ishl:
      bld.SHL(result, op[0], op[1]);
      break;
   case nir_op_ishr:
      bld.ASR(result, op[0], op[1]);
      break;
   case nir_op_ushr:
      bld.SHR(result, op[0], op[1]);
      break;

   case nir_op_pack_half_2x16_split:
      bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
      break;

   case nir_op_ffma:
      inst = bld.MAD(result, op[2], op[1], op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flrp:
      inst = bld.LRP(result, op[0], op[1], op[2]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bcsel:
      if (optimize_frontfacing_ternary(instr, result))
         return;

      bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
      inst = bld.SEL(result, op[1], op[2]);
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   default:
      unreachable("unhandled instruction");
   }

   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0.
    */
   if (devinfo->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      fs_reg masked = vgrf(glsl_type::int_type);
      bld.AND(masked, result, fs_reg(1));
      masked.negate = true;
      bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
   }
}
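/* Worked example for the boolean resolve above (illustrative, not from
 * the original source): on Gen4/5 only the low bit of a comparison result
 * is guaranteed, so AND(result, 1) isolates it and the negated MOV
 * sign-extends it: 1 becomes -1 = 0xffffffff (~0), 0 stays 0, matching
 * the 0/~0 booleans NIR expects.
 */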
void
fs_visitor::nir_emit_load_const(const fs_builder &bld,
                                nir_load_const_instr *instr)
{
   fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_D, instr->def.num_components);

   for (unsigned i = 0; i < instr->def.num_components; i++)
      bld.MOV(offset(reg, bld, i), fs_reg(instr->value.i[i]));

   nir_ssa_values[instr->def.index] = reg;
}
void
fs_visitor::nir_emit_undef(const fs_builder &bld, nir_ssa_undef_instr *instr)
{
   nir_ssa_values[instr->def.index] = bld.vgrf(BRW_REGISTER_TYPE_D,
                                               instr->def.num_components);
}
static fs_reg
fs_reg_for_nir_reg(fs_visitor *v, nir_register *nir_reg,
                   unsigned base_offset, nir_src *indirect)
{
   fs_reg reg;
   if (nir_reg->is_global)
      reg = v->nir_globals[nir_reg->index];
   else
      reg = v->nir_locals[nir_reg->index];

   reg = offset(reg, v->bld, base_offset * nir_reg->num_components);
   if (indirect) {
      int multiplier = nir_reg->num_components * (v->dispatch_width / 8);

      reg.reladdr = new(v->mem_ctx) fs_reg(v->vgrf(glsl_type::int_type));
      v->bld.MUL(*reg.reladdr, v->get_nir_src(*indirect),
                 fs_reg(multiplier));
   }

   return reg;
}
fs_reg
fs_visitor::get_nir_src(nir_src src)
{
   fs_reg reg;
   if (src.is_ssa) {
      reg = nir_ssa_values[src.ssa->index];
   } else {
      reg = fs_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
                               src.reg.indirect);
   }

   /* to avoid floating-point denorm flushing problems, set the type by
    * default to D - instructions that need floating point semantics will set
    * this to F if they need to
    */
   return retype(reg, BRW_REGISTER_TYPE_D);
}
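/* Note on the D retype above (an illustrative aside, not from the original
 * source): returning the register typed D makes plain copies bit-exact
 * integer moves.  A MOV typed F can flush float denormal values to zero on
 * this hardware, which would corrupt integer data that merely happens to
 * look like a float denorm.
 */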
fs_reg
fs_visitor::get_nir_dest(nir_dest dest)
{
   if (dest.is_ssa) {
      nir_ssa_values[dest.ssa.index] = bld.vgrf(BRW_REGISTER_TYPE_F,
                                                dest.ssa.num_components);
      return nir_ssa_values[dest.ssa.index];
   } else {
      return fs_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
                                dest.reg.indirect);
   }
}
&bld
, const fs_inst
&inst
,
1214 for (unsigned i
= 0; i
< 4; i
++) {
1215 if (!((wr_mask
>> i
) & 1))
1218 fs_inst
*new_inst
= new(mem_ctx
) fs_inst(inst
);
1219 new_inst
->dst
= offset(new_inst
->dst
, bld
, i
);
1220 for (unsigned j
= 0; j
< new_inst
->sources
; j
++)
1221 if (new_inst
->src
[j
].file
== GRF
)
1222 new_inst
->src
[j
] = offset(new_inst
->src
[j
], bld
, i
);
void
fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
{
   fs_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   bool has_indirect = false;

   switch (instr->intrinsic) {
   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if: {
      /* We track our discarded pixels in f0.1.  By predicating on it, we can
       * update just the flag bits that aren't yet discarded.  If there's no
       * condition, we emit a CMP of g0 != g0, so all currently executing
       * channels will get turned off.
       */
      fs_inst *cmp;
      if (instr->intrinsic == nir_intrinsic_discard_if) {
         cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
                       fs_reg(0), BRW_CONDITIONAL_Z);
      } else {
         fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
                                         BRW_REGISTER_TYPE_UW));
         cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
      }
      cmp->predicate = BRW_PREDICATE_NORMAL;
      cmp->flag_subreg = 1;

      if (devinfo->gen >= 6) {
         emit_discard_jump();
      }
      break;
   }

   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_dec:
   case nir_intrinsic_atomic_counter_read: {
      unsigned surf_index = prog_data->binding_table.abo_start +
                            (unsigned) instr->const_index[0];
      fs_reg offset = fs_reg(get_nir_src(instr->src[0]));

      switch (instr->intrinsic) {
      case nir_intrinsic_atomic_counter_inc:
         emit_untyped_atomic(BRW_AOP_INC, surf_index, dest, offset,
                             fs_reg(), fs_reg());
         break;
      case nir_intrinsic_atomic_counter_dec:
         emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dest, offset,
                             fs_reg(), fs_reg());
         break;
      case nir_intrinsic_atomic_counter_read:
         emit_untyped_surface_read(surf_index, dest, offset);
         break;
      default:
         unreachable("Unreachable");
      }
      break;
   }

   case nir_intrinsic_load_front_face:
      bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
              *emit_frontfacing_interpolation());
      break;

   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id()");

   case nir_intrinsic_load_vertex_id_zero_base: {
      fs_reg vertex_id = nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
      assert(vertex_id.file != BAD_FILE);
      dest.type = vertex_id.type;
      bld.MOV(dest, vertex_id);
      break;
   }

   case nir_intrinsic_load_base_vertex: {
      fs_reg base_vertex = nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
      assert(base_vertex.file != BAD_FILE);
      dest.type = base_vertex.type;
      bld.MOV(dest, base_vertex);
      break;
   }

   case nir_intrinsic_load_instance_id: {
      fs_reg instance_id = nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
      assert(instance_id.file != BAD_FILE);
      dest.type = instance_id.type;
      bld.MOV(dest, instance_id);
      break;
   }

   case nir_intrinsic_load_sample_mask_in: {
      fs_reg sample_mask_in = nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
      assert(sample_mask_in.file != BAD_FILE);
      dest.type = sample_mask_in.type;
      bld.MOV(dest, sample_mask_in);
      break;
   }

   case nir_intrinsic_load_sample_pos: {
      fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
      assert(sample_pos.file != BAD_FILE);
      dest.type = sample_pos.type;
      bld.MOV(dest, sample_pos);
      bld.MOV(offset(dest, bld, 1), offset(sample_pos, bld, 1));
      break;
   }

   case nir_intrinsic_load_sample_id: {
      fs_reg sample_id = nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
      assert(sample_id.file != BAD_FILE);
      dest.type = sample_id.type;
      bld.MOV(dest, sample_id);
      break;
   }

   case nir_intrinsic_load_uniform_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_uniform: {
      unsigned index = instr->const_index[0];

      fs_reg uniform_reg;
      if (index < num_direct_uniforms) {
         uniform_reg = fs_reg(UNIFORM, 0);
      } else {
         uniform_reg = fs_reg(UNIFORM, num_direct_uniforms);
         index -= num_direct_uniforms;
      }

      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg src = offset(retype(uniform_reg, dest.type), bld, index);
         if (has_indirect)
            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
         index++;

         bld.MOV(dest, src);
         dest = offset(dest, bld, 1);
      }
      break;
   }
   case nir_intrinsic_load_ubo_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_ubo: {
      nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
      fs_reg surf_index;

      if (const_index) {
         surf_index = fs_reg(stage_prog_data->binding_table.ubo_start +
                             const_index->u[0]);
      } else {
         /* The block index is not a constant.  Evaluate the index expression
          * per-channel and add the base UBO index; we have to select a value
          * from any live channel.
          */
         surf_index = vgrf(glsl_type::uint_type);
         bld.ADD(surf_index, get_nir_src(instr->src[0]),
                 fs_reg(stage_prog_data->binding_table.ubo_start));
         bld.emit_uniformize(surf_index, surf_index);

         /* Assume this may touch any UBO.  It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(prog_data,
                               stage_prog_data->binding_table.ubo_start +
                               shader_prog->NumUniformBlocks - 1);
      }

      if (has_indirect) {
         /* Turn the byte offset into a dword offset. */
         fs_reg base_offset = vgrf(glsl_type::int_type);
         bld.SHR(base_offset, retype(get_nir_src(instr->src[1]),
                                     BRW_REGISTER_TYPE_D),
                 fs_reg(2));

         unsigned vec4_offset = instr->const_index[0] / 4;
         for (int i = 0; i < instr->num_components; i++)
            VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
                                       base_offset, vec4_offset + i);
      } else {
         fs_reg packed_consts = vgrf(glsl_type::float_type);
         packed_consts.type = dest.type;

         fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
         bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
                  surf_index, const_offset_reg);

         for (unsigned i = 0; i < instr->num_components; i++) {
            packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i);

            /* The std140 packing rules don't allow vectors to cross 16-byte
             * boundaries, and a reg is 32 bytes.
             */
            assert(packed_consts.subreg_offset < 32);

            bld.MOV(dest, packed_consts);
            dest = offset(dest, bld, 1);
         }
      }
      break;
   }
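   /* Worked example for the constant-offset pull above (illustrative, not
    * from the original source): a vec2 at const_index[0] = 20 bytes loads
    * the 16-byte-aligned chunk at byte 16 (20 & ~15), then set_smear picks
    * components 20 % 16 / 4 = 1 and 2 of that chunk -- bytes 20-27 -- one
    * component per MOV.
    */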
   case nir_intrinsic_load_input_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_input: {
      unsigned index = 0;
      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg src = offset(retype(nir_inputs, dest.type), bld,
                             instr->const_index[0] + index);
         if (has_indirect)
            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
         index++;

         bld.MOV(dest, src);
         dest = offset(dest, bld, 1);
      }
      break;
   }

   /* Handle ARB_gpu_shader5 interpolation intrinsics
    *
    * It's worth a quick word of explanation as to why we handle the full
    * variable-based interpolation intrinsic rather than a lowered version
    * with just an index like we do for other inputs.  We have to do that
    * because the way we set up inputs doesn't allow us to use the already
    * setup inputs for interpolation.  At the beginning of the shader, we
    * go through all of the input variables and do the initial interpolation
    * and put it in the nir_inputs array based on its location as determined
    * in nir_lower_io.  If the input isn't used, dead code cleans up and
    * everything works fine.  However, when we get to the ARB_gpu_shader5
    * interpolation intrinsics, we need to reinterpolate the input
    * differently.  If we used an intrinsic that just had an index it would
    * only give us the offset into the nir_inputs array.  However, this is
    * useless because that value is post-interpolation and we need
    * pre-interpolation.  In order to get the actual location of the bits
    * we get from the vertex fetching hardware, we need the variable.
    */
   case nir_intrinsic_interp_var_at_centroid:
   case nir_intrinsic_interp_var_at_sample:
   case nir_intrinsic_interp_var_at_offset: {
      assert(stage == MESA_SHADER_FRAGMENT);

      ((struct brw_wm_prog_data *) prog_data)->pulls_bary = true;

      fs_reg dst_xy = bld.vgrf(BRW_REGISTER_TYPE_F, 2);

      /* For most messages, we need one reg of ignored data; the hardware
       * requires mlen==1 even when there is no payload.  In the per-slot
       * offset case, we'll replace this with the proper source data.
       */
      fs_reg src = vgrf(glsl_type::float_type);
      int mlen = 1;     /* one reg unless overriden */
      fs_inst *inst;

      switch (instr->intrinsic) {
      case nir_intrinsic_interp_var_at_centroid:
         inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_CENTROID,
                         dst_xy, src, fs_reg(0u));
         break;

      case nir_intrinsic_interp_var_at_sample: {
         /* XXX: We should probably handle non-constant sample id's */
         nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
         assert(const_sample);
         unsigned msg_data = const_sample ? const_sample->i[0] << 4 : 0;
         inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_SAMPLE, dst_xy, src,
                         fs_reg(msg_data));
         break;
      }

      case nir_intrinsic_interp_var_at_offset: {
         nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);

         if (const_offset) {
            unsigned off_x = MIN2((int)(const_offset->f[0] * 16), 7) & 0xf;
            unsigned off_y = MIN2((int)(const_offset->f[1] * 16), 7) & 0xf;

            inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, dst_xy, src,
                            fs_reg(off_x | (off_y << 4)));
         } else {
            src = vgrf(glsl_type::ivec2_type);
            fs_reg offset_src = retype(get_nir_src(instr->src[0]),
                                       BRW_REGISTER_TYPE_F);
            for (int i = 0; i < 2; i++) {
               fs_reg temp = vgrf(glsl_type::float_type);
               bld.MUL(temp, offset(offset_src, bld, i), fs_reg(16.0f));
               fs_reg itemp = vgrf(glsl_type::int_type);
               bld.MOV(itemp, temp);  /* float to int */

               /* Clamp the upper end of the range to +7/16.
                * ARB_gpu_shader5 requires that we support a maximum offset
                * of +0.5, which isn't representable in a S0.4 value -- if
                * we didn't clamp it, we'd end up with -8/16, which is the
                * opposite of what the shader author wanted.
                *
                * This is legal due to ARB_gpu_shader5's quantization
                * rules:
                *
                * "Not all values of <offset> may be supported; x and y
                * offsets may be rounded to fixed-point values with the
                * number of fraction bits given by the
                * implementation-dependent constant
                * FRAGMENT_INTERPOLATION_OFFSET_BITS"
                */
               set_condmod(BRW_CONDITIONAL_L,
                           bld.SEL(offset(src, bld, i), itemp, fs_reg(7)));
            }

            mlen = 2 * dispatch_width / 8;
            inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET, dst_xy, src,
                            fs_reg(0u));
         }
         break;
      }

      default:
         unreachable("Invalid intrinsic");
      }

      inst->mlen = mlen;
      /* 2 floats per slot returned */
      inst->regs_written = 2 * dispatch_width / 8;
      inst->pi_noperspective = instr->variables[0]->var->data.interpolation ==
                               INTERP_QUALIFIER_NOPERSPECTIVE;

      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
         src.type = dest.type;

         bld.emit(FS_OPCODE_LINTERP, dest, dst_xy, src);
         dest = offset(dest, bld, 1);
      }
      break;
   }
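   /* Worked example for the S0.4 clamp above (illustrative, not from the
    * original source): an offset component of +0.5 scales to 0.5 * 16 = 8,
    * which doesn't fit the 4-bit signed field (range -8..+7 sixteenths)
    * and would wrap to -8/16.  The SEL with conditional mod L picks
    * MIN(8, 7) = 7, i.e. +7/16, the closest representable offset.
    */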
   case nir_intrinsic_store_output_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_store_output: {
      fs_reg src = get_nir_src(instr->src[0]);
      unsigned index = 0;
      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg new_dest = offset(retype(nir_outputs, src.type), bld,
                                  instr->const_index[0] + index);
         if (has_indirect)
            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
         index++;

         bld.MOV(new_dest, src);
         src = offset(src, bld, 1);
      }
      break;
   }
   case nir_intrinsic_barrier:
      emit_barrier();
      break;

   default:
      unreachable("unknown intrinsic");
   }
}
void
fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
{
   unsigned sampler = instr->sampler_index;
   fs_reg sampler_reg(sampler);

   /* FINISHME: We're failing to recompile our programs when the sampler is
    * updated.  This only matters for the texture rectangle scale parameters
    * (pre-gen6, or gen6+ with GL_CLAMP).
    */
   int texunit = prog->SamplerUnits[sampler];

   int gather_component = instr->component;

   bool is_rect = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;

   bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
                        instr->is_array;

   int lod_components = 0;
   int UNUSED offset_components = 0;

   fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, tex_offset;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      fs_reg src = get_nir_src(instr->src[i].src);
      switch (instr->src[i].src_type) {
      case nir_tex_src_bias:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_comparitor:
         shadow_comparitor = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_coord:
         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
            coordinate = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            coordinate = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ddx:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         lod_components = nir_tex_instr_src_size(instr, i);
         break;
      case nir_tex_src_ddy:
         lod2 = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
            lod = retype(src, BRW_REGISTER_TYPE_UD);
            break;
         case nir_texop_txf:
            lod = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            lod = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ms_index:
         sample_index = retype(src, BRW_REGISTER_TYPE_UD);
         break;
      case nir_tex_src_offset:
         tex_offset = retype(src, BRW_REGISTER_TYPE_D);
         if (instr->is_array)
            offset_components = instr->coord_components - 1;
         else
            offset_components = instr->coord_components;
         break;
      case nir_tex_src_projector:
         unreachable("should be lowered");

      case nir_tex_src_sampler_offset: {
         /* Figure out the highest possible sampler index and mark it as used */
         uint32_t max_used = sampler + instr->sampler_array_size - 1;
         if (instr->op == nir_texop_tg4 && devinfo->gen < 8) {
            max_used += stage_prog_data->binding_table.gather_texture_start;
         } else {
            max_used += stage_prog_data->binding_table.texture_start;
         }
         brw_mark_surface_used(prog_data, max_used);

         /* Emit code to evaluate the actual indexing expression */
         sampler_reg = vgrf(glsl_type::uint_type);
         bld.ADD(sampler_reg, src, fs_reg(sampler));
         bld.emit_uniformize(sampler_reg, sampler_reg);
         break;
      }

      default:
         unreachable("unknown texture source");
      }
   }
   if (instr->op == nir_texop_txf_ms) {
      if (devinfo->gen >= 7 &&
          key_tex->compressed_multisample_layout_mask & (1 << sampler)) {
         mcs = emit_mcs_fetch(coordinate, instr->coord_components, sampler_reg);
      } else {
         mcs = fs_reg(0u);
      }
   }

   for (unsigned i = 0; i < 3; i++) {
      if (instr->const_offset[i] != 0) {
         assert(offset_components == 0);
         tex_offset = fs_reg(brw_texture_offset(instr->const_offset, 3));
         break;
      }
   }
   enum glsl_base_type dest_base_type;
   switch (instr->dest_type) {
   case nir_type_float:
      dest_base_type = GLSL_TYPE_FLOAT;
      break;
   case nir_type_int:
      dest_base_type = GLSL_TYPE_INT;
      break;
   case nir_type_unsigned:
      dest_base_type = GLSL_TYPE_UINT;
      break;
   default:
      unreachable("bad type");
   }

   const glsl_type *dest_type =
      glsl_type::get_instance(dest_base_type, nir_tex_instr_dest_size(instr),
                              1);

   ir_texture_opcode op;
   switch (instr->op) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   default:
      unreachable("unknown texture opcode");
   }

   emit_texture(op, dest_type, coordinate, instr->coord_components,
                shadow_comparitor, lod, lod2, lod_components, sample_index,
                tex_offset, mcs, gather_component,
                is_cube_array, is_rect, sampler, sampler_reg, texunit);

   fs_reg dest = get_nir_dest(instr->dest);
   dest.type = this->result.type;
   unsigned num_components = nir_tex_instr_dest_size(instr);
   emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
                             dest, this->result),
                (1 << num_components) - 1);
}
void
fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      bld.emit(BRW_OPCODE_BREAK);
      break;
   case nir_jump_continue:
      bld.emit(BRW_OPCODE_CONTINUE);
      break;
   case nir_jump_return:
   default:
      unreachable("unknown jump");
   }
}