/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
27 #include "brw_shader.h"
30 #include "brw_ir_vec4.h"
31 #include "brw_vec4_builder.h"
34 #include "compiler/glsl/ir.h"
35 #include "compiler/nir/nir.h"
43 brw_vec4_generate_assembly(const struct brw_compiler
*compiler
,
46 const nir_shader
*nir
,
47 struct brw_vue_prog_data
*prog_data
,
48 const struct cfg_t
*cfg
);
55 class vec4_live_variables
;
/**
 * The vertex shader front-end.
 *
 * Translates either GLSL IR or Mesa IR (for ARB_vertex_program and
 * fixed-function) into VS IR.
 */
63 class vec4_visitor
: public backend_shader
66 vec4_visitor(const struct brw_compiler
*compiler
,
68 const struct brw_sampler_prog_key_data
*key
,
69 struct brw_vue_prog_data
*prog_data
,
70 const nir_shader
*shader
,
73 int shader_time_index
);
77 return dst_reg(brw_null_reg());
82 return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_DF
));
87 return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_D
));
92 return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD
));
95 const struct brw_sampler_prog_key_data
* const key_tex
;
96 struct brw_vue_prog_data
* const prog_data
;
/**
 * GLSL IR currently being processed, which is associated with our
 * driver IR instructions for debugging purposes.
 */
105 const char *current_annotation
;
107 int first_non_payload_grf
;
108 unsigned int max_grf
;
109 int *virtual_grf_start
;
110 int *virtual_grf_end
;
111 brw::vec4_live_variables
*live_intervals
;
112 dst_reg userplane
[MAX_CLIP_PLANES
];
114 bool need_all_constants_in_pull_buffer
;
/* Regs for vertex results. Generated at ir_variable visiting time
 * for the ir->location's used.
 */
119 dst_reg output_reg
[VARYING_SLOT_TESS_MAX
][4];
120 unsigned output_num_components
[VARYING_SLOT_TESS_MAX
][4];
121 const char *output_reg_annotation
[VARYING_SLOT_TESS_MAX
];
124 src_reg shader_start_time
;
127 void fail(const char *msg
, ...);
129 int setup_uniforms(int payload_reg
);
131 bool reg_allocate_trivial();
133 void evaluate_spill_costs(float *spill_costs
, bool *no_spill
);
134 int choose_spill_reg(struct ra_graph
*g
);
135 void spill_reg(unsigned spill_reg
);
136 void move_grf_array_access_to_scratch();
137 void move_uniform_array_access_to_pull_constants();
138 void move_push_constants_to_pull_constants();
139 void split_uniform_registers();
140 void pack_uniform_registers();
141 void calculate_live_intervals();
142 void invalidate_live_intervals();
143 void split_virtual_grfs();
144 bool opt_vector_float();
145 bool opt_reduce_swizzle();
146 bool dead_code_eliminate();
147 int var_range_start(unsigned v
, unsigned n
) const;
148 int var_range_end(unsigned v
, unsigned n
) const;
149 bool virtual_grf_interferes(int a
, int b
);
150 bool opt_cmod_propagation();
151 bool opt_copy_propagation(bool do_constant_prop
= true);
152 bool opt_cse_local(bblock_t
*block
);
154 bool opt_algebraic();
155 bool opt_register_coalesce();
156 bool eliminate_find_live_channel();
157 bool is_dep_ctrl_unsafe(const vec4_instruction
*inst
);
158 void opt_set_dependency_control();
159 void opt_schedule_instructions();
160 void convert_to_hw_regs();
161 void fixup_3src_null_dest();
163 bool is_supported_64bit_region(vec4_instruction
*inst
, unsigned arg
);
164 bool lower_simd_width();
166 bool lower_64bit_mad_to_mul_add();
167 void apply_logical_swizzle(struct brw_reg
*hw_reg
,
168 vec4_instruction
*inst
, int arg
);
170 vec4_instruction
*emit(vec4_instruction
*inst
);
172 vec4_instruction
*emit(enum opcode opcode
);
173 vec4_instruction
*emit(enum opcode opcode
, const dst_reg
&dst
);
174 vec4_instruction
*emit(enum opcode opcode
, const dst_reg
&dst
,
175 const src_reg
&src0
);
176 vec4_instruction
*emit(enum opcode opcode
, const dst_reg
&dst
,
177 const src_reg
&src0
, const src_reg
&src1
);
178 vec4_instruction
*emit(enum opcode opcode
, const dst_reg
&dst
,
179 const src_reg
&src0
, const src_reg
&src1
,
180 const src_reg
&src2
);
182 vec4_instruction
*emit_before(bblock_t
*block
,
183 vec4_instruction
*inst
,
184 vec4_instruction
*new_inst
);
186 #define EMIT1(op) vec4_instruction *op(const dst_reg &, const src_reg &);
187 #define EMIT2(op) vec4_instruction *op(const dst_reg &, const src_reg &, const src_reg &);
188 #define EMIT3(op) vec4_instruction *op(const dst_reg &, const src_reg &, const src_reg &, const src_reg &);
210 vec4_instruction
*CMP(dst_reg dst
, src_reg src0
, src_reg src1
,
211 enum brw_conditional_mod condition
);
212 vec4_instruction
*IF(src_reg src0
, src_reg src1
,
213 enum brw_conditional_mod condition
);
214 vec4_instruction
*IF(enum brw_predicate predicate
);
234 int implied_mrf_writes(vec4_instruction
*inst
);
236 vec4_instruction
*emit_minmax(enum brw_conditional_mod conditionalmod
, dst_reg dst
,
237 src_reg src0
, src_reg src1
);
239 vec4_instruction
*emit_lrp(const dst_reg
&dst
, const src_reg
&x
,
240 const src_reg
&y
, const src_reg
&a
);
/**
 * Copy any live channel from \p src to the first channel of the
 * result.
 */
246 src_reg
emit_uniformize(const src_reg
&src
);
248 src_reg
fix_3src_operand(const src_reg
&src
);
249 src_reg
resolve_source_modifiers(const src_reg
&src
);
251 vec4_instruction
*emit_math(enum opcode opcode
, const dst_reg
&dst
, const src_reg
&src0
,
252 const src_reg
&src1
= src_reg());
254 src_reg
fix_math_operand(const src_reg
&src
);
256 void emit_pack_half_2x16(dst_reg dst
, src_reg src0
);
257 void emit_unpack_half_2x16(dst_reg dst
, src_reg src0
);
258 void emit_unpack_unorm_4x8(const dst_reg
&dst
, src_reg src0
);
259 void emit_unpack_snorm_4x8(const dst_reg
&dst
, src_reg src0
);
260 void emit_pack_unorm_4x8(const dst_reg
&dst
, const src_reg
&src0
);
261 void emit_pack_snorm_4x8(const dst_reg
&dst
, const src_reg
&src0
);
263 void emit_texture(ir_texture_opcode op
,
265 const glsl_type
*dest_type
,
267 int coord_components
,
268 src_reg shadow_comparator
,
269 src_reg lod
, src_reg lod2
,
270 src_reg sample_index
,
271 uint32_t constant_offset
,
272 src_reg offset_value
,
274 uint32_t surface
, src_reg surface_reg
,
275 src_reg sampler_reg
);
277 src_reg
emit_mcs_fetch(const glsl_type
*coordinate_type
, src_reg coordinate
,
279 void emit_gen6_gather_wa(uint8_t wa
, dst_reg dst
);
281 void emit_ndc_computation();
282 void emit_psiz_and_flags(dst_reg reg
);
283 vec4_instruction
*emit_generic_urb_slot(dst_reg reg
, int varying
, int comp
);
284 virtual void emit_urb_slot(dst_reg reg
, int varying
);
286 void emit_shader_time_begin();
287 void emit_shader_time_end();
288 void emit_shader_time_write(int shader_time_subindex
, src_reg value
);
290 src_reg
get_scratch_offset(bblock_t
*block
, vec4_instruction
*inst
,
291 src_reg
*reladdr
, int reg_offset
);
292 void emit_scratch_read(bblock_t
*block
, vec4_instruction
*inst
,
296 void emit_scratch_write(bblock_t
*block
, vec4_instruction
*inst
,
298 void emit_pull_constant_load(bblock_t
*block
, vec4_instruction
*inst
,
303 void emit_pull_constant_load_reg(dst_reg dst
,
306 bblock_t
*before_block
,
307 vec4_instruction
*before_inst
);
308 src_reg
emit_resolve_reladdr(int scratch_loc
[], bblock_t
*block
,
309 vec4_instruction
*inst
, src_reg src
);
311 void resolve_ud_negate(src_reg
*reg
);
315 src_reg
get_timestamp();
317 void dump_instruction(backend_instruction
*inst
);
318 void dump_instruction(backend_instruction
*inst
, FILE *file
);
320 bool is_high_sampler(src_reg sampler
);
322 bool optimize_predicate(nir_alu_instr
*instr
, enum brw_predicate
*predicate
);
324 void emit_conversion_from_double(dst_reg dst
, src_reg src
, bool saturate
);
325 void emit_conversion_to_double(dst_reg dst
, src_reg src
, bool saturate
);
327 vec4_instruction
*shuffle_64bit_data(dst_reg dst
, src_reg src
,
329 bblock_t
*block
= NULL
,
330 vec4_instruction
*ref
= NULL
);
332 virtual void emit_nir_code();
333 virtual void nir_setup_uniforms();
334 virtual void nir_emit_impl(nir_function_impl
*impl
);
335 virtual void nir_emit_cf_list(exec_list
*list
);
336 virtual void nir_emit_if(nir_if
*if_stmt
);
337 virtual void nir_emit_loop(nir_loop
*loop
);
338 virtual void nir_emit_block(nir_block
*block
);
339 virtual void nir_emit_instr(nir_instr
*instr
);
340 virtual void nir_emit_load_const(nir_load_const_instr
*instr
);
341 src_reg
get_nir_ssbo_intrinsic_index(nir_intrinsic_instr
*instr
);
342 virtual void nir_emit_intrinsic(nir_intrinsic_instr
*instr
);
343 virtual void nir_emit_alu(nir_alu_instr
*instr
);
344 virtual void nir_emit_jump(nir_jump_instr
*instr
);
345 virtual void nir_emit_texture(nir_tex_instr
*instr
);
346 virtual void nir_emit_undef(nir_ssa_undef_instr
*instr
);
347 virtual void nir_emit_ssbo_atomic(int op
, nir_intrinsic_instr
*instr
);
349 dst_reg
get_nir_dest(const nir_dest
&dest
, enum brw_reg_type type
);
350 dst_reg
get_nir_dest(const nir_dest
&dest
, nir_alu_type type
);
351 dst_reg
get_nir_dest(const nir_dest
&dest
);
352 src_reg
get_nir_src(const nir_src
&src
, enum brw_reg_type type
,
353 unsigned num_components
= 4);
354 src_reg
get_nir_src(const nir_src
&src
, nir_alu_type type
,
355 unsigned num_components
= 4);
356 src_reg
get_nir_src(const nir_src
&src
,
357 unsigned num_components
= 4);
358 src_reg
get_nir_src_imm(const nir_src
&src
);
359 src_reg
get_indirect_offset(nir_intrinsic_instr
*instr
);
362 dst_reg
*nir_ssa_values
;
366 void setup_payload_interference(struct ra_graph
*g
, int first_payload_node
,
368 virtual void setup_payload() = 0;
369 virtual void emit_prolog() = 0;
370 virtual void emit_thread_end() = 0;
371 virtual void emit_urb_write_header(int mrf
) = 0;
372 virtual vec4_instruction
*emit_urb_write_opcode(bool complete
) = 0;
373 virtual void gs_emit_vertex(int stream_id
);
374 virtual void gs_end_primitive();
/**
 * If true, then register allocation should fail instead of spilling.
 */
380 const bool no_spills
;
382 int shader_time_index
;
384 unsigned last_scratch
; /**< measured in 32-byte (register size) units */
387 } /* namespace brw */
388 #endif /* __cplusplus */
390 #endif /* BRW_VEC4_H */