/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
28 #include "brw_shader.h"
29 #include "main/compiler.h"
30 #include "program/hash_table.h"
31 #include "brw_program.h"
34 #include "brw_ir_vec4.h"
39 #include "brw_context.h"
41 #include "intel_asm_annotation.h"
48 #include "glsl/nir/nir.h"
/* Declaration of the helper that fills in a brw_vue_prog_key for shader
 * precompilation (per its name; the body is not visible in this chunk).
 * NOTE(review): the return type sits on an original line missing from this
 * extraction — presumably void; confirm against the full header.
 * Parameters: the GL context, the key to populate, the program id, and the
 * gl_program being precompiled. */
56 brw_vue_setup_prog_key_for_precompile(struct gl_context
*ctx
,
57 struct brw_vue_prog_key
*key
,
58 GLuint id
, struct gl_program
*prog
);
/* Forward declaration only: vec4_live_variables is referenced below solely
 * through a pointer member (live_intervals), so its full definition is not
 * required in this header.  The stray "65" line-number artifact and the
 * broken line split from the extraction are removed here. */
class vec4_live_variables;
/**
 * The vertex shader front-end.
 *
 * Translates either GLSL IR or Mesa IR (for ARB_vertex_program and
 * fixed-function) into VS IR.
 */
/* The vec4 visitor: derives from backend_shader and from ir_visitor
 * (multiple inheritance), and — per the surviving doc comment above —
 * translates GLSL IR / Mesa IR into VS IR.
 * NOTE(review): this chunk has extraction gaps — the opening brace and
 * access-specifier lines (and at least one constructor parameter between
 * "compiler" and "prog", original line 77) are missing below. */
73 class vec4_visitor
: public backend_shader
, public ir_visitor
/* Constructor declaration: takes the compiler handle, the program, its
 * VUE key and output prog_data, the owning gl_shader_program, the shader
 * stage, and a shader_time index (used by the emit_shader_time_* methods
 * declared later in this class). */
76 vec4_visitor(const struct brw_compiler
*compiler
,
78 struct gl_program
*prog
,
79 const struct brw_vue_prog_key
*key
,
80 struct brw_vue_prog_data
*prog_data
,
81 struct gl_shader_program
*shader_prog
,
82 gl_shader_stage stage
,
85 int shader_time_index
);
/* Bodies of three inline helpers returning the null register as a dst_reg
 * in three types: untyped, retyped to D (signed dword), and retyped to UD
 * (unsigned dword).  NOTE(review): their signatures (presumably
 * dst_null_f/dst_null_d/dst_null_ud or similar) fall on original lines
 * missing from this extraction — confirm names in the full header. */
90 return dst_reg(brw_null_reg());
95 return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_D
));
100 return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD
));
/* Compilation inputs/outputs: the (const) VUE program key this shader was
 * compiled against, and the prog_data being filled in. */
103 const struct brw_vue_prog_key
* const key
;
104 struct brw_vue_prog_data
* const prog_data
;
/* NOTE(review): purpose not visible here — presumably a parameter count
 * recorded for a sanity check; confirm in the implementation. */
105 unsigned int sanity_param_count
;
/* Annotation string for the GLSL IR currently being processed (see the
 * surviving doc fragment just above its declaration). */
111 * GLSL IR currently being processed, which is associated with our
112 * driver IR instructions for debugging purposes.
115 const char *current_annotation
;
/* Register-allocation bookkeeping: first GRF past the payload, the GRF
 * ceiling, per-virtual-GRF start/end arrays, and the live-interval
 * analysis object (held by pointer; see the forward declaration above). */
117 int first_non_payload_grf
;
118 unsigned int max_grf
;
119 int *virtual_grf_start
;
120 int *virtual_grf_end
;
121 brw::vec4_live_variables
*live_intervals
;
/* One dst_reg per user clip plane (array sized by MAX_CLIP_PLANES). */
122 dst_reg userplane
[MAX_CLIP_PLANES
];
/* Looks up the storage register previously assigned to an ir_variable. */
124 dst_reg
*variable_storage(ir_variable
*var
);
/* Rewrites a reladdr (indirect-addressed) src_reg through a temporary;
 * num_reladdr is an in/out counter per the pointer parameter. */
126 void reladdr_to_temp(ir_instruction
*ir
, src_reg
*reg
, int *num_reladdr
);
128 bool need_all_constants_in_pull_buffer
;
/* Overrides of the ir_visitor interface (second base class above): one
 * visit() overload per concrete ir_instruction subclass, as the surviving
 * doc comment below states. */
131 * \name Visit methods
133 * As typical for the visitor pattern, there must be one \c visit method for
134 * each concrete subclass of \c ir_instruction. Virtual base classes within
135 * the hierarchy should not have \c visit methods.
138 virtual void visit(ir_variable
*);
139 virtual void visit(ir_loop
*);
140 virtual void visit(ir_loop_jump
*);
141 virtual void visit(ir_function_signature
*);
142 virtual void visit(ir_function
*);
143 virtual void visit(ir_expression
*);
144 virtual void visit(ir_swizzle
*);
145 virtual void visit(ir_dereference_variable
*);
146 virtual void visit(ir_dereference_array
*);
147 virtual void visit(ir_dereference_record
*);
148 virtual void visit(ir_assignment
*);
149 virtual void visit(ir_constant
*);
150 virtual void visit(ir_call
*);
151 virtual void visit(ir_return
*);
152 virtual void visit(ir_discard
*);
153 virtual void visit(ir_texture
*);
154 virtual void visit(ir_if
*);
155 virtual void visit(ir_emit_vertex
*);
156 virtual void visit(ir_end_primitive
*);
157 virtual void visit(ir_barrier
*);
/* Output-varying state: per-slot result registers and their debug
 * annotations, both sized by BRW_VARYING_SLOT_COUNT (see the surviving
 * comment below: filled in while visiting ir_variables). */
162 /* Regs for vertex results. Generated at ir_variable visiting time
163 * for the ir->location's used.
165 dst_reg output_reg
[BRW_VARYING_SLOT_COUNT
];
166 const char *output_reg_annotation
[BRW_VARYING_SLOT_COUNT
];
/* Per-uniform vector widths; uniform_array_size is the allocated length
 * of that array (per its surviving trailing comment). */
168 int *uniform_vector_size
;
169 int uniform_array_size
; /*< Size of uniform_[vector_]size arrays */
/* Start-of-shader timestamp, consumed by the emit_shader_time_* methods
 * declared later in this class. */
172 src_reg shader_start_time
;
/* ir_variable -> storage map used by variable_storage() above. */
174 struct hash_table
*variable_ht
;
/* Main entry point: runs the visitor over the shader; takes the user
 * clip planes; returns false on failure (see fail() below). */
176 bool run(gl_clip_plane
*clip_planes
);
/* printf-style failure reporting (variadic). */
177 void fail(const char *msg
, ...);
179 void setup_uniform_clipplane_values(gl_clip_plane
*clip_planes
);
180 virtual void setup_vector_uniform_values(const gl_constant_value
*values
,
182 void setup_uniform_values(ir_variable
*ir
);
183 void setup_builtin_uniform_values(ir_variable
*ir
);
184 int setup_uniforms(int payload_reg
);
186 bool reg_allocate_trivial();
188 void evaluate_spill_costs(float *spill_costs
, bool *no_spill
);
189 int choose_spill_reg(struct ra_graph
*g
);
190 void spill_reg(int spill_reg
);
191 void move_grf_array_access_to_scratch();
192 void move_uniform_array_access_to_pull_constants();
193 void move_push_constants_to_pull_constants();
194 void split_uniform_registers();
195 void pack_uniform_registers();
196 void calculate_live_intervals();
197 void invalidate_live_intervals();
198 void split_virtual_grfs();
199 bool opt_vector_float();
200 bool opt_reduce_swizzle();
201 bool dead_code_eliminate();
202 int var_range_start(unsigned v
, unsigned n
) const;
203 int var_range_end(unsigned v
, unsigned n
) const;
204 bool virtual_grf_interferes(int a
, int b
);
205 bool opt_copy_propagation(bool do_constant_prop
= true);
206 bool opt_cse_local(bblock_t
*block
);
208 bool opt_algebraic();
209 bool opt_register_coalesce();
210 bool eliminate_find_live_channel();
211 bool is_dep_ctrl_unsafe(const vec4_instruction
*inst
);
212 void opt_set_dependency_control();
213 void opt_schedule_instructions();
215 vec4_instruction
*emit(vec4_instruction
*inst
);
217 vec4_instruction
*emit(enum opcode opcode
);
218 vec4_instruction
*emit(enum opcode opcode
, const dst_reg
&dst
);
219 vec4_instruction
*emit(enum opcode opcode
, const dst_reg
&dst
,
220 const src_reg
&src0
);
221 vec4_instruction
*emit(enum opcode opcode
, const dst_reg
&dst
,
222 const src_reg
&src0
, const src_reg
&src1
);
223 vec4_instruction
*emit(enum opcode opcode
, const dst_reg
&dst
,
224 const src_reg
&src0
, const src_reg
&src1
,
225 const src_reg
&src2
);
227 vec4_instruction
*emit_before(bblock_t
*block
,
228 vec4_instruction
*inst
,
229 vec4_instruction
*new_inst
);
231 #define EMIT1(op) vec4_instruction *op(const dst_reg &, const src_reg &);
232 #define EMIT2(op) vec4_instruction *op(const dst_reg &, const src_reg &, const src_reg &);
233 #define EMIT3(op) vec4_instruction *op(const dst_reg &, const src_reg &, const src_reg &, const src_reg &);
255 vec4_instruction
*CMP(dst_reg dst
, src_reg src0
, src_reg src1
,
256 enum brw_conditional_mod condition
);
257 vec4_instruction
*IF(src_reg src0
, src_reg src1
,
258 enum brw_conditional_mod condition
);
259 vec4_instruction
*IF(enum brw_predicate predicate
);
277 int implied_mrf_writes(vec4_instruction
*inst
);
279 bool try_rewrite_rhs_to_dst(ir_assignment
*ir
,
282 vec4_instruction
*pre_rhs_inst
,
283 vec4_instruction
*last_rhs_inst
);
285 /** Walks an exec_list of ir_instruction and sends it through this visitor. */
286 void visit_instructions(const exec_list
*list
);
288 void emit_vp_sop(enum brw_conditional_mod condmod
, dst_reg dst
,
289 src_reg src0
, src_reg src1
, src_reg one
);
291 void emit_bool_to_cond_code(ir_rvalue
*ir
, enum brw_predicate
*predicate
);
292 void emit_if_gen6(ir_if
*ir
);
294 void emit_minmax(enum brw_conditional_mod conditionalmod
, dst_reg dst
,
295 src_reg src0
, src_reg src1
);
297 void emit_lrp(const dst_reg
&dst
,
298 const src_reg
&x
, const src_reg
&y
, const src_reg
&a
);
301 * Copy any live channel from \p src to the first channel of the
304 src_reg
emit_uniformize(const src_reg
&src
);
306 void emit_block_move(dst_reg
*dst
, src_reg
*src
,
307 const struct glsl_type
*type
, brw_predicate predicate
);
309 void emit_constant_values(dst_reg
*dst
, ir_constant
*value
);
312 * Emit the correct dot-product instruction for the type of arguments
314 void emit_dp(dst_reg dst
, src_reg src0
, src_reg src1
, unsigned elements
);
316 void emit_scalar(ir_instruction
*ir
, enum prog_opcode op
,
317 dst_reg dst
, src_reg src0
);
319 void emit_scalar(ir_instruction
*ir
, enum prog_opcode op
,
320 dst_reg dst
, src_reg src0
, src_reg src1
);
322 src_reg
fix_3src_operand(src_reg src
);
324 void emit_math(enum opcode opcode
, const dst_reg
&dst
, const src_reg
&src0
,
325 const src_reg
&src1
= src_reg());
326 src_reg
fix_math_operand(src_reg src
);
328 void emit_pack_half_2x16(dst_reg dst
, src_reg src0
);
329 void emit_unpack_half_2x16(dst_reg dst
, src_reg src0
);
330 void emit_unpack_unorm_4x8(const dst_reg
&dst
, src_reg src0
);
331 void emit_unpack_snorm_4x8(const dst_reg
&dst
, src_reg src0
);
332 void emit_pack_unorm_4x8(const dst_reg
&dst
, const src_reg
&src0
);
333 void emit_pack_snorm_4x8(const dst_reg
&dst
, const src_reg
&src0
);
335 uint32_t gather_channel(ir_texture
*ir
, uint32_t sampler
);
336 src_reg
emit_mcs_fetch(ir_texture
*ir
, src_reg coordinate
, src_reg sampler
);
337 void emit_gen6_gather_wa(uint8_t wa
, dst_reg dst
);
338 void swizzle_result(ir_texture
*ir
, src_reg orig_val
, uint32_t sampler
);
340 void emit_ndc_computation();
341 void emit_psiz_and_flags(dst_reg reg
);
342 void emit_clip_distances(dst_reg reg
, int offset
);
343 vec4_instruction
*emit_generic_urb_slot(dst_reg reg
, int varying
);
344 void emit_urb_slot(dst_reg reg
, int varying
);
346 void emit_shader_time_begin();
347 void emit_shader_time_end();
348 void emit_shader_time_write(int shader_time_subindex
, src_reg value
);
350 void emit_untyped_atomic(unsigned atomic_op
, unsigned surf_index
,
351 dst_reg dst
, src_reg offset
, src_reg src0
,
354 void emit_untyped_surface_read(unsigned surf_index
, dst_reg dst
,
357 src_reg
get_scratch_offset(bblock_t
*block
, vec4_instruction
*inst
,
358 src_reg
*reladdr
, int reg_offset
);
359 src_reg
get_pull_constant_offset(bblock_t
*block
, vec4_instruction
*inst
,
360 src_reg
*reladdr
, int reg_offset
);
361 void emit_scratch_read(bblock_t
*block
, vec4_instruction
*inst
,
365 void emit_scratch_write(bblock_t
*block
, vec4_instruction
*inst
,
367 void emit_pull_constant_load(bblock_t
*block
, vec4_instruction
*inst
,
371 void emit_pull_constant_load_reg(dst_reg dst
,
374 bblock_t
*before_block
,
375 vec4_instruction
*before_inst
);
376 src_reg
emit_resolve_reladdr(int scratch_loc
[], bblock_t
*block
,
377 vec4_instruction
*inst
, src_reg src
);
379 bool try_emit_mad(ir_expression
*ir
);
380 bool try_emit_b2f_of_compare(ir_expression
*ir
);
381 void resolve_ud_negate(src_reg
*reg
);
382 void resolve_bool_comparison(ir_rvalue
*rvalue
, src_reg
*reg
);
384 src_reg
get_timestamp();
386 bool process_move_condition(ir_rvalue
*ir
);
388 void dump_instruction(backend_instruction
*inst
);
389 void dump_instruction(backend_instruction
*inst
, FILE *file
);
391 void visit_atomic_counter_intrinsic(ir_call
*ir
);
393 virtual void emit_nir_code();
394 virtual void nir_setup_inputs(nir_shader
*shader
);
395 virtual void nir_setup_uniforms(nir_shader
*shader
);
396 virtual void nir_setup_uniform(nir_variable
*var
);
397 virtual void nir_setup_builtin_uniform(nir_variable
*var
);
398 virtual void nir_setup_system_values(nir_shader
*shader
);
399 virtual void nir_emit_impl(nir_function_impl
*impl
);
400 virtual void nir_emit_cf_list(exec_list
*list
);
401 virtual void nir_emit_if(nir_if
*if_stmt
);
402 virtual void nir_emit_loop(nir_loop
*loop
);
403 virtual void nir_emit_block(nir_block
*block
);
404 virtual void nir_emit_instr(nir_instr
*instr
);
405 virtual void nir_emit_load_const(nir_load_const_instr
*instr
);
406 virtual void nir_emit_intrinsic(nir_intrinsic_instr
*instr
);
407 virtual void nir_emit_alu(nir_alu_instr
*instr
);
408 virtual void nir_emit_jump(nir_jump_instr
*instr
);
409 virtual void nir_emit_texture(nir_tex_instr
*instr
);
413 void lower_attributes_to_hw_regs(const int *attribute_map
,
415 void setup_payload_interference(struct ra_graph
*g
, int first_payload_node
,
417 virtual dst_reg
*make_reg_for_system_value(ir_variable
*ir
) = 0;
418 virtual void assign_binding_table_offsets();
419 virtual void setup_payload() = 0;
420 virtual void emit_prolog() = 0;
421 virtual void emit_program_code() = 0;
422 virtual void emit_thread_end() = 0;
423 virtual void emit_urb_write_header(int mrf
) = 0;
424 virtual vec4_instruction
*emit_urb_write_opcode(bool complete
) = 0;
425 virtual int compute_array_stride(ir_dereference_array
*ir
);
429 * If true, then register allocation should fail instead of spilling.
431 const bool no_spills
;
433 int shader_time_index
;
435 unsigned last_scratch
; /**< measured in 32-byte (register size) units */
440 * The vertex shader code generator.
442 * Translates VS IR to actual i965 assembly code.
447 vec4_generator(const struct brw_compiler
*compiler
, void *log_data
,
448 struct gl_shader_program
*shader_prog
,
449 struct gl_program
*prog
,
450 struct brw_vue_prog_data
*prog_data
,
453 const char *stage_name
,
454 const char *stage_abbrev
);
457 const unsigned *generate_assembly(const cfg_t
*cfg
, unsigned *asm_size
);
460 void generate_code(const cfg_t
*cfg
);
462 void generate_math1_gen4(vec4_instruction
*inst
,
465 void generate_math2_gen4(vec4_instruction
*inst
,
468 struct brw_reg src1
);
469 void generate_math_gen6(vec4_instruction
*inst
,
472 struct brw_reg src1
);
474 void generate_tex(vec4_instruction
*inst
,
477 struct brw_reg sampler_index
);
479 void generate_vs_urb_write(vec4_instruction
*inst
);
480 void generate_gs_urb_write(vec4_instruction
*inst
);
481 void generate_gs_urb_write_allocate(vec4_instruction
*inst
);
482 void generate_gs_thread_end(vec4_instruction
*inst
);
483 void generate_gs_set_write_offset(struct brw_reg dst
,
485 struct brw_reg src1
);
486 void generate_gs_set_vertex_count(struct brw_reg dst
,
488 void generate_gs_svb_write(vec4_instruction
*inst
,
491 struct brw_reg src1
);
492 void generate_gs_svb_set_destination_index(vec4_instruction
*inst
,
495 void generate_gs_set_dword_2(struct brw_reg dst
, struct brw_reg src
);
496 void generate_gs_prepare_channel_masks(struct brw_reg dst
);
497 void generate_gs_set_channel_masks(struct brw_reg dst
, struct brw_reg src
);
498 void generate_gs_get_instance_id(struct brw_reg dst
);
499 void generate_gs_ff_sync_set_primitives(struct brw_reg dst
,
502 struct brw_reg src2
);
503 void generate_gs_ff_sync(vec4_instruction
*inst
,
506 struct brw_reg src1
);
507 void generate_gs_set_primitive_id(struct brw_reg dst
);
508 void generate_oword_dual_block_offsets(struct brw_reg m1
,
509 struct brw_reg index
);
510 void generate_scratch_write(vec4_instruction
*inst
,
513 struct brw_reg index
);
514 void generate_scratch_read(vec4_instruction
*inst
,
516 struct brw_reg index
);
517 void generate_pull_constant_load(vec4_instruction
*inst
,
519 struct brw_reg index
,
520 struct brw_reg offset
);
521 void generate_pull_constant_load_gen7(vec4_instruction
*inst
,
523 struct brw_reg surf_index
,
524 struct brw_reg offset
);
525 void generate_set_simd4x2_header_gen9(vec4_instruction
*inst
,
527 void generate_unpack_flags(struct brw_reg dst
);
529 const struct brw_compiler
*compiler
;
530 void *log_data
; /* Passed to compiler->*_log functions */
532 const struct brw_device_info
*devinfo
;
534 struct brw_codegen
*p
;
536 struct gl_shader_program
*shader_prog
;
537 const struct gl_program
*prog
;
539 struct brw_vue_prog_data
*prog_data
;
542 const char *stage_name
;
543 const char *stage_abbrev
;
544 const bool debug_flag
;
547 } /* namespace brw */
548 #endif /* __cplusplus */
550 #endif /* BRW_VEC4_H */