/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
28 #include "brw_shader.h"
29 #include "main/compiler.h"
30 #include "program/hash_table.h"
31 #include "brw_program.h"
37 #include "brw_context.h"
39 #include "intel_asm_annotation.h"
48 struct brw_vec4_compile
{
49 GLuint last_scratch
; /**< measured in 32-byte (register size) units */
57 brw_vue_setup_prog_key_for_precompile(struct gl_context
*ctx
,
58 struct brw_vue_prog_key
*key
,
59 GLuint id
, struct gl_program
*prog
);
/* Forward declaration; defined in the live-variables analysis. */
class vec4_live_variables;
/* Build a BRW swizzle replicating the first `size` components.
 * NOTE(review): the return-type line was dropped in extraction; `unsigned`
 * is assumed from BRW_SWIZZLE_* usage — confirm against the definition. */
unsigned
swizzle_for_size(int size);
/* NOTE(review): extraction has garbled this class declaration — each
 * declaration is split across lines, the original line numbers are embedded
 * in the text, and several original lines (braces, `public:`, and — judging
 * by the gaps in the embedded numbering — additional constructors/members)
 * are missing entirely.  Code is left byte-identical; comments only.
 *
 * A vec4 source operand: constructors from various register files and
 * immediates, plus a per-read channel swizzle. */
73 class src_reg
: public backend_reg
76 DECLARE_RALLOC_CXX_OPERATORS(src_reg
)
/* Constructors from a register file, vector-float immediates, and a raw
 * hardware register. */
80 src_reg(register_file file
, int reg
, const glsl_type
*type
);
85 src_reg(uint8_t vf
[4]);
86 src_reg(uint8_t vf0
, uint8_t vf1
, uint8_t vf2
, uint8_t vf3
);
87 src_reg(struct brw_reg reg
);
/* Structural equality test against another source register. */
89 bool equals(const src_reg
&r
) const;
/* Visitor-allocating constructors: allocate a fresh virtual GRF sized for
 * `type` (optionally an array of `size` elements). */
91 src_reg(class vec4_visitor
*v
, const struct glsl_type
*type
);
92 src_reg(class vec4_visitor
*v
, const struct glsl_type
*type
, int size
);
/* Explicit conversion from a destination register. */
94 explicit src_reg(dst_reg reg
);
96 GLuint swizzle
; /**< BRW_SWIZZLE_XYZW macros from brw_reg.h. */
101 static inline src_reg
102 retype(src_reg reg
, enum brw_reg_type type
)
104 reg
.fixed_hw_reg
.type
= reg
.type
= type
;
108 static inline src_reg
109 offset(src_reg reg
, unsigned delta
)
111 assert(delta
== 0 || (reg
.file
!= HW_REG
&& reg
.file
!= IMM
));
112 reg
.reg_offset
+= delta
;
117 * Reswizzle a given source register.
120 static inline src_reg
121 swizzle(src_reg reg
, unsigned swizzle
)
123 assert(reg
.file
!= HW_REG
);
124 reg
.swizzle
= BRW_SWIZZLE4(
125 BRW_GET_SWZ(reg
.swizzle
, BRW_GET_SWZ(swizzle
, 0)),
126 BRW_GET_SWZ(reg
.swizzle
, BRW_GET_SWZ(swizzle
, 1)),
127 BRW_GET_SWZ(reg
.swizzle
, BRW_GET_SWZ(swizzle
, 2)),
128 BRW_GET_SWZ(reg
.swizzle
, BRW_GET_SWZ(swizzle
, 3)));
132 static inline src_reg
135 assert(reg
.file
!= HW_REG
&& reg
.file
!= IMM
);
136 reg
.negate
= !reg
.negate
;
/* NOTE(review): extraction has garbled this class declaration — split
 * lines, embedded original line numbers, and (per the gaps in that
 * numbering) missing lines such as braces, `public:` and some members.
 * Code is left byte-identical; comments only.
 *
 * A vec4 destination operand: constructors mirroring src_reg, plus a
 * per-write channel-enable mask. */
140 class dst_reg
: public backend_reg
143 DECLARE_RALLOC_CXX_OPERATORS(dst_reg
)
148 dst_reg(register_file file
, int reg
);
149 dst_reg(register_file file
, int reg
, const glsl_type
*type
, int writemask
);
150 dst_reg(struct brw_reg reg
);
/* Visitor-allocating constructor: allocates a fresh virtual GRF for `type`. */
151 dst_reg(class vec4_visitor
*v
, const struct glsl_type
*type
);
/* Explicit conversion from a source register. */
153 explicit dst_reg(src_reg reg
);
155 int writemask
; /**< Bitfield of WRITEMASK_[XYZW] */
160 static inline dst_reg
161 retype(dst_reg reg
, enum brw_reg_type type
)
163 reg
.fixed_hw_reg
.type
= reg
.type
= type
;
167 static inline dst_reg
168 offset(dst_reg reg
, unsigned delta
)
170 assert(delta
== 0 || (reg
.file
!= HW_REG
&& reg
.file
!= IMM
));
171 reg
.reg_offset
+= delta
;
175 static inline dst_reg
176 writemask(dst_reg reg
, unsigned mask
)
178 assert(reg
.file
!= HW_REG
&& reg
.file
!= IMM
);
179 assert((reg
.writemask
& mask
) != 0);
180 reg
.writemask
&= mask
;
/* NOTE(review): extraction has garbled this class declaration — split
 * lines, embedded original line numbers, and missing lines (per the gaps
 * in that numbering, including the signatures enclosing the two inline
 * `return ...` bodies near the end).  Code left byte-identical; comments
 * only.
 *
 * One vec4 IR instruction: an opcode, a destination and up to three
 * sources, plus URB/SOL bookkeeping used by the generator. */
184 class vec4_instruction
: public backend_instruction
{
186 DECLARE_RALLOC_CXX_OPERATORS(vec4_instruction
)
188 vec4_instruction(vec4_visitor
*v
, enum opcode opcode
,
189 const dst_reg
&dst
= dst_reg(),
190 const src_reg
&src0
= src_reg(),
191 const src_reg
&src1
= src_reg(),
192 const src_reg
&src2
= src_reg());
/* Translate the IR dst/src operands into raw hardware registers for
 * code generation. */
194 struct brw_reg
get_dst(void);
195 struct brw_reg
get_src(const struct brw_vue_prog_data
*prog_data
, int i
);
200 enum brw_urb_write_flags urb_write_flags
;
202 unsigned sol_binding
; /**< gen6: SOL binding table index */
203 bool sol_final_write
; /**< gen6: send commit message */
204 unsigned sol_vertex
; /**< gen6: used for setting dst index in SVB header */
206 bool is_send_from_grf();
207 bool can_reswizzle(int dst_writemask
, int swizzle
, int swizzle_mask
);
208 void reswizzle(int dst_writemask
, int swizzle
);
209 bool can_do_source_mods(struct brw_context
*brw
);
/* NOTE(review): the two `return` expressions below are the bodies of
 * inline predicate methods whose signatures were lost in extraction
 * (presumably reads-flag / writes-flag style queries — confirm upstream). */
213 return predicate
|| opcode
== VS_OPCODE_UNPACK_FLAGS_SIMD4X2
;
218 return (conditional_mod
&& (opcode
!= BRW_OPCODE_SEL
&&
219 opcode
!= BRW_OPCODE_IF
&&
220 opcode
!= BRW_OPCODE_WHILE
));
/* NOTE(review): extraction has garbled this class declaration — every
 * declaration is split across lines, original line numbers are embedded in
 * the text, and many original lines are missing outright (access
 * specifiers, braces, several member/parameter lines, judging by the gaps
 * in the embedded numbering).  Code is left byte-identical; comments only. */
225 * The vertex shader front-end.
227 * Translates either GLSL IR or Mesa IR (for ARB_vertex_program and
228 * fixed-function) into VS IR.
230 class vec4_visitor
: public backend_visitor
233 vec4_visitor(struct brw_context
*brw
,
234 struct brw_vec4_compile
*c
,
235 struct gl_program
*prog
,
236 const struct brw_vue_prog_key
*key
,
237 struct brw_vue_prog_data
*prog_data
,
238 struct gl_shader_program
*shader_prog
,
239 gl_shader_stage stage
,
243 shader_time_shader_type st_base
,
244 shader_time_shader_type st_written
,
245 shader_time_shader_type st_reset
);
/* Inline helpers building typed null destination registers (the enclosing
 * signatures for the first two were lost in extraction). */
250 return dst_reg(brw_null_reg());
255 return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_D
));
258 dst_reg
dst_null_ud()
260 return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD
));
/* Compile inputs/outputs held for the duration of the visit. */
263 struct brw_vec4_compile
* const c
;
264 const struct brw_vue_prog_key
* const key
;
265 struct brw_vue_prog_data
* const prog_data
;
266 unsigned int sanity_param_count
;
272 * GLSL IR currently being processed, which is associated with our
273 * driver IR instructions for debugging purposes.
276 const char *current_annotation
;
/* Virtual-GRF bookkeeping for register allocation and liveness. */
278 int *virtual_grf_sizes
;
279 int virtual_grf_count
;
280 int virtual_grf_array_size
;
281 int first_non_payload_grf
;
282 unsigned int max_grf
;
283 int *virtual_grf_start
;
284 int *virtual_grf_end
;
285 brw::vec4_live_variables
*live_intervals
;
286 dst_reg userplane
[MAX_CLIP_PLANES
];
289 * This is the size to be used for an array with an element per
292 int virtual_grf_reg_count
;
293 /** Per-virtual-grf indices into an array of size virtual_grf_reg_count */
294 int *virtual_grf_reg_map
;
296 dst_reg
*variable_storage(ir_variable
*var
);
298 void reladdr_to_temp(ir_instruction
*ir
, src_reg
*reg
, int *num_reladdr
);
300 bool need_all_constants_in_pull_buffer
;
303 * \name Visit methods
305 * As typical for the visitor pattern, there must be one \c visit method for
306 * each concrete subclass of \c ir_instruction. Virtual base classes within
307 * the hierarchy should not have \c visit methods.
310 virtual void visit(ir_variable
*);
311 virtual void visit(ir_loop
*);
312 virtual void visit(ir_loop_jump
*);
313 virtual void visit(ir_function_signature
*);
314 virtual void visit(ir_function
*);
315 virtual void visit(ir_expression
*);
316 virtual void visit(ir_swizzle
*);
317 virtual void visit(ir_dereference_variable
*);
318 virtual void visit(ir_dereference_array
*);
319 virtual void visit(ir_dereference_record
*);
320 virtual void visit(ir_assignment
*);
321 virtual void visit(ir_constant
*);
322 virtual void visit(ir_call
*);
323 virtual void visit(ir_return
*);
324 virtual void visit(ir_discard
*);
325 virtual void visit(ir_texture
*);
326 virtual void visit(ir_if
*);
327 virtual void visit(ir_emit_vertex
*);
328 virtual void visit(ir_end_primitive
*);
333 /* Regs for vertex results. Generated at ir_variable visiting time
334 * for the ir->location's used.
336 dst_reg output_reg
[BRW_VARYING_SLOT_COUNT
];
337 const char *output_reg_annotation
[BRW_VARYING_SLOT_COUNT
];
339 int *uniform_vector_size
;
340 int uniform_array_size
; /*< Size of uniform_[vector_]size arrays */
343 src_reg shader_start_time
;
345 struct hash_table
*variable_ht
;
/* Abort compilation with a printf-style message. */
348 void fail(const char *msg
, ...);
/* Uniform setup and register allocation / spilling machinery. */
350 int virtual_grf_alloc(int size
);
351 void setup_uniform_clipplane_values();
352 void setup_uniform_values(ir_variable
*ir
);
353 void setup_builtin_uniform_values(ir_variable
*ir
);
354 int setup_uniforms(int payload_reg
);
355 bool reg_allocate_trivial();
357 void evaluate_spill_costs(float *spill_costs
, bool *no_spill
);
358 int choose_spill_reg(struct ra_graph
*g
);
359 void spill_reg(int spill_reg
);
360 void move_grf_array_access_to_scratch();
361 void move_uniform_array_access_to_pull_constants();
362 void move_push_constants_to_pull_constants();
363 void split_uniform_registers();
364 void pack_uniform_registers();
365 void calculate_live_intervals();
366 void invalidate_live_intervals();
367 void split_virtual_grfs();
/* Optimization passes; each returns whether it made progress. */
368 bool opt_reduce_swizzle();
369 bool dead_code_eliminate();
370 bool virtual_grf_interferes(int a
, int b
);
371 bool opt_copy_propagation();
372 bool opt_cse_local(bblock_t
*block
);
374 bool opt_algebraic();
375 bool opt_register_coalesce();
376 bool is_dep_ctrl_unsafe(const vec4_instruction
*inst
);
377 void opt_set_dependency_control();
378 void opt_schedule_instructions();
/* Instruction-emission entry points (up to three sources). */
380 vec4_instruction
*emit(vec4_instruction
*inst
);
382 vec4_instruction
*emit(enum opcode opcode
);
383 vec4_instruction
*emit(enum opcode opcode
, const dst_reg
&dst
);
384 vec4_instruction
*emit(enum opcode opcode
, const dst_reg
&dst
,
385 const src_reg
&src0
);
386 vec4_instruction
*emit(enum opcode opcode
, const dst_reg
&dst
,
387 const src_reg
&src0
, const src_reg
&src1
);
388 vec4_instruction
*emit(enum opcode opcode
, const dst_reg
&dst
,
389 const src_reg
&src0
, const src_reg
&src1
,
390 const src_reg
&src2
);
392 vec4_instruction
*emit_before(bblock_t
*block
,
393 vec4_instruction
*inst
,
394 vec4_instruction
*new_inst
);
/* Macros declaring per-opcode emit helpers (MOV, ADD, MUL, ...). */
396 #define EMIT1(op) vec4_instruction *op(const dst_reg &, const src_reg &);
397 #define EMIT2(op) vec4_instruction *op(const dst_reg &, const src_reg &, const src_reg &);
398 #define EMIT3(op) vec4_instruction *op(const dst_reg &, const src_reg &, const src_reg &, const src_reg &);
420 vec4_instruction
*CMP(dst_reg dst
, src_reg src0
, src_reg src1
,
421 enum brw_conditional_mod condition
);
422 vec4_instruction
*IF(src_reg src0
, src_reg src1
,
423 enum brw_conditional_mod condition
);
424 vec4_instruction
*IF(enum brw_predicate predicate
);
425 EMIT1(PULL_CONSTANT_LOAD
)
443 int implied_mrf_writes(vec4_instruction
*inst
);
445 bool try_rewrite_rhs_to_dst(ir_assignment
*ir
,
448 vec4_instruction
*pre_rhs_inst
,
449 vec4_instruction
*last_rhs_inst
);
451 /** Walks an exec_list of ir_instruction and sends it through this visitor. */
452 void visit_instructions(const exec_list
*list
);
454 void emit_vp_sop(enum brw_conditional_mod condmod
, dst_reg dst
,
455 src_reg src0
, src_reg src1
, src_reg one
);
457 void emit_bool_to_cond_code(ir_rvalue
*ir
, enum brw_predicate
*predicate
);
458 void emit_if_gen6(ir_if
*ir
);
460 void emit_minmax(enum brw_conditional_mod conditionalmod
, dst_reg dst
,
461 src_reg src0
, src_reg src1
);
463 void emit_lrp(const dst_reg
&dst
,
464 const src_reg
&x
, const src_reg
&y
, const src_reg
&a
);
466 void emit_block_move(dst_reg
*dst
, src_reg
*src
,
467 const struct glsl_type
*type
, brw_predicate predicate
);
469 void emit_constant_values(dst_reg
*dst
, ir_constant
*value
);
472 * Emit the correct dot-product instruction for the type of arguments
474 void emit_dp(dst_reg dst
, src_reg src0
, src_reg src1
, unsigned elements
);
476 void emit_scalar(ir_instruction
*ir
, enum prog_opcode op
,
477 dst_reg dst
, src_reg src0
);
479 void emit_scalar(ir_instruction
*ir
, enum prog_opcode op
,
480 dst_reg dst
, src_reg src0
, src_reg src1
);
482 void emit_scs(ir_instruction
*ir
, enum prog_opcode op
,
483 dst_reg dst
, const src_reg
&src
);
485 src_reg
fix_3src_operand(src_reg src
);
487 void emit_math(enum opcode opcode
, const dst_reg
&dst
, const src_reg
&src0
,
488 const src_reg
&src1
= src_reg());
489 src_reg
fix_math_operand(src_reg src
);
/* GLSL pack/unpack built-in lowering. */
491 void emit_pack_half_2x16(dst_reg dst
, src_reg src0
);
492 void emit_unpack_half_2x16(dst_reg dst
, src_reg src0
);
493 void emit_unpack_unorm_4x8(const dst_reg
&dst
, src_reg src0
);
494 void emit_unpack_snorm_4x8(const dst_reg
&dst
, src_reg src0
);
495 void emit_pack_unorm_4x8(const dst_reg
&dst
, const src_reg
&src0
);
496 void emit_pack_snorm_4x8(const dst_reg
&dst
, const src_reg
&src0
);
/* Texturing support. */
498 uint32_t gather_channel(ir_texture
*ir
, uint32_t sampler
);
499 src_reg
emit_mcs_fetch(ir_texture
*ir
, src_reg coordinate
, src_reg sampler
);
500 void emit_gen6_gather_wa(uint8_t wa
, dst_reg dst
);
501 void swizzle_result(ir_texture
*ir
, src_reg orig_val
, uint32_t sampler
);
/* URB output / fixed-function header emission. */
503 void emit_ndc_computation();
504 void emit_psiz_and_flags(dst_reg reg
);
505 void emit_clip_distances(dst_reg reg
, int offset
);
506 vec4_instruction
*emit_generic_urb_slot(dst_reg reg
, int varying
);
507 void emit_urb_slot(dst_reg reg
, int varying
);
509 void emit_shader_time_begin();
510 void emit_shader_time_end();
511 void emit_shader_time_write(enum shader_time_shader_type type
,
514 void emit_untyped_atomic(unsigned atomic_op
, unsigned surf_index
,
515 dst_reg dst
, src_reg offset
, src_reg src0
,
518 void emit_untyped_surface_read(unsigned surf_index
, dst_reg dst
,
/* Scratch / pull-constant access helpers. */
521 src_reg
get_scratch_offset(bblock_t
*block
, vec4_instruction
*inst
,
522 src_reg
*reladdr
, int reg_offset
);
523 src_reg
get_pull_constant_offset(bblock_t
*block
, vec4_instruction
*inst
,
524 src_reg
*reladdr
, int reg_offset
);
525 void emit_scratch_read(bblock_t
*block
, vec4_instruction
*inst
,
529 void emit_scratch_write(bblock_t
*block
, vec4_instruction
*inst
,
531 void emit_pull_constant_load(bblock_t
*block
, vec4_instruction
*inst
,
536 bool try_emit_mad(ir_expression
*ir
);
537 bool try_emit_b2f_of_compare(ir_expression
*ir
);
538 void resolve_ud_negate(src_reg
*reg
);
539 void resolve_bool_comparison(ir_rvalue
*rvalue
, src_reg
*reg
);
541 src_reg
get_timestamp();
543 bool process_move_condition(ir_rvalue
*ir
);
545 void dump_instruction(backend_instruction
*inst
);
546 void dump_instruction(backend_instruction
*inst
, FILE *file
);
548 void visit_atomic_counter_intrinsic(ir_call
*ir
);
552 void lower_attributes_to_hw_regs(const int *attribute_map
,
554 void setup_payload_interference(struct ra_graph
*g
, int first_payload_node
,
/* Pure-virtual hooks specialized by the VS/GS subclasses. */
556 virtual dst_reg
*make_reg_for_system_value(ir_variable
*ir
) = 0;
557 virtual void assign_binding_table_offsets();
558 virtual void setup_payload() = 0;
559 virtual void emit_prolog() = 0;
560 virtual void emit_program_code() = 0;
561 virtual void emit_thread_end() = 0;
562 virtual void emit_urb_write_header(int mrf
) = 0;
563 virtual vec4_instruction
*emit_urb_write_opcode(bool complete
) = 0;
564 virtual int compute_array_stride(ir_dereference_array
*ir
);
566 const bool debug_flag
;
570 * If true, then register allocation should fail instead of spilling.
572 const bool no_spills
;
574 const shader_time_shader_type st_base
;
575 const shader_time_shader_type st_written
;
576 const shader_time_shader_type st_reset
;
/* NOTE(review): extraction has garbled this class declaration — split
 * lines, embedded original line numbers, and missing lines (the class
 * header itself, access specifiers, braces, and several parameter lines,
 * judging by the gaps in the embedded numbering).  Code is left
 * byte-identical; comments only. */
581 * The vertex shader code generator.
583 * Translates VS IR to actual i965 assembly code.
588 vec4_generator(struct brw_context
*brw
,
589 struct gl_shader_program
*shader_prog
,
590 struct gl_program
*prog
,
591 struct brw_vue_prog_data
*prog_data
,
/* Top-level entry: generate machine code for the whole CFG and return the
 * assembled buffer plus its size. */
596 const unsigned *generate_assembly(const cfg_t
*cfg
, unsigned *asm_size
);
599 void generate_code(const cfg_t
*cfg
);
/* Per-opcode code generation helpers (math, texturing, URB writes,
 * geometry-shader paths, scratch and pull-constant access). */
601 void generate_math1_gen4(vec4_instruction
*inst
,
604 void generate_math2_gen4(vec4_instruction
*inst
,
607 struct brw_reg src1
);
608 void generate_math_gen6(vec4_instruction
*inst
,
611 struct brw_reg src1
);
613 void generate_tex(vec4_instruction
*inst
,
616 struct brw_reg sampler_index
);
618 void generate_vs_urb_write(vec4_instruction
*inst
);
619 void generate_gs_urb_write(vec4_instruction
*inst
);
620 void generate_gs_urb_write_allocate(vec4_instruction
*inst
);
621 void generate_gs_thread_end(vec4_instruction
*inst
);
622 void generate_gs_set_write_offset(struct brw_reg dst
,
624 struct brw_reg src1
);
625 void generate_gs_set_vertex_count(struct brw_reg dst
,
627 void generate_gs_svb_write(vec4_instruction
*inst
,
630 struct brw_reg src1
);
631 void generate_gs_svb_set_destination_index(vec4_instruction
*inst
,
634 void generate_gs_set_dword_2(struct brw_reg dst
, struct brw_reg src
);
635 void generate_gs_prepare_channel_masks(struct brw_reg dst
);
636 void generate_gs_set_channel_masks(struct brw_reg dst
, struct brw_reg src
);
637 void generate_gs_get_instance_id(struct brw_reg dst
);
638 void generate_gs_ff_sync_set_primitives(struct brw_reg dst
,
641 struct brw_reg src2
);
642 void generate_gs_ff_sync(vec4_instruction
*inst
,
645 struct brw_reg src1
);
646 void generate_gs_set_primitive_id(struct brw_reg dst
);
647 void generate_oword_dual_block_offsets(struct brw_reg m1
,
648 struct brw_reg index
);
649 void generate_scratch_write(vec4_instruction
*inst
,
652 struct brw_reg index
);
653 void generate_scratch_read(vec4_instruction
*inst
,
655 struct brw_reg index
);
656 void generate_pull_constant_load(vec4_instruction
*inst
,
658 struct brw_reg index
,
659 struct brw_reg offset
);
660 void generate_pull_constant_load_gen7(vec4_instruction
*inst
,
662 struct brw_reg surf_index
,
663 struct brw_reg offset
);
664 void generate_unpack_flags(vec4_instruction
*inst
,
667 void generate_untyped_atomic(vec4_instruction
*inst
,
669 struct brw_reg atomic_op
,
670 struct brw_reg surf_index
);
672 void generate_untyped_surface_read(vec4_instruction
*inst
,
674 struct brw_reg surf_index
);
/* Generator state: GL context, assembler, and the program being built. */
676 struct brw_context
*brw
;
678 struct brw_compile
*p
;
680 struct gl_shader_program
*shader_prog
;
681 const struct gl_program
*prog
;
683 struct brw_vue_prog_data
*prog_data
;
686 const bool debug_flag
;
689 } /* namespace brw */
690 #endif /* __cplusplus */
692 #endif /* BRW_VEC4_H */