2 * Copyright © 2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
28 #include "brw_compiler.h"
29 #include "brw_eu_defines.h"
31 #include "compiler/nir/nir.h"
34 #include "brw_ir_allocator.h"
37 #define MAX_SAMPLER_MESSAGE_SIZE 11
38 #define MAX_VGRF_SIZE 16
/* Register wrapper shared by the brw compiler backends.  It privately
 * inherits from the raw hardware struct brw_reg and re-exports a subset
 * of its fields through the using-declarations at the end of the struct.
 * NOTE(review): interior lines of this definition are elided in this
 * extraction (the embedded original line numbers jump), so only the
 * visible members are annotated. */
41 struct backend_reg
: private brw_reg
/* Converting constructor from a raw brw_reg; forwards it to the base. */
44 backend_reg(const struct brw_reg
/* NOTE(review): the lone '®' on the next line looks like HTML-entity
 * corruption of the original parameter text '&reg' -- the member
 * initializer forwards a parameter named 'reg' to brw_reg, so the
 * declaration presumably read 'const struct brw_reg &reg'.  Restore
 * when repairing the extraction. */
®
) : brw_reg(reg
) {}
/* View this register as the underlying brw_reg.  The assert restricts
 * this to register files that map directly onto hardware encodings
 * (ARF, FIXED_GRF, MRF, IMM). */
46 const brw_reg
&as_brw_reg() const
48 assert(file
== ARF
|| file
== FIXED_GRF
|| file
== MRF
|| file
== IMM
);
50 return static_cast<const brw_reg
&>(*this);
/* Mutable overload of as_brw_reg(); same file restriction as above.
 * NOTE(review): its signature line is elided from this extraction. */
55 assert(file
== ARF
|| file
== FIXED_GRF
|| file
== MRF
|| file
== IMM
);
57 return static_cast<brw_reg
&>(*this);
/* Structural equality with another backend_reg (defined out of line). */
60 bool equals(const backend_reg
&r
) const;
/* Value/identity predicates (defined out of line). */
64 bool is_negative_one() const;
66 bool is_accumulator() const;
68 /** Offset from the start of the (virtual) register in bytes. */
/* brw_reg fields re-exported to users of backend_reg. */
73 using brw_reg::negate
;
75 using brw_reg::address_mode
;
79 using brw_reg::swizzle
;
80 using brw_reg::writemask
;
81 using brw_reg::indirect_offset
;
82 using brw_reg::vstride
;
84 using brw_reg::hstride
;
/* Base class for instructions in the backend IR: the concrete fs_inst /
 * vec4_instruction types inherit from it (see the comment on the
 * instruction list in backend_shader below).  Inherits exec_node so
 * instructions can live on an exec_list.
 * NOTE(review): interior lines of this definition are elided in this
 * extraction; only the visible declarations are annotated. */
97 struct backend_instruction
: public exec_node
{
/* True if this opcode uses the three-source instruction encoding on the
 * given device. */
98 bool is_3src(const struct gen_device_info
*devinfo
) const;
/* Opcode classification predicates (defined out of line). */
100 bool is_math() const;
101 bool is_control_flow() const;
102 bool is_commutative() const;
/* Legality queries: source modifiers, saturate, and conditional mod. */
103 bool can_do_source_mods() const;
104 bool can_do_saturate() const;
105 bool can_do_cmod() const;
/* Implicit accumulator usage -- accesses not visible in the explicit
 * source/destination lists. */
106 bool reads_accumulator_implicitly() const;
107 bool writes_accumulator_implicitly(const struct gen_device_info
*devinfo
) const;
/* List surgery relative to a basic block.  NOTE(review): presumably
 * these keep the owning bblock_t's bookkeeping consistent in addition
 * to the plain exec_list operation; confirm in the .cpp. */
109 void remove(bblock_t
*block
);
110 void insert_after(bblock_t
*block
, backend_instruction
*inst
);
111 void insert_before(bblock_t
*block
, backend_instruction
*inst
);
112 void insert_before(bblock_t
*block
, exec_list
*list
);
115 * True if the instruction has side effects other than writing to
116 * its destination registers. You are expected not to reorder or
117 * optimize these out unless you know what you are doing.
119 bool has_side_effects() const;
122 * True if the instruction might be affected by side effects of other
125 bool is_volatile() const;
/* Plain-C view of backend_instruction: the shared data fields without
 * the C++ methods declared above.
 * NOTE(review): in the full file this is presumably the #else branch of
 * an '#ifdef __cplusplus' guard around the C++ definition above -- the
 * preprocessor lines are elided from this extraction; confirm against
 * the original file. */
127 struct backend_instruction
{
/* Intrusive list linkage (the C++ version gets this via exec_node). */
128 struct exec_node link
;
131 * Annotation for the generated IR. One of the two can be set.
134 const char *annotation
;
/* NOTE(review): the exec_size and group members themselves appear to be
 * elided here; only their documentation blocks survive. */
138 * Execution size of the instruction. This is used by the generator to
139 * generate the correct binary for the given instruction. Current valid
140 * values are 1, 4, 8, 16, 32.
145 * Channel group from the hardware execution and predication mask that
146 * should be applied to the instruction. The subset of channel enable
147 * signals (calculated from the EU control flow and predication state)
148 * given by [group, group + exec_size) will be used to mask GRF writes and
149 * any other side effects of the instruction.
153 uint32_t offset
; /**< spill/unspill offset or texture offset bitfield */
154 uint8_t mlen
; /**< SEND message length */
155 int8_t base_mrf
; /**< First MRF in the SEND message, if mlen is nonzero. */
156 uint8_t target
; /**< MRT target. */
157 unsigned size_written
; /**< Data written to the destination register in bytes. */
159 enum opcode opcode
; /* BRW_OPCODE_* or FS_OPCODE_* */
160 enum brw_conditional_mod conditional_mod
; /**< BRW_CONDITIONAL_* */
161 enum brw_predicate predicate
;
/* Single-bit flags packed into a bitfield. */
162 bool predicate_inverse
:1;
163 bool writes_accumulator
:1; /**< instruction implicitly writes accumulator */
164 bool force_writemask_all
:1;
168 bool shadow_compare
:1;
170 /* Chooses which flag subregister (f0.0 or f0.1) is used for conditional
171 * mod and predication.
173 unsigned flag_subreg
:1;
175 /** The number of hardware registers used for a message header. */
/* Heuristic selector for the instruction scheduler.
 * NOTE(review): only one enumerator and the opening brace are visible
 * here; the remaining enumerators and the closing brace are elided in
 * this extraction. */
181 enum instruction_scheduler_mode
{
183 SCHEDULE_PRE_NON_LIFO
,
/* State shared by the backend shader compilers: compiler and device
 * handles, the source NIR shader, the brw_stage_prog_data being filled
 * in, the backend IR instruction list, and debug/dump hooks.
 * NOTE(review): interior lines and the closing brace of this class are
 * elided in this extraction, so it is unclear whether the declarations
 * from original line 234 onward are members or free functions. */
188 struct backend_shader
{
191 backend_shader(const struct brw_compiler
*compiler
,
194 const nir_shader
*shader
,
195 struct brw_stage_prog_data
*stage_prog_data
);
/* Inputs to the compile, all owned elsewhere. */
199 const struct brw_compiler
*compiler
;
200 void *log_data
; /* Passed to compiler->*_log functions */
202 const struct gen_device_info
* const devinfo
;
203 const nir_shader
*nir
;
/* Output being produced for the current stage. */
204 struct brw_stage_prog_data
* const stage_prog_data
;
206 /** ralloc context for temporary data used during compile */
210 * List of either fs_inst or vec4_instruction (inheriting from
211 * backend_instruction)
213 exec_list instructions
;
/* Shader stage being compiled plus human-readable names for logging. */
217 gl_shader_stage stage
;
219 const char *stage_name
;
220 const char *stage_abbrev
;
/* Allocator for backend IR virtual registers. */
222 brw::simple_allocator alloc
;
/* Debug hooks; each concrete backend provides the implementation. */
224 virtual void dump_instruction(backend_instruction
*inst
) = 0;
225 virtual void dump_instruction(backend_instruction
*inst
, FILE *file
) = 0;
226 virtual void dump_instructions();
227 virtual void dump_instructions(const char *name
);
/* Build the control-flow graph from the instruction list. */
229 void calculate_cfg();
/* Backends must drop cached liveness data after IR mutations. */
231 virtual void invalidate_live_intervals() = 0;
/* NOTE(review): presumably packs texture offsets into a message-header
 * bitfield via *offset_bits; confirm in the corresponding .cpp. */
234 bool brw_texture_offset(int *offsets
,
235 unsigned num_components
,
236 uint32_t *offset_bits
);
/* Fills image-uniform params into stage_prog_data starting at
 * param_start_index, from the given uniform storage. */
238 void brw_setup_image_uniform_values(gl_shader_stage stage
,
239 struct brw_stage_prog_data
,
240 unsigned param_start_index
,
241 const gl_uniform_storage
*storage
);
/* NOTE(review): the declaration below is presumably the C-mode forward
 * declaration from the #else branch of the __cplusplus guard; the guard
 * lines themselves are elided here. */
244 struct backend_shader
;
245 #endif /* __cplusplus */
/* Free-function helpers shared by both backends (declared after the
 * C++-only region; the '#endif __cplusplus' sits just above). */
/* Map a GLSL base type to the corresponding hardware register type. */
247 enum brw_reg_type
brw_type_for_base_type(const struct glsl_type
*type
);
/* Map a comparison operator to a BRW_CONDITIONAL_* modifier. */
248 enum brw_conditional_mod
brw_conditional_for_comparison(unsigned int op
);
/* Map a math opcode to the hardware math-function encoding --
 * presumably; confirm against the .cpp. */
249 uint32_t brw_math_function(enum opcode op
);
/* Human-readable opcode name for debug dumps.
 * NOTE(review): the remainder of this prototype (original line 251) is
 * elided in this extraction. */
250 const char *brw_instruction_name(const struct gen_device_info
*devinfo
,
/* In-place immediate manipulation; each presumably returns whether the
 * immediate could be transformed for the given register type -- confirm
 * in the .cpp. */
252 bool brw_saturate_immediate(enum brw_reg_type type
, struct brw_reg
*reg
);
253 bool brw_negate_immediate(enum brw_reg_type type
, struct brw_reg
*reg
);
254 bool brw_abs_immediate(enum brw_reg_type type
, struct brw_reg
*reg
);
/* Optimization pass over a backend_shader; the bool presumably reports
 * whether any change was made. */
256 bool opt_predicated_break(struct backend_shader
*s
);
/* One-time register-set construction, per backend (defining files are
 * named in the comments the original author left below). */
262 /* brw_fs_reg_allocate.cpp */
263 void brw_fs_alloc_reg_sets(struct brw_compiler
*compiler
);
265 /* brw_vec4_reg_allocate.cpp */
266 void brw_vec4_alloc_reg_set(struct brw_compiler
*compiler
);
/* String tables for disassembly, indexed by the 4-bit hardware field. */
269 extern const char *const conditional_modifier
[16];
270 extern const char *const pred_ctrl_align16
[16];
272 /* Per-thread scratch space is a power-of-two multiple of 1KB. */
/* Round 'size' up to the next power of two, with a 1KB minimum, per the
 * comment above.
 * NOTE(review): the return-type line and the function's braces are
 * elided in this extraction. */
274 brw_get_scratch_size(int size
)
276 return MAX2(1024, util_next_power_of_two(size
));
/* NOTE(review): the comment delimiters and the struct's braces (and
 * possibly further members) are elided from this extraction. */
280 * Scratch data used when compiling a GLSL geometry shader.
282 struct brw_gs_compile
/* Program key and input VUE layout for the GS being compiled. */
284 struct brw_gs_prog_key key
;
285 struct brw_vue_map input_vue_map
;
/* Control-data header accounting for the geometry shader. */
287 unsigned control_data_bits_per_vertex
;
288 unsigned control_data_header_size_bits
;
/* Map a NIR atomic-counter intrinsic to its hardware atomic operation
 * encoding -- presumably; confirm in the defining .cpp. */
291 unsigned get_atomic_counter_op(nir_intrinsic_op op
);