/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <stdint.h>
#include <stdio.h>

#include "brw_compiler.h"
#include "brw_eu_defines.h"
#include "brw_ir_allocator.h"
#include "compiler/nir/nir.h"
/* NOTE(review): presumed compiler-backend limits — MAX_SAMPLER_MESSAGE_SIZE
 * bounds sampler message payloads and MAX_VGRF_SIZE bounds how large a single
 * virtual register may be; confirm against the users of these macros.
 */
#define MAX_SAMPLER_MESSAGE_SIZE 11
#define MAX_VGRF_SIZE 16
42 struct backend_reg
: private brw_reg
45 backend_reg(const struct brw_reg
®
) : brw_reg(reg
) {}
47 const brw_reg
&as_brw_reg() const
49 assert(file
== ARF
|| file
== FIXED_GRF
|| file
== MRF
|| file
== IMM
);
51 return static_cast<const brw_reg
&>(*this);
56 assert(file
== ARF
|| file
== FIXED_GRF
|| file
== MRF
|| file
== IMM
);
58 return static_cast<brw_reg
&>(*this);
61 bool equals(const backend_reg
&r
) const;
62 bool negative_equals(const backend_reg
&r
) const;
66 bool is_negative_one() const;
68 bool is_accumulator() const;
70 /** Offset from the start of the (virtual) register in bytes. */
75 using brw_reg::negate
;
77 using brw_reg::address_mode
;
81 using brw_reg::swizzle
;
82 using brw_reg::writemask
;
83 using brw_reg::indirect_offset
;
84 using brw_reg::vstride
;
86 using brw_reg::hstride
;
101 struct backend_instruction
: public exec_node
{
102 bool is_3src(const struct gen_device_info
*devinfo
) const;
104 bool is_math() const;
105 bool is_control_flow() const;
106 bool is_commutative() const;
107 bool can_do_source_mods() const;
108 bool can_do_saturate() const;
109 bool can_do_cmod() const;
110 bool reads_accumulator_implicitly() const;
111 bool writes_accumulator_implicitly(const struct gen_device_info
*devinfo
) const;
113 void remove(bblock_t
*block
);
114 void insert_after(bblock_t
*block
, backend_instruction
*inst
);
115 void insert_before(bblock_t
*block
, backend_instruction
*inst
);
116 void insert_before(bblock_t
*block
, exec_list
*list
);
119 * True if the instruction has side effects other than writing to
120 * its destination registers. You are expected not to reorder or
121 * optimize these out unless you know what you are doing.
123 bool has_side_effects() const;
126 * True if the instruction might be affected by side effects of other
129 bool is_volatile() const;
131 struct backend_instruction
{
132 struct exec_node link
;
135 * Annotation for the generated IR. One of the two can be set.
138 const char *annotation
;
142 * Execution size of the instruction. This is used by the generator to
143 * generate the correct binary for the given instruction. Current valid
144 * values are 1, 4, 8, 16, 32.
149 * Channel group from the hardware execution and predication mask that
150 * should be applied to the instruction. The subset of channel enable
151 * signals (calculated from the EU control flow and predication state)
152 * given by [group, group + exec_size) will be used to mask GRF writes and
153 * any other side effects of the instruction.
157 uint32_t offset
; /**< spill/unspill offset or texture offset bitfield */
158 uint8_t mlen
; /**< SEND message length */
159 uint8_t ex_mlen
; /**< SENDS extended message length */
160 int8_t base_mrf
; /**< First MRF in the SEND message, if mlen is nonzero. */
161 uint8_t target
; /**< MRT target. */
162 uint8_t sfid
; /**< SFID for SEND instructions */
163 uint32_t desc
; /**< SEND[S] message descriptor immediate */
164 unsigned size_written
; /**< Data written to the destination register in bytes. */
166 enum opcode opcode
; /* BRW_OPCODE_* or FS_OPCODE_* */
167 enum brw_conditional_mod conditional_mod
; /**< BRW_CONDITIONAL_* */
168 enum brw_predicate predicate
;
169 bool predicate_inverse
:1;
170 bool writes_accumulator
:1; /**< instruction implicitly writes accumulator */
171 bool force_writemask_all
:1;
175 bool shadow_compare
:1;
176 bool check_tdr
:1; /**< Only valid for SEND; turns it into a SENDC */
177 bool send_has_side_effects
:1; /**< Only valid for SHADER_OPCODE_SEND */
178 bool send_is_volatile
:1; /**< Only valid for SHADER_OPCODE_SEND */
181 /* Chooses which flag subregister (f0.0 to f1.1) is used for conditional
182 * mod and predication.
184 unsigned flag_subreg
:2;
186 /** The number of hardware registers used for a message header. */
/* Mode selector for the instruction scheduler — presumed from the name;
 * confirm against the scheduler implementation.
 */
enum instruction_scheduler_mode {
   /* NOTE(review): the other enumerators of this enum appear to have been
    * lost in this chunk; only SCHEDULE_PRE_NON_LIFO survives.  Restoring the
    * full list would change enumerator values — confirm against the
    * original header before relying on this value.
    */
   SCHEDULE_PRE_NON_LIFO,
};
199 struct backend_shader
{
202 backend_shader(const struct brw_compiler
*compiler
,
205 const nir_shader
*shader
,
206 struct brw_stage_prog_data
*stage_prog_data
);
209 virtual ~backend_shader();
211 const struct brw_compiler
*compiler
;
212 void *log_data
; /* Passed to compiler->*_log functions */
214 const struct gen_device_info
* const devinfo
;
215 const nir_shader
*nir
;
216 struct brw_stage_prog_data
* const stage_prog_data
;
218 /** ralloc context for temporary data used during compile */
222 * List of either fs_inst or vec4_instruction (inheriting from
223 * backend_instruction)
225 exec_list instructions
;
229 gl_shader_stage stage
;
231 const char *stage_name
;
232 const char *stage_abbrev
;
234 brw::simple_allocator alloc
;
236 virtual void dump_instruction(backend_instruction
*inst
) = 0;
237 virtual void dump_instruction(backend_instruction
*inst
, FILE *file
) = 0;
238 virtual void dump_instructions();
239 virtual void dump_instructions(const char *name
);
241 void calculate_cfg();
243 virtual void invalidate_live_intervals() = 0;
246 bool brw_texture_offset(const nir_tex_instr
*tex
, unsigned src
,
247 uint32_t *offset_bits
);
250 struct backend_shader
;
251 #endif /* __cplusplus */
253 enum brw_reg_type
brw_type_for_base_type(const struct glsl_type
*type
);
254 enum brw_conditional_mod
brw_conditional_for_comparison(unsigned int op
);
255 uint32_t brw_math_function(enum opcode op
);
256 const char *brw_instruction_name(const struct gen_device_info
*devinfo
,
258 bool brw_saturate_immediate(enum brw_reg_type type
, struct brw_reg
*reg
);
259 bool brw_negate_immediate(enum brw_reg_type type
, struct brw_reg
*reg
);
260 bool brw_abs_immediate(enum brw_reg_type type
, struct brw_reg
*reg
);
262 bool opt_predicated_break(struct backend_shader
*s
);
/* brw_fs_reg_allocate.cpp */
void brw_fs_alloc_reg_sets(struct brw_compiler *compiler);

/* brw_vec4_reg_allocate.cpp */
void brw_vec4_alloc_reg_set(struct brw_compiler *compiler);
/* Printable-name tables; presumably indexed by BRW_CONDITIONAL_* and
 * BRW_PREDICATE_* values respectively — confirm against their definitions.
 */
extern const char *const conditional_modifier[16];
extern const char *const pred_ctrl_align16[16];
278 /* Per-thread scratch space is a power-of-two multiple of 1KB. */
280 brw_get_scratch_size(int size
)
282 return MAX2(1024, util_next_power_of_two(size
));
286 * Scratch data used when compiling a GLSL geometry shader.
288 struct brw_gs_compile
290 struct brw_gs_prog_key key
;
291 struct brw_vue_map input_vue_map
;
293 unsigned control_data_bits_per_vertex
;
294 unsigned control_data_header_size_bits
;
301 #endif /* BRW_SHADER_H */