/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef BRW_VEC4_H
#define BRW_VEC4_H

#include <stdint.h>
#include "brw_shader.h"
#include "main/compiler.h"
#include "program/hash_table.h"
#include "brw_program.h"

#ifdef __cplusplus
#include "brw_ir_vec4.h"

extern "C" {
#endif

#include "brw_context.h"
#include "brw_eu.h"
#include "intel_asm_annotation.h"

#ifdef __cplusplus
}; /* extern "C" */
#endif

#include "glsl/ir.h"


struct brw_vec4_compile {
   GLuint last_scratch; /**< measured in 32-byte (register size) units */
};

#ifdef __cplusplus
extern "C" {
#endif

void
brw_vue_setup_prog_key_for_precompile(struct gl_context *ctx,
                                      struct brw_vue_prog_key *key,
                                      GLuint id, struct gl_program *prog);

#ifdef __cplusplus
} /* extern "C" */

namespace brw {

class vec4_live_variables;

/**
 * The vertex shader front-end.
 *
 * Translates either GLSL IR or Mesa IR (for ARB_vertex_program and
 * fixed-function) into VS IR.
 */
class vec4_visitor : public backend_visitor
{
public:
   vec4_visitor(struct brw_context *brw,
                struct brw_vec4_compile *c,
                struct gl_program *prog,
                const struct brw_vue_prog_key *key,
                struct brw_vue_prog_data *prog_data,
                struct gl_shader_program *shader_prog,
                gl_shader_stage stage,
                void *mem_ctx,
                bool no_spills,
                shader_time_shader_type st_base,
                shader_time_shader_type st_written,
                shader_time_shader_type st_reset);
   ~vec4_visitor();

   dst_reg dst_null_f()
   {
      return dst_reg(brw_null_reg());
   }

   dst_reg dst_null_d()
   {
      return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   }

   dst_reg dst_null_ud()
   {
      return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));
   }

   struct brw_vec4_compile * const c;
   const struct brw_vue_prog_key * const key;
   struct brw_vue_prog_data * const prog_data;
   unsigned int sanity_param_count;

   char *fail_msg;
   bool failed;

   /**
    * GLSL IR currently being processed, which is associated with our
    * driver IR instructions for debugging purposes.
    */
   const void *base_ir;
   const char *current_annotation;

   int first_non_payload_grf;
   unsigned int max_grf;
   int *virtual_grf_start;
   int *virtual_grf_end;
   brw::vec4_live_variables *live_intervals;
   dst_reg userplane[MAX_CLIP_PLANES];

   dst_reg *variable_storage(ir_variable *var);

   void reladdr_to_temp(ir_instruction *ir, src_reg *reg, int *num_reladdr);

   bool need_all_constants_in_pull_buffer;

   /**
    * \name Visit methods
    *
    * As typical for the visitor pattern, there must be one \c visit method for
    * each concrete subclass of \c ir_instruction. Virtual base classes within
    * the hierarchy should not have \c visit methods.
    */
   /*@{*/
   virtual void visit(ir_variable *);
   virtual void visit(ir_loop *);
   virtual void visit(ir_loop_jump *);
   virtual void visit(ir_function_signature *);
   virtual void visit(ir_function *);
   virtual void visit(ir_expression *);
   virtual void visit(ir_swizzle *);
   virtual void visit(ir_dereference_variable *);
   virtual void visit(ir_dereference_array *);
   virtual void visit(ir_dereference_record *);
   virtual void visit(ir_assignment *);
   virtual void visit(ir_constant *);
   virtual void visit(ir_call *);
   virtual void visit(ir_return *);
   virtual void visit(ir_discard *);
   virtual void visit(ir_texture *);
   virtual void visit(ir_if *);
   virtual void visit(ir_emit_vertex *);
   virtual void visit(ir_end_primitive *);
   /*@}*/
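
   /*
    * Illustrative sketch (assumed, simplified; see the .cpp implementation
    * for the real code): the IR walker reaches these overloads through the
    * usual accept()/visit() double dispatch, roughly:
    *
    *    void vec4_visitor::visit_instructions(const exec_list *list)
    *    {
    *       foreach_in_list(ir_instruction, ir, list) {
    *          base_ir = ir;
    *          ir->accept(this);   // calls the matching visit() overload
    *       }
    *    }
    */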

   src_reg result;

   /* Regs for vertex results.  Generated at ir_variable visiting time
    * for the ir->locations used.
    */
   dst_reg output_reg[BRW_VARYING_SLOT_COUNT];
   const char *output_reg_annotation[BRW_VARYING_SLOT_COUNT];
   int *uniform_size;
   int *uniform_vector_size;
   int uniform_array_size; /**< Size of the uniform_[vector_]size arrays */
   int uniforms;

   src_reg shader_start_time;

   struct hash_table *variable_ht;

   bool run(void);
   void fail(const char *msg, ...);

   void setup_uniform_clipplane_values();
   void setup_uniform_values(ir_variable *ir);
   void setup_builtin_uniform_values(ir_variable *ir);
   int setup_uniforms(int payload_reg);
   bool reg_allocate_trivial();
   bool reg_allocate();
   void evaluate_spill_costs(float *spill_costs, bool *no_spill);
   int choose_spill_reg(struct ra_graph *g);
   void spill_reg(int spill_reg);
   void move_grf_array_access_to_scratch();
   void move_uniform_array_access_to_pull_constants();
   void move_push_constants_to_pull_constants();
   void split_uniform_registers();
   void pack_uniform_registers();
   void calculate_live_intervals();
   void invalidate_live_intervals();
   void split_virtual_grfs();
   bool opt_vector_float();
   bool opt_reduce_swizzle();
   bool dead_code_eliminate();
   bool virtual_grf_interferes(int a, int b);
   bool opt_copy_propagation(bool do_constant_prop = true);
   bool opt_cse_local(bblock_t *block);
   bool opt_cse();
   bool opt_algebraic();
   bool opt_register_coalesce();
   bool is_dep_ctrl_unsafe(const vec4_instruction *inst);
   void opt_set_dependency_control();
   void opt_schedule_instructions();

   vec4_instruction *emit(vec4_instruction *inst);

   vec4_instruction *emit(enum opcode opcode);
   vec4_instruction *emit(enum opcode opcode, const dst_reg &dst);
   vec4_instruction *emit(enum opcode opcode, const dst_reg &dst,
                          const src_reg &src0);
   vec4_instruction *emit(enum opcode opcode, const dst_reg &dst,
                          const src_reg &src0, const src_reg &src1);
   vec4_instruction *emit(enum opcode opcode, const dst_reg &dst,
                          const src_reg &src0, const src_reg &src1,
                          const src_reg &src2);

   vec4_instruction *emit_before(bblock_t *block,
                                 vec4_instruction *inst,
                                 vec4_instruction *new_inst);

#define EMIT1(op) vec4_instruction *op(const dst_reg &, const src_reg &);
#define EMIT2(op) vec4_instruction *op(const dst_reg &, const src_reg &, const src_reg &);
#define EMIT3(op) vec4_instruction *op(const dst_reg &, const src_reg &, const src_reg &, const src_reg &);
   EMIT1(MOV)
   EMIT1(NOT)
   EMIT1(RNDD)
   EMIT1(RNDE)
   EMIT1(RNDZ)
   EMIT1(FRC)
   EMIT1(F32TO16)
   EMIT1(F16TO32)
   EMIT2(ADD)
   EMIT2(MUL)
   EMIT2(MACH)
   EMIT2(MAC)
   EMIT2(AND)
   EMIT2(OR)
   EMIT2(XOR)
   EMIT2(DP3)
   EMIT2(DP4)
   EMIT2(DPH)
   EMIT2(SHL)
   EMIT2(SHR)
   EMIT2(ASR)
   vec4_instruction *CMP(dst_reg dst, src_reg src0, src_reg src1,
                         enum brw_conditional_mod condition);
   vec4_instruction *IF(src_reg src0, src_reg src1,
                        enum brw_conditional_mod condition);
   vec4_instruction *IF(enum brw_predicate predicate);
   EMIT1(PULL_CONSTANT_LOAD)
   EMIT1(SCRATCH_READ)
   EMIT2(SCRATCH_WRITE)
   EMIT3(LRP)
   EMIT1(BFREV)
   EMIT3(BFE)
   EMIT2(BFI1)
   EMIT3(BFI2)
   EMIT1(FBH)
   EMIT1(FBL)
   EMIT1(CBIT)
   EMIT3(MAD)
   EMIT2(ADDC)
   EMIT2(SUBB)
#undef EMIT1
#undef EMIT2
#undef EMIT3
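
   /*
    * Illustrative sketch (assumed usage, not part of this header): the ALU
    * helpers above only construct a vec4_instruction; callers append it to
    * the instruction stream with emit(), typically along the lines of:
    *
    *    vec4_instruction *inst = emit(ADD(dst, src0, src1));
    *    inst->saturate = true;
    *
    * or simply emit(MOV(dst, src)) when no extra flags are needed.
    */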

   int implied_mrf_writes(vec4_instruction *inst);

   bool try_rewrite_rhs_to_dst(ir_assignment *ir,
                               dst_reg dst,
                               src_reg src,
                               vec4_instruction *pre_rhs_inst,
                               vec4_instruction *last_rhs_inst);

   /** Walks an exec_list of ir_instruction and sends it through this visitor. */
   void visit_instructions(const exec_list *list);

   void emit_vp_sop(enum brw_conditional_mod condmod, dst_reg dst,
                    src_reg src0, src_reg src1, src_reg one);

   void emit_bool_to_cond_code(ir_rvalue *ir, enum brw_predicate *predicate);
   void emit_if_gen6(ir_if *ir);

   void emit_minmax(enum brw_conditional_mod conditionalmod, dst_reg dst,
                    src_reg src0, src_reg src1);

   void emit_lrp(const dst_reg &dst,
                 const src_reg &x, const src_reg &y, const src_reg &a);

   void emit_block_move(dst_reg *dst, src_reg *src,
                        const struct glsl_type *type, brw_predicate predicate);

   void emit_constant_values(dst_reg *dst, ir_constant *value);

   /**
    * Emit the correct dot-product instruction for the type of arguments
    */
   void emit_dp(dst_reg dst, src_reg src0, src_reg src1, unsigned elements);

   void emit_scalar(ir_instruction *ir, enum prog_opcode op,
                    dst_reg dst, src_reg src0);

   void emit_scalar(ir_instruction *ir, enum prog_opcode op,
                    dst_reg dst, src_reg src0, src_reg src1);

   void emit_scs(ir_instruction *ir, enum prog_opcode op,
                 dst_reg dst, const src_reg &src);

   src_reg fix_3src_operand(src_reg src);

   void emit_math(enum opcode opcode, const dst_reg &dst, const src_reg &src0,
                  const src_reg &src1 = src_reg());
   src_reg fix_math_operand(src_reg src);

   void emit_pack_half_2x16(dst_reg dst, src_reg src0);
   void emit_unpack_half_2x16(dst_reg dst, src_reg src0);
   void emit_unpack_unorm_4x8(const dst_reg &dst, src_reg src0);
   void emit_unpack_snorm_4x8(const dst_reg &dst, src_reg src0);
   void emit_pack_unorm_4x8(const dst_reg &dst, const src_reg &src0);
   void emit_pack_snorm_4x8(const dst_reg &dst, const src_reg &src0);

   uint32_t gather_channel(ir_texture *ir, uint32_t sampler);
   src_reg emit_mcs_fetch(ir_texture *ir, src_reg coordinate, src_reg sampler);
   void emit_gen6_gather_wa(uint8_t wa, dst_reg dst);
   void swizzle_result(ir_texture *ir, src_reg orig_val, uint32_t sampler);

   void emit_ndc_computation();
   void emit_psiz_and_flags(dst_reg reg);
   void emit_clip_distances(dst_reg reg, int offset);
   vec4_instruction *emit_generic_urb_slot(dst_reg reg, int varying);
   void emit_urb_slot(dst_reg reg, int varying);

   void emit_shader_time_begin();
   void emit_shader_time_end();
   void emit_shader_time_write(enum shader_time_shader_type type,
                               src_reg value);

   void emit_untyped_atomic(unsigned atomic_op, unsigned surf_index,
                            dst_reg dst, src_reg offset, src_reg src0,
                            src_reg src1);

   void emit_untyped_surface_read(unsigned surf_index, dst_reg dst,
                                  src_reg offset);

   src_reg get_scratch_offset(bblock_t *block, vec4_instruction *inst,
                              src_reg *reladdr, int reg_offset);
   src_reg get_pull_constant_offset(bblock_t *block, vec4_instruction *inst,
                                    src_reg *reladdr, int reg_offset);
   void emit_scratch_read(bblock_t *block, vec4_instruction *inst,
                          dst_reg dst,
                          src_reg orig_src,
                          int base_offset);
   void emit_scratch_write(bblock_t *block, vec4_instruction *inst,
                           int base_offset);
   void emit_pull_constant_load(bblock_t *block, vec4_instruction *inst,
                                dst_reg dst,
                                src_reg orig_src,
                                int base_offset);

   bool try_emit_mad(ir_expression *ir);
   bool try_emit_b2f_of_compare(ir_expression *ir);
   void resolve_ud_negate(src_reg *reg);
   void resolve_bool_comparison(ir_rvalue *rvalue, src_reg *reg);

   src_reg get_timestamp();

   bool process_move_condition(ir_rvalue *ir);

   void dump_instruction(backend_instruction *inst);
   void dump_instruction(backend_instruction *inst, FILE *file);

   void visit_atomic_counter_intrinsic(ir_call *ir);

protected:
   void emit_vertex();
   void lower_attributes_to_hw_regs(const int *attribute_map,
                                    bool interleaved);
   void setup_payload_interference(struct ra_graph *g, int first_payload_node,
                                   int reg_node_count);
   virtual dst_reg *make_reg_for_system_value(ir_variable *ir) = 0;
   virtual void assign_binding_table_offsets();
   virtual void setup_payload() = 0;
   virtual void emit_prolog() = 0;
   virtual void emit_program_code() = 0;
   virtual void emit_thread_end() = 0;
   virtual void emit_urb_write_header(int mrf) = 0;
   virtual vec4_instruction *emit_urb_write_opcode(bool complete) = 0;
   virtual int compute_array_stride(ir_dereference_array *ir);

private:
   /**
    * If true, then register allocation should fail instead of spilling.
    */
   const bool no_spills;

   const shader_time_shader_type st_base;
   const shader_time_shader_type st_written;
   const shader_time_shader_type st_reset;
};
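
/*
 * Illustrative sketch (assumed, simplified): concrete subclasses of
 * vec4_visitor (the VS and GS visitors) are driven by the compile entry
 * points roughly like this, with failed/fail_msg reporting why translation
 * gave up:
 *
 *    vec4_vs_visitor v(...);   // constructor arguments omitted
 *    if (!v.run()) {
 *       if (shader_prog)
 *          ralloc_strcat(&shader_prog->InfoLog, v.fail_msg);
 *       return NULL;
 *    }
 *
 * The subclasses themselves are declared in their own headers, not here.
 */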


/**
 * The vertex shader code generator.
 *
 * Translates VS IR to actual i965 assembly code.
 */
class vec4_generator
{
public:
   vec4_generator(struct brw_context *brw,
                  struct gl_shader_program *shader_prog,
                  struct gl_program *prog,
                  struct brw_vue_prog_data *prog_data,
                  void *mem_ctx,
                  bool debug_flag,
                  const char *stage_name,
                  const char *stage_abbrev);
   ~vec4_generator();

   const unsigned *generate_assembly(const cfg_t *cfg, unsigned *asm_size);

private:
   void generate_code(const cfg_t *cfg);

   void generate_math1_gen4(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src);
   void generate_math2_gen4(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src0,
                            struct brw_reg src1);
   void generate_math_gen6(vec4_instruction *inst,
                           struct brw_reg dst,
                           struct brw_reg src0,
                           struct brw_reg src1);

   void generate_tex(vec4_instruction *inst,
                     struct brw_reg dst,
                     struct brw_reg src,
                     struct brw_reg sampler_index);

   void generate_vs_urb_write(vec4_instruction *inst);
   void generate_gs_urb_write(vec4_instruction *inst);
   void generate_gs_urb_write_allocate(vec4_instruction *inst);
   void generate_gs_thread_end(vec4_instruction *inst);
   void generate_gs_set_write_offset(struct brw_reg dst,
                                     struct brw_reg src0,
                                     struct brw_reg src1);
   void generate_gs_set_vertex_count(struct brw_reg dst,
                                     struct brw_reg src);
   void generate_gs_svb_write(vec4_instruction *inst,
                              struct brw_reg dst,
                              struct brw_reg src0,
                              struct brw_reg src1);
   void generate_gs_svb_set_destination_index(vec4_instruction *inst,
                                              struct brw_reg dst,
                                              struct brw_reg src);
   void generate_gs_set_dword_2(struct brw_reg dst, struct brw_reg src);
   void generate_gs_prepare_channel_masks(struct brw_reg dst);
   void generate_gs_set_channel_masks(struct brw_reg dst, struct brw_reg src);
   void generate_gs_get_instance_id(struct brw_reg dst);
   void generate_gs_ff_sync_set_primitives(struct brw_reg dst,
                                           struct brw_reg src0,
                                           struct brw_reg src1,
                                           struct brw_reg src2);
   void generate_gs_ff_sync(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src0,
                            struct brw_reg src1);
   void generate_gs_set_primitive_id(struct brw_reg dst);
   void generate_oword_dual_block_offsets(struct brw_reg m1,
                                          struct brw_reg index);
   void generate_scratch_write(vec4_instruction *inst,
                               struct brw_reg dst,
                               struct brw_reg src,
                               struct brw_reg index);
   void generate_scratch_read(vec4_instruction *inst,
                              struct brw_reg dst,
                              struct brw_reg index);
   void generate_pull_constant_load(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg index,
                                    struct brw_reg offset);
   void generate_pull_constant_load_gen7(vec4_instruction *inst,
                                         struct brw_reg dst,
                                         struct brw_reg surf_index,
                                         struct brw_reg offset);
   void generate_unpack_flags(struct brw_reg dst);

   void generate_untyped_atomic(vec4_instruction *inst,
                                struct brw_reg dst,
                                struct brw_reg atomic_op,
                                struct brw_reg surf_index);

   void generate_untyped_surface_read(vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg surf_index);

   struct brw_context *brw;

   struct brw_compile *p;

   struct gl_shader_program *shader_prog;
   const struct gl_program *prog;

   struct brw_vue_prog_data *prog_data;

   void *mem_ctx;
   const char *stage_name;
   const char *stage_abbrev;
   const bool debug_flag;
};
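
/*
 * Illustrative sketch (assumed usage): once the visitor has produced and
 * optimized the IR, the compile path hands its CFG to the generator and
 * gets back the final i965 assembly, roughly:
 *
 *    vec4_generator g(brw, shader_prog, prog, &prog_data->base, mem_ctx,
 *                     debug_enabled, "vertex", "VS");
 *    unsigned asm_size;
 *    const unsigned *assembly = g.generate_assembly(v.cfg, &asm_size);
 *
 * debug_enabled and the prog_data type are placeholders here; see the
 * brw_vs_emit() path for the real call site.
 */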

} /* namespace brw */
#endif /* __cplusplus */

#endif /* BRW_VEC4_H */