i965: Allocate vec4_visitor's uniform_size and uniform_vector_size arrays dynamically.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_vec4.h
1 /*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef BRW_VEC4_H
25 #define BRW_VEC4_H
26
27 #include <stdint.h>
28 #include "brw_shader.h"
29 #include "main/compiler.h"
30 #include "program/hash_table.h"
31 #include "brw_program.h"
32
33 #ifdef __cplusplus
34 extern "C" {
35 #endif
36
37 #include "brw_context.h"
38 #include "brw_eu.h"
39
40 #ifdef __cplusplus
41 } /* extern "C" */
42 #include "gen8_generator.h"
43 #endif
44
45 #include "glsl/ir.h"
46
47
48 struct brw_vec4_compile {
49 GLuint last_scratch; /**< measured in 32-byte (register size) units */
50 };
51
52
53 struct brw_vec4_prog_key {
54 GLuint program_string_id;
55
56 /**
57 * True if at least one clip flag is enabled, regardless of whether the
58 * shader uses clip planes or gl_ClipDistance.
59 */
60 GLuint userclip_active:1;
61
62 /**
63 * How many user clipping planes are being uploaded to the vertex shader as
64 * push constants.
65 */
66 GLuint nr_userclip_plane_consts:4;
67
68 GLuint clamp_vertex_color:1;
69
70 struct brw_sampler_prog_key_data tex;
71 };
72
73
74 #ifdef __cplusplus
75 extern "C" {
76 #endif
77
78 void
79 brw_vec4_setup_prog_key_for_precompile(struct gl_context *ctx,
80 struct brw_vec4_prog_key *key,
81 GLuint id, struct gl_program *prog);
82
83 #ifdef __cplusplus
84 } /* extern "C" */
85
86 namespace brw {
87
88 class dst_reg;
89
90 unsigned
91 swizzle_for_size(int size);
92
93 class reg
94 {
95 public:
96 /** Register file: GRF, MRF, IMM. */
97 enum register_file file;
98 /** Virtual register number.  0 = fixed hw reg. */
99 int reg;
100 /** Offset within the virtual register. */
101 int reg_offset;
102 /** Register type. BRW_REGISTER_TYPE_* */
103 int type;
104 struct brw_reg fixed_hw_reg;
105
106 /** Value for file == IMM. */
107 union {
108 int32_t i;
109 uint32_t u;
110 float f;
111 } imm;
112 };
113
114 class src_reg : public reg
115 {
116 public:
117 DECLARE_RALLOC_CXX_OPERATORS(src_reg)
118
119 void init();
120
121 src_reg(register_file file, int reg, const glsl_type *type);
122 src_reg();
123 src_reg(float f);
124 src_reg(uint32_t u);
125 src_reg(int32_t i);
126 src_reg(struct brw_reg reg);
127
128 bool equals(src_reg *r);
129 bool is_zero() const;
130 bool is_one() const;
131
132 src_reg(class vec4_visitor *v, const struct glsl_type *type);
133
134 explicit src_reg(dst_reg reg);
135
136 GLuint swizzle; /**< BRW_SWIZZLE_XYZW macros from brw_reg.h. */
137 bool negate;
138 bool abs;
139
140 src_reg *reladdr;
141 };
142
143 static inline src_reg
144 retype(src_reg reg, unsigned type)
145 {
146 reg.fixed_hw_reg.type = reg.type = type;
147 return reg;
148 }
149
150 static inline src_reg
151 offset(src_reg reg, unsigned delta)
152 {
153 assert(delta == 0 || (reg.file != HW_REG && reg.file != IMM));
154 reg.reg_offset += delta;
155 return reg;
156 }
157
158 /**
159 * Reswizzle a given source register.
160 * \sa brw_swizzle().
161 */
162 static inline src_reg
163 swizzle(src_reg reg, unsigned swizzle)
164 {
165 assert(reg.file != HW_REG);
166 reg.swizzle = BRW_SWIZZLE4(
167 BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 0)),
168 BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 1)),
169 BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 2)),
170 BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 3)));
171 return reg;
172 }
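/* A short usage sketch (assuming the BRW_SWIZZLE_* helpers from brw_reg.h):
 * applying a swizzle composes it with whatever swizzle the register already
 * carries.  If a register currently reads .yzwx, broadcasting channel X of
 * that view picks up the underlying .y:
 *
 *    src_reg r = ...;                            // some GRF source
 *    r.swizzle = BRW_SWIZZLE4(1, 2, 3, 0);       // .yzwx
 *    src_reg s = swizzle(r, BRW_SWIZZLE_XXXX);   // s.swizzle is now .yyyy
 */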
173
174 static inline src_reg
175 negate(src_reg reg)
176 {
177 assert(reg.file != HW_REG && reg.file != IMM);
178 reg.negate = !reg.negate;
179 return reg;
180 }
181
182 class dst_reg : public reg
183 {
184 public:
185 DECLARE_RALLOC_CXX_OPERATORS(dst_reg)
186
187 void init();
188
189 dst_reg();
190 dst_reg(register_file file, int reg);
191 dst_reg(register_file file, int reg, const glsl_type *type, int writemask);
192 dst_reg(struct brw_reg reg);
193 dst_reg(class vec4_visitor *v, const struct glsl_type *type);
194
195 explicit dst_reg(src_reg reg);
196
197 int writemask; /**< Bitfield of WRITEMASK_[XYZW] */
198
199 src_reg *reladdr;
200 };
201
202 static inline dst_reg
203 retype(dst_reg reg, unsigned type)
204 {
205 reg.fixed_hw_reg.type = reg.type = type;
206 return reg;
207 }
208
209 static inline dst_reg
210 offset(dst_reg reg, unsigned delta)
211 {
212 assert(delta == 0 || (reg.file != HW_REG && reg.file != IMM));
213 reg.reg_offset += delta;
214 return reg;
215 }
216
217 static inline dst_reg
218 writemask(dst_reg reg, unsigned mask)
219 {
220 assert(reg.file != HW_REG && reg.file != IMM);
221 assert((reg.writemask & mask) != 0);
222 reg.writemask &= mask;
223 return reg;
224 }
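/* e.g. writemask(reg, WRITEMASK_XY) narrows a .xyzw destination so that only
 * .xy is written; the assert above requires that at least one of the
 * requested channels was already enabled in the incoming writemask.
 */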
225
226 class vec4_instruction : public backend_instruction {
227 public:
228 DECLARE_RALLOC_CXX_OPERATORS(vec4_instruction)
229
230 vec4_instruction(vec4_visitor *v, enum opcode opcode,
231 dst_reg dst = dst_reg(),
232 src_reg src0 = src_reg(),
233 src_reg src1 = src_reg(),
234 src_reg src2 = src_reg());
235
236 struct brw_reg get_dst(void);
237 struct brw_reg get_src(const struct brw_vec4_prog_data *prog_data, int i);
238
239 dst_reg dst;
240 src_reg src[3];
241
242 bool saturate;
243 bool force_writemask_all;
244 bool no_dd_clear, no_dd_check;
245
246 int conditional_mod; /**< BRW_CONDITIONAL_* */
247
248 int sampler;
249 uint32_t texture_offset; /**< Texture Offset bitfield */
250 int target; /**< MRT target. */
251 bool shadow_compare;
252
253 enum brw_urb_write_flags urb_write_flags;
254 bool header_present;
255 int mlen; /**< SEND message length */
256 int base_mrf; /**< First MRF in the SEND message, if mlen is nonzero. */
257
258 uint32_t offset; /* spill/unspill offset */
259 /** @{
260 * Annotation for the generated IR. One of the two can be set.
261 */
262 const void *ir;
263 const char *annotation;
264
265 bool is_send_from_grf();
266 bool can_reswizzle_dst(int dst_writemask, int swizzle, int swizzle_mask);
267 void reswizzle_dst(int dst_writemask, int swizzle);
268
269 bool depends_on_flags()
270 {
271 return predicate || opcode == VS_OPCODE_UNPACK_FLAGS_SIMD4X2;
272 }
273 };
274
275 /**
276 * The vertex shader front-end.
277 *
278 * Translates either GLSL IR or Mesa IR (for ARB_vertex_program and
279 * fixed-function) into VS IR.
280 */
281 class vec4_visitor : public backend_visitor
282 {
283 public:
284 vec4_visitor(struct brw_context *brw,
285 struct brw_vec4_compile *c,
286 struct gl_program *prog,
287 const struct brw_vec4_prog_key *key,
288 struct brw_vec4_prog_data *prog_data,
289 struct gl_shader_program *shader_prog,
290 struct brw_shader *shader,
291 void *mem_ctx,
292 bool debug_flag,
293 bool no_spills,
294 shader_time_shader_type st_base,
295 shader_time_shader_type st_written,
296 shader_time_shader_type st_reset);
297 ~vec4_visitor();
298
299 dst_reg dst_null_f()
300 {
301 return dst_reg(brw_null_reg());
302 }
303
304 dst_reg dst_null_d()
305 {
306 return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
307 }
308
309 dst_reg dst_null_ud()
310 {
311 return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));
312 }
313
314 struct brw_vec4_compile *c;
315 const struct brw_vec4_prog_key *key;
316 struct brw_vec4_prog_data *prog_data;
317 unsigned int sanity_param_count;
318
319 char *fail_msg;
320 bool failed;
321
322 /**
323 * GLSL IR currently being processed, which is associated with our
324 * driver IR instructions for debugging purposes.
325 */
326 const void *base_ir;
327 const char *current_annotation;
328
329 int *virtual_grf_sizes;
330 int virtual_grf_count;
331 int virtual_grf_array_size;
332 int first_non_payload_grf;
333 unsigned int max_grf;
334 int *virtual_grf_start;
335 int *virtual_grf_end;
336 dst_reg userplane[MAX_CLIP_PLANES];
337
338 /**
339 * The size to use for an array with one element per
340 * reg_offset.
341 */
342 int virtual_grf_reg_count;
343 /** Per-virtual-grf indices into an array of size virtual_grf_reg_count */
344 int *virtual_grf_reg_map;
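/* Worked example of the mapping above, assuming three virtual GRFs of
 * sizes 1, 2 and 1 registers:
 *
 *    virtual_grf_sizes[]   = { 1, 2, 1 }
 *    virtual_grf_reg_map[] = { 0, 1, 3 }    // running sum of the sizes
 *    virtual_grf_reg_count = 4
 *
 * so reg_offset o within virtual GRF g indexes a per-reg_offset array at
 * virtual_grf_reg_map[g] + o.
 */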
345
346 bool live_intervals_valid;
347
348 dst_reg *variable_storage(ir_variable *var);
349
350 void reladdr_to_temp(ir_instruction *ir, src_reg *reg, int *num_reladdr);
351
352 bool need_all_constants_in_pull_buffer;
353
354 /**
355 * \name Visit methods
356 *
357 * As typical for the visitor pattern, there must be one \c visit method for
358 * each concrete subclass of \c ir_instruction. Virtual base classes within
359 * the hierarchy should not have \c visit methods.
360 */
361 /*@{*/
362 virtual void visit(ir_variable *);
363 virtual void visit(ir_loop *);
364 virtual void visit(ir_loop_jump *);
365 virtual void visit(ir_function_signature *);
366 virtual void visit(ir_function *);
367 virtual void visit(ir_expression *);
368 virtual void visit(ir_swizzle *);
369 virtual void visit(ir_dereference_variable *);
370 virtual void visit(ir_dereference_array *);
371 virtual void visit(ir_dereference_record *);
372 virtual void visit(ir_assignment *);
373 virtual void visit(ir_constant *);
374 virtual void visit(ir_call *);
375 virtual void visit(ir_return *);
376 virtual void visit(ir_discard *);
377 virtual void visit(ir_texture *);
378 virtual void visit(ir_if *);
379 virtual void visit(ir_emit_vertex *);
380 virtual void visit(ir_end_primitive *);
381 /*@}*/
382
383 src_reg result;
384
385 /* Regs for vertex results. Generated at ir_variable visiting time
386 * for the ir->locations that are used.
387 */
388 dst_reg output_reg[BRW_VARYING_SLOT_COUNT];
389 const char *output_reg_annotation[BRW_VARYING_SLOT_COUNT];
390 int *uniform_size;
391 int *uniform_vector_size;
392 int uniform_array_size; /**< Size of the uniform_[vector_]size arrays */
393 int uniforms;
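/* A minimal sketch of the dynamic setup the commit title refers to, assuming
 * a hypothetical expected_uniform_count bound and ralloc-backed storage
 * (rzalloc_array zero-fills, so never-visited entries stay 0):
 *
 *    this->uniform_array_size = MAX2(expected_uniform_count, 1);
 *    this->uniform_size =
 *       rzalloc_array(mem_ctx, int, this->uniform_array_size);
 *    this->uniform_vector_size =
 *       rzalloc_array(mem_ctx, int, this->uniform_array_size);
 */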
394
395 src_reg shader_start_time;
396
397 struct hash_table *variable_ht;
398
399 bool run(void);
400 void fail(const char *msg, ...);
401
402 int virtual_grf_alloc(int size);
403 void setup_uniform_clipplane_values();
404 void setup_uniform_values(ir_variable *ir);
405 void setup_builtin_uniform_values(ir_variable *ir);
406 int setup_uniforms(int payload_reg);
407 bool reg_allocate_trivial();
408 bool reg_allocate();
409 void evaluate_spill_costs(float *spill_costs, bool *no_spill);
410 int choose_spill_reg(struct ra_graph *g);
411 void spill_reg(int spill_reg);
412 void move_grf_array_access_to_scratch();
413 void move_uniform_array_access_to_pull_constants();
414 void move_push_constants_to_pull_constants();
415 void split_uniform_registers();
416 void pack_uniform_registers();
417 void calculate_live_intervals();
418 void invalidate_live_intervals();
419 void split_virtual_grfs();
420 bool dead_code_eliminate();
421 bool virtual_grf_interferes(int a, int b);
422 bool opt_copy_propagation();
423 bool opt_algebraic();
424 bool opt_register_coalesce();
425 void opt_set_dependency_control();
426 void opt_schedule_instructions();
427
428 bool can_do_source_mods(vec4_instruction *inst);
429
430 vec4_instruction *emit(vec4_instruction *inst);
431
432 vec4_instruction *emit(enum opcode opcode);
433
434 vec4_instruction *emit(enum opcode opcode, dst_reg dst);
435
436 vec4_instruction *emit(enum opcode opcode, dst_reg dst, src_reg src0);
437
438 vec4_instruction *emit(enum opcode opcode, dst_reg dst,
439 src_reg src0, src_reg src1);
440
441 vec4_instruction *emit(enum opcode opcode, dst_reg dst,
442 src_reg src0, src_reg src1, src_reg src2);
443
444 vec4_instruction *emit_before(vec4_instruction *inst,
445 vec4_instruction *new_inst);
446
447 vec4_instruction *MOV(dst_reg dst, src_reg src0);
448 vec4_instruction *NOT(dst_reg dst, src_reg src0);
449 vec4_instruction *RNDD(dst_reg dst, src_reg src0);
450 vec4_instruction *RNDE(dst_reg dst, src_reg src0);
451 vec4_instruction *RNDZ(dst_reg dst, src_reg src0);
452 vec4_instruction *FRC(dst_reg dst, src_reg src0);
453 vec4_instruction *F32TO16(dst_reg dst, src_reg src0);
454 vec4_instruction *F16TO32(dst_reg dst, src_reg src0);
455 vec4_instruction *ADD(dst_reg dst, src_reg src0, src_reg src1);
456 vec4_instruction *MUL(dst_reg dst, src_reg src0, src_reg src1);
457 vec4_instruction *MACH(dst_reg dst, src_reg src0, src_reg src1);
458 vec4_instruction *MAC(dst_reg dst, src_reg src0, src_reg src1);
459 vec4_instruction *AND(dst_reg dst, src_reg src0, src_reg src1);
460 vec4_instruction *OR(dst_reg dst, src_reg src0, src_reg src1);
461 vec4_instruction *XOR(dst_reg dst, src_reg src0, src_reg src1);
462 vec4_instruction *DP3(dst_reg dst, src_reg src0, src_reg src1);
463 vec4_instruction *DP4(dst_reg dst, src_reg src0, src_reg src1);
464 vec4_instruction *DPH(dst_reg dst, src_reg src0, src_reg src1);
465 vec4_instruction *SHL(dst_reg dst, src_reg src0, src_reg src1);
466 vec4_instruction *SHR(dst_reg dst, src_reg src0, src_reg src1);
467 vec4_instruction *ASR(dst_reg dst, src_reg src0, src_reg src1);
468 vec4_instruction *CMP(dst_reg dst, src_reg src0, src_reg src1,
469 uint32_t condition);
470 vec4_instruction *IF(src_reg src0, src_reg src1, uint32_t condition);
471 vec4_instruction *IF(uint32_t predicate);
472 vec4_instruction *PULL_CONSTANT_LOAD(dst_reg dst, src_reg index);
473 vec4_instruction *SCRATCH_READ(dst_reg dst, src_reg index);
474 vec4_instruction *SCRATCH_WRITE(dst_reg dst, src_reg src, src_reg index);
475 vec4_instruction *LRP(dst_reg dst, src_reg a, src_reg y, src_reg x);
476 vec4_instruction *BFREV(dst_reg dst, src_reg value);
477 vec4_instruction *BFE(dst_reg dst, src_reg bits, src_reg offset, src_reg value);
478 vec4_instruction *BFI1(dst_reg dst, src_reg bits, src_reg offset);
479 vec4_instruction *BFI2(dst_reg dst, src_reg bfi1_dst, src_reg insert, src_reg base);
480 vec4_instruction *FBH(dst_reg dst, src_reg value);
481 vec4_instruction *FBL(dst_reg dst, src_reg value);
482 vec4_instruction *CBIT(dst_reg dst, src_reg value);
483 vec4_instruction *MAD(dst_reg dst, src_reg c, src_reg b, src_reg a);
484 vec4_instruction *ADDC(dst_reg dst, src_reg src0, src_reg src1);
485 vec4_instruction *SUBB(dst_reg dst, src_reg src0, src_reg src1);
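/* The helpers above only construct a vec4_instruction; a typical caller
 * wraps them in emit() to append to the instruction stream, e.g.:
 *
 *    emit(MOV(dst, src0));
 *    emit(ADD(dst, src0, src1));
 *    vec4_instruction *inst =
 *       emit(CMP(dst_null_d(), a, b, BRW_CONDITIONAL_GE));
 */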
486
487 int implied_mrf_writes(vec4_instruction *inst);
488
489 bool try_rewrite_rhs_to_dst(ir_assignment *ir,
490 dst_reg dst,
491 src_reg src,
492 vec4_instruction *pre_rhs_inst,
493 vec4_instruction *last_rhs_inst);
494
495 bool try_copy_propagation(vec4_instruction *inst, int arg,
496 src_reg *values[4]);
497
498 /** Walks an exec_list of ir_instruction and sends it through this visitor. */
499 void visit_instructions(const exec_list *list);
500
501 void emit_vp_sop(uint32_t condmod, dst_reg dst,
502 src_reg src0, src_reg src1, src_reg one);
503
504 void emit_bool_to_cond_code(ir_rvalue *ir, uint32_t *predicate);
505 void emit_bool_comparison(unsigned int op, dst_reg dst, src_reg src0, src_reg src1);
506 void emit_if_gen6(ir_if *ir);
507
508 void emit_minmax(uint32_t condmod, dst_reg dst, src_reg src0, src_reg src1);
509
510 void emit_lrp(const dst_reg &dst,
511 const src_reg &x, const src_reg &y, const src_reg &a);
512
513 void emit_block_move(dst_reg *dst, src_reg *src,
514 const struct glsl_type *type, uint32_t predicate);
515
516 void emit_constant_values(dst_reg *dst, ir_constant *value);
517
518 /**
519 * Emit the dot-product instruction appropriate for the given element count.
520 */
521 void emit_dp(dst_reg dst, src_reg src0, src_reg src1, unsigned elements);
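/* e.g. elements == 3 selects a DP3 and elements == 4 a DP4; callers pass
 * the component count of the vectors being dotted.
 */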
522
523 void emit_scalar(ir_instruction *ir, enum prog_opcode op,
524 dst_reg dst, src_reg src0);
525
526 void emit_scalar(ir_instruction *ir, enum prog_opcode op,
527 dst_reg dst, src_reg src0, src_reg src1);
528
529 void emit_scs(ir_instruction *ir, enum prog_opcode op,
530 dst_reg dst, const src_reg &src);
531
532 src_reg fix_3src_operand(src_reg src);
533
534 void emit_math1_gen6(enum opcode opcode, dst_reg dst, src_reg src);
535 void emit_math1_gen4(enum opcode opcode, dst_reg dst, src_reg src);
536 void emit_math(enum opcode opcode, dst_reg dst, src_reg src);
537 void emit_math2_gen6(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
538 void emit_math2_gen4(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
539 void emit_math(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
540 src_reg fix_math_operand(src_reg src);
541
542 void emit_pack_half_2x16(dst_reg dst, src_reg src0);
543 void emit_unpack_half_2x16(dst_reg dst, src_reg src0);
544
545 uint32_t gather_channel(ir_texture *ir, int sampler);
546 src_reg emit_mcs_fetch(ir_texture *ir, src_reg coordinate, int sampler);
547 void emit_gen6_gather_wa(uint8_t wa, dst_reg dst);
548 void swizzle_result(ir_texture *ir, src_reg orig_val, int sampler);
549
550 void emit_ndc_computation();
551 void emit_psiz_and_flags(struct brw_reg reg);
552 void emit_clip_distances(dst_reg reg, int offset);
553 void emit_generic_urb_slot(dst_reg reg, int varying);
554 void emit_urb_slot(int mrf, int varying);
555
556 void emit_shader_time_begin();
557 void emit_shader_time_end();
558 void emit_shader_time_write(enum shader_time_shader_type type,
559 src_reg value);
560
561 void emit_untyped_atomic(unsigned atomic_op, unsigned surf_index,
562 dst_reg dst, src_reg offset, src_reg src0,
563 src_reg src1);
564
565 void emit_untyped_surface_read(unsigned surf_index, dst_reg dst,
566 src_reg offset);
567
568 src_reg get_scratch_offset(vec4_instruction *inst,
569 src_reg *reladdr, int reg_offset);
570 src_reg get_pull_constant_offset(vec4_instruction *inst,
571 src_reg *reladdr, int reg_offset);
572 void emit_scratch_read(vec4_instruction *inst,
573 dst_reg dst,
574 src_reg orig_src,
575 int base_offset);
576 void emit_scratch_write(vec4_instruction *inst,
577 int base_offset);
578 void emit_pull_constant_load(vec4_instruction *inst,
579 dst_reg dst,
580 src_reg orig_src,
581 int base_offset);
582
583 bool try_emit_sat(ir_expression *ir);
584 bool try_emit_mad(ir_expression *ir, int mul_arg);
585 void resolve_ud_negate(src_reg *reg);
586
587 src_reg get_timestamp();
588
589 bool process_move_condition(ir_rvalue *ir);
590
591 void dump_instruction(backend_instruction *inst);
592
593 void visit_atomic_counter_intrinsic(ir_call *ir);
594
595 protected:
596 void emit_vertex();
597 void lower_attributes_to_hw_regs(const int *attribute_map,
598 bool interleaved);
599 void setup_payload_interference(struct ra_graph *g, int first_payload_node,
600 int reg_node_count);
601 virtual dst_reg *make_reg_for_system_value(ir_variable *ir) = 0;
602 virtual void setup_payload() = 0;
603 virtual void emit_prolog() = 0;
604 virtual void emit_program_code() = 0;
605 virtual void emit_thread_end() = 0;
606 virtual void emit_urb_write_header(int mrf) = 0;
607 virtual vec4_instruction *emit_urb_write_opcode(bool complete) = 0;
608 virtual int compute_array_stride(ir_dereference_array *ir);
609
610 const bool debug_flag;
611
612 private:
613 /**
614 * If true, then register allocation should fail instead of spilling.
615 */
616 const bool no_spills;
617
618 const shader_time_shader_type st_base;
619 const shader_time_shader_type st_written;
620 const shader_time_shader_type st_reset;
621 };
622
623
624 /**
625 * The vertex shader code generator.
626 *
627 * Translates VS IR to actual i965 assembly code.
628 */
629 class vec4_generator
630 {
631 public:
632 vec4_generator(struct brw_context *brw,
633 struct gl_shader_program *shader_prog,
634 struct gl_program *prog,
635 struct brw_vec4_prog_data *prog_data,
636 void *mem_ctx,
637 bool debug_flag);
638 ~vec4_generator();
639
640 const unsigned *generate_assembly(exec_list *insts, unsigned *asm_size);
641
642 private:
643 void generate_code(exec_list *instructions);
644 void generate_vec4_instruction(vec4_instruction *inst,
645 struct brw_reg dst,
646 struct brw_reg *src);
647
648 void generate_math1_gen4(vec4_instruction *inst,
649 struct brw_reg dst,
650 struct brw_reg src);
651 void generate_math1_gen6(vec4_instruction *inst,
652 struct brw_reg dst,
653 struct brw_reg src);
654 void generate_math2_gen4(vec4_instruction *inst,
655 struct brw_reg dst,
656 struct brw_reg src0,
657 struct brw_reg src1);
658 void generate_math2_gen6(vec4_instruction *inst,
659 struct brw_reg dst,
660 struct brw_reg src0,
661 struct brw_reg src1);
662 void generate_math2_gen7(vec4_instruction *inst,
663 struct brw_reg dst,
664 struct brw_reg src0,
665 struct brw_reg src1);
666
667 void generate_tex(vec4_instruction *inst,
668 struct brw_reg dst,
669 struct brw_reg src);
670
671 void generate_vs_urb_write(vec4_instruction *inst);
672 void generate_gs_urb_write(vec4_instruction *inst);
673 void generate_gs_thread_end(vec4_instruction *inst);
674 void generate_gs_set_write_offset(struct brw_reg dst,
675 struct brw_reg src0,
676 struct brw_reg src1);
677 void generate_gs_set_vertex_count(struct brw_reg dst,
678 struct brw_reg src);
679 void generate_gs_set_dword_2_immed(struct brw_reg dst, struct brw_reg src);
680 void generate_gs_prepare_channel_masks(struct brw_reg dst);
681 void generate_gs_set_channel_masks(struct brw_reg dst, struct brw_reg src);
682 void generate_gs_get_instance_id(struct brw_reg dst);
683 void generate_oword_dual_block_offsets(struct brw_reg m1,
684 struct brw_reg index);
685 void generate_scratch_write(vec4_instruction *inst,
686 struct brw_reg dst,
687 struct brw_reg src,
688 struct brw_reg index);
689 void generate_scratch_read(vec4_instruction *inst,
690 struct brw_reg dst,
691 struct brw_reg index);
692 void generate_pull_constant_load(vec4_instruction *inst,
693 struct brw_reg dst,
694 struct brw_reg index,
695 struct brw_reg offset);
696 void generate_pull_constant_load_gen7(vec4_instruction *inst,
697 struct brw_reg dst,
698 struct brw_reg surf_index,
699 struct brw_reg offset);
700 void generate_unpack_flags(vec4_instruction *inst,
701 struct brw_reg dst);
702
703 void generate_untyped_atomic(vec4_instruction *inst,
704 struct brw_reg dst,
705 struct brw_reg atomic_op,
706 struct brw_reg surf_index);
707
708 void generate_untyped_surface_read(vec4_instruction *inst,
709 struct brw_reg dst,
710 struct brw_reg surf_index);
711
712 struct brw_context *brw;
713
714 struct brw_compile *p;
715
716 struct gl_shader_program *shader_prog;
717 const struct gl_program *prog;
718
719 struct brw_vec4_prog_data *prog_data;
720
721 void *mem_ctx;
722 const bool debug_flag;
723 };
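/* Roughly how the classes above fit together when compiling a shader (a
 * sketch only; the concrete vec4_visitor subclass and the surrounding driver
 * code live elsewhere):
 *
 *    // v is an instance of a concrete vec4_visitor subclass
 *    if (!v.run())
 *       return NULL;                     // see v.fail_msg for the reason
 *
 *    vec4_generator g(brw, shader_prog, prog, prog_data, mem_ctx, debug);
 *    unsigned asm_size;
 *    const unsigned *assembly =
 *       g.generate_assembly(&v.instructions, &asm_size);
 */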
724
725 /**
726 * The Broadwell (Gen8) vertex shader code generator.
727 *
728 * Translates VS IR to actual Gen8 i965 assembly code.
729 */
730 class gen8_vec4_generator : public gen8_generator
731 {
732 public:
733 gen8_vec4_generator(struct brw_context *brw,
734 struct gl_shader_program *shader_prog,
735 struct gl_program *prog,
736 struct brw_vec4_prog_data *prog_data,
737 void *mem_ctx,
738 bool debug_flag);
739 ~gen8_vec4_generator();
740
741 const unsigned *generate_assembly(exec_list *insts, unsigned *asm_size);
742
743 private:
744 void generate_code(exec_list *instructions);
745 void generate_vec4_instruction(vec4_instruction *inst,
746 struct brw_reg dst,
747 struct brw_reg *src);
748
749 void generate_tex(vec4_instruction *inst,
750 struct brw_reg dst);
751
752 void generate_urb_write(vec4_instruction *ir, bool copy_g0);
753 void generate_gs_thread_end(vec4_instruction *ir);
754 void generate_gs_set_write_offset(struct brw_reg dst,
755 struct brw_reg src0,
756 struct brw_reg src1);
757 void generate_gs_set_vertex_count(struct brw_reg dst,
758 struct brw_reg src);
759 void generate_gs_set_dword_2_immed(struct brw_reg dst, struct brw_reg src);
760 void generate_gs_prepare_channel_masks(struct brw_reg dst);
761 void generate_gs_set_channel_masks(struct brw_reg dst, struct brw_reg src);
762
763 void generate_oword_dual_block_offsets(struct brw_reg m1,
764 struct brw_reg index);
765 void generate_scratch_write(vec4_instruction *inst,
766 struct brw_reg dst,
767 struct brw_reg src,
768 struct brw_reg index);
769 void generate_scratch_read(vec4_instruction *inst,
770 struct brw_reg dst,
771 struct brw_reg index);
772 void generate_pull_constant_load(vec4_instruction *inst,
773 struct brw_reg dst,
774 struct brw_reg index,
775 struct brw_reg offset);
776
777 void mark_surface_used(unsigned surf_index);
778
779 struct brw_vec4_prog_data *prog_data;
780
781 const bool debug_flag;
782 };
783
784
785 } /* namespace brw */
786 #endif /* __cplusplus */
787
788 #endif /* BRW_VEC4_H */