/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef BRW_VEC4_H
#define BRW_VEC4_H

#include <stdint.h>
#include "brw_shader.h"
#include "main/compiler.h"
#include "program/hash_table.h"
#include "brw_program.h"

#ifdef __cplusplus
extern "C" {
#endif

#include "brw_context.h"
#include "brw_eu.h"

#ifdef __cplusplus
} /* extern "C" */
#include "gen8_generator.h"
#endif

#include "glsl/ir.h"


struct brw_vec4_compile {
   GLuint last_scratch; /**< measured in 32-byte (register size) units */
};


struct brw_vec4_prog_key {
   GLuint program_string_id;

   /**
    * True if at least one clip flag is enabled, regardless of whether the
    * shader uses clip planes or gl_ClipDistance.
    */
   GLuint userclip_active:1;

   /**
    * How many user clipping planes are being uploaded to the vertex shader
    * as push constants.
    */
   GLuint nr_userclip_plane_consts:4;

   GLuint clamp_vertex_color:1;

   struct brw_sampler_prog_key_data tex;
};


#ifdef __cplusplus
extern "C" {
#endif

void
brw_vec4_setup_prog_key_for_precompile(struct gl_context *ctx,
                                       struct brw_vec4_prog_key *key,
                                       GLuint id, struct gl_program *prog);

#ifdef __cplusplus
} /* extern "C" */

namespace brw {

class dst_reg;

unsigned
swizzle_for_size(int size);

class reg
{
public:
   /** Register file: GRF, MRF, IMM. */
   enum register_file file;
   /** Virtual register number.  0 = fixed hw reg */
   int reg;
   /** Offset within the virtual register. */
   int reg_offset;
   /** Register type.  BRW_REGISTER_TYPE_* */
   int type;
   struct brw_reg fixed_hw_reg;

   /** Value for file == IMM */
   union {
      int32_t i;
      uint32_t u;
      float f;
   } imm;
};

class src_reg : public reg
{
public:
   DECLARE_RALLOC_CXX_OPERATORS(src_reg)

   void init();

   src_reg(register_file file, int reg, const glsl_type *type);
   src_reg();
   src_reg(float f);
   src_reg(uint32_t u);
   src_reg(int32_t i);
   src_reg(struct brw_reg reg);

   bool equals(src_reg *r);
   bool is_zero() const;
   bool is_one() const;

   src_reg(class vec4_visitor *v, const struct glsl_type *type);

   explicit src_reg(dst_reg reg);

   GLuint swizzle; /**< BRW_SWIZZLE_XYZW macros from brw_reg.h. */
   bool negate;
   bool abs;

   src_reg *reladdr;
};

static inline src_reg
retype(src_reg reg, unsigned type)
{
   reg.fixed_hw_reg.type = reg.type = type;
   return reg;
}

static inline src_reg
offset(src_reg reg, unsigned delta)
{
   assert(delta == 0 || (reg.file != HW_REG && reg.file != IMM));
   reg.reg_offset += delta;
   return reg;
}
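
/*
 * Usage sketch: a value that spans several consecutive reg_offsets, e.g. a
 * mat4 temporary, can be addressed one register at a time:
 *
 *    src_reg second_column = offset(mat, 1);
 *
 * where "mat" is a hypothetical src_reg whose virtual GRF covers four
 * registers (one per column).
 */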

/**
 * Reswizzle a given source register.
 * \sa brw_swizzle().
 */
static inline src_reg
swizzle(src_reg reg, unsigned swizzle)
{
   assert(reg.file != HW_REG);
   reg.swizzle = BRW_SWIZZLE4(
      BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 0)),
      BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 1)),
      BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 2)),
      BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 3)));
   return reg;
}
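
/*
 * Usage sketch: swizzles compose rather than overwrite.  Assuming "reg" is a
 * hypothetical src_reg carrying the identity swizzle (BRW_SWIZZLE_XYZW),
 *
 *    src_reg x_splat = swizzle(reg, BRW_SWIZZLE_XXXX);
 *
 * replicates the x channel across all four components; a further swizzle of
 * x_splat selects channels out of the already-swizzled result.  The
 * BRW_SWIZZLE_* macros are the ones from brw_reg.h noted above.
 */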

static inline src_reg
negate(src_reg reg)
{
   assert(reg.file != HW_REG && reg.file != IMM);
   reg.negate = !reg.negate;
   return reg;
}

class dst_reg : public reg
{
public:
   DECLARE_RALLOC_CXX_OPERATORS(dst_reg)

   void init();

   dst_reg();
   dst_reg(register_file file, int reg);
   dst_reg(register_file file, int reg, const glsl_type *type, int writemask);
   dst_reg(struct brw_reg reg);
   dst_reg(class vec4_visitor *v, const struct glsl_type *type);

   explicit dst_reg(src_reg reg);

   int writemask; /**< Bitfield of WRITEMASK_[XYZW] */

   src_reg *reladdr;
};

static inline dst_reg
retype(dst_reg reg, unsigned type)
{
   reg.fixed_hw_reg.type = reg.type = type;
   return reg;
}

static inline dst_reg
offset(dst_reg reg, unsigned delta)
{
   assert(delta == 0 || (reg.file != HW_REG && reg.file != IMM));
   reg.reg_offset += delta;
   return reg;
}

static inline dst_reg
writemask(dst_reg reg, unsigned mask)
{
   assert(reg.file != HW_REG && reg.file != IMM);
   assert((reg.writemask & mask) != 0);
   reg.writemask &= mask;
   return reg;
}
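
/*
 * Usage sketch: restricting a destination to a subset of channels.  Assuming
 * "dst" is a hypothetical dst_reg that starts out with the full
 * WRITEMASK_XYZW,
 *
 *    dst_reg xy_only = writemask(dst, WRITEMASK_XY);
 *
 * keeps only the x and y channels; the assertion above requires that the
 * requested mask overlap the channels the register could already write.
 * The WRITEMASK_* values are the core Mesa writemask bits referenced in the
 * dst_reg::writemask comment.
 */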

class vec4_instruction : public backend_instruction {
public:
   DECLARE_RALLOC_CXX_OPERATORS(vec4_instruction)

   vec4_instruction(vec4_visitor *v, enum opcode opcode,
                    dst_reg dst = dst_reg(),
                    src_reg src0 = src_reg(),
                    src_reg src1 = src_reg(),
                    src_reg src2 = src_reg());

   struct brw_reg get_dst(void);
   struct brw_reg get_src(const struct brw_vec4_prog_data *prog_data, int i);

   dst_reg dst;
   src_reg src[3];

   bool saturate;
   bool force_writemask_all;
   bool no_dd_clear, no_dd_check;

   int conditional_mod; /**< BRW_CONDITIONAL_* */

   int sampler;
   uint32_t texture_offset; /**< Texture Offset bitfield */
   int target; /**< MRT target. */
   bool shadow_compare;

   enum brw_urb_write_flags urb_write_flags;
   bool header_present;
   int mlen; /**< SEND message length */
   int base_mrf; /**< First MRF in the SEND message, if mlen is nonzero. */

   uint32_t offset; /* spill/unspill offset */
   /** @{
    * Annotation for the generated IR.  One of the two can be set.
    */
   const void *ir;
   const char *annotation;
   /** @} */

   bool is_send_from_grf();
   bool can_reswizzle_dst(int dst_writemask, int swizzle, int swizzle_mask);
   void reswizzle_dst(int dst_writemask, int swizzle);

   bool reads_flag()
   {
      return predicate || opcode == VS_OPCODE_UNPACK_FLAGS_SIMD4X2;
   }

   bool writes_flag()
   {
      return conditional_mod && opcode != BRW_OPCODE_SEL;
   }
};

/**
 * The vertex shader front-end.
 *
 * Translates either GLSL IR or Mesa IR (for ARB_vertex_program and
 * fixed-function) into VS IR.
 */
class vec4_visitor : public backend_visitor
{
public:
   vec4_visitor(struct brw_context *brw,
                struct brw_vec4_compile *c,
                struct gl_program *prog,
                const struct brw_vec4_prog_key *key,
                struct brw_vec4_prog_data *prog_data,
                struct gl_shader_program *shader_prog,
                gl_shader_stage stage,
                void *mem_ctx,
                bool debug_flag,
                bool no_spills,
                shader_time_shader_type st_base,
                shader_time_shader_type st_written,
                shader_time_shader_type st_reset);
   ~vec4_visitor();

   dst_reg dst_null_f()
   {
      return dst_reg(brw_null_reg());
   }

   dst_reg dst_null_d()
   {
      return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   }

   dst_reg dst_null_ud()
   {
      return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));
   }

   struct brw_vec4_compile * const c;
   const struct brw_vec4_prog_key * const key;
   struct brw_vec4_prog_data * const prog_data;
   unsigned int sanity_param_count;

   char *fail_msg;
   bool failed;

   /**
    * GLSL IR currently being processed, which is associated with our
    * driver IR instructions for debugging purposes.
    */
   const void *base_ir;
   const char *current_annotation;

   int *virtual_grf_sizes;
   int virtual_grf_count;
   int virtual_grf_array_size;
   int first_non_payload_grf;
   unsigned int max_grf;
   int *virtual_grf_start;
   int *virtual_grf_end;
   dst_reg userplane[MAX_CLIP_PLANES];

   /**
    * This is the size to be used for an array with an element per
    * reg_offset.
    */
   int virtual_grf_reg_count;
   /** Per-virtual-grf indices into an array of size virtual_grf_reg_count */
   int *virtual_grf_reg_map;

   bool live_intervals_valid;

   dst_reg *variable_storage(ir_variable *var);

   void reladdr_to_temp(ir_instruction *ir, src_reg *reg, int *num_reladdr);

   bool need_all_constants_in_pull_buffer;

   /**
    * \name Visit methods
    *
    * As typical for the visitor pattern, there must be one \c visit method
    * for each concrete subclass of \c ir_instruction.  Virtual base classes
    * within the hierarchy should not have \c visit methods.
    */
   /*@{*/
   virtual void visit(ir_variable *);
   virtual void visit(ir_loop *);
   virtual void visit(ir_loop_jump *);
   virtual void visit(ir_function_signature *);
   virtual void visit(ir_function *);
   virtual void visit(ir_expression *);
   virtual void visit(ir_swizzle *);
   virtual void visit(ir_dereference_variable *);
   virtual void visit(ir_dereference_array *);
   virtual void visit(ir_dereference_record *);
   virtual void visit(ir_assignment *);
   virtual void visit(ir_constant *);
   virtual void visit(ir_call *);
   virtual void visit(ir_return *);
   virtual void visit(ir_discard *);
   virtual void visit(ir_texture *);
   virtual void visit(ir_if *);
   virtual void visit(ir_emit_vertex *);
   virtual void visit(ir_end_primitive *);
   /*@}*/

   src_reg result;

   /* Regs for vertex results.  Generated at ir_variable visiting time
    * for the ir->location values that are used.
    */
   dst_reg output_reg[BRW_VARYING_SLOT_COUNT];
   const char *output_reg_annotation[BRW_VARYING_SLOT_COUNT];
   int *uniform_size;
   int *uniform_vector_size;
   int uniform_array_size; /**< Size of the uniform_[vector_]size arrays */
   int uniforms;

   src_reg shader_start_time;

   struct hash_table *variable_ht;

   bool run(void);
   void fail(const char *msg, ...);

   int virtual_grf_alloc(int size);
   void setup_uniform_clipplane_values();
   void setup_uniform_values(ir_variable *ir);
   void setup_builtin_uniform_values(ir_variable *ir);
   int setup_uniforms(int payload_reg);
   bool reg_allocate_trivial();
   bool reg_allocate();
   void evaluate_spill_costs(float *spill_costs, bool *no_spill);
   int choose_spill_reg(struct ra_graph *g);
   void spill_reg(int spill_reg);
   void move_grf_array_access_to_scratch();
   void move_uniform_array_access_to_pull_constants();
   void move_push_constants_to_pull_constants();
   void split_uniform_registers();
   void pack_uniform_registers();
   void calculate_live_intervals();
   void invalidate_live_intervals();
   void split_virtual_grfs();
   bool dead_code_eliminate();
   bool virtual_grf_interferes(int a, int b);
   bool opt_copy_propagation();
   bool opt_algebraic();
   bool opt_register_coalesce();
   void opt_set_dependency_control();
   void opt_schedule_instructions();

   bool can_do_source_mods(vec4_instruction *inst);

   vec4_instruction *emit(vec4_instruction *inst);

   vec4_instruction *emit(enum opcode opcode);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst, src_reg src0);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst,
                          src_reg src0, src_reg src1);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst,
                          src_reg src0, src_reg src1, src_reg src2);

   vec4_instruction *emit_before(vec4_instruction *inst,
                                 vec4_instruction *new_inst);

   vec4_instruction *MOV(dst_reg dst, src_reg src0);
   vec4_instruction *NOT(dst_reg dst, src_reg src0);
   vec4_instruction *RNDD(dst_reg dst, src_reg src0);
   vec4_instruction *RNDE(dst_reg dst, src_reg src0);
   vec4_instruction *RNDZ(dst_reg dst, src_reg src0);
   vec4_instruction *FRC(dst_reg dst, src_reg src0);
   vec4_instruction *F32TO16(dst_reg dst, src_reg src0);
   vec4_instruction *F16TO32(dst_reg dst, src_reg src0);
   vec4_instruction *ADD(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *MUL(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *MACH(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *MAC(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *AND(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *OR(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *XOR(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *DP3(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *DP4(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *DPH(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *SHL(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *SHR(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *ASR(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *CMP(dst_reg dst, src_reg src0, src_reg src1,
                         uint32_t condition);
   vec4_instruction *IF(src_reg src0, src_reg src1, uint32_t condition);
   vec4_instruction *IF(uint32_t predicate);
   vec4_instruction *PULL_CONSTANT_LOAD(dst_reg dst, src_reg index);
   vec4_instruction *SCRATCH_READ(dst_reg dst, src_reg index);
   vec4_instruction *SCRATCH_WRITE(dst_reg dst, src_reg src, src_reg index);
   vec4_instruction *LRP(dst_reg dst, src_reg a, src_reg y, src_reg x);
   vec4_instruction *BFREV(dst_reg dst, src_reg value);
   vec4_instruction *BFE(dst_reg dst, src_reg bits, src_reg offset, src_reg value);
   vec4_instruction *BFI1(dst_reg dst, src_reg bits, src_reg offset);
   vec4_instruction *BFI2(dst_reg dst, src_reg bfi1_dst, src_reg insert, src_reg base);
   vec4_instruction *FBH(dst_reg dst, src_reg value);
   vec4_instruction *FBL(dst_reg dst, src_reg value);
   vec4_instruction *CBIT(dst_reg dst, src_reg value);
   vec4_instruction *MAD(dst_reg dst, src_reg c, src_reg b, src_reg a);
   vec4_instruction *ADDC(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *SUBB(dst_reg dst, src_reg src0, src_reg src1);

   int implied_mrf_writes(vec4_instruction *inst);

   bool try_rewrite_rhs_to_dst(ir_assignment *ir,
                               dst_reg dst,
                               src_reg src,
                               vec4_instruction *pre_rhs_inst,
                               vec4_instruction *last_rhs_inst);

   bool try_copy_propagation(vec4_instruction *inst, int arg,
                             src_reg *values[4]);

   /** Walks an exec_list of ir_instruction and sends it through this visitor. */
   void visit_instructions(const exec_list *list);

   void emit_vp_sop(uint32_t condmod, dst_reg dst,
                    src_reg src0, src_reg src1, src_reg one);

   void emit_bool_to_cond_code(ir_rvalue *ir, uint32_t *predicate);
   void emit_bool_comparison(unsigned int op, dst_reg dst, src_reg src0, src_reg src1);
   void emit_if_gen6(ir_if *ir);

   void emit_minmax(uint32_t condmod, dst_reg dst, src_reg src0, src_reg src1);

   void emit_lrp(const dst_reg &dst,
                 const src_reg &x, const src_reg &y, const src_reg &a);

   void emit_block_move(dst_reg *dst, src_reg *src,
                        const struct glsl_type *type, uint32_t predicate);

   void emit_constant_values(dst_reg *dst, ir_constant *value);

   /**
    * Emit the correct dot-product instruction for the type of arguments.
    */
   void emit_dp(dst_reg dst, src_reg src0, src_reg src1, unsigned elements);

   void emit_scalar(ir_instruction *ir, enum prog_opcode op,
                    dst_reg dst, src_reg src0);

   void emit_scalar(ir_instruction *ir, enum prog_opcode op,
                    dst_reg dst, src_reg src0, src_reg src1);

   void emit_scs(ir_instruction *ir, enum prog_opcode op,
                 dst_reg dst, const src_reg &src);

   src_reg fix_3src_operand(src_reg src);

   void emit_math1_gen6(enum opcode opcode, dst_reg dst, src_reg src);
   void emit_math1_gen4(enum opcode opcode, dst_reg dst, src_reg src);
   void emit_math(enum opcode opcode, dst_reg dst, src_reg src);
   void emit_math2_gen6(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
   void emit_math2_gen4(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
   void emit_math(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
   src_reg fix_math_operand(src_reg src);

   void emit_pack_half_2x16(dst_reg dst, src_reg src0);
   void emit_unpack_half_2x16(dst_reg dst, src_reg src0);

   uint32_t gather_channel(ir_texture *ir, int sampler);
   src_reg emit_mcs_fetch(ir_texture *ir, src_reg coordinate, int sampler);
   void emit_gen6_gather_wa(uint8_t wa, dst_reg dst);
   void swizzle_result(ir_texture *ir, src_reg orig_val, int sampler);

   void emit_ndc_computation();
   void emit_psiz_and_flags(struct brw_reg reg);
   void emit_clip_distances(dst_reg reg, int offset);
   void emit_generic_urb_slot(dst_reg reg, int varying);
   void emit_urb_slot(int mrf, int varying);

   void emit_shader_time_begin();
   void emit_shader_time_end();
   void emit_shader_time_write(enum shader_time_shader_type type,
                               src_reg value);

   void emit_untyped_atomic(unsigned atomic_op, unsigned surf_index,
                            dst_reg dst, src_reg offset, src_reg src0,
                            src_reg src1);

   void emit_untyped_surface_read(unsigned surf_index, dst_reg dst,
                                  src_reg offset);

   src_reg get_scratch_offset(vec4_instruction *inst,
                              src_reg *reladdr, int reg_offset);
   src_reg get_pull_constant_offset(vec4_instruction *inst,
                                    src_reg *reladdr, int reg_offset);
   void emit_scratch_read(vec4_instruction *inst,
                          dst_reg dst,
                          src_reg orig_src,
                          int base_offset);
   void emit_scratch_write(vec4_instruction *inst,
                           int base_offset);
   void emit_pull_constant_load(vec4_instruction *inst,
                                dst_reg dst,
                                src_reg orig_src,
                                int base_offset);

   bool try_emit_sat(ir_expression *ir);
   bool try_emit_mad(ir_expression *ir, int mul_arg);
   void resolve_ud_negate(src_reg *reg);

   src_reg get_timestamp();

   bool process_move_condition(ir_rvalue *ir);

   void dump_instruction(backend_instruction *inst);

   void visit_atomic_counter_intrinsic(ir_call *ir);

protected:
   void emit_vertex();
   void lower_attributes_to_hw_regs(const int *attribute_map,
                                    bool interleaved);
   void setup_payload_interference(struct ra_graph *g, int first_payload_node,
                                   int reg_node_count);
   virtual dst_reg *make_reg_for_system_value(ir_variable *ir) = 0;
   virtual void setup_payload() = 0;
   virtual void emit_prolog() = 0;
   virtual void emit_program_code() = 0;
   virtual void emit_thread_end() = 0;
   virtual void emit_urb_write_header(int mrf) = 0;
   virtual vec4_instruction *emit_urb_write_opcode(bool complete) = 0;
   virtual int compute_array_stride(ir_dereference_array *ir);

   const bool debug_flag;

private:
   /**
    * If true, then register allocation should fail instead of spilling.
    */
   const bool no_spills;

   const shader_time_shader_type st_base;
   const shader_time_shader_type st_written;
   const shader_time_shader_type st_reset;
};


/**
 * The vertex shader code generator.
 *
 * Translates VS IR to actual i965 assembly code.
 */
class vec4_generator
{
public:
   vec4_generator(struct brw_context *brw,
                  struct gl_shader_program *shader_prog,
                  struct gl_program *prog,
                  struct brw_vec4_prog_data *prog_data,
                  void *mem_ctx,
                  bool debug_flag);
   ~vec4_generator();

   const unsigned *generate_assembly(exec_list *insts, unsigned *asm_size);

private:
   void generate_code(exec_list *instructions);
   void generate_vec4_instruction(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg *src);

   void generate_math1_gen4(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src);
   void generate_math1_gen6(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src);
   void generate_math2_gen4(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src0,
                            struct brw_reg src1);
   void generate_math2_gen6(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src0,
                            struct brw_reg src1);
   void generate_math2_gen7(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src0,
                            struct brw_reg src1);

   void generate_tex(vec4_instruction *inst,
                     struct brw_reg dst,
                     struct brw_reg src);

   void generate_vs_urb_write(vec4_instruction *inst);
   void generate_gs_urb_write(vec4_instruction *inst);
   void generate_gs_thread_end(vec4_instruction *inst);
   void generate_gs_set_write_offset(struct brw_reg dst,
                                     struct brw_reg src0,
                                     struct brw_reg src1);
   void generate_gs_set_vertex_count(struct brw_reg dst,
                                     struct brw_reg src);
   void generate_gs_set_dword_2_immed(struct brw_reg dst, struct brw_reg src);
   void generate_gs_prepare_channel_masks(struct brw_reg dst);
   void generate_gs_set_channel_masks(struct brw_reg dst, struct brw_reg src);
   void generate_gs_get_instance_id(struct brw_reg dst);
   void generate_oword_dual_block_offsets(struct brw_reg m1,
                                          struct brw_reg index);
   void generate_scratch_write(vec4_instruction *inst,
                               struct brw_reg dst,
                               struct brw_reg src,
                               struct brw_reg index);
   void generate_scratch_read(vec4_instruction *inst,
                              struct brw_reg dst,
                              struct brw_reg index);
   void generate_pull_constant_load(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg index,
                                    struct brw_reg offset);
   void generate_pull_constant_load_gen7(vec4_instruction *inst,
                                         struct brw_reg dst,
                                         struct brw_reg surf_index,
                                         struct brw_reg offset);
   void generate_unpack_flags(vec4_instruction *inst,
                              struct brw_reg dst);

   void generate_untyped_atomic(vec4_instruction *inst,
                                struct brw_reg dst,
                                struct brw_reg atomic_op,
                                struct brw_reg surf_index);

   void generate_untyped_surface_read(vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg surf_index);

   struct brw_context *brw;

   struct brw_compile *p;

   struct gl_shader_program *shader_prog;
   const struct gl_program *prog;

   struct brw_vec4_prog_data *prog_data;

   void *mem_ctx;
   const bool debug_flag;
};

/**
 * The vertex shader code generator for Gen8+ hardware.
 *
 * Translates VS IR to actual i965 assembly code, using the Gen8 instruction
 * encoding provided by the gen8_generator base class.
 */
class gen8_vec4_generator : public gen8_generator
{
public:
   gen8_vec4_generator(struct brw_context *brw,
                       struct gl_shader_program *shader_prog,
                       struct gl_program *prog,
                       struct brw_vec4_prog_data *prog_data,
                       void *mem_ctx,
                       bool debug_flag);
   ~gen8_vec4_generator();

   const unsigned *generate_assembly(exec_list *insts, unsigned *asm_size);

private:
   void generate_code(exec_list *instructions);
   void generate_vec4_instruction(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg *src);

   void generate_tex(vec4_instruction *inst,
                     struct brw_reg dst);

   void generate_urb_write(vec4_instruction *ir, bool copy_g0);
   void generate_gs_thread_end(vec4_instruction *ir);
   void generate_gs_set_write_offset(struct brw_reg dst,
                                     struct brw_reg src0,
                                     struct brw_reg src1);
   void generate_gs_set_vertex_count(struct brw_reg dst,
                                     struct brw_reg src);
   void generate_gs_set_dword_2_immed(struct brw_reg dst, struct brw_reg src);
   void generate_gs_prepare_channel_masks(struct brw_reg dst);
   void generate_gs_set_channel_masks(struct brw_reg dst, struct brw_reg src);

   void generate_oword_dual_block_offsets(struct brw_reg m1,
                                          struct brw_reg index);
   void generate_scratch_write(vec4_instruction *inst,
                               struct brw_reg dst,
                               struct brw_reg src,
                               struct brw_reg index);
   void generate_scratch_read(vec4_instruction *inst,
                              struct brw_reg dst,
                              struct brw_reg index);
   void generate_pull_constant_load(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg index,
                                    struct brw_reg offset);

   void mark_surface_used(unsigned surf_index);

   struct brw_vec4_prog_data *prog_data;

   const bool debug_flag;
};


} /* namespace brw */
#endif /* __cplusplus */

#endif /* BRW_VEC4_H */