i965: Remove cfg-invalidating parameter from invalidate_live_intervals.
[mesa.git] / src/mesa/drivers/dri/i965/brw_vec4.h
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef BRW_VEC4_H
#define BRW_VEC4_H

#include <stdint.h>
#include "brw_shader.h"
#include "main/compiler.h"
#include "program/hash_table.h"
#include "brw_program.h"

#ifdef __cplusplus
extern "C" {
#endif

#include "brw_context.h"
#include "brw_eu.h"
#include "intel_asm_annotation.h"

#ifdef __cplusplus
}; /* extern "C" */
#endif

#include "glsl/ir.h"


struct brw_vec4_compile {
   GLuint last_scratch; /**< measured in 32-byte (register size) units */
};


struct brw_vec4_prog_key {
   GLuint program_string_id;

   /**
    * True if at least one clip flag is enabled, regardless of whether the
    * shader uses clip planes or gl_ClipDistance.
    */
   GLuint userclip_active:1;

   /**
    * How many user clipping planes are being uploaded to the vertex shader as
    * push constants.
    */
   GLuint nr_userclip_plane_consts:4;

   GLuint clamp_vertex_color:1;

   struct brw_sampler_prog_key_data tex;
};


#ifdef __cplusplus
extern "C" {
#endif

void
brw_vec4_setup_prog_key_for_precompile(struct gl_context *ctx,
                                       struct brw_vec4_prog_key *key,
                                       GLuint id, struct gl_program *prog);

#ifdef __cplusplus
} /* extern "C" */

namespace brw {

class dst_reg;

unsigned
swizzle_for_size(int size);

class src_reg : public backend_reg
{
public:
   DECLARE_RALLOC_CXX_OPERATORS(src_reg)

   void init();

   src_reg(register_file file, int reg, const glsl_type *type);
   src_reg();
   src_reg(float f);
   src_reg(uint32_t u);
   src_reg(int32_t i);
   src_reg(struct brw_reg reg);

   bool equals(const src_reg &r) const;

   src_reg(class vec4_visitor *v, const struct glsl_type *type);
   src_reg(class vec4_visitor *v, const struct glsl_type *type, int size);

   explicit src_reg(dst_reg reg);

   GLuint swizzle; /**< BRW_SWIZZLE_XYZW macros from brw_reg.h. */

   src_reg *reladdr;
};

static inline src_reg
retype(src_reg reg, enum brw_reg_type type)
{
   reg.fixed_hw_reg.type = reg.type = type;
   return reg;
}

static inline src_reg
offset(src_reg reg, unsigned delta)
{
   assert(delta == 0 || (reg.file != HW_REG && reg.file != IMM));
   reg.reg_offset += delta;
   return reg;
}

/**
 * Reswizzle a given source register.
 * \sa brw_swizzle().
 */
static inline src_reg
swizzle(src_reg reg, unsigned swizzle)
{
   assert(reg.file != HW_REG);
   reg.swizzle = BRW_SWIZZLE4(
      BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 0)),
      BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 1)),
      BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 2)),
      BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 3)));
   return reg;
}
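
/*
 * Illustrative note (not part of the original header): swizzle() composes
 * the requested swizzle on top of the register's existing one, so it keeps
 * working on registers that are already swizzled.  A minimal sketch, assuming
 * the BRW_SWIZZLE_* macros from brw_reg.h:
 *
 *    // If val currently carries swizzle .yzwx, this yields .yyyy:
 *    src_reg splat = swizzle(val, BRW_SWIZZLE_XXXX);
 */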

static inline src_reg
negate(src_reg reg)
{
   assert(reg.file != HW_REG && reg.file != IMM);
   reg.negate = !reg.negate;
   return reg;
}

class dst_reg : public backend_reg
{
public:
   DECLARE_RALLOC_CXX_OPERATORS(dst_reg)

   void init();

   dst_reg();
   dst_reg(register_file file, int reg);
   dst_reg(register_file file, int reg, const glsl_type *type, int writemask);
   dst_reg(struct brw_reg reg);
   dst_reg(class vec4_visitor *v, const struct glsl_type *type);

   explicit dst_reg(src_reg reg);

   int writemask; /**< Bitfield of WRITEMASK_[XYZW] */

   src_reg *reladdr;
};

static inline dst_reg
retype(dst_reg reg, enum brw_reg_type type)
{
   reg.fixed_hw_reg.type = reg.type = type;
   return reg;
}

static inline dst_reg
offset(dst_reg reg, unsigned delta)
{
   assert(delta == 0 || (reg.file != HW_REG && reg.file != IMM));
   reg.reg_offset += delta;
   return reg;
}

static inline dst_reg
writemask(dst_reg reg, unsigned mask)
{
   assert(reg.file != HW_REG && reg.file != IMM);
   assert((reg.writemask & mask) != 0);
   reg.writemask &= mask;
   return reg;
}

class vec4_instruction : public backend_instruction {
public:
   DECLARE_RALLOC_CXX_OPERATORS(vec4_instruction)

   vec4_instruction(vec4_visitor *v, enum opcode opcode,
                    const dst_reg &dst = dst_reg(),
                    const src_reg &src0 = src_reg(),
                    const src_reg &src1 = src_reg(),
                    const src_reg &src2 = src_reg());

   struct brw_reg get_dst(void);
   struct brw_reg get_src(const struct brw_vec4_prog_data *prog_data, int i);

   dst_reg dst;
   src_reg src[3];

   bool shadow_compare;

   enum brw_urb_write_flags urb_write_flags;
   bool header_present;

   unsigned sol_binding; /**< gen6: SOL binding table index */
   bool sol_final_write; /**< gen6: send commit message */
   unsigned sol_vertex; /**< gen6: used for setting dst index in SVB header */

   bool is_send_from_grf();
   bool can_reswizzle(int dst_writemask, int swizzle, int swizzle_mask);
   void reswizzle(int dst_writemask, int swizzle);
   bool can_do_source_mods(struct brw_context *brw);

   bool reads_flag()
   {
      return predicate || opcode == VS_OPCODE_UNPACK_FLAGS_SIMD4X2;
   }

   bool writes_flag()
   {
      return conditional_mod && opcode != BRW_OPCODE_SEL;
   }
};

/**
 * The vertex shader front-end.
 *
 * Translates either GLSL IR or Mesa IR (for ARB_vertex_program and
 * fixed-function) into VS IR.
 */
class vec4_visitor : public backend_visitor
{
public:
   vec4_visitor(struct brw_context *brw,
                struct brw_vec4_compile *c,
                struct gl_program *prog,
                const struct brw_vec4_prog_key *key,
                struct brw_vec4_prog_data *prog_data,
                struct gl_shader_program *shader_prog,
                gl_shader_stage stage,
                void *mem_ctx,
                bool debug_flag,
                bool no_spills,
                shader_time_shader_type st_base,
                shader_time_shader_type st_written,
                shader_time_shader_type st_reset);
   ~vec4_visitor();

   dst_reg dst_null_f()
   {
      return dst_reg(brw_null_reg());
   }

   dst_reg dst_null_d()
   {
      return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   }

   dst_reg dst_null_ud()
   {
      return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));
   }

   struct brw_vec4_compile * const c;
   const struct brw_vec4_prog_key * const key;
   struct brw_vec4_prog_data * const prog_data;
   unsigned int sanity_param_count;

   char *fail_msg;
   bool failed;

   /**
    * GLSL IR currently being processed, which is associated with our
    * driver IR instructions for debugging purposes.
    */
   const void *base_ir;
   const char *current_annotation;

   int *virtual_grf_sizes;
   int virtual_grf_count;
   int virtual_grf_array_size;
   int first_non_payload_grf;
   unsigned int max_grf;
   int *virtual_grf_start;
   int *virtual_grf_end;
   dst_reg userplane[MAX_CLIP_PLANES];

   /**
    * This is the size to be used for an array with an element per
    * reg_offset.
    */
   int virtual_grf_reg_count;
   /** Per-virtual-grf indices into an array of size virtual_grf_reg_count */
   int *virtual_grf_reg_map;
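
   /*
    * Illustrative note (not part of the original header): virtual_grf_reg_map
    * flattens (virtual GRF, reg_offset) pairs into one index space.  For
    * example, with virtual_grf_sizes = { 2, 1, 3 } the map is { 0, 2, 3 },
    * virtual_grf_reg_count is 6, and GRF 2 at reg_offset 1 lands at flat
    * index virtual_grf_reg_map[2] + 1 == 4.
    */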

   bool live_intervals_valid;

   dst_reg *variable_storage(ir_variable *var);

   void reladdr_to_temp(ir_instruction *ir, src_reg *reg, int *num_reladdr);

   bool need_all_constants_in_pull_buffer;

   /**
    * \name Visit methods
    *
    * As typical for the visitor pattern, there must be one \c visit method for
    * each concrete subclass of \c ir_instruction.  Virtual base classes within
    * the hierarchy should not have \c visit methods.
    */
   /*@{*/
   virtual void visit(ir_variable *);
   virtual void visit(ir_loop *);
   virtual void visit(ir_loop_jump *);
   virtual void visit(ir_function_signature *);
   virtual void visit(ir_function *);
   virtual void visit(ir_expression *);
   virtual void visit(ir_swizzle *);
   virtual void visit(ir_dereference_variable *);
   virtual void visit(ir_dereference_array *);
   virtual void visit(ir_dereference_record *);
   virtual void visit(ir_assignment *);
   virtual void visit(ir_constant *);
   virtual void visit(ir_call *);
   virtual void visit(ir_return *);
   virtual void visit(ir_discard *);
   virtual void visit(ir_texture *);
   virtual void visit(ir_if *);
   virtual void visit(ir_emit_vertex *);
   virtual void visit(ir_end_primitive *);
   /*@}*/
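
   /*
    * Illustrative note (not part of the original header): these overloads are
    * reached through the GLSL IR double-dispatch mechanism.  Roughly, for
    * every ir_instruction *ir handed to visit_instructions() (declared below):
    *
    *    base_ir = ir;
    *    ir->accept(this);   // dispatches to the visit() overload for ir's type
    */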

   src_reg result;

   /* Regs for vertex results.  Generated at ir_variable visiting time
    * for the ir->location values used.
    */
   dst_reg output_reg[BRW_VARYING_SLOT_COUNT];
   const char *output_reg_annotation[BRW_VARYING_SLOT_COUNT];
   int *uniform_size;
   int *uniform_vector_size;
   int uniform_array_size; /**< Size of the uniform_size and uniform_vector_size arrays */
   int uniforms;

   src_reg shader_start_time;

   struct hash_table *variable_ht;

   bool run(void);
   void fail(const char *msg, ...);

   int virtual_grf_alloc(int size);
   void setup_uniform_clipplane_values();
   void setup_uniform_values(ir_variable *ir);
   void setup_builtin_uniform_values(ir_variable *ir);
   int setup_uniforms(int payload_reg);
   bool reg_allocate_trivial();
   bool reg_allocate();
   void evaluate_spill_costs(float *spill_costs, bool *no_spill);
   int choose_spill_reg(struct ra_graph *g);
   void spill_reg(int spill_reg);
   void move_grf_array_access_to_scratch();
   void move_uniform_array_access_to_pull_constants();
   void move_push_constants_to_pull_constants();
   void split_uniform_registers();
   void pack_uniform_registers();
   void calculate_live_intervals();
   void invalidate_live_intervals();
   void split_virtual_grfs();
   bool opt_reduce_swizzle();
   bool dead_code_eliminate();
   bool virtual_grf_interferes(int a, int b);
   bool opt_copy_propagation();
   bool opt_cse_local(bblock_t *block);
   bool opt_cse();
   bool opt_algebraic();
   bool opt_register_coalesce();
   void opt_set_dependency_control();
   void opt_schedule_instructions();

   vec4_instruction *emit(vec4_instruction *inst);

   vec4_instruction *emit(enum opcode opcode);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst, src_reg src0);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst,
                          src_reg src0, src_reg src1);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst,
                          src_reg src0, src_reg src1, src_reg src2);

   vec4_instruction *emit_before(bblock_t *block,
                                 vec4_instruction *inst,
                                 vec4_instruction *new_inst);

   vec4_instruction *MOV(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *NOT(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *RNDD(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *RNDE(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *RNDZ(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *FRC(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *F32TO16(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *F16TO32(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *ADD(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *MUL(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *MACH(const dst_reg &dst, const src_reg &src0,
                          const src_reg &src1);
   vec4_instruction *MAC(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *AND(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *OR(const dst_reg &dst, const src_reg &src0,
                        const src_reg &src1);
   vec4_instruction *XOR(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *DP3(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *DP4(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *DPH(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *SHL(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *SHR(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *ASR(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *CMP(dst_reg dst, src_reg src0, src_reg src1,
                         enum brw_conditional_mod condition);
   vec4_instruction *IF(src_reg src0, src_reg src1,
                        enum brw_conditional_mod condition);
   vec4_instruction *IF(enum brw_predicate predicate);
   vec4_instruction *PULL_CONSTANT_LOAD(const dst_reg &dst,
                                        const src_reg &index);
   vec4_instruction *SCRATCH_READ(const dst_reg &dst, const src_reg &index);
   vec4_instruction *SCRATCH_WRITE(const dst_reg &dst, const src_reg &src,
                                   const src_reg &index);
   vec4_instruction *LRP(const dst_reg &dst, const src_reg &a,
                         const src_reg &y, const src_reg &x);
   vec4_instruction *BFREV(const dst_reg &dst, const src_reg &value);
   vec4_instruction *BFE(const dst_reg &dst, const src_reg &bits,
                         const src_reg &offset, const src_reg &value);
   vec4_instruction *BFI1(const dst_reg &dst, const src_reg &bits,
                          const src_reg &offset);
   vec4_instruction *BFI2(const dst_reg &dst, const src_reg &bfi1_dst,
                          const src_reg &insert, const src_reg &base);
   vec4_instruction *FBH(const dst_reg &dst, const src_reg &value);
   vec4_instruction *FBL(const dst_reg &dst, const src_reg &value);
   vec4_instruction *CBIT(const dst_reg &dst, const src_reg &value);
   vec4_instruction *MAD(const dst_reg &dst, const src_reg &c,
                         const src_reg &b, const src_reg &a);
   vec4_instruction *ADDC(const dst_reg &dst, const src_reg &src0,
                          const src_reg &src1);
   vec4_instruction *SUBB(const dst_reg &dst, const src_reg &src0,
                          const src_reg &src1);
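
   /*
    * Illustrative note (not part of the original header): the upper-case
    * helpers above only construct a vec4_instruction; it still has to be
    * handed to emit() (or emit_before()) to join the instruction stream.
    * A minimal sketch:
    *
    *    vec4_instruction *inst = emit(ADD(dst, src0, src1));
    *    inst->saturate = true;   // flags live on the returned instruction
    */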

   int implied_mrf_writes(vec4_instruction *inst);

   bool try_rewrite_rhs_to_dst(ir_assignment *ir,
                               dst_reg dst,
                               src_reg src,
                               vec4_instruction *pre_rhs_inst,
                               vec4_instruction *last_rhs_inst);

   /** Walks an exec_list of ir_instruction and sends it through this visitor. */
   void visit_instructions(const exec_list *list);

   void emit_vp_sop(enum brw_conditional_mod condmod, dst_reg dst,
                    src_reg src0, src_reg src1, src_reg one);

   void emit_bool_to_cond_code(ir_rvalue *ir, enum brw_predicate *predicate);
   void emit_if_gen6(ir_if *ir);

   void emit_minmax(enum brw_conditional_mod conditionalmod, dst_reg dst,
                    src_reg src0, src_reg src1);

   void emit_lrp(const dst_reg &dst,
                 const src_reg &x, const src_reg &y, const src_reg &a);

   void emit_block_move(dst_reg *dst, src_reg *src,
                        const struct glsl_type *type, brw_predicate predicate);

   void emit_constant_values(dst_reg *dst, ir_constant *value);

   /**
    * Emit the correct dot-product instruction for the type of arguments.
    */
   void emit_dp(dst_reg dst, src_reg src0, src_reg src1, unsigned elements);

   void emit_scalar(ir_instruction *ir, enum prog_opcode op,
                    dst_reg dst, src_reg src0);

   void emit_scalar(ir_instruction *ir, enum prog_opcode op,
                    dst_reg dst, src_reg src0, src_reg src1);

   void emit_scs(ir_instruction *ir, enum prog_opcode op,
                 dst_reg dst, const src_reg &src);

   src_reg fix_3src_operand(src_reg src);

   void emit_math1_gen6(enum opcode opcode, dst_reg dst, src_reg src);
   void emit_math1_gen4(enum opcode opcode, dst_reg dst, src_reg src);
   void emit_math(enum opcode opcode, dst_reg dst, src_reg src);
   void emit_math2_gen6(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
   void emit_math2_gen4(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
   void emit_math(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
   src_reg fix_math_operand(src_reg src);

   void emit_pack_half_2x16(dst_reg dst, src_reg src0);
   void emit_unpack_half_2x16(dst_reg dst, src_reg src0);

   uint32_t gather_channel(ir_texture *ir, uint32_t sampler);
   src_reg emit_mcs_fetch(ir_texture *ir, src_reg coordinate, src_reg sampler);
   void emit_gen6_gather_wa(uint8_t wa, dst_reg dst);
   void swizzle_result(ir_texture *ir, src_reg orig_val, uint32_t sampler);

   void emit_ndc_computation();
   void emit_psiz_and_flags(dst_reg reg);
   void emit_clip_distances(dst_reg reg, int offset);
   void emit_generic_urb_slot(dst_reg reg, int varying);
   void emit_urb_slot(dst_reg reg, int varying);

   void emit_shader_time_begin();
   void emit_shader_time_end();
   void emit_shader_time_write(enum shader_time_shader_type type,
                               src_reg value);

   void emit_untyped_atomic(unsigned atomic_op, unsigned surf_index,
                            dst_reg dst, src_reg offset, src_reg src0,
                            src_reg src1);

   void emit_untyped_surface_read(unsigned surf_index, dst_reg dst,
                                  src_reg offset);

   src_reg get_scratch_offset(bblock_t *block, vec4_instruction *inst,
                              src_reg *reladdr, int reg_offset);
   src_reg get_pull_constant_offset(bblock_t *block, vec4_instruction *inst,
                                    src_reg *reladdr, int reg_offset);
   void emit_scratch_read(bblock_t *block, vec4_instruction *inst,
                          dst_reg dst,
                          src_reg orig_src,
                          int base_offset);
   void emit_scratch_write(bblock_t *block, vec4_instruction *inst,
                           int base_offset);
   void emit_pull_constant_load(bblock_t *block, vec4_instruction *inst,
                                dst_reg dst,
                                src_reg orig_src,
                                int base_offset);

   bool try_emit_mad(ir_expression *ir);
   bool try_emit_b2f_of_compare(ir_expression *ir);
   void resolve_ud_negate(src_reg *reg);

   src_reg get_timestamp();

   bool process_move_condition(ir_rvalue *ir);

   void dump_instruction(backend_instruction *inst);
   void dump_instruction(backend_instruction *inst, FILE *file);

   void visit_atomic_counter_intrinsic(ir_call *ir);

protected:
   void emit_vertex();
   void lower_attributes_to_hw_regs(const int *attribute_map,
                                    bool interleaved);
   void setup_payload_interference(struct ra_graph *g, int first_payload_node,
                                   int reg_node_count);
   virtual dst_reg *make_reg_for_system_value(ir_variable *ir) = 0;
   virtual void assign_binding_table_offsets();
   virtual void setup_payload() = 0;
   virtual void emit_prolog() = 0;
   virtual void emit_program_code() = 0;
   virtual void emit_thread_end() = 0;
   virtual void emit_urb_write_header(int mrf) = 0;
   virtual vec4_instruction *emit_urb_write_opcode(bool complete) = 0;
   virtual int compute_array_stride(ir_dereference_array *ir);

   const bool debug_flag;

private:
   /**
    * If true, then register allocation should fail instead of spilling.
    */
   const bool no_spills;

   const shader_time_shader_type st_base;
   const shader_time_shader_type st_written;
   const shader_time_shader_type st_reset;
};
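
/*
 * Illustrative note (not part of the original header): concrete subclasses
 * such as vec4_vs_visitor implement the pure-virtual hooks above, and a
 * compile entry point drives them roughly like this (sketch only, argument
 * lists elided):
 *
 *    vec4_vs_visitor v(...);
 *    if (!v.run()) {
 *       // v.fail_msg explains why compilation failed
 *       return false;
 *    }
 *    // the optimized IR is then handed to vec4_generator below
 */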


/**
 * The vertex shader code generator.
 *
 * Translates VS IR to actual i965 assembly code.
 */
class vec4_generator
{
public:
   vec4_generator(struct brw_context *brw,
                  struct gl_shader_program *shader_prog,
                  struct gl_program *prog,
                  struct brw_vec4_prog_data *prog_data,
                  void *mem_ctx,
                  bool debug_flag);
   ~vec4_generator();

   const unsigned *generate_assembly(const cfg_t *cfg, unsigned *asm_size);

private:
   void generate_code(const cfg_t *cfg);

   void generate_math1_gen4(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src);
   void generate_math2_gen4(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src0,
                            struct brw_reg src1);
   void generate_math_gen6(vec4_instruction *inst,
                           struct brw_reg dst,
                           struct brw_reg src0,
                           struct brw_reg src1);

   void generate_tex(vec4_instruction *inst,
                     struct brw_reg dst,
                     struct brw_reg src,
                     struct brw_reg sampler_index);

   void generate_vs_urb_write(vec4_instruction *inst);
   void generate_gs_urb_write(vec4_instruction *inst);
   void generate_gs_urb_write_allocate(vec4_instruction *inst);
   void generate_gs_thread_end(vec4_instruction *inst);
   void generate_gs_set_write_offset(struct brw_reg dst,
                                     struct brw_reg src0,
                                     struct brw_reg src1);
   void generate_gs_set_vertex_count(struct brw_reg dst,
                                     struct brw_reg src);
   void generate_gs_svb_write(vec4_instruction *inst,
                              struct brw_reg dst,
                              struct brw_reg src0,
                              struct brw_reg src1);
   void generate_gs_svb_set_destination_index(vec4_instruction *inst,
                                              struct brw_reg dst,
                                              struct brw_reg src);
   void generate_gs_set_dword_2(struct brw_reg dst, struct brw_reg src);
   void generate_gs_prepare_channel_masks(struct brw_reg dst);
   void generate_gs_set_channel_masks(struct brw_reg dst, struct brw_reg src);
   void generate_gs_get_instance_id(struct brw_reg dst);
   void generate_gs_ff_sync_set_primitives(struct brw_reg dst,
                                           struct brw_reg src0,
                                           struct brw_reg src1,
                                           struct brw_reg src2);
   void generate_gs_ff_sync(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src0,
                            struct brw_reg src1);
   void generate_gs_set_primitive_id(struct brw_reg dst);
   void generate_oword_dual_block_offsets(struct brw_reg m1,
                                          struct brw_reg index);
   void generate_scratch_write(vec4_instruction *inst,
                               struct brw_reg dst,
                               struct brw_reg src,
                               struct brw_reg index);
   void generate_scratch_read(vec4_instruction *inst,
                              struct brw_reg dst,
                              struct brw_reg index);
   void generate_pull_constant_load(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg index,
                                    struct brw_reg offset);
   void generate_pull_constant_load_gen7(vec4_instruction *inst,
                                         struct brw_reg dst,
                                         struct brw_reg surf_index,
                                         struct brw_reg offset);
   void generate_unpack_flags(vec4_instruction *inst,
                              struct brw_reg dst);

   void generate_untyped_atomic(vec4_instruction *inst,
                                struct brw_reg dst,
                                struct brw_reg atomic_op,
                                struct brw_reg surf_index);

   void generate_untyped_surface_read(vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg surf_index);

   struct brw_context *brw;

   struct brw_compile *p;

   struct gl_shader_program *shader_prog;
   const struct gl_program *prog;

   struct brw_vec4_prog_data *prog_data;

   void *mem_ctx;
   const bool debug_flag;
};
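
/*
 * Illustrative note (not part of the original header): once a vec4_visitor
 * has produced and optimized the IR, code generation is a separate pass over
 * its control-flow graph, roughly (sketch only; it assumes the cfg held in
 * the visitor's backend_visitor state is the one to generate from):
 *
 *    vec4_generator g(brw, shader_prog, prog, prog_data, mem_ctx, debug_flag);
 *    const unsigned *assembly = g.generate_assembly(v.cfg, &asm_size);
 */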

} /* namespace brw */
#endif /* __cplusplus */

#endif /* BRW_VEC4_H */