i965/vec4: Add support for nonconst sampler indexing in VS visitor
[mesa.git] src/mesa/drivers/dri/i965/brw_vec4.h
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef BRW_VEC4_H
#define BRW_VEC4_H

#include <stdint.h>
#include "brw_shader.h"
#include "main/compiler.h"
#include "program/hash_table.h"
#include "brw_program.h"

#ifdef __cplusplus
extern "C" {
#endif

#include "brw_context.h"
#include "brw_eu.h"
#include "intel_asm_annotation.h"

#ifdef __cplusplus
}; /* extern "C" */
#endif

#include "glsl/ir.h"


struct brw_vec4_compile {
   GLuint last_scratch; /**< measured in 32-byte (register size) units */
};


struct brw_vec4_prog_key {
   GLuint program_string_id;

   /**
    * True if at least one clip flag is enabled, regardless of whether the
    * shader uses clip planes or gl_ClipDistance.
    */
   GLuint userclip_active:1;

   /**
    * How many user clipping planes are being uploaded to the vertex shader as
    * push constants.
    */
   GLuint nr_userclip_plane_consts:4;

   GLuint clamp_vertex_color:1;

   struct brw_sampler_prog_key_data tex;
};


#ifdef __cplusplus
extern "C" {
#endif

void
brw_vec4_setup_prog_key_for_precompile(struct gl_context *ctx,
                                       struct brw_vec4_prog_key *key,
                                       GLuint id, struct gl_program *prog);

#ifdef __cplusplus
} /* extern "C" */

namespace brw {

class dst_reg;

unsigned
swizzle_for_size(int size);

class src_reg : public backend_reg
{
public:
   DECLARE_RALLOC_CXX_OPERATORS(src_reg)

   void init();

   src_reg(register_file file, int reg, const glsl_type *type);
   src_reg();
   src_reg(float f);
   src_reg(uint32_t u);
   src_reg(int32_t i);
   src_reg(struct brw_reg reg);

   bool equals(const src_reg &r) const;

   src_reg(class vec4_visitor *v, const struct glsl_type *type);

   explicit src_reg(dst_reg reg);

   GLuint swizzle; /**< BRW_SWIZZLE_XYZW macros from brw_reg.h. */

   src_reg *reladdr;
};

static inline src_reg
retype(src_reg reg, enum brw_reg_type type)
{
   reg.fixed_hw_reg.type = reg.type = type;
   return reg;
}

static inline src_reg
offset(src_reg reg, unsigned delta)
{
   assert(delta == 0 || (reg.file != HW_REG && reg.file != IMM));
   reg.reg_offset += delta;
   return reg;
}

/**
 * Reswizzle a given source register.
 * \sa brw_swizzle().
 */
static inline src_reg
swizzle(src_reg reg, unsigned swizzle)
{
   assert(reg.file != HW_REG);
   reg.swizzle = BRW_SWIZZLE4(
      BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 0)),
      BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 1)),
      BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 2)),
      BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 3)));
   return reg;
}
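
/* Illustrative example (editor's sketch, not part of the original header):
 * broadcasting the second component of a register across all four channels
 * with the helper above.  BRW_SWIZZLE_YYYY is one of the swizzle macros from
 * brw_reg.h; `val` is a placeholder register.
 *
 *    src_reg val = ...;
 *    src_reg yyyy = swizzle(val, BRW_SWIZZLE_YYYY);
 *
 * Note that the helper composes the requested swizzle with whatever swizzle
 * the register already carries, so chained calls accumulate like repeated
 * applications of brw_swizzle().
 */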

static inline src_reg
negate(src_reg reg)
{
   assert(reg.file != HW_REG && reg.file != IMM);
   reg.negate = !reg.negate;
   return reg;
}

class dst_reg : public backend_reg
{
public:
   DECLARE_RALLOC_CXX_OPERATORS(dst_reg)

   void init();

   dst_reg();
   dst_reg(register_file file, int reg);
   dst_reg(register_file file, int reg, const glsl_type *type, int writemask);
   dst_reg(struct brw_reg reg);
   dst_reg(class vec4_visitor *v, const struct glsl_type *type);

   explicit dst_reg(src_reg reg);

   int writemask; /**< Bitfield of WRITEMASK_[XYZW] */

   src_reg *reladdr;
};

static inline dst_reg
retype(dst_reg reg, enum brw_reg_type type)
{
   reg.fixed_hw_reg.type = reg.type = type;
   return reg;
}

static inline dst_reg
offset(dst_reg reg, unsigned delta)
{
   assert(delta == 0 || (reg.file != HW_REG && reg.file != IMM));
   reg.reg_offset += delta;
   return reg;
}

static inline dst_reg
writemask(dst_reg reg, unsigned mask)
{
   assert(reg.file != HW_REG && reg.file != IMM);
   assert((reg.writemask & mask) != 0);
   reg.writemask &= mask;
   return reg;
}
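
/* Illustrative example (editor's sketch, not part of the original header):
 * the dst_reg helpers above are meant to be chained.  Restricting a float
 * destination to its .xy channels one vec4 slot past `base` could look like
 * the following, where `base` is a hypothetical dst_reg and WRITEMASK_X /
 * WRITEMASK_Y are the writemask bits referred to in the field comment above.
 *
 *    dst_reg base = ...;
 *    dst_reg dst = writemask(offset(retype(base, BRW_REGISTER_TYPE_F), 1),
 *                            WRITEMASK_X | WRITEMASK_Y);
 */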

class vec4_instruction : public backend_instruction {
public:
   DECLARE_RALLOC_CXX_OPERATORS(vec4_instruction)

   vec4_instruction(vec4_visitor *v, enum opcode opcode,
                    const dst_reg &dst = dst_reg(),
                    const src_reg &src0 = src_reg(),
                    const src_reg &src1 = src_reg(),
                    const src_reg &src2 = src_reg());

   struct brw_reg get_dst(void);
   struct brw_reg get_src(const struct brw_vec4_prog_data *prog_data, int i);

   dst_reg dst;
   src_reg src[3];

   bool shadow_compare;

   enum brw_urb_write_flags urb_write_flags;
   bool header_present;

   bool is_send_from_grf();
   bool can_reswizzle_dst(int dst_writemask, int swizzle, int swizzle_mask);
   void reswizzle_dst(int dst_writemask, int swizzle);
   bool can_do_source_mods(struct brw_context *brw);

   bool reads_flag()
   {
      return predicate || opcode == VS_OPCODE_UNPACK_FLAGS_SIMD4X2;
   }

   bool writes_flag()
   {
      return conditional_mod && opcode != BRW_OPCODE_SEL;
   }
};

/**
 * The vertex shader front-end.
 *
 * Translates either GLSL IR or Mesa IR (for ARB_vertex_program and
 * fixed-function) into VS IR.
 */
class vec4_visitor : public backend_visitor
{
public:
   vec4_visitor(struct brw_context *brw,
                struct brw_vec4_compile *c,
                struct gl_program *prog,
                const struct brw_vec4_prog_key *key,
                struct brw_vec4_prog_data *prog_data,
                struct gl_shader_program *shader_prog,
                gl_shader_stage stage,
                void *mem_ctx,
                bool debug_flag,
                bool no_spills,
                shader_time_shader_type st_base,
                shader_time_shader_type st_written,
                shader_time_shader_type st_reset);
   ~vec4_visitor();

   dst_reg dst_null_f()
   {
      return dst_reg(brw_null_reg());
   }

   dst_reg dst_null_d()
   {
      return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   }

   dst_reg dst_null_ud()
   {
      return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));
   }

   struct brw_vec4_compile * const c;
   const struct brw_vec4_prog_key * const key;
   struct brw_vec4_prog_data * const prog_data;
   unsigned int sanity_param_count;

   char *fail_msg;
   bool failed;

   /**
    * GLSL IR currently being processed, which is associated with our
    * driver IR instructions for debugging purposes.
    */
   const void *base_ir;
   const char *current_annotation;

   int *virtual_grf_sizes;
   int virtual_grf_count;
   int virtual_grf_array_size;
   int first_non_payload_grf;
   unsigned int max_grf;
   int *virtual_grf_start;
   int *virtual_grf_end;
   dst_reg userplane[MAX_CLIP_PLANES];

   /**
    * This is the size to be used for an array with an element per
    * reg_offset
    */
   int virtual_grf_reg_count;
   /** Per-virtual-grf indices into an array of size virtual_grf_reg_count */
   int *virtual_grf_reg_map;

   bool live_intervals_valid;

   dst_reg *variable_storage(ir_variable *var);

   void reladdr_to_temp(ir_instruction *ir, src_reg *reg, int *num_reladdr);

   bool need_all_constants_in_pull_buffer;

   /**
    * \name Visit methods
    *
    * As typical for the visitor pattern, there must be one \c visit method for
    * each concrete subclass of \c ir_instruction. Virtual base classes within
    * the hierarchy should not have \c visit methods.
    */
   /*@{*/
   virtual void visit(ir_variable *);
   virtual void visit(ir_loop *);
   virtual void visit(ir_loop_jump *);
   virtual void visit(ir_function_signature *);
   virtual void visit(ir_function *);
   virtual void visit(ir_expression *);
   virtual void visit(ir_swizzle *);
   virtual void visit(ir_dereference_variable *);
   virtual void visit(ir_dereference_array *);
   virtual void visit(ir_dereference_record *);
   virtual void visit(ir_assignment *);
   virtual void visit(ir_constant *);
   virtual void visit(ir_call *);
   virtual void visit(ir_return *);
   virtual void visit(ir_discard *);
   virtual void visit(ir_texture *);
   virtual void visit(ir_if *);
   virtual void visit(ir_emit_vertex *);
   virtual void visit(ir_end_primitive *);
   /*@}*/

   src_reg result;

   /* Regs for vertex results.  Generated at ir_variable visiting time
    * for the ir->location slots that are used.
    */
   dst_reg output_reg[BRW_VARYING_SLOT_COUNT];
   const char *output_reg_annotation[BRW_VARYING_SLOT_COUNT];
   int *uniform_size;
   int *uniform_vector_size;
   int uniform_array_size; /**< Size of uniform_[vector_]size arrays */
   int uniforms;

   src_reg shader_start_time;

   struct hash_table *variable_ht;

   bool run(void);
   void fail(const char *msg, ...);

   int virtual_grf_alloc(int size);
   void setup_uniform_clipplane_values();
   void setup_uniform_values(ir_variable *ir);
   void setup_builtin_uniform_values(ir_variable *ir);
   int setup_uniforms(int payload_reg);
   bool reg_allocate_trivial();
   bool reg_allocate();
   void evaluate_spill_costs(float *spill_costs, bool *no_spill);
   int choose_spill_reg(struct ra_graph *g);
   void spill_reg(int spill_reg);
   void move_grf_array_access_to_scratch();
   void move_uniform_array_access_to_pull_constants();
   void move_push_constants_to_pull_constants();
   void split_uniform_registers();
   void pack_uniform_registers();
   void calculate_live_intervals();
   void invalidate_live_intervals();
   void split_virtual_grfs();
   bool dead_code_eliminate();
   bool virtual_grf_interferes(int a, int b);
   bool opt_copy_propagation();
   bool opt_cse_local(bblock_t *block);
   bool opt_cse();
   bool opt_algebraic();
   bool opt_register_coalesce();
   void opt_set_dependency_control();
   void opt_schedule_instructions();

   vec4_instruction *emit(vec4_instruction *inst);

   vec4_instruction *emit(enum opcode opcode);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst, src_reg src0);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst,
                          src_reg src0, src_reg src1);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst,
                          src_reg src0, src_reg src1, src_reg src2);

   vec4_instruction *emit_before(vec4_instruction *inst,
                                 vec4_instruction *new_inst);

   vec4_instruction *MOV(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *NOT(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *RNDD(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *RNDE(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *RNDZ(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *FRC(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *F32TO16(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *F16TO32(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *ADD(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *MUL(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *MACH(const dst_reg &dst, const src_reg &src0,
                          const src_reg &src1);
   vec4_instruction *MAC(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *AND(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *OR(const dst_reg &dst, const src_reg &src0,
                        const src_reg &src1);
   vec4_instruction *XOR(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *DP3(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *DP4(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *DPH(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *SHL(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *SHR(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *ASR(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *CMP(dst_reg dst, src_reg src0, src_reg src1,
                         enum brw_conditional_mod condition);
   vec4_instruction *IF(src_reg src0, src_reg src1,
                        enum brw_conditional_mod condition);
   vec4_instruction *IF(enum brw_predicate predicate);
   vec4_instruction *PULL_CONSTANT_LOAD(const dst_reg &dst,
                                        const src_reg &index);
   vec4_instruction *SCRATCH_READ(const dst_reg &dst, const src_reg &index);
   vec4_instruction *SCRATCH_WRITE(const dst_reg &dst, const src_reg &src,
                                   const src_reg &index);
   vec4_instruction *LRP(const dst_reg &dst, const src_reg &a,
                         const src_reg &y, const src_reg &x);
   vec4_instruction *BFREV(const dst_reg &dst, const src_reg &value);
   vec4_instruction *BFE(const dst_reg &dst, const src_reg &bits,
                         const src_reg &offset, const src_reg &value);
   vec4_instruction *BFI1(const dst_reg &dst, const src_reg &bits,
                          const src_reg &offset);
   vec4_instruction *BFI2(const dst_reg &dst, const src_reg &bfi1_dst,
                          const src_reg &insert, const src_reg &base);
   vec4_instruction *FBH(const dst_reg &dst, const src_reg &value);
   vec4_instruction *FBL(const dst_reg &dst, const src_reg &value);
   vec4_instruction *CBIT(const dst_reg &dst, const src_reg &value);
   vec4_instruction *MAD(const dst_reg &dst, const src_reg &c,
                         const src_reg &b, const src_reg &a);
   vec4_instruction *ADDC(const dst_reg &dst, const src_reg &src0,
                          const src_reg &src1);
   vec4_instruction *SUBB(const dst_reg &dst, const src_reg &src0,
                          const src_reg &src1);
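
   /* Illustrative note (editor's sketch, not part of the original header):
    * the ALU helpers above only construct a vec4_instruction; the emit()
    * overloads append it to the instruction stream.  A typical sequence in
    * visitor code therefore looks like
    *
    *    emit(ADD(dst, src0, src1));
    *    vec4_instruction *inst = emit(MOV(dst, src0));
    *    inst->saturate = true;
    *
    * where dst, src0, and src1 are placeholder registers.
    */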

   int implied_mrf_writes(vec4_instruction *inst);

   bool try_rewrite_rhs_to_dst(ir_assignment *ir,
                               dst_reg dst,
                               src_reg src,
                               vec4_instruction *pre_rhs_inst,
                               vec4_instruction *last_rhs_inst);

   /** Walks an exec_list of ir_instruction and sends it through this visitor. */
   void visit_instructions(const exec_list *list);

   void emit_vp_sop(enum brw_conditional_mod condmod, dst_reg dst,
                    src_reg src0, src_reg src1, src_reg one);

   void emit_bool_to_cond_code(ir_rvalue *ir, enum brw_predicate *predicate);
   void emit_if_gen6(ir_if *ir);

   void emit_minmax(enum brw_conditional_mod conditionalmod, dst_reg dst,
                    src_reg src0, src_reg src1);

   void emit_lrp(const dst_reg &dst,
                 const src_reg &x, const src_reg &y, const src_reg &a);

   void emit_block_move(dst_reg *dst, src_reg *src,
                        const struct glsl_type *type, brw_predicate predicate);

   void emit_constant_values(dst_reg *dst, ir_constant *value);

   /**
    * Emit the correct dot-product instruction for the type of arguments
    */
   void emit_dp(dst_reg dst, src_reg src0, src_reg src1, unsigned elements);

   void emit_scalar(ir_instruction *ir, enum prog_opcode op,
                    dst_reg dst, src_reg src0);

   void emit_scalar(ir_instruction *ir, enum prog_opcode op,
                    dst_reg dst, src_reg src0, src_reg src1);

   void emit_scs(ir_instruction *ir, enum prog_opcode op,
                 dst_reg dst, const src_reg &src);

   src_reg fix_3src_operand(src_reg src);

   void emit_math1_gen6(enum opcode opcode, dst_reg dst, src_reg src);
   void emit_math1_gen4(enum opcode opcode, dst_reg dst, src_reg src);
   void emit_math(enum opcode opcode, dst_reg dst, src_reg src);
   void emit_math2_gen6(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
   void emit_math2_gen4(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
   void emit_math(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
   src_reg fix_math_operand(src_reg src);

   void emit_pack_half_2x16(dst_reg dst, src_reg src0);
   void emit_unpack_half_2x16(dst_reg dst, src_reg src0);

   uint32_t gather_channel(ir_texture *ir, uint32_t sampler);
   src_reg emit_mcs_fetch(ir_texture *ir, src_reg coordinate, src_reg sampler);
   void emit_gen6_gather_wa(uint8_t wa, dst_reg dst);
   void swizzle_result(ir_texture *ir, src_reg orig_val, uint32_t sampler);

   void emit_ndc_computation();
   void emit_psiz_and_flags(struct brw_reg reg);
   void emit_clip_distances(dst_reg reg, int offset);
   void emit_generic_urb_slot(dst_reg reg, int varying);
   void emit_urb_slot(int mrf, int varying);

   void emit_shader_time_begin();
   void emit_shader_time_end();
   void emit_shader_time_write(enum shader_time_shader_type type,
                               src_reg value);

   void emit_untyped_atomic(unsigned atomic_op, unsigned surf_index,
                            dst_reg dst, src_reg offset, src_reg src0,
                            src_reg src1);

   void emit_untyped_surface_read(unsigned surf_index, dst_reg dst,
                                  src_reg offset);

   src_reg get_scratch_offset(vec4_instruction *inst,
                              src_reg *reladdr, int reg_offset);
   src_reg get_pull_constant_offset(vec4_instruction *inst,
                                    src_reg *reladdr, int reg_offset);
   void emit_scratch_read(vec4_instruction *inst,
                          dst_reg dst,
                          src_reg orig_src,
                          int base_offset);
   void emit_scratch_write(vec4_instruction *inst,
                           int base_offset);
   void emit_pull_constant_load(vec4_instruction *inst,
                                dst_reg dst,
                                src_reg orig_src,
                                int base_offset);

   bool try_emit_sat(ir_expression *ir);
   bool try_emit_mad(ir_expression *ir);
   bool try_emit_b2f_of_compare(ir_expression *ir);
   void resolve_ud_negate(src_reg *reg);

   src_reg get_timestamp();

   bool process_move_condition(ir_rvalue *ir);

   void dump_instruction(backend_instruction *inst);
   void dump_instruction(backend_instruction *inst, FILE *file);

   void visit_atomic_counter_intrinsic(ir_call *ir);

protected:
   void emit_vertex();
   void lower_attributes_to_hw_regs(const int *attribute_map,
                                    bool interleaved);
   void setup_payload_interference(struct ra_graph *g, int first_payload_node,
                                   int reg_node_count);
   virtual dst_reg *make_reg_for_system_value(ir_variable *ir) = 0;
   virtual void setup_payload() = 0;
   virtual void emit_prolog() = 0;
   virtual void emit_program_code() = 0;
   virtual void emit_thread_end() = 0;
   virtual void emit_urb_write_header(int mrf) = 0;
   virtual vec4_instruction *emit_urb_write_opcode(bool complete) = 0;
   virtual int compute_array_stride(ir_dereference_array *ir);

   const bool debug_flag;

private:
   /**
    * If true, then register allocation should fail instead of spilling.
    */
   const bool no_spills;

   const shader_time_shader_type st_base;
   const shader_time_shader_type st_written;
   const shader_time_shader_type st_reset;
};


/**
 * The vertex shader code generator.
 *
 * Translates VS IR to actual i965 assembly code.
 */
class vec4_generator
{
public:
   vec4_generator(struct brw_context *brw,
                  struct gl_shader_program *shader_prog,
                  struct gl_program *prog,
                  struct brw_vec4_prog_data *prog_data,
                  void *mem_ctx,
                  bool debug_flag);
   ~vec4_generator();

   const unsigned *generate_assembly(exec_list *insts, unsigned *asm_size);

private:
   void generate_code(exec_list *instructions);
   void generate_vec4_instruction(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg *src);

   void generate_math1_gen4(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src);
   void generate_math2_gen4(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src0,
                            struct brw_reg src1);
   void generate_math_gen6(vec4_instruction *inst,
                           struct brw_reg dst,
                           struct brw_reg src0,
                           struct brw_reg src1);

   void generate_tex(vec4_instruction *inst,
                     struct brw_reg dst,
                     struct brw_reg src,
                     struct brw_reg sampler_index);

   void generate_vs_urb_write(vec4_instruction *inst);
   void generate_gs_urb_write(vec4_instruction *inst);
   void generate_gs_thread_end(vec4_instruction *inst);
   void generate_gs_set_write_offset(struct brw_reg dst,
                                     struct brw_reg src0,
                                     struct brw_reg src1);
   void generate_gs_set_vertex_count(struct brw_reg dst,
                                     struct brw_reg src);
   void generate_gs_set_dword_2_immed(struct brw_reg dst, struct brw_reg src);
   void generate_gs_prepare_channel_masks(struct brw_reg dst);
   void generate_gs_set_channel_masks(struct brw_reg dst, struct brw_reg src);
   void generate_gs_get_instance_id(struct brw_reg dst);
   void generate_oword_dual_block_offsets(struct brw_reg m1,
                                          struct brw_reg index);
   void generate_scratch_write(vec4_instruction *inst,
                               struct brw_reg dst,
                               struct brw_reg src,
                               struct brw_reg index);
   void generate_scratch_read(vec4_instruction *inst,
                              struct brw_reg dst,
                              struct brw_reg index);
   void generate_pull_constant_load(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg index,
                                    struct brw_reg offset);
   void generate_pull_constant_load_gen7(vec4_instruction *inst,
                                         struct brw_reg dst,
                                         struct brw_reg surf_index,
                                         struct brw_reg offset);
   void generate_unpack_flags(vec4_instruction *inst,
                              struct brw_reg dst);

   void generate_untyped_atomic(vec4_instruction *inst,
                                struct brw_reg dst,
                                struct brw_reg atomic_op,
                                struct brw_reg surf_index);

   void generate_untyped_surface_read(vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg surf_index);

   struct brw_context *brw;

   struct brw_compile *p;

   struct gl_shader_program *shader_prog;
   const struct gl_program *prog;

   struct brw_vec4_prog_data *prog_data;

   void *mem_ctx;
   const bool debug_flag;
};
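
/* Illustrative sketch (editor's addition, not part of the original header):
 * the generator is driven by handing it the visitor's instruction list and
 * retrieving the assembled program.  Only the constructor arguments and
 * generate_assembly() declared above are real; `v` is a placeholder for a
 * vec4_visitor whose run() has already completed.
 *
 *    vec4_generator g(brw, shader_prog, prog, prog_data, mem_ctx, debug_flag);
 *    unsigned asm_size;
 *    const unsigned *assembly = g.generate_assembly(&v.instructions, &asm_size);
 */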

} /* namespace brw */
#endif /* __cplusplus */

#endif /* BRW_VEC4_H */