/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef BRW_VEC4_H
#define BRW_VEC4_H

#include <stdint.h>
#include "brw_shader.h"
#include "main/compiler.h"
#include "program/hash_table.h"
#include "brw_program.h"

#ifdef __cplusplus
extern "C" {
#endif

#include "brw_context.h"
#include "brw_eu.h"
#include "intel_asm_annotation.h"

#ifdef __cplusplus
} /* extern "C" */
#include "gen8_generator.h"
#endif

#include "glsl/ir.h"


struct brw_vec4_compile {
   GLuint last_scratch; /**< measured in 32-byte (register size) units */
};


struct brw_vec4_prog_key {
   GLuint program_string_id;

   /**
    * True if at least one clip flag is enabled, regardless of whether the
    * shader uses clip planes or gl_ClipDistance.
    */
   GLuint userclip_active:1;

   /**
    * How many user clipping planes are being uploaded to the vertex shader as
    * push constants.
    */
   GLuint nr_userclip_plane_consts:4;

   GLuint clamp_vertex_color:1;

   struct brw_sampler_prog_key_data tex;
};


#ifdef __cplusplus
extern "C" {
#endif

void
brw_vec4_setup_prog_key_for_precompile(struct gl_context *ctx,
                                       struct brw_vec4_prog_key *key,
                                       GLuint id, struct gl_program *prog);

#ifdef __cplusplus
} /* extern "C" */

namespace brw {

class dst_reg;

unsigned
swizzle_for_size(int size);

class src_reg : public backend_reg
{
public:
   DECLARE_RALLOC_CXX_OPERATORS(src_reg)

   void init();

   src_reg(register_file file, int reg, const glsl_type *type);
   src_reg();
   src_reg(float f);
   src_reg(uint32_t u);
   src_reg(int32_t i);
   src_reg(struct brw_reg reg);

   bool equals(const src_reg &r) const;

   src_reg(class vec4_visitor *v, const struct glsl_type *type);

   explicit src_reg(dst_reg reg);

   GLuint swizzle; /**< BRW_SWIZZLE_XYZW macros from brw_reg.h. */

   src_reg *reladdr;
};

static inline src_reg
retype(src_reg reg, enum brw_reg_type type)
{
   reg.fixed_hw_reg.type = reg.type = type;
   return reg;
}

static inline src_reg
offset(src_reg reg, unsigned delta)
{
   assert(delta == 0 || (reg.file != HW_REG && reg.file != IMM));
   reg.reg_offset += delta;
   return reg;
}
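
/*
 * Usage sketch (illustrative, not from this file): offset() advances
 * reg_offset, selecting a later 32-byte register of the same virtual GRF,
 * e.g. the second column of a matrix stored in consecutive registers.
 * The register name below is hypothetical:
 *
 *    src_reg col1 = offset(matrix_src, 1);
 */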

/**
 * Reswizzle a given source register.
 * \sa brw_swizzle().
 */
static inline src_reg
swizzle(src_reg reg, unsigned swizzle)
{
   assert(reg.file != HW_REG);
   reg.swizzle = BRW_SWIZZLE4(
      BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 0)),
      BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 1)),
      BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 2)),
      BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 3)));
   return reg;
}
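
/*
 * Usage sketch (illustrative): the requested swizzle is composed on top of
 * the register's existing one, so splatting X on an already-swizzled source
 * still picks that source's first selected component.  The register name is
 * hypothetical; BRW_SWIZZLE_XXXX is one of the swizzle macros from brw_reg.h:
 *
 *    src_reg xxxx = swizzle(some_src, BRW_SWIZZLE_XXXX);
 */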

static inline src_reg
negate(src_reg reg)
{
   assert(reg.file != HW_REG && reg.file != IMM);
   reg.negate = !reg.negate;
   return reg;
}

class dst_reg : public backend_reg
{
public:
   DECLARE_RALLOC_CXX_OPERATORS(dst_reg)

   void init();

   dst_reg();
   dst_reg(register_file file, int reg);
   dst_reg(register_file file, int reg, const glsl_type *type, int writemask);
   dst_reg(struct brw_reg reg);
   dst_reg(class vec4_visitor *v, const struct glsl_type *type);

   explicit dst_reg(src_reg reg);

   int writemask; /**< Bitfield of WRITEMASK_[XYZW] */

   src_reg *reladdr;
};

static inline dst_reg
retype(dst_reg reg, enum brw_reg_type type)
{
   reg.fixed_hw_reg.type = reg.type = type;
   return reg;
}

static inline dst_reg
offset(dst_reg reg, unsigned delta)
{
   assert(delta == 0 || (reg.file != HW_REG && reg.file != IMM));
   reg.reg_offset += delta;
   return reg;
}

static inline dst_reg
writemask(dst_reg reg, unsigned mask)
{
   assert(reg.file != HW_REG && reg.file != IMM);
   assert((reg.writemask & mask) != 0);
   reg.writemask &= mask;
   return reg;
}
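
/*
 * Usage sketch (illustrative): because the mask is ANDed in, writemask() can
 * only restrict the channels already enabled on the destination, never add
 * new ones.  The register name is hypothetical; the WRITEMASK_* macros are
 * the ones referenced above:
 *
 *    dst_reg xy = writemask(some_dst, WRITEMASK_X | WRITEMASK_Y);
 */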

class vec4_instruction : public backend_instruction {
public:
   DECLARE_RALLOC_CXX_OPERATORS(vec4_instruction)

   vec4_instruction(vec4_visitor *v, enum opcode opcode,
                    const dst_reg &dst = dst_reg(),
                    const src_reg &src0 = src_reg(),
                    const src_reg &src1 = src_reg(),
                    const src_reg &src2 = src_reg());

   struct brw_reg get_dst(void);
   struct brw_reg get_src(const struct brw_vec4_prog_data *prog_data, int i);

   dst_reg dst;
   src_reg src[3];

   bool shadow_compare;

   enum brw_urb_write_flags urb_write_flags;
   bool header_present;

   bool is_send_from_grf();
   bool can_reswizzle_dst(int dst_writemask, int swizzle, int swizzle_mask);
   void reswizzle_dst(int dst_writemask, int swizzle);
   bool can_do_source_mods(struct brw_context *brw);

   bool reads_flag()
   {
      return predicate || opcode == VS_OPCODE_UNPACK_FLAGS_SIMD4X2;
   }

   bool writes_flag()
   {
      return conditional_mod && opcode != BRW_OPCODE_SEL;
   }
};

/**
 * The vertex shader front-end.
 *
 * Translates either GLSL IR or Mesa IR (for ARB_vertex_program and
 * fixed-function) into VS IR.
 */
class vec4_visitor : public backend_visitor
{
public:
   vec4_visitor(struct brw_context *brw,
                struct brw_vec4_compile *c,
                struct gl_program *prog,
                const struct brw_vec4_prog_key *key,
                struct brw_vec4_prog_data *prog_data,
                struct gl_shader_program *shader_prog,
                gl_shader_stage stage,
                void *mem_ctx,
                bool debug_flag,
                bool no_spills,
                shader_time_shader_type st_base,
                shader_time_shader_type st_written,
                shader_time_shader_type st_reset);
   ~vec4_visitor();

   dst_reg dst_null_f()
   {
      return dst_reg(brw_null_reg());
   }

   dst_reg dst_null_d()
   {
      return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   }

   dst_reg dst_null_ud()
   {
      return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));
   }

   struct brw_vec4_compile * const c;
   const struct brw_vec4_prog_key * const key;
   struct brw_vec4_prog_data * const prog_data;
   unsigned int sanity_param_count;

   char *fail_msg;
   bool failed;

   /**
    * GLSL IR currently being processed, which is associated with our
    * driver IR instructions for debugging purposes.
    */
   const void *base_ir;
   const char *current_annotation;

   int *virtual_grf_sizes;
   int virtual_grf_count;
   int virtual_grf_array_size;
   int first_non_payload_grf;
   unsigned int max_grf;
   int *virtual_grf_start;
   int *virtual_grf_end;
   dst_reg userplane[MAX_CLIP_PLANES];

   /**
    * This is the size to be used for an array with an element per
    * reg_offset.
    */
   int virtual_grf_reg_count;
   /** Per-virtual-grf indices into an array of size virtual_grf_reg_count */
   int *virtual_grf_reg_map;
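
   /*
    * Illustrative note (not from this file): passes that track per-register
    * state typically flatten a register reference into such an array as
    * virtual_grf_reg_map[reg] + reg_offset, which yields an index smaller
    * than virtual_grf_reg_count.
    */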

   bool live_intervals_valid;

   dst_reg *variable_storage(ir_variable *var);

   void reladdr_to_temp(ir_instruction *ir, src_reg *reg, int *num_reladdr);

   bool need_all_constants_in_pull_buffer;

   /**
    * \name Visit methods
    *
    * As typical for the visitor pattern, there must be one \c visit method for
    * each concrete subclass of \c ir_instruction.  Virtual base classes within
    * the hierarchy should not have \c visit methods.
    */
   /*@{*/
   virtual void visit(ir_variable *);
   virtual void visit(ir_loop *);
   virtual void visit(ir_loop_jump *);
   virtual void visit(ir_function_signature *);
   virtual void visit(ir_function *);
   virtual void visit(ir_expression *);
   virtual void visit(ir_swizzle *);
   virtual void visit(ir_dereference_variable *);
   virtual void visit(ir_dereference_array *);
   virtual void visit(ir_dereference_record *);
   virtual void visit(ir_assignment *);
   virtual void visit(ir_constant *);
   virtual void visit(ir_call *);
   virtual void visit(ir_return *);
   virtual void visit(ir_discard *);
   virtual void visit(ir_texture *);
   virtual void visit(ir_if *);
   virtual void visit(ir_emit_vertex *);
   virtual void visit(ir_end_primitive *);
   /*@}*/

   src_reg result;

   /* Regs for vertex results.  Generated at ir_variable visiting time
    * for the ir->locations that are used.
    */
   dst_reg output_reg[BRW_VARYING_SLOT_COUNT];
   const char *output_reg_annotation[BRW_VARYING_SLOT_COUNT];
   int *uniform_size;
   int *uniform_vector_size;
   int uniform_array_size; /**< Size of uniform_[vector_]size arrays */
   int uniforms;

   src_reg shader_start_time;

   struct hash_table *variable_ht;

   bool run(void);
   void fail(const char *msg, ...);

   int virtual_grf_alloc(int size);
   void setup_uniform_clipplane_values();
   void setup_uniform_values(ir_variable *ir);
   void setup_builtin_uniform_values(ir_variable *ir);
   int setup_uniforms(int payload_reg);
   bool reg_allocate_trivial();
   bool reg_allocate();
   void evaluate_spill_costs(float *spill_costs, bool *no_spill);
   int choose_spill_reg(struct ra_graph *g);
   void spill_reg(int spill_reg);
   void move_grf_array_access_to_scratch();
   void move_uniform_array_access_to_pull_constants();
   void move_push_constants_to_pull_constants();
   void split_uniform_registers();
   void pack_uniform_registers();
   void calculate_live_intervals();
   void invalidate_live_intervals();
   void split_virtual_grfs();
   bool dead_code_eliminate();
   bool virtual_grf_interferes(int a, int b);
   bool opt_copy_propagation();
   bool opt_cse_local(bblock_t *block);
   bool opt_cse();
   bool opt_algebraic();
   bool opt_register_coalesce();
   void opt_set_dependency_control();
   void opt_schedule_instructions();

   vec4_instruction *emit(vec4_instruction *inst);

   vec4_instruction *emit(enum opcode opcode);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst, src_reg src0);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst,
                          src_reg src0, src_reg src1);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst,
                          src_reg src0, src_reg src1, src_reg src2);

   vec4_instruction *emit_before(vec4_instruction *inst,
                                 vec4_instruction *new_inst);

   vec4_instruction *MOV(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *NOT(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *RNDD(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *RNDE(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *RNDZ(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *FRC(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *F32TO16(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *F16TO32(const dst_reg &dst, const src_reg &src0);
   vec4_instruction *ADD(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *MUL(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *MACH(const dst_reg &dst, const src_reg &src0,
                          const src_reg &src1);
   vec4_instruction *MAC(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *AND(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *OR(const dst_reg &dst, const src_reg &src0,
                        const src_reg &src1);
   vec4_instruction *XOR(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *DP3(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *DP4(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *DPH(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *SHL(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *SHR(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *ASR(const dst_reg &dst, const src_reg &src0,
                         const src_reg &src1);
   vec4_instruction *CMP(dst_reg dst, src_reg src0, src_reg src1,
                         enum brw_conditional_mod condition);
   vec4_instruction *IF(src_reg src0, src_reg src1,
                        enum brw_conditional_mod condition);
   vec4_instruction *IF(enum brw_predicate predicate);
   vec4_instruction *PULL_CONSTANT_LOAD(const dst_reg &dst,
                                        const src_reg &index);
   vec4_instruction *SCRATCH_READ(const dst_reg &dst, const src_reg &index);
   vec4_instruction *SCRATCH_WRITE(const dst_reg &dst, const src_reg &src,
                                   const src_reg &index);
   vec4_instruction *LRP(const dst_reg &dst, const src_reg &a,
                         const src_reg &y, const src_reg &x);
   vec4_instruction *BFREV(const dst_reg &dst, const src_reg &value);
   vec4_instruction *BFE(const dst_reg &dst, const src_reg &bits,
                         const src_reg &offset, const src_reg &value);
   vec4_instruction *BFI1(const dst_reg &dst, const src_reg &bits,
                          const src_reg &offset);
   vec4_instruction *BFI2(const dst_reg &dst, const src_reg &bfi1_dst,
                          const src_reg &insert, const src_reg &base);
   vec4_instruction *FBH(const dst_reg &dst, const src_reg &value);
   vec4_instruction *FBL(const dst_reg &dst, const src_reg &value);
   vec4_instruction *CBIT(const dst_reg &dst, const src_reg &value);
   vec4_instruction *MAD(const dst_reg &dst, const src_reg &c,
                         const src_reg &b, const src_reg &a);
   vec4_instruction *ADDC(const dst_reg &dst, const src_reg &src0,
                          const src_reg &src1);
   vec4_instruction *SUBB(const dst_reg &dst, const src_reg &src0,
                          const src_reg &src1);
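
   /*
    * Usage sketch (illustrative, not from this file): the upper-case helpers
    * above only construct a vec4_instruction; it still has to be appended to
    * the instruction stream with emit().  Register names are hypothetical:
    *
    *    emit(MOV(dst, src));
    *    vec4_instruction *add = emit(ADD(dst, src0, src1));
    */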

   int implied_mrf_writes(vec4_instruction *inst);

   bool try_rewrite_rhs_to_dst(ir_assignment *ir,
                               dst_reg dst,
                               src_reg src,
                               vec4_instruction *pre_rhs_inst,
                               vec4_instruction *last_rhs_inst);

   /** Walks an exec_list of ir_instruction and sends it through this visitor. */
   void visit_instructions(const exec_list *list);

   void emit_vp_sop(enum brw_conditional_mod condmod, dst_reg dst,
                    src_reg src0, src_reg src1, src_reg one);

   void emit_bool_to_cond_code(ir_rvalue *ir, enum brw_predicate *predicate);
   void emit_if_gen6(ir_if *ir);

   void emit_minmax(enum brw_conditional_mod conditionalmod, dst_reg dst,
                    src_reg src0, src_reg src1);

   void emit_lrp(const dst_reg &dst,
                 const src_reg &x, const src_reg &y, const src_reg &a);

   void emit_block_move(dst_reg *dst, src_reg *src,
                        const struct glsl_type *type, brw_predicate predicate);

   void emit_constant_values(dst_reg *dst, ir_constant *value);

   /**
    * Emit the correct dot-product instruction for the type of arguments.
    */
   void emit_dp(dst_reg dst, src_reg src0, src_reg src1, unsigned elements);

   void emit_scalar(ir_instruction *ir, enum prog_opcode op,
                    dst_reg dst, src_reg src0);

   void emit_scalar(ir_instruction *ir, enum prog_opcode op,
                    dst_reg dst, src_reg src0, src_reg src1);

   void emit_scs(ir_instruction *ir, enum prog_opcode op,
                 dst_reg dst, const src_reg &src);

   src_reg fix_3src_operand(src_reg src);

   void emit_math1_gen6(enum opcode opcode, dst_reg dst, src_reg src);
   void emit_math1_gen4(enum opcode opcode, dst_reg dst, src_reg src);
   void emit_math(enum opcode opcode, dst_reg dst, src_reg src);
   void emit_math2_gen6(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
   void emit_math2_gen4(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
   void emit_math(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
   src_reg fix_math_operand(src_reg src);

   void emit_pack_half_2x16(dst_reg dst, src_reg src0);
   void emit_unpack_half_2x16(dst_reg dst, src_reg src0);

   uint32_t gather_channel(ir_texture *ir, uint32_t sampler);
   src_reg emit_mcs_fetch(ir_texture *ir, src_reg coordinate, uint32_t sampler);
   void emit_gen6_gather_wa(uint8_t wa, dst_reg dst);
   void swizzle_result(ir_texture *ir, src_reg orig_val, uint32_t sampler);

   void emit_ndc_computation();
   void emit_psiz_and_flags(struct brw_reg reg);
   void emit_clip_distances(dst_reg reg, int offset);
   void emit_generic_urb_slot(dst_reg reg, int varying);
   void emit_urb_slot(int mrf, int varying);

   void emit_shader_time_begin();
   void emit_shader_time_end();
   void emit_shader_time_write(enum shader_time_shader_type type,
                               src_reg value);

   void emit_untyped_atomic(unsigned atomic_op, unsigned surf_index,
                            dst_reg dst, src_reg offset, src_reg src0,
                            src_reg src1);

   void emit_untyped_surface_read(unsigned surf_index, dst_reg dst,
                                  src_reg offset);

   src_reg get_scratch_offset(vec4_instruction *inst,
                              src_reg *reladdr, int reg_offset);
   src_reg get_pull_constant_offset(vec4_instruction *inst,
                                    src_reg *reladdr, int reg_offset);
   void emit_scratch_read(vec4_instruction *inst,
                          dst_reg dst,
                          src_reg orig_src,
                          int base_offset);
   void emit_scratch_write(vec4_instruction *inst,
                           int base_offset);
   void emit_pull_constant_load(vec4_instruction *inst,
                                dst_reg dst,
                                src_reg orig_src,
                                int base_offset);

   bool try_emit_sat(ir_expression *ir);
   bool try_emit_mad(ir_expression *ir);
   bool try_emit_b2f_of_compare(ir_expression *ir);
   void resolve_ud_negate(src_reg *reg);

   src_reg get_timestamp();

   bool process_move_condition(ir_rvalue *ir);

   void dump_instruction(backend_instruction *inst);
   void dump_instruction(backend_instruction *inst, FILE *file);

   void visit_atomic_counter_intrinsic(ir_call *ir);

protected:
   void emit_vertex();
   void lower_attributes_to_hw_regs(const int *attribute_map,
                                    bool interleaved);
   void setup_payload_interference(struct ra_graph *g, int first_payload_node,
                                   int reg_node_count);
   virtual dst_reg *make_reg_for_system_value(ir_variable *ir) = 0;
   virtual void setup_payload() = 0;
   virtual void emit_prolog() = 0;
   virtual void emit_program_code() = 0;
   virtual void emit_thread_end() = 0;
   virtual void emit_urb_write_header(int mrf) = 0;
   virtual vec4_instruction *emit_urb_write_opcode(bool complete) = 0;
   virtual int compute_array_stride(ir_dereference_array *ir);

   const bool debug_flag;

private:
   /**
    * If true, then register allocation should fail instead of spilling.
    */
   const bool no_spills;

   const shader_time_shader_type st_base;
   const shader_time_shader_type st_written;
   const shader_time_shader_type st_reset;
};


/**
 * The vertex shader code generator.
 *
 * Translates VS IR to actual i965 assembly code.
 */
class vec4_generator
{
public:
   vec4_generator(struct brw_context *brw,
                  struct gl_shader_program *shader_prog,
                  struct gl_program *prog,
                  struct brw_vec4_prog_data *prog_data,
                  void *mem_ctx,
                  bool debug_flag);
   ~vec4_generator();

   const unsigned *generate_assembly(exec_list *insts, unsigned *asm_size);

private:
   void generate_code(exec_list *instructions);
   void generate_vec4_instruction(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg *src);

   void generate_math1_gen4(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src);
   void generate_math2_gen4(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src0,
                            struct brw_reg src1);
   void generate_math_gen6(vec4_instruction *inst,
                           struct brw_reg dst,
                           struct brw_reg src0,
                           struct brw_reg src1);

   void generate_tex(vec4_instruction *inst,
                     struct brw_reg dst,
                     struct brw_reg src,
                     struct brw_reg sampler_index);

   void generate_vs_urb_write(vec4_instruction *inst);
   void generate_gs_urb_write(vec4_instruction *inst);
   void generate_gs_thread_end(vec4_instruction *inst);
   void generate_gs_set_write_offset(struct brw_reg dst,
                                     struct brw_reg src0,
                                     struct brw_reg src1);
   void generate_gs_set_vertex_count(struct brw_reg dst,
                                     struct brw_reg src);
   void generate_gs_set_dword_2_immed(struct brw_reg dst, struct brw_reg src);
   void generate_gs_prepare_channel_masks(struct brw_reg dst);
   void generate_gs_set_channel_masks(struct brw_reg dst, struct brw_reg src);
   void generate_gs_get_instance_id(struct brw_reg dst);
   void generate_oword_dual_block_offsets(struct brw_reg m1,
                                          struct brw_reg index);
   void generate_scratch_write(vec4_instruction *inst,
                               struct brw_reg dst,
                               struct brw_reg src,
                               struct brw_reg index);
   void generate_scratch_read(vec4_instruction *inst,
                              struct brw_reg dst,
                              struct brw_reg index);
   void generate_pull_constant_load(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg index,
                                    struct brw_reg offset);
   void generate_pull_constant_load_gen7(vec4_instruction *inst,
                                         struct brw_reg dst,
                                         struct brw_reg surf_index,
                                         struct brw_reg offset);
   void generate_unpack_flags(vec4_instruction *inst,
                              struct brw_reg dst);

   void generate_untyped_atomic(vec4_instruction *inst,
                                struct brw_reg dst,
                                struct brw_reg atomic_op,
                                struct brw_reg surf_index);

   void generate_untyped_surface_read(vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg surf_index);

   struct brw_context *brw;

   struct brw_compile *p;

   struct gl_shader_program *shader_prog;
   const struct gl_program *prog;

   struct brw_vec4_prog_data *prog_data;

   void *mem_ctx;
   const bool debug_flag;
};

/**
 * The Gen8 vertex shader code generator.
 *
 * Translates VS IR to actual i965 assembly code, using the Gen8
 * instruction encoding.
 */
class gen8_vec4_generator : public gen8_generator
{
public:
   gen8_vec4_generator(struct brw_context *brw,
                       struct gl_shader_program *shader_prog,
                       struct gl_program *prog,
                       struct brw_vec4_prog_data *prog_data,
                       void *mem_ctx,
                       bool debug_flag);
   ~gen8_vec4_generator();

   const unsigned *generate_assembly(exec_list *insts, unsigned *asm_size);

private:
   void generate_code(exec_list *instructions);
   void generate_vec4_instruction(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg *src);

   void generate_tex(vec4_instruction *inst,
                     struct brw_reg dst,
                     struct brw_reg sampler_index);

   void generate_urb_write(vec4_instruction *ir, bool copy_g0);
   void generate_gs_thread_end(vec4_instruction *ir);
   void generate_gs_set_write_offset(struct brw_reg dst,
                                     struct brw_reg src0,
                                     struct brw_reg src1);
   void generate_gs_set_vertex_count(struct brw_reg dst,
                                     struct brw_reg src);
   void generate_gs_set_dword_2_immed(struct brw_reg dst, struct brw_reg src);
   void generate_gs_prepare_channel_masks(struct brw_reg dst);
   void generate_gs_set_channel_masks(struct brw_reg dst, struct brw_reg src);

   void generate_oword_dual_block_offsets(struct brw_reg m1,
                                          struct brw_reg index);
   void generate_scratch_write(vec4_instruction *inst,
                               struct brw_reg dst,
                               struct brw_reg src,
                               struct brw_reg index);
   void generate_scratch_read(vec4_instruction *inst,
                              struct brw_reg dst,
                              struct brw_reg index);
   void generate_pull_constant_load(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg index,
                                    struct brw_reg offset);
   void generate_untyped_atomic(vec4_instruction *ir,
                                struct brw_reg dst,
                                struct brw_reg atomic_op,
                                struct brw_reg surf_index);
   void generate_untyped_surface_read(vec4_instruction *ir,
                                      struct brw_reg dst,
                                      struct brw_reg surf_index);

   struct brw_vec4_prog_data *prog_data;

   const bool debug_flag;
};


} /* namespace brw */
#endif /* __cplusplus */

#endif /* BRW_VEC4_H */