i965/vec4: Port untyped atomic message support to Broadwell.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_vec4.h
1 /*
2 * Copyright © 2011 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef BRW_VEC4_H
25 #define BRW_VEC4_H
26
27 #include <stdint.h>
28 #include "brw_shader.h"
29 #include "main/compiler.h"
30 #include "program/hash_table.h"
31 #include "brw_program.h"
32
33 #ifdef __cplusplus
34 extern "C" {
35 #endif
36
37 #include "brw_context.h"
38 #include "brw_eu.h"
39
40 #ifdef __cplusplus
41 }; /* extern "C" */
42 #include "gen8_generator.h"
43 #endif
44
45 #include "glsl/ir.h"
46
47
/**
 * Per-compile scratch-space bookkeeping for a single vec4 compilation.
 */
struct brw_vec4_compile {
   GLuint last_scratch; /**< measured in 32-byte (register size) units */
};
51
52
/**
 * Non-orthogonal state key used to look up / specialize a compiled vec4
 * program.  Two compiles with equal keys can share the same binary.
 */
struct brw_vec4_prog_key {
   GLuint program_string_id;

   /**
    * True if at least one clip flag is enabled, regardless of whether the
    * shader uses clip planes or gl_ClipDistance.
    */
   GLuint userclip_active:1;

   /**
    * How many user clipping planes are being uploaded to the vertex shader as
    * push constants.
    */
   GLuint nr_userclip_plane_consts:4;

   GLuint clamp_vertex_color:1;

   /** Sampler-related state shared with the FS key machinery. */
   struct brw_sampler_prog_key_data tex;
};
72
73
74 #ifdef __cplusplus
75 extern "C" {
76 #endif
77
/**
 * Initialize \p key with state suitable for precompiling \p prog (i.e.
 * compiling before any draw-time state is known).
 */
void
brw_vec4_setup_prog_key_for_precompile(struct gl_context *ctx,
                                       struct brw_vec4_prog_key *key,
                                       GLuint id, struct gl_program *prog);
82
83 #ifdef __cplusplus
84 } /* extern "C" */
85
86 namespace brw {
87
88 class dst_reg;
89
/* Returns a BRW_SWIZZLE* value appropriate for a \p size -component vector.
 * NOTE(review): presumably replicates the last valid component into the
 * unused channels — confirm against the definition in brw_vec4.cpp. */
unsigned
swizzle_for_size(int size);
92
/**
 * Common base for the vec4 backend's source and destination register
 * classes.  Holds the fields shared by both directions.
 */
class reg
{
public:
   /** Register file: GRF, MRF, IMM. */
   enum register_file file;
   /** virtual register number.  0 = fixed hw reg */
   int reg;
   /** Offset within the virtual register. */
   int reg_offset;
   /** Register type.  BRW_REGISTER_TYPE_* */
   int type;
   /** Underlying hardware register, used when file is a fixed HW file. */
   struct brw_reg fixed_hw_reg;

   /** Value for file == BRW_IMMEDIATE_FILE */
   union {
      int32_t i;
      uint32_t u;
      float f;
   } imm;
};
113
/**
 * A source operand in the vec4 IR: a \c reg plus read-side modifiers
 * (swizzle, negate, absolute value) and optional relative addressing.
 */
class src_reg : public reg
{
public:
   DECLARE_RALLOC_CXX_OPERATORS(src_reg)

   void init();

   src_reg(register_file file, int reg, const glsl_type *type);
   src_reg();
   src_reg(float f);
   src_reg(uint32_t u);
   src_reg(int32_t i);
   src_reg(struct brw_reg reg);

   bool equals(src_reg *r);
   bool is_zero() const;
   bool is_one() const;
   bool is_accumulator() const;

   src_reg(class vec4_visitor *v, const struct glsl_type *type);

   /** Conversion from a destination register; explicit to avoid accidents. */
   explicit src_reg(dst_reg reg);

   GLuint swizzle; /**< BRW_SWIZZLE_XYZW macros from brw_reg.h. */
   bool negate;
   bool abs;

   /** Non-NULL when the register is accessed through indirect addressing. */
   src_reg *reladdr;
};
143
144 static inline src_reg
145 retype(src_reg reg, unsigned type)
146 {
147 reg.fixed_hw_reg.type = reg.type = type;
148 return reg;
149 }
150
151 static inline src_reg
152 offset(src_reg reg, unsigned delta)
153 {
154 assert(delta == 0 || (reg.file != HW_REG && reg.file != IMM));
155 reg.reg_offset += delta;
156 return reg;
157 }
158
159 /**
160 * Reswizzle a given source register.
161 * \sa brw_swizzle().
162 */
163 static inline src_reg
164 swizzle(src_reg reg, unsigned swizzle)
165 {
166 assert(reg.file != HW_REG);
167 reg.swizzle = BRW_SWIZZLE4(
168 BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 0)),
169 BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 1)),
170 BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 2)),
171 BRW_GET_SWZ(reg.swizzle, BRW_GET_SWZ(swizzle, 3)));
172 return reg;
173 }
174
175 static inline src_reg
176 negate(src_reg reg)
177 {
178 assert(reg.file != HW_REG && reg.file != IMM);
179 reg.negate = !reg.negate;
180 return reg;
181 }
182
/**
 * A destination operand in the vec4 IR: a \c reg plus a write mask and
 * optional relative addressing.
 */
class dst_reg : public reg
{
public:
   DECLARE_RALLOC_CXX_OPERATORS(dst_reg)

   void init();

   dst_reg();
   dst_reg(register_file file, int reg);
   dst_reg(register_file file, int reg, const glsl_type *type, int writemask);
   dst_reg(struct brw_reg reg);
   dst_reg(class vec4_visitor *v, const struct glsl_type *type);

   /** Conversion from a source register; explicit to avoid accidents. */
   explicit dst_reg(src_reg reg);

   bool is_null() const;
   bool is_accumulator() const;

   int writemask; /**< Bitfield of WRITEMASK_[XYZW] */

   /** Non-NULL when the register is accessed through indirect addressing. */
   src_reg *reladdr;
};
205
206 static inline dst_reg
207 retype(dst_reg reg, unsigned type)
208 {
209 reg.fixed_hw_reg.type = reg.type = type;
210 return reg;
211 }
212
213 static inline dst_reg
214 offset(dst_reg reg, unsigned delta)
215 {
216 assert(delta == 0 || (reg.file != HW_REG && reg.file != IMM));
217 reg.reg_offset += delta;
218 return reg;
219 }
220
221 static inline dst_reg
222 writemask(dst_reg reg, unsigned mask)
223 {
224 assert(reg.file != HW_REG && reg.file != IMM);
225 assert((reg.writemask & mask) != 0);
226 reg.writemask &= mask;
227 return reg;
228 }
229
/**
 * A single instruction in the vec4 backend IR: opcode, destination, up to
 * three sources, plus the per-instruction state the generators consume
 * (predication, SEND message info, texturing fields, annotations).
 */
class vec4_instruction : public backend_instruction {
public:
   DECLARE_RALLOC_CXX_OPERATORS(vec4_instruction)

   vec4_instruction(vec4_visitor *v, enum opcode opcode,
                    dst_reg dst = dst_reg(),
                    src_reg src0 = src_reg(),
                    src_reg src1 = src_reg(),
                    src_reg src2 = src_reg());

   /** Lower this instruction's dst/src to concrete hardware registers. */
   struct brw_reg get_dst(void);
   struct brw_reg get_src(const struct brw_vec4_prog_data *prog_data, int i);

   dst_reg dst;
   src_reg src[3];

   bool saturate;
   bool force_writemask_all;
   /* Dependency-control hints for the EU scoreboard. */
   bool no_dd_clear, no_dd_check;

   int conditional_mod; /**< BRW_CONDITIONAL_* */

   int sampler;
   uint32_t texture_offset; /**< Texture Offset bitfield */
   int target; /**< MRT target. */
   bool shadow_compare;

   enum brw_urb_write_flags urb_write_flags;
   bool header_present;
   int mlen; /**< SEND message length */
   int base_mrf; /**< First MRF in the SEND message, if mlen is nonzero. */

   uint32_t offset; /* spill/unspill offset */
   /** @{
    * Annotation for the generated IR.  One of the two can be set.
    */
   const void *ir;
   const char *annotation;
   /** @} */

   bool is_send_from_grf();
   bool can_reswizzle_dst(int dst_writemask, int swizzle, int swizzle_mask);
   void reswizzle_dst(int dst_writemask, int swizzle);

   /** True if the instruction consumes the flag register. */
   bool reads_flag()
   {
      return predicate || opcode == VS_OPCODE_UNPACK_FLAGS_SIMD4X2;
   }

   /** True if the instruction updates the flag register (SEL's conditional
    * mod selects rather than writing flags). */
   bool writes_flag()
   {
      return conditional_mod && opcode != BRW_OPCODE_SEL;
   }
};
284
285 /**
286 * The vertex shader front-end.
287 *
288 * Translates either GLSL IR or Mesa IR (for ARB_vertex_program and
289 * fixed-function) into VS IR.
290 */
class vec4_visitor : public backend_visitor
{
public:
   vec4_visitor(struct brw_context *brw,
                struct brw_vec4_compile *c,
                struct gl_program *prog,
                const struct brw_vec4_prog_key *key,
                struct brw_vec4_prog_data *prog_data,
                struct gl_shader_program *shader_prog,
                gl_shader_stage stage,
                void *mem_ctx,
                bool debug_flag,
                bool no_spills,
                shader_time_shader_type st_base,
                shader_time_shader_type st_written,
                shader_time_shader_type st_reset);
   ~vec4_visitor();

   /** \name Typed views of the null hardware register. */
   /*@{*/
   dst_reg dst_null_f()
   {
      return dst_reg(brw_null_reg());
   }

   dst_reg dst_null_d()
   {
      return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
   }

   dst_reg dst_null_ud()
   {
      return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));
   }
   /*@}*/

   struct brw_vec4_compile * const c;
   const struct brw_vec4_prog_key * const key;
   struct brw_vec4_prog_data * const prog_data;
   unsigned int sanity_param_count;

   /* Set by fail(); once failed is true the compile result is unusable. */
   char *fail_msg;
   bool failed;

   /**
    * GLSL IR currently being processed, which is associated with our
    * driver IR instructions for debugging purposes.
    */
   const void *base_ir;
   const char *current_annotation;

   /* Virtual-GRF bookkeeping used by liveness analysis and allocation. */
   int *virtual_grf_sizes;
   int virtual_grf_count;
   int virtual_grf_array_size;
   int first_non_payload_grf;
   unsigned int max_grf;
   int *virtual_grf_start;
   int *virtual_grf_end;
   dst_reg userplane[MAX_CLIP_PLANES];

   /**
    * This is the size to be used for an array with an element per
    * reg_offset
    */
   int virtual_grf_reg_count;
   /** Per-virtual-grf indices into an array of size virtual_grf_reg_count */
   int *virtual_grf_reg_map;

   bool live_intervals_valid;

   dst_reg *variable_storage(ir_variable *var);

   void reladdr_to_temp(ir_instruction *ir, src_reg *reg, int *num_reladdr);

   bool need_all_constants_in_pull_buffer;

   /**
    * \name Visit methods
    *
    * As typical for the visitor pattern, there must be one \c visit method for
    * each concrete subclass of \c ir_instruction.  Virtual base classes within
    * the hierarchy should not have \c visit methods.
    */
   /*@{*/
   virtual void visit(ir_variable *);
   virtual void visit(ir_loop *);
   virtual void visit(ir_loop_jump *);
   virtual void visit(ir_function_signature *);
   virtual void visit(ir_function *);
   virtual void visit(ir_expression *);
   virtual void visit(ir_swizzle *);
   virtual void visit(ir_dereference_variable  *);
   virtual void visit(ir_dereference_array *);
   virtual void visit(ir_dereference_record *);
   virtual void visit(ir_assignment *);
   virtual void visit(ir_constant *);
   virtual void visit(ir_call *);
   virtual void visit(ir_return *);
   virtual void visit(ir_discard *);
   virtual void visit(ir_texture *);
   virtual void visit(ir_if *);
   virtual void visit(ir_emit_vertex *);
   virtual void visit(ir_end_primitive *);
   /*@}*/

   /** Result register of the most recently visited rvalue. */
   src_reg result;

   /* Regs for vertex results.  Generated at ir_variable visiting time
    * for the ir->location's used.
    */
   dst_reg output_reg[BRW_VARYING_SLOT_COUNT];
   const char *output_reg_annotation[BRW_VARYING_SLOT_COUNT];
   int *uniform_size;
   int *uniform_vector_size;
   int uniform_array_size; /*< Size of uniform_[vector_]size arrays */
   int uniforms;

   src_reg shader_start_time;

   /** Maps ir_variable to its backing dst_reg storage. */
   struct hash_table *variable_ht;

   bool run(void);
   void fail(const char *msg, ...);

   /** \name Register allocation, spilling, and optimization passes. */
   /*@{*/
   int virtual_grf_alloc(int size);
   void setup_uniform_clipplane_values();
   void setup_uniform_values(ir_variable *ir);
   void setup_builtin_uniform_values(ir_variable *ir);
   int setup_uniforms(int payload_reg);
   bool reg_allocate_trivial();
   bool reg_allocate();
   void evaluate_spill_costs(float *spill_costs, bool *no_spill);
   int choose_spill_reg(struct ra_graph *g);
   void spill_reg(int spill_reg);
   void move_grf_array_access_to_scratch();
   void move_uniform_array_access_to_pull_constants();
   void move_push_constants_to_pull_constants();
   void split_uniform_registers();
   void pack_uniform_registers();
   void calculate_live_intervals();
   void invalidate_live_intervals();
   void split_virtual_grfs();
   bool dead_code_eliminate();
   bool virtual_grf_interferes(int a, int b);
   bool opt_copy_propagation();
   bool opt_algebraic();
   bool opt_register_coalesce();
   void opt_set_dependency_control();
   void opt_schedule_instructions();
   /*@}*/

   bool can_do_source_mods(vec4_instruction *inst);

   /** \name IR emission helpers (append to the instruction list). */
   /*@{*/
   vec4_instruction *emit(vec4_instruction *inst);

   vec4_instruction *emit(enum opcode opcode);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst, src_reg src0);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst,
                          src_reg src0, src_reg src1);

   vec4_instruction *emit(enum opcode opcode, dst_reg dst,
                          src_reg src0, src_reg src1, src_reg src2);

   vec4_instruction *emit_before(vec4_instruction *inst,
                                 vec4_instruction *new_inst);
   /*@}*/

   /** \name Convenience emitters named after the hardware opcodes. */
   /*@{*/
   vec4_instruction *MOV(dst_reg dst, src_reg src0);
   vec4_instruction *NOT(dst_reg dst, src_reg src0);
   vec4_instruction *RNDD(dst_reg dst, src_reg src0);
   vec4_instruction *RNDE(dst_reg dst, src_reg src0);
   vec4_instruction *RNDZ(dst_reg dst, src_reg src0);
   vec4_instruction *FRC(dst_reg dst, src_reg src0);
   vec4_instruction *F32TO16(dst_reg dst, src_reg src0);
   vec4_instruction *F16TO32(dst_reg dst, src_reg src0);
   vec4_instruction *ADD(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *MUL(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *MACH(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *MAC(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *AND(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *OR(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *XOR(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *DP3(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *DP4(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *DPH(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *SHL(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *SHR(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *ASR(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *CMP(dst_reg dst, src_reg src0, src_reg src1,
                         uint32_t condition);
   vec4_instruction *IF(src_reg src0, src_reg src1, uint32_t condition);
   vec4_instruction *IF(uint32_t predicate);
   vec4_instruction *PULL_CONSTANT_LOAD(dst_reg dst, src_reg index);
   vec4_instruction *SCRATCH_READ(dst_reg dst, src_reg index);
   vec4_instruction *SCRATCH_WRITE(dst_reg dst, src_reg src, src_reg index);
   vec4_instruction *LRP(dst_reg dst, src_reg a, src_reg y, src_reg x);
   vec4_instruction *BFREV(dst_reg dst, src_reg value);
   vec4_instruction *BFE(dst_reg dst, src_reg bits, src_reg offset, src_reg value);
   vec4_instruction *BFI1(dst_reg dst, src_reg bits, src_reg offset);
   vec4_instruction *BFI2(dst_reg dst, src_reg bfi1_dst, src_reg insert, src_reg base);
   vec4_instruction *FBH(dst_reg dst, src_reg value);
   vec4_instruction *FBL(dst_reg dst, src_reg value);
   vec4_instruction *CBIT(dst_reg dst, src_reg value);
   vec4_instruction *MAD(dst_reg dst, src_reg c, src_reg b, src_reg a);
   vec4_instruction *ADDC(dst_reg dst, src_reg src0, src_reg src1);
   vec4_instruction *SUBB(dst_reg dst, src_reg src0, src_reg src1);
   /*@}*/

   int implied_mrf_writes(vec4_instruction *inst);

   bool try_rewrite_rhs_to_dst(ir_assignment *ir,
                               dst_reg dst,
                               src_reg src,
                               vec4_instruction *pre_rhs_inst,
                               vec4_instruction *last_rhs_inst);

   bool try_copy_propagation(vec4_instruction *inst, int arg,
                             src_reg *values[4]);

   /** Walks an exec_list of ir_instruction and sends it through this visitor. */
   void visit_instructions(const exec_list *list);

   void emit_vp_sop(uint32_t condmod, dst_reg dst,
                    src_reg src0, src_reg src1, src_reg one);

   void emit_bool_to_cond_code(ir_rvalue *ir, uint32_t *predicate);
   void emit_bool_comparison(unsigned int op, dst_reg dst, src_reg src0, src_reg src1);
   void emit_if_gen6(ir_if *ir);

   void emit_minmax(uint32_t condmod, dst_reg dst, src_reg src0, src_reg src1);

   void emit_lrp(const dst_reg &dst,
                 const src_reg &x, const src_reg &y, const src_reg &a);

   void emit_block_move(dst_reg *dst, src_reg *src,
                        const struct glsl_type *type, uint32_t predicate);

   void emit_constant_values(dst_reg *dst, ir_constant *value);

   /**
    * Emit the correct dot-product instruction for the type of arguments
    */
   void emit_dp(dst_reg dst, src_reg src0, src_reg src1, unsigned elements);

   void emit_scalar(ir_instruction *ir, enum prog_opcode op,
                    dst_reg dst, src_reg src0);

   void emit_scalar(ir_instruction *ir, enum prog_opcode op,
                    dst_reg dst, src_reg src0, src_reg src1);

   void emit_scs(ir_instruction *ir, enum prog_opcode op,
                 dst_reg dst, const src_reg &src);

   src_reg fix_3src_operand(src_reg src);

   /* Generation-specific math lowering; the two-argument forms dispatch on
    * hardware generation.
    */
   void emit_math1_gen6(enum opcode opcode, dst_reg dst, src_reg src);
   void emit_math1_gen4(enum opcode opcode, dst_reg dst, src_reg src);
   void emit_math(enum opcode opcode, dst_reg dst, src_reg src);
   void emit_math2_gen6(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
   void emit_math2_gen4(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
   void emit_math(enum opcode opcode, dst_reg dst, src_reg src0, src_reg src1);
   src_reg fix_math_operand(src_reg src);

   void emit_pack_half_2x16(dst_reg dst, src_reg src0);
   void emit_unpack_half_2x16(dst_reg dst, src_reg src0);

   uint32_t gather_channel(ir_texture *ir, int sampler);
   src_reg emit_mcs_fetch(ir_texture *ir, src_reg coordinate, int sampler);
   void emit_gen6_gather_wa(uint8_t wa, dst_reg dst);
   void swizzle_result(ir_texture *ir, src_reg orig_val, int sampler);

   void emit_ndc_computation();
   void emit_psiz_and_flags(struct brw_reg reg);
   void emit_clip_distances(dst_reg reg, int offset);
   void emit_generic_urb_slot(dst_reg reg, int varying);
   void emit_urb_slot(int mrf, int varying);

   void emit_shader_time_begin();
   void emit_shader_time_end();
   void emit_shader_time_write(enum shader_time_shader_type type,
                               src_reg value);

   void emit_untyped_atomic(unsigned atomic_op, unsigned surf_index,
                            dst_reg dst, src_reg offset, src_reg src0,
                            src_reg src1);

   void emit_untyped_surface_read(unsigned surf_index, dst_reg dst,
                                  src_reg offset);

   /* Scratch (spill space) and pull-constant access helpers. */
   src_reg get_scratch_offset(vec4_instruction *inst,
                              src_reg *reladdr, int reg_offset);
   src_reg get_pull_constant_offset(vec4_instruction *inst,
                                    src_reg *reladdr, int reg_offset);
   void emit_scratch_read(vec4_instruction *inst,
                          dst_reg dst,
                          src_reg orig_src,
                          int base_offset);
   void emit_scratch_write(vec4_instruction *inst,
                           int base_offset);
   void emit_pull_constant_load(vec4_instruction *inst,
                                dst_reg dst,
                                src_reg orig_src,
                                int base_offset);

   bool try_emit_sat(ir_expression *ir);
   bool try_emit_mad(ir_expression *ir);
   void resolve_ud_negate(src_reg *reg);

   src_reg get_timestamp();

   bool process_move_condition(ir_rvalue *ir);

   void dump_instruction(backend_instruction *inst);

   void visit_atomic_counter_intrinsic(ir_call *ir);

protected:
   void emit_vertex();
   void lower_attributes_to_hw_regs(const int *attribute_map,
                                    bool interleaved);
   void setup_payload_interference(struct ra_graph *g, int first_payload_node,
                                   int reg_node_count);
   /* Hooks implemented by the VS/GS subclasses. */
   virtual dst_reg *make_reg_for_system_value(ir_variable *ir) = 0;
   virtual void setup_payload() = 0;
   virtual void emit_prolog() = 0;
   virtual void emit_program_code() = 0;
   virtual void emit_thread_end() = 0;
   virtual void emit_urb_write_header(int mrf) = 0;
   virtual vec4_instruction *emit_urb_write_opcode(bool complete) = 0;
   virtual int compute_array_stride(ir_dereference_array *ir);

   const bool debug_flag;

private:
   /**
    * If true, then register allocation should fail instead of spilling.
    */
   const bool no_spills;

   /* Shader-time slot types for this stage's profiling writes. */
   const shader_time_shader_type st_base;
   const shader_time_shader_type st_written;
   const shader_time_shader_type st_reset;
};
632
633
634 /**
635 * The vertex shader code generator.
636 *
637 * Translates VS IR to actual i965 assembly code.
638 */
class vec4_generator
{
public:
   vec4_generator(struct brw_context *brw,
                  struct gl_shader_program *shader_prog,
                  struct gl_program *prog,
                  struct brw_vec4_prog_data *prog_data,
                  void *mem_ctx,
                  bool debug_flag);
   ~vec4_generator();

   /** Generate machine code for \p insts; returns it and sets \p asm_size. */
   const unsigned *generate_assembly(exec_list *insts, unsigned *asm_size);

private:
   void generate_code(exec_list *instructions);
   void generate_vec4_instruction(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg *src);

   /* Per-generation math lowering (Gen4-7). */
   void generate_math1_gen4(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src);
   void generate_math1_gen6(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src);
   void generate_math2_gen4(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src0,
                            struct brw_reg src1);
   void generate_math2_gen6(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src0,
                            struct brw_reg src1);
   void generate_math2_gen7(vec4_instruction *inst,
                            struct brw_reg dst,
                            struct brw_reg src0,
                            struct brw_reg src1);

   void generate_tex(vec4_instruction *inst,
                     struct brw_reg dst,
                     struct brw_reg src);

   /* URB writes and geometry-shader specific messages. */
   void generate_vs_urb_write(vec4_instruction *inst);
   void generate_gs_urb_write(vec4_instruction *inst);
   void generate_gs_thread_end(vec4_instruction *inst);
   void generate_gs_set_write_offset(struct brw_reg dst,
                                     struct brw_reg src0,
                                     struct brw_reg src1);
   void generate_gs_set_vertex_count(struct brw_reg dst,
                                     struct brw_reg src);
   void generate_gs_set_dword_2_immed(struct brw_reg dst, struct brw_reg src);
   void generate_gs_prepare_channel_masks(struct brw_reg dst);
   void generate_gs_set_channel_masks(struct brw_reg dst, struct brw_reg src);
   void generate_gs_get_instance_id(struct brw_reg dst);
   /* Scratch, pull-constant, and surface messages. */
   void generate_oword_dual_block_offsets(struct brw_reg m1,
                                          struct brw_reg index);
   void generate_scratch_write(vec4_instruction *inst,
                               struct brw_reg dst,
                               struct brw_reg src,
                               struct brw_reg index);
   void generate_scratch_read(vec4_instruction *inst,
                              struct brw_reg dst,
                              struct brw_reg index);
   void generate_pull_constant_load(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg index,
                                    struct brw_reg offset);
   void generate_pull_constant_load_gen7(vec4_instruction *inst,
                                         struct brw_reg dst,
                                         struct brw_reg surf_index,
                                         struct brw_reg offset);
   void generate_unpack_flags(vec4_instruction *inst,
                              struct brw_reg dst);

   void generate_untyped_atomic(vec4_instruction *inst,
                                struct brw_reg dst,
                                struct brw_reg atomic_op,
                                struct brw_reg surf_index);

   void generate_untyped_surface_read(vec4_instruction *inst,
                                      struct brw_reg dst,
                                      struct brw_reg surf_index);

   struct brw_context *brw;

   struct brw_compile *p;

   struct gl_shader_program *shader_prog;
   const struct gl_program *prog;

   struct brw_vec4_prog_data *prog_data;

   void *mem_ctx;
   const bool debug_flag;
};
734
/**
 * The Broadwell (Gen8) vec4 code generator.
 *
 * Translates VS IR to Gen8 i965 assembly code.
 */
class gen8_vec4_generator : public gen8_generator
{
public:
   gen8_vec4_generator(struct brw_context *brw,
                       struct gl_shader_program *shader_prog,
                       struct gl_program *prog,
                       struct brw_vec4_prog_data *prog_data,
                       void *mem_ctx,
                       bool debug_flag);
   ~gen8_vec4_generator();

   /** Generate machine code for \p insts; returns it and sets \p asm_size. */
   const unsigned *generate_assembly(exec_list *insts, unsigned *asm_size);

private:
   void generate_code(exec_list *instructions);
   void generate_vec4_instruction(vec4_instruction *inst,
                                  struct brw_reg dst,
                                  struct brw_reg *src);

   void generate_tex(vec4_instruction *inst,
                     struct brw_reg dst);

   /* URB writes and geometry-shader specific messages. */
   void generate_urb_write(vec4_instruction *ir, bool copy_g0);
   void generate_gs_thread_end(vec4_instruction *ir);
   void generate_gs_set_write_offset(struct brw_reg dst,
                                     struct brw_reg src0,
                                     struct brw_reg src1);
   void generate_gs_set_vertex_count(struct brw_reg dst,
                                     struct brw_reg src);
   void generate_gs_set_dword_2_immed(struct brw_reg dst, struct brw_reg src);
   void generate_gs_prepare_channel_masks(struct brw_reg dst);
   void generate_gs_set_channel_masks(struct brw_reg dst, struct brw_reg src);

   /* Scratch, pull-constant, and surface messages. */
   void generate_oword_dual_block_offsets(struct brw_reg m1,
                                          struct brw_reg index);
   void generate_scratch_write(vec4_instruction *inst,
                               struct brw_reg dst,
                               struct brw_reg src,
                               struct brw_reg index);
   void generate_scratch_read(vec4_instruction *inst,
                              struct brw_reg dst,
                              struct brw_reg index);
   void generate_pull_constant_load(vec4_instruction *inst,
                                    struct brw_reg dst,
                                    struct brw_reg index,
                                    struct brw_reg offset);
   void generate_untyped_atomic(vec4_instruction *ir,
                                struct brw_reg dst,
                                struct brw_reg atomic_op,
                                struct brw_reg surf_index);
   void generate_untyped_surface_read(vec4_instruction *ir,
                                      struct brw_reg dst,
                                      struct brw_reg surf_index);

   struct brw_vec4_prog_data *prog_data;

   const bool debug_flag;
};
798
799
800 } /* namespace brw */
801 #endif /* __cplusplus */
802
803 #endif /* BRW_VEC4_H */