/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
28 #include "brw_shader.h"
29 #include "main/compiler.h"
30 #include "program/hash_table.h"
31 #include "brw_program.h"
37 #include "brw_context.h"
39 #include "intel_asm_annotation.h"
43 #include "gen8_generator.h"
49 struct brw_vec4_compile
{
50 GLuint last_scratch
; /**< measured in 32-byte (register size) units */
54 struct brw_vec4_prog_key
{
55 GLuint program_string_id
;
58 * True if at least one clip flag is enabled, regardless of whether the
59 * shader uses clip planes or gl_ClipDistance.
61 GLuint userclip_active
:1;
64 * How many user clipping planes are being uploaded to the vertex shader as
67 GLuint nr_userclip_plane_consts
:4;
69 GLuint clamp_vertex_color
:1;
71 struct brw_sampler_prog_key_data tex
;
80 brw_vec4_setup_prog_key_for_precompile(struct gl_context
*ctx
,
81 struct brw_vec4_prog_key
*key
,
82 GLuint id
, struct gl_program
*prog
);
92 swizzle_for_size(int size
);
94 class src_reg
: public backend_reg
97 DECLARE_RALLOC_CXX_OPERATORS(src_reg
)
101 src_reg(register_file file
, int reg
, const glsl_type
*type
);
106 src_reg(struct brw_reg reg
);
108 bool equals(const src_reg
&r
) const;
110 src_reg(class vec4_visitor
*v
, const struct glsl_type
*type
);
112 explicit src_reg(dst_reg reg
);
114 GLuint swizzle
; /**< BRW_SWIZZLE_XYZW macros from brw_reg.h. */
119 static inline src_reg
120 retype(src_reg reg
, enum brw_reg_type type
)
122 reg
.fixed_hw_reg
.type
= reg
.type
= type
;
126 static inline src_reg
127 offset(src_reg reg
, unsigned delta
)
129 assert(delta
== 0 || (reg
.file
!= HW_REG
&& reg
.file
!= IMM
));
130 reg
.reg_offset
+= delta
;
135 * Reswizzle a given source register.
138 static inline src_reg
139 swizzle(src_reg reg
, unsigned swizzle
)
141 assert(reg
.file
!= HW_REG
);
142 reg
.swizzle
= BRW_SWIZZLE4(
143 BRW_GET_SWZ(reg
.swizzle
, BRW_GET_SWZ(swizzle
, 0)),
144 BRW_GET_SWZ(reg
.swizzle
, BRW_GET_SWZ(swizzle
, 1)),
145 BRW_GET_SWZ(reg
.swizzle
, BRW_GET_SWZ(swizzle
, 2)),
146 BRW_GET_SWZ(reg
.swizzle
, BRW_GET_SWZ(swizzle
, 3)));
150 static inline src_reg
153 assert(reg
.file
!= HW_REG
&& reg
.file
!= IMM
);
154 reg
.negate
= !reg
.negate
;
158 class dst_reg
: public backend_reg
161 DECLARE_RALLOC_CXX_OPERATORS(dst_reg
)
166 dst_reg(register_file file
, int reg
);
167 dst_reg(register_file file
, int reg
, const glsl_type
*type
, int writemask
);
168 dst_reg(struct brw_reg reg
);
169 dst_reg(class vec4_visitor
*v
, const struct glsl_type
*type
);
171 explicit dst_reg(src_reg reg
);
173 int writemask
; /**< Bitfield of WRITEMASK_[XYZW] */
178 static inline dst_reg
179 retype(dst_reg reg
, enum brw_reg_type type
)
181 reg
.fixed_hw_reg
.type
= reg
.type
= type
;
185 static inline dst_reg
186 offset(dst_reg reg
, unsigned delta
)
188 assert(delta
== 0 || (reg
.file
!= HW_REG
&& reg
.file
!= IMM
));
189 reg
.reg_offset
+= delta
;
193 static inline dst_reg
194 writemask(dst_reg reg
, unsigned mask
)
196 assert(reg
.file
!= HW_REG
&& reg
.file
!= IMM
);
197 assert((reg
.writemask
& mask
) != 0);
198 reg
.writemask
&= mask
;
202 class vec4_instruction
: public backend_instruction
{
204 DECLARE_RALLOC_CXX_OPERATORS(vec4_instruction
)
206 vec4_instruction(vec4_visitor
*v
, enum opcode opcode
,
207 const dst_reg
&dst
= dst_reg(),
208 const src_reg
&src0
= src_reg(),
209 const src_reg
&src1
= src_reg(),
210 const src_reg
&src2
= src_reg());
212 struct brw_reg
get_dst(void);
213 struct brw_reg
get_src(const struct brw_vec4_prog_data
*prog_data
, int i
);
220 enum brw_urb_write_flags urb_write_flags
;
223 bool is_send_from_grf();
224 bool can_reswizzle_dst(int dst_writemask
, int swizzle
, int swizzle_mask
);
225 void reswizzle_dst(int dst_writemask
, int swizzle
);
226 bool can_do_source_mods(struct brw_context
*brw
);
230 return predicate
|| opcode
== VS_OPCODE_UNPACK_FLAGS_SIMD4X2
;
235 return conditional_mod
&& opcode
!= BRW_OPCODE_SEL
;
240 * The vertex shader front-end.
242 * Translates either GLSL IR or Mesa IR (for ARB_vertex_program and
243 * fixed-function) into VS IR.
245 class vec4_visitor
: public backend_visitor
248 vec4_visitor(struct brw_context
*brw
,
249 struct brw_vec4_compile
*c
,
250 struct gl_program
*prog
,
251 const struct brw_vec4_prog_key
*key
,
252 struct brw_vec4_prog_data
*prog_data
,
253 struct gl_shader_program
*shader_prog
,
254 gl_shader_stage stage
,
258 shader_time_shader_type st_base
,
259 shader_time_shader_type st_written
,
260 shader_time_shader_type st_reset
);
265 return dst_reg(brw_null_reg());
270 return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_D
));
273 dst_reg
dst_null_ud()
275 return dst_reg(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD
));
278 struct brw_vec4_compile
* const c
;
279 const struct brw_vec4_prog_key
* const key
;
280 struct brw_vec4_prog_data
* const prog_data
;
281 unsigned int sanity_param_count
;
287 * GLSL IR currently being processed, which is associated with our
288 * driver IR instructions for debugging purposes.
291 const char *current_annotation
;
293 int *virtual_grf_sizes
;
294 int virtual_grf_count
;
295 int virtual_grf_array_size
;
296 int first_non_payload_grf
;
297 unsigned int max_grf
;
298 int *virtual_grf_start
;
299 int *virtual_grf_end
;
300 dst_reg userplane
[MAX_CLIP_PLANES
];
303 * This is the size to be used for an array with an element per
306 int virtual_grf_reg_count
;
307 /** Per-virtual-grf indices into an array of size virtual_grf_reg_count */
308 int *virtual_grf_reg_map
;
310 bool live_intervals_valid
;
312 dst_reg
*variable_storage(ir_variable
*var
);
314 void reladdr_to_temp(ir_instruction
*ir
, src_reg
*reg
, int *num_reladdr
);
316 bool need_all_constants_in_pull_buffer
;
319 * \name Visit methods
321 * As typical for the visitor pattern, there must be one \c visit method for
322 * each concrete subclass of \c ir_instruction. Virtual base classes within
323 * the hierarchy should not have \c visit methods.
326 virtual void visit(ir_variable
*);
327 virtual void visit(ir_loop
*);
328 virtual void visit(ir_loop_jump
*);
329 virtual void visit(ir_function_signature
*);
330 virtual void visit(ir_function
*);
331 virtual void visit(ir_expression
*);
332 virtual void visit(ir_swizzle
*);
333 virtual void visit(ir_dereference_variable
*);
334 virtual void visit(ir_dereference_array
*);
335 virtual void visit(ir_dereference_record
*);
336 virtual void visit(ir_assignment
*);
337 virtual void visit(ir_constant
*);
338 virtual void visit(ir_call
*);
339 virtual void visit(ir_return
*);
340 virtual void visit(ir_discard
*);
341 virtual void visit(ir_texture
*);
342 virtual void visit(ir_if
*);
343 virtual void visit(ir_emit_vertex
*);
344 virtual void visit(ir_end_primitive
*);
349 /* Regs for vertex results. Generated at ir_variable visiting time
350 * for the ir->location's used.
352 dst_reg output_reg
[BRW_VARYING_SLOT_COUNT
];
353 const char *output_reg_annotation
[BRW_VARYING_SLOT_COUNT
];
355 int *uniform_vector_size
;
356 int uniform_array_size
; /*< Size of uniform_[vector_]size arrays */
359 src_reg shader_start_time
;
361 struct hash_table
*variable_ht
;
364 void fail(const char *msg
, ...);
366 int virtual_grf_alloc(int size
);
367 void setup_uniform_clipplane_values();
368 void setup_uniform_values(ir_variable
*ir
);
369 void setup_builtin_uniform_values(ir_variable
*ir
);
370 int setup_uniforms(int payload_reg
);
371 bool reg_allocate_trivial();
373 void evaluate_spill_costs(float *spill_costs
, bool *no_spill
);
374 int choose_spill_reg(struct ra_graph
*g
);
375 void spill_reg(int spill_reg
);
376 void move_grf_array_access_to_scratch();
377 void move_uniform_array_access_to_pull_constants();
378 void move_push_constants_to_pull_constants();
379 void split_uniform_registers();
380 void pack_uniform_registers();
381 void calculate_live_intervals();
382 void invalidate_live_intervals();
383 void split_virtual_grfs();
384 bool dead_code_eliminate();
385 bool virtual_grf_interferes(int a
, int b
);
386 bool opt_copy_propagation();
387 bool opt_cse_local(bblock_t
*block
);
389 bool opt_algebraic();
390 bool opt_register_coalesce();
391 void opt_set_dependency_control();
392 void opt_schedule_instructions();
394 vec4_instruction
*emit(vec4_instruction
*inst
);
396 vec4_instruction
*emit(enum opcode opcode
);
398 vec4_instruction
*emit(enum opcode opcode
, dst_reg dst
);
400 vec4_instruction
*emit(enum opcode opcode
, dst_reg dst
, src_reg src0
);
402 vec4_instruction
*emit(enum opcode opcode
, dst_reg dst
,
403 src_reg src0
, src_reg src1
);
405 vec4_instruction
*emit(enum opcode opcode
, dst_reg dst
,
406 src_reg src0
, src_reg src1
, src_reg src2
);
408 vec4_instruction
*emit_before(vec4_instruction
*inst
,
409 vec4_instruction
*new_inst
);
411 vec4_instruction
*MOV(const dst_reg
&dst
, const src_reg
&src0
);
412 vec4_instruction
*NOT(const dst_reg
&dst
, const src_reg
&src0
);
413 vec4_instruction
*RNDD(const dst_reg
&dst
, const src_reg
&src0
);
414 vec4_instruction
*RNDE(const dst_reg
&dst
, const src_reg
&src0
);
415 vec4_instruction
*RNDZ(const dst_reg
&dst
, const src_reg
&src0
);
416 vec4_instruction
*FRC(const dst_reg
&dst
, const src_reg
&src0
);
417 vec4_instruction
*F32TO16(const dst_reg
&dst
, const src_reg
&src0
);
418 vec4_instruction
*F16TO32(const dst_reg
&dst
, const src_reg
&src0
);
419 vec4_instruction
*ADD(const dst_reg
&dst
, const src_reg
&src0
,
420 const src_reg
&src1
);
421 vec4_instruction
*MUL(const dst_reg
&dst
, const src_reg
&src0
,
422 const src_reg
&src1
);
423 vec4_instruction
*MACH(const dst_reg
&dst
, const src_reg
&src0
,
424 const src_reg
&src1
);
425 vec4_instruction
*MAC(const dst_reg
&dst
, const src_reg
&src0
,
426 const src_reg
&src1
);
427 vec4_instruction
*AND(const dst_reg
&dst
, const src_reg
&src0
,
428 const src_reg
&src1
);
429 vec4_instruction
*OR(const dst_reg
&dst
, const src_reg
&src0
,
430 const src_reg
&src1
);
431 vec4_instruction
*XOR(const dst_reg
&dst
, const src_reg
&src0
,
432 const src_reg
&src1
);
433 vec4_instruction
*DP3(const dst_reg
&dst
, const src_reg
&src0
,
434 const src_reg
&src1
);
435 vec4_instruction
*DP4(const dst_reg
&dst
, const src_reg
&src0
,
436 const src_reg
&src1
);
437 vec4_instruction
*DPH(const dst_reg
&dst
, const src_reg
&src0
,
438 const src_reg
&src1
);
439 vec4_instruction
*SHL(const dst_reg
&dst
, const src_reg
&src0
,
440 const src_reg
&src1
);
441 vec4_instruction
*SHR(const dst_reg
&dst
, const src_reg
&src0
,
442 const src_reg
&src1
);
443 vec4_instruction
*ASR(const dst_reg
&dst
, const src_reg
&src0
,
444 const src_reg
&src1
);
445 vec4_instruction
*CMP(dst_reg dst
, src_reg src0
, src_reg src1
,
446 enum brw_conditional_mod condition
);
447 vec4_instruction
*IF(src_reg src0
, src_reg src1
,
448 enum brw_conditional_mod condition
);
449 vec4_instruction
*IF(enum brw_predicate predicate
);
450 vec4_instruction
*PULL_CONSTANT_LOAD(const dst_reg
&dst
,
451 const src_reg
&index
);
452 vec4_instruction
*SCRATCH_READ(const dst_reg
&dst
, const src_reg
&index
);
453 vec4_instruction
*SCRATCH_WRITE(const dst_reg
&dst
, const src_reg
&src
,
454 const src_reg
&index
);
455 vec4_instruction
*LRP(const dst_reg
&dst
, const src_reg
&a
,
456 const src_reg
&y
, const src_reg
&x
);
457 vec4_instruction
*BFREV(const dst_reg
&dst
, const src_reg
&value
);
458 vec4_instruction
*BFE(const dst_reg
&dst
, const src_reg
&bits
,
459 const src_reg
&offset
, const src_reg
&value
);
460 vec4_instruction
*BFI1(const dst_reg
&dst
, const src_reg
&bits
,
461 const src_reg
&offset
);
462 vec4_instruction
*BFI2(const dst_reg
&dst
, const src_reg
&bfi1_dst
,
463 const src_reg
&insert
, const src_reg
&base
);
464 vec4_instruction
*FBH(const dst_reg
&dst
, const src_reg
&value
);
465 vec4_instruction
*FBL(const dst_reg
&dst
, const src_reg
&value
);
466 vec4_instruction
*CBIT(const dst_reg
&dst
, const src_reg
&value
);
467 vec4_instruction
*MAD(const dst_reg
&dst
, const src_reg
&c
,
468 const src_reg
&b
, const src_reg
&a
);
469 vec4_instruction
*ADDC(const dst_reg
&dst
, const src_reg
&src0
,
470 const src_reg
&src1
);
471 vec4_instruction
*SUBB(const dst_reg
&dst
, const src_reg
&src0
,
472 const src_reg
&src1
);
474 int implied_mrf_writes(vec4_instruction
*inst
);
476 bool try_rewrite_rhs_to_dst(ir_assignment
*ir
,
479 vec4_instruction
*pre_rhs_inst
,
480 vec4_instruction
*last_rhs_inst
);
482 /** Walks an exec_list of ir_instruction and sends it through this visitor. */
483 void visit_instructions(const exec_list
*list
);
485 void emit_vp_sop(enum brw_conditional_mod condmod
, dst_reg dst
,
486 src_reg src0
, src_reg src1
, src_reg one
);
488 void emit_bool_to_cond_code(ir_rvalue
*ir
, enum brw_predicate
*predicate
);
489 void emit_bool_comparison(unsigned int op
, dst_reg dst
, src_reg src0
, src_reg src1
);
490 void emit_if_gen6(ir_if
*ir
);
492 void emit_minmax(enum brw_conditional_mod conditionalmod
, dst_reg dst
,
493 src_reg src0
, src_reg src1
);
495 void emit_lrp(const dst_reg
&dst
,
496 const src_reg
&x
, const src_reg
&y
, const src_reg
&a
);
498 void emit_block_move(dst_reg
*dst
, src_reg
*src
,
499 const struct glsl_type
*type
, brw_predicate predicate
);
501 void emit_constant_values(dst_reg
*dst
, ir_constant
*value
);
504 * Emit the correct dot-product instruction for the type of arguments
506 void emit_dp(dst_reg dst
, src_reg src0
, src_reg src1
, unsigned elements
);
508 void emit_scalar(ir_instruction
*ir
, enum prog_opcode op
,
509 dst_reg dst
, src_reg src0
);
511 void emit_scalar(ir_instruction
*ir
, enum prog_opcode op
,
512 dst_reg dst
, src_reg src0
, src_reg src1
);
514 void emit_scs(ir_instruction
*ir
, enum prog_opcode op
,
515 dst_reg dst
, const src_reg
&src
);
517 src_reg
fix_3src_operand(src_reg src
);
519 void emit_math1_gen6(enum opcode opcode
, dst_reg dst
, src_reg src
);
520 void emit_math1_gen4(enum opcode opcode
, dst_reg dst
, src_reg src
);
521 void emit_math(enum opcode opcode
, dst_reg dst
, src_reg src
);
522 void emit_math2_gen6(enum opcode opcode
, dst_reg dst
, src_reg src0
, src_reg src1
);
523 void emit_math2_gen4(enum opcode opcode
, dst_reg dst
, src_reg src0
, src_reg src1
);
524 void emit_math(enum opcode opcode
, dst_reg dst
, src_reg src0
, src_reg src1
);
525 src_reg
fix_math_operand(src_reg src
);
527 void emit_pack_half_2x16(dst_reg dst
, src_reg src0
);
528 void emit_unpack_half_2x16(dst_reg dst
, src_reg src0
);
530 uint32_t gather_channel(ir_texture
*ir
, int sampler
);
531 src_reg
emit_mcs_fetch(ir_texture
*ir
, src_reg coordinate
, int sampler
);
532 void emit_gen6_gather_wa(uint8_t wa
, dst_reg dst
);
533 void swizzle_result(ir_texture
*ir
, src_reg orig_val
, int sampler
);
535 void emit_ndc_computation();
536 void emit_psiz_and_flags(struct brw_reg reg
);
537 void emit_clip_distances(dst_reg reg
, int offset
);
538 void emit_generic_urb_slot(dst_reg reg
, int varying
);
539 void emit_urb_slot(int mrf
, int varying
);
541 void emit_shader_time_begin();
542 void emit_shader_time_end();
543 void emit_shader_time_write(enum shader_time_shader_type type
,
546 void emit_untyped_atomic(unsigned atomic_op
, unsigned surf_index
,
547 dst_reg dst
, src_reg offset
, src_reg src0
,
550 void emit_untyped_surface_read(unsigned surf_index
, dst_reg dst
,
553 src_reg
get_scratch_offset(vec4_instruction
*inst
,
554 src_reg
*reladdr
, int reg_offset
);
555 src_reg
get_pull_constant_offset(vec4_instruction
*inst
,
556 src_reg
*reladdr
, int reg_offset
);
557 void emit_scratch_read(vec4_instruction
*inst
,
561 void emit_scratch_write(vec4_instruction
*inst
,
563 void emit_pull_constant_load(vec4_instruction
*inst
,
568 bool try_emit_sat(ir_expression
*ir
);
569 bool try_emit_mad(ir_expression
*ir
);
570 bool try_emit_b2f_of_compare(ir_expression
*ir
);
571 void resolve_ud_negate(src_reg
*reg
);
573 src_reg
get_timestamp();
575 bool process_move_condition(ir_rvalue
*ir
);
577 void dump_instruction(backend_instruction
*inst
);
578 void dump_instruction(backend_instruction
*inst
, FILE *file
);
580 void visit_atomic_counter_intrinsic(ir_call
*ir
);
584 void lower_attributes_to_hw_regs(const int *attribute_map
,
586 void setup_payload_interference(struct ra_graph
*g
, int first_payload_node
,
588 virtual dst_reg
*make_reg_for_system_value(ir_variable
*ir
) = 0;
589 virtual void setup_payload() = 0;
590 virtual void emit_prolog() = 0;
591 virtual void emit_program_code() = 0;
592 virtual void emit_thread_end() = 0;
593 virtual void emit_urb_write_header(int mrf
) = 0;
594 virtual vec4_instruction
*emit_urb_write_opcode(bool complete
) = 0;
595 virtual int compute_array_stride(ir_dereference_array
*ir
);
597 const bool debug_flag
;
601 * If true, then register allocation should fail instead of spilling.
603 const bool no_spills
;
605 const shader_time_shader_type st_base
;
606 const shader_time_shader_type st_written
;
607 const shader_time_shader_type st_reset
;
612 * The vertex shader code generator.
614 * Translates VS IR to actual i965 assembly code.
619 vec4_generator(struct brw_context
*brw
,
620 struct gl_shader_program
*shader_prog
,
621 struct gl_program
*prog
,
622 struct brw_vec4_prog_data
*prog_data
,
627 const unsigned *generate_assembly(exec_list
*insts
, unsigned *asm_size
);
630 void generate_code(exec_list
*instructions
);
631 void generate_vec4_instruction(vec4_instruction
*inst
,
633 struct brw_reg
*src
);
635 void generate_math1_gen4(vec4_instruction
*inst
,
638 void generate_math2_gen4(vec4_instruction
*inst
,
641 struct brw_reg src1
);
642 void generate_math_gen6(vec4_instruction
*inst
,
645 struct brw_reg src1
);
647 void generate_tex(vec4_instruction
*inst
,
651 void generate_vs_urb_write(vec4_instruction
*inst
);
652 void generate_gs_urb_write(vec4_instruction
*inst
);
653 void generate_gs_thread_end(vec4_instruction
*inst
);
654 void generate_gs_set_write_offset(struct brw_reg dst
,
656 struct brw_reg src1
);
657 void generate_gs_set_vertex_count(struct brw_reg dst
,
659 void generate_gs_set_dword_2_immed(struct brw_reg dst
, struct brw_reg src
);
660 void generate_gs_prepare_channel_masks(struct brw_reg dst
);
661 void generate_gs_set_channel_masks(struct brw_reg dst
, struct brw_reg src
);
662 void generate_gs_get_instance_id(struct brw_reg dst
);
663 void generate_oword_dual_block_offsets(struct brw_reg m1
,
664 struct brw_reg index
);
665 void generate_scratch_write(vec4_instruction
*inst
,
668 struct brw_reg index
);
669 void generate_scratch_read(vec4_instruction
*inst
,
671 struct brw_reg index
);
672 void generate_pull_constant_load(vec4_instruction
*inst
,
674 struct brw_reg index
,
675 struct brw_reg offset
);
676 void generate_pull_constant_load_gen7(vec4_instruction
*inst
,
678 struct brw_reg surf_index
,
679 struct brw_reg offset
);
680 void generate_unpack_flags(vec4_instruction
*inst
,
683 void generate_untyped_atomic(vec4_instruction
*inst
,
685 struct brw_reg atomic_op
,
686 struct brw_reg surf_index
);
688 void generate_untyped_surface_read(vec4_instruction
*inst
,
690 struct brw_reg surf_index
);
692 struct brw_context
*brw
;
694 struct brw_compile
*p
;
696 struct gl_shader_program
*shader_prog
;
697 const struct gl_program
*prog
;
699 struct brw_vec4_prog_data
*prog_data
;
702 const bool debug_flag
;
706 * The vertex shader code generator.
708 * Translates VS IR to actual i965 assembly code.
710 class gen8_vec4_generator
: public gen8_generator
713 gen8_vec4_generator(struct brw_context
*brw
,
714 struct gl_shader_program
*shader_prog
,
715 struct gl_program
*prog
,
716 struct brw_vec4_prog_data
*prog_data
,
719 ~gen8_vec4_generator();
721 const unsigned *generate_assembly(exec_list
*insts
, unsigned *asm_size
);
724 void generate_code(exec_list
*instructions
);
725 void generate_vec4_instruction(vec4_instruction
*inst
,
727 struct brw_reg
*src
);
729 void generate_tex(vec4_instruction
*inst
,
732 void generate_urb_write(vec4_instruction
*ir
, bool copy_g0
);
733 void generate_gs_thread_end(vec4_instruction
*ir
);
734 void generate_gs_set_write_offset(struct brw_reg dst
,
736 struct brw_reg src1
);
737 void generate_gs_set_vertex_count(struct brw_reg dst
,
739 void generate_gs_set_dword_2_immed(struct brw_reg dst
, struct brw_reg src
);
740 void generate_gs_prepare_channel_masks(struct brw_reg dst
);
741 void generate_gs_set_channel_masks(struct brw_reg dst
, struct brw_reg src
);
743 void generate_oword_dual_block_offsets(struct brw_reg m1
,
744 struct brw_reg index
);
745 void generate_scratch_write(vec4_instruction
*inst
,
748 struct brw_reg index
);
749 void generate_scratch_read(vec4_instruction
*inst
,
751 struct brw_reg index
);
752 void generate_pull_constant_load(vec4_instruction
*inst
,
754 struct brw_reg index
,
755 struct brw_reg offset
);
756 void generate_untyped_atomic(vec4_instruction
*ir
,
758 struct brw_reg atomic_op
,
759 struct brw_reg surf_index
);
760 void generate_untyped_surface_read(vec4_instruction
*ir
,
762 struct brw_reg surf_index
);
764 struct brw_vec4_prog_data
*prog_data
;
766 const bool debug_flag
;
770 } /* namespace brw */
771 #endif /* __cplusplus */
773 #endif /* BRW_VEC4_H */