2 * Copyright © 2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
30 #include "brw_shader.h"
34 #include <sys/types.h>
36 #include "main/macros.h"
37 #include "main/shaderobj.h"
38 #include "main/uniforms.h"
39 #include "program/prog_parameter.h"
40 #include "program/prog_print.h"
41 #include "program/prog_optimize.h"
42 #include "program/register_allocate.h"
43 #include "program/sampler.h"
44 #include "program/hash_table.h"
45 #include "brw_context.h"
48 #include "brw_shader.h"
50 #include "gen8_generator.h"
51 #include "glsl/glsl_types.h"
/* Maximum length, in registers, of a sampler message payload —
 * presumably a hardware limit; confirm against the Gen PRM. */
#define MAX_SAMPLER_MESSAGE_SIZE 11
/* Forward declaration so members below can hold a pointer to the
 * live-variable analysis results without pulling in its full definition. */
class fs_live_variables
;
   /* NOTE(review): extraction artifact — the enclosing "class fs_reg"
    * header, several member declarations (including the ones some of the
    * comments below describe), and the closing brace are missing from
    * this view.  Code below is kept exactly as extracted; only comments
    * were added.
    */
   DECLARE_RALLOC_CXX_OPERATORS(fs_reg)

   /* Wrap an already-fixed hardware register. */
   fs_reg(struct brw_reg fixed_hw_reg);
   fs_reg(enum register_file file, int reg);
   fs_reg(enum register_file file, int reg, uint32_t type);
   /* Presumably allocates a virtual register sized for \p type via the
    * visitor — TODO confirm against the constructor definition. */
   fs_reg(class fs_visitor *v, const struct glsl_type *type);

   /* Value comparison against another register. */
   bool equals(const fs_reg &r) const;
   bool is_valid_3src() const;
   bool is_contiguous() const;
   bool is_accumulator() const;

   fs_reg &apply_stride(unsigned stride);
   /** Smear a channel of the reg to all channels. */
   fs_reg &set_smear(unsigned subreg);

   /** Register file: GRF, MRF, IMM. */
   enum register_file file;

   /** Register type. BRW_REGISTER_TYPE_* */
   /*
    * Register number. For MRF, it's the hardware register. For
    * GRF, it's a virtual register number until register allocation
    */
   /*
    * Offset from the start of the contiguous register block.
    *
    * For pre-register-allocation GRFs, this is in units of a float per pixel
    * (1 hardware register for SIMD8 mode, or 2 registers for SIMD16 mode).
    * For uniforms, this is in units of 1 float.
    */
   /*
    * Offset in bytes from the start of the register. Values up to a
    * backend_reg::reg_offset unit are valid.
    */
   /** Value for file == IMM */

   struct brw_reg fixed_hw_reg;

   /** Register region horizontal stride */
134 retype(fs_reg reg
, unsigned type
)
136 reg
.fixed_hw_reg
.type
= reg
.type
= type
;
141 offset(fs_reg reg
, unsigned delta
)
143 assert(delta
== 0 || (reg
.file
!= HW_REG
&& reg
.file
!= IMM
));
144 reg
.reg_offset
+= delta
;
149 byte_offset(fs_reg reg
, unsigned delta
)
151 assert(delta
== 0 || (reg
.file
!= HW_REG
&& reg
.file
!= IMM
));
152 reg
.subreg_offset
+= delta
;
157 * Get either of the 8-component halves of a 16-component register.
159 * Note: this also works if \c reg represents a SIMD16 pair of registers.
162 half(const fs_reg
®
, unsigned idx
)
165 assert(idx
== 0 || (reg
.file
!= HW_REG
&& reg
.file
!= IMM
));
166 return byte_offset(reg
, 8 * idx
* reg
.stride
* type_sz(reg
.type
));
/* Default-constructed register used wherever "no register" is meant. */
static const fs_reg reg_undef;

/* Typed views of the hardware null register (brw_null_reg()) —
 * presumably used as destinations when only condition flags, or
 * nothing at all, should be written; confirm against the generators. */
static const fs_reg reg_null_f(retype(brw_null_reg(), BRW_REGISTER_TYPE_F));
static const fs_reg reg_null_d(retype(brw_null_reg(), BRW_REGISTER_TYPE_D));
static const fs_reg reg_null_ud(retype(brw_null_reg(), BRW_REGISTER_TYPE_UD));
/* exec_node-derived record; the rest of this class body (and its
 * closing brace) was lost in extraction — only the ralloc operator
 * declaration survives here. */
class ip_record : public exec_node {
   DECLARE_RALLOC_CXX_OPERATORS(ip_record)
/* A single instruction in the fragment-shader IR.
 * NOTE(review): extraction artifact — some members (e.g. the flag
 * subregister field one comment below describes) and the closing
 * brace of this class are missing from this view.  Code is kept
 * exactly as extracted; only comments were added.
 */
class fs_inst : public backend_instruction {
   DECLARE_RALLOC_CXX_OPERATORS(fs_inst)

   /* Constructors for zero through three source operands. */
   fs_inst(enum opcode opcode);
   fs_inst(enum opcode opcode, fs_reg dst);
   fs_inst(enum opcode opcode, fs_reg dst, fs_reg src0);
   fs_inst(enum opcode opcode, fs_reg dst, fs_reg src0, fs_reg src1);
   fs_inst(enum opcode opcode, fs_reg dst,
           fs_reg src0, fs_reg src1, fs_reg src2);

   bool equals(fs_inst *inst) const;
   bool overwrites_reg(const fs_reg &reg) const;
   bool is_send_from_grf() const;
   bool is_partial_write() const;
   /* Number of registers read for source operand \p arg —
    * presumably; confirm against the definition. */
   int regs_read(fs_visitor *v, int arg) const;

   bool reads_flag() const;
   bool writes_flag() const;

   /*
    * Annotation for the generated IR. One of the two can be set.
    */
   const char *annotation;

   uint32_t texture_offset; /**< Texture offset bitfield */
   uint32_t offset; /* spill/unspill offset */

   uint8_t conditional_mod; /**< BRW_CONDITIONAL_* */

   /* Chooses which flag subregister (f0.0 or f0.1) is used for conditional
    * mod and predication.
    * NOTE(review): the field this comment documented is not visible here.
    */

   uint8_t mlen; /**< SEND message length */
   uint8_t regs_written; /**< Number of vgrfs written by a SEND message, or 1 */
   int8_t base_mrf; /**< First MRF in the SEND message, if mlen is nonzero. */
   uint8_t target; /**< MRT target. */

   /* Per-instruction execution flags. */
   bool header_present:1;
   bool shadow_compare:1;
   bool force_uncompressed:1;
   bool force_sechalf:1;
   bool force_writemask_all:1;
244 * The fragment shader front-end.
246 * Translates either GLSL IR or Mesa IR (for ARB_fragment_program) into FS IR.
248 class fs_visitor
: public backend_visitor
252 fs_visitor(struct brw_context
*brw
,
254 const struct brw_wm_prog_key
*key
,
255 struct brw_wm_prog_data
*prog_data
,
256 struct gl_shader_program
*shader_prog
,
257 struct gl_fragment_program
*fp
,
258 unsigned dispatch_width
);
261 fs_reg
*variable_storage(ir_variable
*var
);
262 int virtual_grf_alloc(int size
);
263 void import_uniforms(fs_visitor
*v
);
265 void visit(ir_variable
*ir
);
266 void visit(ir_assignment
*ir
);
267 void visit(ir_dereference_variable
*ir
);
268 void visit(ir_dereference_record
*ir
);
269 void visit(ir_dereference_array
*ir
);
270 void visit(ir_expression
*ir
);
271 void visit(ir_texture
*ir
);
272 void visit(ir_if
*ir
);
273 void visit(ir_constant
*ir
);
274 void visit(ir_swizzle
*ir
);
275 void visit(ir_return
*ir
);
276 void visit(ir_loop
*ir
);
277 void visit(ir_loop_jump
*ir
);
278 void visit(ir_discard
*ir
);
279 void visit(ir_call
*ir
);
280 void visit(ir_function
*ir
);
281 void visit(ir_function_signature
*ir
);
282 void visit(ir_emit_vertex
*);
283 void visit(ir_end_primitive
*);
285 uint32_t gather_channel(ir_texture
*ir
, int sampler
);
286 void swizzle_result(ir_texture
*ir
, fs_reg orig_val
, int sampler
);
288 bool can_do_source_mods(fs_inst
*inst
);
290 fs_inst
*emit(fs_inst
*inst
);
291 void emit(exec_list list
);
293 fs_inst
*emit(enum opcode opcode
);
294 fs_inst
*emit(enum opcode opcode
, fs_reg dst
);
295 fs_inst
*emit(enum opcode opcode
, fs_reg dst
, fs_reg src0
);
296 fs_inst
*emit(enum opcode opcode
, fs_reg dst
, fs_reg src0
, fs_reg src1
);
297 fs_inst
*emit(enum opcode opcode
, fs_reg dst
,
298 fs_reg src0
, fs_reg src1
, fs_reg src2
);
300 fs_inst
*MOV(fs_reg dst
, fs_reg src
);
301 fs_inst
*NOT(fs_reg dst
, fs_reg src
);
302 fs_inst
*RNDD(fs_reg dst
, fs_reg src
);
303 fs_inst
*RNDE(fs_reg dst
, fs_reg src
);
304 fs_inst
*RNDZ(fs_reg dst
, fs_reg src
);
305 fs_inst
*FRC(fs_reg dst
, fs_reg src
);
306 fs_inst
*ADD(fs_reg dst
, fs_reg src0
, fs_reg src1
);
307 fs_inst
*MUL(fs_reg dst
, fs_reg src0
, fs_reg src1
);
308 fs_inst
*MACH(fs_reg dst
, fs_reg src0
, fs_reg src1
);
309 fs_inst
*MAC(fs_reg dst
, fs_reg src0
, fs_reg src1
);
310 fs_inst
*SHL(fs_reg dst
, fs_reg src0
, fs_reg src1
);
311 fs_inst
*SHR(fs_reg dst
, fs_reg src0
, fs_reg src1
);
312 fs_inst
*ASR(fs_reg dst
, fs_reg src0
, fs_reg src1
);
313 fs_inst
*AND(fs_reg dst
, fs_reg src0
, fs_reg src1
);
314 fs_inst
*OR(fs_reg dst
, fs_reg src0
, fs_reg src1
);
315 fs_inst
*XOR(fs_reg dst
, fs_reg src0
, fs_reg src1
);
316 fs_inst
*IF(uint32_t predicate
);
317 fs_inst
*IF(fs_reg src0
, fs_reg src1
, uint32_t condition
);
318 fs_inst
*CMP(fs_reg dst
, fs_reg src0
, fs_reg src1
,
320 fs_inst
*LRP(fs_reg dst
, fs_reg a
, fs_reg y
, fs_reg x
);
321 fs_inst
*DEP_RESOLVE_MOV(int grf
);
322 fs_inst
*BFREV(fs_reg dst
, fs_reg value
);
323 fs_inst
*BFE(fs_reg dst
, fs_reg bits
, fs_reg offset
, fs_reg value
);
324 fs_inst
*BFI1(fs_reg dst
, fs_reg bits
, fs_reg offset
);
325 fs_inst
*BFI2(fs_reg dst
, fs_reg bfi1_dst
, fs_reg insert
, fs_reg base
);
326 fs_inst
*FBH(fs_reg dst
, fs_reg value
);
327 fs_inst
*FBL(fs_reg dst
, fs_reg value
);
328 fs_inst
*CBIT(fs_reg dst
, fs_reg value
);
329 fs_inst
*MAD(fs_reg dst
, fs_reg c
, fs_reg b
, fs_reg a
);
330 fs_inst
*ADDC(fs_reg dst
, fs_reg src0
, fs_reg src1
);
331 fs_inst
*SUBB(fs_reg dst
, fs_reg src0
, fs_reg src1
);
332 fs_inst
*SEL(fs_reg dst
, fs_reg src0
, fs_reg src1
);
334 int type_size(const struct glsl_type
*type
);
335 fs_inst
*get_instruction_generating_reg(fs_inst
*start
,
339 exec_list
VARYING_PULL_CONSTANT_LOAD(const fs_reg
&dst
,
340 const fs_reg
&surf_index
,
341 const fs_reg
&varying_offset
,
342 uint32_t const_offset
);
345 void assign_binding_table_offsets();
346 void setup_payload_gen4();
347 void setup_payload_gen6();
348 void assign_curb_setup();
349 void calculate_urb_setup();
350 void assign_urb_setup();
351 bool assign_regs(bool allow_spilling
);
352 void assign_regs_trivial();
353 void get_used_mrfs(bool *mrf_used
);
354 void setup_payload_interference(struct ra_graph
*g
, int payload_reg_count
,
355 int first_payload_node
);
356 void setup_mrf_hack_interference(struct ra_graph
*g
,
357 int first_mrf_hack_node
);
358 int choose_spill_reg(struct ra_graph
*g
);
359 void spill_reg(int spill_reg
);
360 void split_virtual_grfs();
361 void compact_virtual_grfs();
362 void move_uniform_array_access_to_pull_constants();
363 void assign_constant_locations();
364 void demote_pull_constants();
365 void invalidate_live_intervals();
366 void calculate_live_intervals();
367 void calculate_register_pressure();
368 bool opt_algebraic();
370 bool opt_cse_local(bblock_t
*block
, exec_list
*aeb
);
371 bool opt_copy_propagate();
372 bool try_copy_propagate(fs_inst
*inst
, int arg
, acp_entry
*entry
);
373 bool try_constant_propagate(fs_inst
*inst
, acp_entry
*entry
);
374 bool opt_copy_propagate_local(void *mem_ctx
, bblock_t
*block
,
376 void opt_drop_redundant_mov_to_flags();
377 bool register_coalesce();
378 bool compute_to_mrf();
379 bool dead_code_eliminate();
380 bool remove_duplicate_mrf_writes();
381 bool virtual_grf_interferes(int a
, int b
);
382 void schedule_instructions(instruction_scheduler_mode mode
);
383 void insert_gen4_send_dependency_workarounds();
384 void insert_gen4_pre_send_dependency_workarounds(fs_inst
*inst
);
385 void insert_gen4_post_send_dependency_workarounds(fs_inst
*inst
);
386 void vfail(const char *msg
, va_list args
);
387 void fail(const char *msg
, ...);
388 void no16(const char *msg
, ...);
389 void lower_uniform_pull_constant_loads();
391 void push_force_uncompressed();
392 void pop_force_uncompressed();
394 void emit_dummy_fs();
395 fs_reg
*emit_fragcoord_interpolation(ir_variable
*ir
);
396 fs_inst
*emit_linterp(const fs_reg
&attr
, const fs_reg
&interp
,
397 glsl_interp_qualifier interpolation_mode
,
398 bool is_centroid
, bool is_sample
);
399 fs_reg
*emit_frontfacing_interpolation(ir_variable
*ir
);
400 fs_reg
*emit_samplepos_setup(ir_variable
*ir
);
401 fs_reg
*emit_sampleid_setup(ir_variable
*ir
);
402 fs_reg
*emit_general_interpolation(ir_variable
*ir
);
403 void emit_interpolation_setup_gen4();
404 void emit_interpolation_setup_gen6();
405 void compute_sample_position(fs_reg dst
, fs_reg int_sample_pos
);
406 fs_reg
rescale_texcoord(ir_texture
*ir
, fs_reg coordinate
,
407 bool is_rect
, int sampler
, int texunit
);
408 fs_inst
*emit_texture_gen4(ir_texture
*ir
, fs_reg dst
, fs_reg coordinate
,
409 fs_reg shadow_comp
, fs_reg lod
, fs_reg lod2
);
410 fs_inst
*emit_texture_gen5(ir_texture
*ir
, fs_reg dst
, fs_reg coordinate
,
411 fs_reg shadow_comp
, fs_reg lod
, fs_reg lod2
,
412 fs_reg sample_index
);
413 fs_inst
*emit_texture_gen7(ir_texture
*ir
, fs_reg dst
, fs_reg coordinate
,
414 fs_reg shadow_comp
, fs_reg lod
, fs_reg lod2
,
415 fs_reg sample_index
, fs_reg mcs
, int sampler
);
416 fs_reg
emit_mcs_fetch(ir_texture
*ir
, fs_reg coordinate
, int sampler
);
417 void emit_gen6_gather_wa(uint8_t wa
, fs_reg dst
);
418 fs_reg
fix_math_operand(fs_reg src
);
419 fs_inst
*emit_math(enum opcode op
, fs_reg dst
, fs_reg src0
);
420 fs_inst
*emit_math(enum opcode op
, fs_reg dst
, fs_reg src0
, fs_reg src1
);
421 void emit_lrp(const fs_reg
&dst
, const fs_reg
&x
, const fs_reg
&y
,
423 void emit_minmax(uint32_t conditionalmod
, const fs_reg
&dst
,
424 const fs_reg
&src0
, const fs_reg
&src1
);
425 bool try_emit_saturate(ir_expression
*ir
);
426 bool try_emit_mad(ir_expression
*ir
);
427 void try_replace_with_sel();
428 bool opt_peephole_sel();
429 bool opt_peephole_predicated_break();
430 bool opt_saturate_propagation();
431 void emit_bool_to_cond_code(ir_rvalue
*condition
);
432 void emit_if_gen6(ir_if
*ir
);
433 void emit_unspill(fs_inst
*inst
, fs_reg reg
, uint32_t spill_offset
,
436 void emit_fragment_program_code();
437 void setup_fp_regs();
438 fs_reg
get_fp_src_reg(const prog_src_register
*src
);
439 fs_reg
get_fp_dst_reg(const prog_dst_register
*dst
);
440 void emit_fp_alu1(enum opcode opcode
,
441 const struct prog_instruction
*fpi
,
442 fs_reg dst
, fs_reg src
);
443 void emit_fp_alu2(enum opcode opcode
,
444 const struct prog_instruction
*fpi
,
445 fs_reg dst
, fs_reg src0
, fs_reg src1
);
446 void emit_fp_scalar_write(const struct prog_instruction
*fpi
,
447 fs_reg dst
, fs_reg src
);
448 void emit_fp_scalar_math(enum opcode opcode
,
449 const struct prog_instruction
*fpi
,
450 fs_reg dst
, fs_reg src
);
452 void emit_fp_minmax(const struct prog_instruction
*fpi
,
453 fs_reg dst
, fs_reg src0
, fs_reg src1
);
455 void emit_fp_sop(uint32_t conditional_mod
,
456 const struct prog_instruction
*fpi
,
457 fs_reg dst
, fs_reg src0
, fs_reg src1
, fs_reg one
);
459 void emit_color_write(int target
, int index
, int first_color_mrf
);
460 void emit_alpha_test();
461 void emit_fb_writes();
463 void emit_shader_time_begin();
464 void emit_shader_time_end();
465 void emit_shader_time_write(enum shader_time_shader_type type
,
468 void emit_untyped_atomic(unsigned atomic_op
, unsigned surf_index
,
469 fs_reg dst
, fs_reg offset
, fs_reg src0
,
472 void emit_untyped_surface_read(unsigned surf_index
, fs_reg dst
,
475 bool try_rewrite_rhs_to_dst(ir_assignment
*ir
,
478 fs_inst
*pre_rhs_inst
,
479 fs_inst
*last_rhs_inst
);
480 void emit_assignment_writes(fs_reg
&l
, fs_reg
&r
,
481 const glsl_type
*type
, bool predicated
);
482 void resolve_ud_negate(fs_reg
*reg
);
483 void resolve_bool_comparison(ir_rvalue
*rvalue
, fs_reg
*reg
);
485 fs_reg
get_timestamp();
487 struct brw_reg
interp_reg(int location
, int channel
);
488 void setup_uniform_values(ir_variable
*ir
);
489 void setup_builtin_uniform_values(ir_variable
*ir
);
490 int implied_mrf_writes(fs_inst
*inst
);
492 virtual void dump_instructions();
493 void dump_instruction(backend_instruction
*inst
);
495 void visit_atomic_counter_intrinsic(ir_call
*ir
);
497 struct gl_fragment_program
*fp
;
498 const struct brw_wm_prog_key
*const key
;
499 struct brw_wm_prog_data
*prog_data
;
500 unsigned int sanity_param_count
;
504 int *virtual_grf_sizes
;
505 int virtual_grf_count
;
506 int virtual_grf_array_size
;
507 int *virtual_grf_start
;
508 int *virtual_grf_end
;
509 brw::fs_live_variables
*live_intervals
;
511 int *regs_live_at_ip
;
513 /** Number of uniform variable components visited. */
516 /** Byte-offset for the next available spot in the scratch space buffer. */
517 unsigned last_scratch
;
520 * Array mapping UNIFORM register numbers to the pull parameter index,
521 * or -1 if this uniform register isn't being uploaded as a pull constant.
523 int *pull_constant_loc
;
526 * Array mapping UNIFORM register numbers to the push parameter index,
527 * or -1 if this uniform register isn't being uploaded as a push constant.
529 int *push_constant_loc
;
531 struct hash_table
*variable_ht
;
534 fs_reg outputs
[BRW_MAX_DRAW_BUFFERS
];
535 unsigned output_components
[BRW_MAX_DRAW_BUFFERS
];
536 fs_reg dual_src_output
;
538 int first_non_payload_grf
;
539 /** Either BRW_MAX_GRF or GEN7_MRF_HACK_START */
542 fs_reg
*fp_temp_regs
;
543 fs_reg
*fp_input_regs
;
545 /** @{ debug annotation info */
546 const char *current_annotation
;
552 bool simd16_unsupported
;
555 /* Result of last visit() method. */
558 /** Register numbers for thread payload fields. */
560 uint8_t source_depth_reg
;
561 uint8_t source_w_reg
;
562 uint8_t aa_dest_stencil_reg
;
563 uint8_t dest_depth_reg
;
564 uint8_t sample_pos_reg
;
565 uint8_t sample_mask_in_reg
;
566 uint8_t barycentric_coord_reg
[BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT
];
568 /** The number of thread payload registers the hardware will supply. */
572 bool source_depth_to_render_target
;
573 bool runtime_check_aads_emit
;
579 fs_reg delta_x
[BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT
];
580 fs_reg delta_y
[BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT
];
581 fs_reg shader_start_time
;
584 bool spilled_any_registers
;
586 const unsigned dispatch_width
; /**< 8 or 16 */
588 int force_uncompressed_stack
;
592 * The fragment shader code generator.
594 * Translates FS IR to actual i965 assembly code.
599 fs_generator(struct brw_context
*brw
,
601 const struct brw_wm_prog_key
*key
,
602 struct brw_wm_prog_data
*prog_data
,
603 struct gl_shader_program
*prog
,
604 struct gl_fragment_program
*fp
,
605 bool dual_source_output
,
609 const unsigned *generate_assembly(exec_list
*simd8_instructions
,
610 exec_list
*simd16_instructions
,
611 unsigned *assembly_size
);
614 void generate_code(exec_list
*instructions
);
615 void generate_fb_write(fs_inst
*inst
);
616 void generate_blorp_fb_write(fs_inst
*inst
);
617 void generate_pixel_xy(struct brw_reg dst
, bool is_x
);
618 void generate_linterp(fs_inst
*inst
, struct brw_reg dst
,
619 struct brw_reg
*src
);
620 void generate_tex(fs_inst
*inst
, struct brw_reg dst
, struct brw_reg src
);
621 void generate_math1_gen7(fs_inst
*inst
,
624 void generate_math2_gen7(fs_inst
*inst
,
627 struct brw_reg src1
);
628 void generate_math1_gen6(fs_inst
*inst
,
631 void generate_math2_gen6(fs_inst
*inst
,
634 struct brw_reg src1
);
635 void generate_math_gen4(fs_inst
*inst
,
638 void generate_math_g45(fs_inst
*inst
,
641 void generate_ddx(fs_inst
*inst
, struct brw_reg dst
, struct brw_reg src
);
642 void generate_ddy(fs_inst
*inst
, struct brw_reg dst
, struct brw_reg src
,
644 void generate_scratch_write(fs_inst
*inst
, struct brw_reg src
);
645 void generate_scratch_read(fs_inst
*inst
, struct brw_reg dst
);
646 void generate_scratch_read_gen7(fs_inst
*inst
, struct brw_reg dst
);
647 void generate_uniform_pull_constant_load(fs_inst
*inst
, struct brw_reg dst
,
648 struct brw_reg index
,
649 struct brw_reg offset
);
650 void generate_uniform_pull_constant_load_gen7(fs_inst
*inst
,
652 struct brw_reg surf_index
,
653 struct brw_reg offset
);
654 void generate_varying_pull_constant_load(fs_inst
*inst
, struct brw_reg dst
,
655 struct brw_reg index
,
656 struct brw_reg offset
);
657 void generate_varying_pull_constant_load_gen7(fs_inst
*inst
,
659 struct brw_reg index
,
660 struct brw_reg offset
);
661 void generate_mov_dispatch_to_flags(fs_inst
*inst
);
663 void generate_set_omask(fs_inst
*inst
,
665 struct brw_reg sample_mask
);
667 void generate_set_sample_id(fs_inst
*inst
,
670 struct brw_reg src1
);
672 void generate_set_simd4x2_offset(fs_inst
*inst
,
674 struct brw_reg offset
);
675 void generate_discard_jump(fs_inst
*inst
);
677 void generate_pack_half_2x16_split(fs_inst
*inst
,
681 void generate_unpack_half_2x16_split(fs_inst
*inst
,
685 void generate_shader_time_add(fs_inst
*inst
,
686 struct brw_reg payload
,
687 struct brw_reg offset
,
688 struct brw_reg value
);
690 void generate_untyped_atomic(fs_inst
*inst
,
692 struct brw_reg atomic_op
,
693 struct brw_reg surf_index
);
695 void generate_untyped_surface_read(fs_inst
*inst
,
697 struct brw_reg surf_index
);
699 bool patch_discard_jumps_to_fb_writes();
701 struct brw_context
*brw
;
702 struct gl_context
*ctx
;
704 struct brw_compile
*p
;
705 const struct brw_wm_prog_key
*const key
;
706 struct brw_wm_prog_data
*prog_data
;
708 struct gl_shader_program
*prog
;
709 const struct gl_fragment_program
*fp
;
711 unsigned dispatch_width
; /**< 8 or 16 */
713 exec_list discard_halt_patches
;
714 bool dual_source_output
;
715 const bool debug_flag
;
720 * The fragment shader code generator.
722 * Translates FS IR to actual i965 assembly code.
724 class gen8_fs_generator
: public gen8_generator
727 gen8_fs_generator(struct brw_context
*brw
,
729 const struct brw_wm_prog_key
*key
,
730 struct brw_wm_prog_data
*prog_data
,
731 struct gl_shader_program
*prog
,
732 struct gl_fragment_program
*fp
,
733 bool dual_source_output
);
734 ~gen8_fs_generator();
736 const unsigned *generate_assembly(exec_list
*simd8_instructions
,
737 exec_list
*simd16_instructions
,
738 unsigned *assembly_size
);
741 void generate_code(exec_list
*instructions
);
742 void generate_fb_write(fs_inst
*inst
);
743 void generate_linterp(fs_inst
*inst
, struct brw_reg dst
,
744 struct brw_reg
*src
);
745 void generate_tex(fs_inst
*inst
, struct brw_reg dst
, struct brw_reg src
);
746 void generate_math1(fs_inst
*inst
, struct brw_reg dst
, struct brw_reg src
);
747 void generate_math2(fs_inst
*inst
, struct brw_reg dst
,
748 struct brw_reg src0
, struct brw_reg src1
);
749 void generate_ddx(fs_inst
*inst
, struct brw_reg dst
, struct brw_reg src
);
750 void generate_ddy(fs_inst
*inst
, struct brw_reg dst
, struct brw_reg src
,
752 void generate_scratch_write(fs_inst
*inst
, struct brw_reg src
);
753 void generate_scratch_read(fs_inst
*inst
, struct brw_reg dst
);
754 void generate_scratch_read_gen7(fs_inst
*inst
, struct brw_reg dst
);
755 void generate_uniform_pull_constant_load(fs_inst
*inst
,
757 struct brw_reg index
,
758 struct brw_reg offset
);
759 void generate_varying_pull_constant_load(fs_inst
*inst
,
761 struct brw_reg index
,
762 struct brw_reg offset
);
763 void generate_mov_dispatch_to_flags(fs_inst
*ir
);
764 void generate_set_omask(fs_inst
*ir
,
766 struct brw_reg sample_mask
);
767 void generate_set_sample_id(fs_inst
*ir
,
770 struct brw_reg src1
);
771 void generate_set_simd4x2_offset(fs_inst
*ir
,
773 struct brw_reg offset
);
774 void generate_pack_half_2x16_split(fs_inst
*inst
,
778 void generate_unpack_half_2x16_split(fs_inst
*inst
,
781 void generate_untyped_atomic(fs_inst
*inst
,
783 struct brw_reg atomic_op
,
784 struct brw_reg surf_index
);
786 void generate_untyped_surface_read(fs_inst
*inst
,
788 struct brw_reg surf_index
);
789 void generate_discard_jump(fs_inst
*ir
);
791 bool patch_discard_jumps_to_fb_writes();
793 const struct brw_wm_prog_key
*const key
;
794 struct brw_wm_prog_data
*prog_data
;
795 const struct gl_fragment_program
*fp
;
797 unsigned dispatch_width
; /** 8 or 16 */
799 bool dual_source_output
;
801 exec_list discard_halt_patches
;
/* GLSL IR lowering passes used by the FS backend; each returns whether
 * it made progress — presumably, per the usual pass convention; confirm
 * against the definitions. */
bool brw_do_channel_expressions(struct exec_list *instructions);
bool brw_do_vector_splitting(struct exec_list *instructions);

/* Shader-program precompile hook for the FS stage. */
bool brw_fs_precompile(struct gl_context *ctx, struct gl_shader_program *prog);

/* Convert an IR-level fs_reg to the hardware brw_reg representation. */
struct brw_reg brw_reg_from_fs_reg(fs_reg *reg);