/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
30 #include "brw_shader.h"
34 #include <sys/types.h>
36 #include "main/macros.h"
37 #include "main/shaderobj.h"
38 #include "main/uniforms.h"
39 #include "program/prog_parameter.h"
40 #include "program/prog_print.h"
41 #include "program/prog_optimize.h"
42 #include "util/register_allocate.h"
43 #include "program/hash_table.h"
44 #include "brw_context.h"
47 #include "brw_shader.h"
48 #include "intel_asm_annotation.h"
50 #include "glsl/glsl_types.h"
52 #include "glsl/nir/nir.h"
53 #include "program/sampler.h"
55 #define MAX_SAMPLER_MESSAGE_SIZE 11
56 #define MAX_VGRF_SIZE 16
64 class fs_live_variables
;
70 class fs_reg
: public backend_reg
{
72 DECLARE_RALLOC_CXX_OPERATORS(fs_reg
)
77 explicit fs_reg(float f
);
78 explicit fs_reg(int32_t i
);
79 explicit fs_reg(uint32_t u
);
80 explicit fs_reg(uint8_t vf
[4]);
81 explicit fs_reg(uint8_t vf0
, uint8_t vf1
, uint8_t vf2
, uint8_t vf3
);
82 fs_reg(struct brw_reg fixed_hw_reg
);
83 fs_reg(enum register_file file
, int reg
);
84 fs_reg(enum register_file file
, int reg
, enum brw_reg_type type
);
85 fs_reg(enum register_file file
, int reg
, enum brw_reg_type type
, uint8_t width
);
86 fs_reg(fs_visitor
*v
, const struct glsl_type
*type
);
88 bool equals(const fs_reg
&r
) const;
89 bool is_contiguous() const;
91 /** Smear a channel of the reg to all channels. */
92 fs_reg
&set_smear(unsigned subreg
);
95 * Offset in bytes from the start of the register. Values up to a
96 * backend_reg::reg_offset unit are valid.
103 * The register width. This indicates how many hardware values are
104 * represented by each virtual value. Valid values are 1, 8, or 16.
105 * For immediate values, this is 1. Most of the rest of the time, it
106 * will be equal to the dispatch width.
111 * Returns the effective register width when used as a source in the
112 * given instruction. Registers such as uniforms and immediates
113 * effectively take on the width of the instruction in which they are
116 uint8_t effective_width
;
118 /** Register region horizontal stride */
125 assert(reg
.file
!= HW_REG
&& reg
.file
!= IMM
);
126 reg
.negate
= !reg
.negate
;
131 retype(fs_reg reg
, enum brw_reg_type type
)
133 reg
.fixed_hw_reg
.type
= reg
.type
= type
;
138 byte_offset(fs_reg reg
, unsigned delta
)
145 reg
.reg_offset
+= delta
/ 32;
148 reg
.reg
+= delta
/ 32;
153 reg
.subreg_offset
+= delta
% 32;
158 horiz_offset(fs_reg reg
, unsigned delta
)
164 /* These only have a single component that is implicitly splatted. A
165 * horizontal offset should be a harmless no-op.
171 return byte_offset(reg
, delta
* reg
.stride
* type_sz(reg
.type
));
179 offset(fs_reg reg
, unsigned delta
)
181 assert(reg
.stride
> 0);
188 return byte_offset(reg
, delta
* reg
.width
* reg
.stride
* type_sz(reg
.type
));
190 reg
.reg_offset
+= delta
;
199 component(fs_reg reg
, unsigned idx
)
201 assert(reg
.subreg_offset
== 0);
202 assert(idx
< reg
.width
);
203 reg
.subreg_offset
= idx
* type_sz(reg
.type
);
209 * Get either of the 8-component halves of a 16-component register.
211 * Note: this also works if \c reg represents a SIMD16 pair of registers.
214 half(fs_reg reg
, unsigned idx
)
218 if (reg
.file
== UNIFORM
)
221 assert(idx
== 0 || (reg
.file
!= HW_REG
&& reg
.file
!= IMM
));
222 assert(reg
.width
== 16);
224 return horiz_offset(reg
, 8 * idx
);
227 static const fs_reg reg_undef
;
229 class fs_inst
: public backend_instruction
{
230 fs_inst
&operator=(const fs_inst
&);
232 void init(enum opcode opcode
, uint8_t exec_width
, const fs_reg
&dst
,
233 fs_reg
*src
, int sources
);
236 DECLARE_RALLOC_CXX_OPERATORS(fs_inst
)
239 fs_inst(enum opcode opcode
, uint8_t exec_size
);
240 fs_inst(enum opcode opcode
, const fs_reg
&dst
);
241 fs_inst(enum opcode opcode
, uint8_t exec_size
, const fs_reg
&dst
,
243 fs_inst(enum opcode opcode
, const fs_reg
&dst
, const fs_reg
&src0
);
244 fs_inst(enum opcode opcode
, uint8_t exec_size
, const fs_reg
&dst
,
245 const fs_reg
&src0
, const fs_reg
&src1
);
246 fs_inst(enum opcode opcode
, const fs_reg
&dst
, const fs_reg
&src0
,
248 fs_inst(enum opcode opcode
, uint8_t exec_size
, const fs_reg
&dst
,
249 const fs_reg
&src0
, const fs_reg
&src1
, const fs_reg
&src2
);
250 fs_inst(enum opcode opcode
, const fs_reg
&dst
, const fs_reg
&src0
,
251 const fs_reg
&src1
, const fs_reg
&src2
);
252 fs_inst(enum opcode opcode
, const fs_reg
&dst
, fs_reg src
[], int sources
);
253 fs_inst(enum opcode opcode
, uint8_t exec_size
, const fs_reg
&dst
,
254 fs_reg src
[], int sources
);
255 fs_inst(const fs_inst
&that
);
257 void resize_sources(uint8_t num_sources
);
259 bool equals(fs_inst
*inst
) const;
260 bool overwrites_reg(const fs_reg
®
) const;
261 bool is_send_from_grf() const;
262 bool is_partial_write() const;
263 int regs_read(fs_visitor
*v
, int arg
) const;
264 bool can_do_source_mods(struct brw_context
*brw
);
266 bool reads_flag() const;
267 bool writes_flag() const;
272 uint8_t sources
; /**< Number of fs_reg sources. */
275 * Execution size of the instruction. This is used by the generator to
276 * generate the correct binary for the given fs_inst. Current valid
277 * values are 1, 8, 16.
281 /* Chooses which flag subregister (f0.0 or f0.1) is used for conditional
282 * mod and predication.
286 uint8_t regs_written
; /**< Number of vgrfs written by a SEND message, or 1 */
288 bool force_uncompressed
:1;
289 bool force_sechalf
:1;
290 bool pi_noperspective
:1; /**< Pixel interpolator noperspective flag */
294 * The fragment shader front-end.
296 * Translates either GLSL IR or Mesa IR (for ARB_fragment_program) into FS IR.
298 class fs_visitor
: public backend_visitor
301 const fs_reg reg_null_f
;
302 const fs_reg reg_null_d
;
303 const fs_reg reg_null_ud
;
305 fs_visitor(struct brw_context
*brw
,
307 const struct brw_wm_prog_key
*key
,
308 struct brw_wm_prog_data
*prog_data
,
309 struct gl_shader_program
*shader_prog
,
310 struct gl_fragment_program
*fp
,
311 unsigned dispatch_width
);
313 fs_visitor(struct brw_context
*brw
,
315 const struct brw_vs_prog_key
*key
,
316 struct brw_vs_prog_data
*prog_data
,
317 struct gl_shader_program
*shader_prog
,
318 struct gl_vertex_program
*cp
,
319 unsigned dispatch_width
);
324 fs_reg
*variable_storage(ir_variable
*var
);
325 int virtual_grf_alloc(int size
);
326 void import_uniforms(fs_visitor
*v
);
327 void setup_uniform_clipplane_values();
328 void compute_clip_distance();
330 void visit(ir_variable
*ir
);
331 void visit(ir_assignment
*ir
);
332 void visit(ir_dereference_variable
*ir
);
333 void visit(ir_dereference_record
*ir
);
334 void visit(ir_dereference_array
*ir
);
335 void visit(ir_expression
*ir
);
336 void visit(ir_texture
*ir
);
337 void visit(ir_if
*ir
);
338 void visit(ir_constant
*ir
);
339 void visit(ir_swizzle
*ir
);
340 void visit(ir_return
*ir
);
341 void visit(ir_loop
*ir
);
342 void visit(ir_loop_jump
*ir
);
343 void visit(ir_discard
*ir
);
344 void visit(ir_call
*ir
);
345 void visit(ir_function
*ir
);
346 void visit(ir_function_signature
*ir
);
347 void visit(ir_emit_vertex
*);
348 void visit(ir_end_primitive
*);
350 uint32_t gather_channel(int orig_chan
, uint32_t sampler
);
351 void swizzle_result(ir_texture_opcode op
, int dest_components
,
352 fs_reg orig_val
, uint32_t sampler
);
354 fs_inst
*emit(fs_inst
*inst
);
355 void emit(exec_list list
);
357 fs_inst
*emit(enum opcode opcode
);
358 fs_inst
*emit(enum opcode opcode
, const fs_reg
&dst
);
359 fs_inst
*emit(enum opcode opcode
, const fs_reg
&dst
, const fs_reg
&src0
);
360 fs_inst
*emit(enum opcode opcode
, const fs_reg
&dst
, const fs_reg
&src0
,
362 fs_inst
*emit(enum opcode opcode
, const fs_reg
&dst
,
363 const fs_reg
&src0
, const fs_reg
&src1
, const fs_reg
&src2
);
364 fs_inst
*emit(enum opcode opcode
, const fs_reg
&dst
,
365 fs_reg src
[], int sources
);
367 fs_inst
*MOV(const fs_reg
&dst
, const fs_reg
&src
);
368 fs_inst
*NOT(const fs_reg
&dst
, const fs_reg
&src
);
369 fs_inst
*RNDD(const fs_reg
&dst
, const fs_reg
&src
);
370 fs_inst
*RNDE(const fs_reg
&dst
, const fs_reg
&src
);
371 fs_inst
*RNDZ(const fs_reg
&dst
, const fs_reg
&src
);
372 fs_inst
*FRC(const fs_reg
&dst
, const fs_reg
&src
);
373 fs_inst
*ADD(const fs_reg
&dst
, const fs_reg
&src0
, const fs_reg
&src1
);
374 fs_inst
*MUL(const fs_reg
&dst
, const fs_reg
&src0
, const fs_reg
&src1
);
375 fs_inst
*MACH(const fs_reg
&dst
, const fs_reg
&src0
, const fs_reg
&src1
);
376 fs_inst
*MAC(const fs_reg
&dst
, const fs_reg
&src0
, const fs_reg
&src1
);
377 fs_inst
*SHL(const fs_reg
&dst
, const fs_reg
&src0
, const fs_reg
&src1
);
378 fs_inst
*SHR(const fs_reg
&dst
, const fs_reg
&src0
, const fs_reg
&src1
);
379 fs_inst
*ASR(const fs_reg
&dst
, const fs_reg
&src0
, const fs_reg
&src1
);
380 fs_inst
*AND(const fs_reg
&dst
, const fs_reg
&src0
, const fs_reg
&src1
);
381 fs_inst
*OR(const fs_reg
&dst
, const fs_reg
&src0
, const fs_reg
&src1
);
382 fs_inst
*XOR(const fs_reg
&dst
, const fs_reg
&src0
, const fs_reg
&src1
);
383 fs_inst
*IF(enum brw_predicate predicate
);
384 fs_inst
*IF(const fs_reg
&src0
, const fs_reg
&src1
,
385 enum brw_conditional_mod condition
);
386 fs_inst
*CMP(fs_reg dst
, fs_reg src0
, fs_reg src1
,
387 enum brw_conditional_mod condition
);
388 fs_inst
*LRP(const fs_reg
&dst
, const fs_reg
&a
, const fs_reg
&y
,
390 fs_inst
*DEP_RESOLVE_MOV(int grf
);
391 fs_inst
*BFREV(const fs_reg
&dst
, const fs_reg
&value
);
392 fs_inst
*BFE(const fs_reg
&dst
, const fs_reg
&bits
, const fs_reg
&offset
,
393 const fs_reg
&value
);
394 fs_inst
*BFI1(const fs_reg
&dst
, const fs_reg
&bits
, const fs_reg
&offset
);
395 fs_inst
*BFI2(const fs_reg
&dst
, const fs_reg
&bfi1_dst
,
396 const fs_reg
&insert
, const fs_reg
&base
);
397 fs_inst
*FBH(const fs_reg
&dst
, const fs_reg
&value
);
398 fs_inst
*FBL(const fs_reg
&dst
, const fs_reg
&value
);
399 fs_inst
*CBIT(const fs_reg
&dst
, const fs_reg
&value
);
400 fs_inst
*MAD(const fs_reg
&dst
, const fs_reg
&c
, const fs_reg
&b
,
402 fs_inst
*ADDC(const fs_reg
&dst
, const fs_reg
&src0
, const fs_reg
&src1
);
403 fs_inst
*SUBB(const fs_reg
&dst
, const fs_reg
&src0
, const fs_reg
&src1
);
404 fs_inst
*SEL(const fs_reg
&dst
, const fs_reg
&src0
, const fs_reg
&src1
);
406 int type_size(const struct glsl_type
*type
);
407 fs_inst
*get_instruction_generating_reg(fs_inst
*start
,
411 fs_inst
*LOAD_PAYLOAD(const fs_reg
&dst
, fs_reg
*src
, int sources
);
413 exec_list
VARYING_PULL_CONSTANT_LOAD(const fs_reg
&dst
,
414 const fs_reg
&surf_index
,
415 const fs_reg
&varying_offset
,
416 uint32_t const_offset
);
421 void allocate_registers();
422 void assign_binding_table_offsets();
423 void setup_payload_gen4();
424 void setup_payload_gen6();
425 void setup_vs_payload();
426 void assign_curb_setup();
427 void calculate_urb_setup();
428 void assign_urb_setup();
429 void assign_vs_urb_setup();
430 bool assign_regs(bool allow_spilling
);
431 void assign_regs_trivial();
432 void get_used_mrfs(bool *mrf_used
);
433 void setup_payload_interference(struct ra_graph
*g
, int payload_reg_count
,
434 int first_payload_node
);
435 void setup_mrf_hack_interference(struct ra_graph
*g
,
436 int first_mrf_hack_node
);
437 int choose_spill_reg(struct ra_graph
*g
);
438 void spill_reg(int spill_reg
);
439 void split_virtual_grfs();
440 bool compact_virtual_grfs();
441 void move_uniform_array_access_to_pull_constants();
442 void assign_constant_locations();
443 void demote_pull_constants();
444 void invalidate_live_intervals();
445 void calculate_live_intervals();
446 void calculate_register_pressure();
447 bool opt_algebraic();
449 bool opt_cse_local(bblock_t
*block
);
450 bool opt_copy_propagate();
451 bool try_copy_propagate(fs_inst
*inst
, int arg
, acp_entry
*entry
);
452 bool try_constant_propagate(fs_inst
*inst
, acp_entry
*entry
);
453 bool opt_copy_propagate_local(void *mem_ctx
, bblock_t
*block
,
455 bool opt_register_renaming();
456 bool register_coalesce();
457 bool compute_to_mrf();
458 bool dead_code_eliminate();
459 bool remove_duplicate_mrf_writes();
460 bool virtual_grf_interferes(int a
, int b
);
461 void schedule_instructions(instruction_scheduler_mode mode
);
462 void insert_gen4_send_dependency_workarounds();
463 void insert_gen4_pre_send_dependency_workarounds(bblock_t
*block
,
465 void insert_gen4_post_send_dependency_workarounds(bblock_t
*block
,
467 void vfail(const char *msg
, va_list args
);
468 void fail(const char *msg
, ...);
469 void no16(const char *msg
, ...);
470 void lower_uniform_pull_constant_loads();
471 bool lower_load_payload();
473 void emit_dummy_fs();
474 void emit_repclear_shader();
475 fs_reg
*emit_fragcoord_interpolation(bool pixel_center_integer
,
476 bool origin_upper_left
);
477 fs_inst
*emit_linterp(const fs_reg
&attr
, const fs_reg
&interp
,
478 glsl_interp_qualifier interpolation_mode
,
479 bool is_centroid
, bool is_sample
);
480 fs_reg
*emit_frontfacing_interpolation();
481 fs_reg
*emit_samplepos_setup();
482 fs_reg
*emit_sampleid_setup();
483 void emit_general_interpolation(fs_reg attr
, const char *name
,
484 const glsl_type
*type
,
485 glsl_interp_qualifier interpolation_mode
,
486 int location
, bool mod_centroid
,
488 fs_reg
*emit_vs_system_value(enum brw_reg_type type
, int location
);
489 void emit_interpolation_setup_gen4();
490 void emit_interpolation_setup_gen6();
491 void compute_sample_position(fs_reg dst
, fs_reg int_sample_pos
);
492 fs_reg
rescale_texcoord(fs_reg coordinate
, int coord_components
,
493 bool is_rect
, uint32_t sampler
, int texunit
);
494 fs_inst
*emit_texture_gen4(ir_texture_opcode op
, fs_reg dst
,
495 fs_reg coordinate
, int coord_components
,
497 fs_reg lod
, fs_reg lod2
, int grad_components
,
499 fs_inst
*emit_texture_gen5(ir_texture_opcode op
, fs_reg dst
,
500 fs_reg coordinate
, int coord_components
,
502 fs_reg lod
, fs_reg lod2
, int grad_components
,
503 fs_reg sample_index
, uint32_t sampler
,
505 fs_inst
*emit_texture_gen7(ir_texture_opcode op
, fs_reg dst
,
506 fs_reg coordinate
, int coord_components
,
508 fs_reg lod
, fs_reg lod2
, int grad_components
,
509 fs_reg sample_index
, fs_reg mcs
, fs_reg sampler
,
510 fs_reg offset_value
);
511 void emit_texture(ir_texture_opcode op
,
512 const glsl_type
*dest_type
,
513 fs_reg coordinate
, int components
,
515 fs_reg lod
, fs_reg dpdy
, int grad_components
,
517 fs_reg offset
, unsigned offset_components
,
519 int gather_component
,
525 fs_reg
emit_mcs_fetch(fs_reg coordinate
, int components
, fs_reg sampler
);
526 void emit_gen6_gather_wa(uint8_t wa
, fs_reg dst
);
527 fs_reg
fix_math_operand(fs_reg src
);
528 fs_inst
*emit_math(enum opcode op
, fs_reg dst
, fs_reg src0
);
529 fs_inst
*emit_math(enum opcode op
, fs_reg dst
, fs_reg src0
, fs_reg src1
);
530 void emit_lrp(const fs_reg
&dst
, const fs_reg
&x
, const fs_reg
&y
,
532 void emit_minmax(enum brw_conditional_mod conditionalmod
, const fs_reg
&dst
,
533 const fs_reg
&src0
, const fs_reg
&src1
);
534 bool try_emit_saturate(ir_expression
*ir
);
535 bool try_emit_line(ir_expression
*ir
);
536 bool try_emit_mad(ir_expression
*ir
);
537 void try_replace_with_sel();
538 bool opt_peephole_sel();
539 bool opt_peephole_predicated_break();
540 bool opt_saturate_propagation();
541 void emit_bool_to_cond_code(ir_rvalue
*condition
);
542 void emit_if_gen6(ir_if
*ir
);
543 void emit_unspill(bblock_t
*block
, fs_inst
*inst
, fs_reg reg
,
544 uint32_t spill_offset
, int count
);
545 void emit_spill(bblock_t
*block
, fs_inst
*inst
, fs_reg reg
,
546 uint32_t spill_offset
, int count
);
548 void emit_fragment_program_code();
549 void setup_fp_regs();
550 fs_reg
get_fp_src_reg(const prog_src_register
*src
);
551 fs_reg
get_fp_dst_reg(const prog_dst_register
*dst
);
552 void emit_fp_alu1(enum opcode opcode
,
553 const struct prog_instruction
*fpi
,
554 fs_reg dst
, fs_reg src
);
555 void emit_fp_alu2(enum opcode opcode
,
556 const struct prog_instruction
*fpi
,
557 fs_reg dst
, fs_reg src0
, fs_reg src1
);
558 void emit_fp_scalar_write(const struct prog_instruction
*fpi
,
559 fs_reg dst
, fs_reg src
);
560 void emit_fp_scalar_math(enum opcode opcode
,
561 const struct prog_instruction
*fpi
,
562 fs_reg dst
, fs_reg src
);
564 void emit_fp_minmax(const struct prog_instruction
*fpi
,
565 fs_reg dst
, fs_reg src0
, fs_reg src1
);
567 void emit_fp_sop(enum brw_conditional_mod conditional_mod
,
568 const struct prog_instruction
*fpi
,
569 fs_reg dst
, fs_reg src0
, fs_reg src1
, fs_reg one
);
571 void emit_nir_code();
572 void nir_setup_inputs(nir_shader
*shader
);
573 void nir_setup_outputs(nir_shader
*shader
);
574 void nir_setup_uniforms(nir_shader
*shader
);
575 void nir_setup_uniform(nir_variable
*var
);
576 void nir_setup_builtin_uniform(nir_variable
*var
);
577 void nir_emit_impl(nir_function_impl
*impl
);
578 void nir_emit_cf_list(exec_list
*list
);
579 void nir_emit_if(nir_if
*if_stmt
);
580 void nir_emit_loop(nir_loop
*loop
);
581 void nir_emit_block(nir_block
*block
);
582 void nir_emit_instr(nir_instr
*instr
);
583 void nir_emit_alu(nir_alu_instr
*instr
);
584 void nir_emit_intrinsic(nir_intrinsic_instr
*instr
);
585 void nir_emit_texture(nir_tex_instr
*instr
);
586 void nir_emit_load_const(nir_load_const_instr
*instr
);
587 void nir_emit_jump(nir_jump_instr
*instr
);
588 fs_reg
get_nir_src(nir_src src
);
589 fs_reg
get_nir_alu_src(nir_alu_instr
*instr
, unsigned src
);
590 fs_reg
get_nir_dest(nir_dest dest
);
591 void emit_percomp(fs_inst
*inst
, unsigned wr_mask
);
592 void emit_percomp(enum opcode op
, fs_reg dest
, fs_reg src0
,
593 unsigned wr_mask
, bool saturate
= false,
594 enum brw_predicate predicate
= BRW_PREDICATE_NONE
,
595 enum brw_conditional_mod mod
= BRW_CONDITIONAL_NONE
);
596 void emit_percomp(enum opcode op
, fs_reg dest
, fs_reg src0
, fs_reg src1
,
597 unsigned wr_mask
, bool saturate
= false,
598 enum brw_predicate predicate
= BRW_PREDICATE_NONE
,
599 enum brw_conditional_mod mod
= BRW_CONDITIONAL_NONE
);
600 void emit_math_percomp(enum opcode op
, fs_reg dest
, fs_reg src0
,
601 unsigned wr_mask
, bool saturate
= false);
602 void emit_math_percomp(enum opcode op
, fs_reg dest
, fs_reg src0
,
603 fs_reg src1
, unsigned wr_mask
,
604 bool saturate
= false);
605 void emit_reduction(enum opcode op
, fs_reg dest
, fs_reg src
,
606 unsigned num_components
);
608 int setup_color_payload(fs_reg
*dst
, fs_reg color
, unsigned components
);
609 void emit_alpha_test();
610 fs_inst
*emit_single_fb_write(fs_reg color1
, fs_reg color2
,
611 fs_reg src0_alpha
, unsigned components
);
612 void emit_fb_writes();
613 void emit_urb_writes();
615 void emit_shader_time_begin();
616 void emit_shader_time_end();
617 void emit_shader_time_write(enum shader_time_shader_type type
,
620 void emit_untyped_atomic(unsigned atomic_op
, unsigned surf_index
,
621 fs_reg dst
, fs_reg offset
, fs_reg src0
,
624 void emit_untyped_surface_read(unsigned surf_index
, fs_reg dst
,
627 void emit_interpolate_expression(ir_expression
*ir
);
629 bool try_rewrite_rhs_to_dst(ir_assignment
*ir
,
632 fs_inst
*pre_rhs_inst
,
633 fs_inst
*last_rhs_inst
);
634 void emit_assignment_writes(fs_reg
&l
, fs_reg
&r
,
635 const glsl_type
*type
, bool predicated
);
636 void resolve_ud_negate(fs_reg
*reg
);
637 void resolve_bool_comparison(ir_rvalue
*rvalue
, fs_reg
*reg
);
639 fs_reg
get_timestamp();
641 struct brw_reg
interp_reg(int location
, int channel
);
642 void setup_uniform_values(ir_variable
*ir
);
643 void setup_builtin_uniform_values(ir_variable
*ir
);
644 int implied_mrf_writes(fs_inst
*inst
);
646 virtual void dump_instructions();
647 virtual void dump_instructions(const char *name
);
648 void dump_instruction(backend_instruction
*inst
);
649 void dump_instruction(backend_instruction
*inst
, FILE *file
);
651 void visit_atomic_counter_intrinsic(ir_call
*ir
);
653 const void *const key
;
654 struct brw_stage_prog_data
*prog_data
;
655 unsigned int sanity_param_count
;
659 int *virtual_grf_sizes
;
660 int virtual_grf_count
;
661 int virtual_grf_array_size
;
662 int *virtual_grf_start
;
663 int *virtual_grf_end
;
664 brw::fs_live_variables
*live_intervals
;
666 int *regs_live_at_ip
;
668 /** Number of uniform variable components visited. */
671 /** Byte-offset for the next available spot in the scratch space buffer. */
672 unsigned last_scratch
;
675 * Array mapping UNIFORM register numbers to the pull parameter index,
676 * or -1 if this uniform register isn't being uploaded as a pull constant.
678 int *pull_constant_loc
;
681 * Array mapping UNIFORM register numbers to the push parameter index,
682 * or -1 if this uniform register isn't being uploaded as a push constant.
684 int *push_constant_loc
;
686 struct hash_table
*variable_ht
;
689 fs_reg outputs
[VARYING_SLOT_MAX
];
690 unsigned output_components
[VARYING_SLOT_MAX
];
691 fs_reg dual_src_output
;
693 int first_non_payload_grf
;
694 /** Either BRW_MAX_GRF or GEN7_MRF_HACK_START */
697 fs_reg
*fp_temp_regs
;
698 fs_reg
*fp_input_regs
;
706 /** @{ debug annotation info */
707 const char *current_annotation
;
713 bool simd16_unsupported
;
716 /* Result of last visit() method. */
719 /** Register numbers for thread payload fields. */
721 uint8_t source_depth_reg
;
722 uint8_t source_w_reg
;
723 uint8_t aa_dest_stencil_reg
;
724 uint8_t dest_depth_reg
;
725 uint8_t sample_pos_reg
;
726 uint8_t sample_mask_in_reg
;
727 uint8_t barycentric_coord_reg
[BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT
];
729 /** The number of thread payload registers the hardware will supply. */
733 bool source_depth_to_render_target
;
734 bool runtime_check_aads_emit
;
740 fs_reg delta_x
[BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT
];
741 fs_reg delta_y
[BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT
];
742 fs_reg shader_start_time
;
743 fs_reg userplane
[MAX_CLIP_PLANES
];
746 bool spilled_any_registers
;
748 const unsigned dispatch_width
; /**< 8 or 16 */
752 * The fragment shader code generator.
754 * Translates FS IR to actual i965 assembly code.
759 fs_generator(struct brw_context
*brw
,
762 struct brw_stage_prog_data
*prog_data
,
763 struct gl_program
*fp
,
764 bool runtime_check_aads_emit
,
765 const char *stage_abbrev
);
768 void enable_debug(const char *shader_name
);
769 int generate_code(const cfg_t
*cfg
, int dispatch_width
);
770 const unsigned *get_assembly(unsigned int *assembly_size
);
773 void fire_fb_write(fs_inst
*inst
,
774 struct brw_reg payload
,
775 struct brw_reg implied_header
,
777 void generate_fb_write(fs_inst
*inst
, struct brw_reg payload
);
778 void generate_urb_write(fs_inst
*inst
, struct brw_reg payload
);
779 void generate_blorp_fb_write(fs_inst
*inst
);
780 void generate_pixel_xy(struct brw_reg dst
, bool is_x
);
781 void generate_linterp(fs_inst
*inst
, struct brw_reg dst
,
782 struct brw_reg
*src
);
783 void generate_tex(fs_inst
*inst
, struct brw_reg dst
, struct brw_reg src
,
784 struct brw_reg sampler_index
);
785 void generate_math_gen6(fs_inst
*inst
,
788 struct brw_reg src1
);
789 void generate_math_gen4(fs_inst
*inst
,
792 void generate_math_g45(fs_inst
*inst
,
795 void generate_ddx(enum opcode op
, struct brw_reg dst
, struct brw_reg src
);
796 void generate_ddy(enum opcode op
, struct brw_reg dst
, struct brw_reg src
,
798 void generate_scratch_write(fs_inst
*inst
, struct brw_reg src
);
799 void generate_scratch_read(fs_inst
*inst
, struct brw_reg dst
);
800 void generate_scratch_read_gen7(fs_inst
*inst
, struct brw_reg dst
);
801 void generate_uniform_pull_constant_load(fs_inst
*inst
, struct brw_reg dst
,
802 struct brw_reg index
,
803 struct brw_reg offset
);
804 void generate_uniform_pull_constant_load_gen7(fs_inst
*inst
,
806 struct brw_reg surf_index
,
807 struct brw_reg offset
);
808 void generate_varying_pull_constant_load(fs_inst
*inst
, struct brw_reg dst
,
809 struct brw_reg index
,
810 struct brw_reg offset
);
811 void generate_varying_pull_constant_load_gen7(fs_inst
*inst
,
813 struct brw_reg index
,
814 struct brw_reg offset
);
815 void generate_mov_dispatch_to_flags(fs_inst
*inst
);
817 void generate_pixel_interpolator_query(fs_inst
*inst
,
820 struct brw_reg msg_data
,
823 void generate_set_omask(fs_inst
*inst
,
825 struct brw_reg sample_mask
);
827 void generate_set_sample_id(fs_inst
*inst
,
830 struct brw_reg src1
);
832 void generate_set_simd4x2_offset(fs_inst
*inst
,
834 struct brw_reg offset
);
835 void generate_discard_jump(fs_inst
*inst
);
837 void generate_pack_half_2x16_split(fs_inst
*inst
,
841 void generate_unpack_half_2x16_split(fs_inst
*inst
,
845 void generate_shader_time_add(fs_inst
*inst
,
846 struct brw_reg payload
,
847 struct brw_reg offset
,
848 struct brw_reg value
);
850 void generate_untyped_atomic(fs_inst
*inst
,
852 struct brw_reg payload
,
853 struct brw_reg atomic_op
,
854 struct brw_reg surf_index
);
856 void generate_untyped_surface_read(fs_inst
*inst
,
858 struct brw_reg payload
,
859 struct brw_reg surf_index
);
861 bool patch_discard_jumps_to_fb_writes();
863 struct brw_context
*brw
;
864 struct gl_context
*ctx
;
866 struct brw_compile
*p
;
867 const void * const key
;
868 struct brw_stage_prog_data
* const prog_data
;
870 const struct gl_program
*prog
;
872 unsigned dispatch_width
; /**< 8 or 16 */
874 exec_list discard_halt_patches
;
875 bool runtime_check_aads_emit
;
877 const char *shader_name
;
878 const char *stage_abbrev
;
882 bool brw_do_channel_expressions(struct exec_list
*instructions
);
883 bool brw_do_vector_splitting(struct exec_list
*instructions
);