intel/compiler: Add support for variable workgroup size
[mesa.git] / src / intel / compiler / brw_fs.h
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #ifndef BRW_FS_H
29 #define BRW_FS_H
30
31 #include "brw_shader.h"
32 #include "brw_ir_fs.h"
33 #include "brw_fs_builder.h"
34 #include "brw_fs_live_variables.h"
35 #include "compiler/nir/nir.h"
36
37 struct bblock_t;
/* Forward declaration only.  An unnamed namespace in a header is normally an
 * anti-pattern (it gives the name internal linkage in every including TU),
 * but here it exists solely so that the copy-propagation method signatures
 * below (try_copy_propagate / try_constant_propagate) can mention acp_entry,
 * whose definition presumably lives in the copy-propagation .cpp — do not
 * "fix" this without checking that translation unit. */
namespace {
   struct acp_entry;
}
41
42 class fs_visitor;
43
namespace brw {
   /**
    * Register pressure analysis of a shader.  Estimates how many registers
    * are live at any point of the program in GRF units.
    */
   struct register_pressure {
      register_pressure(const fs_visitor *v);
      ~register_pressure();

      /* The result is invalidated whenever instructions are added, removed
       * or reordered, their data flow changes, or virtual registers are
       * created or destroyed. */
      analysis_dependency_class
      dependency_class() const
      {
         return (DEPENDENCY_INSTRUCTION_IDENTITY |
                 DEPENDENCY_INSTRUCTION_DATA_FLOW |
                 DEPENDENCY_VARIABLES);
      }

      /* No consistency checking implemented yet; always reports success. */
      bool
      validate(const fs_visitor *) const
      {
         /* FINISHME */
         return true;
      }

      /* Estimated number of GRFs live at each instruction, indexed by IP. */
      unsigned *regs_live_at_ip;
   };
}
71
72 struct brw_gs_compile;
73
74 static inline fs_reg
75 offset(const fs_reg &reg, const brw::fs_builder &bld, unsigned delta)
76 {
77 return offset(reg, bld.dispatch_width(), delta);
78 }
79
80 #define UBO_START ((1 << 16) - 4)
81
/* Per-compile statistics filled in by the visitor and handed to the
 * generator (see fs_generator::generate_code below). */
struct shader_stats {
   /* Name of the instruction-scheduler heuristic that was used. */
   const char *scheduler_mode;
   /* Number of constants promoted during compilation. */
   unsigned promoted_constants;
};
86
/**
 * The fragment shader front-end.
 *
 * Translates either GLSL IR or Mesa IR (for ARB_fragment_program) into FS IR.
 */
class fs_visitor : public backend_shader
{
public:
   /* Generic per-stage constructor. */
   fs_visitor(const struct brw_compiler *compiler, void *log_data,
              void *mem_ctx,
              const brw_base_prog_key *key,
              struct brw_stage_prog_data *prog_data,
              const nir_shader *shader,
              unsigned dispatch_width,
              int shader_time_index,
              const struct brw_vue_map *input_vue_map = NULL);
   /* Geometry-shader constructor carrying the extra GS compile state. */
   fs_visitor(const struct brw_compiler *compiler, void *log_data,
              void *mem_ctx,
              struct brw_gs_compile *gs_compile,
              struct brw_gs_prog_data *prog_data,
              const nir_shader *shader,
              int shader_time_index);
   void init();
   ~fs_visitor();

   /* Allocate a virtual GRF sized for a value of the given GLSL type. */
   fs_reg vgrf(const glsl_type *const type);
   void import_uniforms(fs_visitor *v);

   void VARYING_PULL_CONSTANT_LOAD(const brw::fs_builder &bld,
                                   const fs_reg &dst,
                                   const fs_reg &surf_index,
                                   const fs_reg &varying_offset,
                                   uint32_t const_offset);
   void DEP_RESOLVE_MOV(const brw::fs_builder &bld, int grf);

   /* Top-level per-stage entry points: translate, optimize and
    * register-allocate the shader.  Return false on failure (see
    * fail()/failed below). */
   bool run_fs(bool allow_spilling, bool do_rep_send);
   bool run_vs();
   bool run_tcs();
   bool run_tes();
   bool run_gs();
   bool run_cs(unsigned min_dispatch_width);
   void optimize();
   void allocate_registers(unsigned min_dispatch_width, bool allow_spilling);

   /* Thread payload layout setup, per stage / hardware generation. */
   void setup_fs_payload_gen4();
   void setup_fs_payload_gen6();
   void setup_vs_payload();
   void setup_gs_payload();
   void setup_cs_payload();

   bool fixup_sends_duplicate_payload();
   void fixup_3src_null_dest();
   bool fixup_nomask_control_flow();
   void assign_curb_setup();
   void assign_urb_setup();
   void convert_attr_sources_to_hw_regs(fs_inst *inst);
   void assign_vs_urb_setup();
   void assign_tcs_urb_setup();
   void assign_tes_urb_setup();
   void assign_gs_urb_setup();
   /* Register allocation; returns false if allocation failed. */
   bool assign_regs(bool allow_spilling, bool spill_all);
   void assign_regs_trivial();
   void calculate_payload_ranges(int payload_node_count,
                                 int *payload_last_use_ip) const;
   void split_virtual_grfs();
   bool compact_virtual_grfs();
   void assign_constant_locations();
   bool get_pull_locs(const fs_reg &src, unsigned *out_surf_index,
                      unsigned *out_pull_index);
   void lower_constant_loads();
   virtual void invalidate_analysis(brw::analysis_dependency_class c);
   void validate();

   /* Optimization passes.  Each returns whether it made progress, so the
    * driver loop in optimize() can iterate to a fixed point. */
   bool opt_algebraic();
   bool opt_redundant_discard_jumps();
   bool opt_cse();
   bool opt_cse_local(const brw::fs_live_variables &live, bblock_t *block, int &ip);

   bool opt_copy_propagation();
   bool try_copy_propagate(fs_inst *inst, int arg, acp_entry *entry);
   bool try_constant_propagate(fs_inst *inst, acp_entry *entry);
   bool opt_copy_propagation_local(void *mem_ctx, bblock_t *block,
                                   exec_list *acp);
   bool opt_drop_redundant_mov_to_flags();
   bool opt_register_renaming();
   bool opt_bank_conflicts();
   unsigned bank_conflict_cycles(const fs_inst *inst) const;
   bool register_coalesce();
   bool compute_to_mrf();
   bool eliminate_find_live_channel();
   bool dead_code_eliminate();
   bool remove_duplicate_mrf_writes();
   bool remove_extra_rounding_modes();

   bool opt_sampler_eot();
   void schedule_instructions(instruction_scheduler_mode mode);
   /* Gen4 hardware lacks automatic scoreboarding for some sends; these
    * insert explicit dependency workarounds around them. */
   void insert_gen4_send_dependency_workarounds();
   void insert_gen4_pre_send_dependency_workarounds(bblock_t *block,
                                                    fs_inst *inst);
   void insert_gen4_post_send_dependency_workarounds(bblock_t *block,
                                                     fs_inst *inst);
   /* Record a compile failure (see `failed'/`fail_msg' members below). */
   void vfail(const char *msg, va_list args);
   void fail(const char *msg, ...);
   /* Restrict the maximum SIMD width this shader may be compiled at. */
   void limit_dispatch_width(unsigned n, const char *msg);

   /* Lowering passes: rewrite logical/virtual opcodes into forms the
    * hardware can actually execute. */
   void lower_uniform_pull_constant_loads();
   bool lower_load_payload();
   bool lower_pack();
   bool lower_regioning();
   bool lower_logical_sends();
   bool lower_integer_multiplication();
   bool lower_minmax();
   bool lower_simd_width();
   bool lower_barycentrics();
   bool lower_scoreboard();
   bool lower_sub_sat();
   bool opt_combine_constants();

   /* Fragment-shader emission helpers. */
   void emit_dummy_fs();
   void emit_repclear_shader();
   void emit_fragcoord_interpolation(fs_reg wpos);
   fs_reg *emit_frontfacing_interpolation();
   fs_reg *emit_samplepos_setup();
   fs_reg *emit_sampleid_setup();
   fs_reg *emit_samplemaskin_setup();
   void emit_interpolation_setup_gen4();
   void emit_interpolation_setup_gen6();
   void compute_sample_position(fs_reg dst, fs_reg int_sample_pos);
   fs_reg emit_mcs_fetch(const fs_reg &coordinate, unsigned components,
                         const fs_reg &texture,
                         const fs_reg &texture_handle);
   void emit_gen6_gather_wa(uint8_t wa, fs_reg dst);
   fs_reg resolve_source_modifiers(const fs_reg &src);
   void emit_discard_jump();
   void emit_fsign(const class brw::fs_builder &, const nir_alu_instr *instr,
                   fs_reg result, fs_reg *op, unsigned fsign_src);
   void emit_shader_float_controls_execution_mode();
   bool opt_peephole_sel();
   bool opt_peephole_predicated_break();
   bool opt_saturate_propagation();
   bool opt_cmod_propagation();
   bool opt_zero_samples();

   void set_tcs_invocation_id();

   /* NIR -> FS IR translation. */
   void emit_nir_code();
   void nir_setup_outputs();
   void nir_setup_uniforms();
   void nir_emit_system_values();
   void nir_emit_impl(nir_function_impl *impl);
   void nir_emit_cf_list(exec_list *list);
   void nir_emit_if(nir_if *if_stmt);
   void nir_emit_loop(nir_loop *loop);
   void nir_emit_block(nir_block *block);
   void nir_emit_instr(nir_instr *instr);
   void nir_emit_alu(const brw::fs_builder &bld, nir_alu_instr *instr,
                     bool need_dest);
   bool try_emit_b2fi_of_inot(const brw::fs_builder &bld, fs_reg result,
                              nir_alu_instr *instr);
   void nir_emit_load_const(const brw::fs_builder &bld,
                            nir_load_const_instr *instr);
   /* Stage-specific intrinsic handlers; unhandled cases fall through to
    * the generic nir_emit_intrinsic() below. */
   void nir_emit_vs_intrinsic(const brw::fs_builder &bld,
                              nir_intrinsic_instr *instr);
   void nir_emit_tcs_intrinsic(const brw::fs_builder &bld,
                               nir_intrinsic_instr *instr);
   void nir_emit_gs_intrinsic(const brw::fs_builder &bld,
                              nir_intrinsic_instr *instr);
   void nir_emit_fs_intrinsic(const brw::fs_builder &bld,
                              nir_intrinsic_instr *instr);
   void nir_emit_cs_intrinsic(const brw::fs_builder &bld,
                              nir_intrinsic_instr *instr);
   fs_reg get_nir_image_intrinsic_image(const brw::fs_builder &bld,
                                        nir_intrinsic_instr *instr);
   fs_reg get_nir_ssbo_intrinsic_index(const brw::fs_builder &bld,
                                       nir_intrinsic_instr *instr);
   fs_reg swizzle_nir_scratch_addr(const brw::fs_builder &bld,
                                   const fs_reg &addr,
                                   bool in_dwords);
   void nir_emit_intrinsic(const brw::fs_builder &bld,
                           nir_intrinsic_instr *instr);
   void nir_emit_tes_intrinsic(const brw::fs_builder &bld,
                               nir_intrinsic_instr *instr);
   void nir_emit_ssbo_atomic(const brw::fs_builder &bld,
                             int op, nir_intrinsic_instr *instr);
   void nir_emit_ssbo_atomic_float(const brw::fs_builder &bld,
                                   int op, nir_intrinsic_instr *instr);
   void nir_emit_shared_atomic(const brw::fs_builder &bld,
                               int op, nir_intrinsic_instr *instr);
   void nir_emit_shared_atomic_float(const brw::fs_builder &bld,
                                     int op, nir_intrinsic_instr *instr);
   void nir_emit_global_atomic(const brw::fs_builder &bld,
                               int op, nir_intrinsic_instr *instr);
   void nir_emit_global_atomic_float(const brw::fs_builder &bld,
                                     int op, nir_intrinsic_instr *instr);
   void nir_emit_texture(const brw::fs_builder &bld,
                         nir_tex_instr *instr);
   void nir_emit_jump(const brw::fs_builder &bld,
                      nir_jump_instr *instr);
   fs_reg get_nir_src(const nir_src &src);
   fs_reg get_nir_src_imm(const nir_src &src);
   fs_reg get_nir_dest(const nir_dest &dest);
   fs_reg get_indirect_offset(nir_intrinsic_instr *instr);
   fs_reg get_tcs_single_patch_icp_handle(const brw::fs_builder &bld,
                                          nir_intrinsic_instr *instr);
   fs_reg get_tcs_eight_patch_icp_handle(const brw::fs_builder &bld,
                                         nir_intrinsic_instr *instr);
   struct brw_reg get_tcs_output_urb_handle();

   void emit_percomp(const brw::fs_builder &bld, const fs_inst &inst,
                     unsigned wr_mask);

   bool optimize_extract_to_float(nir_alu_instr *instr,
                                  const fs_reg &result);
   bool optimize_frontfacing_ternary(nir_alu_instr *instr,
                                     const fs_reg &result);

   /* Framebuffer-write and URB output emission. */
   void emit_alpha_test();
   fs_inst *emit_single_fb_write(const brw::fs_builder &bld,
                                 fs_reg color1, fs_reg color2,
                                 fs_reg src0_alpha, unsigned components);
   void emit_alpha_to_coverage_workaround(const fs_reg &src0_alpha);
   void emit_fb_writes();
   fs_inst *emit_non_coherent_fb_read(const brw::fs_builder &bld,
                                      const fs_reg &dst, unsigned target);
   void emit_urb_writes(const fs_reg &gs_vertex_count = fs_reg());
   void set_gs_stream_control_data_bits(const fs_reg &vertex_count,
                                        unsigned stream_id);
   void emit_gs_control_data_bits(const fs_reg &vertex_count);
   void emit_gs_end_primitive(const nir_src &vertex_count_nir_src);
   void emit_gs_vertex(const nir_src &vertex_count_nir_src,
                       unsigned stream_id);
   void emit_gs_thread_end();
   void emit_gs_input_load(const fs_reg &dst, const nir_src &vertex_src,
                           unsigned base_offset, const nir_src &offset_src,
                           unsigned num_components, unsigned first_component);
   void emit_cs_terminate();
   fs_reg *emit_cs_work_group_id_setup();

   void emit_barrier();

   /* Shader-time instrumentation (debug profiling support). */
   void emit_shader_time_begin();
   void emit_shader_time_end();
   void SHADER_TIME_ADD(const brw::fs_builder &bld,
                        int shader_time_subindex,
                        fs_reg value);

   fs_reg get_timestamp(const brw::fs_builder &bld);

   fs_reg interp_reg(int location, int channel);

   /* Debug dumping of the IR. */
   virtual void dump_instructions() const;
   virtual void dump_instructions(const char *name) const;
   void dump_instruction(const backend_instruction *inst) const;
   void dump_instruction(const backend_instruction *inst, FILE *file) const;

   const brw_base_prog_key *const key;
   const struct brw_sampler_prog_key_data *key_tex;

   /* Non-NULL only when compiling a geometry shader (second ctor). */
   struct brw_gs_compile *gs_compile;

   struct brw_stage_prog_data *prog_data;

   const struct brw_vue_map *input_vue_map;

   int *param_size;

   BRW_ANALYSIS(live_analysis, brw::fs_live_variables,
                backend_shader *) live_analysis;
   BRW_ANALYSIS(regpressure_analysis, brw::register_pressure,
                fs_visitor *) regpressure_analysis;

   /** Number of uniform variable components visited. */
   unsigned uniforms;

   /** Byte-offset for the next available spot in the scratch space buffer. */
   unsigned last_scratch;

   /**
    * Array mapping UNIFORM register numbers to the pull parameter index,
    * or -1 if this uniform register isn't being uploaded as a pull constant.
    */
   int *pull_constant_loc;

   /**
    * Array mapping UNIFORM register numbers to the push parameter index,
    * or -1 if this uniform register isn't being uploaded as a push constant.
    */
   int *push_constant_loc;

   fs_reg subgroup_id;
   /* CS workgroup dimensions as registers — presumably to support a
    * workgroup size that is not known at compile time; confirm against
    * run_cs()/nir_emit_cs_intrinsic() in brw_fs.cpp. */
   fs_reg group_size[3];
   fs_reg scratch_base;
   fs_reg frag_depth;
   fs_reg frag_stencil;
   fs_reg sample_mask;
   fs_reg outputs[VARYING_SLOT_MAX];
   fs_reg dual_src_output;
   int first_non_payload_grf;
   /** Either BRW_MAX_GRF or GEN7_MRF_HACK_START */
   unsigned max_grf;

   /* NIR value -> FS IR register maps filled in during translation. */
   fs_reg *nir_locals;
   fs_reg *nir_ssa_values;
   fs_reg *nir_system_values;

   /* Set by fail()/vfail() when compilation cannot proceed. */
   bool failed;
   char *fail_msg;

   /** Register numbers for thread payload fields. */
   struct thread_payload {
      uint8_t subspan_coord_reg[2];
      uint8_t source_depth_reg[2];
      uint8_t source_w_reg[2];
      uint8_t aa_dest_stencil_reg[2];
      uint8_t dest_depth_reg[2];
      uint8_t sample_pos_reg[2];
      uint8_t sample_mask_in_reg[2];
      uint8_t barycentric_coord_reg[BRW_BARYCENTRIC_MODE_COUNT][2];
      uint8_t local_invocation_id_reg[2];

      /** The number of thread payload registers the hardware will supply. */
      uint8_t num_regs;
   } payload;

   bool source_depth_to_render_target;
   bool runtime_check_aads_emit;

   fs_reg pixel_x;
   fs_reg pixel_y;
   fs_reg wpos_w;
   fs_reg pixel_w;
   fs_reg delta_xy[BRW_BARYCENTRIC_MODE_COUNT];
   fs_reg shader_start_time;
   fs_reg final_gs_vertex_count;
   fs_reg control_data_bits;
   fs_reg invocation_id;

   unsigned grf_used;
   bool spilled_any_registers;

   const unsigned dispatch_width; /**< 8, 16 or 32 */
   unsigned max_dispatch_width;

   int shader_time_index;

   struct shader_stats shader_stats;

   brw::fs_builder bld;

private:
   fs_reg prepare_alu_destination_and_sources(const brw::fs_builder &bld,
                                              nir_alu_instr *instr,
                                              fs_reg *op,
                                              bool need_dest);

   void resolve_inot_sources(const brw::fs_builder &bld, nir_alu_instr *instr,
                             fs_reg *op);
   void lower_mul_dword_inst(fs_inst *inst, bblock_t *block);
   void lower_mul_qword_inst(fs_inst *inst, bblock_t *block);
   void lower_mulh_inst(fs_inst *inst, bblock_t *block);

   /* NOTE(review): presumably derives the CS workgroup size from
    * group_size[]/prog_data — confirm in brw_fs.cpp. */
   unsigned workgroup_size() const;
};
446
447 /**
448 * Return the flag register used in fragment shaders to keep track of live
449 * samples. On Gen7+ we use f1.0-f1.1 to allow discard jumps in SIMD32
450 * dispatch mode, while earlier generations are constrained to f0.1, which
451 * limits the dispatch width to SIMD16 for fragment shaders that use discard.
452 */
453 static inline unsigned
454 sample_mask_flag_subreg(const fs_visitor *shader)
455 {
456 assert(shader->stage == MESA_SHADER_FRAGMENT);
457 return shader->devinfo->gen >= 7 ? 2 : 1;
458 }
459
/**
 * The fragment shader code generator.
 *
 * Translates FS IR to actual i965 assembly code.
 */
class fs_generator
{
public:
   fs_generator(const struct brw_compiler *compiler, void *log_data,
                void *mem_ctx,
                struct brw_stage_prog_data *prog_data,
                bool runtime_check_aads_emit,
                gl_shader_stage stage);
   ~fs_generator();

   /* Enable debug output; `shader_name' is used when annotating it. */
   void enable_debug(const char *shader_name);
   /* Main entry point: emit native code for every instruction in `cfg' at
    * the given dispatch width, filling in `stats' if non-NULL. */
   int generate_code(const cfg_t *cfg, int dispatch_width,
                     struct shader_stats shader_stats,
                     struct brw_compile_stats *stats);
   const unsigned *get_assembly();

private:
   /* Per-opcode emission helpers, dispatched from generate_code(). */
   void fire_fb_write(fs_inst *inst,
                      struct brw_reg payload,
                      struct brw_reg implied_header,
                      GLuint nr);
   void generate_send(fs_inst *inst,
                      struct brw_reg dst,
                      struct brw_reg desc,
                      struct brw_reg ex_desc,
                      struct brw_reg payload,
                      struct brw_reg payload2);
   void generate_fb_write(fs_inst *inst, struct brw_reg payload);
   void generate_fb_read(fs_inst *inst, struct brw_reg dst,
                         struct brw_reg payload);
   void generate_urb_read(fs_inst *inst, struct brw_reg dst, struct brw_reg payload);
   void generate_urb_write(fs_inst *inst, struct brw_reg payload);
   void generate_cs_terminate(fs_inst *inst, struct brw_reg payload);
   void generate_barrier(fs_inst *inst, struct brw_reg src);
   bool generate_linterp(fs_inst *inst, struct brw_reg dst,
                         struct brw_reg *src);
   void generate_tex(fs_inst *inst, struct brw_reg dst,
                     struct brw_reg surface_index,
                     struct brw_reg sampler_index);
   void generate_get_buffer_size(fs_inst *inst, struct brw_reg dst,
                                 struct brw_reg src,
                                 struct brw_reg surf_index);
   void generate_ddx(const fs_inst *inst,
                     struct brw_reg dst, struct brw_reg src);
   void generate_ddy(const fs_inst *inst,
                     struct brw_reg dst, struct brw_reg src);
   void generate_scratch_write(fs_inst *inst, struct brw_reg src);
   void generate_scratch_read(fs_inst *inst, struct brw_reg dst);
   void generate_scratch_read_gen7(fs_inst *inst, struct brw_reg dst);
   void generate_uniform_pull_constant_load(fs_inst *inst, struct brw_reg dst,
                                            struct brw_reg index,
                                            struct brw_reg offset);
   void generate_uniform_pull_constant_load_gen7(fs_inst *inst,
                                                 struct brw_reg dst,
                                                 struct brw_reg surf_index,
                                                 struct brw_reg payload);
   void generate_varying_pull_constant_load_gen4(fs_inst *inst,
                                                 struct brw_reg dst,
                                                 struct brw_reg index);
   void generate_mov_dispatch_to_flags(fs_inst *inst);

   void generate_pixel_interpolator_query(fs_inst *inst,
                                          struct brw_reg dst,
                                          struct brw_reg src,
                                          struct brw_reg msg_data,
                                          unsigned msg_type);

   void generate_set_sample_id(fs_inst *inst,
                               struct brw_reg dst,
                               struct brw_reg src0,
                               struct brw_reg src1);

   void generate_discard_jump(fs_inst *inst);

   void generate_pack_half_2x16_split(fs_inst *inst,
                                      struct brw_reg dst,
                                      struct brw_reg x,
                                      struct brw_reg y);

   void generate_shader_time_add(fs_inst *inst,
                                 struct brw_reg payload,
                                 struct brw_reg offset,
                                 struct brw_reg value);

   void generate_mov_indirect(fs_inst *inst,
                              struct brw_reg dst,
                              struct brw_reg reg,
                              struct brw_reg indirect_byte_offset);

   void generate_shuffle(fs_inst *inst,
                         struct brw_reg dst,
                         struct brw_reg src,
                         struct brw_reg idx);

   void generate_quad_swizzle(const fs_inst *inst,
                              struct brw_reg dst, struct brw_reg src,
                              unsigned swiz);

   /* Retargets recorded discard jumps (see discard_halt_patches below). */
   bool patch_discard_jumps_to_fb_writes();

   const struct brw_compiler *compiler;
   void *log_data; /* Passed to compiler->*_log functions */

   const struct gen_device_info *devinfo;

   struct brw_codegen *p;
   struct brw_stage_prog_data * const prog_data;

   unsigned dispatch_width; /**< 8, 16 or 32 */

   /* Discard jumps recorded during generation so they can be patched once
    * their final target is known. */
   exec_list discard_halt_patches;
   bool runtime_check_aads_emit;
   bool debug_flag;
   const char *shader_name;
   gl_shader_stage stage;
   void *mem_ctx;
};
582
583 namespace brw {
584 inline fs_reg
585 fetch_payload_reg(const brw::fs_builder &bld, uint8_t regs[2],
586 brw_reg_type type = BRW_REGISTER_TYPE_F)
587 {
588 if (!regs[0])
589 return fs_reg();
590
591 if (bld.dispatch_width() > 16) {
592 const fs_reg tmp = bld.vgrf(type);
593 const brw::fs_builder hbld = bld.exec_all().group(16, 0);
594 const unsigned m = bld.dispatch_width() / hbld.dispatch_width();
595 fs_reg *const components = new fs_reg[m];
596
597 for (unsigned g = 0; g < m; g++)
598 components[g] = retype(brw_vec8_grf(regs[g], 0), type);
599
600 hbld.LOAD_PAYLOAD(tmp, components, m, 0);
601
602 delete[] components;
603 return tmp;
604
605 } else {
606 return fs_reg(retype(brw_vec8_grf(regs[0], 0), type));
607 }
608 }
609
610 inline fs_reg
611 fetch_barycentric_reg(const brw::fs_builder &bld, uint8_t regs[2])
612 {
613 if (!regs[0])
614 return fs_reg();
615
616 const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, 2);
617 const brw::fs_builder hbld = bld.exec_all().group(8, 0);
618 const unsigned m = bld.dispatch_width() / hbld.dispatch_width();
619 fs_reg *const components = new fs_reg[2 * m];
620
621 for (unsigned c = 0; c < 2; c++) {
622 for (unsigned g = 0; g < m; g++)
623 components[c * m + g] = offset(brw_vec8_grf(regs[g / 2], 0),
624 hbld, c + 2 * (g % 2));
625 }
626
627 hbld.LOAD_PAYLOAD(tmp, components, 2 * m, 0);
628
629 delete[] components;
630 return tmp;
631 }
632
633 bool
634 lower_src_modifiers(fs_visitor *v, bblock_t *block, fs_inst *inst, unsigned i);
635 }
636
/* Move data that was read 32 bits per component into `dst' starting at
 * `first_component' — presumably handling destinations of a different
 * bit-size; confirm against the definition. */
void shuffle_from_32bit_read(const brw::fs_builder &bld,
                             const fs_reg &dst,
                             const fs_reg &src,
                             uint32_t first_component,
                             uint32_t components);

/* Helpers materializing double-float, signed-byte and unsigned-byte
 * immediates as registers. */
fs_reg setup_imm_df(const brw::fs_builder &bld,
                    double v);

fs_reg setup_imm_b(const brw::fs_builder &bld,
                   int8_t v);

fs_reg setup_imm_ub(const brw::fs_builder &bld,
                    uint8_t v);

/* Map an interpolation mode plus a NIR barycentric intrinsic to the
 * corresponding brw_barycentric_mode. */
enum brw_barycentric_mode brw_barycentric_mode(enum glsl_interp_mode mode,
                                               nir_intrinsic_op op);

/* Compute the message-control bits for a framebuffer-write instruction. */
uint32_t brw_fb_write_msg_control(const fs_inst *inst,
                                  const struct brw_wm_prog_data *prog_data);

void brw_compute_urb_setup_index(struct brw_wm_prog_data *wm_prog_data);

#endif /* BRW_FS_H */
659
660 #endif /* BRW_FS_H */