intel/compiler: Introduce backend_shader method to propagate IR changes to analysis...
[mesa.git] / src / intel / compiler / brw_fs.h
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #ifndef BRW_FS_H
29 #define BRW_FS_H
30
31 #include "brw_shader.h"
32 #include "brw_ir_fs.h"
33 #include "brw_fs_builder.h"
34 #include "brw_fs_live_variables.h"
35 #include "compiler/nir/nir.h"
36
37 struct bblock_t;
/* Opaque forward declaration of the copy-propagation ACP entry type used by
 * try_copy_propagate()/try_constant_propagate() below.
 *
 * NOTE(review): an anonymous namespace in a header gives this name internal
 * linkage in *every* including TU; it only works because acp_entry is used
 * strictly as an opaque pointer type here and is fully defined in the
 * copy-propagation .cpp — confirm before moving or renaming it.
 */
namespace {
   struct acp_entry;
}
41
/* Intentionally empty here; the brw namespace is populated further down in
 * this header (payload fetch helpers) and in other compiler headers. */
namespace brw {
}
44
45 struct brw_gs_compile;
46
47 static inline fs_reg
48 offset(const fs_reg &reg, const brw::fs_builder &bld, unsigned delta)
49 {
50 return offset(reg, bld.dispatch_width(), delta);
51 }
52
53 #define UBO_START ((1 << 16) - 4)
54
/**
 * Miscellaneous per-compile statistics, handed from the visitor to
 * fs_generator (which stores a copy) for reporting.
 */
struct shader_stats {
   /** Label of the instruction-scheduling mode used — presumably set by
    *  schedule_instructions(); confirm in brw_fs.cpp. */
   const char *scheduler_mode;
   /** Count of constants promoted — presumably by the constant-combining /
    *  uniform-promotion passes; confirm in brw_fs.cpp. */
   unsigned promoted_constants;
};
59
/**
 * The scalar shader front-end.
 *
 * Translates NIR into FS IR for fragment, vertex, tessellation, geometry and
 * compute shaders (see the run_*() entry points below).
 */
class fs_visitor : public backend_shader
{
public:
   /* General-purpose constructor, used for every stage except GS. */
   fs_visitor(const struct brw_compiler *compiler, void *log_data,
              void *mem_ctx,
              const brw_base_prog_key *key,
              struct brw_stage_prog_data *prog_data,
              const nir_shader *shader,
              unsigned dispatch_width,
              int shader_time_index,
              const struct brw_vue_map *input_vue_map = NULL);
   /* Geometry-shader constructor: state lives in *gs_compile. */
   fs_visitor(const struct brw_compiler *compiler, void *log_data,
              void *mem_ctx,
              struct brw_gs_compile *gs_compile,
              struct brw_gs_prog_data *prog_data,
              const nir_shader *shader,
              int shader_time_index);
   void init();
   ~fs_visitor();

   fs_reg vgrf(const glsl_type *const type);
   void import_uniforms(fs_visitor *v);

   void VARYING_PULL_CONSTANT_LOAD(const brw::fs_builder &bld,
                                   const fs_reg &dst,
                                   const fs_reg &surf_index,
                                   const fs_reg &varying_offset,
                                   uint32_t const_offset);
   void DEP_RESOLVE_MOV(const brw::fs_builder &bld, int grf);

   /* Per-stage compile entry points; return false on failure (see failed /
    * fail_msg below). */
   bool run_fs(bool allow_spilling, bool do_rep_send);
   bool run_vs();
   bool run_tcs();
   bool run_tes();
   bool run_gs();
   bool run_cs(unsigned min_dispatch_width);
   void optimize();
   void allocate_registers(unsigned min_dispatch_width, bool allow_spilling);

   /* Thread-payload and URB/CURB setup helpers. */
   void setup_fs_payload_gen4();
   void setup_fs_payload_gen6();
   void setup_vs_payload();
   void setup_gs_payload();
   void setup_cs_payload();
   bool fixup_sends_duplicate_payload();
   void fixup_3src_null_dest();
   bool fixup_nomask_control_flow();
   void assign_curb_setup();
   void assign_urb_setup();
   void convert_attr_sources_to_hw_regs(fs_inst *inst);
   void assign_vs_urb_setup();
   void assign_tcs_urb_setup();
   void assign_tes_urb_setup();
   void assign_gs_urb_setup();

   /* Register allocation. */
   bool assign_regs(bool allow_spilling, bool spill_all);
   void assign_regs_trivial();
   void calculate_payload_ranges(int payload_node_count,
                                 int *payload_last_use_ip);
   void split_virtual_grfs();
   bool compact_virtual_grfs();
   void assign_constant_locations();
   bool get_pull_locs(const fs_reg &src, unsigned *out_surf_index,
                      unsigned *out_pull_index);
   void lower_constant_loads();
   void invalidate_live_intervals();
   virtual void invalidate_analysis(brw::analysis_dependency_class c);
   void calculate_live_intervals();
   void calculate_register_pressure();
   void validate();

   /* Optimization passes; each returns whether it made progress. */
   bool opt_algebraic();
   bool opt_redundant_discard_jumps();
   bool opt_cse();
   bool opt_cse_local(bblock_t *block, int &ip);
   bool opt_copy_propagation();
   bool try_copy_propagate(fs_inst *inst, int arg, acp_entry *entry);
   bool try_constant_propagate(fs_inst *inst, acp_entry *entry);
   bool opt_copy_propagation_local(void *mem_ctx, bblock_t *block,
                                   exec_list *acp);
   bool opt_drop_redundant_mov_to_flags();
   bool opt_register_renaming();
   bool opt_bank_conflicts();
   unsigned bank_conflict_cycles(const fs_inst *inst) const;
   bool register_coalesce();
   bool compute_to_mrf();
   bool eliminate_find_live_channel();
   bool dead_code_eliminate();
   bool remove_duplicate_mrf_writes();
   bool remove_extra_rounding_modes();

   bool opt_sampler_eot();
   bool virtual_grf_interferes(int a, int b);
   void schedule_instructions(instruction_scheduler_mode mode);
   void insert_gen4_send_dependency_workarounds();
   void insert_gen4_pre_send_dependency_workarounds(bblock_t *block,
                                                    fs_inst *inst);
   void insert_gen4_post_send_dependency_workarounds(bblock_t *block,
                                                     fs_inst *inst);
   void vfail(const char *msg, va_list args);
   void fail(const char *msg, ...);
   void limit_dispatch_width(unsigned n, const char *msg);

   /* IR lowering passes run before code generation. */
   void lower_uniform_pull_constant_loads();
   bool lower_load_payload();
   bool lower_pack();
   bool lower_regioning();
   bool lower_logical_sends();
   bool lower_integer_multiplication();
   bool lower_minmax();
   bool lower_simd_width();
   bool lower_barycentrics();
   bool lower_scoreboard();
   bool lower_sub_sat();
   bool opt_combine_constants();

   /* Fragment-shader emission helpers. */
   void emit_dummy_fs();
   void emit_repclear_shader();
   void emit_fragcoord_interpolation(fs_reg wpos);
   fs_reg *emit_frontfacing_interpolation();
   fs_reg *emit_samplepos_setup();
   fs_reg *emit_sampleid_setup();
   fs_reg *emit_samplemaskin_setup();
   void emit_interpolation_setup_gen4();
   void emit_interpolation_setup_gen6();
   void compute_sample_position(fs_reg dst, fs_reg int_sample_pos);
   fs_reg emit_mcs_fetch(const fs_reg &coordinate, unsigned components,
                         const fs_reg &texture,
                         const fs_reg &texture_handle);
   void emit_gen6_gather_wa(uint8_t wa, fs_reg dst);
   fs_reg resolve_source_modifiers(const fs_reg &src);
   void emit_discard_jump();
   void emit_fsign(const class brw::fs_builder &, const nir_alu_instr *instr,
                   fs_reg result, fs_reg *op, unsigned fsign_src);
   void emit_shader_float_controls_execution_mode();
   bool opt_peephole_sel();
   bool opt_peephole_predicated_break();
   bool opt_saturate_propagation();
   bool opt_cmod_propagation();
   bool opt_zero_samples();

   void set_tcs_invocation_id();

   /* NIR-to-FS-IR translation. */
   void emit_nir_code();
   void nir_setup_outputs();
   void nir_setup_uniforms();
   void nir_emit_system_values();
   void nir_emit_impl(nir_function_impl *impl);
   void nir_emit_cf_list(exec_list *list);
   void nir_emit_if(nir_if *if_stmt);
   void nir_emit_loop(nir_loop *loop);
   void nir_emit_block(nir_block *block);
   void nir_emit_instr(nir_instr *instr);
   void nir_emit_alu(const brw::fs_builder &bld, nir_alu_instr *instr,
                     bool need_dest);
   bool try_emit_b2fi_of_inot(const brw::fs_builder &bld, fs_reg result,
                              nir_alu_instr *instr);
   void nir_emit_load_const(const brw::fs_builder &bld,
                            nir_load_const_instr *instr);
   void nir_emit_vs_intrinsic(const brw::fs_builder &bld,
                              nir_intrinsic_instr *instr);
   void nir_emit_tcs_intrinsic(const brw::fs_builder &bld,
                               nir_intrinsic_instr *instr);
   void nir_emit_gs_intrinsic(const brw::fs_builder &bld,
                              nir_intrinsic_instr *instr);
   void nir_emit_fs_intrinsic(const brw::fs_builder &bld,
                              nir_intrinsic_instr *instr);
   void nir_emit_cs_intrinsic(const brw::fs_builder &bld,
                              nir_intrinsic_instr *instr);
   fs_reg get_nir_image_intrinsic_image(const brw::fs_builder &bld,
                                        nir_intrinsic_instr *instr);
   fs_reg get_nir_ssbo_intrinsic_index(const brw::fs_builder &bld,
                                       nir_intrinsic_instr *instr);
   fs_reg swizzle_nir_scratch_addr(const brw::fs_builder &bld,
                                   const fs_reg &addr,
                                   bool in_dwords);
   void nir_emit_intrinsic(const brw::fs_builder &bld,
                           nir_intrinsic_instr *instr);
   void nir_emit_tes_intrinsic(const brw::fs_builder &bld,
                               nir_intrinsic_instr *instr);
   void nir_emit_ssbo_atomic(const brw::fs_builder &bld,
                             int op, nir_intrinsic_instr *instr);
   void nir_emit_ssbo_atomic_float(const brw::fs_builder &bld,
                                   int op, nir_intrinsic_instr *instr);
   void nir_emit_shared_atomic(const brw::fs_builder &bld,
                               int op, nir_intrinsic_instr *instr);
   void nir_emit_shared_atomic_float(const brw::fs_builder &bld,
                                     int op, nir_intrinsic_instr *instr);
   void nir_emit_global_atomic(const brw::fs_builder &bld,
                               int op, nir_intrinsic_instr *instr);
   void nir_emit_global_atomic_float(const brw::fs_builder &bld,
                                     int op, nir_intrinsic_instr *instr);
   void nir_emit_texture(const brw::fs_builder &bld,
                         nir_tex_instr *instr);
   void nir_emit_jump(const brw::fs_builder &bld,
                      nir_jump_instr *instr);
   fs_reg get_nir_src(const nir_src &src);
   fs_reg get_nir_src_imm(const nir_src &src);
   fs_reg get_nir_dest(const nir_dest &dest);
   fs_reg get_indirect_offset(nir_intrinsic_instr *instr);
   fs_reg get_tcs_single_patch_icp_handle(const brw::fs_builder &bld,
                                          nir_intrinsic_instr *instr);
   fs_reg get_tcs_eight_patch_icp_handle(const brw::fs_builder &bld,
                                         nir_intrinsic_instr *instr);
   struct brw_reg get_tcs_output_urb_handle();

   void emit_percomp(const brw::fs_builder &bld, const fs_inst &inst,
                     unsigned wr_mask);

   bool optimize_extract_to_float(nir_alu_instr *instr,
                                  const fs_reg &result);
   bool optimize_frontfacing_ternary(nir_alu_instr *instr,
                                     const fs_reg &result);

   /* Framebuffer-write / URB-write / GS / CS emission helpers. */
   void emit_alpha_test();
   fs_inst *emit_single_fb_write(const brw::fs_builder &bld,
                                 fs_reg color1, fs_reg color2,
                                 fs_reg src0_alpha, unsigned components);
   void emit_alpha_to_coverage_workaround(const fs_reg &src0_alpha);
   void emit_fb_writes();
   fs_inst *emit_non_coherent_fb_read(const brw::fs_builder &bld,
                                      const fs_reg &dst, unsigned target);
   void emit_urb_writes(const fs_reg &gs_vertex_count = fs_reg());
   void set_gs_stream_control_data_bits(const fs_reg &vertex_count,
                                        unsigned stream_id);
   void emit_gs_control_data_bits(const fs_reg &vertex_count);
   void emit_gs_end_primitive(const nir_src &vertex_count_nir_src);
   void emit_gs_vertex(const nir_src &vertex_count_nir_src,
                       unsigned stream_id);
   void emit_gs_thread_end();
   void emit_gs_input_load(const fs_reg &dst, const nir_src &vertex_src,
                           unsigned base_offset, const nir_src &offset_src,
                           unsigned num_components, unsigned first_component);
   void emit_cs_terminate();
   fs_reg *emit_cs_work_group_id_setup();

   void emit_barrier();

   /* INTEL_DEBUG shader-time instrumentation. */
   void emit_shader_time_begin();
   void emit_shader_time_end();
   void SHADER_TIME_ADD(const brw::fs_builder &bld,
                        int shader_time_subindex,
                        fs_reg value);

   fs_reg get_timestamp(const brw::fs_builder &bld);

   fs_reg interp_reg(int location, int channel);

   virtual void dump_instructions();
   virtual void dump_instructions(const char *name);
   void dump_instruction(backend_instruction *inst);
   void dump_instruction(backend_instruction *inst, FILE *file);

   const brw_base_prog_key *const key;
   const struct brw_sampler_prog_key_data *key_tex;

   /* Only non-NULL when compiling a geometry shader (second constructor). */
   struct brw_gs_compile *gs_compile;

   struct brw_stage_prog_data *prog_data;

   const struct brw_vue_map *input_vue_map;

   /* Live ranges / register-pressure data filled in by
    * calculate_live_intervals() and calculate_register_pressure(). */
   int *virtual_grf_start;
   int *virtual_grf_end;
   brw::fs_live_variables *live_intervals;

   int *regs_live_at_ip;

   /** Number of uniform variable components visited. */
   unsigned uniforms;

   /** Byte-offset for the next available spot in the scratch space buffer. */
   unsigned last_scratch;

   /**
    * Array mapping UNIFORM register numbers to the pull parameter index,
    * or -1 if this uniform register isn't being uploaded as a pull constant.
    */
   int *pull_constant_loc;

   /**
    * Array mapping UNIFORM register numbers to the push parameter index,
    * or -1 if this uniform register isn't being uploaded as a push constant.
    */
   int *push_constant_loc;

   fs_reg subgroup_id;
   fs_reg scratch_base;
   fs_reg frag_depth;
   fs_reg frag_stencil;
   fs_reg sample_mask;
   fs_reg outputs[VARYING_SLOT_MAX];
   fs_reg dual_src_output;
   int first_non_payload_grf;
   /** Either BRW_MAX_GRF or GEN7_MRF_HACK_START */
   unsigned max_grf;

   /* Per-NIR-object fs_reg tables built during nir_* translation. */
   fs_reg *nir_locals;
   fs_reg *nir_ssa_values;
   fs_reg *nir_system_values;

   /* Set by fail()/vfail(); checked by the run_*() entry points. */
   bool failed;
   char *fail_msg;

   /** Register numbers for thread payload fields. */
   struct thread_payload {
      uint8_t subspan_coord_reg[2];
      uint8_t source_depth_reg[2];
      uint8_t source_w_reg[2];
      uint8_t aa_dest_stencil_reg[2];
      uint8_t dest_depth_reg[2];
      uint8_t sample_pos_reg[2];
      uint8_t sample_mask_in_reg[2];
      uint8_t barycentric_coord_reg[BRW_BARYCENTRIC_MODE_COUNT][2];
      uint8_t local_invocation_id_reg[2];

      /** The number of thread payload registers the hardware will supply. */
      uint8_t num_regs;
   } payload;

   bool source_depth_to_render_target;
   bool runtime_check_aads_emit;

   /* Fragment-shader interpolation inputs set up by
    * emit_interpolation_setup_gen4()/gen6(). */
   fs_reg pixel_x;
   fs_reg pixel_y;
   fs_reg wpos_w;
   fs_reg pixel_w;
   fs_reg delta_xy[BRW_BARYCENTRIC_MODE_COUNT];
   fs_reg shader_start_time;
   fs_reg final_gs_vertex_count;
   fs_reg control_data_bits;
   fs_reg invocation_id;

   unsigned grf_used;
   bool spilled_any_registers;

   const unsigned dispatch_width; /**< 8, 16 or 32 */
   unsigned max_dispatch_width;

   int shader_time_index;

   struct shader_stats shader_stats;

   brw::fs_builder bld;

private:
   fs_reg prepare_alu_destination_and_sources(const brw::fs_builder &bld,
                                              nir_alu_instr *instr,
                                              fs_reg *op,
                                              bool need_dest);

   void resolve_inot_sources(const brw::fs_builder &bld, nir_alu_instr *instr,
                             fs_reg *op);
   /* Helpers for lower_integer_multiplication(). */
   void lower_mul_dword_inst(fs_inst *inst, bblock_t *block);
   void lower_mul_qword_inst(fs_inst *inst, bblock_t *block);
   void lower_mulh_inst(fs_inst *inst, bblock_t *block);

   unsigned workgroup_size() const;
};
420
421 /**
422 * Return the flag register used in fragment shaders to keep track of live
423 * samples. On Gen7+ we use f1.0-f1.1 to allow discard jumps in SIMD32
424 * dispatch mode, while earlier generations are constrained to f0.1, which
425 * limits the dispatch width to SIMD16 for fragment shaders that use discard.
426 */
427 static inline unsigned
428 sample_mask_flag_subreg(const fs_visitor *shader)
429 {
430 assert(shader->stage == MESA_SHADER_FRAGMENT);
431 return shader->devinfo->gen >= 7 ? 2 : 1;
432 }
433
434 /**
435 * The fragment shader code generator.
436 *
437 * Translates FS IR to actual i965 assembly code.
438 */
class fs_generator
{
public:
   fs_generator(const struct brw_compiler *compiler, void *log_data,
                void *mem_ctx,
                struct brw_stage_prog_data *prog_data,
                struct shader_stats shader_stats,
                bool runtime_check_aads_emit,
                gl_shader_stage stage);
   ~fs_generator();

   void enable_debug(const char *shader_name);
   /* Walk the CFG and emit machine code; returns the code size (presumably
    * in bytes — confirm in brw_fs_generator.cpp). */
   int generate_code(const cfg_t *cfg, int dispatch_width,
                     struct brw_compile_stats *stats);
   const unsigned *get_assembly();

private:
   /* Per-opcode emission helpers called from generate_code(). */
   void fire_fb_write(fs_inst *inst,
                      struct brw_reg payload,
                      struct brw_reg implied_header,
                      GLuint nr);
   void generate_send(fs_inst *inst,
                      struct brw_reg dst,
                      struct brw_reg desc,
                      struct brw_reg ex_desc,
                      struct brw_reg payload,
                      struct brw_reg payload2);
   void generate_fb_write(fs_inst *inst, struct brw_reg payload);
   void generate_fb_read(fs_inst *inst, struct brw_reg dst,
                         struct brw_reg payload);
   void generate_urb_read(fs_inst *inst, struct brw_reg dst, struct brw_reg payload);
   void generate_urb_write(fs_inst *inst, struct brw_reg payload);
   void generate_cs_terminate(fs_inst *inst, struct brw_reg payload);
   void generate_barrier(fs_inst *inst, struct brw_reg src);
   bool generate_linterp(fs_inst *inst, struct brw_reg dst,
                         struct brw_reg *src);
   void generate_tex(fs_inst *inst, struct brw_reg dst,
                     struct brw_reg surface_index,
                     struct brw_reg sampler_index);
   void generate_get_buffer_size(fs_inst *inst, struct brw_reg dst,
                                 struct brw_reg src,
                                 struct brw_reg surf_index);
   void generate_ddx(const fs_inst *inst,
                     struct brw_reg dst, struct brw_reg src);
   void generate_ddy(const fs_inst *inst,
                     struct brw_reg dst, struct brw_reg src);
   void generate_scratch_write(fs_inst *inst, struct brw_reg src);
   void generate_scratch_read(fs_inst *inst, struct brw_reg dst);
   void generate_scratch_read_gen7(fs_inst *inst, struct brw_reg dst);
   void generate_uniform_pull_constant_load(fs_inst *inst, struct brw_reg dst,
                                            struct brw_reg index,
                                            struct brw_reg offset);
   void generate_uniform_pull_constant_load_gen7(fs_inst *inst,
                                                 struct brw_reg dst,
                                                 struct brw_reg surf_index,
                                                 struct brw_reg payload);
   void generate_varying_pull_constant_load_gen4(fs_inst *inst,
                                                 struct brw_reg dst,
                                                 struct brw_reg index);
   void generate_mov_dispatch_to_flags(fs_inst *inst);

   void generate_pixel_interpolator_query(fs_inst *inst,
                                          struct brw_reg dst,
                                          struct brw_reg src,
                                          struct brw_reg msg_data,
                                          unsigned msg_type);

   void generate_set_sample_id(fs_inst *inst,
                               struct brw_reg dst,
                               struct brw_reg src0,
                               struct brw_reg src1);

   void generate_discard_jump(fs_inst *inst);

   void generate_pack_half_2x16_split(fs_inst *inst,
                                      struct brw_reg dst,
                                      struct brw_reg x,
                                      struct brw_reg y);

   void generate_shader_time_add(fs_inst *inst,
                                 struct brw_reg payload,
                                 struct brw_reg offset,
                                 struct brw_reg value);

   void generate_mov_indirect(fs_inst *inst,
                              struct brw_reg dst,
                              struct brw_reg reg,
                              struct brw_reg indirect_byte_offset);

   void generate_shuffle(fs_inst *inst,
                         struct brw_reg dst,
                         struct brw_reg src,
                         struct brw_reg idx);

   void generate_quad_swizzle(const fs_inst *inst,
                              struct brw_reg dst, struct brw_reg src,
                              unsigned swiz);

   bool patch_discard_jumps_to_fb_writes();

   const struct brw_compiler *compiler;
   void *log_data; /* Passed to compiler->*_log functions */

   const struct gen_device_info *devinfo;

   /* Assembly-emission state for brw_eu.h helpers. */
   struct brw_codegen *p;
   struct brw_stage_prog_data * const prog_data;

   unsigned dispatch_width; /**< 8, 16 or 32 */

   /* Discard jumps recorded for patch_discard_jumps_to_fb_writes(). */
   exec_list discard_halt_patches;
   struct shader_stats shader_stats;
   bool runtime_check_aads_emit;
   bool debug_flag;
   const char *shader_name;
   gl_shader_stage stage;
   void *mem_ctx;
};
557
558 namespace brw {
559 inline fs_reg
560 fetch_payload_reg(const brw::fs_builder &bld, uint8_t regs[2],
561 brw_reg_type type = BRW_REGISTER_TYPE_F)
562 {
563 if (!regs[0])
564 return fs_reg();
565
566 if (bld.dispatch_width() > 16) {
567 const fs_reg tmp = bld.vgrf(type);
568 const brw::fs_builder hbld = bld.exec_all().group(16, 0);
569 const unsigned m = bld.dispatch_width() / hbld.dispatch_width();
570 fs_reg *const components = new fs_reg[m];
571
572 for (unsigned g = 0; g < m; g++)
573 components[g] = retype(brw_vec8_grf(regs[g], 0), type);
574
575 hbld.LOAD_PAYLOAD(tmp, components, m, 0);
576
577 delete[] components;
578 return tmp;
579
580 } else {
581 return fs_reg(retype(brw_vec8_grf(regs[0], 0), type));
582 }
583 }
584
585 inline fs_reg
586 fetch_barycentric_reg(const brw::fs_builder &bld, uint8_t regs[2])
587 {
588 if (!regs[0])
589 return fs_reg();
590
591 const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, 2);
592 const brw::fs_builder hbld = bld.exec_all().group(8, 0);
593 const unsigned m = bld.dispatch_width() / hbld.dispatch_width();
594 fs_reg *const components = new fs_reg[2 * m];
595
596 for (unsigned c = 0; c < 2; c++) {
597 for (unsigned g = 0; g < m; g++)
598 components[c * m + g] = offset(brw_vec8_grf(regs[g / 2], 0),
599 hbld, c + 2 * (g % 2));
600 }
601
602 hbld.LOAD_PAYLOAD(tmp, components, 2 * m, 0);
603
604 delete[] components;
605 return tmp;
606 }
607
608 bool
609 lower_src_modifiers(fs_visitor *v, bblock_t *block, fs_inst *inst, unsigned i);
610 }
611
612 void shuffle_from_32bit_read(const brw::fs_builder &bld,
613 const fs_reg &dst,
614 const fs_reg &src,
615 uint32_t first_component,
616 uint32_t components);
617
618 fs_reg setup_imm_df(const brw::fs_builder &bld,
619 double v);
620
621 fs_reg setup_imm_b(const brw::fs_builder &bld,
622 int8_t v);
623
624 fs_reg setup_imm_ub(const brw::fs_builder &bld,
625 uint8_t v);
626
627 enum brw_barycentric_mode brw_barycentric_mode(enum glsl_interp_mode mode,
628 nir_intrinsic_op op);
629
630 uint32_t brw_fb_write_msg_control(const fs_inst *inst,
631 const struct brw_wm_prog_data *prog_data);
632
633
634 #endif /* BRW_FS_H */