i965: Move the back-end compiler to src/intel/compiler
/*
 * Copyright © 2010 - 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#pragma once

#include <stdio.h>
#include "common/gen_device_info.h"
#include "main/mtypes.h"
#include "main/macros.h"

#ifdef __cplusplus
extern "C" {
#endif

struct ra_regs;
struct nir_shader;
struct brw_program;
union gl_constant_value;
struct brw_compiler {
   const struct gen_device_info *devinfo;

   struct {
      struct ra_regs *regs;

      /**
       * Array of the ra classes for the unaligned contiguous register
       * block sizes used.
       */
      int *classes;

      /**
       * Mapping for register-allocated objects in *regs to the first
       * GRF for that object.
       */
      uint8_t *ra_reg_to_grf;
   } vec4_reg_set;

   struct {
      struct ra_regs *regs;

      /**
       * Array of the ra classes for the unaligned contiguous register
       * block sizes used, indexed by register size.
       */
      int classes[16];

      /**
       * Mapping from classes to ra_reg ranges.  Each of the per-size
       * classes corresponds to a range of ra_reg nodes.  This array stores
       * those ranges in the form of first ra_reg in each class and the
       * total number of ra_reg elements in the last array element.  This
       * way the range of the i'th class is given by:
       * [ class_to_ra_reg_range[i], class_to_ra_reg_range[i+1] )
       */
      int class_to_ra_reg_range[17];
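
      /* For example (an illustrative sketch, not from the original header),
       * walking every ra_reg node belonging to size class i uses the
       * half-open range described above:
       *
       *    for (int r = class_to_ra_reg_range[i];
       *         r < class_to_ra_reg_range[i + 1]; r++)
       *       process_ra_reg(r);   // process_ra_reg() is a hypothetical helper
       */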

      /**
       * Mapping for register-allocated objects in *regs to the first
       * GRF for that object.
       */
      uint8_t *ra_reg_to_grf;

      /**
       * ra class for the aligned pairs we use for PLN, which doesn't
       * appear in *classes.
       */
      int aligned_pairs_class;
   } fs_reg_sets[3];

   void (*shader_debug_log)(void *, const char *str, ...) PRINTFLIKE(2, 3);
   void (*shader_perf_log)(void *, const char *str, ...) PRINTFLIKE(2, 3);

   bool scalar_stage[MESA_SHADER_STAGES];
   struct gl_shader_compiler_options glsl_compiler_options[MESA_SHADER_STAGES];

   /**
    * Apply workarounds for SIN and COS output range problems.
    * This can negatively impact performance.
    */
   bool precise_trig;
};


/**
 * Program key structures.
 *
 * When drawing, we look for the currently bound shaders in the program
 * cache.  This is essentially a hash table lookup, and these are the keys.
 *
 * Sometimes OpenGL features specified as state need to be simulated via
 * shader code, due to a mismatch between the API and the hardware.  This
 * is often referred to as "non-orthogonal state" or "NOS".  We store NOS
 * in the program key so it's considered when searching for a program.  If
 * we haven't seen a particular combination before, we have to recompile a
 * new specialized version.
 *
 * Shader compilation should not look up state in gl_context directly, but
 * instead use the copy in the program key.  This guarantees recompiles will
 * happen correctly.
 *
 * @{
 */
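
/* A simplified sketch (not part of the original header) of the lookup this
 * describes: the driver fills a key struct from current GL state and uses
 * it as a hash-table key, so any difference in NOS produces a different key
 * and therefore a recompile.  brw_program_cache_search() and
 * recompile_and_insert() are hypothetical names for illustration.
 *
 *    struct brw_wm_prog_key key;
 *    memset(&key, 0, sizeof(key));   // zero padding so hashing/memcmp is stable
 *    key.nr_color_regions = num_render_targets;
 *    ... fill remaining NOS from GL state ...
 *    prog = brw_program_cache_search(cache, &key, sizeof(key));
 *    if (prog == NULL)
 *       prog = recompile_and_insert(cache, &key);
 */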

enum PACKED gen6_gather_sampler_wa {
   WA_SIGN = 1,   /* whether we need to sign extend */
   WA_8BIT = 2,   /* if we have an 8-bit format needing a workaround */
   WA_16BIT = 4,  /* if we have a 16-bit format needing a workaround */
};

/**
 * Sampler information needed by VS, WM, and GS program cache keys.
 */
struct brw_sampler_prog_key_data {
   /**
    * EXT_texture_swizzle and DEPTH_TEXTURE_MODE swizzles.
    */
   uint16_t swizzles[MAX_SAMPLERS];

   uint32_t gl_clamp_mask[3];

   /**
    * For RG32F, gather4's channel select is broken.
    */
   uint32_t gather_channel_quirk_mask;

   /**
    * Whether this sampler uses the compressed multisample surface layout.
    */
   uint32_t compressed_multisample_layout_mask;

   /**
    * Whether this sampler is using 16x multisampling.  If so, fetching from
    * this sampler will be handled with a different instruction, ld2dms_w
    * instead of ld2dms.
    */
   uint32_t msaa_16;

   /**
    * For Sandybridge, which shader workaround we need for gather quirks.
    */
   enum gen6_gather_sampler_wa gen6_gather_wa[MAX_SAMPLERS];

   /**
    * Texture units that have a YUV image bound.
    */
   uint32_t y_u_v_image_mask;
   uint32_t y_uv_image_mask;
   uint32_t yx_xuxv_image_mask;
};

/**
 * The VF can't natively handle certain types of attributes, such as GL_FIXED
 * or most 10_10_10_2 types.  These flags enable various VS workarounds to
 * "fix" attributes at the beginning of shaders.
 */
#define BRW_ATTRIB_WA_COMPONENT_MASK  7  /* mask for GL_FIXED scale channel count */
#define BRW_ATTRIB_WA_NORMALIZE       8  /* normalize in shader */
#define BRW_ATTRIB_WA_BGRA            16 /* swap r/b channels in shader */
#define BRW_ATTRIB_WA_SIGN            32 /* interpret as signed in shader */
#define BRW_ATTRIB_WA_SCALE           64 /* interpret as scaled in shader */
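
/* Illustrative examples (not from the original header): a signed,
 * normalized 10_10_10_2 attribute would be tagged
 *
 *    wa_flags = BRW_ATTRIB_WA_SIGN | BRW_ATTRIB_WA_NORMALIZE;
 *
 * while a three-component GL_FIXED attribute stores its channel count in
 * the low bits ((wa_flags & BRW_ATTRIB_WA_COMPONENT_MASK) == 3) so the VS
 * prolog knows how many channels to rescale.
 */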

/** The program key for Vertex Shaders. */
struct brw_vs_prog_key {
   unsigned program_string_id;

   /**
    * Per-attribute workaround flags
    *
    * For each attribute, a combination of BRW_ATTRIB_WA_*.
    */
   uint8_t gl_attrib_wa_flags[VERT_ATTRIB_MAX];

   bool copy_edgeflag:1;

   bool clamp_vertex_color:1;

   /**
    * How many user clipping planes are being uploaded to the vertex shader as
    * push constants.
    *
    * These are used for lowering legacy gl_ClipVertex/gl_Position clipping to
    * clip distances.
    */
   unsigned nr_userclip_plane_consts:4;

   /**
    * For pre-Gen6 hardware, a bitfield indicating which texture coordinates
    * are going to be replaced with point coordinates (as a consequence of a
    * call to glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE)).  Because
    * our SF thread requires exact matching between VS outputs and FS inputs,
    * these texture coordinates will need to be unconditionally included in
    * the VUE, even if they aren't written by the vertex shader.
    */
   uint8_t point_coord_replace;

   struct brw_sampler_prog_key_data tex;
};

/** The program key for Tessellation Control Shaders. */
struct brw_tcs_prog_key
{
   unsigned program_string_id;

   GLenum tes_primitive_mode;

   unsigned input_vertices;

   /** A bitfield of per-patch outputs written. */
   uint32_t patch_outputs_written;

   /** A bitfield of per-vertex outputs written. */
   uint64_t outputs_written;

   bool quads_workaround;

   struct brw_sampler_prog_key_data tex;
};

/** The program key for Tessellation Evaluation Shaders. */
struct brw_tes_prog_key
{
   unsigned program_string_id;

   /** A bitfield of per-patch inputs read. */
   uint32_t patch_inputs_read;

   /** A bitfield of per-vertex inputs read. */
   uint64_t inputs_read;

   struct brw_sampler_prog_key_data tex;
};

/** The program key for Geometry Shaders. */
struct brw_gs_prog_key
{
   unsigned program_string_id;

   struct brw_sampler_prog_key_data tex;
};

/* A big lookup table is used to figure out which and how many
 * additional regs will be inserted before the main payload in the WM
 * program execution.  These mainly relate to depth and stencil
 * processing and the early-depth-test optimization.
 */
enum brw_wm_iz_bits {
   BRW_WM_IZ_PS_KILL_ALPHATEST_BIT     = 0x1,
   BRW_WM_IZ_PS_COMPUTES_DEPTH_BIT     = 0x2,
   BRW_WM_IZ_DEPTH_WRITE_ENABLE_BIT    = 0x4,
   BRW_WM_IZ_DEPTH_TEST_ENABLE_BIT     = 0x8,
   BRW_WM_IZ_STENCIL_WRITE_ENABLE_BIT  = 0x10,
   BRW_WM_IZ_STENCIL_TEST_ENABLE_BIT   = 0x20,
   BRW_WM_IZ_BIT_MAX                   = 0x40
};

enum brw_wm_aa_enable {
   BRW_WM_AA_NEVER,
   BRW_WM_AA_SOMETIMES,
   BRW_WM_AA_ALWAYS
};

/** The program key for Fragment/Pixel Shaders. */
struct brw_wm_prog_key {
   /* Some collection of BRW_WM_IZ_* */
   uint8_t iz_lookup;
   bool stats_wm:1;
   bool flat_shade:1;
   unsigned nr_color_regions:5;
   bool replicate_alpha:1;
   bool clamp_fragment_color:1;
   bool persample_interp:1;
   bool multisample_fbo:1;
   enum brw_wm_aa_enable line_aa:2;
   bool high_quality_derivatives:1;
   bool force_dual_color_blend:1;
   bool coherent_fb_fetch:1;

   uint16_t drawable_height;
   uint64_t input_slots_valid;
   unsigned program_string_id;
   GLenum alpha_test_func;   /**< For Gen4/5 MRT alpha test */
   float alpha_test_ref;

   struct brw_sampler_prog_key_data tex;
};

struct brw_cs_prog_key {
   uint32_t program_string_id;
   struct brw_sampler_prog_key_data tex;
};

/*
 * Image metadata structure as laid out in the shader parameter
 * buffer.  Entries have to be 16B-aligned for the vec4 back-end to be
 * able to use them.  That's okay because the padding and any unused
 * entries [most of them except when we're doing untyped surface
 * access] will be removed by the uniform packing pass.
 */
#define BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET      0
#define BRW_IMAGE_PARAM_OFFSET_OFFSET           4
#define BRW_IMAGE_PARAM_SIZE_OFFSET             8
#define BRW_IMAGE_PARAM_STRIDE_OFFSET           12
#define BRW_IMAGE_PARAM_TILING_OFFSET           16
#define BRW_IMAGE_PARAM_SWIZZLING_OFFSET        20
#define BRW_IMAGE_PARAM_SIZE                    24

struct brw_image_param {
   /** Surface binding table index. */
   uint32_t surface_idx;

   /** Offset applied to the X and Y surface coordinates. */
   uint32_t offset[2];

   /** Surface X, Y and Z dimensions. */
   uint32_t size[3];

   /** X-stride in bytes, Y-stride in pixels, horizontal slice stride in
    * pixels, vertical slice stride in pixels.
    */
   uint32_t stride[4];

   /** Log2 of the tiling modulus in the X, Y and Z dimension. */
   uint32_t tiling[3];

   /**
    * Right shift to apply for bit 6 address swizzling.  Two different
    * swizzles can be specified and will be applied one after the other.  The
    * resulting address will be:
    *
    *  addr' = addr ^ ((1 << 6) & ((addr >> swizzling[0]) ^
    *                              (addr >> swizzling[1])))
    *
    * Use \c 0xff if any of the swizzles is not required.
    */
   uint32_t swizzling[2];
};
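
/* A minimal sketch (hypothetical helper, not from the original header) of
 * the swizzle formula above in C.  A disabled swizzle (0xff) would need an
 * explicit check here, since shifting a uint32_t by 0xff is undefined in C:
 *
 *    static uint32_t
 *    apply_bit6_swizzle(uint32_t addr, const uint32_t swizzling[2])
 *    {
 *       // XOR bit 6 of the address with the selected higher address bits.
 *       uint32_t bits = (addr >> swizzling[0]) ^ (addr >> swizzling[1]);
 *       return addr ^ ((1u << 6) & bits);
 *    }
 */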

/** Max number of render targets in a shader */
#define BRW_MAX_DRAW_BUFFERS 8

/**
 * Max number of binding table entries used for stream output.
 *
 * From the OpenGL 3.0 spec, table 6.44 (Transform Feedback State), the
 * minimum value of MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS is 64.
 *
 * On Gen6, the size of transform feedback data is limited not by the number
 * of components but by the number of binding table entries we set aside.  We
 * use one binding table entry for a float, one entry for a vector, and one
 * entry per matrix column.  Since the only way we can communicate our
 * transform feedback capabilities to the client is via
 * MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS, we need to plan for the
 * worst case, in which all the varyings are floats, so we use up one binding
 * table entry per component.  Therefore we need to set aside at least 64
 * binding table entries for use by transform feedback.
 *
 * Note: since we don't currently pack varyings, it is currently impossible
 * for the client to actually use up all of these binding table entries--if
 * all of their varyings were floats, they would run out of varying slots and
 * fail to link.  But that's a bug, so it seems prudent to go ahead and
 * allocate the number of binding table entries we will need once the bug is
 * fixed.
 */
#define BRW_MAX_SOL_BINDINGS 64

/**
 * Binding table index for the first gen6 SOL binding.
 */
#define BRW_GEN6_SOL_BINDING_START 0

/**
 * Stride in bytes between shader_time entries.
 *
 * We separate entries by a cacheline to reduce traffic between EUs writing to
 * different entries.
 */
#define BRW_SHADER_TIME_STRIDE 64

struct brw_stage_prog_data {
   struct {
      /** size of our binding table. */
      uint32_t size_bytes;

      /** @{
       * surface indices for the various groups of surfaces
       */
      uint32_t pull_constants_start;
      uint32_t texture_start;
      uint32_t gather_texture_start;
      uint32_t ubo_start;
      uint32_t ssbo_start;
      uint32_t abo_start;
      uint32_t image_start;
      uint32_t shader_time_start;
      uint32_t plane_start[3];
      /** @} */
   } binding_table;

   GLuint nr_params;       /**< number of float params/constants */
   GLuint nr_pull_params;
   unsigned nr_image_params;

   unsigned curb_read_length;
   unsigned total_scratch;
   unsigned total_shared;

   /**
    * Register where the thread expects to find input data from the URB
    * (typically uniforms, followed by vertex or fragment attributes).
    */
   unsigned dispatch_grf_start_reg;

   bool use_alt_mode; /**< Use ALT floating point mode?  Otherwise, IEEE. */

   /* Pointers to tracked values (only valid once
    * _mesa_load_state_parameters has been called at runtime).
    */
   const union gl_constant_value **param;
   const union gl_constant_value **pull_param;

   /** Image metadata passed to the shader as uniforms. */
   struct brw_image_param *image_param;
};

static inline void
brw_mark_surface_used(struct brw_stage_prog_data *prog_data,
                      unsigned surf_index)
{
   /* A binding table index is 8 bits and the top 3 values are reserved for
    * special things (stateless and SLM).
    */
   assert(surf_index <= 252);

   prog_data->binding_table.size_bytes =
      MAX2(prog_data->binding_table.size_bytes, (surf_index + 1) * 4);
}
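
/* An illustrative usage sketch (not from the original header): whenever a
 * stage assigns a binding table slot, it records the high-water mark so the
 * binding table is sized to cover it.
 *
 *    uint32_t surf = prog_data->binding_table.texture_start + tex_unit;
 *    brw_mark_surface_used(prog_data, surf);
 *    // size_bytes is now at least (surf + 1) * 4, i.e. one 32-bit binding
 *    // table entry per surface index up to and including surf.
 */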

/* Data about a particular attempt to compile a program.  Note that
 * there can be many of these, one for each GL state corresponding to a
 * different brw_wm_prog_key struct, with different compiled programs.
 */
struct brw_wm_prog_data {
   struct brw_stage_prog_data base;

   GLuint num_varying_inputs;

   uint8_t reg_blocks_0;
   uint8_t reg_blocks_2;

   uint8_t dispatch_grf_start_reg_2;
   uint32_t prog_offset_2;

   struct {
      /** @{
       * surface indices for the WM-specific surfaces
       */
      uint32_t render_target_start;
      uint32_t render_target_read_start;
      /** @} */
   } binding_table;

   uint8_t computed_depth_mode;
   bool computed_stencil;

   bool early_fragment_tests;
   bool post_depth_coverage;
   bool inner_coverage;
   bool dispatch_8;
   bool dispatch_16;
   bool dual_src_blend;
   bool persample_dispatch;
   bool uses_pos_offset;
   bool uses_omask;
   bool uses_kill;
   bool uses_src_depth;
   bool uses_src_w;
   bool uses_sample_mask;
   bool has_side_effects;
   bool pulls_bary;

   bool contains_flat_varying;
   bool contains_noperspective_varying;

   /**
    * Mask of which interpolation modes are required by the fragment shader.
    * Used in hardware setup on gen6+.
    */
   uint32_t barycentric_interp_modes;

   /**
    * Mask of which FS inputs are marked flat by the shader source.  This is
    * needed for setting up 3DSTATE_SF/SBE.
    */
   uint32_t flat_inputs;

   /* Mapping of VUE slots to interpolation modes.
    * Used by the Gen4-5 clip/sf/wm stages.
    */
   unsigned char interp_mode[65]; /* BRW_VARYING_SLOT_COUNT */

   /**
    * Map from gl_varying_slot to the position within the FS setup data
    * payload where the varying's attribute vertex deltas should be delivered.
    * For varying slots that are not used by the FS, the value is -1.
    */
   int urb_setup[VARYING_SLOT_MAX];
};

struct brw_push_const_block {
   unsigned dwords; /* Dword count, not reg aligned */
   unsigned regs;
   unsigned size; /* Bytes, register aligned */
};

struct brw_cs_prog_data {
   struct brw_stage_prog_data base;

   GLuint dispatch_grf_start_reg_16;
   unsigned local_size[3];
   unsigned simd_size;
   unsigned threads;
   bool uses_barrier;
   bool uses_num_work_groups;
   int thread_local_id_index;

   struct {
      struct brw_push_const_block cross_thread;
      struct brw_push_const_block per_thread;
      struct brw_push_const_block total;
   } push;

   struct {
      /** @{
       * surface indices for the CS-specific surfaces
       */
      uint32_t work_groups_start;
      /** @} */
   } binding_table;
};

/**
 * Enum representing the i965-specific vertex results that don't correspond
 * exactly to any element of gl_varying_slot.  The values of this enum are
 * assigned such that they don't conflict with gl_varying_slot.
 */
typedef enum
{
   BRW_VARYING_SLOT_NDC = VARYING_SLOT_MAX,
   BRW_VARYING_SLOT_PAD,
   /**
    * Technically this is not a varying but just a placeholder that
    * compile_sf_prog() inserts into its VUE map to cause the gl_PointCoord
    * builtin variable to be compiled correctly.  See compile_sf_prog() for
    * more info.
    */
   BRW_VARYING_SLOT_PNTC,
   BRW_VARYING_SLOT_COUNT
} brw_varying_slot;

/**
 * We always program SF to start reading at an offset of 1 (2 varying slots)
 * from the start of the vertex URB entry.  This causes it to skip:
 * - VARYING_SLOT_PSIZ and BRW_VARYING_SLOT_NDC on gen4-5
 * - VARYING_SLOT_PSIZ and VARYING_SLOT_POS on gen6+
 */
#define BRW_SF_URB_ENTRY_READ_OFFSET 1

/**
 * Bitmask indicating which fragment shader inputs represent varyings (and
 * hence have to be delivered to the fragment shader by the SF/SBE stage).
 */
#define BRW_FS_VARYING_INPUT_MASK \
   (BITFIELD64_RANGE(0, VARYING_SLOT_MAX) & \
    ~VARYING_BIT_POS & ~VARYING_BIT_FACE)

/**
 * Data structure recording the relationship between the gl_varying_slot enum
 * and "slots" within the vertex URB entry (VUE).  A "slot" is defined as a
 * single octaword within the VUE (128 bits).
 *
 * Note that each BRW register contains 256 bits (2 octawords), so when
 * accessing the VUE in URB_NOSWIZZLE mode, each register corresponds to two
 * consecutive VUE slots.  When accessing the VUE in URB_INTERLEAVED mode (as
 * in a vertex shader), each register corresponds to a single VUE slot, since
 * it contains data for two separate vertices.
 */
struct brw_vue_map {
   /**
    * Bitfield representing all varying slots that are (a) stored in this VUE
    * map, and (b) actually written by the shader.  Does not include any of
    * the additional varying slots defined in brw_varying_slot.
    */
   uint64_t slots_valid;

   /**
    * Is this VUE map for a separate shader pipeline?
    *
    * Separable programs (GL_ARB_separate_shader_objects) can be mixed and matched
    * without the linker having a chance to dead code eliminate unused varyings.
    *
    * This means that we have to use a fixed slot layout, based on the output's
    * location field, rather than assigning slots in a compact contiguous block.
    */
   bool separate;

   /**
    * Map from gl_varying_slot value to VUE slot.  For gl_varying_slots that are
    * not stored in a slot (because they are not written, or because
    * additional processing is applied before storing them in the VUE), the
    * value is -1.
    */
   signed char varying_to_slot[VARYING_SLOT_TESS_MAX];

   /**
    * Map from VUE slot to gl_varying_slot value.  For slots that do not
    * directly correspond to a gl_varying_slot, the value comes from
    * brw_varying_slot.
    *
    * For slots that are not in use, the value is BRW_VARYING_SLOT_PAD.
    */
   signed char slot_to_varying[VARYING_SLOT_TESS_MAX];

   /**
    * Total number of VUE slots in use
    */
   int num_slots;

   /**
    * Number of per-patch VUE slots.  Only valid for tessellation control
    * shader outputs and tessellation evaluation shader inputs.
    */
   int num_per_patch_slots;

   /**
    * Number of per-vertex VUE slots.  Only valid for tessellation control
    * shader outputs and tessellation evaluation shader inputs.
    */
   int num_per_vertex_slots;
};

void brw_print_vue_map(FILE *fp, const struct brw_vue_map *vue_map);

/**
 * Convert a VUE slot number into a byte offset within the VUE.
 */
static inline GLuint brw_vue_slot_to_offset(GLuint slot)
{
   return 16*slot;
}

/**
 * Convert a vertex output (brw_varying_slot) into a byte offset within the
 * VUE.
 */
static inline
GLuint brw_varying_to_offset(const struct brw_vue_map *vue_map, GLuint varying)
{
   return brw_vue_slot_to_offset(vue_map->varying_to_slot[varying]);
}
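
/* For example (illustrative, not from the original header): a varying
 * assigned to VUE slot 3 lives 3 * 16 = 48 bytes into the vertex URB entry,
 * since each slot is one 16-byte octaword:
 *
 *    GLuint off = brw_varying_to_offset(&vue_map, VARYING_SLOT_COL0);
 *    // equals 16 * vue_map.varying_to_slot[VARYING_SLOT_COL0]
 */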

void brw_compute_vue_map(const struct gen_device_info *devinfo,
                         struct brw_vue_map *vue_map,
                         uint64_t slots_valid,
                         bool separate_shader);

void brw_compute_tess_vue_map(struct brw_vue_map *const vue_map,
                              uint64_t slots_valid,
                              uint32_t is_patch);
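
/* A minimal usage sketch (illustrative, not from the original header;
 * assumes devinfo and the shader's written-output bitfield are at hand):
 *
 *    struct brw_vue_map vue_map;
 *    uint64_t written = VARYING_BIT_POS | VARYING_BIT_COL0;
 *    brw_compute_vue_map(devinfo, &vue_map, written, false);
 *    // false: not a separable program, so slots are packed contiguously
 *    int slot = vue_map.varying_to_slot[VARYING_SLOT_COL0];
 */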

/* brw_interpolation_map.c */
void brw_setup_vue_interpolation(struct brw_vue_map *vue_map,
                                 struct nir_shader *nir,
                                 struct brw_wm_prog_data *prog_data,
                                 const struct gen_device_info *devinfo);

enum shader_dispatch_mode {
   DISPATCH_MODE_4X1_SINGLE = 0,
   DISPATCH_MODE_4X2_DUAL_INSTANCE = 1,
   DISPATCH_MODE_4X2_DUAL_OBJECT = 2,
   DISPATCH_MODE_SIMD8 = 3,
};

/**
 * @defgroup Tessellator parameter enumerations.
 *
 * These correspond to the hardware values in 3DSTATE_TE, and are provided
 * as part of the tessellation evaluation shader.
 *
 * @{
 */
enum brw_tess_partitioning {
   BRW_TESS_PARTITIONING_INTEGER         = 0,
   BRW_TESS_PARTITIONING_ODD_FRACTIONAL  = 1,
   BRW_TESS_PARTITIONING_EVEN_FRACTIONAL = 2,
};

enum brw_tess_output_topology {
   BRW_TESS_OUTPUT_TOPOLOGY_POINT   = 0,
   BRW_TESS_OUTPUT_TOPOLOGY_LINE    = 1,
   BRW_TESS_OUTPUT_TOPOLOGY_TRI_CW  = 2,
   BRW_TESS_OUTPUT_TOPOLOGY_TRI_CCW = 3,
};

enum brw_tess_domain {
   BRW_TESS_DOMAIN_QUAD    = 0,
   BRW_TESS_DOMAIN_TRI     = 1,
   BRW_TESS_DOMAIN_ISOLINE = 2,
};
/** @} */

struct brw_vue_prog_data {
   struct brw_stage_prog_data base;
   struct brw_vue_map vue_map;

   /** Should the hardware deliver input VUE handles for URB pull loads? */
   bool include_vue_handles;

   GLuint urb_read_length;
   GLuint total_grf;

   uint32_t clip_distance_mask;
   uint32_t cull_distance_mask;

   /* Used for calculating urb partitions.  In the VS, this is the size of the
    * URB entry used for both input and output to the thread.  In the GS, this
    * is the size of the URB entry used for output.
    */
   GLuint urb_entry_size;

   enum shader_dispatch_mode dispatch_mode;
};

struct brw_vs_prog_data {
   struct brw_vue_prog_data base;

   GLbitfield64 inputs_read;
   GLbitfield64 double_inputs_read;

   unsigned nr_attributes;
   unsigned nr_attribute_slots;

   bool uses_vertexid;
   bool uses_instanceid;
   bool uses_basevertex;
   bool uses_baseinstance;
   bool uses_drawid;
};

struct brw_tcs_prog_data
{
   struct brw_vue_prog_data base;

   /** Number of vertices in the output patch */
   int instances;
};


struct brw_tes_prog_data
{
   struct brw_vue_prog_data base;

   enum brw_tess_partitioning partitioning;
   enum brw_tess_output_topology output_topology;
   enum brw_tess_domain domain;
};

struct brw_gs_prog_data
{
   struct brw_vue_prog_data base;

   unsigned vertices_in;

   /**
    * Size of an output vertex, measured in HWORDS (32 bytes).
    */
   unsigned output_vertex_size_hwords;

   unsigned output_topology;

   /**
    * Size of the control data (cut bits or StreamID bits), in hwords (32
    * bytes).  0 if there is no control data.
    */
   unsigned control_data_header_size_hwords;

   /**
    * Format of the control data (either GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID
    * if the control data is StreamID bits, or
    * GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT if the control data is cut bits).
    * Ignored if control_data_header_size is 0.
    */
   unsigned control_data_format;

   bool include_primitive_id;

   /**
    * The number of vertices emitted, if constant - otherwise -1.
    */
   int static_vertex_count;

   int invocations;

   /**
    * Gen6: Provoking vertex convention for odd-numbered triangles
    * in tristrips.
    */
   GLuint pv_first:1;

   /**
    * Gen6: Number of varyings that are output to transform feedback.
    */
   GLuint num_transform_feedback_bindings:7; /* 0-BRW_MAX_SOL_BINDINGS */

   /**
    * Gen6: Map from the index of a transform feedback binding table entry to the
    * gl_varying_slot that should be streamed out through that binding table
    * entry.
    */
   unsigned char transform_feedback_bindings[64 /* BRW_MAX_SOL_BINDINGS */];

   /**
    * Gen6: Map from the index of a transform feedback binding table entry to the
    * swizzles that should be used when streaming out data through that
    * binding table entry.
    */
   unsigned char transform_feedback_swizzles[64 /* BRW_MAX_SOL_BINDINGS */];
};

#define DEFINE_PROG_DATA_DOWNCAST(stage)                       \
static inline struct brw_##stage##_prog_data *                 \
brw_##stage##_prog_data(struct brw_stage_prog_data *prog_data) \
{                                                              \
   return (struct brw_##stage##_prog_data *) prog_data;        \
}
DEFINE_PROG_DATA_DOWNCAST(vue)
DEFINE_PROG_DATA_DOWNCAST(vs)
DEFINE_PROG_DATA_DOWNCAST(tcs)
DEFINE_PROG_DATA_DOWNCAST(tes)
DEFINE_PROG_DATA_DOWNCAST(gs)
DEFINE_PROG_DATA_DOWNCAST(wm)
DEFINE_PROG_DATA_DOWNCAST(cs)
DEFINE_PROG_DATA_DOWNCAST(ff_gs)
DEFINE_PROG_DATA_DOWNCAST(clip)
DEFINE_PROG_DATA_DOWNCAST(sf)
#undef DEFINE_PROG_DATA_DOWNCAST
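
/* An illustrative sketch (not from the original header) of how these
 * downcasts are used: given a generic prog_data known to belong to a
 * fragment shader,
 *
 *    struct brw_wm_prog_data *wm_prog_data = brw_wm_prog_data(prog_data);
 *    if (wm_prog_data->dispatch_16)
 *       ... program the SIMD16 kernel offset ...
 *
 * The ff_gs, clip, and sf variants refer to prog_data structs defined
 * elsewhere in the driver.
 */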

/** @} */

struct brw_compiler *
brw_compiler_create(void *mem_ctx, const struct gen_device_info *devinfo);

/**
 * Compile a vertex shader.
 *
 * Returns the final assembly and the program's size.
 */
const unsigned *
brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
               void *mem_ctx,
               const struct brw_vs_prog_key *key,
               struct brw_vs_prog_data *prog_data,
               const struct nir_shader *shader,
               gl_clip_plane *clip_planes,
               bool use_legacy_snorm_formula,
               int shader_time_index,
               unsigned *final_assembly_size,
               char **error_str);
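
/* A minimal calling sketch (illustrative, not from the original header):
 * devinfo, log_data, and the NIR shader are assumed to exist, and mem_ctx
 * is a ralloc memory context.  A shader_time_index of -1 means shader_time
 * profiling is not in use.
 *
 *    struct brw_compiler *compiler = brw_compiler_create(NULL, devinfo);
 *    struct brw_vs_prog_key key = { .program_string_id = id };
 *    struct brw_vs_prog_data prog_data = { { 0 } };
 *    unsigned size;
 *    char *error = NULL;
 *    const unsigned *assembly =
 *       brw_compile_vs(compiler, log_data, mem_ctx, &key, &prog_data,
 *                      nir, NULL, false, -1, &size, &error);
 *    if (assembly == NULL)
 *       fprintf(stderr, "VS compile failed: %s\n", error);
 */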

/**
 * Compile a tessellation control shader.
 *
 * Returns the final assembly and the program's size.
 */
const unsigned *
brw_compile_tcs(const struct brw_compiler *compiler,
                void *log_data,
                void *mem_ctx,
                const struct brw_tcs_prog_key *key,
                struct brw_tcs_prog_data *prog_data,
                const struct nir_shader *nir,
                int shader_time_index,
                unsigned *final_assembly_size,
                char **error_str);

/**
 * Compile a tessellation evaluation shader.
 *
 * Returns the final assembly and the program's size.
 */
const unsigned *
brw_compile_tes(const struct brw_compiler *compiler, void *log_data,
                void *mem_ctx,
                const struct brw_tes_prog_key *key,
                const struct brw_vue_map *input_vue_map,
                struct brw_tes_prog_data *prog_data,
                const struct nir_shader *shader,
                struct gl_program *prog,
                int shader_time_index,
                unsigned *final_assembly_size,
                char **error_str);

/**
 * Compile a geometry shader.
 *
 * Returns the final assembly and the program's size.
 */
const unsigned *
brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
               void *mem_ctx,
               const struct brw_gs_prog_key *key,
               struct brw_gs_prog_data *prog_data,
               const struct nir_shader *shader,
               struct gl_program *prog,
               int shader_time_index,
               unsigned *final_assembly_size,
               char **error_str);

/**
 * Compile a fragment shader.
 *
 * Returns the final assembly and the program's size.
 */
const unsigned *
brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
               void *mem_ctx,
               const struct brw_wm_prog_key *key,
               struct brw_wm_prog_data *prog_data,
               const struct nir_shader *shader,
               struct gl_program *prog,
               int shader_time_index8,
               int shader_time_index16,
               bool allow_spilling,
               bool use_rep_send, struct brw_vue_map *vue_map,
               unsigned *final_assembly_size,
               char **error_str);

/**
 * Compile a compute shader.
 *
 * Returns the final assembly and the program's size.
 */
const unsigned *
brw_compile_cs(const struct brw_compiler *compiler, void *log_data,
               void *mem_ctx,
               const struct brw_cs_prog_key *key,
               struct brw_cs_prog_data *prog_data,
               const struct nir_shader *shader,
               int shader_time_index,
               unsigned *final_assembly_size,
               char **error_str);

static inline uint32_t
encode_slm_size(unsigned gen, uint32_t bytes)
{
   uint32_t slm_size = 0;

   /* Shared Local Memory is specified as powers of two, and encoded in
    * INTERFACE_DESCRIPTOR_DATA with the following representations:
    *
    * Size   | 0 kB | 1 kB | 2 kB | 4 kB | 8 kB | 16 kB | 32 kB | 64 kB |
    * -------------------------------------------------------------------
    * Gen7-8 |    0 | none | none |    1 |    2 |     4 |     8 |    16 |
    * -------------------------------------------------------------------
    * Gen9+  |    0 |    1 |    2 |    3 |    4 |     5 |     6 |     7 |
    */
   assert(bytes <= 64 * 1024);

   if (bytes > 0) {
      /* Shared Local Memory Size is specified as powers of two. */
      slm_size = util_next_power_of_two(bytes);

      if (gen >= 9) {
         /* Use a minimum of 1kB; turn an exponent of 10 (1 kB) into 1. */
         slm_size = ffs(MAX2(slm_size, 1024)) - 10;
      } else {
         /* Use a minimum of 4kB; convert to the pre-Gen9 representation. */
         slm_size = MAX2(slm_size, 4096) / 4096;
      }
   }

   return slm_size;
}
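
/* Worked examples (illustrative, not from the original header): a request
 * for 5000 bytes rounds up to the 8 kB power-of-two bucket, so
 *
 *    encode_slm_size(9, 5000) == 4   // Gen9+:  ffs(8192) - 10 = 14 - 10
 *    encode_slm_size(7, 5000) == 2   // Gen7-8: 8192 / 4096
 *
 * which matches the 8 kB column of the table above.
 */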

/**
 * Return true if the given shader stage is dispatched contiguously by the
 * relevant fixed function starting from channel 0 of the SIMD thread, which
 * implies that the dispatch mask of a thread can be assumed to have the form
 * '2^n - 1' for some n.
 */
static inline bool
brw_stage_has_packed_dispatch(const struct gen_device_info *devinfo,
                              gl_shader_stage stage,
                              const struct brw_stage_prog_data *prog_data)
{
   /* The code below makes assumptions about the hardware's thread dispatch
    * behavior that could be proven wrong in future generations -- Make sure
    * to do a full test run with brw_fs_test_dispatch_packing() hooked up to
    * the NIR front-end before changing this assertion.
    */
   assert(devinfo->gen <= 9);

   switch (stage) {
   case MESA_SHADER_FRAGMENT: {
      /* The PSD discards subspans coming in with no lit samples, which in the
       * per-pixel shading case implies that each subspan will either be fully
       * lit (due to the VMask being used to allow derivative computations),
       * or not dispatched at all.  In per-sample dispatch mode individual
       * samples from the same subspan have a fixed relative location within
       * the SIMD thread, so dispatch of unlit samples cannot be avoided in
       * general and we should return false.
       */
      const struct brw_wm_prog_data *wm_prog_data =
         (const struct brw_wm_prog_data *)prog_data;
      return !wm_prog_data->persample_dispatch;
   }
   case MESA_SHADER_COMPUTE:
      /* Compute shaders will be spawned with either a fully enabled dispatch
       * mask or with whatever bottom/right execution mask was given to the
       * GPGPU walker command to be used along the workgroup edges -- In both
       * cases the dispatch mask is required to be tightly packed for our
       * invocation index calculations to work.
       */
      return true;
   default:
      /* Most remaining fixed functions are limited to use a packed dispatch
       * mask due to the hardware representation of the dispatch mask as a
       * single counter representing the number of enabled channels.
       */
      return true;
   }
}

#ifdef __cplusplus
} /* extern "C" */
#endif