1 /*
2 * Copyright © 2010 - 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #pragma once
25
26 #include <stdio.h>
27 #include "brw_device_info.h"
28 #include "main/mtypes.h"
29
30 #ifdef __cplusplus
31 extern "C" {
32 #endif
33
34 struct ra_regs;
35 struct nir_shader;
36 struct brw_geometry_program;
37 union gl_constant_value;
38
39 struct brw_compiler {
40 const struct brw_device_info *devinfo;
41
42 struct {
43 struct ra_regs *regs;
44
45 /**
46 * Array of the ra classes for the unaligned contiguous register
47 * block sizes used.
48 */
49 int *classes;
50
51 /**
52 * Mapping for register-allocated objects in *regs to the first
53 * GRF for that object.
54 */
55 uint8_t *ra_reg_to_grf;
56 } vec4_reg_set;
57
58 struct {
59 struct ra_regs *regs;
60
61 /**
62 * Array of the ra classes for the unaligned contiguous register
63 * block sizes used, indexed by register size.
64 */
65 int classes[16];
66
67 /**
68 * Mapping from classes to ra_reg ranges. Each of the per-size
69 * classes corresponds to a range of ra_reg nodes. This array stores
70 * those ranges in the form of first ra_reg in each class and the
71 * total number of ra_reg elements in the last array element. This
72 * way the range of the i'th class is given by:
73 * [ class_to_ra_reg_range[i], class_to_ra_reg_range[i+1] )
74 */
75 int class_to_ra_reg_range[17];
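      /* A minimal sketch of how this mapping can be consumed (illustrative
       * only; the loop is an assumption about usage, not part of the
       * interface):
       *
       *    int first = class_to_ra_reg_range[i];
       *    int count = class_to_ra_reg_range[i + 1] - first;
       *    for (int r = first; r < first + count; r++) {
       *       // r is an ra_reg node belonging to size class i
       *    }
       */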
76
77 /**
78 * Mapping for register-allocated objects in *regs to the first
79 * GRF for that object.
80 */
81 uint8_t *ra_reg_to_grf;
82
83 /**
84 * ra class for the aligned pairs we use for PLN, which doesn't
85 * appear in *classes.
86 */
87 int aligned_pairs_class;
88 } fs_reg_sets[2];
89
90 void (*shader_debug_log)(void *, const char *str, ...) PRINTFLIKE(2, 3);
91 void (*shader_perf_log)(void *, const char *str, ...) PRINTFLIKE(2, 3);
92
93 bool scalar_stage[MESA_SHADER_STAGES];
94 struct gl_shader_compiler_options glsl_compiler_options[MESA_SHADER_STAGES];
95 };
96
97 struct brw_compiler *
98 brw_compiler_create(void *mem_ctx, const struct brw_device_info *devinfo);
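/* A minimal usage sketch (the callback, mem_ctx, and devinfo below are
 * assumptions for illustration; any PRINTFLIKE(2, 3)-compatible functions can
 * be installed as shader_debug_log/shader_perf_log):
 *
 *    #include <stdarg.h>
 *
 *    static void
 *    example_debug_log(void *data, const char *fmt, ...)
 *    {
 *       va_list args;
 *       va_start(args, fmt);
 *       vfprintf(stderr, fmt, args);
 *       va_end(args);
 *    }
 *
 *    struct brw_compiler *compiler = brw_compiler_create(mem_ctx, devinfo);
 *    compiler->shader_debug_log = example_debug_log;
 *    compiler->shader_perf_log = example_debug_log;
 */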
99
100
101 /**
102 * Program key structures.
103 *
104 * When drawing, we look for the currently bound shaders in the program
105 * cache. This is essentially a hash table lookup, and these are the keys.
106 *
107 * Sometimes OpenGL features specified as state need to be simulated via
108 * shader code, due to a mismatch between the API and the hardware. This
109  * is often referred to as "non-orthogonal state" or "NOS".  We store NOS
110 * in the program key so it's considered when searching for a program. If
111 * we haven't seen a particular combination before, we have to recompile a
112 * new specialized version.
113 *
114 * Shader compilation should not look up state in gl_context directly, but
115 * instead use the copy in the program key. This guarantees recompiles will
116 * happen correctly.
117 *
118 * @{
119 */
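/* A minimal sketch of the lookup idea described above (the cache helpers
 * named here are hypothetical, for illustration only):
 *
 *    struct brw_vs_prog_key key;
 *    memset(&key, 0, sizeof(key));        // zero padding so the key hashes/compares cleanly
 *    key.program_string_id = prog_id;     // identifies the shader source
 *    key.clamp_vertex_color = ctx_clamp;  // NOS copied out of GL state
 *
 *    if (!example_cache_lookup(cache, &key, sizeof(key), &prog_offset))
 *       example_recompile(brw, &key);     // first time we see this combination
 */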
120
121 enum PACKED gen6_gather_sampler_wa {
122 WA_SIGN = 1, /* whether we need to sign extend */
123    WA_8BIT = 2,      /* if we have an 8-bit format needing the workaround */
124    WA_16BIT = 4,     /* if we have a 16-bit format needing the workaround */
125 };
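/* These values are bit flags, so one sampler can need several fix-ups at
 * once, e.g. (illustrative): tex.gen6_gather_wa[i] = WA_SIGN | WA_8BIT;
 */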
126
127 /**
128 * Sampler information needed by VS, WM, and GS program cache keys.
129 */
130 struct brw_sampler_prog_key_data {
131 /**
132 * EXT_texture_swizzle and DEPTH_TEXTURE_MODE swizzles.
133 */
134 uint16_t swizzles[MAX_SAMPLERS];
135
136 uint32_t gl_clamp_mask[3];
137
138 /**
139 * For RG32F, gather4's channel select is broken.
140 */
141 uint32_t gather_channel_quirk_mask;
142
143 /**
144 * Whether this sampler uses the compressed multisample surface layout.
145 */
146 uint32_t compressed_multisample_layout_mask;
147
148 /**
149     * Whether this sampler is using 16x multisampling. If so, fetching from
150 * this sampler will be handled with a different instruction, ld2dms_w
151 * instead of ld2dms.
152 */
153 uint32_t msaa_16;
154
155 /**
156     * For Sandybridge, which shader workaround we need for gather quirks.
157 */
158 enum gen6_gather_sampler_wa gen6_gather_wa[MAX_SAMPLERS];
159 };
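/* Illustrative sketch of filling this struct from GL state (the exact bit
 * layout of gl_clamp_mask and the defaulting policy are assumptions for the
 * example, not requirements of the interface):
 *
 *    struct brw_sampler_prog_key_data *tex = &key->tex;
 *    for (unsigned s = 0; s < MAX_SAMPLERS; s++)
 *       tex->swizzles[s] = SWIZZLE_XYZW;      // identity swizzle by default
 *    if (unit_uses_gl_clamp_on_s)
 *       tex->gl_clamp_mask[0] |= 1u << unit;  // one mask per texture coordinate
 */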
160
161
162 /** The program key for Vertex Shaders. */
163 struct brw_vs_prog_key {
164 unsigned program_string_id;
165
166 /*
167 * Per-attribute workaround flags
168 */
169 uint8_t gl_attrib_wa_flags[VERT_ATTRIB_MAX];
170
171 bool copy_edgeflag:1;
172
173 bool clamp_vertex_color:1;
174
175 /**
176 * How many user clipping planes are being uploaded to the vertex shader as
177 * push constants.
178 *
179 * These are used for lowering legacy gl_ClipVertex/gl_Position clipping to
180 * clip distances.
181 */
182 unsigned nr_userclip_plane_consts:4;
183
184 /**
185 * For pre-Gen6 hardware, a bitfield indicating which texture coordinates
186 * are going to be replaced with point coordinates (as a consequence of a
187 * call to glTexEnvi(GL_POINT_SPRITE, GL_COORD_REPLACE, GL_TRUE)). Because
188 * our SF thread requires exact matching between VS outputs and FS inputs,
189 * these texture coordinates will need to be unconditionally included in
190 * the VUE, even if they aren't written by the vertex shader.
191 */
192 uint8_t point_coord_replace;
193
194 struct brw_sampler_prog_key_data tex;
195 };
196
197 /** The program key for Geometry Shaders. */
198 struct brw_gs_prog_key
199 {
200 unsigned program_string_id;
201
202 struct brw_sampler_prog_key_data tex;
203 };
204
205 /** The program key for Fragment/Pixel Shaders. */
206 struct brw_wm_prog_key {
207 uint8_t iz_lookup;
208 bool stats_wm:1;
209 bool flat_shade:1;
210 bool persample_shading:1;
211 bool persample_2x:1;
212 unsigned nr_color_regions:5;
213 bool replicate_alpha:1;
214 bool render_to_fbo:1;
215 bool clamp_fragment_color:1;
216 bool compute_pos_offset:1;
217 bool compute_sample_id:1;
218 unsigned line_aa:2;
219 bool high_quality_derivatives:1;
220
221 uint16_t drawable_height;
222 uint64_t input_slots_valid;
223 unsigned program_string_id;
224    GLenum alpha_test_func;          /**< For Gen4/5 MRT alpha test */
225 float alpha_test_ref;
226
227 struct brw_sampler_prog_key_data tex;
228 };
229
230 struct brw_cs_prog_key {
231 uint32_t program_string_id;
232 struct brw_sampler_prog_key_data tex;
233 };
234
235 /*
236 * Image metadata structure as laid out in the shader parameter
237 * buffer. Entries have to be 16B-aligned for the vec4 back-end to be
238 * able to use them. That's okay because the padding and any unused
239 * entries [most of them except when we're doing untyped surface
240 * access] will be removed by the uniform packing pass.
241 */
242 #define BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET 0
243 #define BRW_IMAGE_PARAM_OFFSET_OFFSET 4
244 #define BRW_IMAGE_PARAM_SIZE_OFFSET 8
245 #define BRW_IMAGE_PARAM_STRIDE_OFFSET 12
246 #define BRW_IMAGE_PARAM_TILING_OFFSET 16
247 #define BRW_IMAGE_PARAM_SWIZZLING_OFFSET 20
248 #define BRW_IMAGE_PARAM_SIZE 24
249
250 struct brw_image_param {
251 /** Surface binding table index. */
252 uint32_t surface_idx;
253
254 /** Offset applied to the X and Y surface coordinates. */
255 uint32_t offset[2];
256
257 /** Surface X, Y and Z dimensions. */
258 uint32_t size[3];
259
260 /** X-stride in bytes, Y-stride in pixels, horizontal slice stride in
261 * pixels, vertical slice stride in pixels.
262 */
263 uint32_t stride[4];
264
265 /** Log2 of the tiling modulus in the X, Y and Z dimension. */
266 uint32_t tiling[3];
267
268 /**
269 * Right shift to apply for bit 6 address swizzling. Two different
270 * swizzles can be specified and will be applied one after the other. The
271 * resulting address will be:
272 *
273 * addr' = addr ^ ((1 << 6) & ((addr >> swizzling[0]) ^
274 * (addr >> swizzling[1])))
275 *
276 * Use \c 0xff if any of the swizzles is not required.
277 */
278 uint32_t swizzling[2];
279 };
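/* A literal transcription of the swizzling formula above into C, as a sketch
 * (the helper name is an assumption; 0xff entries are simply treated as
 * contributing nothing, which also avoids an out-of-range shift):
 *
 *    static inline uint32_t
 *    example_apply_bit6_swizzle(uint32_t addr, const uint32_t swizzling[2])
 *    {
 *       uint32_t bits = 0;
 *       if (swizzling[0] != 0xff)
 *          bits ^= addr >> swizzling[0];
 *       if (swizzling[1] != 0xff)
 *          bits ^= addr >> swizzling[1];
 *       return addr ^ ((1u << 6) & bits);
 *    }
 */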
280
281 struct brw_stage_prog_data {
282 struct {
283       /** Size of our binding table. */
284 uint32_t size_bytes;
285
286 /** @{
287 * surface indices for the various groups of surfaces
288 */
289 uint32_t pull_constants_start;
290 uint32_t texture_start;
291 uint32_t gather_texture_start;
292 uint32_t ubo_start;
293 uint32_t ssbo_start;
294 uint32_t abo_start;
295 uint32_t image_start;
296 uint32_t shader_time_start;
297 /** @} */
298 } binding_table;
299
300 GLuint nr_params; /**< number of float params/constants */
301 GLuint nr_pull_params;
302 unsigned nr_image_params;
303
304 unsigned curb_read_length;
305 unsigned total_scratch;
306 unsigned total_shared;
307
308 /**
309 * Register where the thread expects to find input data from the URB
310 * (typically uniforms, followed by vertex or fragment attributes).
311 */
312 unsigned dispatch_grf_start_reg;
313
314 bool use_alt_mode; /**< Use ALT floating point mode? Otherwise, IEEE. */
315
316 /* Pointers to tracked values (only valid once
317 * _mesa_load_state_parameters has been called at runtime).
318 */
319 const union gl_constant_value **param;
320 const union gl_constant_value **pull_param;
321
322 /** Image metadata passed to the shader as uniforms. */
323 struct brw_image_param *image_param;
324 };
325
326 /* Data about a particular attempt to compile a program. Note that
327 * there can be many of these, each in a different GL state
328 * corresponding to a different brw_wm_prog_key struct, with different
329 * compiled programs.
330 */
331 struct brw_wm_prog_data {
332 struct brw_stage_prog_data base;
333
334 GLuint num_varying_inputs;
335
336 GLuint dispatch_grf_start_reg_16;
337 GLuint reg_blocks;
338 GLuint reg_blocks_16;
339
340 struct {
341 /** @{
342        * surface indices for the WM-specific surfaces
343 */
344 uint32_t render_target_start;
345 /** @} */
346 } binding_table;
347
348 uint8_t computed_depth_mode;
349 bool computed_stencil;
350
351 bool early_fragment_tests;
352 bool no_8;
353 bool dual_src_blend;
354 bool uses_pos_offset;
355 bool uses_omask;
356 bool uses_kill;
357 bool pulls_bary;
358 uint32_t prog_offset_16;
359
360 /**
361 * Mask of which interpolation modes are required by the fragment shader.
362 * Used in hardware setup on gen6+.
363 */
364 uint32_t barycentric_interp_modes;
365
366 /**
367 * Map from gl_varying_slot to the position within the FS setup data
368 * payload where the varying's attribute vertex deltas should be delivered.
369 * For varying slots that are not used by the FS, the value is -1.
370 */
371 int urb_setup[VARYING_SLOT_MAX];
372 };
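/* Illustrative sketch of consuming urb_setup (the varying chosen is an
 * arbitrary example):
 *
 *    int attr = prog_data->urb_setup[VARYING_SLOT_COL0];
 *    if (attr >= 0) {
 *       // COL0's vertex deltas arrive at FS payload position 'attr'
 *    } else {
 *       // the fragment shader does not read COL0
 *    }
 */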
373
374 struct brw_cs_prog_data {
375 struct brw_stage_prog_data base;
376
377 GLuint dispatch_grf_start_reg_16;
378 unsigned local_size[3];
379 unsigned simd_size;
380 bool uses_barrier;
381 bool uses_num_work_groups;
382 unsigned local_invocation_id_regs;
383
384 struct {
385 /** @{
386        * surface indices for the CS-specific surfaces
387 */
388 uint32_t work_groups_start;
389 /** @} */
390 } binding_table;
391 };
392
393 /**
394 * Enum representing the i965-specific vertex results that don't correspond
395 * exactly to any element of gl_varying_slot. The values of this enum are
396 * assigned such that they don't conflict with gl_varying_slot.
397 */
398 typedef enum
399 {
400 BRW_VARYING_SLOT_NDC = VARYING_SLOT_MAX,
401 BRW_VARYING_SLOT_PAD,
402 /**
403 * Technically this is not a varying but just a placeholder that
404 * compile_sf_prog() inserts into its VUE map to cause the gl_PointCoord
405     * built-in variable to be compiled correctly. See compile_sf_prog() for
406 * more info.
407 */
408 BRW_VARYING_SLOT_PNTC,
409 BRW_VARYING_SLOT_COUNT
410 } brw_varying_slot;
411
412 /**
413 * Data structure recording the relationship between the gl_varying_slot enum
414 * and "slots" within the vertex URB entry (VUE). A "slot" is defined as a
415 * single octaword within the VUE (128 bits).
416 *
417 * Note that each BRW register contains 256 bits (2 octawords), so when
418 * accessing the VUE in URB_NOSWIZZLE mode, each register corresponds to two
419 * consecutive VUE slots. When accessing the VUE in URB_INTERLEAVED mode (as
420 * in a vertex shader), each register corresponds to a single VUE slot, since
421 * it contains data for two separate vertices.
422 */
423 struct brw_vue_map {
424 /**
425 * Bitfield representing all varying slots that are (a) stored in this VUE
426 * map, and (b) actually written by the shader. Does not include any of
427 * the additional varying slots defined in brw_varying_slot.
428 */
429 GLbitfield64 slots_valid;
430
431 /**
432 * Is this VUE map for a separate shader pipeline?
433 *
434 * Separable programs (GL_ARB_separate_shader_objects) can be mixed and matched
435 * without the linker having a chance to dead code eliminate unused varyings.
436 *
437 * This means that we have to use a fixed slot layout, based on the output's
438 * location field, rather than assigning slots in a compact contiguous block.
439 */
440 bool separate;
441
442 /**
443 * Map from gl_varying_slot value to VUE slot. For gl_varying_slots that are
444 * not stored in a slot (because they are not written, or because
445 * additional processing is applied before storing them in the VUE), the
446 * value is -1.
447 */
448 signed char varying_to_slot[BRW_VARYING_SLOT_COUNT];
449
450 /**
451 * Map from VUE slot to gl_varying_slot value. For slots that do not
452 * directly correspond to a gl_varying_slot, the value comes from
453 * brw_varying_slot.
454 *
455 * For slots that are not in use, the value is BRW_VARYING_SLOT_PAD.
456 */
457 signed char slot_to_varying[BRW_VARYING_SLOT_COUNT];
458
459 /**
460     * Total number of VUE slots in use.
461 */
462 int num_slots;
463 };
464
465 void brw_print_vue_map(FILE *fp, const struct brw_vue_map *vue_map);
466
467 /**
468 * Convert a VUE slot number into a byte offset within the VUE.
469 */
470 static inline GLuint brw_vue_slot_to_offset(GLuint slot)
471 {
472 return 16*slot;
473 }
474
475 /**
476 * Convert a vertex output (brw_varying_slot) into a byte offset within the
477 * VUE.
478 */
479 static inline
480 GLuint brw_varying_to_offset(const struct brw_vue_map *vue_map, GLuint varying)
481 {
482 return brw_vue_slot_to_offset(vue_map->varying_to_slot[varying]);
483 }
484
485 void brw_compute_vue_map(const struct brw_device_info *devinfo,
486 struct brw_vue_map *vue_map,
487 GLbitfield64 slots_valid,
488 bool separate_shader);
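/* A small usage sketch tying the helpers above together (the slots_valid
 * value is an arbitrary example):
 *
 *    struct brw_vue_map vue_map;
 *    brw_compute_vue_map(devinfo, &vue_map,
 *                        VARYING_BIT_POS | VARYING_BIT_COL0,
 *                        false);                  // not a separate-shader pipeline
 *
 *    // Byte offset of gl_Position within a vertex's URB entry:
 *    GLuint pos_offset = brw_varying_to_offset(&vue_map, VARYING_SLOT_POS);
 */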
489
490 enum shader_dispatch_mode {
491 DISPATCH_MODE_4X1_SINGLE = 0,
492 DISPATCH_MODE_4X2_DUAL_INSTANCE = 1,
493 DISPATCH_MODE_4X2_DUAL_OBJECT = 2,
494 DISPATCH_MODE_SIMD8 = 3,
495 };
496
497 /**
498 * @defgroup Tessellator parameter enumerations.
499 *
500 * These correspond to the hardware values in 3DSTATE_TE, and are provided
501 * as part of the tessellation evaluation shader.
502 *
503 * @{
504 */
505 enum brw_tess_partitioning {
506 BRW_TESS_PARTITIONING_INTEGER = 0,
507 BRW_TESS_PARTITIONING_ODD_FRACTIONAL = 1,
508 BRW_TESS_PARTITIONING_EVEN_FRACTIONAL = 2,
509 };
510
511 enum brw_tess_output_topology {
512 BRW_TESS_OUTPUT_TOPOLOGY_POINT = 0,
513 BRW_TESS_OUTPUT_TOPOLOGY_LINE = 1,
514 BRW_TESS_OUTPUT_TOPOLOGY_TRI_CW = 2,
515 BRW_TESS_OUTPUT_TOPOLOGY_TRI_CCW = 3,
516 };
517
518 enum brw_tess_domain {
519 BRW_TESS_DOMAIN_QUAD = 0,
520 BRW_TESS_DOMAIN_TRI = 1,
521 BRW_TESS_DOMAIN_ISOLINE = 2,
522 };
523 /** @} */
524
525 struct brw_vue_prog_data {
526 struct brw_stage_prog_data base;
527 struct brw_vue_map vue_map;
528
529 /** Should the hardware deliver input VUE handles for URB pull loads? */
530 bool include_vue_handles;
531
532 GLuint urb_read_length;
533 GLuint total_grf;
534
535    /* Used for calculating URB partitions.  In the VS, this is the size of the
536 * URB entry used for both input and output to the thread. In the GS, this
537 * is the size of the URB entry used for output.
538 */
539 GLuint urb_entry_size;
540
541 enum shader_dispatch_mode dispatch_mode;
542 };
543
544 struct brw_vs_prog_data {
545 struct brw_vue_prog_data base;
546
547 GLbitfield64 inputs_read;
548
549 unsigned nr_attributes;
550
551 bool uses_vertexid;
552 bool uses_instanceid;
553 };
554
555 struct brw_tcs_prog_data
556 {
557 struct brw_vue_prog_data base;
558
559    /** Number of vertices in the output patch */
560 int instances;
561 };
562
563
564 struct brw_tes_prog_data
565 {
566 struct brw_vue_prog_data base;
567
568 enum brw_tess_partitioning partitioning;
569 enum brw_tess_output_topology output_topology;
570 enum brw_tess_domain domain;
571 };
572
573 struct brw_gs_prog_data
574 {
575 struct brw_vue_prog_data base;
576
577 /**
578 * Size of an output vertex, measured in HWORDS (32 bytes).
579 */
580 unsigned output_vertex_size_hwords;
581
582 unsigned output_topology;
583
584 /**
585 * Size of the control data (cut bits or StreamID bits), in hwords (32
586 * bytes). 0 if there is no control data.
587 */
588 unsigned control_data_header_size_hwords;
589
590 /**
591 * Format of the control data (either GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID
592 * if the control data is StreamID bits, or
593 * GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT if the control data is cut bits).
594 * Ignored if control_data_header_size is 0.
595 */
596 unsigned control_data_format;
597
598 bool include_primitive_id;
599
600 /**
601     * The number of vertices emitted if constant, otherwise -1.
602 */
603 int static_vertex_count;
604
605 int invocations;
606
607 /**
608 * Gen6 transform feedback enabled flag.
609 */
610 bool gen6_xfb_enabled;
611
612 /**
613 * Gen6: Provoking vertex convention for odd-numbered triangles
614 * in tristrips.
615 */
616 GLuint pv_first:1;
617
618 /**
619 * Gen6: Number of varyings that are output to transform feedback.
620 */
621 GLuint num_transform_feedback_bindings:7; /* 0-BRW_MAX_SOL_BINDINGS */
622
623 /**
624 * Gen6: Map from the index of a transform feedback binding table entry to the
625 * gl_varying_slot that should be streamed out through that binding table
626 * entry.
627 */
628 unsigned char transform_feedback_bindings[64 /* BRW_MAX_SOL_BINDINGS */];
629
630 /**
631 * Gen6: Map from the index of a transform feedback binding table entry to the
632 * swizzles that should be used when streaming out data through that
633 * binding table entry.
634 */
635 unsigned char transform_feedback_swizzles[64 /* BRW_MAX_SOL_BINDINGS */];
636 };
637
638
639 /** @} */
640
641 /**
642 * Compile a vertex shader.
643 *
644 * Returns the final assembly and the program's size.
645 */
646 const unsigned *
647 brw_compile_vs(const struct brw_compiler *compiler, void *log_data,
648 void *mem_ctx,
649 const struct brw_vs_prog_key *key,
650 struct brw_vs_prog_data *prog_data,
651 const struct nir_shader *shader,
652 gl_clip_plane *clip_planes,
653 bool use_legacy_snorm_formula,
654 int shader_time_index,
655 unsigned *final_assembly_size,
656 char **error_str);
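/* Illustrative call sketch (the surrounding objects are assumptions; a NULL
 * return indicates failure with *error_str describing why, and -1 is taken
 * here to mean "no shader_time instrumentation"):
 *
 *    unsigned code_size = 0;
 *    char *error_str = NULL;
 *    const unsigned *code =
 *       brw_compile_vs(compiler, log_data, mem_ctx, &key, &prog_data,
 *                      nir, clip_planes, false, -1,
 *                      &code_size, &error_str);
 *    if (code == NULL)
 *       fprintf(stderr, "VS compile failed: %s\n", error_str);
 */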
657
658 /**
659  * Compile a geometry shader.
660 *
661 * Returns the final assembly and the program's size.
662 */
663 const unsigned *
664 brw_compile_gs(const struct brw_compiler *compiler, void *log_data,
665 void *mem_ctx,
666 const struct brw_gs_prog_key *key,
667 struct brw_gs_prog_data *prog_data,
668 const struct nir_shader *shader,
669 struct gl_shader_program *shader_prog,
670 int shader_time_index,
671 unsigned *final_assembly_size,
672 char **error_str);
673
674 /**
675 * Compile a fragment shader.
676 *
677 * Returns the final assembly and the program's size.
678 */
679 const unsigned *
680 brw_compile_fs(const struct brw_compiler *compiler, void *log_data,
681 void *mem_ctx,
682 const struct brw_wm_prog_key *key,
683 struct brw_wm_prog_data *prog_data,
684 const struct nir_shader *shader,
685 struct gl_program *prog,
686 int shader_time_index8,
687 int shader_time_index16,
688 bool use_rep_send,
689 unsigned *final_assembly_size,
690 char **error_str);
691
692 /**
693 * Compile a compute shader.
694 *
695 * Returns the final assembly and the program's size.
696 */
697 const unsigned *
698 brw_compile_cs(const struct brw_compiler *compiler, void *log_data,
699 void *mem_ctx,
700 const struct brw_cs_prog_key *key,
701 struct brw_cs_prog_data *prog_data,
702 const struct nir_shader *shader,
703 int shader_time_index,
704 unsigned *final_assembly_size,
705 char **error_str);
706
707 void
708 brw_cs_fill_local_id_payload(const struct brw_cs_prog_data *cs_prog_data,
709 void *buffer, uint32_t threads, uint32_t stride);
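/* Illustrative sketch of driving the helper above (the thread count follows
 * from local_size and simd_size; the 32-byte-per-GRF stride is an assumption
 * for the example, and buffer allocation is glossed over):
 *
 *    unsigned group_size = cs_prog_data->local_size[0] *
 *                          cs_prog_data->local_size[1] *
 *                          cs_prog_data->local_size[2];
 *    uint32_t threads = (group_size + cs_prog_data->simd_size - 1) /
 *                       cs_prog_data->simd_size;
 *    uint32_t stride = cs_prog_data->local_invocation_id_regs * 32;
 *
 *    brw_cs_fill_local_id_payload(cs_prog_data, buffer, threads, stride);
 */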
710
711 #ifdef __cplusplus
712 } /* extern "C" */
713 #endif