/*
 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
 * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
 * Copyright © 2010 Intel Corporation
 * Copyright © 2011 Bryan Cain
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "compiler/glsl/glsl_parser_extras.h"
#include "compiler/glsl/ir_optimization.h"
#include "compiler/glsl/program.h"

#include "st_nir.h"
#include "st_shader_cache.h"
#include "st_glsl_to_tgsi.h"

#include "tgsi/tgsi_from_mesa.h"

extern "C" {

/**
 * Link a shader program.
 * Called via ctx->Driver.LinkShader().
 * This is a shared entry point that branches into either the GLSL IR -> TGSI
 * or the GLSL IR -> NIR path.
 */
GLboolean
st_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
{
   struct pipe_screen *pscreen = ctx->st->pipe->screen;

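   /* Ask the screen which IR it prefers; this decides whether the program is
    * handed to the NIR or the TGSI linker at the end of this function.
    */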
   enum pipe_shader_ir preferred_ir = (enum pipe_shader_ir)
      pscreen->get_shader_param(pscreen, PIPE_SHADER_VERTEX,
                                PIPE_SHADER_CAP_PREFERRED_IR);
   bool use_nir = preferred_ir == PIPE_SHADER_IR_NIR;

   /* Return early if we are loading the shader from on-disk cache */
   if (st_load_ir_from_disk_cache(ctx, prog, use_nir)) {
      return GL_TRUE;
   }

   assert(prog->data->LinkStatus);

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (prog->_LinkedShaders[i] == NULL)
         continue;

      struct gl_linked_shader *shader = prog->_LinkedShaders[i];
      exec_list *ir = shader->ir;
      gl_shader_stage stage = shader->Stage;
      const struct gl_shader_compiler_options *options =
         &ctx->Const.ShaderCompilerOptions[stage];

      /* If there are forms of indirect addressing that the driver
       * cannot handle, perform the lowering pass.
       */
      if (options->EmitNoIndirectInput || options->EmitNoIndirectOutput ||
          options->EmitNoIndirectTemp || options->EmitNoIndirectUniform) {
         lower_variable_index_to_cond_assign(stage, ir,
                                             options->EmitNoIndirectInput,
                                             options->EmitNoIndirectOutput,
                                             options->EmitNoIndirectTemp,
                                             options->EmitNoIndirectUniform);
      }

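      /* Query the per-stage shader caps up front; they control which of the
       * lowering flags passed to lower_instructions() below are needed.
       */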
      enum pipe_shader_type ptarget = pipe_shader_type_from_mesa(stage);
      bool have_dround = pscreen->get_shader_param(pscreen, ptarget,
                                                   PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED);
      bool have_dfrexp = pscreen->get_shader_param(pscreen, ptarget,
                                                   PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED);
      bool have_ldexp = pscreen->get_shader_param(pscreen, ptarget,
                                                  PIPE_SHADER_CAP_TGSI_LDEXP_SUPPORTED);

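      /* Drivers without native 64-bit integer division/modulo get DIV64 and
       * MOD64 lowered here.
       */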
      if (!pscreen->get_param(pscreen, PIPE_CAP_INT64_DIVMOD))
         lower_64bit_integer_instructions(ir, DIV64 | MOD64);

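      /* Lower the GLSL pack/unpack built-ins to bit manipulation.  BFI/BFE
       * are only used when ARB_gpu_shader5 is available, and half-float
       * packing is left alone when the context supports it natively.
       */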
      if (ctx->Extensions.ARB_shading_language_packing) {
         unsigned lower_inst = LOWER_PACK_SNORM_2x16 |
                               LOWER_UNPACK_SNORM_2x16 |
                               LOWER_PACK_UNORM_2x16 |
                               LOWER_UNPACK_UNORM_2x16 |
                               LOWER_PACK_SNORM_4x8 |
                               LOWER_UNPACK_SNORM_4x8 |
                               LOWER_UNPACK_UNORM_4x8 |
                               LOWER_PACK_UNORM_4x8;

         if (ctx->Extensions.ARB_gpu_shader5)
            lower_inst |= LOWER_PACK_USE_BFI |
                          LOWER_PACK_USE_BFE;
         if (!ctx->st->has_half_float_packing)
            lower_inst |= LOWER_PACK_HALF_2x16 |
                          LOWER_UNPACK_HALF_2x16;

         lower_packing_builtins(ir, lower_inst);
      }

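      /* textureGatherOffsets() takes an array of offsets; lower it when the
       * driver cannot consume that array directly.
       */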
      if (!pscreen->get_param(pscreen, PIPE_CAP_TEXTURE_GATHER_OFFSETS))
         lower_offset_arrays(ir);
      do_mat_op_to_vec(ir);

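      /* Advanced blend equations are emulated by emitting the blending math
       * into the fragment shader.
       */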
      if (stage == MESA_SHADER_FRAGMENT)
         lower_blend_equation_advanced(
            shader, ctx->Extensions.KHR_blend_equation_advanced_coherent);

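      /* A single combined lowering pass: the unconditional lowerings are
       * OR'ed with flags keyed off the caps and compiler options gathered
       * above.
       */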
      lower_instructions(ir,
                         (use_nir ? 0 : MOD_TO_FLOOR) |
                         FDIV_TO_MUL_RCP |
                         EXP_TO_EXP2 |
                         LOG_TO_LOG2 |
                         MUL64_TO_MUL_AND_MUL_HIGH |
                         (have_ldexp ? 0 : LDEXP_TO_ARITH) |
                         (have_dfrexp ? 0 : DFREXP_DLDEXP_TO_ARITH) |
                         CARRY_TO_ARITH |
                         BORROW_TO_ARITH |
                         (have_dround ? 0 : DOPS_TO_DFRAC) |
                         (options->EmitNoPow ? POW_TO_EXP2 : 0) |
                         (!ctx->Const.NativeIntegers ? INT_DIV_TO_MUL_RCP : 0) |
                         (options->EmitNoSat ? SAT_TO_CLAMP : 0) |
                         (ctx->Const.ForceGLSLAbsSqrt ? SQRT_TO_ABS_SQRT : 0) |
                         /* Assume that if ARB_gpu_shader5 is not supported
                          * then all of the extended integer functions need
                          * lowering.  It may be necessary to add some caps
                          * for individual instructions.
                          */
                         (!ctx->Extensions.ARB_gpu_shader5
                          ? BIT_COUNT_TO_MATH |
                            EXTRACT_TO_SHIFTS |
                            INSERT_TO_SHIFTS |
                            REVERSE_TO_SHIFTS |
                            FIND_LSB_TO_FLOAT_CAST |
                            FIND_MSB_TO_FLOAT_CAST |
                            IMUL_HIGH_TO_MUL
                          : 0));

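      /* Remaining GLSL IR cleanups shared by both back ends; discard
       * statements are only lowered for drivers that do not support
       * if-statements (MaxIfDepth == 0).
       */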
      do_vec_index_to_cond_assign(ir);
      lower_vector_insert(ir, true);
      lower_quadop_vector(ir, false);
      lower_noise(ir);
      if (options->MaxIfDepth == 0) {
         lower_discard(ir);
      }

      validate_ir_tree(ir);
   }

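   /* Build the list of resources (uniforms, inputs, outputs, ...) exposed to
    * the GL program interface queries, then hand the program off to the
    * back end selected above.
    */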
   build_program_resource_list(ctx, prog);

   if (use_nir)
      return st_link_nir(ctx, prog);
   else
      return st_link_tgsi(ctx, prog);
}

} /* extern "C" */