2 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
3 * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
4 * Copyright © 2010 Intel Corporation
5 * Copyright © 2011 Bryan Cain
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
27 #include "compiler/glsl/glsl_parser_extras.h"
28 #include "compiler/glsl/ir_optimization.h"
29 #include "compiler/glsl/program.h"
32 #include "st_shader_cache.h"
33 #include "st_glsl_to_tgsi.h"
35 #include "tgsi/tgsi_from_mesa.h"
41 * Called via ctx->Driver.LinkShader()
42 * This is a shared function that branches off to either GLSL IR -> TGSI or
43 * GLSL IR -> NIR
46 st_link_shader(struct gl_context
*ctx
, struct gl_shader_program
*prog
)
48 struct pipe_screen
*pscreen
= ctx
->st
->pipe
->screen
;
50 enum pipe_shader_ir preferred_ir
= (enum pipe_shader_ir
)
51 pscreen
->get_shader_param(pscreen
, PIPE_SHADER_VERTEX
,
52 PIPE_SHADER_CAP_PREFERRED_IR
);
53 bool use_nir
= preferred_ir
== PIPE_SHADER_IR_NIR
;
55 /* Return early if we are loading the shader from on-disk cache */
56 if (st_load_ir_from_disk_cache(ctx
, prog
, use_nir
)) {
60 assert(prog
->data
->LinkStatus
);
62 /* Skip the GLSL steps when using SPIR-V. */
63 if (prog
->data
->spirv
) {
65 return st_link_nir(ctx
, prog
);
68 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
69 if (prog
->_LinkedShaders
[i
] == NULL
)
72 struct gl_linked_shader
*shader
= prog
->_LinkedShaders
[i
];
73 exec_list
*ir
= shader
->ir
;
74 gl_shader_stage stage
= shader
->Stage
;
75 const struct gl_shader_compiler_options
*options
=
76 &ctx
->Const
.ShaderCompilerOptions
[stage
];
78 /* If there are forms of indirect addressing that the driver
79 * cannot handle, perform the lowering pass.
81 if (options
->EmitNoIndirectInput
|| options
->EmitNoIndirectOutput
||
82 options
->EmitNoIndirectTemp
|| options
->EmitNoIndirectUniform
) {
83 lower_variable_index_to_cond_assign(stage
, ir
,
84 options
->EmitNoIndirectInput
,
85 options
->EmitNoIndirectOutput
,
86 options
->EmitNoIndirectTemp
,
87 options
->EmitNoIndirectUniform
);
90 enum pipe_shader_type ptarget
= pipe_shader_type_from_mesa(stage
);
91 bool have_dround
= pscreen
->get_shader_param(pscreen
, ptarget
,
92 PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED
);
93 bool have_dfrexp
= pscreen
->get_shader_param(pscreen
, ptarget
,
94 PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED
);
95 bool have_ldexp
= pscreen
->get_shader_param(pscreen
, ptarget
,
96 PIPE_SHADER_CAP_TGSI_LDEXP_SUPPORTED
);
98 if (!pscreen
->get_param(pscreen
, PIPE_CAP_INT64_DIVMOD
))
99 lower_64bit_integer_instructions(ir
, DIV64
| MOD64
);
101 if (ctx
->Extensions
.ARB_shading_language_packing
) {
102 unsigned lower_inst
= LOWER_PACK_SNORM_2x16
|
103 LOWER_UNPACK_SNORM_2x16
|
104 LOWER_PACK_UNORM_2x16
|
105 LOWER_UNPACK_UNORM_2x16
|
106 LOWER_PACK_SNORM_4x8
|
107 LOWER_UNPACK_SNORM_4x8
|
108 LOWER_UNPACK_UNORM_4x8
|
109 LOWER_PACK_UNORM_4x8
;
111 if (ctx
->Extensions
.ARB_gpu_shader5
)
112 lower_inst
|= LOWER_PACK_USE_BFI
|
114 if (!ctx
->st
->has_half_float_packing
)
115 lower_inst
|= LOWER_PACK_HALF_2x16
|
116 LOWER_UNPACK_HALF_2x16
;
118 lower_packing_builtins(ir
, lower_inst
);
121 if (!pscreen
->get_param(pscreen
, PIPE_CAP_TEXTURE_GATHER_OFFSETS
))
122 lower_offset_arrays(ir
);
123 do_mat_op_to_vec(ir
);
125 if (stage
== MESA_SHADER_FRAGMENT
)
126 lower_blend_equation_advanced(
127 shader
, ctx
->Extensions
.KHR_blend_equation_advanced_coherent
);
129 lower_instructions(ir
,
130 (use_nir
? 0 : MOD_TO_FLOOR
) |
134 MUL64_TO_MUL_AND_MUL_HIGH
|
135 (have_ldexp
? 0 : LDEXP_TO_ARITH
) |
136 (have_dfrexp
? 0 : DFREXP_DLDEXP_TO_ARITH
) |
139 (have_dround
? 0 : DOPS_TO_DFRAC
) |
140 (options
->EmitNoPow
? POW_TO_EXP2
: 0) |
141 (!ctx
->Const
.NativeIntegers
? INT_DIV_TO_MUL_RCP
: 0) |
142 (options
->EmitNoSat
? SAT_TO_CLAMP
: 0) |
143 (ctx
->Const
.ForceGLSLAbsSqrt
? SQRT_TO_ABS_SQRT
: 0) |
144 /* Assume that if ARB_gpu_shader5 is not supported
145 * then all of the extended integer functions need
146 * lowering. It may be necessary to add some caps
147 * for individual instructions.
149 (!ctx
->Extensions
.ARB_gpu_shader5
150 ? BIT_COUNT_TO_MATH
|
154 FIND_LSB_TO_FLOAT_CAST
|
155 FIND_MSB_TO_FLOAT_CAST
|
159 do_vec_index_to_cond_assign(ir
);
160 lower_vector_insert(ir
, true);
161 lower_quadop_vector(ir
, false);
162 if (options
->MaxIfDepth
== 0) {
166 validate_ir_tree(ir
);
169 build_program_resource_list(ctx
, prog
, use_nir
);
172 return st_link_nir(ctx
, prog
);
174 return st_link_tgsi(ctx
, prog
);