/*
 * Copyright (C) 2005-2007 Brian Paul All Rights Reserved.
 * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
 * Copyright © 2010 Intel Corporation
 * Copyright © 2011 Bryan Cain
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "compiler/glsl/glsl_parser_extras.h"
#include "compiler/glsl/ir_optimization.h"
#include "compiler/glsl/program.h"

#include "st_shader_cache.h"
#include "st_glsl_to_tgsi.h"

#include "tgsi/tgsi_from_mesa.h"
41 * Called via ctx->Driver.LinkShader()
42 * This is a shared function that branches off to either GLSL IR -> TGSI or
46 st_link_shader(struct gl_context
*ctx
, struct gl_shader_program
*prog
)
48 struct pipe_screen
*pscreen
= ctx
->st
->pipe
->screen
;
50 enum pipe_shader_ir preferred_ir
= (enum pipe_shader_ir
)
51 pscreen
->get_shader_param(pscreen
, PIPE_SHADER_VERTEX
,
52 PIPE_SHADER_CAP_PREFERRED_IR
);
53 bool use_nir
= preferred_ir
== PIPE_SHADER_IR_NIR
;
55 /* Return early if we are loading the shader from on-disk cache */
56 if (st_load_ir_from_disk_cache(ctx
, prog
, use_nir
)) {
60 assert(prog
->data
->LinkStatus
);
62 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
63 if (prog
->_LinkedShaders
[i
] == NULL
)
66 struct gl_linked_shader
*shader
= prog
->_LinkedShaders
[i
];
67 exec_list
*ir
= shader
->ir
;
68 gl_shader_stage stage
= shader
->Stage
;
69 const struct gl_shader_compiler_options
*options
=
70 &ctx
->Const
.ShaderCompilerOptions
[stage
];
72 /* If there are forms of indirect addressing that the driver
73 * cannot handle, perform the lowering pass.
75 if (options
->EmitNoIndirectInput
|| options
->EmitNoIndirectOutput
||
76 options
->EmitNoIndirectTemp
|| options
->EmitNoIndirectUniform
) {
77 lower_variable_index_to_cond_assign(stage
, ir
,
78 options
->EmitNoIndirectInput
,
79 options
->EmitNoIndirectOutput
,
80 options
->EmitNoIndirectTemp
,
81 options
->EmitNoIndirectUniform
);
84 enum pipe_shader_type ptarget
= pipe_shader_type_from_mesa(stage
);
85 bool have_dround
= pscreen
->get_shader_param(pscreen
, ptarget
,
86 PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED
);
87 bool have_dfrexp
= pscreen
->get_shader_param(pscreen
, ptarget
,
88 PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED
);
89 bool have_ldexp
= pscreen
->get_shader_param(pscreen
, ptarget
,
90 PIPE_SHADER_CAP_TGSI_LDEXP_SUPPORTED
);
92 if (!pscreen
->get_param(pscreen
, PIPE_CAP_INT64_DIVMOD
))
93 lower_64bit_integer_instructions(ir
, DIV64
| MOD64
);
95 if (ctx
->Extensions
.ARB_shading_language_packing
) {
96 unsigned lower_inst
= LOWER_PACK_SNORM_2x16
|
97 LOWER_UNPACK_SNORM_2x16
|
98 LOWER_PACK_UNORM_2x16
|
99 LOWER_UNPACK_UNORM_2x16
|
100 LOWER_PACK_SNORM_4x8
|
101 LOWER_UNPACK_SNORM_4x8
|
102 LOWER_UNPACK_UNORM_4x8
|
103 LOWER_PACK_UNORM_4x8
;
105 if (ctx
->Extensions
.ARB_gpu_shader5
)
106 lower_inst
|= LOWER_PACK_USE_BFI
|
108 if (!ctx
->st
->has_half_float_packing
)
109 lower_inst
|= LOWER_PACK_HALF_2x16
|
110 LOWER_UNPACK_HALF_2x16
;
112 lower_packing_builtins(ir
, lower_inst
);
115 if (!pscreen
->get_param(pscreen
, PIPE_CAP_TEXTURE_GATHER_OFFSETS
))
116 lower_offset_arrays(ir
);
117 do_mat_op_to_vec(ir
);
119 if (stage
== MESA_SHADER_FRAGMENT
)
120 lower_blend_equation_advanced(
121 shader
, ctx
->Extensions
.KHR_blend_equation_advanced_coherent
);
123 lower_instructions(ir
,
124 (use_nir
? 0 : MOD_TO_FLOOR
) |
128 MUL64_TO_MUL_AND_MUL_HIGH
|
129 (have_ldexp
? 0 : LDEXP_TO_ARITH
) |
130 (have_dfrexp
? 0 : DFREXP_DLDEXP_TO_ARITH
) |
133 (have_dround
? 0 : DOPS_TO_DFRAC
) |
134 (options
->EmitNoPow
? POW_TO_EXP2
: 0) |
135 (!ctx
->Const
.NativeIntegers
? INT_DIV_TO_MUL_RCP
: 0) |
136 (options
->EmitNoSat
? SAT_TO_CLAMP
: 0) |
137 (ctx
->Const
.ForceGLSLAbsSqrt
? SQRT_TO_ABS_SQRT
: 0) |
138 /* Assume that if ARB_gpu_shader5 is not supported
139 * then all of the extended integer functions need
140 * lowering. It may be necessary to add some caps
141 * for individual instructions.
143 (!ctx
->Extensions
.ARB_gpu_shader5
144 ? BIT_COUNT_TO_MATH
|
148 FIND_LSB_TO_FLOAT_CAST
|
149 FIND_MSB_TO_FLOAT_CAST
|
153 do_vec_index_to_cond_assign(ir
);
154 lower_vector_insert(ir
, true);
155 lower_quadop_vector(ir
, false);
157 if (options
->MaxIfDepth
== 0) {
161 validate_ir_tree(ir
);
164 build_program_resource_list(ctx
, prog
);
167 return st_link_nir(ctx
, prog
);
169 return st_link_tgsi(ctx
, prog
);