+ NIR_PASS_V(nir, gl_nir_lower_samplers, shader_program);
+
+ if (prog) {
+ prog->info.textures_used = nir->info.textures_used;
+ prog->info.textures_used_by_txf = nir->info.textures_used_by_txf;
+ prog->info.images_used = nir->info.images_used;
+ }
+}
+
/* Size callback for nir_lower_io when uniforms are packed: each GLSL type
 * occupies exactly as many dword (4-byte) slots as it needs, with no vec4
 * padding between members.
 */
static int
st_packed_uniforms_type_size(const struct glsl_type *type, bool bindless)
{
   int dwords = glsl_count_dword_slots(type, bindless);
   return dwords;
}
+
/* Size callback for nir_lower_io when uniforms are not packed: every GLSL
 * type is measured in whole vec4 slots.  The second argument to
 * glsl_count_vec4_slots (is_gl_vertex_input) is always false for uniforms.
 */
static int
st_unpacked_uniforms_type_size(const struct glsl_type *type, bool bindless)
{
   int vec4s = glsl_count_vec4_slots(type, /* is_gl_vertex_input */ false,
                                     bindless);
   return vec4s;
}
+
+void
+st_nir_lower_uniforms(struct st_context *st, nir_shader *nir)
+{
+ if (st->ctx->Const.PackedDriverUniformStorage) {
+ NIR_PASS_V(nir, nir_lower_io, nir_var_uniform,
+ st_packed_uniforms_type_size,
+ (nir_lower_io_options)0);
+ NIR_PASS_V(nir, nir_lower_uniforms_to_ubo, 4);
+ } else {
+ NIR_PASS_V(nir, nir_lower_io, nir_var_uniform,
+ st_unpacked_uniforms_type_size,
+ (nir_lower_io_options)0);
+ }
+}
+
+/* Last third of preparing nir from glsl, which happens after shader
+ * variant lowering.
+ */
+void
+st_finalize_nir(struct st_context *st, struct gl_program *prog,
+ struct gl_shader_program *shader_program,
+ nir_shader *nir, bool finalize_by_driver)
+{
+ struct pipe_screen *screen = st->pipe->screen;
+
+ NIR_PASS_V(nir, nir_split_var_copies);
+ NIR_PASS_V(nir, nir_lower_var_copies);
+
+ st_nir_assign_varying_locations(st, nir);
+ st_nir_assign_uniform_locations(st->ctx, prog, nir);
+
+ /* Set num_uniforms in number of attribute slots (vec4s) */
+ nir->num_uniforms = DIV_ROUND_UP(prog->Parameters->NumParameterValues, 4);
+
+ st_nir_lower_uniforms(st, nir);
+ st_nir_lower_samplers(screen, nir, shader_program, prog);
+ if (!screen->get_param(screen, PIPE_CAP_NIR_IMAGES_AS_DEREF))
+ NIR_PASS_V(nir, gl_nir_lower_images, false);
+
+ if (finalize_by_driver && screen->finalize_nir)
+ screen->finalize_nir(screen, nir, false);