/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include "zink_program.h"
26 #include "zink_compiler.h"
27 #include "zink_context.h"
28 #include "zink_render_pass.h"
29 #include "zink_screen.h"
31 #include "util/hash_table.h"
33 #include "util/u_debug.h"
34 #include "util/u_memory.h"
35 #include "tgsi/tgsi_from_mesa.h"
37 struct pipeline_cache_entry
{
38 struct zink_gfx_pipeline_state state
;
/* Fill 'buf' with a short human-readable tag for reference-count debugging
 * (used as a debug_reference_descriptor callback); 'ptr' is unused.
 */
static void
debug_describe_zink_gfx_program(char *buf, const struct zink_gfx_program *ptr)
{
   sprintf(buf, "zink_gfx_program");
}
/* Fill 'buf' with a short human-readable tag for reference-count debugging
 * (used as a debug_reference_descriptor callback); 'ptr' is unused.
 */
static void
debug_describe_zink_shader_module(char *buf, const struct zink_shader_module *ptr)
{
   sprintf(buf, "zink_shader_module");
}
54 static VkDescriptorSetLayout
55 create_desc_set_layout(VkDevice dev
,
56 struct zink_shader
*stages
[ZINK_SHADER_COUNT
],
57 unsigned *num_descriptors
)
59 VkDescriptorSetLayoutBinding bindings
[PIPE_SHADER_TYPES
* PIPE_MAX_CONSTANT_BUFFERS
];
62 for (int i
= 0; i
< ZINK_SHADER_COUNT
; i
++) {
63 struct zink_shader
*shader
= stages
[i
];
67 VkShaderStageFlagBits stage_flags
= zink_shader_stage(i
);
68 for (int j
= 0; j
< shader
->num_bindings
; j
++) {
69 assert(num_bindings
< ARRAY_SIZE(bindings
));
70 bindings
[num_bindings
].binding
= shader
->bindings
[j
].binding
;
71 bindings
[num_bindings
].descriptorType
= shader
->bindings
[j
].type
;
72 bindings
[num_bindings
].descriptorCount
= 1;
73 bindings
[num_bindings
].stageFlags
= stage_flags
;
74 bindings
[num_bindings
].pImmutableSamplers
= NULL
;
79 VkDescriptorSetLayoutCreateInfo dcslci
= {};
80 dcslci
.sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO
;
83 dcslci
.bindingCount
= num_bindings
;
84 dcslci
.pBindings
= bindings
;
86 VkDescriptorSetLayout dsl
;
87 if (vkCreateDescriptorSetLayout(dev
, &dcslci
, 0, &dsl
) != VK_SUCCESS
) {
88 debug_printf("vkCreateDescriptorSetLayout failed\n");
89 return VK_NULL_HANDLE
;
92 *num_descriptors
= num_bindings
;
96 static VkPipelineLayout
97 create_pipeline_layout(VkDevice dev
, VkDescriptorSetLayout dsl
)
99 assert(dsl
!= VK_NULL_HANDLE
);
101 VkPipelineLayoutCreateInfo plci
= {};
102 plci
.sType
= VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
;
104 plci
.pSetLayouts
= &dsl
;
105 plci
.setLayoutCount
= 1;
107 VkPipelineLayout layout
;
108 if (vkCreatePipelineLayout(dev
, &plci
, NULL
, &layout
) != VK_SUCCESS
) {
109 debug_printf("vkCreatePipelineLayout failed!\n");
110 return VK_NULL_HANDLE
;
117 zink_destroy_shader_module(struct zink_screen
*screen
, struct zink_shader_module
*zm
)
119 vkDestroyShaderModule(screen
->dev
, zm
->shader
, NULL
);
124 zink_shader_module_reference(struct zink_screen
*screen
,
125 struct zink_shader_module
**dst
,
126 struct zink_shader_module
*src
)
128 struct zink_shader_module
*old_dst
= dst
? *dst
: NULL
;
130 if (pipe_reference_described(old_dst
? &old_dst
->reference
: NULL
, &src
->reference
,
131 (debug_reference_descriptor
)debug_describe_zink_shader_module
))
132 zink_destroy_shader_module(screen
, old_dst
);
137 update_shader_modules(struct zink_context
*ctx
, struct zink_shader
*stages
[ZINK_SHADER_COUNT
], struct zink_gfx_program
*prog
)
139 struct zink_shader
*dirty
[ZINK_SHADER_COUNT
] = {NULL
};
141 /* we need to map pipe_shader_type -> gl_shader_stage so we can ensure that we're compiling
142 * the shaders in pipeline order and have builtin input/output locations match up after being compacted
144 unsigned dirty_shader_stages
= ctx
->dirty_shader_stages
;
145 while (dirty_shader_stages
) {
146 unsigned type
= u_bit_scan(&dirty_shader_stages
);
147 dirty
[tgsi_processor_to_shader_stage(type
)] = stages
[type
];
149 for (int i
= 0; i
< ZINK_SHADER_COUNT
; ++i
) {
150 enum pipe_shader_type type
= pipe_shader_type_from_mesa(i
);
152 prog
->stages
[type
] = CALLOC_STRUCT(zink_shader_module
);
153 assert(prog
->stages
[type
]);
154 pipe_reference_init(&prog
->stages
[type
]->reference
, 1);
155 prog
->stages
[type
]->shader
= zink_shader_compile(zink_screen(ctx
->base
.screen
), dirty
[i
]);
156 } else if (stages
[type
]) /* reuse existing shader module */
157 zink_shader_module_reference(zink_screen(ctx
->base
.screen
), &prog
->stages
[type
], ctx
->curr_program
->stages
[type
]);
158 prog
->shaders
[type
] = stages
[type
];
160 ctx
->dirty_shader_stages
= 0;
164 hash_gfx_pipeline_state(const void *key
)
166 return _mesa_hash_data(key
, sizeof(struct zink_gfx_pipeline_state
));
170 equals_gfx_pipeline_state(const void *a
, const void *b
)
172 return memcmp(a
, b
, sizeof(struct zink_gfx_pipeline_state
)) == 0;
175 struct zink_gfx_program
*
176 zink_create_gfx_program(struct zink_context
*ctx
,
177 struct zink_shader
*stages
[ZINK_SHADER_COUNT
])
179 struct zink_screen
*screen
= zink_screen(ctx
->base
.screen
);
180 struct zink_gfx_program
*prog
= CALLOC_STRUCT(zink_gfx_program
);
184 pipe_reference_init(&prog
->reference
, 1);
186 update_shader_modules(ctx
, stages
, prog
);
188 for (int i
= 0; i
< ARRAY_SIZE(prog
->pipelines
); ++i
) {
189 prog
->pipelines
[i
] = _mesa_hash_table_create(NULL
,
190 hash_gfx_pipeline_state
,
191 equals_gfx_pipeline_state
);
192 if (!prog
->pipelines
[i
])
196 for (int i
= 0; i
< ZINK_SHADER_COUNT
; ++i
) {
197 if (prog
->stages
[i
]) {
198 _mesa_set_add(stages
[i
]->programs
, prog
);
199 zink_gfx_program_reference(screen
, NULL
, prog
);
203 prog
->dsl
= create_desc_set_layout(screen
->dev
, stages
,
204 &prog
->num_descriptors
);
208 prog
->layout
= create_pipeline_layout(screen
->dev
, prog
->dsl
);
212 prog
->render_passes
= _mesa_set_create(NULL
, _mesa_hash_pointer
,
213 _mesa_key_pointer_equal
);
214 if (!prog
->render_passes
)
221 zink_destroy_gfx_program(screen
, prog
);
226 gfx_program_remove_shader(struct zink_gfx_program
*prog
, struct zink_shader
*shader
)
228 enum pipe_shader_type p_stage
= pipe_shader_type_from_mesa(shader
->nir
->info
.stage
);
230 assert(prog
->shaders
[p_stage
] == shader
);
231 prog
->shaders
[p_stage
] = NULL
;
232 _mesa_set_remove_key(shader
->programs
, prog
);
236 zink_destroy_gfx_program(struct zink_screen
*screen
,
237 struct zink_gfx_program
*prog
)
240 vkDestroyPipelineLayout(screen
->dev
, prog
->layout
, NULL
);
243 vkDestroyDescriptorSetLayout(screen
->dev
, prog
->dsl
, NULL
);
245 for (int i
= 0; i
< ZINK_SHADER_COUNT
; ++i
) {
246 if (prog
->shaders
[i
])
247 gfx_program_remove_shader(prog
, prog
->shaders
[i
]);
249 zink_shader_module_reference(screen
, &prog
->stages
[i
], NULL
);
252 /* unref all used render-passes */
253 if (prog
->render_passes
) {
254 set_foreach(prog
->render_passes
, entry
) {
255 struct zink_render_pass
*pres
= (struct zink_render_pass
*)entry
->key
;
256 zink_render_pass_reference(screen
, &pres
, NULL
);
258 _mesa_set_destroy(prog
->render_passes
, NULL
);
261 for (int i
= 0; i
< ARRAY_SIZE(prog
->pipelines
); ++i
) {
262 hash_table_foreach(prog
->pipelines
[i
], entry
) {
263 struct pipeline_cache_entry
*pc_entry
= entry
->data
;
265 vkDestroyPipeline(screen
->dev
, pc_entry
->pipeline
, NULL
);
268 _mesa_hash_table_destroy(prog
->pipelines
[i
], NULL
);
274 static VkPrimitiveTopology
275 primitive_topology(enum pipe_prim_type mode
)
278 case PIPE_PRIM_POINTS
:
279 return VK_PRIMITIVE_TOPOLOGY_POINT_LIST
;
281 case PIPE_PRIM_LINES
:
282 return VK_PRIMITIVE_TOPOLOGY_LINE_LIST
;
284 case PIPE_PRIM_LINE_STRIP
:
285 return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP
;
287 case PIPE_PRIM_TRIANGLES
:
288 return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST
;
290 case PIPE_PRIM_TRIANGLE_STRIP
:
291 return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
;
293 case PIPE_PRIM_TRIANGLE_FAN
:
294 return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN
;
297 unreachable("unexpected enum pipe_prim_type");
302 reference_render_pass(struct zink_screen
*screen
,
303 struct zink_gfx_program
*prog
,
304 struct zink_render_pass
*render_pass
)
306 struct set_entry
*entry
= _mesa_set_search(prog
->render_passes
,
309 entry
= _mesa_set_add(prog
->render_passes
, render_pass
);
310 pipe_reference(NULL
, &render_pass
->reference
);
315 zink_get_gfx_pipeline(struct zink_screen
*screen
,
316 struct zink_gfx_program
*prog
,
317 struct zink_gfx_pipeline_state
*state
,
318 enum pipe_prim_type mode
)
320 assert(mode
<= ARRAY_SIZE(prog
->pipelines
));
322 /* TODO: use pre-hashed versions to save some time (can re-hash only when
324 struct hash_entry
*entry
= _mesa_hash_table_search(prog
->pipelines
[mode
], state
);
326 VkPrimitiveTopology vkmode
= primitive_topology(mode
);
327 VkPipeline pipeline
= zink_create_gfx_pipeline(screen
, prog
,
329 if (pipeline
== VK_NULL_HANDLE
)
330 return VK_NULL_HANDLE
;
332 struct pipeline_cache_entry
*pc_entry
= CALLOC_STRUCT(pipeline_cache_entry
);
334 return VK_NULL_HANDLE
;
336 memcpy(&pc_entry
->state
, state
, sizeof(*state
));
337 pc_entry
->pipeline
= pipeline
;
339 entry
= _mesa_hash_table_insert(prog
->pipelines
[mode
], &pc_entry
->state
, pc_entry
);
342 reference_render_pass(screen
, prog
, state
->render_pass
);
345 return ((struct pipeline_cache_entry
*)(entry
->data
))->pipeline
;
350 zink_create_vs_state(struct pipe_context
*pctx
,
351 const struct pipe_shader_state
*shader
)
353 struct nir_shader
*nir
;
354 if (shader
->type
!= PIPE_SHADER_IR_NIR
)
355 nir
= zink_tgsi_to_nir(pctx
->screen
, shader
->tokens
);
357 nir
= (struct nir_shader
*)shader
->ir
.nir
;
359 return zink_shader_create(zink_screen(pctx
->screen
), nir
, &shader
->stream_output
);
363 bind_stage(struct zink_context
*ctx
, enum pipe_shader_type stage
,
364 struct zink_shader
*shader
)
366 assert(stage
< PIPE_SHADER_COMPUTE
);
367 ctx
->gfx_stages
[stage
] = shader
;
368 ctx
->dirty_shader_stages
|= 1 << stage
;
372 zink_bind_vs_state(struct pipe_context
*pctx
,
375 bind_stage(zink_context(pctx
), PIPE_SHADER_VERTEX
, cso
);
/* pipe_context::delete_vs_state */
static void
zink_delete_vs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_context(pctx), cso);
}
386 zink_create_fs_state(struct pipe_context
*pctx
,
387 const struct pipe_shader_state
*shader
)
389 struct nir_shader
*nir
;
390 if (shader
->type
!= PIPE_SHADER_IR_NIR
)
391 nir
= zink_tgsi_to_nir(pctx
->screen
, shader
->tokens
);
393 nir
= (struct nir_shader
*)shader
->ir
.nir
;
395 return zink_shader_create(zink_screen(pctx
->screen
), nir
, NULL
);
399 zink_bind_fs_state(struct pipe_context
*pctx
,
402 bind_stage(zink_context(pctx
), PIPE_SHADER_FRAGMENT
, cso
);
/* pipe_context::delete_fs_state */
static void
zink_delete_fs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_context(pctx), cso);
}
414 zink_program_init(struct zink_context
*ctx
)
416 ctx
->base
.create_vs_state
= zink_create_vs_state
;
417 ctx
->base
.bind_vs_state
= zink_bind_vs_state
;
418 ctx
->base
.delete_vs_state
= zink_delete_vs_state
;
420 ctx
->base
.create_fs_state
= zink_create_fs_state
;
421 ctx
->base
.bind_fs_state
= zink_bind_fs_state
;
422 ctx
->base
.delete_fs_state
= zink_delete_fs_state
;