aco: add framework for testing isel and integration tests
[mesa.git] / src / amd / compiler / tests / helpers.cpp
1 /*
2 * Copyright © 2020 Valve Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24 #include "helpers.h"
25 #include "vulkan/vk_format.h"
26 #include "llvm/ac_llvm_util.h"
27 #include <stdio.h>
28 #include <sstream>
29 #include <llvm-c/Target.h>
30 #include <mutex>
31
32 using namespace aco;
33
34 extern "C" {
35 PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
36 VkInstance instance,
37 const char* pName);
38 }
39
/* Global state shared by the test helpers: the program/config/info used by
 * the isel/IR tests, plus per-family cached Vulkan objects used by the
 * integration tests. */
ac_shader_config config;
radv_shader_info info;
std::unique_ptr<Program> program;
Builder bld(NULL);
/* Temporaries defined by the p_startpgm created in setup_cs(), in the order
 * given by its input_spec string. */
Temp inputs[16];
Temp exec_input;
const char *subvariant = "";

/* One instance/device per radeon_family, created lazily by get_vk_device()
 * and torn down by the destroy_devices static destructor below. */
static VkInstance instance_cache[CHIP_LAST] = {VK_NULL_HANDLE};
static VkDevice device_cache[CHIP_LAST] = {VK_NULL_HANDLE};
static std::mutex create_device_mutex;

/* The Vulkan entry points the tests need. FUNCTION_LIST is expanded twice:
 * once here to declare the function pointers, and once in get_vk_device()
 * to resolve them through vk_icdGetInstanceProcAddr. */
#define FUNCTION_LIST\
   ITEM(CreateInstance)\
   ITEM(DestroyInstance)\
   ITEM(EnumeratePhysicalDevices)\
   ITEM(GetPhysicalDeviceProperties2)\
   ITEM(CreateDevice)\
   ITEM(DestroyDevice)\
   ITEM(CreateShaderModule)\
   ITEM(DestroyShaderModule)\
   ITEM(CreateGraphicsPipelines)\
   ITEM(CreateComputePipelines)\
   ITEM(DestroyPipeline)\
   ITEM(CreateDescriptorSetLayout)\
   ITEM(DestroyDescriptorSetLayout)\
   ITEM(CreatePipelineLayout)\
   ITEM(DestroyPipelineLayout)\
   ITEM(CreateRenderPass)\
   ITEM(DestroyRenderPass)\
   ITEM(GetPipelineExecutablePropertiesKHR)\
   ITEM(GetPipelineExecutableInternalRepresentationsKHR)

#define ITEM(n) PFN_vk##n n;
FUNCTION_LIST
#undef ITEM
76
77 void create_program(enum chip_class chip_class, Stage stage, unsigned wave_size, enum radeon_family family)
78 {
79 memset(&config, 0, sizeof(config));
80 info.wave_size = wave_size;
81
82 program.reset(new Program);
83 aco::init_program(program.get(), stage, &info, chip_class, family, &config);
84
85 Block *block = program->create_and_insert_block();
86 block->kind = block_kind_top_level;
87
88 bld = Builder(program.get(), &program->blocks[0]);
89
90 config.float_mode = program->blocks[0].fp_mode.val;
91 }
92
93 bool setup_cs(const char *input_spec, enum chip_class chip_class,
94 enum radeon_family family, unsigned wave_size)
95 {
96 const char *old_subvariant = subvariant;
97 subvariant = "";
98 if (!set_variant(chip_class, old_subvariant))
99 return false;
100
101 memset(&info, 0, sizeof(info));
102 info.cs.block_size[0] = 1;
103 info.cs.block_size[1] = 1;
104 info.cs.block_size[2] = 1;
105
106 create_program(chip_class, compute_cs, wave_size, family);
107
108 if (input_spec) {
109 unsigned num_inputs = DIV_ROUND_UP(strlen(input_spec), 3u);
110 aco_ptr<Instruction> startpgm{create_instruction<Pseudo_instruction>(aco_opcode::p_startpgm, Format::PSEUDO, 0, num_inputs + 1)};
111 for (unsigned i = 0; i < num_inputs; i++) {
112 RegClass cls(input_spec[i * 3] == 'v' ? RegType::vgpr : RegType::sgpr, input_spec[i * 3 + 1] - '0');
113 inputs[i] = bld.tmp(cls);
114 startpgm->definitions[i] = Definition(inputs[i]);
115 }
116 exec_input = bld.tmp(program->lane_mask);
117 startpgm->definitions[num_inputs] = bld.exec(Definition(exec_input));
118 bld.insert(std::move(startpgm));
119 }
120
121 return true;
122 }
123
124 void finish_program(Program *program)
125 {
126 for (Block& BB : program->blocks) {
127 for (unsigned idx : BB.linear_preds)
128 program->blocks[idx].linear_succs.emplace_back(BB.index);
129 for (unsigned idx : BB.logical_preds)
130 program->blocks[idx].logical_succs.emplace_back(BB.index);
131 }
132
133 for (Block& block : program->blocks) {
134 if (block.linear_succs.size() == 0) {
135 block.kind |= block_kind_uniform;
136 Builder bld(program, &block);
137 if (program->wb_smem_l1_on_end)
138 bld.smem(aco_opcode::s_dcache_wb, false);
139 bld.sopp(aco_opcode::s_endpgm);
140 }
141 }
142 }
143
144 void finish_validator_test()
145 {
146 finish_program(program.get());
147 aco_print_program(program.get(), output);
148 fprintf(output, "Validation results:\n");
149 if (aco::validate(program.get(), output))
150 fprintf(output, "Validation passed\n");
151 else
152 fprintf(output, "Validation failed\n");
153 }
154
155 void finish_opt_test()
156 {
157 finish_program(program.get());
158 if (!aco::validate(program.get(), output)) {
159 fail_test("Validation before optimization failed");
160 return;
161 }
162 aco::optimize(program.get());
163 if (!aco::validate(program.get(), output)) {
164 fail_test("Validation after optimization failed");
165 return;
166 }
167 aco_print_program(program.get(), output);
168 }
169
/* Finalize the current program, run the lower-to-HW-instructions pass and
 * print the result for comparison against the expected test output. */
void finish_to_hw_instr_test()
{
   finish_program(program.get());
   aco::lower_to_hw_instr(program.get());
   aco_print_program(program.get(), output);
}
176
177 void finish_assembler_test()
178 {
179 finish_program(program.get());
180 std::vector<uint32_t> binary;
181 unsigned exec_size = emit_program(program.get(), binary);
182
183 /* we could use CLRX for disassembly but that would require it to be
184 * installed */
185 if (program->chip_class == GFX10_3 && LLVM_VERSION_MAJOR < 9) {
186 skip_test("LLVM 11 needed for GFX10_3 disassembly");
187 } else if (program->chip_class == GFX10 && LLVM_VERSION_MAJOR < 9) {
188 skip_test("LLVM 9 needed for GFX10 disassembly");
189 } else if (program->chip_class >= GFX8) {
190 std::ostringstream ss;
191 print_asm(program.get(), binary, exec_size / 4u, ss);
192
193 fputs(ss.str().c_str(), output);
194 } else {
195 //TODO: maybe we should use CLRX and skip this test if it's not available?
196 for (uint32_t dword : binary)
197 fprintf(output, "%.8x\n", dword);
198 }
199 }
200
201 void writeout(unsigned i, Temp tmp)
202 {
203 if (tmp.id())
204 bld.pseudo(aco_opcode::p_unit_test, Operand(i), tmp);
205 else
206 bld.pseudo(aco_opcode::p_unit_test, Operand(i));
207 }
208
209 VkDevice get_vk_device(enum chip_class chip_class)
210 {
211 enum radeon_family family;
212 switch (chip_class) {
213 case GFX6:
214 family = CHIP_TAHITI;
215 break;
216 case GFX7:
217 family = CHIP_BONAIRE;
218 break;
219 case GFX8:
220 family = CHIP_POLARIS10;
221 break;
222 case GFX9:
223 family = CHIP_VEGA10;
224 break;
225 case GFX10:
226 family = CHIP_NAVI10;
227 break;
228 default:
229 family = CHIP_UNKNOWN;
230 break;
231 }
232 return get_vk_device(family);
233 }
234
235 VkDevice get_vk_device(enum radeon_family family)
236 {
237 assert(family != CHIP_UNKNOWN);
238
239 std::lock_guard<std::mutex> guard(create_device_mutex);
240
241 if (device_cache[family])
242 return device_cache[family];
243
244 setenv("RADV_FORCE_FAMILY", ac_get_llvm_processor_name(family), 1);
245
246 VkApplicationInfo app_info = {};
247 app_info.pApplicationName = "aco_tests";
248 app_info.apiVersion = VK_API_VERSION_1_2;
249 VkInstanceCreateInfo instance_create_info = {};
250 instance_create_info.pApplicationInfo = &app_info;
251 instance_create_info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
252 VkResult result = ((PFN_vkCreateInstance)vk_icdGetInstanceProcAddr(NULL, "vkCreateInstance"))(&instance_create_info, NULL, &instance_cache[family]);
253 assert(result == VK_SUCCESS);
254
255 #define ITEM(n) n = (PFN_vk##n)vk_icdGetInstanceProcAddr(instance_cache[family], "vk" #n);
256 FUNCTION_LIST
257 #undef ITEM
258
259 uint32_t device_count = 1;
260 VkPhysicalDevice device = VK_NULL_HANDLE;
261 result = EnumeratePhysicalDevices(instance_cache[family], &device_count, &device);
262 assert(result == VK_SUCCESS);
263 assert(device != VK_NULL_HANDLE);
264
265 VkDeviceCreateInfo device_create_info = {};
266 device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
267 static const char *extensions[] = {"VK_KHR_pipeline_executable_properties"};
268 device_create_info.enabledExtensionCount = sizeof(extensions) / sizeof(extensions[0]);
269 device_create_info.ppEnabledExtensionNames = extensions;
270 result = CreateDevice(device, &device_create_info, NULL, &device_cache[family]);
271
272 return device_cache[family];
273 }
274
/* Static-destructor cleanup: on process exit, destroy every device/instance
 * pair that get_vk_device() cached. Entries with no device are skipped
 * (their instance slot was never filled either). */
static struct DestroyDevices {
   ~DestroyDevices() {
      for (unsigned i = 0; i < CHIP_LAST; i++) {
         if (!device_cache[i])
            continue;
         DestroyDevice(device_cache[i], NULL);
         DestroyInstance(instance_cache[i], NULL);
      }
   }
} destroy_devices;
285
/* Query the pipeline executable whose stage mask equals "stages" via
 * VK_KHR_pipeline_executable_properties, fetch the internal representation
 * called "name" and print it to the test output. With remove_encoding set,
 * everything from ';' to end-of-line is blanked out — presumably the
 * instruction-encoding comment in disassembly; verify against callers. */
void print_pipeline_ir(VkDevice device, VkPipeline pipeline, VkShaderStageFlagBits stages,
                       const char *name, bool remove_encoding)
{
   uint32_t executable_count = 16;
   VkPipelineExecutablePropertiesKHR executables[16];
   VkPipelineInfoKHR pipeline_info;
   pipeline_info.sType = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR;
   pipeline_info.pNext = NULL;
   pipeline_info.pipeline = pipeline;
   VkResult result = GetPipelineExecutablePropertiesKHR(device, &pipeline_info, &executable_count, executables);
   assert(result == VK_SUCCESS);

   /* Find the executable whose stage mask matches exactly. */
   uint32_t executable = 0;
   for (; executable < executable_count; executable++) {
      if (executables[executable].stages == stages)
         break;
   }
   assert(executable != executable_count);

   VkPipelineExecutableInfoKHR exec_info;
   exec_info.sType = VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR;
   exec_info.pNext = NULL;
   exec_info.pipeline = pipeline;
   exec_info.executableIndex = executable;

   /* Two-call protocol: with pData == NULL (from the memset) the first call
    * only fills in each IR's dataSize; the second call below copies the
    * actual text into the buffer we allocate. */
   uint32_t ir_count = 16;
   VkPipelineExecutableInternalRepresentationKHR ir[16];
   memset(ir, 0, sizeof(ir));
   result = GetPipelineExecutableInternalRepresentationsKHR(device, &exec_info, &ir_count, ir);
   assert(result == VK_SUCCESS);

   for (unsigned i = 0; i < ir_count; i++) {
      if (strcmp(ir[i].name, name))
         continue;

      char *data = (char*)malloc(ir[i].dataSize);
      ir[i].pData = data;
      result = GetPipelineExecutableInternalRepresentationsKHR(device, &exec_info, &ir_count, ir);
      assert(result == VK_SUCCESS);

      if (remove_encoding) {
         /* NOTE(review): assumes the IR data is NUL-terminated text — TODO
          * confirm for all drivers/IR kinds. */
         for (char *c = data; *c; c++) {
            if (*c == ';') {
               for (; *c && *c != '\n'; c++)
                  *c = ' ';
            }
         }
      }

      fprintf(output, "%s", data);
      free(data);
      return;
   }
}
340
341 VkShaderModule __qoCreateShaderModule(VkDevice dev, const QoShaderModuleCreateInfo *info)
342 {
343 VkShaderModuleCreateInfo module_info;
344 module_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
345 module_info.pNext = NULL;
346 module_info.flags = 0;
347 module_info.codeSize = info->spirvSize;
348 module_info.pCode = (const uint32_t*)info->pSpirv;
349
350 VkShaderModule module;
351 VkResult result = CreateShaderModule(dev, &module_info, NULL, &module);
352 assert(result == VK_SUCCESS);
353
354 return module;
355 }
356
/* Zero-initialize every member in one go, then set the few non-zero
 * defaults. NOTE(review): memset(this) relies on the class holding only
 * trivially-copyable Vulkan structs/handles — confirm if members change. */
PipelineBuilder::PipelineBuilder(VkDevice dev) {
   memset(this, 0, sizeof(*this));
   topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
   device = dev;
}
362
/* Destroy every Vulkan object this builder created: the pipeline, any
 * shader modules it owns (flagged in owned_stages by add_stage), the
 * pipeline layout, the descriptor set layouts and the render pass. */
PipelineBuilder::~PipelineBuilder()
{
   DestroyPipeline(device, pipeline, NULL);

   /* Compute pipelines only ever use stages[0]. */
   for (unsigned i = 0; i < (is_compute() ? 1 : gfx_pipeline_info.stageCount); i++) {
      VkPipelineShaderStageCreateInfo *stage_info = &stages[i];
      if (owned_stages & stage_info->stage)
         DestroyShaderModule(device, stage_info->module, NULL);
   }

   DestroyPipelineLayout(device, pipeline_layout, NULL);

   /* One layout was created per bit set in desc_layouts_used. */
   for (unsigned i = 0; i < util_bitcount64(desc_layouts_used); i++)
      DestroyDescriptorSetLayout(device, desc_layouts[i], NULL);

   DestroyRenderPass(device, render_pass, NULL);
}
380
381 void PipelineBuilder::add_desc_binding(VkShaderStageFlags stage_flags, uint32_t layout,
382 uint32_t binding, VkDescriptorType type, uint32_t count)
383 {
384 desc_layouts_used |= 1ull << layout;
385 desc_bindings[layout][num_desc_bindings[layout]++] = {binding, type, count, stage_flags, NULL};
386 }
387
388 void PipelineBuilder::add_vertex_binding(uint32_t binding, uint32_t stride, VkVertexInputRate rate)
389 {
390 vs_bindings[vs_input.vertexBindingDescriptionCount++] = {binding, stride, rate};
391 }
392
393 void PipelineBuilder::add_vertex_attribute(uint32_t location, uint32_t binding, VkFormat format, uint32_t offset)
394 {
395 vs_attributes[vs_input.vertexAttributeDescriptionCount++] = {location, binding, format, offset};
396 }
397
398 void PipelineBuilder::add_resource_decls(QoShaderModuleCreateInfo *module)
399 {
400 for (unsigned i = 0; i < module->declarationCount; i++) {
401 const QoShaderDecl *decl = &module->pDeclarations[i];
402 switch (decl->decl_type) {
403 case QoShaderDeclType_ubo:
404 add_desc_binding(module->stage, decl->set, decl->binding, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
405 break;
406 case QoShaderDeclType_ssbo:
407 add_desc_binding(module->stage, decl->set, decl->binding, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
408 break;
409 case QoShaderDeclType_img_buf:
410 add_desc_binding(module->stage, decl->set, decl->binding, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
411 break;
412 case QoShaderDeclType_img:
413 add_desc_binding(module->stage, decl->set, decl->binding, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
414 break;
415 case QoShaderDeclType_tex_buf:
416 add_desc_binding(module->stage, decl->set, decl->binding, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER);
417 break;
418 case QoShaderDeclType_combined:
419 add_desc_binding(module->stage, decl->set, decl->binding, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
420 break;
421 case QoShaderDeclType_tex:
422 add_desc_binding(module->stage, decl->set, decl->binding, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE);
423 break;
424 case QoShaderDeclType_samp:
425 add_desc_binding(module->stage, decl->set, decl->binding, VK_DESCRIPTOR_TYPE_SAMPLER);
426 break;
427 default:
428 break;
429 }
430 }
431 }
432
433 void PipelineBuilder::add_io_decls(QoShaderModuleCreateInfo *module)
434 {
435 unsigned next_vtx_offset = 0;
436 for (unsigned i = 0; i < module->declarationCount; i++) {
437 const QoShaderDecl *decl = &module->pDeclarations[i];
438 switch (decl->decl_type) {
439 case QoShaderDeclType_in:
440 if (module->stage == VK_SHADER_STAGE_VERTEX_BIT) {
441 if (!strcmp(decl->type, "float") || decl->type[0] == 'v')
442 add_vertex_attribute(decl->location, 0, VK_FORMAT_R32G32B32A32_SFLOAT, next_vtx_offset);
443 else if (decl->type[0] == 'u')
444 add_vertex_attribute(decl->location, 0, VK_FORMAT_R32G32B32A32_UINT, next_vtx_offset);
445 else if (decl->type[0] == 'i')
446 add_vertex_attribute(decl->location, 0, VK_FORMAT_R32G32B32A32_SINT, next_vtx_offset);
447 next_vtx_offset += 16;
448 }
449 break;
450 case QoShaderDeclType_out:
451 if (module->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
452 if (!strcmp(decl->type, "float") || decl->type[0] == 'v')
453 color_outputs[decl->location] = VK_FORMAT_R32G32B32A32_SFLOAT;
454 else if (decl->type[0] == 'u')
455 color_outputs[decl->location] = VK_FORMAT_R32G32B32A32_UINT;
456 else if (decl->type[0] == 'i')
457 color_outputs[decl->location] = VK_FORMAT_R32G32B32A32_SINT;
458 }
459 break;
460 default:
461 break;
462 }
463 }
464 if (next_vtx_offset)
465 add_vertex_binding(0, next_vtx_offset);
466 }
467
468 void PipelineBuilder::add_stage(VkShaderStageFlagBits stage, VkShaderModule module, const char *name)
469 {
470 VkPipelineShaderStageCreateInfo *stage_info;
471 if (stage == VK_SHADER_STAGE_COMPUTE_BIT)
472 stage_info = &stages[0];
473 else
474 stage_info = &stages[gfx_pipeline_info.stageCount++];
475 stage_info->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
476 stage_info->pNext = NULL;
477 stage_info->flags = 0;
478 stage_info->stage = stage;
479 stage_info->module = module;
480 stage_info->pName = name;
481 stage_info->pSpecializationInfo = NULL;
482 owned_stages |= stage;
483 }
484
/* Register a vertex + fragment shader pair from already-created modules. */
void PipelineBuilder::add_vsfs(VkShaderModule vs, VkShaderModule fs)
{
   add_stage(VK_SHADER_STAGE_VERTEX_BIT, vs);
   add_stage(VK_SHADER_STAGE_FRAGMENT_BIT, fs);
}
490
/* Create VS/FS modules from Qo descriptions, then import their resource and
 * IO declarations (descriptor bindings, vertex attributes, color outputs). */
void PipelineBuilder::add_vsfs(QoShaderModuleCreateInfo vs, QoShaderModuleCreateInfo fs)
{
   add_vsfs(__qoCreateShaderModule(device, &vs), __qoCreateShaderModule(device, &fs));
   add_resource_decls(&vs);
   add_io_decls(&vs);
   add_resource_decls(&fs);
   add_io_decls(&fs);
}
499
/* Register a compute shader from an already-created module. */
void PipelineBuilder::add_cs(VkShaderModule cs)
{
   add_stage(VK_SHADER_STAGE_COMPUTE_BIT, cs);
}
504
/* Create a compute module from a Qo description and import its resource
 * declarations (compute shaders have no vertex/color IO). */
void PipelineBuilder::add_cs(QoShaderModuleCreateInfo cs)
{
   add_cs(__qoCreateShaderModule(device, &cs));
   add_resource_decls(&cs);
}
510
/* A builder is a compute pipeline iff no graphics stage was ever added
 * (add_stage only bumps stageCount for non-compute stages). */
bool PipelineBuilder::is_compute() {
   return gfx_pipeline_info.stageCount == 0;
}
514
/* Create the compute pipeline from stages[0] and the already-created layout,
 * requesting IR capture so print_ir() can query it later. */
void PipelineBuilder::create_compute_pipeline() {
   VkComputePipelineCreateInfo create_info;
   create_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
   create_info.pNext = NULL;
   /* Needed for GetPipelineExecutableInternalRepresentationsKHR. */
   create_info.flags = VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR;
   create_info.stage = stages[0];
   create_info.layout = pipeline_layout;
   create_info.basePipelineHandle = VK_NULL_HANDLE;
   create_info.basePipelineIndex = 0;

   VkResult result = CreateComputePipelines(device, VK_NULL_HANDLE, 1, &create_info, NULL, &pipeline);
   assert(result == VK_SUCCESS);
}
528
/* Build every piece of Vulkan state needed for a graphics pipeline — render
 * pass attachments from color_outputs[]/ds_output, fixed-function state, a
 * one-subpass render pass — then create the pipeline with IR capture enabled
 * so print_ir() can query its internal representations. */
void PipelineBuilder::create_graphics_pipeline() {
   /* create the create infos */
   if (!samples)
      samples = VK_SAMPLE_COUNT_1_BIT;

   /* One color attachment per format registered in color_outputs[]. */
   unsigned num_color_attachments = 0;
   VkPipelineColorBlendAttachmentState blend_attachment_states[16];
   VkAttachmentReference color_attachments[16];
   VkAttachmentDescription attachment_descs[17];
   for (unsigned i = 0; i < 16; i++) {
      if (color_outputs[i] == VK_FORMAT_UNDEFINED)
         continue;

      VkAttachmentDescription *desc = &attachment_descs[num_color_attachments];
      desc->flags = 0;
      desc->format = color_outputs[i];
      desc->samples = samples;
      desc->loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
      desc->storeOp = VK_ATTACHMENT_STORE_OP_STORE;
      desc->stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
      desc->stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
      desc->initialLayout = VK_IMAGE_LAYOUT_GENERAL;
      desc->finalLayout = VK_IMAGE_LAYOUT_GENERAL;

      VkAttachmentReference *ref = &color_attachments[num_color_attachments];
      ref->attachment = num_color_attachments;
      ref->layout = VK_IMAGE_LAYOUT_GENERAL;

      /* No blending; write all channels. */
      VkPipelineColorBlendAttachmentState *blend = &blend_attachment_states[num_color_attachments];
      blend->blendEnable = false;
      blend->colorWriteMask = VK_COLOR_COMPONENT_R_BIT |
                              VK_COLOR_COMPONENT_G_BIT |
                              VK_COLOR_COMPONENT_B_BIT |
                              VK_COLOR_COMPONENT_A_BIT;

      num_color_attachments++;
   }

   /* The optional depth/stencil attachment comes after the color ones. */
   unsigned num_attachments = num_color_attachments;
   VkAttachmentReference ds_attachment;
   if (ds_output != VK_FORMAT_UNDEFINED) {
      VkAttachmentDescription *desc = &attachment_descs[num_attachments];
      desc->flags = 0;
      desc->format = ds_output;
      desc->samples = samples;
      desc->loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
      desc->storeOp = VK_ATTACHMENT_STORE_OP_STORE;
      desc->stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
      desc->stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
      desc->initialLayout = VK_IMAGE_LAYOUT_GENERAL;
      desc->finalLayout = VK_IMAGE_LAYOUT_GENERAL;

      ds_attachment.attachment = num_color_attachments;
      ds_attachment.layout = VK_IMAGE_LAYOUT_GENERAL;

      num_attachments++;
   }

   /* The binding/attribute arrays and their counts were filled in earlier by
    * add_vertex_binding()/add_vertex_attribute(). */
   vs_input.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
   vs_input.pNext = NULL;
   vs_input.flags = 0;
   vs_input.pVertexBindingDescriptions = vs_bindings;
   vs_input.pVertexAttributeDescriptions = vs_attributes;

   VkPipelineInputAssemblyStateCreateInfo assembly_state;
   assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
   assembly_state.pNext = NULL;
   assembly_state.flags = 0;
   assembly_state.topology = topology;
   assembly_state.primitiveRestartEnable = false;

   VkPipelineTessellationStateCreateInfo tess_state;
   tess_state.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
   tess_state.pNext = NULL;
   tess_state.flags = 0;
   tess_state.patchControlPoints = patch_size;

   /* Viewport and scissor are dynamic state (see dynamic_states below), so
    * only the counts matter here. */
   VkPipelineViewportStateCreateInfo viewport_state;
   viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
   viewport_state.pNext = NULL;
   viewport_state.flags = 0;
   viewport_state.viewportCount = 1;
   viewport_state.pViewports = NULL;
   viewport_state.scissorCount = 1;
   viewport_state.pScissors = NULL;

   VkPipelineRasterizationStateCreateInfo rasterization_state;
   rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
   rasterization_state.pNext = NULL;
   rasterization_state.flags = 0;
   rasterization_state.depthClampEnable = false;
   rasterization_state.rasterizerDiscardEnable = false;
   rasterization_state.polygonMode = VK_POLYGON_MODE_FILL;
   rasterization_state.cullMode = VK_CULL_MODE_NONE;
   rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
   rasterization_state.depthBiasEnable = false;
   rasterization_state.lineWidth = 1.0;

   VkPipelineMultisampleStateCreateInfo ms_state;
   ms_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
   ms_state.pNext = NULL;
   ms_state.flags = 0;
   ms_state.rasterizationSamples = samples;
   ms_state.sampleShadingEnable = sample_shading_enable;
   ms_state.minSampleShading = min_sample_shading;
   /* sample_mask only has to outlive the CreateGraphicsPipelines call at the
    * end of this function. */
   VkSampleMask sample_mask = 0xffffffff;
   ms_state.pSampleMask = &sample_mask;
   ms_state.alphaToCoverageEnable = false;
   ms_state.alphaToOneEnable = false;

   VkPipelineDepthStencilStateCreateInfo ds_state;
   ds_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
   ds_state.pNext = NULL;
   ds_state.flags = 0;
   ds_state.depthTestEnable = ds_output != VK_FORMAT_UNDEFINED;
   ds_state.depthWriteEnable = true;
   ds_state.depthCompareOp = VK_COMPARE_OP_ALWAYS;
   ds_state.depthBoundsTestEnable = false;
   ds_state.stencilTestEnable = true;
   ds_state.front.failOp = VK_STENCIL_OP_KEEP;
   ds_state.front.passOp = VK_STENCIL_OP_REPLACE;
   ds_state.front.depthFailOp = VK_STENCIL_OP_REPLACE;
   ds_state.front.compareOp = VK_COMPARE_OP_ALWAYS;
   /* NOTE(review): trailing ',' is the comma operator, not a typo with
    * effect — harmless, but should be ';'. */
   ds_state.front.compareMask = 0xffffffff,
   ds_state.front.reference = 0;
   ds_state.back = ds_state.front;

   VkPipelineColorBlendStateCreateInfo color_blend_state;
   color_blend_state.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
   color_blend_state.pNext = NULL;
   color_blend_state.flags = 0;
   color_blend_state.logicOpEnable = false;
   color_blend_state.attachmentCount = num_color_attachments;
   color_blend_state.pAttachments = blend_attachment_states;

   VkDynamicState dynamic_states[9] = {
      VK_DYNAMIC_STATE_VIEWPORT,
      VK_DYNAMIC_STATE_SCISSOR,
      VK_DYNAMIC_STATE_LINE_WIDTH,
      VK_DYNAMIC_STATE_DEPTH_BIAS,
      VK_DYNAMIC_STATE_BLEND_CONSTANTS,
      VK_DYNAMIC_STATE_DEPTH_BOUNDS,
      VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
      VK_DYNAMIC_STATE_STENCIL_WRITE_MASK,
      VK_DYNAMIC_STATE_STENCIL_REFERENCE
   };

   VkPipelineDynamicStateCreateInfo dynamic_state;
   dynamic_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
   dynamic_state.pNext = NULL;
   dynamic_state.flags = 0;
   dynamic_state.dynamicStateCount = sizeof(dynamic_states) / sizeof(VkDynamicState);
   dynamic_state.pDynamicStates = dynamic_states;

   gfx_pipeline_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
   gfx_pipeline_info.pNext = NULL;
   /* Needed for GetPipelineExecutableInternalRepresentationsKHR. */
   gfx_pipeline_info.flags = VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR;
   gfx_pipeline_info.pVertexInputState = &vs_input;
   gfx_pipeline_info.pInputAssemblyState = &assembly_state;
   gfx_pipeline_info.pTessellationState = &tess_state;
   gfx_pipeline_info.pViewportState = &viewport_state;
   gfx_pipeline_info.pRasterizationState = &rasterization_state;
   gfx_pipeline_info.pMultisampleState = &ms_state;
   gfx_pipeline_info.pDepthStencilState = &ds_state;
   gfx_pipeline_info.pColorBlendState = &color_blend_state;
   gfx_pipeline_info.pDynamicState = &dynamic_state;
   gfx_pipeline_info.subpass = 0;

   /* create the objects used to create the pipeline */
   VkSubpassDescription subpass;
   subpass.flags = 0;
   subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
   subpass.inputAttachmentCount = 0;
   subpass.pInputAttachments = NULL;
   subpass.colorAttachmentCount = num_color_attachments;
   subpass.pColorAttachments = color_attachments;
   subpass.pResolveAttachments = NULL;
   subpass.pDepthStencilAttachment = ds_output == VK_FORMAT_UNDEFINED ? NULL : &ds_attachment;
   subpass.preserveAttachmentCount = 0;
   subpass.pPreserveAttachments = NULL;

   VkRenderPassCreateInfo renderpass_info;
   renderpass_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
   renderpass_info.pNext = NULL;
   renderpass_info.flags = 0;
   renderpass_info.attachmentCount = num_attachments;
   renderpass_info.pAttachments = attachment_descs;
   renderpass_info.subpassCount = 1;
   renderpass_info.pSubpasses = &subpass;
   renderpass_info.dependencyCount = 0;
   renderpass_info.pDependencies = NULL;

   VkResult result = CreateRenderPass(device, &renderpass_info, NULL, &render_pass);
   assert(result == VK_SUCCESS);

   gfx_pipeline_info.layout = pipeline_layout;
   gfx_pipeline_info.renderPass = render_pass;

   /* create the pipeline */
   gfx_pipeline_info.pStages = stages;

   result = CreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &gfx_pipeline_info, NULL, &pipeline);
   assert(result == VK_SUCCESS);
}
733
/* Create the descriptor set layouts and pipeline layout recorded by the
 * add_* helpers, then dispatch to the compute or graphics pipeline path. */
void PipelineBuilder::create_pipeline() {
   /* One set layout per bit set in desc_layouts_used, packed densely into
    * desc_layouts[] (same order the destructor assumes). */
   unsigned num_desc_layouts = 0;
   for (unsigned i = 0; i < 64; i++) {
      if (!(desc_layouts_used & (1ull << i)))
         continue;

      VkDescriptorSetLayoutCreateInfo desc_layout_info;
      desc_layout_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
      desc_layout_info.pNext = NULL;
      desc_layout_info.flags = 0;
      desc_layout_info.bindingCount = num_desc_bindings[i];
      desc_layout_info.pBindings = desc_bindings[i];

      VkResult result = CreateDescriptorSetLayout(device, &desc_layout_info, NULL, &desc_layouts[num_desc_layouts]);
      assert(result == VK_SUCCESS);
      num_desc_layouts++;
   }

   VkPipelineLayoutCreateInfo pipeline_layout_info;
   pipeline_layout_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
   pipeline_layout_info.pNext = NULL;
   pipeline_layout_info.flags = 0;
   pipeline_layout_info.pushConstantRangeCount = 1;
   pipeline_layout_info.pPushConstantRanges = &push_constant_range;
   pipeline_layout_info.setLayoutCount = num_desc_layouts;
   pipeline_layout_info.pSetLayouts = desc_layouts;

   VkResult result = CreatePipelineLayout(device, &pipeline_layout_info, NULL, &pipeline_layout);
   assert(result == VK_SUCCESS);

   if (is_compute())
      create_compute_pipeline();
   else
      create_graphics_pipeline();
}
769
/* Lazily build the pipeline on first use, then print the internal
 * representation named "name" for the given stage(s) to the test output. */
void PipelineBuilder::print_ir(VkShaderStageFlagBits stages, const char *name, bool remove_encoding)
{
   if (!pipeline)
      create_pipeline();
   print_pipeline_ir(device, pipeline, stages, name, remove_encoding);
}