anv: device: calculate compute thread numbers using subslice numbers
[mesa.git] src/intel/vulkan/genX_pipeline.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

VkResult
genX(compute_pipeline_create)(
    VkDevice                                    _device,
    struct anv_pipeline_cache *                 cache,
    const VkComputePipelineCreateInfo*          pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipeline*                                 pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_physical_device *physical_device =
      &device->instance->physicalDevice;
   struct anv_pipeline *pipeline;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);

   pipeline = anv_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pipeline == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pipeline->device = device;
   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);

   pipeline->blend_state.map = NULL;

   result = anv_reloc_list_init(&pipeline->batch_relocs,
                                pAllocator ? pAllocator : &device->alloc);
   if (result != VK_SUCCESS) {
      anv_free2(&device->alloc, pAllocator, pipeline);
      return result;
   }
   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
   pipeline->batch.relocs = &pipeline->batch_relocs;

   /* When we free the pipeline, we detect stages based on the NULL status
    * of various prog_data pointers.  Make them NULL by default.
    */
   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));

   pipeline->vs_simd8 = NO_KERNEL;
   pipeline->vs_vec4 = NO_KERNEL;
   pipeline->gs_kernel = NO_KERNEL;

   pipeline->active_stages = 0;

   pipeline->needs_data_cache = false;

   assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE_BIT);
   ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->stage.module);
   result = anv_pipeline_compile_cs(pipeline, cache, pCreateInfo, module,
                                    pCreateInfo->stage.pName,
                                    pCreateInfo->stage.pSpecializationInfo);
   if (result != VK_SUCCESS) {
      anv_free2(&device->alloc, pAllocator, pipeline);
      return result;
   }

   pipeline->use_repclear = false;

   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);

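   /* Choose an L3 cache partitioning.  The flag tells the helper whether the
    * shader uses shared local memory (total_shared > 0), which has to be
    * given space in the L3 configuration.
    */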
   anv_pipeline_setup_l3_config(pipeline, cs_prog_data->base.total_shared > 0);

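   /* Compute the execution mask for the right-most (last) SIMD thread of a
    * workgroup.  When the workgroup size is not a multiple of the SIMD
    * width, only the low 'remainder' channels are enabled in that thread;
    * otherwise every channel is enabled.
    */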
   uint32_t group_size = cs_prog_data->local_size[0] *
      cs_prog_data->local_size[1] * cs_prog_data->local_size[2];
   uint32_t remainder = group_size & (cs_prog_data->simd_size - 1);

   if (remainder > 0)
      pipeline->cs_right_mask = ~0u >> (32 - remainder);
   else
      pipeline->cs_right_mask = ~0u >> (32 - cs_prog_data->simd_size);

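   /* CURBE space for push constants: the per-thread constants are replicated
    * for each HW thread of a workgroup, plus a single copy of the
    * cross-thread constants, in registers, rounded up to an even count.
    */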
   const uint32_t vfe_curbe_allocation =
      ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
            cs_prog_data->push.cross_thread.regs, 2);

   anv_batch_emit(&pipeline->batch, GENX(MEDIA_VFE_STATE), vfe) {
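      /* Scratch space backs register spills; the scratch pool returns a BO
       * sized so that every compute thread gets total_scratch bytes.
       */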
      vfe.ScratchSpaceBasePointer = (struct anv_address) {
         .bo = anv_scratch_pool_alloc(device, &device->scratch_pool,
                                      MESA_SHADER_COMPUTE,
                                      cs_prog_data->base.total_scratch),
         .offset = 0,
      };
      vfe.PerThreadScratchSpace = ffs(cs_prog_data->base.total_scratch / 2048);
#if GEN_GEN > 7
      vfe.StackSize = 0;
#else
      vfe.GPGPUMode = true;
#endif
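      /* max_cs_threads is now computed from the device's subslice count (see
       * the commit summary above); the hardware field is programmed as the
       * maximum thread count minus one.
       */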
      vfe.MaximumNumberofThreads = physical_device->max_cs_threads - 1;
      vfe.NumberofURBEntries = GEN_GEN <= 7 ? 0 : 2;
      vfe.ResetGatewayTimer = true;
#if GEN_GEN <= 8
      vfe.BypassGatewayControl = true;
#endif
      vfe.URBEntryAllocationSize = GEN_GEN <= 7 ? 0 : 2;
      vfe.CURBEAllocationSize = vfe_curbe_allocation;
   }

   *pPipeline = anv_pipeline_to_handle(pipeline);

   return VK_SUCCESS;
}