anv: Properly handle destroying NULL devices and instances
[mesa.git] src/intel/vulkan/genX_query.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

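/* struct anv_query_pool_slot is defined in anv_private.h; the offsets used
 * throughout this file imply its layout: a 64-bit begin value at offset 0,
 * a 64-bit end value at offset 8, and a 64-bit available flag at offset 16,
 * for 24 bytes per slot.
 */
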
VkResult genX(CreateQueryPool)(
    VkDevice                                    _device,
    const VkQueryPoolCreateInfo*                pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkQueryPool*                                pQueryPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_query_pool *pool;
   VkResult result;
   uint32_t slot_size;
   uint64_t size;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);

   switch (pCreateInfo->queryType) {
   case VK_QUERY_TYPE_OCCLUSION:
   case VK_QUERY_TYPE_TIMESTAMP:
      break;
   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
      return VK_ERROR_INCOMPATIBLE_DRIVER;
   default:
      assert(!"Invalid query type");
   }

   slot_size = sizeof(struct anv_query_pool_slot);
   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   pool->type = pCreateInfo->queryType;
   pool->slots = pCreateInfo->queryCount;

   size = pCreateInfo->queryCount * slot_size;
   result = anv_bo_init_new(&pool->bo, device, size);
   if (result != VK_SUCCESS)
      goto fail;

   pool->bo.map = anv_gem_mmap(device, pool->bo.gem_handle, 0, size, 0);
   if (pool->bo.map == NULL) {
      /* anv_gem_mmap returns NULL when the mmap ioctl fails; close the BO
       * we just created rather than handing out an unmapped pool.
       */
      anv_gem_close(device, pool->bo.gem_handle);
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }

   *pQueryPool = anv_query_pool_to_handle(pool);

   return VK_SUCCESS;

 fail:
   vk_free2(&device->alloc, pAllocator, pool);

   return result;
}

void genX(DestroyQueryPool)(
    VkDevice                                    _device,
    VkQueryPool                                 _pool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_query_pool, pool, _pool);

   if (!pool)
      return;

   anv_gem_munmap(pool->bo.map, pool->bo.size);
   anv_gem_close(device, pool->bo.gem_handle);
   vk_free2(&device->alloc, pAllocator, pool);
}

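/* Read results back on the CPU from the mapped pool BO. Results are packed
 * as the Vulkan spec requires: each query writes its value as a 32- or
 * 64-bit integer depending on VK_QUERY_RESULT_64_BIT, optionally followed
 * by an availability word of the same width when
 * VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set; consecutive queries are
 * separated by stride bytes.
 */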
VkResult genX(GetQueryPoolResults)(
    VkDevice                                    _device,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    size_t                                      dataSize,
    void*                                       pData,
    VkDeviceSize                                stride,
    VkQueryResultFlags                          flags)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   int64_t timeout = INT64_MAX;
   uint64_t result;
   int ret;

   assert(pool->type == VK_QUERY_TYPE_OCCLUSION ||
          pool->type == VK_QUERY_TYPE_TIMESTAMP);

   if (pData == NULL)
      return VK_SUCCESS;

   if (flags & VK_QUERY_RESULT_WAIT_BIT) {
      ret = anv_gem_wait(device, pool->bo.gem_handle, &timeout);
      if (ret == -1) {
         /* We don't know the real error. */
         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                          "gem_wait failed %m");
      }
   }

   void *data_end = pData + dataSize;
   struct anv_query_pool_slot *slot = pool->bo.map;

   if (!device->info.has_llc)
      anv_invalidate_range(slot, MIN2(queryCount * sizeof(*slot), pool->bo.size));

   for (uint32_t i = 0; i < queryCount; i++) {
      switch (pool->type) {
      case VK_QUERY_TYPE_OCCLUSION: {
         /* Occlusion result: the delta between the PS_DEPTH_COUNT snapshots
          * taken at vkCmdBeginQuery and vkCmdEndQuery.
          */
         result = slot[firstQuery + i].end - slot[firstQuery + i].begin;
         break;
      }
      case VK_QUERY_TYPE_PIPELINE_STATISTICS:
         unreachable("pipeline stats not supported");
      case VK_QUERY_TYPE_TIMESTAMP: {
         /* Timestamps use only the begin field of the slot. */
         result = slot[firstQuery + i].begin;
         break;
      }
      default:
         unreachable("invalid pool type");
      }

      if (flags & VK_QUERY_RESULT_64_BIT) {
         uint64_t *dst = pData;
         dst[0] = result;
         if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
            dst[1] = slot[firstQuery + i].available;
      } else {
         uint32_t *dst = pData;
         if (result > UINT32_MAX)
            result = UINT32_MAX;
         dst[0] = result;
         if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
            dst[1] = slot[firstQuery + i].available;
      }

      pData += stride;
      if (pData >= data_end)
         break;
   }

   return VK_SUCCESS;
}

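/* Occlusion queries work by snapshotting the hardware PS_DEPTH_COUNT
 * statistics counter into a slot's begin field at vkCmdBeginQuery and into
 * its end field at vkCmdEndQuery, using a PIPE_CONTROL with the
 * WritePSDepthCount post-sync operation; the result is end - begin.
 * Availability is signaled by a second PIPE_CONTROL that writes an
 * immediate 1 to the slot's available field.
 */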
static void
emit_ps_depth_count(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DestinationAddressType  = DAT_PPGTT;
      pc.PostSyncOperation       = WritePSDepthCount;
      pc.DepthStallEnable        = true;
      pc.Address                 = (struct anv_address) { bo, offset };

      if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4)
         pc.CommandStreamerStallEnable = true;
   }
}

static void
emit_query_availability(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DestinationAddressType  = DAT_PPGTT;
      pc.PostSyncOperation       = WriteImmediateData;
      pc.Address                 = (struct anv_address) { bo, offset };
      pc.ImmediateData           = 1;
   }
}

void genX(CmdResetQueryPool)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   for (uint32_t i = 0; i < queryCount; i++) {
      switch (pool->type) {
      case VK_QUERY_TYPE_OCCLUSION:
      case VK_QUERY_TYPE_TIMESTAMP: {
         anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdm) {
            sdm.Address = (struct anv_address) {
               .bo = &pool->bo,
               .offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot) +
                         offsetof(struct anv_query_pool_slot, available),
            };
            sdm.DataDWord0 = 0;
            sdm.DataDWord1 = 0;
         }
         break;
      }
      default:
         assert(!"Invalid query type");
      }
   }
}

void genX(CmdBeginQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query,
    VkQueryControlFlags                         flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   /* Workaround: When meta uses the pipeline with the VS disabled, it seems
    * that the pipelining of the depth write breaks. What we see is that
    * samples from the render pass clear leak into the first query
    * immediately after the clear. Emitting a PIPE_CONTROL with a post-sync
    * operation and DepthStallEnable seems to work around the issue.
    */
   if (cmd_buffer->state.need_query_wa) {
      cmd_buffer->state.need_query_wa = false;
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DepthCacheFlushEnable   = true;
         pc.DepthStallEnable        = true;
      }
   }

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      /* Snapshot PS_DEPTH_COUNT into the slot's begin field. */
      emit_ps_depth_count(cmd_buffer, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot));
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("invalid query type");
   }
}

void genX(CmdEndQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      /* Snapshot PS_DEPTH_COUNT into the slot's end field (offset 8), then
       * mark the query available (offset 16).
       */
      emit_ps_depth_count(cmd_buffer, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot) + 8);

      emit_query_availability(cmd_buffer, &pool->bo,
                              query * sizeof(struct anv_query_pool_slot) + 16);
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("invalid query type");
   }
}

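/* MMIO address of the render command streamer's TIMESTAMP register. */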
#define TIMESTAMP 0x2358

void genX(CmdWriteTimestamp)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlagBits                     pipelineStage,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   uint32_t offset = query * sizeof(struct anv_query_pool_slot);

   assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);

   switch (pipelineStage) {
   case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress  = TIMESTAMP;
         srm.MemoryAddress    = (struct anv_address) { &pool->bo, offset };
      }
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress  = TIMESTAMP + 4;
         srm.MemoryAddress    = (struct anv_address) { &pool->bo, offset + 4 };
      }
      break;

   default:
      /* Everything else is bottom-of-pipe */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DestinationAddressType  = DAT_PPGTT;
         pc.PostSyncOperation       = WriteTimestamp;
         pc.Address = (struct anv_address) { &pool->bo, offset };

         if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4)
            pc.CommandStreamerStallEnable = true;
      }
      break;
   }

   emit_query_availability(cmd_buffer, &pool->bo, offset + 16);
}

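/* The MI_MATH command and the command streamer ALU it drives only exist on
 * Haswell and Gen8+, so the ALU-based vkCmdCopyQueryPoolResults below is
 * compiled for those platforms only; earlier gens get the stub at the end
 * of the file.
 */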
#if GEN_GEN > 7 || GEN_IS_HASWELL

#define alu_opcode(v)   __gen_uint((v), 20, 31)
#define alu_operand1(v) __gen_uint((v), 10, 19)
#define alu_operand2(v) __gen_uint((v), 0, 9)
#define alu(opcode, operand1, operand2) \
   alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)

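/* Each MI_MATH ALU instruction is a single DWord: the opcode in bits 31:20,
 * operand 1 in bits 19:10, and operand 2 in bits 9:0. For example,
 * alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1) assembles "load ALU source A
 * from GPR1".
 */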
#define OPCODE_NOOP      0x000
#define OPCODE_LOAD      0x080
#define OPCODE_LOADINV   0x480
#define OPCODE_LOAD0     0x081
#define OPCODE_LOAD1     0x481
#define OPCODE_ADD       0x100
#define OPCODE_SUB       0x101
#define OPCODE_AND       0x102
#define OPCODE_OR        0x103
#define OPCODE_XOR       0x104
#define OPCODE_STORE     0x180
#define OPCODE_STOREINV  0x580

#define OPERAND_R0   0x00
#define OPERAND_R1   0x01
#define OPERAND_R2   0x02
#define OPERAND_R3   0x03
#define OPERAND_R4   0x04
#define OPERAND_SRCA 0x20
#define OPERAND_SRCB 0x21
#define OPERAND_ACCU 0x31
#define OPERAND_ZF   0x32
#define OPERAND_CF   0x33

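/* The command streamer's 64-bit general-purpose registers: GPR0 of the
 * render engine lives at MMIO 0x2600 and each register is 8 bytes wide.
 */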
#define CS_GPR(n) (0x2600 + (n) * 8)

/* Load a 64-bit value from bo+offset into a GPR as two 32-bit halves. */
static void
emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
                      struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress  = reg;
      lrm.MemoryAddress    = (struct anv_address) { bo, offset };
   }
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress  = reg + 4;
      lrm.MemoryAddress    = (struct anv_address) { bo, offset + 4 };
   }
}

static void
store_query_result(struct anv_batch *batch, uint32_t reg,
                   struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
{
   anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
      srm.RegisterAddress  = reg;
      srm.MemoryAddress    = (struct anv_address) { bo, offset };
   }

   if (flags & VK_QUERY_RESULT_64_BIT) {
      anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress  = reg + 4;
         srm.MemoryAddress    = (struct anv_address) { bo, offset + 4 };
      }
   }
}

void genX(CmdCopyQueryPoolResults)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
   uint32_t slot_offset, dst_offset;

   if (flags & VK_QUERY_RESULT_WAIT_BIT) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.CommandStreamerStallEnable = true;
         pc.StallAtPixelScoreboard     = true;
      }
   }

   dst_offset = buffer->offset + destOffset;
   for (uint32_t i = 0; i < queryCount; i++) {

      slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
      switch (pool->type) {
      case VK_QUERY_TYPE_OCCLUSION:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(0), &pool->bo, slot_offset);
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(1), &pool->bo, slot_offset + 8);

         /* FIXME: We need to clamp the result for 32 bit. */

         /* GPU-side subtraction: accu = SRCA (end, in R1) - SRCB (begin, in
          * R0), then store the accumulator into R2 for store_query_result()
          * below.
          */
         uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
         dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
         dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
         dw[3] = alu(OPCODE_SUB, 0, 0);
         dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
         break;

      case VK_QUERY_TYPE_TIMESTAMP:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(2), &pool->bo, slot_offset);
         break;

      default:
         unreachable("unhandled query type");
      }

      store_query_result(&cmd_buffer->batch,
                         CS_GPR(2), buffer->bo, dst_offset, flags);

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
         emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
                               &pool->bo, slot_offset + 16);
         if (flags & VK_QUERY_RESULT_64_BIT)
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 8, flags);
         else
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 4, flags);
      }

      dst_offset += destStride;
   }
}

#else
void genX(CmdCopyQueryPoolResults)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   anv_finishme("Queries not yet supported on Ivy Bridge");
}
#endif