fd5d0659a9ed77203f9f334728cfd27178445cf1
[mesa.git] / src / amd / vulkan / radv_query.c
1 /*
2  * Copyright 2016 Red Hat Inc.
3 * Based on anv:
4 * Copyright © 2015 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23 * IN THE SOFTWARE.
24 */
25
26 #include <assert.h>
27 #include <stdbool.h>
28 #include <string.h>
29 #include <unistd.h>
30 #include <fcntl.h>
31
32 #include "radv_private.h"
33 #include "radv_cs.h"
34 #include "sid.h"
35
36 static unsigned get_max_db(struct radv_device *device)
37 {
38 unsigned num_db = device->physical_device->rad_info.num_render_backends;
39 MAYBE_UNUSED unsigned rb_mask = device->physical_device->rad_info.enabled_rb_mask;
40
41 if (device->physical_device->rad_info.chip_class == SI)
42 num_db = 8;
43 else
44 num_db = MAX2(8, num_db);
45
46 /* Otherwise we need to change the query reset procedure */
47 assert(rb_mask == ((1ull << num_db) - 1));
48
49 return num_db;
50 }
51
52 VkResult radv_CreateQueryPool(
53 VkDevice _device,
54 const VkQueryPoolCreateInfo* pCreateInfo,
55 const VkAllocationCallbacks* pAllocator,
56 VkQueryPool* pQueryPool)
57 {
58 RADV_FROM_HANDLE(radv_device, device, _device);
59 uint64_t size;
60 struct radv_query_pool *pool = vk_alloc2(&device->alloc, pAllocator,
61 sizeof(*pool), 8,
62 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
63
64 if (!pool)
65 return VK_ERROR_OUT_OF_HOST_MEMORY;
66
67
68 switch(pCreateInfo->queryType) {
69 case VK_QUERY_TYPE_OCCLUSION:
70 /* 16 bytes tmp. buffer as the compute packet writes 64 bits, but
71 * the app. may have 32 bits of space. */
72 pool->stride = 16 * get_max_db(device) + 16;
73 break;
74 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
75 pool->stride = 16 * 11;
76 break;
77 case VK_QUERY_TYPE_TIMESTAMP:
78 pool->stride = 8;
79 break;
80 default:
81 unreachable("creating unhandled query type");
82 }
83
84 pool->type = pCreateInfo->queryType;
85 pool->availability_offset = pool->stride * pCreateInfo->queryCount;
86 size = pool->availability_offset + 4 * pCreateInfo->queryCount;
87
88 pool->bo = device->ws->buffer_create(device->ws, size,
89 64, RADEON_DOMAIN_GTT, 0);
90
91 if (!pool->bo) {
92 vk_free2(&device->alloc, pAllocator, pool);
93 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
94 }
95
96 pool->ptr = device->ws->buffer_map(pool->bo);
97
98 if (!pool->ptr) {
99 device->ws->buffer_destroy(pool->bo);
100 vk_free2(&device->alloc, pAllocator, pool);
101 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
102 }
103 memset(pool->ptr, 0, size);
104
105 *pQueryPool = radv_query_pool_to_handle(pool);
106 return VK_SUCCESS;
107 }
108
109 void radv_DestroyQueryPool(
110 VkDevice _device,
111 VkQueryPool _pool,
112 const VkAllocationCallbacks* pAllocator)
113 {
114 RADV_FROM_HANDLE(radv_device, device, _device);
115 RADV_FROM_HANDLE(radv_query_pool, pool, _pool);
116
117 if (!pool)
118 return;
119
120 device->ws->buffer_destroy(pool->bo);
121 vk_free2(&device->alloc, pAllocator, pool);
122 }
123
124 VkResult radv_GetQueryPoolResults(
125 VkDevice _device,
126 VkQueryPool queryPool,
127 uint32_t firstQuery,
128 uint32_t queryCount,
129 size_t dataSize,
130 void* pData,
131 VkDeviceSize stride,
132 VkQueryResultFlags flags)
133 {
134 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
135 char *data = pData;
136 VkResult result = VK_SUCCESS;
137
138 for(unsigned i = 0; i < queryCount; ++i, data += stride) {
139 char *dest = data;
140 unsigned query = firstQuery + i;
141 char *src = pool->ptr + query * pool->stride;
142 uint32_t available;
143
144 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
145 while(!*(volatile uint32_t*)(pool->ptr + pool->availability_offset + 4 * query))
146 ;
147 }
148
149 if (!*(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query) &&
150 !(flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
151 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
152 *(uint32_t*)dest = 0;
153 result = VK_NOT_READY;
154 continue;
155
156 }
157
158 available = *(uint32_t*)(pool->ptr + pool->availability_offset + 4 * query);
159 switch (pool->type) {
160 case VK_QUERY_TYPE_TIMESTAMP:
161 if (flags & VK_QUERY_RESULT_64_BIT) {
162 *(uint64_t*)dest = *(uint64_t*)src;
163 dest += 8;
164 } else {
165 *(uint32_t*)dest = *(uint32_t*)src;
166 dest += 4;
167 }
168 break;
169 case VK_QUERY_TYPE_OCCLUSION: {
170 uint64_t result = *(uint64_t*)(src + pool->stride - 16);
171
172 if (flags & VK_QUERY_RESULT_64_BIT) {
173 *(uint64_t*)dest = result;
174 dest += 8;
175 } else {
176 *(uint32_t*)dest = result;
177 dest += 4;
178 }
179 break;
180 default:
181 unreachable("trying to get results of unhandled query type");
182 }
183 }
184
185 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
186 *(uint32_t*)dest = available;
187 dest += 4;
188 }
189 }
190
191 return result;
192 }
193
/* vkCmdCopyQueryPoolResults: emit CP packets that copy query results (and
 * optionally their availability words) from the pool BO into dstBuffer.
 * All waits and copies execute on the ME engine.
 */
void radv_CmdCopyQueryPoolResults(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    firstQuery,
	uint32_t                                    queryCount,
	VkBuffer                                    dstBuffer,
	VkDeviceSize                                dstOffset,
	VkDeviceSize                                stride,
	VkQueryResultFlags                          flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	uint64_t dest_va = cmd_buffer->device->ws->buffer_get_va(dst_buffer->bo);
	dest_va += dst_buffer->offset + dstOffset;

	/* Keep both BOs resident for this command stream. */
	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8);
	cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, dst_buffer->bo, 8);
	cmd_buffer->no_draws = false;

	for(unsigned i = 0; i < queryCount; ++i, dest_va += stride) {
		unsigned query = firstQuery + i;
		uint64_t local_src_va = va + query * pool->stride;
		/* Size of one result element in the destination buffer. */
		unsigned elem_size = (flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4;

		/* Worst case per iteration: WAIT_REG_MEM (7 dw) plus two
		 * COPY_DATA packets (6 dw each); 26 leaves headroom. */
		MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 26);

		if (flags & VK_QUERY_RESULT_WAIT_BIT) {
			/* TODO, not sure if there is any case where we won't always be ready yet */
			uint64_t avail_va = va + pool->availability_offset + 4 * query;


			/* This waits on the ME. All copies below are done on the ME */
			radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
			radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
			radeon_emit(cs, avail_va);
			radeon_emit(cs, avail_va >> 32);
			radeon_emit(cs, 1); /* reference value */
			radeon_emit(cs, 0xffffffff); /* mask */
			radeon_emit(cs, 4); /* poll interval */
		}

		switch (pool->type) {
		case VK_QUERY_TYPE_OCCLUSION:
			/* Point at the pre-summed result at the end of the
			 * slot (written by radv_CmdEndQuery), then copy it
			 * exactly like a timestamp. */
			local_src_va += pool->stride - 16;

			/* fallthrough */
		case VK_QUERY_TYPE_TIMESTAMP:
			/* COPY_DATA_COUNT_SEL selects a 64-bit copy when the
			 * caller asked for 64-bit results. */
			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
					COPY_DATA_DST_SEL(COPY_DATA_MEM) |
					((flags & VK_QUERY_RESULT_64_BIT) ? COPY_DATA_COUNT_SEL : 0));
			radeon_emit(cs, local_src_va);
			radeon_emit(cs, local_src_va >> 32);
			radeon_emit(cs, dest_va);
			radeon_emit(cs, dest_va >> 32);
			break;
		default:
			unreachable("trying to get results of unhandled query type");
		}

		/* The flag could be still changed while the data copy is busy and we
		 * then might have invalid data, but a ready flag. However, the availability
		 * writes happen on the ME too, so they should be synchronized. Might need to
		 * revisit this with multiple queues.
		 */
		if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
			uint64_t avail_va = va + pool->availability_offset + 4 * query;
			uint64_t avail_dest_va = dest_va;
			/* Availability is copied into the element after the
			 * result; pipeline statistics are not implemented. */
			if (pool->type != VK_QUERY_TYPE_PIPELINE_STATISTICS)
				avail_dest_va += elem_size;
			else
				abort();

			radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
			radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
					COPY_DATA_DST_SEL(COPY_DATA_MEM));
			radeon_emit(cs, avail_va);
			radeon_emit(cs, avail_va >> 32);
			radeon_emit(cs, avail_dest_va);
			radeon_emit(cs, avail_dest_va >> 32);
		}

		assert(cs->cdw <= cdw_max);
	}

}
282
283 void radv_CmdResetQueryPool(
284 VkCommandBuffer commandBuffer,
285 VkQueryPool queryPool,
286 uint32_t firstQuery,
287 uint32_t queryCount)
288 {
289 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
290 RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
291 uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
292
293 cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8);
294
295 si_cp_dma_clear_buffer(cmd_buffer, va + firstQuery * pool->stride,
296 queryCount * pool->stride, 0);
297 si_cp_dma_clear_buffer(cmd_buffer, va + pool->availability_offset + firstQuery * 4,
298 queryCount * 4, 0);
299 }
300
/* vkCmdBeginQuery: start a query. Only occlusion queries are handled here;
 * a ZPASS_DONE event makes the render backends write their "begin" Z-pass
 * counters into the query's slot.
 */
void radv_CmdBeginQuery(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query,
	VkQueryControlFlags                         flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	va += pool->stride * query;

	/* Keep the pool BO resident for this command stream. */
	cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8);
	cmd_buffer->no_draws = false;

	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 7);

		/* Turning on the first query enables DB sample counting. */
		++cmd_buffer->state.active_occlusion_queries;
		if (cmd_buffer->state.active_occlusion_queries == 1)
			radv_set_db_count_control(cmd_buffer);

		/* ZPASS_DONE writes the begin counters to the slot at 'va'. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		break;
	default:
		unreachable("beginning unhandled query type");
	}
}
333
334
/* vkCmdEndQuery: finish a query. For occlusion queries: emit the "end"
 * ZPASS_DONE counters, ask the CP to sum begin/end pairs into the result
 * location at the end of the slot, then mark the query available.
 */
void radv_CmdEndQuery(
	VkCommandBuffer                             commandBuffer,
	VkQueryPool                                 queryPool,
	uint32_t                                    query)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	uint64_t avail_va = va + pool->availability_offset + 4 * query;
	va += pool->stride * query;

	/* Keep the pool BO resident for this command stream. */
	cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8);
	cmd_buffer->no_draws = false;

	switch (pool->type) {
	case VK_QUERY_TYPE_OCCLUSION:
		radeon_check_space(cmd_buffer->device->ws, cs, 14);

		/* Ending the last query disables DB sample counting again. */
		cmd_buffer->state.active_occlusion_queries--;
		if (cmd_buffer->state.active_occlusion_queries == 0)
			radv_set_db_count_control(cmd_buffer);

		/* End counters go at va + 8, interleaved with the begin
		 * counters written by radv_CmdBeginQuery at va. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
		radeon_emit(cs, va + 8);
		radeon_emit(cs, (va + 8) >> 32);

		/* OCCLUSION_QUERY packet: source = counter pairs at 'va',
		 * destination = the 16-byte result area at the end of the
		 * slot (read back by radv_GetQueryPoolResults). */
		radeon_emit(cs, PKT3(PKT3_OCCLUSION_QUERY, 3, 0));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		radeon_emit(cs, va + pool->stride - 16);
		radeon_emit(cs, (va + pool->stride - 16) >> 32);

		break;
	default:
		unreachable("ending unhandled query type");
	}

	radeon_check_space(cmd_buffer->device->ws, cs, 5);

	/* Set the availability word to 1 on the ME, after the copies above. */
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
			S_370_WR_CONFIRM(1) |
			S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, avail_va);
	radeon_emit(cs, avail_va >> 32);
	radeon_emit(cs, 1);
}
384
/* vkCmdWriteTimestamp: write a bottom-of-pipe GPU timestamp into the query
 * slot, then mark the query available. Compute (MEC) queues use RELEASE_MEM;
 * graphics queues use EVENT_WRITE_EOP.
 */
void radv_CmdWriteTimestamp(
	VkCommandBuffer                             commandBuffer,
	VkPipelineStageFlagBits                     pipelineStage,
	VkQueryPool                                 queryPool,
	uint32_t                                    query)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
	bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
	uint64_t avail_va = va + pool->availability_offset + 4 * query;
	uint64_t query_va = va + pool->stride * query;

	/* NOTE(review): BO priority is 5 here but 8 in the other entry points
	 * in this file — confirm the difference is intentional. */
	cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 5);
	cmd_buffer->no_draws = false;

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 12);

	if (mec) {
		/* Compute queue: RELEASE_MEM with data_sel = 3 (3 << 29),
		 * i.e. write the 64-bit GPU timestamp to query_va. */
		radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 5, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, 3 << 29);
		radeon_emit(cs, query_va);
		radeon_emit(cs, query_va >> 32);
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
	} else {
		/* Graphics queue: EVENT_WRITE_EOP; the data_sel field (3 << 29)
		 * shares a dword with the high address bits. */
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) | EVENT_INDEX(5));
		radeon_emit(cs, query_va);
		radeon_emit(cs, (3 << 29) | ((query_va >> 32) & 0xFFFF));
		radeon_emit(cs, 0);
		radeon_emit(cs, 0);
	}

	/* Mark the query available once the timestamp write has landed. */
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
	radeon_emit(cs, S_370_DST_SEL(mec ? V_370_MEM_ASYNC : V_370_MEMORY_SYNC) |
			S_370_WR_CONFIRM(1) |
			S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, avail_va);
	radeon_emit(cs, avail_va >> 32);
	radeon_emit(cs, 1);

	assert(cmd_buffer->cs->cdw <= cdw_max);
}