VkQueryPool* pQueryPool)
{
ANV_FROM_HANDLE(anv_device, device, _device);
+ const struct anv_physical_device *pdevice = &device->instance->physicalDevice;
struct anv_query_pool *pool;
VkResult result;
- uint32_t slot_size;
- uint64_t size;
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
+ /* Query pool slots are made up of some number of 64-bit values packed
+ * tightly together. The first 64-bit value is always the "available" bit
+ * which is 0 when the query is unavailable and 1 when it is available.
+ * The 64-bit values that follow are determined by the type of query.
+ */
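+ /* For example, an occlusion query slot ends up laid out as
+ * { available, begin, end } (3 x 64 bits), and a pipeline statistics slot
+ * with N enabled counters as { available, stat0 begin, stat0 end, ... }
+ * (1 + 2*N x 64 bits).
+ */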
+ uint32_t uint64s_per_slot = 1;
+
+ VkQueryPipelineStatisticFlags pipeline_statistics = 0;
switch (pCreateInfo->queryType) {
case VK_QUERY_TYPE_OCCLUSION:
+ /* Occlusion queries have two values: begin and end. */
+ uint64s_per_slot += 2;
+ break;
case VK_QUERY_TYPE_TIMESTAMP:
+ /* Timestamps just have the one timestamp value */
+ uint64s_per_slot += 1;
break;
case VK_QUERY_TYPE_PIPELINE_STATISTICS:
- return VK_ERROR_INCOMPATIBLE_DRIVER;
+ pipeline_statistics = pCreateInfo->pipelineStatistics;
+ /* We're going to trust this field implicitly so we need to ensure that
+ * no unhandled extension bits leak in.
+ */
+ pipeline_statistics &= ANV_PIPELINE_STATISTICS_MASK;
+
+ /* Statistics queries have a min and max for every statistic */
+ uint64s_per_slot += 2 * util_bitcount(pipeline_statistics);
+ break;
default:
assert(!"Invalid query type");
}
- slot_size = sizeof(struct anv_query_pool_slot);
pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pool == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
pool->type = pCreateInfo->queryType;
+ pool->pipeline_statistics = pipeline_statistics;
+ pool->stride = uint64s_per_slot * sizeof(uint64_t);
pool->slots = pCreateInfo->queryCount;
- size = pCreateInfo->queryCount * slot_size;
+ uint64_t size = pool->slots * pool->stride;
result = anv_bo_init_new(&pool->bo, device, size);
if (result != VK_SUCCESS)
goto fail;
+ if (pdevice->supports_48bit_addresses)
+ pool->bo.flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
+ if (pdevice->use_softpin)
+ pool->bo.flags |= EXEC_OBJECT_PINNED;
+
+ if (pdevice->has_exec_async)
+ pool->bo.flags |= EXEC_OBJECT_ASYNC;
+
+ anv_vma_alloc(device, &pool->bo);
+
+ /* For query pools, we set the caching mode to I915_CACHING_CACHED. On LLC
+ * platforms, this does nothing. On non-LLC platforms, this means snooping
+ * which comes at a slight cost. However, the buffers aren't big, won't be
+ * written frequently, and trying to handle the flushing manually without
+ * doing too much flushing is extremely painful.
+ */
+ anv_gem_set_caching(device, pool->bo.gem_handle, I915_CACHING_CACHED);
+
pool->bo.map = anv_gem_mmap(device, pool->bo.gem_handle, 0, size, 0);
*pQueryPool = anv_query_pool_to_handle(pool);
return;
anv_gem_munmap(pool->bo.map, pool->bo.size);
+ anv_vma_free(device, &pool->bo);
anv_gem_close(device, pool->bo.gem_handle);
vk_free2(&device->alloc, pAllocator, pool);
}
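+/* Returns the address of the first 64-bit value (the availability word) of
+ * the given query slot in the pool's BO.
+ */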
+static struct anv_address
+anv_query_address(struct anv_query_pool *pool, uint32_t query)
+{
+ return (struct anv_address) {
+ .bo = &pool->bo,
+ .offset = query * pool->stride,
+ };
+}
+
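+/* Writes a single query value into the client buffer, as either a 64-bit or
+ * a 32-bit element depending on flags. value_index is in units of that
+ * element size.
+ */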
+static void
+cpu_write_query_result(void *dst_slot, VkQueryResultFlags flags,
+ uint32_t value_index, uint64_t result)
+{
+ if (flags & VK_QUERY_RESULT_64_BIT) {
+ uint64_t *dst64 = dst_slot;
+ dst64[value_index] = result;
+ } else {
+ uint32_t *dst32 = dst_slot;
+ dst32[value_index] = result;
+ }
+}
+
+static bool
+query_is_available(uint64_t *slot)
+{
+ return *(volatile uint64_t *)slot;
+}
+
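+/* Polls the availability word of a query slot until it becomes non-zero,
+ * using the BO busy status to detect a lost device or a query that was
+ * never submitted to the GPU.
+ */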
+static VkResult
+wait_for_available(struct anv_device *device,
+ struct anv_query_pool *pool, uint64_t *slot)
+{
+ while (true) {
+ if (query_is_available(slot))
+ return VK_SUCCESS;
+
+ int ret = anv_gem_busy(device, pool->bo.gem_handle);
+ if (ret == 1) {
+ /* The BO is still busy, keep waiting. */
+ continue;
+ } else if (ret == -1) {
+ /* We don't know the real error. */
+ device->lost = true;
+ return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
+ "gem wait failed: %m");
+ } else {
+ assert(ret == 0);
+ /* The BO is no longer busy. */
+ if (query_is_available(slot)) {
+ return VK_SUCCESS;
+ } else {
+ VkResult status = anv_device_query_status(device);
+ if (status != VK_SUCCESS)
+ return status;
+
+ /* If we haven't seen availability yet, then we never will. This
+ * can only happen if we have a client error where they call
+ * GetQueryPoolResults on a query that they haven't submitted to
+ * the GPU yet. The spec allows us to do anything in this case,
+ * but returning VK_SUCCESS doesn't seem right and we shouldn't
+ * just keep spinning.
+ */
+ return VK_NOT_READY;
+ }
+ }
+ }
+}
+
VkResult genX(GetQueryPoolResults)(
VkDevice _device,
VkQueryPool queryPool,
{
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
- int64_t timeout = INT64_MAX;
- uint64_t result;
- int ret;
assert(pool->type == VK_QUERY_TYPE_OCCLUSION ||
+ pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS ||
pool->type == VK_QUERY_TYPE_TIMESTAMP);
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
if (pData == NULL)
return VK_SUCCESS;
- if (flags & VK_QUERY_RESULT_WAIT_BIT) {
- ret = anv_gem_wait(device, pool->bo.gem_handle, &timeout);
- if (ret == -1) {
- /* We don't know the real error. */
- return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
- "gem_wait failed %m");
- }
- }
-
void *data_end = pData + dataSize;
- struct anv_query_pool_slot *slot = pool->bo.map;
-
- if (!device->info.has_llc)
- anv_invalidate_range(slot, MIN2(queryCount * sizeof(*slot), pool->bo.size));
+ VkResult status = VK_SUCCESS;
for (uint32_t i = 0; i < queryCount; i++) {
+ uint64_t *slot = pool->bo.map + (firstQuery + i) * pool->stride;
+
+ /* Availability is always at the start of the slot */
+ bool available = slot[0];
+
+ if (!available && (flags & VK_QUERY_RESULT_WAIT_BIT)) {
+ status = wait_for_available(device, pool, slot);
+ if (status != VK_SUCCESS)
+ return status;
+
+ available = true;
+ }
+
+ /* From the Vulkan 1.0.42 spec:
+ *
+ * "If VK_QUERY_RESULT_WAIT_BIT and VK_QUERY_RESULT_PARTIAL_BIT are
+ * both not set then no result values are written to pData for
+ * queries that are in the unavailable state at the time of the call,
+ * and vkGetQueryPoolResults returns VK_NOT_READY. However,
+ * availability state is still written to pData for those queries if
+ * VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set."
+ */
+ bool write_results = available || (flags & VK_QUERY_RESULT_PARTIAL_BIT);
+
+ uint32_t idx = 0;
switch (pool->type) {
- case VK_QUERY_TYPE_OCCLUSION: {
- result = slot[firstQuery + i].end - slot[firstQuery + i].begin;
+ case VK_QUERY_TYPE_OCCLUSION:
+ if (write_results)
+ cpu_write_query_result(pData, flags, idx, slot[2] - slot[1]);
+ idx++;
break;
- }
- case VK_QUERY_TYPE_PIPELINE_STATISTICS:
- unreachable("pipeline stats not supported");
- case VK_QUERY_TYPE_TIMESTAMP: {
- result = slot[firstQuery + i].begin;
+
+ case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
+ uint32_t statistics = pool->pipeline_statistics;
+ while (statistics) {
+ uint32_t stat = u_bit_scan(&statistics);
+ if (write_results) {
+ uint64_t result = slot[idx * 2 + 2] - slot[idx * 2 + 1];
+
+ /* WaDividePSInvocationCountBy4:HSW,BDW */
+ if ((device->info.gen == 8 || device->info.is_haswell) &&
+ (1 << stat) == VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT)
+ result >>= 2;
+
+ cpu_write_query_result(pData, flags, idx, result);
+ }
+ idx++;
+ }
+ assert(idx == util_bitcount(pool->pipeline_statistics));
break;
}
+
+ case VK_QUERY_TYPE_TIMESTAMP:
+ if (write_results)
+ cpu_write_query_result(pData, flags, idx, slot[1]);
+ idx++;
+ break;
+
default:
unreachable("invalid pool type");
}
- if (flags & VK_QUERY_RESULT_64_BIT) {
- uint64_t *dst = pData;
- dst[0] = result;
- if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
- dst[1] = slot[firstQuery + i].available;
- } else {
- uint32_t *dst = pData;
- if (result > UINT32_MAX)
- result = UINT32_MAX;
- dst[0] = result;
- if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
- dst[1] = slot[firstQuery + i].available;
- }
+ if (!write_results)
+ status = VK_NOT_READY;
+
+ if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
+ cpu_write_query_result(pData, flags, idx, available);
pData += stride;
if (pData >= data_end)
break;
}
- return VK_SUCCESS;
+ return status;
+}
+
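+/* MI_STORE_REGISTER_MEM stores a single 32-bit register, so 64-bit values
+ * are written with two back-to-back SRMs, low dword first.
+ */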
+static void
+emit_srm32(struct anv_batch *batch, struct anv_address addr, uint32_t reg)
+{
+ anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
+ srm.MemoryAddress = addr;
+ srm.RegisterAddress = reg;
+ }
+}
+
+static void
+emit_srm64(struct anv_batch *batch, struct anv_address addr, uint32_t reg)
+{
+ emit_srm32(batch, anv_address_add(addr, 0), reg + 0);
+ emit_srm32(batch, anv_address_add(addr, 4), reg + 4);
}
static void
emit_ps_depth_count(struct anv_cmd_buffer *cmd_buffer,
- struct anv_bo *bo, uint32_t offset)
+ struct anv_address addr)
{
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
pc.DestinationAddressType = DAT_PPGTT;
pc.PostSyncOperation = WritePSDepthCount;
pc.DepthStallEnable = true;
- pc.Address = (struct anv_address) { bo, offset };
+ pc.Address = addr;
if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4)
pc.CommandStreamerStallEnable = true;
static void
emit_query_availability(struct anv_cmd_buffer *cmd_buffer,
- struct anv_bo *bo, uint32_t offset)
+ struct anv_address addr)
{
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
pc.DestinationAddressType = DAT_PPGTT;
pc.PostSyncOperation = WriteImmediateData;
- pc.Address = (struct anv_address) { bo, offset };
+ pc.Address = addr;
pc.ImmediateData = 1;
}
}
+/**
+ * Goes through a series of consecutive query indices in the given pool,
+ * setting all element values to 0 and emitting them as available.
+ */
+static void
+emit_zero_queries(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_query_pool *pool,
+ uint32_t first_index, uint32_t num_queries)
+{
+ for (uint32_t i = 0; i < num_queries; i++) {
+ struct anv_address slot_addr =
+ anv_query_address(pool, first_index + i);
+ genX(cmd_buffer_mi_memset)(cmd_buffer, anv_address_add(slot_addr, 8),
+ 0, pool->stride - 8);
+ emit_query_availability(cmd_buffer, slot_addr);
+ }
+}
+
void genX(CmdResetQueryPool)(
VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
for (uint32_t i = 0; i < queryCount; i++) {
- switch (pool->type) {
- case VK_QUERY_TYPE_OCCLUSION:
- case VK_QUERY_TYPE_TIMESTAMP: {
- anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdm) {
- sdm.Address = (struct anv_address) {
- .bo = &pool->bo,
- .offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot) +
- offsetof(struct anv_query_pool_slot, available),
- };
- sdm.DataDWord0 = 0;
- sdm.DataDWord1 = 0;
- }
- break;
- }
- default:
- assert(!"Invalid query type");
+ anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdm) {
+ sdm.Address = anv_query_address(pool, firstQuery + i);
+ sdm.ImmediateData = 0;
}
}
}
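+/* MMIO registers of the pipeline statistics counters, indexed by the bit
+ * position of the corresponding VkQueryPipelineStatisticFlagBits flag. The
+ * STATIC_ASSERT in emit_pipeline_stat checks that this table covers exactly
+ * ANV_PIPELINE_STATISTICS_MASK.
+ */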
+static const uint32_t vk_pipeline_stat_to_reg[] = {
+ GENX(IA_VERTICES_COUNT_num),
+ GENX(IA_PRIMITIVES_COUNT_num),
+ GENX(VS_INVOCATION_COUNT_num),
+ GENX(GS_INVOCATION_COUNT_num),
+ GENX(GS_PRIMITIVES_COUNT_num),
+ GENX(CL_INVOCATION_COUNT_num),
+ GENX(CL_PRIMITIVES_COUNT_num),
+ GENX(PS_INVOCATION_COUNT_num),
+ GENX(HS_INVOCATION_COUNT_num),
+ GENX(DS_INVOCATION_COUNT_num),
+ GENX(CS_INVOCATION_COUNT_num),
+};
+
+static void
+emit_pipeline_stat(struct anv_cmd_buffer *cmd_buffer, uint32_t stat,
+ struct anv_address addr)
+{
+ STATIC_ASSERT(ANV_PIPELINE_STATISTICS_MASK ==
+ (1 << ARRAY_SIZE(vk_pipeline_stat_to_reg)) - 1);
+
+ assert(stat < ARRAY_SIZE(vk_pipeline_stat_to_reg));
+ emit_srm64(&cmd_buffer->batch, addr, vk_pipeline_stat_to_reg[stat]);
+}
+
void genX(CmdBeginQuery)(
VkCommandBuffer commandBuffer,
VkQueryPool queryPool,
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+ struct anv_address query_addr = anv_query_address(pool, query);
- /* Workaround: When meta uses the pipeline with the VS disabled, it seems
- * that the pipelining of the depth write breaks. What we see is that
- * samples from the render pass clear leaks into the first query
- * immediately after the clear. Doing a pipecontrol with a post-sync
- * operation and DepthStallEnable seems to work around the issue.
- */
- if (cmd_buffer->state.need_query_wa) {
- cmd_buffer->state.need_query_wa = false;
+ switch (pool->type) {
+ case VK_QUERY_TYPE_OCCLUSION:
+ emit_ps_depth_count(cmd_buffer, anv_address_add(query_addr, 8));
+ break;
+
+ case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
+ /* TODO: This might only be necessary for certain stats */
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
- pc.DepthCacheFlushEnable = true;
- pc.DepthStallEnable = true;
+ pc.CommandStreamerStallEnable = true;
+ pc.StallAtPixelScoreboard = true;
}
- }
- switch (pool->type) {
- case VK_QUERY_TYPE_OCCLUSION:
- emit_ps_depth_count(cmd_buffer, &pool->bo,
- query * sizeof(struct anv_query_pool_slot));
+ uint32_t statistics = pool->pipeline_statistics;
+ uint32_t offset = 8;
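+ /* The begin snapshot of the i-th enabled statistic goes at byte 8 + 16*i,
+ * right after the availability word; the matching end snapshot is written
+ * at byte 16 + 16*i by CmdEndQuery.
+ */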
+ while (statistics) {
+ uint32_t stat = u_bit_scan(&statistics);
+ emit_pipeline_stat(cmd_buffer, stat,
+ anv_address_add(query_addr, offset));
+ offset += 16;
+ }
break;
+ }
- case VK_QUERY_TYPE_PIPELINE_STATISTICS:
default:
unreachable("");
}
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+ struct anv_address query_addr = anv_query_address(pool, query);
switch (pool->type) {
case VK_QUERY_TYPE_OCCLUSION:
- emit_ps_depth_count(cmd_buffer, &pool->bo,
- query * sizeof(struct anv_query_pool_slot) + 8);
+ emit_ps_depth_count(cmd_buffer, anv_address_add(query_addr, 16));
+ emit_query_availability(cmd_buffer, query_addr);
+ break;
+
+ case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
+ /* TODO: This might only be necessary for certain stats */
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+ pc.CommandStreamerStallEnable = true;
+ pc.StallAtPixelScoreboard = true;
+ }
+
+ uint32_t statistics = pool->pipeline_statistics;
+ uint32_t offset = 16;
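+ /* End snapshots are interleaved with the begin snapshots written by
+ * CmdBeginQuery: byte 16 + 16*i for the i-th enabled statistic.
+ */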
+ while (statistics) {
+ uint32_t stat = u_bit_scan(&statistics);
+ emit_pipeline_stat(cmd_buffer, stat,
+ anv_address_add(query_addr, offset));
+ offset += 16;
+ }
- emit_query_availability(cmd_buffer, &pool->bo,
- query * sizeof(struct anv_query_pool_slot) + 16);
+ emit_query_availability(cmd_buffer, query_addr);
break;
+ }
- case VK_QUERY_TYPE_PIPELINE_STATISTICS:
default:
unreachable("");
}
+
+ /* When multiview is active the spec requires that N consecutive query
+ * indices are used, where N is the number of active views in the subpass.
+ * The spec allows us to write the results to only one of the queries,
+ * but we still need to manage result availability for all the query indices.
+ * Since we only emit a single query for all active views in the
+ * first index, mark the other query indices as being already available
+ * with result 0.
+ */
+ if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
+ const uint32_t num_queries =
+ util_bitcount(cmd_buffer->state.subpass->view_mask);
+ if (num_queries > 1)
+ emit_zero_queries(cmd_buffer, pool, query + 1, num_queries - 1);
+ }
}
#define TIMESTAMP 0x2358
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
- uint32_t offset = query * sizeof(struct anv_query_pool_slot);
+ struct anv_address query_addr = anv_query_address(pool, query);
assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
switch (pipelineStage) {
case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
- anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
- srm.RegisterAddress = TIMESTAMP;
- srm.MemoryAddress = (struct anv_address) { &pool->bo, offset };
- }
- anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
- srm.RegisterAddress = TIMESTAMP + 4;
- srm.MemoryAddress = (struct anv_address) { &pool->bo, offset + 4 };
- }
+ emit_srm64(&cmd_buffer->batch, anv_address_add(query_addr, 8), TIMESTAMP);
break;
default:
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
pc.DestinationAddressType = DAT_PPGTT;
pc.PostSyncOperation = WriteTimestamp;
- pc.Address = (struct anv_address) { &pool->bo, offset };
+ pc.Address = anv_address_add(query_addr, 8);
if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4)
pc.CommandStreamerStallEnable = true;
break;
}
- emit_query_availability(cmd_buffer, &pool->bo, offset + 16);
+ emit_query_availability(cmd_buffer, query_addr);
+
+ /* When multiview is active the spec requires that N consecutive query
+ * indices are used, where N is the number of active views in the subpass.
+ * The spec allows us to write the results to only one of the queries,
+ * but we still need to manage result availability for all the query indices.
+ * Since we only emit a single query for all active views in the
+ * first index, mark the other query indices as being already available
+ * with result 0.
+ */
+ if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
+ const uint32_t num_queries =
+ util_bitcount(cmd_buffer->state.subpass->view_mask);
+ if (num_queries > 1)
+ emit_zero_queries(cmd_buffer, pool, query + 1, num_queries - 1);
+ }
}
#if GEN_GEN > 7 || GEN_IS_HASWELL
-#define alu_opcode(v) __gen_uint((v), 20, 31)
-#define alu_operand1(v) __gen_uint((v), 10, 19)
-#define alu_operand2(v) __gen_uint((v), 0, 9)
-#define alu(opcode, operand1, operand2) \
- alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
-
-#define OPCODE_NOOP 0x000
-#define OPCODE_LOAD 0x080
-#define OPCODE_LOADINV 0x480
-#define OPCODE_LOAD0 0x081
-#define OPCODE_LOAD1 0x481
-#define OPCODE_ADD 0x100
-#define OPCODE_SUB 0x101
-#define OPCODE_AND 0x102
-#define OPCODE_OR 0x103
-#define OPCODE_XOR 0x104
-#define OPCODE_STORE 0x180
-#define OPCODE_STOREINV 0x580
-
-#define OPERAND_R0 0x00
-#define OPERAND_R1 0x01
-#define OPERAND_R2 0x02
-#define OPERAND_R3 0x03
-#define OPERAND_R4 0x04
-#define OPERAND_SRCA 0x20
-#define OPERAND_SRCB 0x21
-#define OPERAND_ACCU 0x31
-#define OPERAND_ZF 0x32
-#define OPERAND_CF 0x33
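+/* Packs a single MI_MATH ALU instruction DWord, replacing the hand-rolled
+ * opcode/operand macros above.
+ */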
+static uint32_t
+mi_alu(uint32_t opcode, uint32_t operand1, uint32_t operand2)
+{
+ struct GENX(MI_MATH_ALU_INSTRUCTION) instr = {
+ .ALUOpcode = opcode,
+ .Operand1 = operand1,
+ .Operand2 = operand2,
+ };
+
+ uint32_t dw;
+ GENX(MI_MATH_ALU_INSTRUCTION_pack)(NULL, &dw, &instr);
+
+ return dw;
+}
#define CS_GPR(n) (0x2600 + (n) * 8)
static void
emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
- struct anv_bo *bo, uint32_t offset)
+ struct anv_address addr)
{
anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
- lrm.RegisterAddress = reg,
- lrm.MemoryAddress = (struct anv_address) { bo, offset };
+ lrm.RegisterAddress = reg;
+ lrm.MemoryAddress = anv_address_add(addr, 0);
}
anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
lrm.RegisterAddress = reg + 4;
- lrm.MemoryAddress = (struct anv_address) { bo, offset + 4 };
+ lrm.MemoryAddress = anv_address_add(addr, 4);
}
}
static void
-store_query_result(struct anv_batch *batch, uint32_t reg,
- struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
+emit_load_alu_reg_imm32(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
- anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
- srm.RegisterAddress = reg;
- srm.MemoryAddress = (struct anv_address) { bo, offset };
+ anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
+ lri.RegisterOffset = reg;
+ lri.DataDWord = imm;
}
+}
- if (flags & VK_QUERY_RESULT_64_BIT) {
- anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
- srm.RegisterAddress = reg + 4;
- srm.MemoryAddress = (struct anv_address) { bo, offset + 4 };
+static void
+emit_load_alu_reg_imm64(struct anv_batch *batch, uint32_t reg, uint64_t imm)
+{
+ emit_load_alu_reg_imm32(batch, reg, (uint32_t)imm);
+ emit_load_alu_reg_imm32(batch, reg + 4, (uint32_t)(imm >> 32));
+}
+
+static void
+emit_load_alu_reg_reg32(struct anv_batch *batch, uint32_t src, uint32_t dst)
+{
+ anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_REG), lrr) {
+ lrr.SourceRegisterAddress = src;
+ lrr.DestinationRegisterAddress = dst;
+ }
+}
+
+/*
+ * GPR0 = GPR0 & ((1ull << n) - 1);
+ */
+static void
+keep_gpr0_lower_n_bits(struct anv_batch *batch, uint32_t n)
+{
+ assert(n < 64);
+ emit_load_alu_reg_imm64(batch, CS_GPR(1), (1ull << n) - 1);
+
+ uint32_t *dw = anv_batch_emitn(batch, 5, GENX(MI_MATH));
+ if (!dw) {
+ anv_batch_set_error(batch, VK_ERROR_OUT_OF_HOST_MEMORY);
+ return;
+ }
+
+ dw[1] = mi_alu(MI_ALU_LOAD, MI_ALU_SRCA, MI_ALU_REG0);
+ dw[2] = mi_alu(MI_ALU_LOAD, MI_ALU_SRCB, MI_ALU_REG1);
+ dw[3] = mi_alu(MI_ALU_AND, 0, 0);
+ dw[4] = mi_alu(MI_ALU_STORE, MI_ALU_REG0, MI_ALU_ACCU);
+}
+
+/*
+ * GPR0 = GPR0 << 30;
+ */
+static void
+shl_gpr0_by_30_bits(struct anv_batch *batch)
+{
+ /* First we mask 34 bits of GPR0 to prevent overflow */
+ keep_gpr0_lower_n_bits(batch, 34);
+
+ const uint32_t outer_count = 5;
+ const uint32_t inner_count = 6;
+ STATIC_ASSERT(outer_count * inner_count == 30);
+ const uint32_t cmd_len = 1 + inner_count * 4;
+
+ /* We'll emit 5 commands, each shifting GPR0 left by 6 bits, for a total of
+ * 30 left shifts.
+ */
+ for (int o = 0; o < outer_count; o++) {
+ /* Submit one MI_MATH to shift left by 6 bits */
+ uint32_t *dw = anv_batch_emitn(batch, cmd_len, GENX(MI_MATH));
+ if (!dw) {
+ anv_batch_set_error(batch, VK_ERROR_OUT_OF_HOST_MEMORY);
+ return;
}
+
+ dw++;
+ for (int i = 0; i < inner_count; i++, dw += 4) {
+ dw[0] = mi_alu(MI_ALU_LOAD, MI_ALU_SRCA, MI_ALU_REG0);
+ dw[1] = mi_alu(MI_ALU_LOAD, MI_ALU_SRCB, MI_ALU_REG0);
+ dw[2] = mi_alu(MI_ALU_ADD, 0, 0);
+ dw[3] = mi_alu(MI_ALU_STORE, MI_ALU_REG0, MI_ALU_ACCU);
+ }
+ }
+}
+
+/*
+ * GPR0 = GPR0 >> 2;
+ *
+ * Note that the upper 30 bits of GPR are lost!
+ */
+static void
+shr_gpr0_by_2_bits(struct anv_batch *batch)
+{
+ shl_gpr0_by_30_bits(batch);
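+ /* The left shift by 30 moved bit 2 of the original value up to bit 32, so
+ * copying the upper dword into the lower dword and then zeroing the upper
+ * dword leaves GPR0 = original >> 2 (for values that fit in 34 bits).
+ */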
+ emit_load_alu_reg_reg32(batch, CS_GPR(0) + 4, CS_GPR(0));
+ emit_load_alu_reg_imm32(batch, CS_GPR(0) + 4, 0);
+}
+
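+/* Stores the query value held in a 64-bit GPR out to the destination buffer,
+ * as either a 64-bit or a 32-bit element depending on flags. value_index is
+ * in units of that element size.
+ */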
+static void
+gpu_write_query_result(struct anv_batch *batch,
+ struct anv_address dst_addr,
+ VkQueryResultFlags flags,
+ uint32_t value_index, uint32_t reg)
+{
+ if (flags & VK_QUERY_RESULT_64_BIT) {
+ emit_srm64(batch, anv_address_add(dst_addr, value_index * 8), reg);
+ } else {
+ emit_srm32(batch, anv_address_add(dst_addr, value_index * 4), reg);
+ }
+}
+
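+/* Loads the (begin, end) pair at addr into GPR0/GPR1 and computes
+ * dst_reg = end - begin with a single MI_MATH.
+ */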
+static void
+compute_query_result(struct anv_batch *batch, uint32_t dst_reg,
+ struct anv_address addr)
+{
+ emit_load_alu_reg_u64(batch, CS_GPR(0), anv_address_add(addr, 0));
+ emit_load_alu_reg_u64(batch, CS_GPR(1), anv_address_add(addr, 8));
+
+ /* FIXME: We need to clamp the result for 32 bit. */
+
+ uint32_t *dw = anv_batch_emitn(batch, 5, GENX(MI_MATH));
+ if (!dw) {
+ anv_batch_set_error(batch, VK_ERROR_OUT_OF_HOST_MEMORY);
+ return;
}
+
+ dw[1] = mi_alu(MI_ALU_LOAD, MI_ALU_SRCA, MI_ALU_REG1);
+ dw[2] = mi_alu(MI_ALU_LOAD, MI_ALU_SRCB, MI_ALU_REG0);
+ dw[3] = mi_alu(MI_ALU_SUB, 0, 0);
+ dw[4] = mi_alu(MI_ALU_STORE, dst_reg, MI_ALU_ACCU);
}
void genX(CmdCopyQueryPoolResults)(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
- uint32_t slot_offset, dst_offset;
if (flags & VK_QUERY_RESULT_WAIT_BIT) {
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
}
}
- dst_offset = buffer->offset + destOffset;
+ struct anv_address dest_addr = anv_address_add(buffer->address, destOffset);
for (uint32_t i = 0; i < queryCount; i++) {
-
- slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
+ struct anv_address query_addr = anv_query_address(pool, firstQuery + i);
+ uint32_t idx = 0;
switch (pool->type) {
case VK_QUERY_TYPE_OCCLUSION:
- emit_load_alu_reg_u64(&cmd_buffer->batch,
- CS_GPR(0), &pool->bo, slot_offset);
- emit_load_alu_reg_u64(&cmd_buffer->batch,
- CS_GPR(1), &pool->bo, slot_offset + 8);
+ compute_query_result(&cmd_buffer->batch, MI_ALU_REG2,
+ anv_address_add(query_addr, 8));
+ gpu_write_query_result(&cmd_buffer->batch, dest_addr,
+ flags, idx++, CS_GPR(2));
+ break;
- /* FIXME: We need to clamp the result for 32 bit. */
+ case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
+ uint32_t statistics = pool->pipeline_statistics;
+ while (statistics) {
+ uint32_t stat = u_bit_scan(&statistics);
- uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
- dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
- dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
- dw[3] = alu(OPCODE_SUB, 0, 0);
- dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
+ compute_query_result(&cmd_buffer->batch, MI_ALU_REG0,
+ anv_address_add(query_addr, idx * 16 + 8));
+
+ /* WaDividePSInvocationCountBy4:HSW,BDW */
+ if ((cmd_buffer->device->info.gen == 8 ||
+ cmd_buffer->device->info.is_haswell) &&
+ (1 << stat) == VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT) {
+ shr_gpr0_by_2_bits(&cmd_buffer->batch);
+ }
+
+ gpu_write_query_result(&cmd_buffer->batch, dest_addr,
+ flags, idx++, CS_GPR(0));
+ }
+ assert(idx == util_bitcount(pool->pipeline_statistics));
break;
+ }
case VK_QUERY_TYPE_TIMESTAMP:
emit_load_alu_reg_u64(&cmd_buffer->batch,
- CS_GPR(2), &pool->bo, slot_offset);
+ CS_GPR(2), anv_address_add(query_addr, 8));
+ gpu_write_query_result(&cmd_buffer->batch, dest_addr,
+ flags, 0, CS_GPR(2));
break;
default:
unreachable("unhandled query type");
}
- store_query_result(&cmd_buffer->batch,
- CS_GPR(2), buffer->bo, dst_offset, flags);
-
if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
- emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
- &pool->bo, slot_offset + 16);
- if (flags & VK_QUERY_RESULT_64_BIT)
- store_query_result(&cmd_buffer->batch,
- CS_GPR(0), buffer->bo, dst_offset + 8, flags);
- else
- store_query_result(&cmd_buffer->batch,
- CS_GPR(0), buffer->bo, dst_offset + 4, flags);
+ emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0), query_addr);
+ gpu_write_query_result(&cmd_buffer->batch, dest_addr,
+ flags, idx, CS_GPR(0));
}
- dst_offset += destStride;
+ dest_addr = anv_address_add(dest_addr, destStride);
}
}