VkQueryPool* pQueryPool)
{
ANV_FROM_HANDLE(anv_device, device, _device);
+ const struct anv_physical_device *pdevice = &device->instance->physicalDevice;
struct anv_query_pool *pool;
VkResult result;
if (result != VK_SUCCESS)
goto fail;
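+ /* Allow the kernel to place this BO anywhere in the 48-bit PPGTT rather
+  * than restricting it to the low 4 GiB.
+  */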
+ if (pdevice->supports_48bit_addresses)
+ pool->bo.flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
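+ /* EXEC_OBJECT_ASYNC opts this BO out of implicit synchronization on execbuf. */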
+ if (pdevice->has_exec_async)
+ pool->bo.flags |= EXEC_OBJECT_ASYNC;
+
+ /* For query pools, we set the caching mode to I915_CACHING_CACHED. On LLC
+ * platforms, this does nothing. On non-LLC platforms, this means snooping,
+ * which comes at a slight cost. However, the buffers aren't big, won't be
+ * written frequently, and trying to handle the flushing manually without
+ * doing too much flushing is extremely painful.
+ */
+ anv_gem_set_caching(device, pool->bo.gem_handle, I915_CACHING_CACHED);
+
pool->bo.map = anv_gem_mmap(device, pool->bo.gem_handle, 0, size, 0);
*pQueryPool = anv_query_pool_to_handle(pool);
}
}
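+ /* The availability value sits at the start of each query slot and is written
+  * by the GPU when the query completes. The volatile read keeps the compiler
+  * from caching the value while wait_for_available() polls it.
+  */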
+static bool
+query_is_available(uint64_t *slot)
+{
+ return *(volatile uint64_t *)slot;
+}
+
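+ /* Busy-wait until the availability value for this slot becomes non-zero.
+  * While the pool BO is still busy on the GPU we keep polling; once it goes
+  * idle without the slot becoming available, we either report device loss or
+  * conclude the query was never submitted and return VK_NOT_READY.
+  */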
+static VkResult
+wait_for_available(struct anv_device *device,
+ struct anv_query_pool *pool, uint64_t *slot)
+{
+ while (true) {
+ if (query_is_available(slot))
+ return VK_SUCCESS;
+
+ int ret = anv_gem_busy(device, pool->bo.gem_handle);
+ if (ret == 1) {
+ /* The BO is still busy, keep waiting. */
+ continue;
+ } else if (ret == -1) {
+ /* We don't know the real error. */
+ device->lost = true;
+ return vk_errorf(device->instance, device, VK_ERROR_DEVICE_LOST,
+ "gem wait failed: %m");
+ } else {
+ assert(ret == 0);
+ /* The BO is no longer busy. */
+ if (query_is_available(slot)) {
+ return VK_SUCCESS;
+ } else {
+ VkResult status = anv_device_query_status(device);
+ if (status != VK_SUCCESS)
+ return status;
+
+ /* If we haven't seen availability yet, then we never will. This
+ * can only happen on a client error, where GetQueryPoolResults is
+ * called on a query that was never submitted to the GPU. The spec
+ * allows us to do anything in this case,
+ * but returning VK_SUCCESS doesn't seem right and we shouldn't
+ * just keep spinning.
+ */
+ return VK_NOT_READY;
+ }
+ }
+ }
+}
+
VkResult genX(GetQueryPoolResults)(
VkDevice _device,
VkQueryPool queryPool,
{
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
- int64_t timeout = INT64_MAX;
- int ret;
assert(pool->type == VK_QUERY_TYPE_OCCLUSION ||
pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS ||
pool->type == VK_QUERY_TYPE_TIMESTAMP);
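+ /* Once the device is lost, we can no longer produce valid results. */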
+ if (unlikely(device->lost))
+ return VK_ERROR_DEVICE_LOST;
+
if (pData == NULL)
return VK_SUCCESS;
- if (flags & VK_QUERY_RESULT_WAIT_BIT) {
- ret = anv_gem_wait(device, pool->bo.gem_handle, &timeout);
- if (ret == -1) {
- /* We don't know the real error. */
- return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
- "gem_wait failed %m");
- }
- }
-
void *data_end = pData + dataSize;
- if (!device->info.has_llc) {
- uint64_t offset = firstQuery * pool->stride;
- uint64_t size = queryCount * pool->stride;
- anv_invalidate_range(pool->bo.map + offset,
- MIN2(size, pool->bo.size - offset));
- }
-
VkResult status = VK_SUCCESS;
for (uint32_t i = 0; i < queryCount; i++) {
uint64_t *slot = pool->bo.map + (firstQuery + i) * pool->stride;
/* Availability is always at the start of the slot */
bool available = slot[0];
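+ /* With VK_QUERY_RESULT_WAIT_BIT, wait on this slot's availability value
+  * rather than on the whole pool BO.
+  */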
+ if (!available && (flags & VK_QUERY_RESULT_WAIT_BIT)) {
+ status = wait_for_available(device, pool, slot);
+ if (status != VK_SUCCESS)
+ return status;
+
+ available = true;
+ }
+
/* From the Vulkan 1.0.42 spec:
*
* "If VK_QUERY_RESULT_WAIT_BIT and VK_QUERY_RESULT_PARTIAL_BIT are
.bo = &pool->bo,
.offset = (firstQuery + i) * pool->stride,
};
- sdm.DataDWord0 = 0;
- sdm.DataDWord1 = 0;
+ sdm.ImmediateData = 0;
}
}
}
#if GEN_GEN > 7 || GEN_IS_HASWELL
-#define alu_opcode(v) __gen_uint((v), 20, 31)
-#define alu_operand1(v) __gen_uint((v), 10, 19)
-#define alu_operand2(v) __gen_uint((v), 0, 9)
-#define alu(opcode, operand1, operand2) \
- alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
-
-#define OPCODE_NOOP 0x000
-#define OPCODE_LOAD 0x080
-#define OPCODE_LOADINV 0x480
-#define OPCODE_LOAD0 0x081
-#define OPCODE_LOAD1 0x481
-#define OPCODE_ADD 0x100
-#define OPCODE_SUB 0x101
-#define OPCODE_AND 0x102
-#define OPCODE_OR 0x103
-#define OPCODE_XOR 0x104
-#define OPCODE_STORE 0x180
-#define OPCODE_STOREINV 0x580
-
-#define OPERAND_R0 0x00
-#define OPERAND_R1 0x01
-#define OPERAND_R2 0x02
-#define OPERAND_R3 0x03
-#define OPERAND_R4 0x04
-#define OPERAND_SRCA 0x20
-#define OPERAND_SRCB 0x21
-#define OPERAND_ACCU 0x31
-#define OPERAND_ZF 0x32
-#define OPERAND_CF 0x33
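+ /* Pack a single MI_MATH ALU instruction DWord using the genxml struct
+  * instead of the hand-rolled opcode/operand bitfield macros.
+  */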
+static uint32_t
+mi_alu(uint32_t opcode, uint32_t operand1, uint32_t operand2)
+{
+ struct GENX(MI_MATH_ALU_INSTRUCTION) instr = {
+ .ALUOpcode = opcode,
+ .Operand1 = operand1,
+ .Operand2 = operand2,
+ };
+
+ uint32_t dw;
+ GENX(MI_MATH_ALU_INSTRUCTION_pack)(NULL, &dw, &instr);
+
+ return dw;
+}
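+ /* MI command streamer general purpose register n; each GPR is 64 bits wide. */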
#define CS_GPR(n) (0x2600 + (n) * 8)
emit_load_alu_reg_imm64(batch, CS_GPR(1), (1ull << n) - 1);
uint32_t *dw = anv_batch_emitn(batch, 5, GENX(MI_MATH));
- dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R0);
- dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R1);
- dw[3] = alu(OPCODE_AND, 0, 0);
- dw[4] = alu(OPCODE_STORE, OPERAND_R0, OPERAND_ACCU);
+ if (!dw) {
+ anv_batch_set_error(batch, VK_ERROR_OUT_OF_HOST_MEMORY);
+ return;
+ }
+
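+ /* GPR0 = GPR0 & GPR1: keep only the low n bits using the mask loaded above. */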
+ dw[1] = mi_alu(MI_ALU_LOAD, MI_ALU_SRCA, MI_ALU_REG0);
+ dw[2] = mi_alu(MI_ALU_LOAD, MI_ALU_SRCB, MI_ALU_REG1);
+ dw[3] = mi_alu(MI_ALU_AND, 0, 0);
+ dw[4] = mi_alu(MI_ALU_STORE, MI_ALU_REG0, MI_ALU_ACCU);
}
/*
for (int o = 0; o < outer_count; o++) {
/* Submit one MI_MATH to shift left by 6 bits */
uint32_t *dw = anv_batch_emitn(batch, cmd_len, GENX(MI_MATH));
+ if (!dw) {
+ anv_batch_set_error(batch, VK_ERROR_OUT_OF_HOST_MEMORY);
+ return;
+ }
+
dw++;
for (int i = 0; i < inner_count; i++, dw += 4) {
- dw[0] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R0);
- dw[1] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
- dw[2] = alu(OPCODE_ADD, 0, 0);
- dw[3] = alu(OPCODE_STORE, OPERAND_R0, OPERAND_ACCU);
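+ /* GPR0 = GPR0 + GPR0: each ADD doubles the value, shifting it left one bit. */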
+ dw[0] = mi_alu(MI_ALU_LOAD, MI_ALU_SRCA, MI_ALU_REG0);
+ dw[1] = mi_alu(MI_ALU_LOAD, MI_ALU_SRCB, MI_ALU_REG0);
+ dw[2] = mi_alu(MI_ALU_ADD, 0, 0);
+ dw[3] = mi_alu(MI_ALU_STORE, MI_ALU_REG0, MI_ALU_ACCU);
}
}
}
/* FIXME: We need to clamp the result for 32 bit. */
uint32_t *dw = anv_batch_emitn(batch, 5, GENX(MI_MATH));
- dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
- dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
- dw[3] = alu(OPCODE_SUB, 0, 0);
- dw[4] = alu(OPCODE_STORE, dst_reg, OPERAND_ACCU);
+ if (!dw) {
+ anv_batch_set_error(batch, VK_ERROR_OUT_OF_HOST_MEMORY);
+ return;
+ }
+
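+ /* dst_reg = GPR1 - GPR0, i.e. the end value minus the begin value. */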
+ dw[1] = mi_alu(MI_ALU_LOAD, MI_ALU_SRCA, MI_ALU_REG1);
+ dw[2] = mi_alu(MI_ALU_LOAD, MI_ALU_SRCB, MI_ALU_REG0);
+ dw[3] = mi_alu(MI_ALU_SUB, 0, 0);
+ dw[4] = mi_alu(MI_ALU_STORE, dst_reg, MI_ALU_ACCU);
}
void genX(CmdCopyQueryPoolResults)(
slot_offset = (firstQuery + i) * pool->stride;
switch (pool->type) {
case VK_QUERY_TYPE_OCCLUSION:
- compute_query_result(&cmd_buffer->batch, OPERAND_R2,
+ compute_query_result(&cmd_buffer->batch, MI_ALU_REG2,
&pool->bo, slot_offset + 8);
gpu_write_query_result(&cmd_buffer->batch, buffer, destOffset,
flags, 0, CS_GPR(2));
while (statistics) {
uint32_t stat = u_bit_scan(&statistics);
- compute_query_result(&cmd_buffer->batch, OPERAND_R0,
+ compute_query_result(&cmd_buffer->batch, MI_ALU_REG0,
&pool->bo, slot_offset + idx * 16 + 8);
/* WaDividePSInvocationCountBy4:HSW,BDW */