diff --git a/src/intel/vulkan/genX_query.c b/src/intel/vulkan/genX_query.c
index 938acd7622f25118e4a30249ae7dab0023eb9eef..3fd662cc06249d0b810a7002b30818d36cee6188 100644
 #include "genxml/gen_macros.h"
 #include "genxml/genX_pack.h"
 
+/* We reserve:
+ *    - GPR 14 for perf queries
+ *    - GPR 15 for conditional rendering
+ */
+#define GEN_MI_BUILDER_NUM_ALLOC_GPRS 14
+#define GEN_MI_BUILDER_CAN_WRITE_BATCH GEN_GEN >= 8
+#define __gen_get_batch_dwords anv_batch_emit_dwords
+#define __gen_address_offset anv_address_add
+#define __gen_get_batch_address(b, a) anv_address_physical(anv_batch_address(b, a))
+#include "common/gen_mi_builder.h"
+#include "perf/gen_perf.h"
+#include "perf/gen_perf_mdapi.h"
+
+#define OA_REPORT_N_UINT64 (256 / sizeof(uint64_t))
+
+#include "vk_util.h"
+
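+/* Returns the address of the given query's slot within the pool BO. */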
+static struct anv_address
+anv_query_address(struct anv_query_pool *pool, uint32_t query)
+{
+   return (struct anv_address) {
+      .bo = pool->bo,
+      .offset = query * pool->stride,
+   };
+}
+
 VkResult genX(CreateQueryPool)(
     VkDevice                                    _device,
     const VkQueryPoolCreateInfo*                pCreateInfo,
@@ -39,45 +65,144 @@ VkResult genX(CreateQueryPool)(
     VkQueryPool*                                pQueryPool)
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
+   const struct anv_physical_device *pdevice = device->physical;
+   const VkQueryPoolPerformanceCreateInfoKHR *perf_query_info = NULL;
    struct anv_query_pool *pool;
+   struct gen_perf_counter_pass *counter_pass;
+   struct gen_perf_query_info **pass_query;
+   ANV_MULTIALLOC(ma);
    VkResult result;
-   uint32_t slot_size;
-   uint64_t size;
 
    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO);
 
+   /* Query pool slots are made up of some number of 64-bit values packed
+    * tightly together. For most query types, the first 64-bit value is the
+    * "available" bit, which is 0 when the query is unavailable and 1 when it
+    * is available. The 64-bit values that follow are determined by the type
+    * of query.
+    *
+    * For performance queries, we have a requirement to align OA reports to
+    * 64 bytes, so we put those first and place the "available" bit after
+    * them, together with some other counters.
+    */
+   uint32_t uint64s_per_slot = 0;
+   UNUSED uint32_t n_passes = 0;
+
+   anv_multialloc_add(&ma, &pool, 1);
+
+   VkQueryPipelineStatisticFlags pipeline_statistics = 0;
    switch (pCreateInfo->queryType) {
    case VK_QUERY_TYPE_OCCLUSION:
+      /* Occlusion queries have two values: begin and end. */
+      uint64s_per_slot = 1 + 2;
+      break;
    case VK_QUERY_TYPE_TIMESTAMP:
+      /* Timestamps just have the one timestamp value */
+      uint64s_per_slot = 1 + 1;
       break;
    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
-      return VK_ERROR_INCOMPATIBLE_DRIVER;
+      pipeline_statistics = pCreateInfo->pipelineStatistics;
+      /* We're going to trust this field implicitly so we need to ensure that
+       * no unhandled extension bits leak in.
+       */
+      pipeline_statistics &= ANV_PIPELINE_STATISTICS_MASK;
+
+      /* Statistics queries have a begin and end value for every statistic */
+      uint64s_per_slot = 1 + 2 * util_bitcount(pipeline_statistics);
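+      /* For example, with 3 statistics enabled this is 1 + 2 * 3 = 7
+       * uint64s, i.e. a 56 byte slot.
+       */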
+      break;
+   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
+      /* Transform feedback queries are 4 values, begin/end for
+       * primitives written and primitives needed.
+       */
+      uint64s_per_slot = 1 + 4;
+      break;
+   case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL:
+      uint64s_per_slot = 72; /* 576 bytes, see layout below */
+      break;
+   case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR:
+      perf_query_info = vk_find_struct_const(pCreateInfo->pNext,
+                                             QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR);
+      n_passes = gen_perf_get_n_passes(pdevice->perf,
+                                       perf_query_info->pCounterIndices,
+                                       perf_query_info->counterIndexCount,
+                                       NULL);
+      anv_multialloc_add(&ma, &counter_pass, perf_query_info->counterIndexCount);
+      anv_multialloc_add(&ma, &pass_query, n_passes);
+      STATIC_ASSERT(ANV_KHR_PERF_QUERY_SIZE % sizeof(uint64_t) == 0);
+      uint64s_per_slot = (ANV_KHR_PERF_QUERY_SIZE / sizeof(uint64_t)) * n_passes;
+      break;
    default:
       assert(!"Invalid query type");
    }
 
-   slot_size = sizeof(struct anv_query_pool_slot);
-   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
-                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-   if (pool == NULL)
+   if (!anv_multialloc_alloc2(&ma, &device->vk.alloc,
+                              pAllocator,
+                              VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 
+   vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_QUERY_POOL);
    pool->type = pCreateInfo->queryType;
+   pool->pipeline_statistics = pipeline_statistics;
+   pool->stride = uint64s_per_slot * sizeof(uint64_t);
    pool->slots = pCreateInfo->queryCount;
 
-   size = pCreateInfo->queryCount * slot_size;
-   result = anv_bo_init_new(&pool->bo, device, size);
+   if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
+      pool->n_counters = perf_query_info->counterIndexCount;
+      pool->counter_pass = counter_pass;
+      gen_perf_get_counters_passes(pdevice->perf,
+                                   perf_query_info->pCounterIndices,
+                                   perf_query_info->counterIndexCount,
+                                   pool->counter_pass);
+      pool->n_passes = n_passes;
+      pool->pass_query = pass_query;
+      gen_perf_get_n_passes(pdevice->perf,
+                            perf_query_info->pCounterIndices,
+                            perf_query_info->counterIndexCount,
+                            pool->pass_query);
+   }
+
+   uint64_t size = pool->slots * pool->stride;
+   result = anv_device_alloc_bo(device, size,
+                                ANV_BO_ALLOC_MAPPED |
+                                ANV_BO_ALLOC_SNOOPED,
+                                0 /* explicit_address */,
+                                &pool->bo);
    if (result != VK_SUCCESS)
       goto fail;
 
-   pool->bo.map = anv_gem_mmap(device, pool->bo.gem_handle, 0, size, 0);
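+   /* For performance query pools, write a small batch into each pass slot of
+    * query 0: it loads ANV_PERF_QUERY_OFFSET_REG with that pass's byte offset
+    * and ends with MI_BATCH_BUFFER_END. The query begin/end code below adds
+    * this register to pass-0 offsets so that the MI_RPC writes land in the
+    * pass currently being executed.
+    */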
+   if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
+      for (uint32_t p = 0; p < pool->n_passes; p++) {
+         struct gen_mi_builder b;
+         struct anv_batch batch = {
+            .start = pool->bo->map + ANV_KHR_PERF_QUERY_SIZE * p + 8,
+            .end = pool->bo->map + ANV_KHR_PERF_QUERY_SIZE * p + 64,
+         };
+         batch.next = batch.start;
+
+         gen_mi_builder_init(&b, &batch);
+         gen_mi_store(&b, gen_mi_reg64(ANV_PERF_QUERY_OFFSET_REG),
+                      gen_mi_imm(p * ANV_KHR_PERF_QUERY_SIZE));
+         anv_batch_emit(&batch, GENX(MI_BATCH_BUFFER_END), bbe);
+         assert(batch.next <= (pool->bo->map + ANV_KHR_PERF_QUERY_SIZE * p + 64));
+      }
+   }
 
    *pQueryPool = anv_query_pool_to_handle(pool);
 
    return VK_SUCCESS;
 
  fail:
-   vk_free2(&device->alloc, pAllocator, pool);
+   vk_free2(&device->vk.alloc, pAllocator, pool);
 
    return result;
 }
@@ -93,9 +218,178 @@ void genX(DestroyQueryPool)(
    if (!pool)
       return;
 
-   anv_gem_munmap(pool->bo.map, pool->bo.size);
-   anv_gem_close(device, pool->bo.gem_handle);
-   vk_free2(&device->alloc, pAllocator, pool);
+   anv_device_release_bo(device, pool->bo);
+   vk_object_base_finish(&pool->base);
+   vk_free2(&device->vk.alloc, pAllocator, pool);
+}
+
+/**
+ * VK_KHR_performance_query layout (576 bytes * number of passes) :
+ *
+ * -----------------------------------------
+ * |       availability (8b)    | |        |
+ * |----------------------------| |        |
+ * |     Small batch loading    | |        |
+ * |  ANV_PERF_QUERY_OFFSET_REG | |        |
+ * |          (56b)             | | Pass 0 |
+ * |----------------------------| |        |
+ * |     begin MI_RPC (256b)    | |        |
+ * |----------------------------| |        |
+ * |       end MI_RPC (256b)    | |        |
+ * |----------------------------|--        | Query 0
+ * |       availability (8b)    | |        |
+ * |----------------------------| |        |
+ * |     Small batch loading    | |        |
+ * |  ANV_PERF_QUERY_OFFSET_REG | |        |
+ * |          (56b)             | | Pass 1 |
+ * |----------------------------| |        |
+ * |     begin MI_RPC (256b)    | |        |
+ * |----------------------------| |        |
+ * |       end MI_RPC (256b)    | |        |
+ * |----------------------------|-----------
+ * |       availability (8b)    | |        |
+ * |----------------------------| |        |
+ * |        Unused (56b)        | |        |
+ * |----------------------------| | Pass 0 |
+ * |     begin MI_RPC (256b)    | |        |
+ * |----------------------------| |        | Query 1
+ * |       end MI_RPC (256b)    | |        |
+ * |----------------------------|--        |
+ * |             ...            | |        |
+ * -----------------------------------------
+ */
+UNUSED static uint64_t
+khr_perf_query_availability_offset(struct anv_query_pool *pool, uint32_t query, uint32_t pass)
+{
+   return query * (pool->n_passes * ANV_KHR_PERF_QUERY_SIZE) +
+      pass * ANV_KHR_PERF_QUERY_SIZE;
+}
+
+UNUSED static uint64_t
+khr_perf_query_oa_offset(struct anv_query_pool *pool, uint32_t query, uint32_t pass, bool end)
+{
+   return query * (pool->n_passes * ANV_KHR_PERF_QUERY_SIZE) +
+      pass * ANV_KHR_PERF_QUERY_SIZE +
+      64 + (end ? OA_SNAPSHOT_SIZE : 0);
+}
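+/* For example, assuming ANV_KHR_PERF_QUERY_SIZE = 576 and OA_SNAPSHOT_SIZE =
+ * 256 (matching the layout above), with 2 passes the end OA report of
+ * query 3, pass 1 lives at 3 * (2 * 576) + 1 * 576 + 64 + 256 = 4352 bytes
+ * into the pool BO.
+ */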
+
+UNUSED static struct anv_address
+khr_perf_query_availability_address(struct anv_query_pool *pool, uint32_t query, uint32_t pass)
+{
+   return anv_address_add(
+      (struct anv_address) { .bo = pool->bo, },
+      khr_perf_query_availability_offset(pool, query, pass));
+}
+
+UNUSED static struct anv_address
+khr_perf_query_oa_address(struct anv_query_pool *pool, uint32_t query, uint32_t pass, bool end)
+{
+   return anv_address_add(
+      (struct anv_address) { .bo = pool->bo, },
+      khr_perf_query_oa_offset(pool, query, pass, end));
+}
+
+
+/**
+ * VK_INTEL_performance_query layout (576 bytes) :
+ *
+ * ------------------------------
+ * |       availability (8b)    |
+ * |----------------------------|
+ * |         marker (8b)        |
+ * |----------------------------|
+ * | begin RPSTAT register (4b) |
+ * |----------------------------|
+ * |  end RPSTAT register (4b)  |
+ * |----------------------------|
+ * | begin perfcntr 1 & 2 (16b) |
+ * |----------------------------|
+ * |  end perfcntr 1 & 2 (16b)  |
+ * |----------------------------|
+ * |          Unused (8b)       |
+ * |----------------------------|
+ * |     begin MI_RPC (256b)    |
+ * |----------------------------|
+ * |       end MI_RPC (256b)    |
+ * ------------------------------
+ */
+
+static uint32_t
+intel_perf_marker_offset(void)
+{
+   return 8;
+}
+
+static uint32_t
+intel_perf_rpstart_offset(bool end)
+{
+   return 16 + (end ? sizeof(uint32_t) : 0);
+}
+
+#if GEN_GEN >= 8 && GEN_GEN <= 11
+static uint32_t
+intel_perf_counter(bool end)
+{
+   return 24 + (end ? (2 * sizeof(uint64_t)) : 0);
+}
+#endif
+
+static uint32_t
+intel_perf_mi_rpc_offset(bool end)
+{
+   return 64 + (end ? 256 : 0);
+}
+
+static void
+cpu_write_query_result(void *dst_slot, VkQueryResultFlags flags,
+                       uint32_t value_index, uint64_t result)
+{
+   if (flags & VK_QUERY_RESULT_64_BIT) {
+      uint64_t *dst64 = dst_slot;
+      dst64[value_index] = result;
+   } else {
+      uint32_t *dst32 = dst_slot;
+      dst32[value_index] = result;
+   }
+}
+
+static void *
+query_slot(struct anv_query_pool *pool, uint32_t query)
+{
+   return pool->bo->map + query * pool->stride;
+}
+
+static bool
+query_is_available(struct anv_query_pool *pool, uint32_t query)
+{
+   if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
+      for (uint32_t p = 0; p < pool->n_passes; p++) {
+         volatile uint64_t *slot =
+            pool->bo->map + khr_perf_query_availability_offset(pool, query, p);
+         if (!slot[0])
+            return false;
+      }
+      return true;
+   } else {
+      return *(volatile uint64_t *)query_slot(pool, query);
+   }
+}
+
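+/* Busy-waits (for up to 5 seconds) for the query to become available,
+ * checking the device status between polls and marking the device lost on
+ * timeout.
+ */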
+static VkResult
+wait_for_available(struct anv_device *device,
+                   struct anv_query_pool *pool, uint32_t query)
+{
+   uint64_t abs_timeout = anv_get_absolute_timeout(5 * NSEC_PER_SEC);
+
+   while (anv_gettime_ns() < abs_timeout) {
+      if (query_is_available(pool, query))
+         return VK_SUCCESS;
+      VkResult status = anv_device_query_status(device);
+      if (status != VK_SUCCESS)
+         return status;
+   }
+
+   return anv_device_set_lost(device, "query timeout");
 }
 
 VkResult genX(GetQueryPoolResults)(
@@ -110,38 +404,33 @@ VkResult genX(GetQueryPoolResults)(
 {
    ANV_FROM_HANDLE(anv_device, device, _device);
    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
-   int64_t timeout = INT64_MAX;
-   uint64_t result;
-   int ret;
 
    assert(pool->type == VK_QUERY_TYPE_OCCLUSION ||
-          pool->type == VK_QUERY_TYPE_TIMESTAMP);
+          pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS ||
+          pool->type == VK_QUERY_TYPE_TIMESTAMP ||
+          pool->type == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT ||
+          pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR ||
+          pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL);
+
+   if (anv_device_is_lost(device))
+      return VK_ERROR_DEVICE_LOST;
 
    if (pData == NULL)
       return VK_SUCCESS;
 
-   if (flags & VK_QUERY_RESULT_WAIT_BIT) {
-      ret = anv_gem_wait(device, pool->bo.gem_handle, &timeout);
-      if (ret == -1) {
-         /* We don't know the real error. */
-         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
-                          "gem_wait failed %m");
-      }
-   }
-
    void *data_end = pData + dataSize;
-   struct anv_query_pool_slot *slot = pool->bo.map;
-
-   if (!device->info.has_llc) {
-      uint64_t offset = firstQuery * sizeof(*slot);
-      uint64_t size = queryCount * sizeof(*slot);
-      anv_invalidate_range(pool->bo.map + offset,
-                           MIN2(size, pool->bo.size - offset));
-   }
 
    VkResult status = VK_SUCCESS;
    for (uint32_t i = 0; i < queryCount; i++) {
-      bool available = slot[firstQuery + i].available;
+      bool available = query_is_available(pool, firstQuery + i);
+
+      if (!available && (flags & VK_QUERY_RESULT_WAIT_BIT)) {
+         status = wait_for_available(device, pool, firstQuery + i);
+         if (status != VK_SUCCESS)
+            return status;
+
+         available = true;
+      }
 
       /* From the Vulkan 1.0.42 spec:
        *
@@ -151,42 +440,137 @@ VkResult genX(GetQueryPoolResults)(
        *    and vkGetQueryPoolResults returns VK_NOT_READY. However,
        *    availability state is still written to pData for those queries if
        *    VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set."
+       *
+       * From VK_KHR_performance_query :
+       *
+       *    "VK_QUERY_RESULT_PERFORMANCE_QUERY_RECORDED_COUNTERS_BIT_KHR specifies
+       *     that the result should contain the number of counters that were recorded
+       *     into a query pool of type ename:VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR"
        */
       bool write_results = available || (flags & VK_QUERY_RESULT_PARTIAL_BIT);
 
-      if (write_results) {
-         switch (pool->type) {
-         case VK_QUERY_TYPE_OCCLUSION: {
-            result = slot[firstQuery + i].end - slot[firstQuery + i].begin;
-            break;
-         }
-         case VK_QUERY_TYPE_PIPELINE_STATISTICS:
-            unreachable("pipeline stats not supported");
-         case VK_QUERY_TYPE_TIMESTAMP: {
-            result = slot[firstQuery + i].begin;
-            break;
+      uint32_t idx = 0;
+      switch (pool->type) {
+      case VK_QUERY_TYPE_OCCLUSION: {
+         uint64_t *slot = query_slot(pool, firstQuery + i);
+         if (write_results) {
+            /* From the Vulkan 1.2.132 spec:
+             *
+             *    "If VK_QUERY_RESULT_PARTIAL_BIT is set,
+             *    VK_QUERY_RESULT_WAIT_BIT is not set, and the query’s status
+             *    is unavailable, an intermediate result value between zero and
+             *    the final result value is written to pData for that query."
+             */
+            uint64_t result = available ? slot[2] - slot[1] : 0;
+            cpu_write_query_result(pData, flags, idx, result);
          }
-         default:
-            unreachable("invalid pool type");
+         idx++;
+         break;
+      }
+
+      case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
+         uint64_t *slot = query_slot(pool, firstQuery + i);
+         uint32_t statistics = pool->pipeline_statistics;
+         while (statistics) {
+            uint32_t stat = u_bit_scan(&statistics);
+            if (write_results) {
+               uint64_t result = slot[idx * 2 + 2] - slot[idx * 2 + 1];
+
+               /* WaDividePSInvocationCountBy4:HSW,BDW */
+               if ((device->info.gen == 8 || device->info.is_haswell) &&
+                   (1 << stat) == VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT)
+                  result >>= 2;
+
+               cpu_write_query_result(pData, flags, idx, result);
+            }
+            idx++;
          }
-      } else {
-         status = VK_NOT_READY;
+         assert(idx == util_bitcount(pool->pipeline_statistics));
+         break;
       }
 
-      if (flags & VK_QUERY_RESULT_64_BIT) {
-         uint64_t *dst = pData;
+      case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
+         uint64_t *slot = query_slot(pool, firstQuery + i);
          if (write_results)
-            dst[0] = result;
-         if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
-            dst[1] = slot[firstQuery + i].available;
-      } else {
-         uint32_t *dst = pData;
+            cpu_write_query_result(pData, flags, idx, slot[2] - slot[1]);
+         idx++;
          if (write_results)
-            dst[0] = result;
-         if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
-            dst[1] = slot[firstQuery + i].available;
+            cpu_write_query_result(pData, flags, idx, slot[4] - slot[3]);
+         idx++;
+         break;
+      }
+
+      case VK_QUERY_TYPE_TIMESTAMP: {
+         uint64_t *slot = query_slot(pool, firstQuery + i);
+         if (write_results)
+            cpu_write_query_result(pData, flags, idx, slot[1]);
+         idx++;
+         break;
       }
 
+#if GEN_GEN >= 8
+      case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
+         const struct anv_physical_device *pdevice = device->physical;
+         assert((flags & (VK_QUERY_RESULT_WITH_AVAILABILITY_BIT |
+                          VK_QUERY_RESULT_PARTIAL_BIT)) == 0);
+         for (uint32_t p = 0; p < pool->n_passes; p++) {
+            const uint32_t *begin = pool->bo->map + khr_perf_query_oa_offset(pool, firstQuery + i, p, false);
+            const uint32_t *end = pool->bo->map + khr_perf_query_oa_offset(pool, firstQuery + i, p, true);
+            struct gen_perf_query_result result;
+            gen_perf_query_result_clear(&result);
+            gen_perf_query_result_accumulate(&result, pool->pass_query[p], begin, end);
+            anv_perf_write_pass_results(pdevice->perf, pool, p, &result, pData);
+         }
+         break;
+      }
+#endif
+
+      case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
+         if (!write_results)
+            break;
+         const void *query_data = query_slot(pool, firstQuery + i);
+         const uint32_t *oa_begin = query_data + intel_perf_mi_rpc_offset(false);
+         const uint32_t *oa_end = query_data + intel_perf_mi_rpc_offset(true);
+         const uint32_t *rpstat_begin = query_data + intel_perf_rpstart_offset(false);
+         const uint32_t *rpstat_end = query_data + intel_perf_rpstart_offset(true);
+         struct gen_perf_query_result result;
+         uint32_t core_freq[2];
+#if GEN_GEN < 9
+         core_freq[0] = ((*rpstat_begin >> 7) & 0x7f) * 1000000ULL;
+         core_freq[1] = ((*rpstat_end >> 7) & 0x7f) * 1000000ULL;
+#else
+         core_freq[0] = ((*rpstat_begin >> 23) & 0x1ff) * 1000000ULL;
+         core_freq[1] = ((*rpstat_end >> 23) & 0x1ff) * 1000000ULL;
+#endif
+         gen_perf_query_result_clear(&result);
+         gen_perf_query_result_accumulate(&result, &device->physical->perf->queries[0],
+                                          oa_begin, oa_end);
+         gen_perf_query_result_read_frequencies(&result, &device->info,
+                                                oa_begin, oa_end);
+         gen_perf_query_result_write_mdapi(pData, stride,
+                                           &device->info,
+                                           &result,
+                                           core_freq[0], core_freq[1]);
+#if GEN_GEN >= 8 && GEN_GEN <= 11
+         gen_perf_query_mdapi_write_perfcntr(pData, stride, &device->info,
+                                             query_data + intel_perf_counter(false),
+                                             query_data + intel_perf_counter(true));
+#endif
+         const uint64_t *marker = query_data + intel_perf_marker_offset();
+         gen_perf_query_mdapi_write_marker(pData, stride, &device->info, *marker);
+         break;
+      }
+
+      default:
+         unreachable("invalid pool type");
+      }
+
+      if (!write_results)
+         status = VK_NOT_READY;
+
+      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
+         cpu_write_query_result(pData, flags, idx, available);
+
       pData += stride;
       if (pData >= data_end)
          break;
@@ -197,13 +581,16 @@ VkResult genX(GetQueryPoolResults)(
 
 static void
 emit_ps_depth_count(struct anv_cmd_buffer *cmd_buffer,
-                    struct anv_bo *bo, uint32_t offset)
+                    struct anv_address addr)
 {
+   cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
+   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
+
    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
       pc.DestinationAddressType  = DAT_PPGTT;
       pc.PostSyncOperation       = WritePSDepthCount;
       pc.DepthStallEnable        = true;
-      pc.Address                 = (struct anv_address) { bo, offset };
+      pc.Address                 = addr;
 
       if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4)
          pc.CommandStreamerStallEnable = true;
@@ -211,14 +598,97 @@ emit_ps_depth_count(struct anv_cmd_buffer *cmd_buffer,
 }
 
 static void
-emit_query_availability(struct anv_cmd_buffer *cmd_buffer,
-                        struct anv_bo *bo, uint32_t offset)
+emit_query_mi_availability(struct gen_mi_builder *b,
+                           struct anv_address addr,
+                           bool available)
+{
+   gen_mi_store(b, gen_mi_mem64(addr), gen_mi_imm(available));
+}
+
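+/* Same as emit_query_mi_availability() but written with a PIPE_CONTROL
+ * post-sync operation, for availability values that need to be ordered with
+ * other PIPE_CONTROL writes (occlusion & timestamp queries).
+ */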
+static void
+emit_query_pc_availability(struct anv_cmd_buffer *cmd_buffer,
+                           struct anv_address addr,
+                           bool available)
 {
+   cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
+   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
+
    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
       pc.DestinationAddressType  = DAT_PPGTT;
       pc.PostSyncOperation       = WriteImmediateData;
-      pc.Address                 = (struct anv_address) { bo, offset };
-      pc.ImmediateData           = 1;
+      pc.Address                 = addr;
+      pc.ImmediateData           = available;
+   }
+}
+
+/**
+ * Goes through a series of consecutive query indices in the given pool,
+ * setting all element values to 0 and marking them available.
+ */
+static void
+emit_zero_queries(struct anv_cmd_buffer *cmd_buffer,
+                  struct gen_mi_builder *b, struct anv_query_pool *pool,
+                  uint32_t first_index, uint32_t num_queries)
+{
+   switch (pool->type) {
+   case VK_QUERY_TYPE_OCCLUSION:
+   case VK_QUERY_TYPE_TIMESTAMP:
+      /* These queries are written with a PIPE_CONTROL, so clear them using
+       * PIPE_CONTROL as well; that way we don't have to synchronize between
+       * two types of operations.
+       */
+      assert((pool->stride % 8) == 0);
+      for (uint32_t i = 0; i < num_queries; i++) {
+         struct anv_address slot_addr =
+            anv_query_address(pool, first_index + i);
+
+         for (uint32_t qword = 1; qword < (pool->stride / 8); qword++) {
+            emit_query_pc_availability(cmd_buffer,
+                                       anv_address_add(slot_addr, qword * 8),
+                                       false);
+         }
+         emit_query_pc_availability(cmd_buffer, slot_addr, true);
+      }
+      break;
+
+   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
+      for (uint32_t i = 0; i < num_queries; i++) {
+         struct anv_address slot_addr =
+            anv_query_address(pool, first_index + i);
+         gen_mi_memset(b, anv_address_add(slot_addr, 8), 0, pool->stride - 8);
+         emit_query_mi_availability(b, slot_addr, true);
+      }
+      break;
+
+#if GEN_GEN >= 8
+   case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
+      for (uint32_t i = 0; i < num_queries; i++) {
+         for (uint32_t p = 0; p < pool->n_passes; p++) {
+            gen_mi_memset(b,
+                          khr_perf_query_oa_address(pool,
+                                                    first_index + i, p, false),
+                          0, 2 * OA_SNAPSHOT_SIZE);
+            emit_query_mi_availability(b,
+                                       khr_perf_query_availability_address(pool, first_index + i, p),
+                                       true);
+         }
+      }
+      break;
+   }
+#endif
+
+   case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL:
+      for (uint32_t i = 0; i < num_queries; i++) {
+         struct anv_address slot_addr =
+            anv_query_address(pool, first_index + i);
+         gen_mi_memset(b, anv_address_add(slot_addr, 8), 0, pool->stride - 8);
+         emit_query_mi_availability(b, slot_addr, true);
+      }
+      break;
+
+   default:
+      unreachable("Unsupported query type");
    }
 }
 
@@ -231,48 +701,236 @@ void genX(CmdResetQueryPool)(
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
 
+   switch (pool->type) {
+   case VK_QUERY_TYPE_OCCLUSION:
+   case VK_QUERY_TYPE_TIMESTAMP:
+      for (uint32_t i = 0; i < queryCount; i++) {
+         emit_query_pc_availability(cmd_buffer,
+                                    anv_query_address(pool, firstQuery + i),
+                                    false);
+      }
+      break;
+
+   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: {
+      struct gen_mi_builder b;
+      gen_mi_builder_init(&b, &cmd_buffer->batch);
+
+      for (uint32_t i = 0; i < queryCount; i++)
+         emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
+      break;
+   }
+
+#if GEN_GEN >= 8
+   case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
+      struct gen_mi_builder b;
+      gen_mi_builder_init(&b, &cmd_buffer->batch);
+
+      for (uint32_t i = 0; i < queryCount; i++) {
+         for (uint32_t p = 0; p < pool->n_passes; p++) {
+            emit_query_mi_availability(
+               &b,
+               khr_perf_query_availability_address(pool, firstQuery + i, p),
+               false);
+         }
+      }
+      break;
+   }
+#endif
+
+   case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
+      struct gen_mi_builder b;
+      gen_mi_builder_init(&b, &cmd_buffer->batch);
+
+      for (uint32_t i = 0; i < queryCount; i++)
+         emit_query_mi_availability(&b, anv_query_address(pool, firstQuery + i), false);
+      break;
+   }
+
+   default:
+      unreachable("Unsupported query type");
+   }
+}
+
+void genX(ResetQueryPool)(
+    VkDevice                                    _device,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    firstQuery,
+    uint32_t                                    queryCount)
+{
+   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+
    for (uint32_t i = 0; i < queryCount; i++) {
-      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdm) {
-         sdm.Address = (struct anv_address) {
-            .bo = &pool->bo,
-            .offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot),
-         };
-         sdm.DataDWord0 = 0;
-         sdm.DataDWord1 = 0;
+      if (pool->type == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
+         for (uint32_t p = 0; p < pool->n_passes; p++) {
+            uint64_t *pass_slot = pool->bo->map +
+               khr_perf_query_availability_offset(pool, firstQuery + i, p);
+            *pass_slot = 0;
+         }
+      } else {
+         uint64_t *slot = query_slot(pool, firstQuery + i);
+         *slot = 0;
       }
    }
 }
 
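+/* HW counter registers for each VkQueryPipelineStatisticFlagBits, indexed by
+ * the statistic's bit position (see the STATIC_ASSERT in emit_pipeline_stat).
+ */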
+static const uint32_t vk_pipeline_stat_to_reg[] = {
+   GENX(IA_VERTICES_COUNT_num),
+   GENX(IA_PRIMITIVES_COUNT_num),
+   GENX(VS_INVOCATION_COUNT_num),
+   GENX(GS_INVOCATION_COUNT_num),
+   GENX(GS_PRIMITIVES_COUNT_num),
+   GENX(CL_INVOCATION_COUNT_num),
+   GENX(CL_PRIMITIVES_COUNT_num),
+   GENX(PS_INVOCATION_COUNT_num),
+   GENX(HS_INVOCATION_COUNT_num),
+   GENX(DS_INVOCATION_COUNT_num),
+   GENX(CS_INVOCATION_COUNT_num),
+};
+
+static void
+emit_pipeline_stat(struct gen_mi_builder *b, uint32_t stat,
+                   struct anv_address addr)
+{
+   STATIC_ASSERT(ANV_PIPELINE_STATISTICS_MASK ==
+                 (1 << ARRAY_SIZE(vk_pipeline_stat_to_reg)) - 1);
+
+   assert(stat < ARRAY_SIZE(vk_pipeline_stat_to_reg));
+   gen_mi_store(b, gen_mi_mem64(addr),
+                gen_mi_reg64(vk_pipeline_stat_to_reg[stat]));
+}
+
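+/* Captures the primitives-written and primitives-storage-needed counters of
+ * the given transform feedback stream at addr + 0 and addr + 16 respectively.
+ */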
+static void
+emit_xfb_query(struct gen_mi_builder *b, uint32_t stream,
+               struct anv_address addr)
+{
+   assert(stream < MAX_XFB_STREAMS);
+
+   gen_mi_store(b, gen_mi_mem64(anv_address_add(addr, 0)),
+                gen_mi_reg64(GENX(SO_NUM_PRIMS_WRITTEN0_num) + stream * 8));
+   gen_mi_store(b, gen_mi_mem64(anv_address_add(addr, 16)),
+                gen_mi_reg64(GENX(SO_PRIM_STORAGE_NEEDED0_num) + stream * 8));
+}
+
 void genX(CmdBeginQuery)(
     VkCommandBuffer                             commandBuffer,
     VkQueryPool                                 queryPool,
     uint32_t                                    query,
     VkQueryControlFlags                         flags)
+{
+   genX(CmdBeginQueryIndexedEXT)(commandBuffer, queryPool, query, flags, 0);
+}
+
+void genX(CmdBeginQueryIndexedEXT)(
+    VkCommandBuffer                             commandBuffer,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    query,
+    VkQueryControlFlags                         flags,
+    uint32_t                                    index)
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+   struct anv_address query_addr = anv_query_address(pool, query);
 
-   /* Workaround: When meta uses the pipeline with the VS disabled, it seems
-    * that the pipelining of the depth write breaks. What we see is that
-    * samples from the render pass clear leaks into the first query
-    * immediately after the clear. Doing a pipecontrol with a post-sync
-    * operation and DepthStallEnable seems to work around the issue.
-    */
-   if (cmd_buffer->state.need_query_wa) {
-      cmd_buffer->state.need_query_wa = false;
+   struct gen_mi_builder b;
+   gen_mi_builder_init(&b, &cmd_buffer->batch);
+
+   switch (pool->type) {
+   case VK_QUERY_TYPE_OCCLUSION:
+      emit_ps_depth_count(cmd_buffer, anv_address_add(query_addr, 8));
+      break;
+
+   case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
+      /* TODO: This might only be necessary for certain stats */
       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
-         pc.DepthCacheFlushEnable   = true;
-         pc.DepthStallEnable        = true;
+         pc.CommandStreamerStallEnable = true;
+         pc.StallAtPixelScoreboard = true;
+      }
+
+      uint32_t statistics = pool->pipeline_statistics;
+      uint32_t offset = 8;
+      while (statistics) {
+         uint32_t stat = u_bit_scan(&statistics);
+         emit_pipeline_stat(&b, stat, anv_address_add(query_addr, offset));
+         offset += 16;
       }
+      break;
    }
 
-   switch (pool->type) {
-   case VK_QUERY_TYPE_OCCLUSION:
-      emit_ps_depth_count(cmd_buffer, &pool->bo,
-                          query * sizeof(struct anv_query_pool_slot) + 8);
+   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+         pc.CommandStreamerStallEnable = true;
+         pc.StallAtPixelScoreboard = true;
+      }
+      emit_xfb_query(&b, index, anv_address_add(query_addr, 8));
       break;
 
-   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+#if GEN_GEN >= 8
+   case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+         pc.CommandStreamerStallEnable = true;
+         pc.StallAtPixelScoreboard = true;
+      }
+      cmd_buffer->perf_query_pool = pool;
+
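+      /* The destination of the MI_RPC below depends on the pass currently
+       * being executed: the batch computes it at run time by adding
+       * ANV_PERF_QUERY_OFFSET_REG (holding the pass offset) to this query's
+       * pass-0 OA offset, then patches the MI_REPORT_PERF_COUNT's
+       * MemoryAddress field through a self-modifying store.
+       */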
+      /* We know the bottom bits of the address are 0s which match what we
+       * want in the MI_RPC packet.
+       */
+      struct gen_mi_value mi_rpc_write_offset =
+         gen_mi_iadd(
+            &b,
+            gen_mi_imm(
+               gen_canonical_address(
+                  pool->bo->offset +
+                  khr_perf_query_oa_offset(pool, query, 0 /* pass */, false))),
+            gen_mi_reg64(ANV_PERF_QUERY_OFFSET_REG));
+      struct gen_mi_address_token mi_rpc_addr_dest =
+         gen_mi_store_address(&b, mi_rpc_write_offset);
+      gen_mi_self_mod_barrier(&b);
+
+      void *mi_rpc_dws =
+         anv_batch_emitn(&cmd_buffer->batch,
+                         GENX(MI_REPORT_PERF_COUNT_length),
+                         GENX(MI_REPORT_PERF_COUNT),
+                         .MemoryAddress = query_addr /* Will be overwritten */ );
+      _gen_mi_resolve_address_token(&b, mi_rpc_addr_dest,
+                                    mi_rpc_dws +
+                                    GENX(MI_REPORT_PERF_COUNT_MemoryAddress_start) / 8);
+      break;
+   }
+#endif
+
+   case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+         pc.CommandStreamerStallEnable = true;
+         pc.StallAtPixelScoreboard = true;
+      }
+      anv_batch_emit(&cmd_buffer->batch, GENX(MI_REPORT_PERF_COUNT), rpc) {
+         rpc.MemoryAddress =
+            anv_address_add(query_addr, intel_perf_mi_rpc_offset(false));
+      }
+#if GEN_GEN < 9
+      gen_mi_store(&b,
+                   gen_mi_mem32(anv_address_add(query_addr,
+                                                intel_perf_rpstart_offset(false))),
+                   gen_mi_reg32(GENX(RPSTAT1_num)));
+#else
+      gen_mi_store(&b,
+                   gen_mi_mem32(anv_address_add(query_addr,
+                                                intel_perf_rpstart_offset(false))),
+                   gen_mi_reg32(GENX(RPSTAT0_num)));
+#endif
+#if GEN_GEN >= 8 && GEN_GEN <= 11
+      gen_mi_store(&b, gen_mi_mem64(anv_address_add(query_addr,
+                                                    intel_perf_counter(false))),
+                   gen_mi_reg64(GENX(PERFCNT1_num)));
+      gen_mi_store(&b, gen_mi_mem64(anv_address_add(query_addr,
+                                                    intel_perf_counter(false) + 8)),
+                   gen_mi_reg64(GENX(PERFCNT2_num)));
+#endif
+      break;
+   }
+
    default:
       unreachable("");
    }
@@ -282,23 +940,167 @@ void genX(CmdEndQuery)(
     VkCommandBuffer                             commandBuffer,
     VkQueryPool                                 queryPool,
     uint32_t                                    query)
+{
+   genX(CmdEndQueryIndexedEXT)(commandBuffer, queryPool, query, 0);
+}
+
+void genX(CmdEndQueryIndexedEXT)(
+    VkCommandBuffer                             commandBuffer,
+    VkQueryPool                                 queryPool,
+    uint32_t                                    query,
+    uint32_t                                    index)
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+   struct anv_address query_addr = anv_query_address(pool, query);
+
+   struct gen_mi_builder b;
+   gen_mi_builder_init(&b, &cmd_buffer->batch);
 
    switch (pool->type) {
    case VK_QUERY_TYPE_OCCLUSION:
-      emit_ps_depth_count(cmd_buffer, &pool->bo,
-                          query * sizeof(struct anv_query_pool_slot) + 16);
+      emit_ps_depth_count(cmd_buffer, anv_address_add(query_addr, 16));
+      emit_query_pc_availability(cmd_buffer, query_addr, true);
+      break;
 
-      emit_query_availability(cmd_buffer, &pool->bo,
-                              query * sizeof(struct anv_query_pool_slot));
+   case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
+      /* TODO: This might only be necessary for certain stats */
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+         pc.CommandStreamerStallEnable = true;
+         pc.StallAtPixelScoreboard = true;
+      }
+
+      uint32_t statistics = pool->pipeline_statistics;
+      uint32_t offset = 16;
+      while (statistics) {
+         uint32_t stat = u_bit_scan(&statistics);
+         emit_pipeline_stat(&b, stat, anv_address_add(query_addr, offset));
+         offset += 16;
+      }
+
+      emit_query_mi_availability(&b, query_addr, true);
       break;
+   }
+
+   case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+         pc.CommandStreamerStallEnable = true;
+         pc.StallAtPixelScoreboard = true;
+      }
+
+      emit_xfb_query(&b, index, anv_address_add(query_addr, 16));
+      emit_query_mi_availability(&b, query_addr, true);
+      break;
+
+#if GEN_GEN >= 8
+   case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: {
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+         pc.CommandStreamerStallEnable = true;
+         pc.StallAtPixelScoreboard = true;
+      }
+
+      /* We know the bottom bits of the address are 0s which match what we
+       * want in the MI_RPC/MI_SDI packets.
+       */
+      struct gen_mi_value mi_rpc_write_offset =
+         gen_mi_iadd(
+            &b,
+            gen_mi_imm(
+               gen_canonical_address(
+                  pool->bo->offset +
+                  khr_perf_query_oa_offset(pool, query, 0 /* pass*/, true))),
+            gen_mi_reg64(ANV_PERF_QUERY_OFFSET_REG));
+      struct gen_mi_value availability_write_offset =
+         gen_mi_iadd(
+            &b,
+            gen_mi_imm(
+               gen_canonical_address(
+                  pool->bo->offset +
+                  khr_perf_query_availability_offset(pool, query, 0 /* pass */))),
+            gen_mi_reg64(ANV_PERF_QUERY_OFFSET_REG));
+
+      struct gen_mi_address_token mi_rpc_addr_dest =
+         gen_mi_store_address(&b, mi_rpc_write_offset);
+      struct gen_mi_address_token availability_addr_dest =
+         gen_mi_store_address(&b, availability_write_offset);
+      gen_mi_self_mod_barrier(&b);
+
+      void *mi_rpc_dws =
+         anv_batch_emitn(&cmd_buffer->batch,
+                         GENX(MI_REPORT_PERF_COUNT_length),
+                         GENX(MI_REPORT_PERF_COUNT),
+                         .MemoryAddress = query_addr /* Will be overwritten */ );
+      _gen_mi_resolve_address_token(&b, mi_rpc_addr_dest,
+                                    mi_rpc_dws +
+                                    GENX(MI_REPORT_PERF_COUNT_MemoryAddress_start) / 8);
+
+      void *availability_dws =
+         anv_batch_emitn(&cmd_buffer->batch,
+                         GENX(MI_STORE_DATA_IMM_length),
+                         GENX(MI_STORE_DATA_IMM),
+                         .ImmediateData = true);
+      _gen_mi_resolve_address_token(&b, availability_addr_dest,
+                                    availability_dws +
+                                    GENX(MI_STORE_DATA_IMM_Address_start) / 8);
+      break;
+   }
+#endif
+
+   case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: {
+      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
+         pc.CommandStreamerStallEnable = true;
+         pc.StallAtPixelScoreboard = true;
+      }
+      uint32_t marker_offset = intel_perf_marker_offset();
+      gen_mi_store(&b, gen_mi_mem64(anv_address_add(query_addr, marker_offset)),
+                   gen_mi_imm(cmd_buffer->intel_perf_marker));
+#if GEN_GEN >= 8 && GEN_GEN <= 11
+      gen_mi_store(&b, gen_mi_mem64(anv_address_add(query_addr, intel_perf_counter(true))),
+                   gen_mi_reg64(GENX(PERFCNT1_num)));
+      gen_mi_store(&b, gen_mi_mem64(anv_address_add(query_addr, intel_perf_counter(true) + 8)),
+                   gen_mi_reg64(GENX(PERFCNT2_num)));
+#endif
+#if GEN_GEN < 9
+      gen_mi_store(&b,
+                   gen_mi_mem32(anv_address_add(query_addr,
+                                                intel_perf_rpstart_offset(true))),
+                   gen_mi_reg32(GENX(RPSTAT1_num)));
+#else
+      gen_mi_store(&b,
+                   gen_mi_mem32(anv_address_add(query_addr,
+                                                intel_perf_rpstart_offset(true))),
+                   gen_mi_reg32(GENX(RPSTAT0_num)));
+#endif
+      /* Position the last OA snapshot at the beginning of the query so that
+       * we can tell whether it's ready.
+       */
+      anv_batch_emit(&cmd_buffer->batch, GENX(MI_REPORT_PERF_COUNT), rpc) {
+         rpc.MemoryAddress = anv_address_add(query_addr,
+                                             intel_perf_mi_rpc_offset(true));
+         rpc.ReportID = 0xdeadbeef; /* This goes in the first dword */
+      }
+      emit_query_mi_availability(&b, query_addr, true);
+      break;
+   }
 
-   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
    default:
       unreachable("");
    }
+
+   /* When multiview is active the spec requires that N consecutive query
+    * indices are used, where N is the number of active views in the subpass.
+    * The spec allows us to write the results to only one of the queries, but
+    * we still need to manage result availability for all the query indices.
+    * Since we only emit a single query for all active views in the first
+    * index, mark the other query indices as already available with result 0.
+    */
+   if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
+      const uint32_t num_queries =
+         util_bitcount(cmd_buffer->state.subpass->view_mask);
+      if (num_queries > 1)
+         emit_zero_queries(cmd_buffer, &b, pool, query + 1, num_queries - 1);
+   }
 }
 
 #define TIMESTAMP 0x2358
@@ -311,28 +1113,28 @@ void genX(CmdWriteTimestamp)(
 {
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
-   uint32_t offset = query * sizeof(struct anv_query_pool_slot);
+   struct anv_address query_addr = anv_query_address(pool, query);
 
    assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
 
+   struct gen_mi_builder b;
+   gen_mi_builder_init(&b, &cmd_buffer->batch);
+
    switch (pipelineStage) {
    case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
-      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
-         srm.RegisterAddress  = TIMESTAMP;
-         srm.MemoryAddress    = (struct anv_address) { &pool->bo, offset + 8 };
-      }
-      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
-         srm.RegisterAddress  = TIMESTAMP + 4;
-         srm.MemoryAddress    = (struct anv_address) { &pool->bo, offset + 12 };
-      }
+      gen_mi_store(&b, gen_mi_mem64(anv_address_add(query_addr, 8)),
+                       gen_mi_reg64(TIMESTAMP));
       break;
 
    default:
       /* Everything else is bottom-of-pipe */
+      cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
+      genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
+
       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
          pc.DestinationAddressType  = DAT_PPGTT;
          pc.PostSyncOperation       = WriteTimestamp;
-         pc.Address = (struct anv_address) { &pool->bo, offset + 8 };
+         pc.Address                 = anv_address_add(query_addr, 8);
 
          if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4)
             pc.CommandStreamerStallEnable = true;
@@ -340,74 +1142,88 @@ void genX(CmdWriteTimestamp)(
       break;
    }
 
-   emit_query_availability(cmd_buffer, &pool->bo, offset);
+   emit_query_pc_availability(cmd_buffer, query_addr, true);
+
+   /* When multiview is active the spec requires that N consecutive query
+    * indices are used, where N is the number of active views in the subpass.
+    * The spec allows us to write the results to only one of the queries, but
+    * we still need to manage result availability for all the query indices.
+    * Since we only emit a single query for all active views in the first
+    * index, mark the other query indices as already available with result 0.
+    */
+   if (cmd_buffer->state.subpass && cmd_buffer->state.subpass->view_mask) {
+      const uint32_t num_queries =
+         util_bitcount(cmd_buffer->state.subpass->view_mask);
+      if (num_queries > 1)
+         emit_zero_queries(cmd_buffer, &b, pool, query + 1, num_queries - 1);
+   }
 }
 
 #if GEN_GEN > 7 || GEN_IS_HASWELL
 
-#define alu_opcode(v)   __gen_uint((v),  20, 31)
-#define alu_operand1(v) __gen_uint((v),  10, 19)
-#define alu_operand2(v) __gen_uint((v),   0,  9)
-#define alu(opcode, operand1, operand2) \
-   alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
-
-#define OPCODE_NOOP      0x000
-#define OPCODE_LOAD      0x080
-#define OPCODE_LOADINV   0x480
-#define OPCODE_LOAD0     0x081
-#define OPCODE_LOAD1     0x481
-#define OPCODE_ADD       0x100
-#define OPCODE_SUB       0x101
-#define OPCODE_AND       0x102
-#define OPCODE_OR        0x103
-#define OPCODE_XOR       0x104
-#define OPCODE_STORE     0x180
-#define OPCODE_STOREINV  0x580
-
-#define OPERAND_R0   0x00
-#define OPERAND_R1   0x01
-#define OPERAND_R2   0x02
-#define OPERAND_R3   0x03
-#define OPERAND_R4   0x04
-#define OPERAND_SRCA 0x20
-#define OPERAND_SRCB 0x21
-#define OPERAND_ACCU 0x31
-#define OPERAND_ZF   0x32
-#define OPERAND_CF   0x33
-
-#define CS_GPR(n) (0x2600 + (n) * 8)
+#if GEN_GEN >= 8 || GEN_IS_HASWELL
+
+#define MI_PREDICATE_SRC0    0x2400
+#define MI_PREDICATE_SRC1    0x2408
+#define MI_PREDICATE_RESULT  0x2418
 
+/**
+ * Writes the result of a query to dst_addr if the value at poll_addr is equal
+ * to the reference value.
+ */
 static void
-emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
-                      struct anv_bo *bo, uint32_t offset)
+gpu_write_query_result_cond(struct anv_cmd_buffer *cmd_buffer,
+                            struct gen_mi_builder *b,
+                            struct anv_address poll_addr,
+                            struct anv_address dst_addr,
+                            uint64_t ref_value,
+                            VkQueryResultFlags flags,
+                            uint32_t value_index,
+                            struct gen_mi_value query_result)
 {
-   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
-      lrm.RegisterAddress  = reg,
-      lrm.MemoryAddress    = (struct anv_address) { bo, offset };
+   gen_mi_store(b, gen_mi_reg64(MI_PREDICATE_SRC0), gen_mi_mem64(poll_addr));
+   gen_mi_store(b, gen_mi_reg64(MI_PREDICATE_SRC1), gen_mi_imm(ref_value));
+   anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
+      mip.LoadOperation    = LOAD_LOAD;
+      mip.CombineOperation = COMBINE_SET;
+      mip.CompareOperation = COMPARE_SRCS_EQUAL;
    }
-   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
-      lrm.RegisterAddress  = reg + 4;
-      lrm.MemoryAddress    = (struct anv_address) { bo, offset + 4 };
+
+   if (flags & VK_QUERY_RESULT_64_BIT) {
+      struct anv_address res_addr = anv_address_add(dst_addr, value_index * 8);
+      gen_mi_store_if(b, gen_mi_mem64(res_addr), query_result);
+   } else {
+      struct anv_address res_addr = anv_address_add(dst_addr, value_index * 4);
+      gen_mi_store_if(b, gen_mi_mem32(res_addr), query_result);
    }
 }
 
+#endif /* GEN_GEN >= 8 || GEN_IS_HASWELL */
+
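+/**
+ * Unconditionally writes a query result value to dst_addr, honoring the
+ * 32-bit / 64-bit layout requested through flags.
+ */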
 static void
-store_query_result(struct anv_batch *batch, uint32_t reg,
-                   struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
+gpu_write_query_result(struct gen_mi_builder *b,
+                       struct anv_address dst_addr,
+                       VkQueryResultFlags flags,
+                       uint32_t value_index,
+                       struct gen_mi_value query_result)
 {
-   anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
-      srm.RegisterAddress  = reg;
-      srm.MemoryAddress    = (struct anv_address) { bo, offset };
-   }
-
    if (flags & VK_QUERY_RESULT_64_BIT) {
-      anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
-         srm.RegisterAddress  = reg + 4;
-         srm.MemoryAddress    = (struct anv_address) { bo, offset + 4 };
-      }
+      struct anv_address res_addr = anv_address_add(dst_addr, value_index * 8);
+      gen_mi_store(b, gen_mi_mem64(res_addr), query_result);
+   } else {
+      struct anv_address res_addr = anv_address_add(dst_addr, value_index * 4);
+      gen_mi_store(b, gen_mi_mem32(res_addr), query_result);
    }
 }
 
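+/* Computes end - begin for a pair of query values, with begin at addr and
+ * end at addr + 8, using MI commands.
+ */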
+static struct gen_mi_value
+compute_query_result(struct gen_mi_builder *b, struct anv_address addr)
+{
+   return gen_mi_isub(b, gen_mi_mem64(anv_address_add(addr, 8)),
+                         gen_mi_mem64(anv_address_add(addr, 0)));
+}
+
 void genX(CmdCopyQueryPoolResults)(
     VkCommandBuffer                             commandBuffer,
     VkQueryPool                                 queryPool,
@@ -421,59 +1237,113 @@ void genX(CmdCopyQueryPoolResults)(
    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
    ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
-   uint32_t slot_offset, dst_offset;
 
-   if (flags & VK_QUERY_RESULT_WAIT_BIT) {
-      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
-         pc.CommandStreamerStallEnable = true;
-         pc.StallAtPixelScoreboard     = true;
-      }
+   struct gen_mi_builder b;
+   gen_mi_builder_init(&b, &cmd_buffer->batch);
+   struct gen_mi_value result;
+
+   /* If render target writes are ongoing, request a render target cache flush
+    * to ensure proper ordering of the commands from the 3d pipe and the
+    * command streamer.
+    */
+   if (cmd_buffer->state.pending_pipe_bits & ANV_PIPE_RENDER_TARGET_BUFFER_WRITES) {
+      cmd_buffer->state.pending_pipe_bits |=
+         ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
    }
 
-   dst_offset = buffer->offset + destOffset;
-   for (uint32_t i = 0; i < queryCount; i++) {
+   if ((flags & VK_QUERY_RESULT_WAIT_BIT) ||
+       (cmd_buffer->state.pending_pipe_bits & ANV_PIPE_FLUSH_BITS) ||
+       /* Occlusion & timestamp queries are written using a PIPE_CONTROL, and
+        * because we're about to copy their values with MI commands, we need
+        * to stall the command streamer to make sure the PIPE_CONTROL values
+        * have landed, otherwise we could see inconsistent values &
+        * availability.
+        *
+        * From the Vulkan spec:
+        *
+        *     "vkCmdCopyQueryPoolResults is guaranteed to see the effect of
+        *     previous uses of vkCmdResetQueryPool in the same queue, without
+        *     any additional synchronization."
+        */
+       pool->type == VK_QUERY_TYPE_OCCLUSION ||
+       pool->type == VK_QUERY_TYPE_TIMESTAMP) {
+      cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
+      genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
+   }
 
-      slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
+   struct anv_address dest_addr = anv_address_add(buffer->address, destOffset);
+   for (uint32_t i = 0; i < queryCount; i++) {
+      struct anv_address query_addr = anv_query_address(pool, firstQuery + i);
+      uint32_t idx = 0;
       switch (pool->type) {
       case VK_QUERY_TYPE_OCCLUSION:
-         emit_load_alu_reg_u64(&cmd_buffer->batch,
-                               CS_GPR(0), &pool->bo, slot_offset + 8);
-         emit_load_alu_reg_u64(&cmd_buffer->batch,
-                               CS_GPR(1), &pool->bo, slot_offset + 16);
-
-         /* FIXME: We need to clamp the result for 32 bit. */
-
-         uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
-         dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
-         dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
-         dw[3] = alu(OPCODE_SUB, 0, 0);
-         dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
+         result = compute_query_result(&b, anv_address_add(query_addr, 8));
+#if GEN_GEN >= 8 || GEN_IS_HASWELL
+         /* Like in the case of vkGetQueryPoolResults, if the query is
+          * unavailable and the VK_QUERY_RESULT_PARTIAL_BIT flag is set,
+          * conservatively write 0 as the query result. If the
+          * VK_QUERY_RESULT_PARTIAL_BIT isn't set, don't write any value.
+          */
+         gpu_write_query_result_cond(cmd_buffer, &b, query_addr, dest_addr,
+               1 /* available */, flags, idx, result);
+         if (flags & VK_QUERY_RESULT_PARTIAL_BIT) {
+            gpu_write_query_result_cond(cmd_buffer, &b, query_addr, dest_addr,
+                  0 /* unavailable */, flags, idx, gen_mi_imm(0));
+         }
+         idx++;
+#else /* GEN_GEN < 8 && !GEN_IS_HASWELL */
+         gpu_write_query_result(&b, dest_addr, flags, idx++, result);
+#endif
+         break;
+
+      case VK_QUERY_TYPE_PIPELINE_STATISTICS: {
+         uint32_t statistics = pool->pipeline_statistics;
+         while (statistics) {
+            uint32_t stat = u_bit_scan(&statistics);
+
+            result = compute_query_result(&b, anv_address_add(query_addr,
+                                                              idx * 16 + 8));
+
+            /* WaDividePSInvocationCountBy4:HSW,BDW */
+            if ((cmd_buffer->device->info.gen == 8 ||
+                 cmd_buffer->device->info.is_haswell) &&
+                (1 << stat) == VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT) {
+               result = gen_mi_ushr32_imm(&b, result, 2);
+            }
+
+            gpu_write_query_result(&b, dest_addr, flags, idx++, result);
+         }
+         assert(idx == util_bitcount(pool->pipeline_statistics));
+         break;
+      }
+
+      case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
+         result = compute_query_result(&b, anv_address_add(query_addr, 8));
+         gpu_write_query_result(&b, dest_addr, flags, idx++, result);
+         result = compute_query_result(&b, anv_address_add(query_addr, 24));
+         gpu_write_query_result(&b, dest_addr, flags, idx++, result);
          break;
 
       case VK_QUERY_TYPE_TIMESTAMP:
-         emit_load_alu_reg_u64(&cmd_buffer->batch,
-                               CS_GPR(2), &pool->bo, slot_offset + 8);
+         result = gen_mi_mem64(anv_address_add(query_addr, 8));
+         gpu_write_query_result(&b, dest_addr, flags, 0, result);
          break;
 
+#if GEN_GEN >= 8
+      case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR:
+         unreachable("Copy KHR performance query results not implemented");
+         break;
+#endif
+
       default:
          unreachable("unhandled query type");
       }
 
-      store_query_result(&cmd_buffer->batch,
-                         CS_GPR(2), buffer->bo, dst_offset, flags);
-
       if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
-         emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
-                               &pool->bo, slot_offset);
-         if (flags & VK_QUERY_RESULT_64_BIT)
-            store_query_result(&cmd_buffer->batch,
-                               CS_GPR(0), buffer->bo, dst_offset + 8, flags);
-         else
-            store_query_result(&cmd_buffer->batch,
-                               CS_GPR(0), buffer->bo, dst_offset + 4, flags);
+         gpu_write_query_result(&b, dest_addr, flags, idx,
+                                gen_mi_mem64(query_addr));
       }
 
-      dst_offset += destStride;
+      dest_addr = anv_address_add(dest_addr, destStride);
    }
 }