nvc0: fix valid range for shader buffers
[mesa.git] / src / gallium / drivers / nouveau / nvc0 / nvc0_query_hw_sm.c
index db36b8a1b9f87ff4dc22bb042d1367fb61ba0799..d4721201bbaa84d59d8463e54ac04a3a45f1a676 100644
 #include "nvc0/nvc0_compute.xml.h"
 
 /* NOTE: intentionally using the same names as NV */
-#define _Q(t, n) { NVC0_HW_SM_QUERY_##t, n }
-struct {
+#define _Q(t, n, d) { NVC0_HW_SM_QUERY_##t, n, d }
+static const struct {
    unsigned type;
    const char *name;
+   const char *desc;
 } nvc0_hw_sm_queries[] = {
-   _Q(ACTIVE_CYCLES,                "active_cycles"                           ),
-   _Q(ACTIVE_WARPS,                 "active_warps"                            ),
-   _Q(ATOM_CAS_COUNT,               "atom_cas_count"                          ),
-   _Q(ATOM_COUNT,                   "atom_count"                              ),
-   _Q(BRANCH,                       "branch"                                  ),
-   _Q(DIVERGENT_BRANCH,             "divergent_branch"                        ),
-   _Q(GLD_REQUEST,                  "gld_request"                             ),
-   _Q(GLD_MEM_DIV_REPLAY,           "global_ld_mem_divergence_replays"        ),
-   _Q(GST_TRANSACTIONS,             "global_store_transaction"                ),
-   _Q(GST_MEM_DIV_REPLAY,           "global_st_mem_divergence_replays"        ),
-   _Q(GRED_COUNT,                   "gred_count"                              ),
-   _Q(GST_REQUEST,                  "gst_request"                             ),
-   _Q(INST_EXECUTED,                "inst_executed"                           ),
-   _Q(INST_ISSUED,                  "inst_issued"                             ),
-   _Q(INST_ISSUED1,                 "inst_issued1"                            ),
-   _Q(INST_ISSUED2,                 "inst_issued2"                            ),
-   _Q(INST_ISSUED1_0,               "inst_issued1_0"                          ),
-   _Q(INST_ISSUED1_1,               "inst_issued1_1"                          ),
-   _Q(INST_ISSUED2_0,               "inst_issued2_0"                          ),
-   _Q(INST_ISSUED2_1,               "inst_issued2_1"                          ),
-   _Q(L1_GLD_HIT,                   "l1_global_load_hit"                      ),
-   _Q(L1_GLD_MISS,                  "l1_global_load_miss"                     ),
-   _Q(L1_GLD_TRANSACTIONS,          "__l1_global_load_transactions"           ),
-   _Q(L1_GST_TRANSACTIONS,          "__l1_global_store_transactions"          ),
-   _Q(L1_LOCAL_LD_HIT,              "l1_local_load_hit"                       ),
-   _Q(L1_LOCAL_LD_MISS,             "l1_local_load_miss"                      ),
-   _Q(L1_LOCAL_ST_HIT,              "l1_local_store_hit"                      ),
-   _Q(L1_LOCAL_ST_MISS,             "l1_local_store_miss"                     ),
-   _Q(L1_SHARED_LD_TRANSACTIONS,    "l1_shared_load_transactions"             ),
-   _Q(L1_SHARED_ST_TRANSACTIONS,    "l1_shared_store_transactions"            ),
-   _Q(LOCAL_LD,                     "local_load"                              ),
-   _Q(LOCAL_LD_TRANSACTIONS,        "local_load_transactions"                 ),
-   _Q(LOCAL_ST,                     "local_store"                             ),
-   _Q(LOCAL_ST_TRANSACTIONS,        "local_store_transactions"                ),
-   _Q(NOT_PRED_OFF_INST_EXECUTED,   "not_predicated_off_thread_inst_executed" ),
-   _Q(PROF_TRIGGER_0,               "prof_trigger_00"                         ),
-   _Q(PROF_TRIGGER_1,               "prof_trigger_01"                         ),
-   _Q(PROF_TRIGGER_2,               "prof_trigger_02"                         ),
-   _Q(PROF_TRIGGER_3,               "prof_trigger_03"                         ),
-   _Q(PROF_TRIGGER_4,               "prof_trigger_04"                         ),
-   _Q(PROF_TRIGGER_5,               "prof_trigger_05"                         ),
-   _Q(PROF_TRIGGER_6,               "prof_trigger_06"                         ),
-   _Q(PROF_TRIGGER_7,               "prof_trigger_07"                         ),
-   _Q(SHARED_LD,                    "shared_load"                             ),
-   _Q(SHARED_LD_REPLAY,             "shared_load_replay"                      ),
-   _Q(SHARED_ST,                    "shared_store"                            ),
-   _Q(SHARED_ST_REPLAY,             "shared_store_replay"                     ),
-   _Q(SM_CTA_LAUNCHED,              "sm_cta_launched"                         ),
-   _Q(THREADS_LAUNCHED,             "threads_launched"                        ),
-   _Q(TH_INST_EXECUTED,             "thread_inst_executed"                    ),
-   _Q(TH_INST_EXECUTED_0,           "thread_inst_executed_0"                  ),
-   _Q(TH_INST_EXECUTED_1,           "thread_inst_executed_1"                  ),
-   _Q(TH_INST_EXECUTED_2,           "thread_inst_executed_2"                  ),
-   _Q(TH_INST_EXECUTED_3,           "thread_inst_executed_3"                  ),
-   _Q(UNCACHED_GLD_TRANSACTIONS,    "uncached_global_load_transaction"        ),
-   _Q(WARPS_LAUNCHED,               "warps_launched"                          ),
+   _Q(ACTIVE_CYCLES,
+      "active_cycles",
+      "Number of cycles a multiprocessor has at least one active warp"),
+
+   _Q(ACTIVE_WARPS,
+      "active_warps",
+      "Accumulated number of active warps per cycle. For every cycle it "
+      "increments by the number of active warps in the cycle which can be in "
+      "the range 0 to 64"),
+
+   _Q(ATOM_CAS_COUNT,
+      "atom_cas_count",
+      "Number of warps executing atomic compare and swap operations. Increments "
+      "by one if at least one thread in a warp executes the instruction."),
+
+   _Q(ATOM_COUNT,
+      "atom_count",
+      "Number of warps executing atomic reduction operations. Increments by one "
+      "if at least one thread in a warp executes the instruction"),
+
+   _Q(BRANCH,
+      "branch",
+      "Number of branch instructions executed per warp on a multiprocessor"),
+
+   _Q(DIVERGENT_BRANCH,
+      "divergent_branch",
+      "Number of divergent branches within a warp. This counter will be "
+      "incremented by one if at least one thread in a warp diverges (that is, "
+      "follows a different execution path) via a conditional branch"),
+
+   _Q(GLD_REQUEST,
+      "gld_request",
+      "Number of executed load instructions where the state space is not "
+      "specified and hence generic addressing is used, increments per warp on a "
+      "multiprocessor. It can include the load operations from global,local and "
+      "shared state space"),
+
+   _Q(GLD_MEM_DIV_REPLAY,
+      "global_ld_mem_divergence_replays",
+      "Number of instruction replays for global memory loads. Instruction is "
+      "replayed if the instruction is accessing more than one cache line of "
+      "128 bytes. For each extra cache line access the counter is incremented "
+      "by 1"),
+
+   _Q(GST_TRANSACTIONS,
+      "global_store_transaction",
+      "Number of global store transactions. Increments by 1 per transaction. "
+      "Transaction can be 32/64/96/128B"),
+
+   _Q(GST_MEM_DIV_REPLAY,
+      "global_st_mem_divergence_replays",
+      "Number of instruction replays for global memory stores. Instruction is "
+      "replayed if the instruction is accessing more than one cache line of "
+      "128 bytes. For each extra cache line access the counter is incremented "
+      "by 1"),
+
+   _Q(GRED_COUNT,
+      "gred_count",
+      "Number of warps executing reduction operations on global memory. "
+      "Increments by one if at least one thread in a warp executes the "
+      "instruction"),
+
+   _Q(GST_REQUEST,
+      "gst_request",
+      "Number of executed store instructions where the state space is not "
+      "specified and hence generic addressing is used, increments per warp on a "
+      "multiprocessor. It can include the store operations to global,local and "
+      "shared state space"),
+
+   _Q(INST_EXECUTED,
+      "inst_executed",
+      "Number of instructions executed, do not include replays"),
+
+   _Q(INST_ISSUED,
+      "inst_issued",
+      "Number of instructions issued including replays"),
+
+   _Q(INST_ISSUED1,
+      "inst_issued1",
+      "Number of single instruction issued per cycle"),
+
+   _Q(INST_ISSUED2,
+      "inst_issued2",
+      "Number of dual instructions issued per cycle"),
+
+   _Q(INST_ISSUED1_0,
+      "inst_issued1_0",
+      "Number of single instruction issued per cycle in pipeline 0"),
+
+   _Q(INST_ISSUED1_1,
+      "inst_issued1_1",
+      "Number of single instruction issued per cycle in pipeline 1"),
+
+   _Q(INST_ISSUED2_0,
+      "inst_issued2_0",
+      "Number of dual instructions issued per cycle in pipeline 0"),
+
+   _Q(INST_ISSUED2_1,
+      "inst_issued2_1",
+      "Number of dual instructions issued per cycle in pipeline 1"),
+
+   _Q(L1_GLD_HIT,
+      "l1_global_load_hit",
+      "Number of cache lines that hit in L1 cache for global memory load "
+      "accesses. In case of perfect coalescing this increments by 1,2, and 4 for "
+      "32, 64 and 128 bit accesses by a warp respectively"),
+
+   _Q(L1_GLD_MISS,
+      "l1_global_load_miss",
+      "Number of cache lines that miss in L1 cache for global memory load "
+      "accesses. In case of perfect coalescing this increments by 1,2, and 4 for "
+      "32, 64 and 128 bit accesses by a warp respectively"),
+
+   _Q(L1_GLD_TRANSACTIONS,
+      "__l1_global_load_transactions",
+      "Number of global load transactions from L1 cache. Increments by 1 per "
+      "transaction. Transaction can be 32/64/96/128B"),
+
+   _Q(L1_GST_TRANSACTIONS,
+      "__l1_global_store_transactions",
+      "Number of global store transactions from L1 cache. Increments by 1 per "
+      "transaction. Transaction can be 32/64/96/128B"),
+
+   _Q(L1_LOCAL_LD_HIT,
+      "l1_local_load_hit",
+      "Number of cache lines that hit in L1 cache for local memory load "
+      "accesses. In case of perfect coalescing this increments by 1,2, and 4 for "
+      "32, 64 and 128 bit accesses by a warp respectively"),
+
+   _Q(L1_LOCAL_LD_MISS,
+      "l1_local_load_miss",
+      "Number of cache lines that miss in L1 cache for local memory load "
+      "accesses. In case of perfect coalescing this increments by 1,2, and 4 for "
+      "32, 64 and 128 bit accesses by a warp respectively"),
+
+   _Q(L1_LOCAL_ST_HIT,
+      "l1_local_store_hit",
+      "Number of cache lines that hit in L1 cache for local memory store "
+      "accesses. In case of perfect coalescing this increments by 1,2, and 4 for "
+      "32, 64 and 128 bit accesses by a warp respectively"),
+
+   _Q(L1_LOCAL_ST_MISS,
+      "l1_local_store_miss",
+      "Number of cache lines that miss in L1 cache for local memory store "
+      "accesses. In case of perfect coalescing this increments by 1,2, and 4 for "
+      "32,64 and 128 bit accesses by a warp respectively"),
+
+   _Q(L1_SHARED_LD_TRANSACTIONS,
+      "l1_shared_load_transactions",
+      "Number of shared load transactions. Increments by 1 per transaction. "
+      "Transaction can be 32/64/96/128B"),
+
+   _Q(L1_SHARED_ST_TRANSACTIONS,
+      "l1_shared_store_transactions",
+      "Number of shared store transactions. Increments by 1 per transaction. "
+      "Transaction can be 32/64/96/128B"),
+
+   _Q(LOCAL_LD,
+      "local_load",
+      "Number of executed load instructions where state space is specified as "
+      "local, increments per warp on a multiprocessor"),
+
+   _Q(LOCAL_LD_TRANSACTIONS,
+      "local_load_transactions",
+      "Number of local load transactions from L1 cache. Increments by 1 per "
+      "transaction. Transaction can be 32/64/96/128B"),
+
+   _Q(LOCAL_ST,
+      "local_store",
+      "Number of executed store instructions where state space is specified as "
+      "local, increments per warp on a multiprocessor"),
+
+   _Q(LOCAL_ST_TRANSACTIONS,
+      "local_store_transactions",
+      "Number of local store transactions to L1 cache. Increments by 1 per "
+      "transaction. Transaction can be 32/64/96/128B."),
+
+   _Q(NOT_PRED_OFF_INST_EXECUTED,
+      "not_predicated_off_thread_inst_executed",
+      "Number of not predicated off instructions executed by all threads, does "
+      "not include replays. For each instruction it increments by the number of "
+      "threads that execute this instruction"),
+
+   _Q(PROF_TRIGGER_0,
+      "prof_trigger_00",
+      "User-profiled generic trigger that can be inserted anywhere in the "
+      "code to collect related information. Increments per warp."),
+
+   _Q(PROF_TRIGGER_1,
+      "prof_trigger_01",
+      "User-profiled generic trigger that can be inserted anywhere in the "
+      "code to collect related information. Increments per warp."),
+
+   _Q(PROF_TRIGGER_2,
+      "prof_trigger_02",
+      "User-profiled generic trigger that can be inserted anywhere in the "
+      "code to collect related information. Increments per warp."),
+
+   _Q(PROF_TRIGGER_3,
+      "prof_trigger_03",
+      "User-profiled generic trigger that can be inserted anywhere in the "
+      "code to collect related information. Increments per warp."),
+
+   _Q(PROF_TRIGGER_4,
+      "prof_trigger_04",
+      "User-profiled generic trigger that can be inserted anywhere in the "
+      "code to collect related information. Increments per warp."),
+
+   _Q(PROF_TRIGGER_5,
+      "prof_trigger_05",
+      "User-profiled generic trigger that can be inserted anywhere in the "
+      "code to collect related information. Increments per warp."),
+
+   _Q(PROF_TRIGGER_6,
+      "prof_trigger_06",
+      "User-profiled generic trigger that can be inserted anywhere in the "
+      "code to collect related information. Increments per warp."),
+
+   _Q(PROF_TRIGGER_7,
+      "prof_trigger_07",
+      "User-profiled generic trigger that can be inserted anywhere in the "
+      "code to collect related information. Increments per warp."),
+
+   _Q(SHARED_LD,
+      "shared_load",
+      "Number of executed load instructions where state space is specified as "
+      "shared, increments per warp on a multiprocessor"),
+
+   _Q(SHARED_LD_REPLAY,
+      "shared_load_replay",
+      "Replays caused due to shared load bank conflict (when the addresses for "
+      "two or more shared memory load requests fall in the same memory bank) or "
+      "when there is no conflict but the total number of words accessed by all "
+      "threads in the warp executing that instruction exceed the number of words "
+      "that can be loaded in one cycle (256 bytes)"),
+
+   _Q(SHARED_ST,
+      "shared_store",
+      "Number of executed store instructions where state space is specified as "
+      "shared, increments per warp on a multiprocessor"),
+
+   _Q(SHARED_ST_REPLAY,
+      "shared_store_replay",
+      "Replays caused due to shared store bank conflict (when the addresses for "
+      "two or more shared memory store requests fall in the same memory bank) or "
+      "when there is no conflict but the total number of words accessed by all "
+      "threads in the warp executing that instruction exceed the number of words "
+      "that can be stored in one cycle"),
+
+   _Q(SM_CTA_LAUNCHED,
+      "sm_cta_launched",
+      "Number of thread blocks launched on a multiprocessor"),
+
+   _Q(THREADS_LAUNCHED,
+      "threads_launched",
+      "Number of threads launched on a multiprocessor"),
+
+   _Q(TH_INST_EXECUTED,
+      "thread_inst_executed",
+      "Number of instructions executed by all threads, does not include "
+      "replays. For each instruction it increments by the number of threads in "
+      "the warp that execute the instruction"),
+
+   _Q(TH_INST_EXECUTED_0,
+      "thread_inst_executed_0",
+      "Number of instructions executed by all threads, does not include "
+      "replays. For each instruction it increments by the number of threads in "
+      "the warp that execute the instruction in pipeline 0"),
+
+   _Q(TH_INST_EXECUTED_1,
+      "thread_inst_executed_1",
+      "Number of instructions executed by all threads, does not include "
+      "replays. For each instruction it increments by the number of threads in "
+      "the warp that execute the instruction in pipeline 1"),
+
+   _Q(TH_INST_EXECUTED_2,
+      "thread_inst_executed_2",
+      "Number of instructions executed by all threads, does not include "
+      "replays. For each instruction it increments by the number of threads in "
+      "the warp that execute the instruction in pipeline 2"),
+
+   _Q(TH_INST_EXECUTED_3,
+      "thread_inst_executed_3",
+      "Number of instructions executed by all threads, does not include "
+      "replays. For each instruction it increments by the number of threads in "
+      "the warp that execute the instruction in pipeline 3"),
+
+   _Q(UNCACHED_GLD_TRANSACTIONS,
+      "uncached_global_load_transaction",
+      "Number of uncached global load transactions. Increments by 1 per "
+      "transaction. Transaction can be 32/64/96/128B."),
+
+   _Q(WARPS_LAUNCHED,
+      "warps_launched",
+      "Number of warps launched on a multiprocessor"),
 };
 
 #undef _Q
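
With a description now stored next to each name, the natural consumer is the
driver-query enumeration path, which can report both strings to profiling
tools. As a minimal sketch only (the helper name and the linear scan are
assumptions, not part of this patch; ARRAY_SIZE is assumed available from
mesa's utility headers):

   /* Hypothetical helper: map a query type to its description string. */
   static const char *
   nvc0_hw_sm_query_get_desc(unsigned type)
   {
      unsigned i;
      /* A linear scan is acceptable: the table has ~50 entries and this
       * only runs on the query-info path, never per draw. */
      for (i = 0; i < ARRAY_SIZE(nvc0_hw_sm_queries); i++) {
         if (nvc0_hw_sm_queries[i].type == type)
            return nvc0_hw_sm_queries[i].desc;
      }
      return NULL; /* unknown query type */
   }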
@@ -132,9 +372,9 @@ static const uint64_t nve4_read_hw_sm_counters_code[] =
     * mov b32 $r6 $pm6
     * mov b32 $r7 $pm7
     * set $p0 0x1 eq u32 $r8 0x0
-    * mov b32 $r10 c0[0x0]
+    * mov b32 $r10 c7[0x620]
     * ext u32 $r8 $r12 0x414
-    * mov b32 $r11 c0[0x4]
+    * mov b32 $r11 c7[0x624]
     * sched 0x04 0x2e 0x04 0x20 0x20 0x28 0x04
     * ext u32 $r9 $r12 0x208
     * (not $p0) exit
@@ -152,7 +392,7 @@ static const uint64_t nve4_read_hw_sm_counters_code[] =
     * add b32 $r12 $c $r12 $r9
     * st b128 wt g[$r10d] $r0q
     * sched 0x4 0x2c 0x20 0x04 0x2e 0x00 0x00
-    * mov b32 $r0 c0[0x8]
+    * mov b32 $r0 c7[0x628]
     * add b32 $r13 $r13 0x0 $c
     * $p1 st b128 wt g[$r12d+0x40] $r4q
     * st b32 wt g[$r12d+0x50] $r0
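
The substantive change in the disassembly above: the shader's three input
words move from the user constbuf, c0[0x00..0x08], to the driver's auxiliary
constbuf at c7[0x620..0x628] (c15[] in the Fermi variant further down).
Reading the constbuf loads, the layout is a 64-bit destination address
followed by a sequence number; a C view of those words (the struct and field
names are illustrative, not from the patch):

   #include <stdint.h>

   /* What the readback shader expects at c7[0x620] (Kepler) or
    * c15[0x620] (Fermi), per the mov-from-constbuf instructions above. */
   struct hw_sm_input {
      uint32_t dst_address_lo; /* 0x620 -> $r10: result buffer, low bits  */
      uint32_t dst_address_hi; /* 0x624 -> $r11: result buffer, high bits */
      uint32_t sequence;       /* 0x628: query sequence, stored with results */
   };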
@@ -170,9 +410,9 @@ static const uint64_t nve4_read_hw_sm_counters_code[] =
    0x2c00000028019c04ULL,
    0x2c0000002c01dc04ULL,
    0x190e0000fc81dc03ULL,
-   0x2800400000029de4ULL,
+   0x28005c1880029de4ULL,
    0x7000c01050c21c03ULL,
-   0x280040001002dde4ULL,
+   0x28005c189002dde4ULL,
    0x204282020042e047ULL,
    0x7000c00820c25c03ULL,
    0x80000000000021e7ULL,
@@ -190,7 +430,7 @@ static const uint64_t nve4_read_hw_sm_counters_code[] =
    0x4801000024c31c03ULL,
    0x9400000000a01fc5ULL,
    0x200002e04202c047ULL,
-   0x2800400020001de4ULL,
+   0x28005c18a0001de4ULL,
    0x0800000000d35c42ULL,
    0x9400000100c107c5ULL,
    0x9400000140c01f85ULL,
@@ -213,9 +453,9 @@ static const uint64_t nvf0_read_hw_sm_counters_code[] =
    0x86400000051c001aULL,
    0x86400000059c001eULL,
    0xdb201c007f9c201eULL,
-   0x64c03c00001c002aULL,
+   0x64c03ce0c41c002aULL,
    0xc00000020a1c3021ULL,
-   0x64c03c00009c002eULL,
+   0x64c03ce0c49c002eULL,
    0x0810a0808010b810ULL,
    0xc0000001041c3025ULL,
    0x180000000020003cULL,
@@ -233,7 +473,7 @@ static const uint64_t nvf0_read_hw_sm_counters_code[] =
    0xe0840000049c3032ULL,
    0xfe800000001c2800ULL,
    0x080000b81080b010ULL,
-   0x64c03c00011c0002ULL,
+   0x64c03ce0c51c0002ULL,
    0xe08040007f9c3436ULL,
    0xfe80000020043010ULL,
    0xfc800000281c3000ULL,
@@ -865,14 +1105,14 @@ static const uint64_t nvc0_read_hw_sm_counters_code[] =
     * mov b32 $r6 $pm6
     * mov b32 $r7 $pm7
     * set $p0 0x1 eq u32 $r8 0x0
-    * mov b32 $r10 c0[0x0]
-    * mov b32 $r11 c0[0x4]
+    * mov b32 $r10 c15[0x620]
+    * mov b32 $r11 c15[0x624]
     * ext u32 $r8 $r9 0x414
     * (not $p0) exit
     * mul $r8 u32 $r8 u32 48
     * add b32 $r10 $c $r10 $r8
     * add b32 $r11 $r11 0x0 $c
-    * mov b32 $r8 c0[0x8]
+    * mov b32 $r8 c15[0x628]
     * st b128 wt g[$r10d+0x00] $r0q
     * st b128 wt g[$r10d+0x10] $r4q
     * st b32 wt g[$r10d+0x20] $r8
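
On Fermi the stores above also pin down the per-multiprocessor result record:
each MP writes its eight $pm counters as two 128-bit stores at +0x00 and
+0x10, then the sequence word (fetched from c15[0x628]) at +0x20, with a
48-byte stride (mul $r8 u32 $r8 u32 48). A sketch of that record as the CPU
would read it back (the struct name and the explicit padding field are
assumptions):

   #include <stdint.h>

   /* Per-MP record as laid out by the Fermi readback shader; 48-byte stride. */
   struct hw_sm_record {
      uint32_t counters[8]; /* $pm0..$pm7: st b128 at +0x00 and +0x10 */
      uint32_t sequence;    /* st b32 at +0x20: the uploaded sequence word */
      uint32_t pad[3];      /* stride padded to 48 bytes */
   };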
@@ -888,14 +1128,14 @@ static const uint64_t nvc0_read_hw_sm_counters_code[] =
    0x2c00000028019c04ULL,
    0x2c0000002c01dc04ULL,
    0x190e0000fc81dc03ULL,
-   0x2800400000029de4ULL,
-   0x280040001002dde4ULL,
+   0x28007c1880029de4ULL,
+   0x28007c189002dde4ULL,
    0x7000c01050921c03ULL,
    0x80000000000021e7ULL,
    0x10000000c0821c02ULL,
    0x4801000020a29c03ULL,
    0x0800000000b2dc42ULL,
-   0x2800400020021de4ULL,
+   0x28007c18a0021de4ULL,
    0x9400000000a01fc5ULL,
    0x9400000040a11fc5ULL,
    0x9400000080a21f85ULL,
@@ -1573,6 +1813,40 @@ nvc0_hw_sm_get_program(struct nvc0_screen *screen)
    return prog;
 }
 
+static inline void
+nvc0_hw_sm_upload_input(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)
+{
+   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
+   struct nvc0_screen *screen = nvc0->screen;
+   uint64_t address;
+   const int s = 5;
+
+   address = screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s);
+
+   PUSH_SPACE(push, 11);
+
+   if (screen->base.class_3d >= NVE4_3D_CLASS) {
+      BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
+      PUSH_DATAh(push, address + NVC0_CB_AUX_MP_INFO);
+      PUSH_DATA (push, address + NVC0_CB_AUX_MP_INFO);
+      BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
+      PUSH_DATA (push, 3 * 4);
+      PUSH_DATA (push, 0x1);
+      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 3);
+      PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
+   } else {
+      BEGIN_NVC0(push, NVC0_CP(CB_SIZE), 3);
+      PUSH_DATA (push, NVC0_CB_AUX_SIZE);
+      PUSH_DATAh(push, address);
+      PUSH_DATA (push, address);
+      BEGIN_1IC0(push, NVC0_CP(CB_POS), 1 + 3);
+      PUSH_DATA (push, NVC0_CB_AUX_MP_INFO);
+   }
+   PUSH_DATA (push, (hq->bo->offset + hq->base_offset));
+   PUSH_DATAh(push, (hq->bo->offset + hq->base_offset));
+   PUSH_DATA (push, hq->sequence);
+}
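
Both branches above write the same three words and differ only in transport:
the Kepler compute UPLOAD engine versus CB_SIZE/CB_POS on Fermi. Either way
the destination is the stage-5 (compute) auxiliary constbuf at
NVC0_CB_AUX_MP_INFO, i.e. the c7[0x620]/c15[0x620] slots the shaders read.
A CPU-side sketch of the net effect (the helper is illustrative, not driver
code):

   #include <stdint.h>

   static void
   fill_mp_info(uint32_t out[3], uint64_t dst_address, uint32_t sequence)
   {
      out[0] = (uint32_t)dst_address;         /* -> c7[0x620] / c15[0x620] */
      out[1] = (uint32_t)(dst_address >> 32); /* -> c7[0x624] / c15[0x624] */
      out[2] = sequence;                      /* -> c7[0x628] / c15[0x628] */
   }

where dst_address corresponds to hq->bo->offset + hq->base_offset in the
function above.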
+
 static void
 nvc0_hw_sm_end_query(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)
 {
@@ -1581,6 +1855,7 @@ nvc0_hw_sm_end_query(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)
    struct nouveau_pushbuf *push = nvc0->base.pushbuf;
    const bool is_nve4 = screen->base.class_3d >= NVE4_3D_CLASS;
    struct nvc0_hw_sm_query *hsq = nvc0_hw_sm_query(hq);
+   struct nvc0_program *old = nvc0->compprog;
    struct pipe_grid_info info = {};
    uint32_t mask;
    uint32_t input[3];
@@ -1616,11 +1891,10 @@ nvc0_hw_sm_end_query(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)
    PUSH_SPACE(push, 1);
    IMMED_NVC0(push, SUBC_CP(NV50_GRAPH_SERIALIZE), 0);
 
-   pipe->bind_compute_state(pipe, screen->pm.prog);
-   input[0] = (hq->bo->offset + hq->base_offset);
-   input[1] = (hq->bo->offset + hq->base_offset) >> 32;
-   input[2] = hq->sequence;
+   /* upload the input data for the compute shader that reads the MP counters */
+   nvc0_hw_sm_upload_input(nvc0, hq);
 
+   pipe->bind_compute_state(pipe, screen->pm.prog);
    for (i = 0; i < 3; i++) {
       info.block[i] = block[i];
       info.grid[i] = grid[i];
@@ -1628,6 +1902,7 @@ nvc0_hw_sm_end_query(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)
    info.pc = 0;
    info.input = input;
    pipe->launch_grid(pipe, &info);
+   pipe->bind_compute_state(pipe, old);
 
    nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_QUERY);