#include "nvc0/nvc0_compute.xml.h"
/* NOTE: intentionally using the same names as NV */
-#define _Q(t, n) { NVC0_HW_SM_QUERY_##t, n }
-struct {
+#define _Q(t, n, d) { NVC0_HW_SM_QUERY_##t, n, d }
+static const struct {
unsigned type;
const char *name;
+ const char *desc;
} nvc0_hw_sm_queries[] = {
- _Q(ACTIVE_CYCLES, "active_cycles" ),
- _Q(ACTIVE_WARPS, "active_warps" ),
- _Q(ATOM_CAS_COUNT, "atom_cas_count" ),
- _Q(ATOM_COUNT, "atom_count" ),
- _Q(BRANCH, "branch" ),
- _Q(DIVERGENT_BRANCH, "divergent_branch" ),
- _Q(GLD_REQUEST, "gld_request" ),
- _Q(GLD_MEM_DIV_REPLAY, "global_ld_mem_divergence_replays" ),
- _Q(GST_TRANSACTIONS, "global_store_transaction" ),
- _Q(GST_MEM_DIV_REPLAY, "global_st_mem_divergence_replays" ),
- _Q(GRED_COUNT, "gred_count" ),
- _Q(GST_REQUEST, "gst_request" ),
- _Q(INST_EXECUTED, "inst_executed" ),
- _Q(INST_ISSUED, "inst_issued" ),
- _Q(INST_ISSUED1, "inst_issued1" ),
- _Q(INST_ISSUED2, "inst_issued2" ),
- _Q(INST_ISSUED1_0, "inst_issued1_0" ),
- _Q(INST_ISSUED1_1, "inst_issued1_1" ),
- _Q(INST_ISSUED2_0, "inst_issued2_0" ),
- _Q(INST_ISSUED2_1, "inst_issued2_1" ),
- _Q(L1_GLD_HIT, "l1_global_load_hit" ),
- _Q(L1_GLD_MISS, "l1_global_load_miss" ),
- _Q(L1_GLD_TRANSACTIONS, "__l1_global_load_transactions" ),
- _Q(L1_GST_TRANSACTIONS, "__l1_global_store_transactions" ),
- _Q(L1_LOCAL_LD_HIT, "l1_local_load_hit" ),
- _Q(L1_LOCAL_LD_MISS, "l1_local_load_miss" ),
- _Q(L1_LOCAL_ST_HIT, "l1_local_store_hit" ),
- _Q(L1_LOCAL_ST_MISS, "l1_local_store_miss" ),
- _Q(L1_SHARED_LD_TRANSACTIONS, "l1_shared_load_transactions" ),
- _Q(L1_SHARED_ST_TRANSACTIONS, "l1_shared_store_transactions" ),
- _Q(LOCAL_LD, "local_load" ),
- _Q(LOCAL_LD_TRANSACTIONS, "local_load_transactions" ),
- _Q(LOCAL_ST, "local_store" ),
- _Q(LOCAL_ST_TRANSACTIONS, "local_store_transactions" ),
- _Q(PROF_TRIGGER_0, "prof_trigger_00" ),
- _Q(PROF_TRIGGER_1, "prof_trigger_01" ),
- _Q(PROF_TRIGGER_2, "prof_trigger_02" ),
- _Q(PROF_TRIGGER_3, "prof_trigger_03" ),
- _Q(PROF_TRIGGER_4, "prof_trigger_04" ),
- _Q(PROF_TRIGGER_5, "prof_trigger_05" ),
- _Q(PROF_TRIGGER_6, "prof_trigger_06" ),
- _Q(PROF_TRIGGER_7, "prof_trigger_07" ),
- _Q(SHARED_LD, "shared_load" ),
- _Q(SHARED_LD_REPLAY, "shared_load_replay" ),
- _Q(SHARED_ST, "shared_store" ),
- _Q(SHARED_ST_REPLAY, "shared_store_replay" ),
- _Q(SM_CTA_LAUNCHED, "sm_cta_launched" ),
- _Q(THREADS_LAUNCHED, "threads_launched" ),
- _Q(TH_INST_EXECUTED_0, "thread_inst_executed_0" ),
- _Q(TH_INST_EXECUTED_1, "thread_inst_executed_1" ),
- _Q(TH_INST_EXECUTED_2, "thread_inst_executed_2" ),
- _Q(TH_INST_EXECUTED_3, "thread_inst_executed_3" ),
- _Q(UNCACHED_GLD_TRANSACTIONS, "uncached_global_load_transaction" ),
- _Q(WARPS_LAUNCHED, "warps_launched" ),
+ _Q(ACTIVE_CYCLES,
+ "active_cycles",
+ "Number of cycles a multiprocessor has at least one active warp"),
+
+ _Q(ACTIVE_WARPS,
+ "active_warps",
+ "Accumulated number of active warps per cycle. For every cycle it "
+ "increments by the number of active warps in the cycle which can be in "
+ "the range 0 to 64"),
+
+ _Q(ATOM_CAS_COUNT,
+ "atom_cas_count",
+ "Number of warps executing atomic compare and swap operations. Increments "
+ "by one if at least one thread in a warp executes the instruction."),
+
+ _Q(ATOM_COUNT,
+ "atom_count",
+ "Number of warps executing atomic reduction operations. Increments by one "
+ "if at least one thread in a warp executes the instruction"),
+
+ _Q(BRANCH,
+ "branch",
+ "Number of branch instructions executed per warp on a multiprocessor"),
+
+ _Q(DIVERGENT_BRANCH,
+ "divergent_branch",
+ "Number of divergent branches within a warp. This counter will be "
+ "incremented by one if at least one thread in a warp diverges (that is, "
+ "follows a different execution path) via a conditional branch"),
+
+ _Q(GLD_REQUEST,
+ "gld_request",
+ "Number of executed load instructions where the state space is not "
+ "specified and hence generic addressing is used, increments per warp on a "
+ "multiprocessor. It can include the load operations from global,local and "
+ "shared state space"),
+
+ _Q(GLD_MEM_DIV_REPLAY,
+ "global_ld_mem_divergence_replays",
+ "Number of instruction replays for global memory loads. Instruction is "
+ "replayed if the instruction is accessing more than one cache line of "
+ "128 bytes. For each extra cache line access the counter is incremented "
+ "by 1"),
+
+ _Q(GST_TRANSACTIONS,
+ "global_store_transaction",
+ "Number of global store transactions. Increments by 1 per transaction. "
+ "Transaction can be 32/64/96/128B"),
+
+ _Q(GST_MEM_DIV_REPLAY,
+ "global_st_mem_divergence_replays",
+ "Number of instruction replays for global memory stores. Instruction is "
+ "replayed if the instruction is accessing more than one cache line of "
+ "128 bytes. For each extra cache line access the counter is incremented "
+ "by 1"),
+
+ _Q(GRED_COUNT,
+ "gred_count",
+ "Number of warps executing reduction operations on global memory. "
+ "Increments by one if at least one thread in a warp executes the "
+ "instruction"),
+
+ _Q(GST_REQUEST,
+ "gst_request",
+ "Number of executed store instructions where the state space is not "
+ "specified and hence generic addressing is used, increments per warp on a "
+ "multiprocessor. It can include the store operations to global,local and "
+ "shared state space"),
+
+ _Q(INST_EXECUTED,
+ "inst_executed",
+ "Number of instructions executed, do not include replays"),
+
+ _Q(INST_ISSUED,
+ "inst_issued",
+ "Number of instructions issued including replays"),
+
+ _Q(INST_ISSUED1,
+ "inst_issued1",
+ "Number of single instruction issued per cycle"),
+
+ _Q(INST_ISSUED2,
+ "inst_issued2",
+ "Number of dual instructions issued per cycle"),
+
+ _Q(INST_ISSUED1_0,
+ "inst_issued1_0",
+ "Number of single instruction issued per cycle in pipeline 0"),
+
+ _Q(INST_ISSUED1_1,
+ "inst_issued1_1",
+ "Number of single instruction issued per cycle in pipeline 1"),
+
+ _Q(INST_ISSUED2_0,
+ "inst_issued2_0",
+ "Number of dual instructions issued per cycle in pipeline 0"),
+
+ _Q(INST_ISSUED2_1,
+ "inst_issued2_1",
+ "Number of dual instructions issued per cycle in pipeline 1"),
+
+ _Q(L1_GLD_HIT,
+ "l1_global_load_hit",
+ "Number of cache lines that hit in L1 cache for global memory load "
+ "accesses. In case of perfect coalescing this increments by 1,2, and 4 for "
+ "32, 64 and 128 bit accesses by a warp respectively"),
+
+ _Q(L1_GLD_MISS,
+ "l1_global_load_miss",
+ "Number of cache lines that miss in L1 cache for global memory load "
+ "accesses. In case of perfect coalescing this increments by 1,2, and 4 for "
+ "32, 64 and 128 bit accesses by a warp respectively"),
+
+ _Q(L1_GLD_TRANSACTIONS,
+ "__l1_global_load_transactions",
+ "Number of global load transactions from L1 cache. Increments by 1 per "
+ "transaction. Transaction can be 32/64/96/128B"),
+
+ _Q(L1_GST_TRANSACTIONS,
+ "__l1_global_store_transactions",
+ "Number of global store transactions from L1 cache. Increments by 1 per "
+ "transaction. Transaction can be 32/64/96/128B"),
+
+ _Q(L1_LOCAL_LD_HIT,
+ "l1_local_load_hit",
+ "Number of cache lines that hit in L1 cache for local memory load "
+ "accesses. In case of perfect coalescing this increments by 1,2, and 4 for "
+ "32, 64 and 128 bit accesses by a warp respectively"),
+
+ _Q(L1_LOCAL_LD_MISS,
+ "l1_local_load_miss",
+ "Number of cache lines that miss in L1 cache for local memory load "
+ "accesses. In case of perfect coalescing this increments by 1,2, and 4 for "
+ "32, 64 and 128 bit accesses by a warp respectively"),
+
+ _Q(L1_LOCAL_ST_HIT,
+ "l1_local_store_hit",
+ "Number of cache lines that hit in L1 cache for local memory store "
+ "accesses. In case of perfect coalescing this increments by 1,2, and 4 for "
+ "32, 64 and 128 bit accesses by a warp respectively"),
+
+ _Q(L1_LOCAL_ST_MISS,
+ "l1_local_store_miss",
+ "Number of cache lines that miss in L1 cache for local memory store "
+ "accesses. In case of perfect coalescing this increments by 1,2, and 4 for "
+ "32,64 and 128 bit accesses by a warp respectively"),
+
+ _Q(L1_SHARED_LD_TRANSACTIONS,
+ "l1_shared_load_transactions",
+ "Number of shared load transactions. Increments by 1 per transaction. "
+ "Transaction can be 32/64/96/128B"),
+
+ _Q(L1_SHARED_ST_TRANSACTIONS,
+ "l1_shared_store_transactions",
+ "Number of shared store transactions. Increments by 1 per transaction. "
+ "Transaction can be 32/64/96/128B"),
+
+ _Q(LOCAL_LD,
+ "local_load",
+ "Number of executed load instructions where state space is specified as "
+ "local, increments per warp on a multiprocessor"),
+
+ _Q(LOCAL_LD_TRANSACTIONS,
+ "local_load_transactions",
+ "Number of local load transactions from L1 cache. Increments by 1 per "
+ "transaction. Transaction can be 32/64/96/128B"),
+
+ _Q(LOCAL_ST,
+ "local_store",
+ "Number of executed store instructions where state space is specified as "
+ "local, increments per warp on a multiprocessor"),
+
+ _Q(LOCAL_ST_TRANSACTIONS,
+ "local_store_transactions",
+ "Number of local store transactions to L1 cache. Increments by 1 per "
+ "transaction. Transaction can be 32/64/96/128B."),
+
+ _Q(NOT_PRED_OFF_INST_EXECUTED,
+ "not_predicated_off_thread_inst_executed",
+ "Number of not predicated off instructions executed by all threads, does "
+ "not include replays. For each instruction it increments by the number of "
+ "threads that execute this instruction"),
+
+ _Q(PROF_TRIGGER_0,
+ "prof_trigger_00",
+ "User profiled generic trigger that can be inserted in any place of the "
+ "code to collect the related information. Increments per warp."),
+
+ _Q(PROF_TRIGGER_1,
+ "prof_trigger_01",
+ "User profiled generic trigger that can be inserted in any place of the "
+ "code to collect the related information. Increments per warp."),
+
+ _Q(PROF_TRIGGER_2,
+ "prof_trigger_02",
+ "User profiled generic trigger that can be inserted in any place of the "
+ "code to collect the related information. Increments per warp."),
+
+ _Q(PROF_TRIGGER_3,
+ "prof_trigger_03",
+ "User profiled generic trigger that can be inserted in any place of the "
+ "code to collect the related information. Increments per warp."),
+
+ _Q(PROF_TRIGGER_4,
+ "prof_trigger_04",
+ "User profiled generic trigger that can be inserted in any place of the "
+ "code to collect the related information. Increments per warp."),
+
+ _Q(PROF_TRIGGER_5,
+ "prof_trigger_05",
+ "User profiled generic trigger that can be inserted in any place of the "
+ "code to collect the related information. Increments per warp."),
+
+ _Q(PROF_TRIGGER_6,
+ "prof_trigger_06",
+ "User profiled generic trigger that can be inserted in any place of the "
+ "code to collect the related information. Increments per warp."),
+
+ _Q(PROF_TRIGGER_7,
+ "prof_trigger_07",
+ "User profiled generic trigger that can be inserted in any place of the "
+ "code to collect the related information. Increments per warp."),
+
+ _Q(SHARED_LD,
+ "shared_load",
+ "Number of executed load instructions where state space is specified as "
+ "shared, increments per warp on a multiprocessor"),
+
+ _Q(SHARED_LD_REPLAY,
+ "shared_load_replay",
+ "Replays caused due to shared load bank conflict (when the addresses for "
+ "two or more shared memory load requests fall in the same memory bank) or "
+ "when there is no conflict but the total number of words accessed by all "
+ "threads in the warp executing that instruction exceed the number of words "
+ "that can be loaded in one cycle (256 bytes)"),
+
+ _Q(SHARED_ST,
+ "shared_store",
+ "Number of executed store instructions where state space is specified as "
+ "shared, increments per warp on a multiprocessor"),
+
+ _Q(SHARED_ST_REPLAY,
+ "shared_store_replay",
+ "Replays caused due to shared store bank conflict (when the addresses for "
+ "two or more shared memory store requests fall in the same memory bank) or "
+ "when there is no conflict but the total number of words accessed by all "
+ "threads in the warp executing that instruction exceed the number of words "
+ "that can be stored in one cycle"),
+
+ _Q(SM_CTA_LAUNCHED,
+ "sm_cta_launched",
+ "Number of thread blocks launched on a multiprocessor"),
+
+ _Q(THREADS_LAUNCHED,
+ "threads_launched",
+ "Number of threads launched on a multiprocessor"),
+
+ _Q(TH_INST_EXECUTED,
+ "thread_inst_executed",
+ "Number of instructions executed by all threads, does not include "
+ "replays. For each instruction it increments by the number of threads in "
+ "the warp that execute the instruction"),
+
+ _Q(TH_INST_EXECUTED_0,
+ "thread_inst_executed_0",
+ "Number of instructions executed by all threads, does not include "
+ "replays. For each instruction it increments by the number of threads in "
+ "the warp that execute the instruction in pipeline 0"),
+
+ _Q(TH_INST_EXECUTED_1,
+ "thread_inst_executed_1",
+ "Number of instructions executed by all threads, does not include "
+ "replays. For each instruction it increments by the number of threads in "
+ "the warp that execute the instruction in pipeline 1"),
+
+ _Q(TH_INST_EXECUTED_2,
+ "thread_inst_executed_2",
+ "Number of instructions executed by all threads, does not include "
+ "replays. For each instruction it increments by the number of threads in "
+ "the warp that execute the instruction in pipeline 2"),
+
+ _Q(TH_INST_EXECUTED_3,
+ "thread_inst_executed_3",
+ "Number of instructions executed by all threads, does not include "
+ "replays. For each instruction it increments by the number of threads in "
+ "the warp that execute the instruction in pipeline 3"),
+
+ _Q(UNCACHED_GLD_TRANSACTIONS,
+ "uncached_global_load_transaction",
+ "Number of uncached global load transactions. Increments by 1 per "
+ "transaction. Transaction can be 32/64/96/128B."),
+
+ _Q(WARPS_LAUNCHED,
+ "warps_launched",
+ "Number of warps launched on a multiprocessor"),
};
#undef _Q
* mov b32 $r6 $pm6
* mov b32 $r7 $pm7
* set $p0 0x1 eq u32 $r8 0x0
- * mov b32 $r10 c0[0x0]
+ * mov b32 $r10 c7[0x620]
* ext u32 $r8 $r12 0x414
- * mov b32 $r11 c0[0x4]
+ * mov b32 $r11 c7[0x624]
* sched 0x04 0x2e 0x04 0x20 0x20 0x28 0x04
* ext u32 $r9 $r12 0x208
* (not $p0) exit
* add b32 $r12 $c $r12 $r9
* st b128 wt g[$r10d] $r0q
* sched 0x4 0x2c 0x20 0x04 0x2e 0x00 0x00
- * mov b32 $r0 c0[0x8]
+ * mov b32 $r0 c7[0x628]
* add b32 $r13 $r13 0x0 $c
* $p1 st b128 wt g[$r12d+0x40] $r4q
* st b32 wt g[$r12d+0x50] $r0
0x2c00000028019c04ULL,
0x2c0000002c01dc04ULL,
0x190e0000fc81dc03ULL,
- 0x2800400000029de4ULL,
+ 0x28005c1880029de4ULL,
0x7000c01050c21c03ULL,
- 0x280040001002dde4ULL,
+ 0x28005c189002dde4ULL,
0x204282020042e047ULL,
0x7000c00820c25c03ULL,
0x80000000000021e7ULL,
0x4801000024c31c03ULL,
0x9400000000a01fc5ULL,
0x200002e04202c047ULL,
- 0x2800400020001de4ULL,
+ 0x28005c18a0001de4ULL,
0x0800000000d35c42ULL,
0x9400000100c107c5ULL,
0x9400000140c01f85ULL,
0x8000000000001de7ULL
};
+static const uint64_t nvf0_read_hw_sm_counters_code[] =
+{
+   /* Same kernel as GK104 above, re-encoded for the GK110 ISA */
+ 0x0880808080808080ULL,
+ 0x86400000109c0022ULL,
+ 0x86400000019c0032ULL,
+ 0x86400000021c0002ULL,
+ 0x86400000029c0006ULL,
+ 0x86400000031c000aULL,
+ 0x86400000039c000eULL,
+ 0x86400000041c0012ULL,
+ 0x08ac1080108c8080ULL,
+ 0x86400000049c0016ULL,
+ 0x86400000051c001aULL,
+ 0x86400000059c001eULL,
+ 0xdb201c007f9c201eULL,
+ 0x64c03ce0c41c002aULL,
+ 0xc00000020a1c3021ULL,
+ 0x64c03ce0c49c002eULL,
+ 0x0810a0808010b810ULL,
+ 0xc0000001041c3025ULL,
+ 0x180000000020003cULL,
+ 0xdb201c007f9c243eULL,
+ 0xc1c00000301c2021ULL,
+ 0xc1c00000081c2431ULL,
+ 0xc1c00000021c2435ULL,
+ 0xe0800000069c2026ULL,
+ 0x08b010b010b010a0ULL,
+ 0xe0800000061c2022ULL,
+ 0xe4c03c00051c0032ULL,
+ 0xe0840000041c282aULL,
+ 0xe4c03c00059c0036ULL,
+ 0xe08040007f9c2c2eULL,
+ 0xe0840000049c3032ULL,
+ 0xfe800000001c2800ULL,
+ 0x080000b81080b010ULL,
+ 0x64c03ce0c51c0002ULL,
+ 0xe08040007f9c3436ULL,
+ 0xfe80000020043010ULL,
+ 0xfc800000281c3000ULL,
+ 0x18000000001c003cULL,
+};
+
/* For simplicity, we will allocate as many group slots as we allocate counter
* slots. This means that a single counter which wants to source from 2 groups
* will have to be declared as using 2 counter slots. This shouldn't really be
uint8_t norm[2]; /* normalization num,denom */
};
-#define _Q1A(n, f, m, g, s, nu, dn) { NVC0_HW_SM_QUERY_##n, { { f, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m, 0, NVE4_COMPUTE_MP_PM_A_SIGSEL_##g, 0, s }, {}, {}, {} }, 1, { nu, dn } }
-#define _Q1B(n, f, m, g, s, nu, dn) { NVC0_HW_SM_QUERY_##n, { { f, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m, 1, NVE4_COMPUTE_MP_PM_B_SIGSEL_##g, 0, s }, {}, {}, {} }, 1, { nu, dn } }
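+/* Counter config helpers: _CA selects a signal from the MP PM A group,
+ * _CB from the B group (the 0/1 field in the initializer); f is the
+ * counter function mask, m the function mode and s the signal select. */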
+#define _CA(f, m, g, s) { f, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m, 0, NVE4_COMPUTE_MP_PM_A_SIGSEL_##g, 0, s }
+#define _CB(f, m, g, s) { f, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m, 1, NVE4_COMPUTE_MP_PM_B_SIGSEL_##g, 0, s }
+
+/* ==== Compute capability 3.0 (GK104:GK110) ==== */
+static const struct nvc0_hw_sm_query_cfg
+sm30_active_cycles =
+{
+ .type = NVC0_HW_SM_QUERY_ACTIVE_CYCLES,
+ .ctr[0] = _CB(0x0001, B6, WARP, 0x00000000),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_active_warps =
+{
+ .type = NVC0_HW_SM_QUERY_ACTIVE_WARPS,
+ .ctr[0] = _CB(0x003f, B6, WARP, 0x31483104),
+ .num_counters = 1,
+ .norm = { 2, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_atom_cas_count =
+{
+ .type = NVC0_HW_SM_QUERY_ATOM_CAS_COUNT,
+   .ctr[0] = _CA(0x0001, B6, BRANCH, 0x00000004),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_atom_count =
+{
+ .type = NVC0_HW_SM_QUERY_ATOM_COUNT,
+ .ctr[0] = _CA(0x0001, B6, BRANCH, 0x00000000),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_branch =
+{
+ .type = NVC0_HW_SM_QUERY_BRANCH,
+ .ctr[0] = _CA(0x0001, B6, BRANCH, 0x0000000c),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_divergent_branch =
+{
+ .type = NVC0_HW_SM_QUERY_DIVERGENT_BRANCH,
+ .ctr[0] = _CA(0x0001, B6, BRANCH, 0x00000010),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_gld_request =
+{
+ .type = NVC0_HW_SM_QUERY_GLD_REQUEST,
+ .ctr[0] = _CA(0x0001, B6, LDST, 0x00000010),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_gld_mem_div_replay =
+{
+ .type = NVC0_HW_SM_QUERY_GLD_MEM_DIV_REPLAY,
+ .ctr[0] = _CB(0x0001, B6, REPLAY, 0x00000010),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_gst_transactions =
+{
+ .type = NVC0_HW_SM_QUERY_GST_TRANSACTIONS,
+ .ctr[0] = _CB(0x0001, B6, MEM, 0x00000004),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_gst_mem_div_replay =
+{
+ .type = NVC0_HW_SM_QUERY_GST_MEM_DIV_REPLAY,
+ .ctr[0] = _CB(0x0001, B6, REPLAY, 0x00000014),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_gred_count =
+{
+ .type = NVC0_HW_SM_QUERY_GRED_COUNT,
+ .ctr[0] = _CA(0x0001, B6, BRANCH, 0x00000008),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_gst_request =
+{
+ .type = NVC0_HW_SM_QUERY_GST_REQUEST,
+ .ctr[0] = _CA(0x0001, B6, LDST, 0x00000014),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_inst_executed =
+{
+ .type = NVC0_HW_SM_QUERY_INST_EXECUTED,
+ .ctr[0] = _CA(0x0003, B6, EXEC, 0x00000398),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_inst_issued1 =
+{
+ .type = NVC0_HW_SM_QUERY_INST_ISSUED1,
+ .ctr[0] = _CA(0x0001, B6, ISSUE, 0x00000004),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_inst_issued2 =
+{
+ .type = NVC0_HW_SM_QUERY_INST_ISSUED2,
+ .ctr[0] = _CA(0x0001, B6, ISSUE, 0x00000008),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_l1_gld_hit =
+{
+ .type = NVC0_HW_SM_QUERY_L1_GLD_HIT,
+ .ctr[0] = _CB(0x0001, B6, L1, 0x00000010),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_l1_gld_miss =
+{
+ .type = NVC0_HW_SM_QUERY_L1_GLD_MISS,
+ .ctr[0] = _CB(0x0001, B6, L1, 0x00000014),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_l1_gld_transactions =
+{
+ .type = NVC0_HW_SM_QUERY_L1_GLD_TRANSACTIONS,
+ .ctr[0] = _CB(0x0001, B6, UNK0F, 0x00000000),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_l1_gst_transactions =
+{
+ .type = NVC0_HW_SM_QUERY_L1_GST_TRANSACTIONS,
+ .ctr[0] = _CB(0x0001, B6, UNK0F, 0x00000004),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_l1_local_ld_hit =
+{
+ .type = NVC0_HW_SM_QUERY_L1_LOCAL_LD_HIT,
+ .ctr[0] = _CB(0x0001, B6, L1, 0x00000000),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_l1_local_ld_miss =
+{
+ .type = NVC0_HW_SM_QUERY_L1_LOCAL_LD_MISS,
+ .ctr[0] = _CB(0x0001, B6, L1, 0x00000004),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_l1_local_st_hit =
+{
+ .type = NVC0_HW_SM_QUERY_L1_LOCAL_ST_HIT,
+ .ctr[0] = _CB(0x0001, B6, L1, 0x00000008),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_l1_local_st_miss =
+{
+ .type = NVC0_HW_SM_QUERY_L1_LOCAL_ST_MISS,
+ .ctr[0] = _CB(0x0001, B6, L1, 0x0000000c),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_l1_shared_ld_transactions =
+{
+ .type = NVC0_HW_SM_QUERY_L1_SHARED_LD_TRANSACTIONS,
+ .ctr[0] = _CB(0x0001, B6, TRANSACTION, 0x00000008),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_l1_shared_st_transactions =
+{
+ .type = NVC0_HW_SM_QUERY_L1_SHARED_ST_TRANSACTIONS,
+ .ctr[0] = _CB(0x0001, B6, TRANSACTION, 0x0000000c),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_local_ld =
+{
+ .type = NVC0_HW_SM_QUERY_LOCAL_LD,
+ .ctr[0] = _CA(0x0001, B6, LDST, 0x00000008),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_local_ld_transactions =
+{
+ .type = NVC0_HW_SM_QUERY_LOCAL_LD_TRANSACTIONS,
+ .ctr[0] = _CB(0x0001, B6, TRANSACTION, 0x00000000),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_local_st =
+{
+ .type = NVC0_HW_SM_QUERY_LOCAL_ST,
+ .ctr[0] = _CA(0x0001, B6, LDST, 0x0000000c),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_local_st_transactions =
+{
+ .type = NVC0_HW_SM_QUERY_LOCAL_ST_TRANSACTIONS,
+ .ctr[0] = _CB(0x0001, B6, TRANSACTION, 0x00000004),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_prof_trigger_0 =
+{
+ .type = NVC0_HW_SM_QUERY_PROF_TRIGGER_0,
+ .ctr[0] = _CA(0x0001, B6, USER, 0x00000000),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_prof_trigger_1 =
+{
+ .type = NVC0_HW_SM_QUERY_PROF_TRIGGER_1,
+ .ctr[0] = _CA(0x0001, B6, USER, 0x00000004),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_prof_trigger_2 =
+{
+ .type = NVC0_HW_SM_QUERY_PROF_TRIGGER_2,
+ .ctr[0] = _CA(0x0001, B6, USER, 0x00000008),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_prof_trigger_3 =
+{
+ .type = NVC0_HW_SM_QUERY_PROF_TRIGGER_3,
+ .ctr[0] = _CA(0x0001, B6, USER, 0x0000000c),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_prof_trigger_4 =
+{
+ .type = NVC0_HW_SM_QUERY_PROF_TRIGGER_4,
+ .ctr[0] = _CA(0x0001, B6, USER, 0x00000010),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_prof_trigger_5 =
+{
+ .type = NVC0_HW_SM_QUERY_PROF_TRIGGER_5,
+ .ctr[0] = _CA(0x0001, B6, USER, 0x00000014),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_prof_trigger_6 =
+{
+ .type = NVC0_HW_SM_QUERY_PROF_TRIGGER_6,
+ .ctr[0] = _CA(0x0001, B6, USER, 0x00000018),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_prof_trigger_7 =
+{
+ .type = NVC0_HW_SM_QUERY_PROF_TRIGGER_7,
+ .ctr[0] = _CA(0x0001, B6, USER, 0x0000001c),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_shared_ld =
+{
+ .type = NVC0_HW_SM_QUERY_SHARED_LD,
+ .ctr[0] = _CA(0x0001, B6, LDST, 0x00000000),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_shared_ld_replay =
+{
+ .type = NVC0_HW_SM_QUERY_SHARED_LD_REPLAY,
+ .ctr[0] = _CB(0x0001, B6, REPLAY, 0x00000008),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_shared_st =
+{
+ .type = NVC0_HW_SM_QUERY_SHARED_ST,
+ .ctr[0] = _CA(0x0001, B6, LDST, 0x00000004),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_shared_st_replay =
+{
+ .type = NVC0_HW_SM_QUERY_SHARED_ST_REPLAY,
+ .ctr[0] = _CB(0x0001, B6, REPLAY, 0x0000000c),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_sm_cta_launched =
+{
+ .type = NVC0_HW_SM_QUERY_SM_CTA_LAUNCHED,
+ .ctr[0] = _CB(0x0001, B6, WARP, 0x0000001c),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_threads_launched =
+{
+ .type = NVC0_HW_SM_QUERY_THREADS_LAUNCHED,
+ .ctr[0] = _CA(0x003f, B6, LAUNCH, 0x398a4188),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_uncached_gld_transactions =
+{
+ .type = NVC0_HW_SM_QUERY_UNCACHED_GLD_TRANSACTIONS,
+ .ctr[0] = _CB(0x0001, B6, MEM, 0x00000000),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm30_warps_launched =
+{
+ .type = NVC0_HW_SM_QUERY_WARPS_LAUNCHED,
+ .ctr[0] = _CA(0x0001, B6, LAUNCH, 0x00000004),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
/* NOTES:
* active_warps: bit 0 alternates btw 0 and 1 for odd nr of warps
* inst_executed etc.: we only count a single warp scheduler
*/
-static const struct nvc0_hw_sm_query_cfg sm30_hw_sm_queries[] =
-{
- _Q1B(ACTIVE_CYCLES, 0x0001, B6, WARP, 0x00000000, 1, 1),
- _Q1B(ACTIVE_WARPS, 0x003f, B6, WARP, 0x31483104, 2, 1),
- _Q1A(ATOM_CAS_COUNT, 0x0001, B6, BRANCH, 0x000000004, 1, 1),
- _Q1A(ATOM_COUNT, 0x0001, B6, BRANCH, 0x00000000, 1, 1),
- _Q1A(BRANCH, 0x0001, B6, BRANCH, 0x0000000c, 1, 1),
- _Q1A(DIVERGENT_BRANCH, 0x0001, B6, BRANCH, 0x00000010, 1, 1),
- _Q1A(GLD_REQUEST, 0x0001, B6, LDST, 0x00000010, 1, 1),
- _Q1B(GLD_MEM_DIV_REPLAY, 0x0001, B6, REPLAY, 0x00000010, 1, 1),
- _Q1B(GST_TRANSACTIONS, 0x0001, B6, MEM, 0x00000004, 1, 1),
- _Q1B(GST_MEM_DIV_REPLAY, 0x0001, B6, REPLAY, 0x00000014, 1, 1),
- _Q1A(GRED_COUNT, 0x0001, B6, BRANCH, 0x00000008, 1, 1),
- _Q1A(GST_REQUEST, 0x0001, B6, LDST, 0x00000014, 1, 1),
- _Q1A(INST_EXECUTED, 0x0003, B6, EXEC, 0x00000398, 1, 1),
- _Q1A(INST_ISSUED1, 0x0001, B6, ISSUE, 0x00000004, 1, 1),
- _Q1A(INST_ISSUED2, 0x0001, B6, ISSUE, 0x00000008, 1, 1),
- _Q1B(L1_GLD_HIT, 0x0001, B6, L1, 0x00000010, 1, 1),
- _Q1B(L1_GLD_MISS, 0x0001, B6, L1, 0x00000014, 1, 1),
- _Q1B(L1_GLD_TRANSACTIONS, 0x0001, B6, UNK0F, 0x00000000, 1, 1),
- _Q1B(L1_GST_TRANSACTIONS, 0x0001, B6, UNK0F, 0x00000004, 1, 1),
- _Q1B(L1_LOCAL_LD_HIT, 0x0001, B6, L1, 0x00000000, 1, 1),
- _Q1B(L1_LOCAL_LD_MISS, 0x0001, B6, L1, 0x00000004, 1, 1),
- _Q1B(L1_LOCAL_ST_HIT, 0x0001, B6, L1, 0x00000008, 1, 1),
- _Q1B(L1_LOCAL_ST_MISS, 0x0001, B6, L1, 0x0000000c, 1, 1),
- _Q1B(L1_SHARED_LD_TRANSACTIONS, 0x0001, B6, TRANSACTION, 0x00000008, 1, 1),
- _Q1B(L1_SHARED_ST_TRANSACTIONS, 0x0001, B6, TRANSACTION, 0x0000000c, 1, 1),
- _Q1A(LOCAL_LD, 0x0001, B6, LDST, 0x00000008, 1, 1),
- _Q1B(LOCAL_LD_TRANSACTIONS, 0x0001, B6, TRANSACTION, 0x00000000, 1, 1),
- _Q1A(LOCAL_ST, 0x0001, B6, LDST, 0x0000000c, 1, 1),
- _Q1B(LOCAL_ST_TRANSACTIONS, 0x0001, B6, TRANSACTION, 0x00000004, 1, 1),
- _Q1A(PROF_TRIGGER_0, 0x0001, B6, USER, 0x00000000, 1, 1),
- _Q1A(PROF_TRIGGER_1, 0x0001, B6, USER, 0x00000004, 1, 1),
- _Q1A(PROF_TRIGGER_2, 0x0001, B6, USER, 0x00000008, 1, 1),
- _Q1A(PROF_TRIGGER_3, 0x0001, B6, USER, 0x0000000c, 1, 1),
- _Q1A(PROF_TRIGGER_4, 0x0001, B6, USER, 0x00000010, 1, 1),
- _Q1A(PROF_TRIGGER_5, 0x0001, B6, USER, 0x00000014, 1, 1),
- _Q1A(PROF_TRIGGER_6, 0x0001, B6, USER, 0x00000018, 1, 1),
- _Q1A(PROF_TRIGGER_7, 0x0001, B6, USER, 0x0000001c, 1, 1),
- _Q1A(SHARED_LD, 0x0001, B6, LDST, 0x00000000, 1, 1),
- _Q1B(SHARED_LD_REPLAY, 0x0001, B6, REPLAY, 0x00000008, 1, 1),
- _Q1A(SHARED_ST, 0x0001, B6, LDST, 0x00000004, 1, 1),
- _Q1B(SHARED_ST_REPLAY, 0x0001, B6, REPLAY, 0x0000000c, 1, 1),
- _Q1B(SM_CTA_LAUNCHED, 0x0001, B6, WARP, 0x0000001c, 1, 1),
- _Q1A(THREADS_LAUNCHED, 0x003f, B6, LAUNCH, 0x398a4188, 1, 1),
- _Q1B(UNCACHED_GLD_TRANSACTIONS, 0x0001, B6, MEM, 0x00000000, 1, 1),
- _Q1A(WARPS_LAUNCHED, 0x0001, B6, LAUNCH, 0x00000004, 1, 1),
-};
-
-#undef _Q1A
-#undef _Q1B
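+/* The tables below reference the standalone per-query configs above; the
+ * SM35 table reuses individual SM30 entries instead of duplicating them. */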
+static const struct nvc0_hw_sm_query_cfg *sm30_hw_sm_queries[] =
+{
+ &sm30_active_cycles,
+ &sm30_active_warps,
+ &sm30_atom_cas_count,
+ &sm30_atom_count,
+ &sm30_branch,
+ &sm30_divergent_branch,
+ &sm30_gld_request,
+ &sm30_gld_mem_div_replay,
+ &sm30_gst_transactions,
+ &sm30_gst_mem_div_replay,
+ &sm30_gred_count,
+ &sm30_gst_request,
+ &sm30_inst_executed,
+ &sm30_inst_issued1,
+ &sm30_inst_issued2,
+ &sm30_l1_gld_hit,
+ &sm30_l1_gld_miss,
+ &sm30_l1_gld_transactions,
+ &sm30_l1_gst_transactions,
+ &sm30_l1_local_ld_hit,
+ &sm30_l1_local_ld_miss,
+ &sm30_l1_local_st_hit,
+ &sm30_l1_local_st_miss,
+ &sm30_l1_shared_ld_transactions,
+ &sm30_l1_shared_st_transactions,
+ &sm30_local_ld,
+ &sm30_local_ld_transactions,
+ &sm30_local_st,
+ &sm30_local_st_transactions,
+ &sm30_prof_trigger_0,
+ &sm30_prof_trigger_1,
+ &sm30_prof_trigger_2,
+ &sm30_prof_trigger_3,
+ &sm30_prof_trigger_4,
+ &sm30_prof_trigger_5,
+ &sm30_prof_trigger_6,
+ &sm30_prof_trigger_7,
+ &sm30_shared_ld,
+ &sm30_shared_ld_replay,
+ &sm30_shared_st,
+ &sm30_shared_st_replay,
+ &sm30_sm_cta_launched,
+ &sm30_threads_launched,
+ &sm30_uncached_gld_transactions,
+ &sm30_warps_launched,
+};
+
+/* ==== Compute capability 3.5 (GK110/GK208) ==== */
+static const struct nvc0_hw_sm_query_cfg
+sm35_atom_cas_count =
+{
+ .type = NVC0_HW_SM_QUERY_ATOM_CAS_COUNT,
+ .ctr[0] = _CA(0x0001, B6, UNK1A, 0x00000014),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm35_atom_count =
+{
+ .type = NVC0_HW_SM_QUERY_ATOM_COUNT,
+ .ctr[0] = _CA(0x0001, B6, UNK1A, 0x00000010),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm35_gred_count =
+{
+ .type = NVC0_HW_SM_QUERY_GRED_COUNT,
+ .ctr[0] = _CA(0x0001, B6, UNK1A, 0x00000018),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm35_not_pred_off_inst_executed =
+{
+ .type = NVC0_HW_SM_QUERY_NOT_PRED_OFF_INST_EXECUTED,
+ .ctr[0] = _CA(0x003f, B6, UNK14, 0x29062080),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm35_shared_ld_replay =
+{
+ .type = NVC0_HW_SM_QUERY_SHARED_LD_REPLAY,
+ .ctr[0] = _CB(0xaaaa, LOGOP, UNK13, 0x00000018),
+ .ctr[1] = _CB(0x8888, LOGOP, REPLAY, 0x00000151),
+ .num_counters = 2,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm35_shared_st_replay =
+{
+ .type = NVC0_HW_SM_QUERY_SHARED_ST_REPLAY,
+ .ctr[0] = _CB(0xaaaa, LOGOP, UNK13, 0x00000018),
+ .ctr[1] = _CB(0x8888, LOGOP, REPLAY, 0x000001d1),
+ .num_counters = 2,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg
+sm35_th_inst_executed =
+{
+ .type = NVC0_HW_SM_QUERY_TH_INST_EXECUTED,
+ .ctr[0] = _CA(0x003f, B6, UNK11, 0x29062080),
+ .num_counters = 1,
+ .norm = { 1, 1 },
+};
+
+static const struct nvc0_hw_sm_query_cfg *sm35_hw_sm_queries[] =
+{
+ &sm30_active_cycles,
+ &sm30_active_warps,
+ &sm35_atom_cas_count,
+ &sm35_atom_count,
+ &sm30_gld_request,
+ &sm30_gld_mem_div_replay,
+ &sm30_gst_transactions,
+ &sm30_gst_mem_div_replay,
+ &sm35_gred_count,
+ &sm30_gst_request,
+ &sm30_inst_executed,
+ &sm30_inst_issued1,
+ &sm30_inst_issued2,
+ &sm30_l1_gld_hit,
+ &sm30_l1_gld_miss,
+ &sm30_l1_gld_transactions,
+ &sm30_l1_gst_transactions,
+ &sm30_l1_local_ld_hit,
+ &sm30_l1_local_ld_miss,
+ &sm30_l1_local_st_hit,
+ &sm30_l1_local_st_miss,
+ &sm30_l1_shared_ld_transactions,
+ &sm30_l1_shared_st_transactions,
+ &sm30_local_ld,
+ &sm30_local_ld_transactions,
+ &sm30_local_st,
+ &sm30_local_st_transactions,
+ &sm35_not_pred_off_inst_executed,
+ &sm30_prof_trigger_0,
+ &sm30_prof_trigger_1,
+ &sm30_prof_trigger_2,
+ &sm30_prof_trigger_3,
+ &sm30_prof_trigger_4,
+ &sm30_prof_trigger_5,
+ &sm30_prof_trigger_6,
+ &sm30_prof_trigger_7,
+ &sm30_shared_ld,
+ &sm35_shared_ld_replay,
+ &sm30_shared_st,
+ &sm35_shared_st_replay,
+ &sm30_sm_cta_launched,
+ &sm35_th_inst_executed,
+ &sm30_threads_launched,
+ &sm30_uncached_gld_transactions,
+ &sm30_warps_launched,
+};
+
+#undef _CA
+#undef _CB
/* === PERFORMANCE MONITORING COUNTERS for NVC0:NVE4 === */
/* NOTES:
* mov b32 $r6 $pm6
* mov b32 $r7 $pm7
* set $p0 0x1 eq u32 $r8 0x0
- * mov b32 $r10 c0[0x0]
- * mov b32 $r11 c0[0x4]
+ * mov b32 $r10 c15[0x620]
+ * mov b32 $r11 c15[0x624]
* ext u32 $r8 $r9 0x414
* (not $p0) exit
* mul $r8 u32 $r8 u32 48
* add b32 $r10 $c $r10 $r8
* add b32 $r11 $r11 0x0 $c
- * mov b32 $r8 c0[0x8]
+ * mov b32 $r8 c15[0x628]
* st b128 wt g[$r10d+0x00] $r0q
* st b128 wt g[$r10d+0x10] $r4q
* st b32 wt g[$r10d+0x20] $r8
0x2c00000028019c04ULL,
0x2c0000002c01dc04ULL,
0x190e0000fc81dc03ULL,
- 0x2800400000029de4ULL,
- 0x280040001002dde4ULL,
+ 0x28007c1880029de4ULL,
+ 0x28007c189002dde4ULL,
0x7000c01050921c03ULL,
0x80000000000021e7ULL,
0x10000000c0821c02ULL,
0x4801000020a29c03ULL,
0x0800000000b2dc42ULL,
- 0x2800400020021de4ULL,
+ 0x28007c18a0021de4ULL,
0x9400000000a01fc5ULL,
0x9400000040a11fc5ULL,
0x9400000080a21f85ULL,
{
struct nouveau_device *dev = screen->base.device;
- if (dev->chipset == 0xc0 || dev->chipset == 0xc8)
- return sm20_hw_sm_queries;
- return sm21_hw_sm_queries;
+ switch (screen->base.class_3d) {
+ case NVF0_3D_CLASS:
+ return sm35_hw_sm_queries;
+ case NVE4_3D_CLASS:
+ return sm30_hw_sm_queries;
+ default:
+ if (dev->chipset == 0xc0 || dev->chipset == 0xc8)
+ return sm20_hw_sm_queries;
+ return sm21_hw_sm_queries;
+ }
}
unsigned
struct nouveau_device *dev = screen->base.device;
switch (screen->base.class_3d) {
+ case NVF0_3D_CLASS:
+ return ARRAY_SIZE(sm35_hw_sm_queries);
case NVE4_3D_CLASS:
return ARRAY_SIZE(sm30_hw_sm_queries);
default:
static const struct nvc0_hw_sm_query_cfg *
nvc0_hw_sm_query_get_cfg(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)
{
+ const struct nvc0_hw_sm_query_cfg **queries;
struct nvc0_screen *screen = nvc0->screen;
struct nvc0_query *q = &hq->base;
unsigned num_queries;
unsigned i;
num_queries = nvc0_hw_sm_get_num_queries(screen);
+ queries = nvc0_hw_sm_get_queries(screen);
- if (screen->base.class_3d >= NVE4_3D_CLASS) {
- for (i = 0; i < num_queries; i++) {
- if (NVC0_HW_SM_QUERY(sm30_hw_sm_queries[i].type) == q->type)
- return &sm30_hw_sm_queries[i];
- }
- } else {
- const struct nvc0_hw_sm_query_cfg **queries =
- nvc0_hw_sm_get_queries(screen);
-
- for (i = 0; i < num_queries; i++) {
- if (NVC0_HW_SM_QUERY(queries[i]->type) == q->type)
- return queries[i];
- }
+ for (i = 0; i < num_queries; i++) {
+ if (NVC0_HW_SM_QUERY(queries[i]->type) == q->type)
+ return queries[i];
}
assert(0);
return NULL;
return true;
}
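+/* Build the compute program used to read out the MP performance counters.
+ * The pre-assembled kernels above are selected based on the 3D class. */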
+static inline struct nvc0_program *
+nvc0_hw_sm_get_program(struct nvc0_screen *screen)
+{
+ struct nvc0_program *prog;
+
+ prog = CALLOC_STRUCT(nvc0_program);
+ if (!prog)
+ return NULL;
+
+ prog->type = PIPE_SHADER_COMPUTE;
+ prog->translated = true;
+
+ if (screen->base.class_3d == NVE4_3D_CLASS ||
+ screen->base.class_3d == NVF0_3D_CLASS) {
+ if (screen->base.class_3d == NVE4_3D_CLASS) {
+ prog->code = (uint32_t *)nve4_read_hw_sm_counters_code;
+ prog->code_size = sizeof(nve4_read_hw_sm_counters_code);
+ } else {
+ prog->code = (uint32_t *)nvf0_read_hw_sm_counters_code;
+ prog->code_size = sizeof(nvf0_read_hw_sm_counters_code);
+ }
+ prog->num_gprs = 14;
+ } else {
+ prog->code = (uint32_t *)nvc0_read_hw_sm_counters_code;
+ prog->code_size = sizeof(nvc0_read_hw_sm_counters_code);
+ prog->num_gprs = 12;
+ }
+ return prog;
+}
+
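+/* Upload the query buffer address and sequence number into the auxiliary
+ * constant buffer, where the readback kernels fetch them (the c7[0x620] /
+ * c15[0x620] loads in the disassembly above). */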
+static inline void
+nvc0_hw_sm_upload_input(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)
+{
+ struct nouveau_pushbuf *push = nvc0->base.pushbuf;
+ struct nvc0_screen *screen = nvc0->screen;
+ uint64_t address;
+ const int s = 5;
+
+ address = screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s);
+
+ PUSH_SPACE(push, 11);
+
+ if (screen->base.class_3d >= NVE4_3D_CLASS) {
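+      /* on Kepler, the input is copied through the compute UPLOAD method */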
+ BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
+ PUSH_DATAh(push, address + NVC0_CB_AUX_MP_INFO);
+ PUSH_DATA (push, address + NVC0_CB_AUX_MP_INFO);
+ BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
+ PUSH_DATA (push, 3 * 4);
+ PUSH_DATA (push, 0x1);
+ BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 1 + 3);
+ PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
+ } else {
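+      /* on Fermi, the constant buffer is updated through CB_SIZE/CB_POS */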
+ BEGIN_NVC0(push, NVC0_CP(CB_SIZE), 3);
+ PUSH_DATA (push, NVC0_CB_AUX_SIZE);
+ PUSH_DATAh(push, address);
+ PUSH_DATA (push, address);
+ BEGIN_1IC0(push, NVC0_CP(CB_POS), 1 + 3);
+ PUSH_DATA (push, NVC0_CB_AUX_MP_INFO);
+ }
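+   /* payload read by the kernel: query address (lo, hi) and sequence */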
+ PUSH_DATA (push, (hq->bo->offset + hq->base_offset));
+ PUSH_DATAh(push, (hq->bo->offset + hq->base_offset));
+ PUSH_DATA (push, hq->sequence);
+}
+
static void
nvc0_hw_sm_end_query(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)
{
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
const bool is_nve4 = screen->base.class_3d >= NVE4_3D_CLASS;
struct nvc0_hw_sm_query *hsq = nvc0_hw_sm_query(hq);
+ struct nvc0_program *old = nvc0->compprog;
struct pipe_grid_info info = {};
uint32_t mask;
-   uint32_t input[3];
const uint grid[3] = { screen->mp_count, screen->gpc_count, 1 };
unsigned c, i;
- if (unlikely(!screen->pm.prog)) {
- struct nvc0_program *prog = CALLOC_STRUCT(nvc0_program);
- prog->type = PIPE_SHADER_COMPUTE;
- prog->translated = true;
- prog->parm_size = 12;
- if (is_nve4) {
- prog->code = (uint32_t *)nve4_read_hw_sm_counters_code;
- prog->code_size = sizeof(nve4_read_hw_sm_counters_code);
- prog->num_gprs = 14;
- } else {
- prog->code = (uint32_t *)nvc0_read_hw_sm_counters_code;
- prog->code_size = sizeof(nvc0_read_hw_sm_counters_code);
- prog->num_gprs = 12;
- }
- screen->pm.prog = prog;
- }
+ if (unlikely(!screen->pm.prog))
+ screen->pm.prog = nvc0_hw_sm_get_program(screen);
/* disable all counting */
PUSH_SPACE(push, 8);
PUSH_SPACE(push, 1);
IMMED_NVC0(push, SUBC_CP(NV50_GRAPH_SERIALIZE), 0);
- pipe->bind_compute_state(pipe, screen->pm.prog);
- input[0] = (hq->bo->offset + hq->base_offset);
- input[1] = (hq->bo->offset + hq->base_offset) >> 32;
- input[2] = hq->sequence;
+ /* upload input data for the compute shader which reads MP counters */
+ nvc0_hw_sm_upload_input(nvc0, hq);
+ pipe->bind_compute_state(pipe, screen->pm.prog);
for (i = 0; i < 3; i++) {
info.block[i] = block[i];
info.grid[i] = grid[i];
info.pc = 0;
-   info.input = input;
pipe->launch_grid(pipe, &info);
+ pipe->bind_compute_state(pipe, old);
nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_QUERY);
if (id < count) {
if (screen->compute) {
- if (screen->base.class_3d == NVE4_3D_CLASS) {
- const struct nvc0_hw_sm_query_cfg *q = &sm30_hw_sm_queries[id];
-
- info->name = nvc0_hw_sm_query_get_name(q->type);
- info->query_type = NVC0_HW_SM_QUERY(q->type);
- info->group_id = NVC0_HW_SM_QUERY_GROUP;
- return 1;
- } else
- if (screen->base.class_3d < NVE4_3D_CLASS) {
+ if (screen->base.class_3d <= NVF0_3D_CLASS) {
const struct nvc0_hw_sm_query_cfg **queries =
nvc0_hw_sm_get_queries(screen);