/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef GEN_PERF_H
#define GEN_PERF_H

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include <sys/sysmacros.h>

#include "compiler/glsl/list.h"
#include "util/hash_table.h"
#include "util/ralloc.h"
37 struct gen_device_info
;
39 struct gen_perf_config
;
40 struct gen_perf_query_info
;
42 #define GEN7_RPSTAT1 0xA01C
43 #define GEN7_RPSTAT1_CURR_GT_FREQ_SHIFT 7
44 #define GEN7_RPSTAT1_CURR_GT_FREQ_MASK INTEL_MASK(13, 7)
45 #define GEN7_RPSTAT1_PREV_GT_FREQ_SHIFT 0
46 #define GEN7_RPSTAT1_PREV_GT_FREQ_MASK INTEL_MASK(6, 0)
48 #define GEN9_RPSTAT0 0xA01C
49 #define GEN9_RPSTAT0_CURR_GT_FREQ_SHIFT 23
50 #define GEN9_RPSTAT0_CURR_GT_FREQ_MASK INTEL_MASK(31, 23)
51 #define GEN9_RPSTAT0_PREV_GT_FREQ_SHIFT 0
52 #define GEN9_RPSTAT0_PREV_GT_FREQ_MASK INTEL_MASK(8, 0)
54 enum gen_perf_counter_type
{
55 GEN_PERF_COUNTER_TYPE_EVENT
,
56 GEN_PERF_COUNTER_TYPE_DURATION_NORM
,
57 GEN_PERF_COUNTER_TYPE_DURATION_RAW
,
58 GEN_PERF_COUNTER_TYPE_THROUGHPUT
,
59 GEN_PERF_COUNTER_TYPE_RAW
,
60 GEN_PERF_COUNTER_TYPE_TIMESTAMP
,
63 enum gen_perf_counter_data_type
{
64 GEN_PERF_COUNTER_DATA_TYPE_BOOL32
,
65 GEN_PERF_COUNTER_DATA_TYPE_UINT32
,
66 GEN_PERF_COUNTER_DATA_TYPE_UINT64
,
67 GEN_PERF_COUNTER_DATA_TYPE_FLOAT
,
68 GEN_PERF_COUNTER_DATA_TYPE_DOUBLE
,
71 struct gen_pipeline_stat
{
78 * The largest OA formats we can use include:
80 * 1 timestamp, 45 A counters, 8 B counters and 8 C counters.
82 * 1 timestamp, 1 clock, 36 A counters, 8 B counters and 8 C counters
84 #define MAX_OA_REPORT_COUNTERS 62
86 #define IA_VERTICES_COUNT 0x2310
87 #define IA_PRIMITIVES_COUNT 0x2318
88 #define VS_INVOCATION_COUNT 0x2320
89 #define HS_INVOCATION_COUNT 0x2300
90 #define DS_INVOCATION_COUNT 0x2308
91 #define GS_INVOCATION_COUNT 0x2328
92 #define GS_PRIMITIVES_COUNT 0x2330
93 #define CL_INVOCATION_COUNT 0x2338
94 #define CL_PRIMITIVES_COUNT 0x2340
95 #define PS_INVOCATION_COUNT 0x2348
96 #define CS_INVOCATION_COUNT 0x2290
97 #define PS_DEPTH_COUNT 0x2350
100 * When currently allocate only one page for pipeline statistics queries. Here
101 * we derived the maximum number of counters for that amount.
103 #define STATS_BO_SIZE 4096
104 #define STATS_BO_END_OFFSET_BYTES (STATS_BO_SIZE / 2)
105 #define MAX_STAT_COUNTERS (STATS_BO_END_OFFSET_BYTES / 8)
107 #define I915_PERF_OA_SAMPLE_SIZE (8 + /* drm_i915_perf_record_header */ \
108 256) /* OA counter report */
110 struct gen_perf_query_result
{
112 * Storage for the final accumulated OA counters.
114 uint64_t accumulator
[MAX_OA_REPORT_COUNTERS
];
117 * Hw ID used by the context on which the query was running.
122 * Number of reports accumulated to produce the results.
124 uint32_t reports_accumulated
;
127 * Frequency in the slices of the GT at the begin and end of the
130 uint64_t slice_frequency
[2];
133 * Frequency in the unslice of the GT at the begin and end of the
136 uint64_t unslice_frequency
[2];
139 struct gen_perf_query_counter
{
142 enum gen_perf_counter_type type
;
143 enum gen_perf_counter_data_type data_type
;
148 uint64_t (*oa_counter_read_uint64
)(struct gen_perf_config
*perf
,
149 const struct gen_perf_query_info
*query
,
150 const uint64_t *accumulator
);
151 float (*oa_counter_read_float
)(struct gen_perf_config
*perf
,
152 const struct gen_perf_query_info
*query
,
153 const uint64_t *accumulator
);
154 struct gen_pipeline_stat pipeline_stat
;
158 struct gen_perf_query_register_prog
{
163 struct gen_perf_query_info
{
164 enum gen_perf_query_type
{
165 GEN_PERF_QUERY_TYPE_OA
,
166 GEN_PERF_QUERY_TYPE_RAW
,
167 GEN_PERF_QUERY_TYPE_PIPELINE
,
171 struct gen_perf_query_counter
*counters
;
177 uint64_t oa_metrics_set_id
;
180 /* For indexing into the accumulator[] ... */
182 int gpu_clock_offset
;
187 /* Register programming for a given query */
188 struct gen_perf_query_register_prog
*flex_regs
;
189 uint32_t n_flex_regs
;
191 struct gen_perf_query_register_prog
*mux_regs
;
194 struct gen_perf_query_register_prog
*b_counter_regs
;
195 uint32_t n_b_counter_regs
;
198 struct gen_perf_config
{
199 struct gen_perf_query_info
*queries
;
202 /* Variables referenced in the XML meta data for OA performance
203 * counters, e.g in the normalization equations.
205 * All uint64_t for consistent operand types in generated code
208 uint64_t timestamp_frequency
; /** $GpuTimestampFrequency */
209 uint64_t n_eus
; /** $EuCoresTotalCount */
210 uint64_t n_eu_slices
; /** $EuSlicesTotalCount */
211 uint64_t n_eu_sub_slices
; /** $EuSubslicesTotalCount */
212 uint64_t eu_threads_count
; /** $EuThreadsCount */
213 uint64_t slice_mask
; /** $SliceMask */
214 uint64_t subslice_mask
; /** $SubsliceMask */
215 uint64_t gt_min_freq
; /** $GpuMinFrequency */
216 uint64_t gt_max_freq
; /** $GpuMaxFrequency */
217 uint64_t revision
; /** $SkuRevisionId */
220 /* OA metric sets, indexed by GUID, as know by Mesa at build time, to
221 * cross-reference with the GUIDs of configs advertised by the kernel at
224 struct hash_table
*oa_metrics_table
;
226 /* Location of the device's sysfs entry. */
227 char sysfs_dev_dir
[256];
230 void *(*bo_alloc
)(void *bufmgr
, const char *name
, uint64_t size
);
231 void (*bo_unreference
)(void *bo
);
232 void *(*bo_map
)(void *ctx
, void *bo
, unsigned flags
);
233 void (*bo_unmap
)(void *bo
);
234 bool (*batch_references
)(void *batch
, void *bo
);
235 void (*bo_wait_rendering
)(void *bo
);
236 int (*bo_busy
)(void *bo
);
237 void (*emit_mi_flush
)(void *ctx
);
238 void (*emit_mi_report_perf_count
)(void *ctx
,
240 uint32_t offset_in_bytes
,
242 void (*batchbuffer_flush
)(void *ctx
,
243 const char *file
, int line
);
244 void (*capture_frequency_stat_register
)(void *ctx
, void *bo
,
246 void (*store_register_mem64
)(void *ctx
, void *bo
, uint32_t reg
, uint32_t offset
);
251 struct gen_perf_query_object
;
252 const struct gen_perf_query_info
* gen_perf_query_info(const struct gen_perf_query_object
*);
254 struct gen_perf_context
;
255 struct gen_perf_context
*gen_perf_new_context(void *parent
);
257 void gen_perf_init_metrics(struct gen_perf_config
*perf_cfg
,
258 const struct gen_device_info
*devinfo
,
260 void gen_perf_init_context(struct gen_perf_context
*perf_ctx
,
261 struct gen_perf_config
*perf_cfg
,
262 void * ctx
, /* driver context (eg, brw_context) */
263 void * bufmgr
, /* eg brw_bufmgr */
264 const struct gen_device_info
*devinfo
,
268 struct gen_perf_config
*gen_perf_config(struct gen_perf_context
*ctx
);
270 int gen_perf_active_queries(struct gen_perf_context
*perf_ctx
,
271 const struct gen_perf_query_info
*query
);
274 gen_perf_query_counter_get_size(const struct gen_perf_query_counter
*counter
)
276 switch (counter
->data_type
) {
277 case GEN_PERF_COUNTER_DATA_TYPE_BOOL32
:
278 return sizeof(uint32_t);
279 case GEN_PERF_COUNTER_DATA_TYPE_UINT32
:
280 return sizeof(uint32_t);
281 case GEN_PERF_COUNTER_DATA_TYPE_UINT64
:
282 return sizeof(uint64_t);
283 case GEN_PERF_COUNTER_DATA_TYPE_FLOAT
:
284 return sizeof(float);
285 case GEN_PERF_COUNTER_DATA_TYPE_DOUBLE
:
286 return sizeof(double);
288 unreachable("invalid counter data type");
292 static inline struct gen_perf_config
*
293 gen_perf_new(void *ctx
)
295 struct gen_perf_config
*perf
= rzalloc(ctx
, struct gen_perf_config
);
299 struct gen_perf_query_object
*
300 gen_perf_new_query(struct gen_perf_context
*, unsigned query_index
);
303 bool gen_perf_begin_query(struct gen_perf_context
*perf_ctx
,
304 struct gen_perf_query_object
*query
);
305 void gen_perf_end_query(struct gen_perf_context
*perf_ctx
,
306 struct gen_perf_query_object
*query
);
307 void gen_perf_wait_query(struct gen_perf_context
*perf_ctx
,
308 struct gen_perf_query_object
*query
,
309 void *current_batch
);
310 bool gen_perf_is_query_ready(struct gen_perf_context
*perf_ctx
,
311 struct gen_perf_query_object
*query
,
312 void *current_batch
);
313 void gen_perf_delete_query(struct gen_perf_context
*perf_ctx
,
314 struct gen_perf_query_object
*query
);
315 void gen_perf_get_query_data(struct gen_perf_context
*perf_ctx
,
316 struct gen_perf_query_object
*query
,
319 unsigned *bytes_written
);
321 void gen_perf_dump_query_count(struct gen_perf_context
*perf_ctx
);
322 void gen_perf_dump_query(struct gen_perf_context
*perf_ctx
,
323 struct gen_perf_query_object
*obj
,
324 void *current_batch
);
326 #endif /* GEN_PERF_H */