/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors: Marek Olšák <maraeo@gmail.com>
 */
/* The GPU load is measured as follows.
 *
 * There is a thread which samples the GRBM_STATUS register at a certain
 * frequency and the "busy" or "idle" counter is incremented based on
 * whether the GUI_ACTIVE bit is set or not.
 *
 * Then, the user can sample the counters twice and calculate the average
 * GPU load between the two samples.
 */
37 #include "r600_pipe_common.h"
38 #include "r600_query.h"
39 #include "os/os_time.h"
/* Sampling frequency of the load thread. For good accuracy at 1000 fps or
 * lower. This will be inaccurate for higher fps (there are too few samples
 * per frame). */
#define SAMPLES_PER_SEC 10000
/* MMIO register offsets and single-bit "busy" extractors.
 * Each BUSY macro takes the raw 32-bit register value x and yields 0 or 1. */
#define GRBM_STATUS		0x8010
#define TA_BUSY(x)		(((x) >> 14) & 0x1)
#define GDS_BUSY(x)		(((x) >> 15) & 0x1)
#define VGT_BUSY(x)		(((x) >> 17) & 0x1)
#define IA_BUSY(x)		(((x) >> 19) & 0x1)
#define SX_BUSY(x)		(((x) >> 20) & 0x1)
#define WD_BUSY(x)		(((x) >> 21) & 0x1)
#define SPI_BUSY(x)		(((x) >> 22) & 0x1)
#define BCI_BUSY(x)		(((x) >> 23) & 0x1)
#define SC_BUSY(x)		(((x) >> 24) & 0x1)
#define PA_BUSY(x)		(((x) >> 25) & 0x1)
#define DB_BUSY(x)		(((x) >> 26) & 0x1)
#define CP_BUSY(x)		(((x) >> 29) & 0x1)
#define CB_BUSY(x)		(((x) >> 30) & 0x1)
#define GUI_ACTIVE(x)		(((x) >> 31) & 0x1)

#define SRBM_STATUS2		0x0e4c
#define SDMA_BUSY(x)		(((x) >> 5) & 0x1)

#define CP_STAT			0x8680
#define PFP_BUSY(x)		(((x) >> 15) & 0x1)
#define MEQ_BUSY(x)		(((x) >> 16) & 0x1)
#define ME_BUSY(x)		(((x) >> 17) & 0x1)
#define SURFACE_SYNC_BUSY(x)	(((x) >> 21) & 0x1)
#define DMA_BUSY(x)		(((x) >> 22) & 0x1)
#define SCRATCH_RAM_BUSY(x)	(((x) >> 24) & 0x1)
#define CE_BUSY(x)		(((x) >> 26) & 0x1)
/* Pass the already-computed 0/1 value through unchanged; lets the "gpu"
 * counter reuse UPDATE_COUNTER with a precomputed busy flag. */
#define IDENTITY(x) (x)

/* Increment the busy counter of "field" if mask(value) is set, otherwise
 * increment its idle counter. Expects locals "value" (register value) and
 * "counters" (union r600_mmio_counters *) in the calling scope. */
#define UPDATE_COUNTER(field, mask) \
	do { \
		if (mask(value)) \
			p_atomic_inc(&counters->named.field.busy); \
		else \
			p_atomic_inc(&counters->named.field.idle); \
	} while (0)
83 static void r600_update_mmio_counters(struct r600_common_screen
*rscreen
,
84 union r600_mmio_counters
*counters
)
87 bool gui_busy
, sdma_busy
= false;
90 rscreen
->ws
->read_registers(rscreen
->ws
, GRBM_STATUS
, 1, &value
);
92 UPDATE_COUNTER(ta
, TA_BUSY
);
93 UPDATE_COUNTER(gds
, GDS_BUSY
);
94 UPDATE_COUNTER(vgt
, VGT_BUSY
);
95 UPDATE_COUNTER(ia
, IA_BUSY
);
96 UPDATE_COUNTER(sx
, SX_BUSY
);
97 UPDATE_COUNTER(wd
, WD_BUSY
);
98 UPDATE_COUNTER(spi
, SPI_BUSY
);
99 UPDATE_COUNTER(bci
, BCI_BUSY
);
100 UPDATE_COUNTER(sc
, SC_BUSY
);
101 UPDATE_COUNTER(pa
, PA_BUSY
);
102 UPDATE_COUNTER(db
, DB_BUSY
);
103 UPDATE_COUNTER(cp
, CP_BUSY
);
104 UPDATE_COUNTER(cb
, CB_BUSY
);
105 UPDATE_COUNTER(gui
, GUI_ACTIVE
);
106 gui_busy
= GUI_ACTIVE(value
);
108 if (rscreen
->chip_class
>= EVERGREEN
) {
110 rscreen
->ws
->read_registers(rscreen
->ws
, SRBM_STATUS2
, 1, &value
);
112 UPDATE_COUNTER(sdma
, SDMA_BUSY
);
113 sdma_busy
= SDMA_BUSY(value
);
116 if (rscreen
->chip_class
>= VI
) {
118 rscreen
->ws
->read_registers(rscreen
->ws
, CP_STAT
, 1, &value
);
120 UPDATE_COUNTER(pfp
, PFP_BUSY
);
121 UPDATE_COUNTER(meq
, MEQ_BUSY
);
122 UPDATE_COUNTER(me
, ME_BUSY
);
123 UPDATE_COUNTER(surf_sync
, SURFACE_SYNC_BUSY
);
124 UPDATE_COUNTER(dma
, DMA_BUSY
);
125 UPDATE_COUNTER(scratch_ram
, SCRATCH_RAM_BUSY
);
126 UPDATE_COUNTER(ce
, CE_BUSY
);
129 value
= gui_busy
|| sdma_busy
;
130 UPDATE_COUNTER(gpu
, IDENTITY
);
133 #undef UPDATE_COUNTER
135 static PIPE_THREAD_ROUTINE(r600_gpu_load_thread
, param
)
137 struct r600_common_screen
*rscreen
= (struct r600_common_screen
*)param
;
138 const int period_us
= 1000000 / SAMPLES_PER_SEC
;
139 int sleep_us
= period_us
;
140 int64_t cur_time
, last_time
= os_time_get();
142 while (!p_atomic_read(&rscreen
->gpu_load_stop_thread
)) {
144 os_time_sleep(sleep_us
);
146 /* Make sure we sleep the ideal amount of time to match
147 * the expected frequency. */
148 cur_time
= os_time_get();
150 if (os_time_timeout(last_time
, last_time
+ period_us
,
152 sleep_us
= MAX2(sleep_us
- 1, 1);
156 /*printf("Hz: %.1f\n", 1000000.0 / (cur_time - last_time));*/
157 last_time
= cur_time
;
159 /* Update the counters. */
160 r600_update_mmio_counters(rscreen
, &rscreen
->mmio_counters
);
162 p_atomic_dec(&rscreen
->gpu_load_stop_thread
);
166 void r600_gpu_load_kill_thread(struct r600_common_screen
*rscreen
)
168 if (!rscreen
->gpu_load_thread
)
171 p_atomic_inc(&rscreen
->gpu_load_stop_thread
);
172 pipe_thread_wait(rscreen
->gpu_load_thread
);
173 rscreen
->gpu_load_thread
= 0;
176 static uint64_t r600_read_mmio_counter(struct r600_common_screen
*rscreen
,
179 /* Start the thread if needed. */
180 if (!rscreen
->gpu_load_thread
) {
181 pipe_mutex_lock(rscreen
->gpu_load_mutex
);
182 /* Check again inside the mutex. */
183 if (!rscreen
->gpu_load_thread
)
184 rscreen
->gpu_load_thread
=
185 pipe_thread_create(r600_gpu_load_thread
, rscreen
);
186 pipe_mutex_unlock(rscreen
->gpu_load_mutex
);
189 unsigned busy
= p_atomic_read(&rscreen
->mmio_counters
.array
[busy_index
]);
190 unsigned idle
= p_atomic_read(&rscreen
->mmio_counters
.array
[busy_index
+ 1]);
192 return busy
| ((uint64_t)idle
<< 32);
195 static unsigned r600_end_mmio_counter(struct r600_common_screen
*rscreen
,
196 uint64_t begin
, unsigned busy_index
)
198 uint64_t end
= r600_read_mmio_counter(rscreen
, busy_index
);
199 unsigned busy
= (end
& 0xffffffff) - (begin
& 0xffffffff);
200 unsigned idle
= (end
>> 32) - (begin
>> 32);
202 /* Calculate the % of time the busy counter was being incremented.
204 * If no counters were incremented, return the current counter status.
205 * It's for the case when the load is queried faster than
206 * the counters are updated.
209 return busy
*100 / (busy
+ idle
);
211 union r600_mmio_counters counters
;
213 memset(&counters
, 0, sizeof(counters
));
214 r600_update_mmio_counters(rscreen
, &counters
);
215 return counters
.array
[busy_index
] ? 100 : 0;
/* Index of field's busy counter within mmio_counters.array; the matching
 * idle counter is always at the next index (busy_index + 1). */
#define BUSY_INDEX(rscreen, field) (&rscreen->mmio_counters.named.field.busy - \
				    rscreen->mmio_counters.array)
222 static unsigned busy_index_from_type(struct r600_common_screen
*rscreen
,
226 case R600_QUERY_GPU_LOAD
:
227 return BUSY_INDEX(rscreen
, gpu
);
228 case R600_QUERY_GPU_SHADERS_BUSY
:
229 return BUSY_INDEX(rscreen
, spi
);
230 case R600_QUERY_GPU_TA_BUSY
:
231 return BUSY_INDEX(rscreen
, ta
);
232 case R600_QUERY_GPU_GDS_BUSY
:
233 return BUSY_INDEX(rscreen
, gds
);
234 case R600_QUERY_GPU_VGT_BUSY
:
235 return BUSY_INDEX(rscreen
, vgt
);
236 case R600_QUERY_GPU_IA_BUSY
:
237 return BUSY_INDEX(rscreen
, ia
);
238 case R600_QUERY_GPU_SX_BUSY
:
239 return BUSY_INDEX(rscreen
, sx
);
240 case R600_QUERY_GPU_WD_BUSY
:
241 return BUSY_INDEX(rscreen
, wd
);
242 case R600_QUERY_GPU_BCI_BUSY
:
243 return BUSY_INDEX(rscreen
, bci
);
244 case R600_QUERY_GPU_SC_BUSY
:
245 return BUSY_INDEX(rscreen
, sc
);
246 case R600_QUERY_GPU_PA_BUSY
:
247 return BUSY_INDEX(rscreen
, pa
);
248 case R600_QUERY_GPU_DB_BUSY
:
249 return BUSY_INDEX(rscreen
, db
);
250 case R600_QUERY_GPU_CP_BUSY
:
251 return BUSY_INDEX(rscreen
, cp
);
252 case R600_QUERY_GPU_CB_BUSY
:
253 return BUSY_INDEX(rscreen
, cb
);
254 case R600_QUERY_GPU_SDMA_BUSY
:
255 return BUSY_INDEX(rscreen
, sdma
);
256 case R600_QUERY_GPU_PFP_BUSY
:
257 return BUSY_INDEX(rscreen
, pfp
);
258 case R600_QUERY_GPU_MEQ_BUSY
:
259 return BUSY_INDEX(rscreen
, meq
);
260 case R600_QUERY_GPU_ME_BUSY
:
261 return BUSY_INDEX(rscreen
, me
);
262 case R600_QUERY_GPU_SURF_SYNC_BUSY
:
263 return BUSY_INDEX(rscreen
, surf_sync
);
264 case R600_QUERY_GPU_DMA_BUSY
:
265 return BUSY_INDEX(rscreen
, dma
);
266 case R600_QUERY_GPU_SCRATCH_RAM_BUSY
:
267 return BUSY_INDEX(rscreen
, scratch_ram
);
268 case R600_QUERY_GPU_CE_BUSY
:
269 return BUSY_INDEX(rscreen
, ce
);
271 unreachable("invalid query type");
275 uint64_t r600_begin_counter(struct r600_common_screen
*rscreen
, unsigned type
)
277 unsigned busy_index
= busy_index_from_type(rscreen
, type
);
278 return r600_read_mmio_counter(rscreen
, busy_index
);
281 unsigned r600_end_counter(struct r600_common_screen
*rscreen
, unsigned type
,
284 unsigned busy_index
= busy_index_from_type(rscreen
, type
);
285 return r600_end_mmio_counter(rscreen
, begin
, busy_index
);