gallium/util: replace pipe_thread_wait() with thrd_join()
[mesa.git] / src / gallium / drivers / radeon / r600_gpu_load.c
1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors: Marek Olšák <maraeo@gmail.com>
24 *
25 */
26
27 /* The GPU load is measured as follows.
28 *
29 * There is a thread which samples the GRBM_STATUS register at a certain
30 * frequency and the "busy" or "idle" counter is incremented based on
31 * whether the GUI_ACTIVE bit is set or not.
32 *
33 * Then, the user can sample the counters twice and calculate the average
34 * GPU load between the two samples.
35 */
36
37 #include "r600_pipe_common.h"
38 #include "r600_query.h"
39 #include "os/os_time.h"
40
41 /* For good accuracy at 1000 fps or lower. This will be inaccurate for higher
42 * fps (there are too few samples per frame). */
43 #define SAMPLES_PER_SEC 10000
44
45 #define GRBM_STATUS 0x8010
46 #define TA_BUSY(x) (((x) >> 14) & 0x1)
47 #define GDS_BUSY(x) (((x) >> 15) & 0x1)
48 #define VGT_BUSY(x) (((x) >> 17) & 0x1)
49 #define IA_BUSY(x) (((x) >> 19) & 0x1)
50 #define SX_BUSY(x) (((x) >> 20) & 0x1)
51 #define WD_BUSY(x) (((x) >> 21) & 0x1)
52 #define SPI_BUSY(x) (((x) >> 22) & 0x1)
53 #define BCI_BUSY(x) (((x) >> 23) & 0x1)
54 #define SC_BUSY(x) (((x) >> 24) & 0x1)
55 #define PA_BUSY(x) (((x) >> 25) & 0x1)
56 #define DB_BUSY(x) (((x) >> 26) & 0x1)
57 #define CP_BUSY(x) (((x) >> 29) & 0x1)
58 #define CB_BUSY(x) (((x) >> 30) & 0x1)
59 #define GUI_ACTIVE(x) (((x) >> 31) & 0x1)
60
61 #define SRBM_STATUS2 0x0e4c
62 #define SDMA_BUSY(x) (((x) >> 5) & 0x1)
63
64 #define CP_STAT 0x8680
65 #define PFP_BUSY(x) (((x) >> 15) & 0x1)
66 #define MEQ_BUSY(x) (((x) >> 16) & 0x1)
67 #define ME_BUSY(x) (((x) >> 17) & 0x1)
68 #define SURFACE_SYNC_BUSY(x) (((x) >> 21) & 0x1)
69 #define DMA_BUSY(x) (((x) >> 22) & 0x1)
70 #define SCRATCH_RAM_BUSY(x) (((x) >> 24) & 0x1)
71 #define CE_BUSY(x) (((x) >> 26) & 0x1)
72
73 #define IDENTITY(x) x
74
75 #define UPDATE_COUNTER(field, mask) \
76 do { \
77 if (mask(value)) \
78 p_atomic_inc(&counters->named.field.busy); \
79 else \
80 p_atomic_inc(&counters->named.field.idle); \
81 } while (0)
82
/* Take one sample: read the busy-state registers once and bump the
 * per-hw-block busy or idle counter for each block, depending on whether
 * its busy bit is set in the register value just read.
 *
 * \param rscreen   screen whose winsys provides register access
 * \param counters  counter set to increment (either the screen's shared
 *                  set or a caller-local scratch set)
 */
static void r600_update_mmio_counters(struct r600_common_screen *rscreen,
				      union r600_mmio_counters *counters)
{
	/* UPDATE_COUNTER implicitly reads this variable; each register read
	 * below refreshes it before the following UPDATE_COUNTER calls. */
	uint32_t value = 0;
	bool gui_busy, sdma_busy = false;

	/* GRBM_STATUS */
	rscreen->ws->read_registers(rscreen->ws, GRBM_STATUS, 1, &value);

	UPDATE_COUNTER(ta, TA_BUSY);
	UPDATE_COUNTER(gds, GDS_BUSY);
	UPDATE_COUNTER(vgt, VGT_BUSY);
	UPDATE_COUNTER(ia, IA_BUSY);
	UPDATE_COUNTER(sx, SX_BUSY);
	UPDATE_COUNTER(wd, WD_BUSY);
	UPDATE_COUNTER(spi, SPI_BUSY);
	UPDATE_COUNTER(bci, BCI_BUSY);
	UPDATE_COUNTER(sc, SC_BUSY);
	UPDATE_COUNTER(pa, PA_BUSY);
	UPDATE_COUNTER(db, DB_BUSY);
	UPDATE_COUNTER(cp, CP_BUSY);
	UPDATE_COUNTER(cb, CB_BUSY);
	UPDATE_COUNTER(gui, GUI_ACTIVE);
	/* Remember GUI activity for the combined "gpu" counter below. */
	gui_busy = GUI_ACTIVE(value);

	if (rscreen->chip_class >= CIK) {
		/* SRBM_STATUS2: SDMA busy bit only exists on CIK and newer. */
		rscreen->ws->read_registers(rscreen->ws, SRBM_STATUS2, 1, &value);

		UPDATE_COUNTER(sdma, SDMA_BUSY);
		sdma_busy = SDMA_BUSY(value);
	}

	if (rscreen->chip_class >= VI) {
		/* CP_STAT: command-processor sub-unit bits, VI and newer. */
		rscreen->ws->read_registers(rscreen->ws, CP_STAT, 1, &value);

		UPDATE_COUNTER(pfp, PFP_BUSY);
		UPDATE_COUNTER(meq, MEQ_BUSY);
		UPDATE_COUNTER(me, ME_BUSY);
		UPDATE_COUNTER(surf_sync, SURFACE_SYNC_BUSY);
		UPDATE_COUNTER(dma, DMA_BUSY);
		UPDATE_COUNTER(scratch_ram, SCRATCH_RAM_BUSY);
		UPDATE_COUNTER(ce, CE_BUSY);
	}

	/* Composite "gpu" counter: busy if either the graphics pipe or SDMA
	 * was busy.  Reuse "value" so UPDATE_COUNTER's IDENTITY mask sees it. */
	value = gui_busy || sdma_busy;
	UPDATE_COUNTER(gpu, IDENTITY);
}
132
133 #undef UPDATE_COUNTER
134
135 static int
136 r600_gpu_load_thread(void *param)
137 {
138 struct r600_common_screen *rscreen = (struct r600_common_screen*)param;
139 const int period_us = 1000000 / SAMPLES_PER_SEC;
140 int sleep_us = period_us;
141 int64_t cur_time, last_time = os_time_get();
142
143 while (!p_atomic_read(&rscreen->gpu_load_stop_thread)) {
144 if (sleep_us)
145 os_time_sleep(sleep_us);
146
147 /* Make sure we sleep the ideal amount of time to match
148 * the expected frequency. */
149 cur_time = os_time_get();
150
151 if (os_time_timeout(last_time, last_time + period_us,
152 cur_time))
153 sleep_us = MAX2(sleep_us - 1, 1);
154 else
155 sleep_us += 1;
156
157 /*printf("Hz: %.1f\n", 1000000.0 / (cur_time - last_time));*/
158 last_time = cur_time;
159
160 /* Update the counters. */
161 r600_update_mmio_counters(rscreen, &rscreen->mmio_counters);
162 }
163 p_atomic_dec(&rscreen->gpu_load_stop_thread);
164 return 0;
165 }
166
167 void r600_gpu_load_kill_thread(struct r600_common_screen *rscreen)
168 {
169 if (!rscreen->gpu_load_thread)
170 return;
171
172 p_atomic_inc(&rscreen->gpu_load_stop_thread);
173 thrd_join(rscreen->gpu_load_thread, NULL);
174 rscreen->gpu_load_thread = 0;
175 }
176
/* Read one busy/idle counter pair, lazily starting the sampler thread on
 * first use.
 *
 * \param busy_index  index of the busy counter in mmio_counters.array;
 *                    the matching idle counter is at busy_index + 1
 * \return packed value: busy count in the low 32 bits, idle count in the
 *         high 32 bits
 */
static uint64_t r600_read_mmio_counter(struct r600_common_screen *rscreen,
				       unsigned busy_index)
{
	/* Start the thread if needed.  Double-checked locking: the unlocked
	 * read is a fast path; the mutex serializes actual creation. */
	if (!rscreen->gpu_load_thread) {
		mtx_lock(&rscreen->gpu_load_mutex);
		/* Check again inside the mutex. */
		if (!rscreen->gpu_load_thread)
			rscreen->gpu_load_thread =
				pipe_thread_create(r600_gpu_load_thread, rscreen);
		mtx_unlock(&rscreen->gpu_load_mutex);
	}

	/* The two counters are read separately (not atomically as a pair);
	 * callers tolerate the resulting slight skew. */
	unsigned busy = p_atomic_read(&rscreen->mmio_counters.array[busy_index]);
	unsigned idle = p_atomic_read(&rscreen->mmio_counters.array[busy_index + 1]);

	return busy | ((uint64_t)idle << 32);
}
195
196 static unsigned r600_end_mmio_counter(struct r600_common_screen *rscreen,
197 uint64_t begin, unsigned busy_index)
198 {
199 uint64_t end = r600_read_mmio_counter(rscreen, busy_index);
200 unsigned busy = (end & 0xffffffff) - (begin & 0xffffffff);
201 unsigned idle = (end >> 32) - (begin >> 32);
202
203 /* Calculate the % of time the busy counter was being incremented.
204 *
205 * If no counters were incremented, return the current counter status.
206 * It's for the case when the load is queried faster than
207 * the counters are updated.
208 */
209 if (idle || busy) {
210 return busy*100 / (busy + idle);
211 } else {
212 union r600_mmio_counters counters;
213
214 memset(&counters, 0, sizeof(counters));
215 r600_update_mmio_counters(rscreen, &counters);
216 return counters.array[busy_index] ? 100 : 0;
217 }
218 }
219
220 #define BUSY_INDEX(rscreen, field) (&rscreen->mmio_counters.named.field.busy - \
221 rscreen->mmio_counters.array)
222
/* Map an R600_QUERY_GPU_* query type to the index of its busy counter
 * inside mmio_counters.array (via the BUSY_INDEX macro, which computes the
 * offset of the named field's "busy" member in the union's array view). */
static unsigned busy_index_from_type(struct r600_common_screen *rscreen,
				     unsigned type)
{
	switch (type) {
	case R600_QUERY_GPU_LOAD:
		return BUSY_INDEX(rscreen, gpu);
	case R600_QUERY_GPU_SHADERS_BUSY:
		return BUSY_INDEX(rscreen, spi);
	case R600_QUERY_GPU_TA_BUSY:
		return BUSY_INDEX(rscreen, ta);
	case R600_QUERY_GPU_GDS_BUSY:
		return BUSY_INDEX(rscreen, gds);
	case R600_QUERY_GPU_VGT_BUSY:
		return BUSY_INDEX(rscreen, vgt);
	case R600_QUERY_GPU_IA_BUSY:
		return BUSY_INDEX(rscreen, ia);
	case R600_QUERY_GPU_SX_BUSY:
		return BUSY_INDEX(rscreen, sx);
	case R600_QUERY_GPU_WD_BUSY:
		return BUSY_INDEX(rscreen, wd);
	case R600_QUERY_GPU_BCI_BUSY:
		return BUSY_INDEX(rscreen, bci);
	case R600_QUERY_GPU_SC_BUSY:
		return BUSY_INDEX(rscreen, sc);
	case R600_QUERY_GPU_PA_BUSY:
		return BUSY_INDEX(rscreen, pa);
	case R600_QUERY_GPU_DB_BUSY:
		return BUSY_INDEX(rscreen, db);
	case R600_QUERY_GPU_CP_BUSY:
		return BUSY_INDEX(rscreen, cp);
	case R600_QUERY_GPU_CB_BUSY:
		return BUSY_INDEX(rscreen, cb);
	case R600_QUERY_GPU_SDMA_BUSY:
		return BUSY_INDEX(rscreen, sdma);
	case R600_QUERY_GPU_PFP_BUSY:
		return BUSY_INDEX(rscreen, pfp);
	case R600_QUERY_GPU_MEQ_BUSY:
		return BUSY_INDEX(rscreen, meq);
	case R600_QUERY_GPU_ME_BUSY:
		return BUSY_INDEX(rscreen, me);
	case R600_QUERY_GPU_SURF_SYNC_BUSY:
		return BUSY_INDEX(rscreen, surf_sync);
	case R600_QUERY_GPU_DMA_BUSY:
		return BUSY_INDEX(rscreen, dma);
	case R600_QUERY_GPU_SCRATCH_RAM_BUSY:
		return BUSY_INDEX(rscreen, scratch_ram);
	case R600_QUERY_GPU_CE_BUSY:
		return BUSY_INDEX(rscreen, ce);
	default:
		unreachable("invalid query type");
	}
}
275
/* Begin a GPU-load measurement for the given query type: returns a packed
 * busy/idle counter snapshot to later pass to r600_end_counter(). */
uint64_t r600_begin_counter(struct r600_common_screen *rscreen, unsigned type)
{
	return r600_read_mmio_counter(rscreen,
				      busy_index_from_type(rscreen, type));
}
281
/* Finish a GPU-load measurement started with r600_begin_counter(); returns
 * the busy percentage (0-100) over the measured interval. */
unsigned r600_end_counter(struct r600_common_screen *rscreen, unsigned type,
			  uint64_t begin)
{
	return r600_end_mmio_counter(rscreen, begin,
				     busy_index_from_type(rscreen, type));
}