winsys/amdgpu: Keep a list of amdgpu_screen_winsyses in amdgpu_winsys
[mesa.git] / src / gallium / winsys / amdgpu / drm / amdgpu_winsys.c
/*
 * Copyright © 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
 * Copyright © 2009 Joakim Sindholt <opensource@zhasha.com>
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "amdgpu_cs.h"
#include "amdgpu_public.h"

#include "util/u_cpu_detect.h"
#include "util/u_hash_table.h"
#include "util/hash_table.h"
#include "util/xmlconfig.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "ac_llvm_util.h"
#include "sid.h"

#ifndef AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#endif

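/* Maps an amdgpu_device_handle to its amdgpu_winsys, so that screens
 * opened on different fds of the same device share one winsys. All
 * accesses are guarded by dev_tab_mutex.
 */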
static struct util_hash_table *dev_tab = NULL;
static simple_mtx_t dev_tab_mutex = _SIMPLE_MTX_INITIALIZER_NP;

DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)

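/* SI_FORCE_FAMILY takes an LLVM processor name and makes the driver
 * pretend to be that GPU for compiler testing, e.g.
 * SI_FORCE_FAMILY=gfx900 reports a GFX9 (Vega10-class) chip. Because
 * RADEON_NOOP is set below, no IBs are ever submitted to the kernel.
 */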
static void handle_env_var_force_family(struct amdgpu_winsys *ws)
{
   const char *family = debug_get_option("SI_FORCE_FAMILY", NULL);
   unsigned i;

   if (!family)
      return;

   for (i = CHIP_TAHITI; i < CHIP_LAST; i++) {
      if (!strcmp(family, ac_get_llvm_processor_name(i))) {
         /* Override family and chip_class. */
         ws->info.family = i;
         ws->info.name = "GCN-NOOP";

         if (i >= CHIP_NAVI10)
            ws->info.chip_class = GFX10;
         else if (i >= CHIP_VEGA10)
            ws->info.chip_class = GFX9;
         else if (i >= CHIP_TONGA)
            ws->info.chip_class = GFX8;
         else if (i >= CHIP_BONAIRE)
            ws->info.chip_class = GFX7;
         else
            ws->info.chip_class = GFX6;

         /* Don't submit any IBs. */
         setenv("RADEON_NOOP", "1", 1);
         return;
      }
   }

   fprintf(stderr, "radeonsi: Unknown family: %s\n", family);
   exit(1);
}

/* Helper function to do the ioctls needed for setup and init. */
static bool do_winsys_init(struct amdgpu_winsys *ws,
                           const struct pipe_screen_config *config,
                           int fd)
{
   if (!ac_query_gpu_info(fd, ws->dev, &ws->info, &ws->amdinfo))
      goto fail;

   /* TODO: Enable this once the kernel handles it efficiently. */
   if (ws->info.has_dedicated_vram)
      ws->info.has_local_buffers = false;

   handle_env_var_force_family(ws);

   ws->addrlib = amdgpu_addr_create(&ws->info, &ws->amdinfo, &ws->info.max_alignment);
   if (!ws->addrlib) {
      fprintf(stderr, "amdgpu: Cannot create addrlib.\n");
      goto fail;
   }

   ws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL ||
                  strstr(debug_get_option("AMD_DEBUG", ""), "check_vm") != NULL;
   ws->debug_all_bos = debug_get_option_all_bos();
   ws->reserve_vmid = strstr(debug_get_option("R600_DEBUG", ""), "reserve_vmid") != NULL ||
                      strstr(debug_get_option("AMD_DEBUG", ""), "reserve_vmid") != NULL;
   ws->zero_all_vram_allocs = strstr(debug_get_option("R600_DEBUG", ""), "zerovram") != NULL ||
                              strstr(debug_get_option("AMD_DEBUG", ""), "zerovram") != NULL ||
                              driQueryOptionb(config->options, "radeonsi_zerovram");

   return true;

fail:
   amdgpu_device_deinitialize(ws->dev);
   ws->dev = NULL;
   return false;
}

static void do_winsys_deinit(struct amdgpu_winsys *ws)
{
   if (ws->reserve_vmid)
      amdgpu_vm_unreserve_vmid(ws->dev, 0);

   if (util_queue_is_initialized(&ws->cs_queue))
      util_queue_destroy(&ws->cs_queue);

   simple_mtx_destroy(&ws->bo_fence_lock);
   for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
      if (ws->bo_slabs[i].groups)
         pb_slabs_deinit(&ws->bo_slabs[i]);
   }
   pb_cache_deinit(&ws->bo_cache);
   util_hash_table_destroy(ws->bo_export_table);
   simple_mtx_destroy(&ws->sws_list_lock);
   simple_mtx_destroy(&ws->global_bo_list_lock);
   simple_mtx_destroy(&ws->bo_export_table_lock);

   AddrDestroy(ws->addrlib);
   amdgpu_device_deinitialize(ws->dev);
   FREE(ws);
}

static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
{
   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
   struct amdgpu_winsys *ws = sws->aws;
   bool destroy;

   /* When the reference counter drops to zero, remove the device pointer
    * from the table.
    * This must happen while the mutex is locked, so that
    * amdgpu_winsys_create in another thread doesn't get the winsys
    * from the table when the counter drops to 0.
    */
   simple_mtx_lock(&dev_tab_mutex);

   destroy = pipe_reference(&ws->reference, NULL);
   if (destroy && dev_tab) {
      util_hash_table_remove(dev_tab, ws->dev);
      if (util_hash_table_count(dev_tab) == 0) {
         util_hash_table_destroy(dev_tab);
         dev_tab = NULL;
      }
   }

   simple_mtx_unlock(&dev_tab_mutex);

   if (destroy) {
      do_winsys_deinit(ws);
   } else {
      struct amdgpu_screen_winsys **sws_iter;

      /* Remove this amdgpu_screen_winsys from amdgpu_winsys' list */
      simple_mtx_lock(&ws->sws_list_lock);
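      /* Iterate via a pointer to each link field, so unlinking the list
       * head and an interior element take the same code path.
       */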
      for (sws_iter = &ws->sws_list; *sws_iter; sws_iter = &(*sws_iter)->next) {
         if (*sws_iter == sws) {
            *sws_iter = sws->next;
            break;
         }
      }
      simple_mtx_unlock(&ws->sws_list_lock);
   }

   close(sws->fd);
   FREE(rws);
}

static void amdgpu_winsys_query_info(struct radeon_winsys *rws,
                                     struct radeon_info *info)
{
   *info = amdgpu_winsys(rws)->info;
}

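/* Unlike the radeon winsys, amdgpu has no per-CS features that userspace
 * must request from the kernel, so this always reports failure.
 */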
static bool amdgpu_cs_request_feature(struct radeon_cmdbuf *rcs,
                                      enum radeon_feature_id fid,
                                      bool enable)
{
   return false;
}

static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
                                   enum radeon_value_id value)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_heap_info heap;
   uint64_t retval = 0;

   switch (value) {
   case RADEON_REQUESTED_VRAM_MEMORY:
      return ws->allocated_vram;
   case RADEON_REQUESTED_GTT_MEMORY:
      return ws->allocated_gtt;
   case RADEON_MAPPED_VRAM:
      return ws->mapped_vram;
   case RADEON_MAPPED_GTT:
      return ws->mapped_gtt;
   case RADEON_BUFFER_WAIT_TIME_NS:
      return ws->buffer_wait_time;
   case RADEON_NUM_MAPPED_BUFFERS:
      return ws->num_mapped_buffers;
   case RADEON_TIMESTAMP:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
      return retval;
   case RADEON_NUM_GFX_IBS:
      return ws->num_gfx_IBs;
   case RADEON_NUM_SDMA_IBS:
      return ws->num_sdma_IBs;
   case RADEON_GFX_BO_LIST_COUNTER:
      return ws->gfx_bo_list_counter;
   case RADEON_GFX_IB_SIZE_COUNTER:
      return ws->gfx_ib_size_counter;
   case RADEON_NUM_BYTES_MOVED:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
      return retval;
   case RADEON_NUM_EVICTIONS:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
      return retval;
   case RADEON_NUM_VRAM_CPU_PAGE_FAULTS:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS, 8, &retval);
      return retval;
   case RADEON_VRAM_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
      return heap.heap_usage;
   case RADEON_VRAM_VIS_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &heap);
      return heap.heap_usage;
   case RADEON_GTT_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
      return heap.heap_usage;
   case RADEON_GPU_TEMPERATURE:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &retval);
      return retval;
   case RADEON_CURRENT_SCLK:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_SCLK, 4, &retval);
      return retval;
   case RADEON_CURRENT_MCLK:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_MCLK, 4, &retval);
      return retval;
   case RADEON_CS_THREAD_TIME:
      return util_queue_get_thread_time_nano(&ws->cs_queue, 0);
   }
   return 0;
}

static bool amdgpu_read_registers(struct radeon_winsys *rws,
                                  unsigned reg_offset,
                                  unsigned num_registers, uint32_t *out)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
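   /* amdgpu_read_mm_registers addresses registers by dword offset, hence
    * the division of the byte offset by 4.
    */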
   return amdgpu_read_mm_registers(ws->dev, reg_offset / 4, num_registers,
                                   0xffffffff, 0, out) == 0;
}

static unsigned hash_pointer(void *key)
{
   return _mesa_hash_pointer(key);
}

static int compare_pointers(void *key1, void *key2)
{
   return key1 != key2;
}

static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
{
   /* radeon_winsys corresponds to amdgpu_screen_winsys, which is never
    * referenced multiple times, so amdgpu_winsys_destroy always needs to be
    * called. It handles reference counting for amdgpu_winsys.
    */
   return true;
}

static void amdgpu_pin_threads_to_L3_cache(struct radeon_winsys *rws,
                                           unsigned cache)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
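   /* The "cs" queue is created with a single thread (see util_queue_init
    * below), so pinning threads[0] covers all CS submissions.
    */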
   util_pin_thread_to_L3(ws->cs_queue.threads[0], cache,
                         util_cpu_caps.cores_per_L3);
}

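/* One amdgpu_winsys holds the device-wide state and may be shared by
 * several amdgpu_screen_winsyses (one per pipe_screen / DRM fd), which
 * are linked together through aws->sws_list.
 */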
PUBLIC struct radeon_winsys *
amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
                     radeon_screen_create_t screen_create)
{
   struct amdgpu_screen_winsys *ws;
   struct amdgpu_winsys *aws;
   amdgpu_device_handle dev;
   uint32_t drm_major, drm_minor, r;

   ws = CALLOC_STRUCT(amdgpu_screen_winsys);
   if (!ws)
      return NULL;

   ws->fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);

   /* Look up the winsys from the dev table. */
   simple_mtx_lock(&dev_tab_mutex);
   if (!dev_tab)
      dev_tab = util_hash_table_create(hash_pointer, compare_pointers);

   /* Initialize the amdgpu device. This should always return the same pointer
    * for the same fd. */
   r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
      goto fail;
   }

   /* Look up a winsys if we have already created one for this device. */
   aws = util_hash_table_get(dev_tab, dev);
   if (aws) {
      pipe_reference(NULL, &aws->reference);

      /* Release the device handle, because we don't need it anymore.
       * This function is returning an existing winsys instance, which
       * has its own device handle.
       */
      amdgpu_device_deinitialize(dev);
   } else {
      /* Create a new winsys. */
      aws = CALLOC_STRUCT(amdgpu_winsys);
      if (!aws)
         goto fail;

      aws->dev = dev;
      aws->info.drm_major = drm_major;
      aws->info.drm_minor = drm_minor;

      if (!do_winsys_init(aws, config, fd))
         goto fail_alloc;

      /* Create managers. */
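      /* Tuning sketch (assuming pb_cache's usecs / size-factor /
       * maximum-size parameters): idle buffers may be recycled for up to
       * 0.5 s, a cached buffer may serve a request down to half its size
       * (exact size only with check_vm), and the cache is capped at 1/8th
       * of VRAM + GTT.
       */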
      pb_cache_init(&aws->bo_cache, RADEON_MAX_CACHED_HEAPS,
                    500000, aws->check_vm ? 1.0f : 2.0f, 0,
                    (aws->info.vram_size + aws->info.gart_size) / 8,
                    amdgpu_bo_destroy, amdgpu_bo_can_reclaim);

      unsigned min_slab_order = 9; /* 512 bytes */
      unsigned max_slab_order = 18; /* 256 KB - higher numbers increase memory usage */
      unsigned num_slab_orders_per_allocator = (max_slab_order - min_slab_order) /
                                               NUM_SLAB_ALLOCATORS;

      /* Divide the size order range among slab managers. */
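      /* E.g. with NUM_SLAB_ALLOCATORS == 3, orders 9..18 split into
       * 9-12, 13-16 and 17-18, i.e. one allocator serves 512B-4KB
       * suballocations, the next 8KB-64KB, and the last 128KB-256KB.
       */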
      for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
         unsigned min_order = min_slab_order;
         unsigned max_order = MIN2(min_order + num_slab_orders_per_allocator,
                                   max_slab_order);

         if (!pb_slabs_init(&aws->bo_slabs[i],
                            min_order, max_order,
                            RADEON_MAX_SLAB_HEAPS,
                            aws,
                            amdgpu_bo_can_reclaim_slab,
                            amdgpu_bo_slab_alloc,
                            amdgpu_bo_slab_free)) {
            amdgpu_winsys_destroy(&ws->base);
            simple_mtx_unlock(&dev_tab_mutex);
            return NULL;
         }

         min_slab_order = max_order + 1;
      }

      aws->info.min_alloc_size = 1 << aws->bo_slabs[0].min_order;

      /* init reference */
      pipe_reference_init(&aws->reference, 1);

      list_inithead(&aws->global_bo_list);
      aws->bo_export_table = util_hash_table_create(hash_pointer, compare_pointers);

      (void) simple_mtx_init(&aws->sws_list_lock, mtx_plain);
      (void) simple_mtx_init(&aws->global_bo_list_lock, mtx_plain);
      (void) simple_mtx_init(&aws->bo_fence_lock, mtx_plain);
      (void) simple_mtx_init(&aws->bo_export_table_lock, mtx_plain);

      if (!util_queue_init(&aws->cs_queue, "cs", 8, 1,
                           UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
         amdgpu_winsys_destroy(&ws->base);
         simple_mtx_unlock(&dev_tab_mutex);
         return NULL;
      }

      util_hash_table_set(dev_tab, dev, aws);

      if (aws->reserve_vmid) {
         r = amdgpu_vm_reserve_vmid(dev, 0);
         if (r) {
            amdgpu_winsys_destroy(&ws->base);
            simple_mtx_unlock(&dev_tab_mutex);
            return NULL;
         }
      }
   }

   ws->aws = aws;

   /* Set functions. */
   ws->base.unref = amdgpu_winsys_unref;
   ws->base.destroy = amdgpu_winsys_destroy;
   ws->base.query_info = amdgpu_winsys_query_info;
   ws->base.cs_request_feature = amdgpu_cs_request_feature;
   ws->base.query_value = amdgpu_query_value;
   ws->base.read_registers = amdgpu_read_registers;
   ws->base.pin_threads_to_L3_cache = amdgpu_pin_threads_to_L3_cache;

   amdgpu_bo_init_functions(ws);
   amdgpu_cs_init_functions(ws);
   amdgpu_surface_init_functions(ws);

   /* Create the screen at the end. The winsys must be initialized
    * completely.
    *
    * Alternatively, we could create the screen based on "ws->gen"
    * and link all drivers into one binary blob. */
   ws->base.screen = screen_create(&ws->base, config);
   if (!ws->base.screen) {
      amdgpu_winsys_destroy(&ws->base);
      simple_mtx_unlock(&dev_tab_mutex);
      return NULL;
   }

   simple_mtx_lock(&aws->sws_list_lock);
   ws->next = aws->sws_list;
   aws->sws_list = ws;
   simple_mtx_unlock(&aws->sws_list_lock);

   /* We must unlock the mutex once the winsys is fully initialized, so that
    * other threads attempting to create the winsys from the same fd will
    * get a fully initialized winsys and not just half-way initialized. */
   simple_mtx_unlock(&dev_tab_mutex);

   return &ws->base;

fail_alloc:
   FREE(aws);
fail:
   close(ws->fd);
   FREE(ws);
   simple_mtx_unlock(&dev_tab_mutex);
   return NULL;
}
472 }