gallium/hash_table: turn it into a wrapper around util/hash_table
src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
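For context, a minimal sketch of the pointer-keyed wrapper API as this file uses it. Only the entry points that actually appear below are shown, and their behavior is inferred from how this file calls them, not from the wrapper's full interface:

#include "util/u_hash_table.h"

static struct hash_table *tab;

static void example(void *dev, void *winsys)
{
   if (!tab)
      tab = util_hash_table_create_ptr_keys(); /* keys hashed/compared as raw pointers */

   util_hash_table_set(tab, dev, winsys);       /* insert or overwrite dev -> winsys */
   void *found = util_hash_table_get(tab, dev); /* lookup; NULL when absent */
   (void)found;

   util_hash_table_remove(tab, dev);            /* drop a single entry */
   if (util_hash_table_count(tab) == 0) {       /* number of live entries */
      util_hash_table_destroy(tab);             /* free the table itself */
      tab = NULL;
   }
}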
/*
 * Copyright © 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
 * Copyright © 2009 Joakim Sindholt <opensource@zhasha.com>
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "amdgpu_cs.h"
#include "amdgpu_public.h"

#include "util/os_file.h"
#include "util/os_misc.h"
#include "util/u_cpu_detect.h"
#include "util/u_hash_table.h"
#include "util/hash_table.h"
#include "util/xmlconfig.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "ac_llvm_util.h"
#include "sid.h"

#ifndef AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#endif

static struct hash_table *dev_tab = NULL;
static simple_mtx_t dev_tab_mutex = _SIMPLE_MTX_INITIALIZER_NP;

DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)

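/* Allow overriding the detected GPU family via the SI_FORCE_FAMILY
 * environment variable. The value must be an LLVM processor name as
 * returned by ac_get_llvm_processor_name(); a match also sets
 * RADEON_NOOP=1 so that no IBs are ever submitted to the kernel. */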
static void handle_env_var_force_family(struct amdgpu_winsys *ws)
{
   const char *family = debug_get_option("SI_FORCE_FAMILY", NULL);
   unsigned i;

   if (!family)
      return;

   for (i = CHIP_TAHITI; i < CHIP_LAST; i++) {
      if (!strcmp(family, ac_get_llvm_processor_name(i))) {
         /* Override family and chip_class. */
         ws->info.family = i;
         ws->info.name = "GCN-NOOP";

         if (i >= CHIP_NAVI10)
            ws->info.chip_class = GFX10;
         else if (i >= CHIP_VEGA10)
            ws->info.chip_class = GFX9;
         else if (i >= CHIP_TONGA)
            ws->info.chip_class = GFX8;
         else if (i >= CHIP_BONAIRE)
            ws->info.chip_class = GFX7;
         else
            ws->info.chip_class = GFX6;

         /* Don't submit any IBs. */
         setenv("RADEON_NOOP", "1", 1);
         return;
      }
   }

   fprintf(stderr, "radeonsi: Unknown family: %s\n", family);
   exit(1);
}

/* Helper function to do the ioctls needed for setup and init. */
static bool do_winsys_init(struct amdgpu_winsys *ws,
                           const struct pipe_screen_config *config,
                           int fd)
{
   if (!ac_query_gpu_info(fd, ws->dev, &ws->info, &ws->amdinfo))
      goto fail;

   /* TODO: Enable this once the kernel handles it efficiently. */
   if (ws->info.has_dedicated_vram)
      ws->info.has_local_buffers = false;

   handle_env_var_force_family(ws);

   ws->addrlib = amdgpu_addr_create(&ws->info, &ws->amdinfo, &ws->info.max_alignment);
   if (!ws->addrlib) {
      fprintf(stderr, "amdgpu: Cannot create addrlib.\n");
      goto fail;
   }

   ws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL ||
                  strstr(debug_get_option("AMD_DEBUG", ""), "check_vm") != NULL;
   ws->debug_all_bos = debug_get_option_all_bos();
   ws->reserve_vmid = strstr(debug_get_option("R600_DEBUG", ""), "reserve_vmid") != NULL ||
                      strstr(debug_get_option("AMD_DEBUG", ""), "reserve_vmid") != NULL;
   ws->zero_all_vram_allocs = strstr(debug_get_option("R600_DEBUG", ""), "zerovram") != NULL ||
                              strstr(debug_get_option("AMD_DEBUG", ""), "zerovram") != NULL ||
                              driQueryOptionb(config->options, "radeonsi_zerovram");

   return true;

fail:
   amdgpu_device_deinitialize(ws->dev);
   ws->dev = NULL;
   return false;
}

static void do_winsys_deinit(struct amdgpu_winsys *ws)
{
   if (ws->reserve_vmid)
      amdgpu_vm_unreserve_vmid(ws->dev, 0);

   if (util_queue_is_initialized(&ws->cs_queue))
      util_queue_destroy(&ws->cs_queue);

   simple_mtx_destroy(&ws->bo_fence_lock);
   for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
      if (ws->bo_slabs[i].groups)
         pb_slabs_deinit(&ws->bo_slabs[i]);
   }
   pb_cache_deinit(&ws->bo_cache);
   util_hash_table_destroy(ws->bo_export_table);
   simple_mtx_destroy(&ws->sws_list_lock);
   simple_mtx_destroy(&ws->global_bo_list_lock);
   simple_mtx_destroy(&ws->bo_export_table_lock);

   AddrDestroy(ws->addrlib);
   amdgpu_device_deinitialize(ws->dev);
   FREE(ws);
}

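/* Destroy a screen winsys. The shared amdgpu_winsys is removed from
 * dev_tab and fully deinitialized only when its reference count drops
 * to zero. */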
static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
{
   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
   struct amdgpu_winsys *ws = sws->aws;
   bool destroy;

   /* When the reference counter drops to zero, remove the device pointer
    * from the table.
    * This must happen while the mutex is locked, so that
    * amdgpu_winsys_create in another thread doesn't get the winsys
    * from the table when the counter drops to 0.
    */
   simple_mtx_lock(&dev_tab_mutex);

   destroy = pipe_reference(&ws->reference, NULL);
   if (destroy && dev_tab) {
      util_hash_table_remove(dev_tab, ws->dev);
      if (util_hash_table_count(dev_tab) == 0) {
         util_hash_table_destroy(dev_tab);
         dev_tab = NULL;
      }
   }

   simple_mtx_unlock(&dev_tab_mutex);

   if (destroy)
      do_winsys_deinit(ws);

   close(sws->fd);
   FREE(rws);
}

static void amdgpu_winsys_query_info(struct radeon_winsys *rws,
                                     struct radeon_info *info)
{
   *info = amdgpu_winsys(rws)->info;
}

static bool amdgpu_cs_request_feature(struct radeon_cmdbuf *rcs,
                                      enum radeon_feature_id fid,
                                      bool enable)
{
   return false;
}

static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
                                   enum radeon_value_id value)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_heap_info heap;
   uint64_t retval = 0;

   switch (value) {
   case RADEON_REQUESTED_VRAM_MEMORY:
      return ws->allocated_vram;
   case RADEON_REQUESTED_GTT_MEMORY:
      return ws->allocated_gtt;
   case RADEON_MAPPED_VRAM:
      return ws->mapped_vram;
   case RADEON_MAPPED_GTT:
      return ws->mapped_gtt;
   case RADEON_BUFFER_WAIT_TIME_NS:
      return ws->buffer_wait_time;
   case RADEON_NUM_MAPPED_BUFFERS:
      return ws->num_mapped_buffers;
   case RADEON_TIMESTAMP:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
      return retval;
   case RADEON_NUM_GFX_IBS:
      return ws->num_gfx_IBs;
   case RADEON_NUM_SDMA_IBS:
      return ws->num_sdma_IBs;
   case RADEON_GFX_BO_LIST_COUNTER:
      return ws->gfx_bo_list_counter;
   case RADEON_GFX_IB_SIZE_COUNTER:
      return ws->gfx_ib_size_counter;
   case RADEON_NUM_BYTES_MOVED:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
      return retval;
   case RADEON_NUM_EVICTIONS:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
      return retval;
   case RADEON_NUM_VRAM_CPU_PAGE_FAULTS:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS, 8, &retval);
      return retval;
   case RADEON_VRAM_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
      return heap.heap_usage;
   case RADEON_VRAM_VIS_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &heap);
      return heap.heap_usage;
   case RADEON_GTT_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
      return heap.heap_usage;
   case RADEON_GPU_TEMPERATURE:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &retval);
      return retval;
   case RADEON_CURRENT_SCLK:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_SCLK, 4, &retval);
      return retval;
   case RADEON_CURRENT_MCLK:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_MCLK, 4, &retval);
      return retval;
   case RADEON_CS_THREAD_TIME:
      return util_queue_get_thread_time_nano(&ws->cs_queue, 0);
   }
   return 0;
}

static bool amdgpu_read_registers(struct radeon_winsys *rws,
                                  unsigned reg_offset,
                                  unsigned num_registers, uint32_t *out)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);

   return amdgpu_read_mm_registers(ws->dev, reg_offset / 4, num_registers,
                                   0xffffffff, 0, out) == 0;
}

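/* Unreference a screen winsys. Returns true if this was the last reference,
 * in which case the caller must proceed with destruction. The screen winsys
 * is unlinked from the shared winsys' list first, so amdgpu_winsys_create()
 * can't hand it out again, and its KMS handles are closed. */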
static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
{
   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
   struct amdgpu_winsys *aws = sws->aws;
   bool ret;

   simple_mtx_lock(&aws->sws_list_lock);

   ret = pipe_reference(&sws->reference, NULL);
   if (ret) {
      struct amdgpu_screen_winsys **sws_iter;

      /* Remove this amdgpu_screen_winsys from amdgpu_winsys' list, so that
       * amdgpu_winsys_create can't re-use it anymore
       */
      for (sws_iter = &aws->sws_list; *sws_iter; sws_iter = &(*sws_iter)->next) {
         if (*sws_iter == sws) {
            *sws_iter = sws->next;
            break;
         }
      }
   }

   simple_mtx_unlock(&aws->sws_list_lock);

   if (ret && sws->kms_handles) {
      /* Zero-initialize so the struct's pad field isn't passed to the
       * kernel as garbage. */
      struct drm_gem_close args = {0};

      hash_table_foreach(sws->kms_handles, entry) {
         args.handle = (uintptr_t)entry->data;
         drmIoctl(sws->fd, DRM_IOCTL_GEM_CLOSE, &args);
      }
      _mesa_hash_table_destroy(sws->kms_handles, NULL);
   }

   return ret;
}

static void amdgpu_pin_threads_to_L3_cache(struct radeon_winsys *rws,
                                           unsigned cache)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);

   util_pin_thread_to_L3(ws->cs_queue.threads[0], cache,
                         util_cpu_caps.cores_per_L3);
}

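/* sws->kms_handles maps amdgpu_winsys_bo pointers to the GEM handles
 * opened for them on this screen's DRM fd. Keys are compared by pointer
 * identity and hashed by the BO's KMS handle. */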
static uint32_t kms_handle_hash(const void *key)
{
   const struct amdgpu_winsys_bo *bo = key;

   return bo->u.real.kms_handle;
}

static bool kms_handle_equals(const void *a, const void *b)
{
   return a == b;
}

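/* There is one shared amdgpu_winsys (aws) per physical device, deduplicated
 * through dev_tab, and one amdgpu_screen_winsys (ws) per screen referencing
 * it. Screen winsyses whose fds point at the same file description are
 * deduplicated as well. */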
PUBLIC struct radeon_winsys *
amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
                     radeon_screen_create_t screen_create)
{
   struct amdgpu_screen_winsys *ws;
   struct amdgpu_winsys *aws;
   amdgpu_device_handle dev;
   uint32_t drm_major, drm_minor;
   int r;

   ws = CALLOC_STRUCT(amdgpu_screen_winsys);
   if (!ws)
      return NULL;

   pipe_reference_init(&ws->reference, 1);
   ws->fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);

   /* Look up the winsys from the dev table. */
   simple_mtx_lock(&dev_tab_mutex);
   if (!dev_tab)
      dev_tab = util_hash_table_create_ptr_keys();

   /* Initialize the amdgpu device. This should always return the same pointer
    * for the same fd. */
   r = amdgpu_device_initialize(ws->fd, &drm_major, &drm_minor, &dev);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
      goto fail;
   }

   /* Lookup a winsys if we have already created one for this device. */
   aws = util_hash_table_get(dev_tab, dev);
   if (aws) {
      struct amdgpu_screen_winsys *sws_iter;

      /* Release the device handle, because we don't need it anymore.
       * This function is returning an existing winsys instance, which
       * has its own device handle.
       */
      amdgpu_device_deinitialize(dev);

      simple_mtx_lock(&aws->sws_list_lock);
      for (sws_iter = aws->sws_list; sws_iter; sws_iter = sws_iter->next) {
         r = os_same_file_description(sws_iter->fd, ws->fd);

         if (r == 0) {
            close(ws->fd);
            FREE(ws);
            ws = sws_iter;
            pipe_reference(NULL, &ws->reference);
            simple_mtx_unlock(&aws->sws_list_lock);
            goto unlock;
         } else if (r < 0) {
            static bool logged;

            if (!logged) {
               os_log_message("amdgpu: os_same_file_description couldn't "
                              "determine if two DRM fds reference the same "
                              "file description.\n"
                              "If they do, bad things may happen!\n");
               logged = true;
            }
         }
      }
      simple_mtx_unlock(&aws->sws_list_lock);

      ws->kms_handles = _mesa_hash_table_create(NULL, kms_handle_hash,
                                                kms_handle_equals);
      if (!ws->kms_handles)
         goto fail;

      pipe_reference(NULL, &aws->reference);
   } else {
      /* Create a new winsys. */
      aws = CALLOC_STRUCT(amdgpu_winsys);
      if (!aws)
         goto fail;

      aws->dev = dev;
      aws->fd = ws->fd;
      aws->info.drm_major = drm_major;
      aws->info.drm_minor = drm_minor;

      if (!do_winsys_init(aws, config, fd))
         goto fail_alloc;

      /* Create managers. */
      pb_cache_init(&aws->bo_cache, RADEON_MAX_CACHED_HEAPS,
                    500000, aws->check_vm ? 1.0f : 2.0f, 0,
                    (aws->info.vram_size + aws->info.gart_size) / 8,
                    amdgpu_bo_destroy, amdgpu_bo_can_reclaim);

      unsigned min_slab_order = 9; /* 512 bytes */
      unsigned max_slab_order = 18; /* 256 KB - higher numbers increase memory usage */
      unsigned num_slab_orders_per_allocator = (max_slab_order - min_slab_order) /
                                               NUM_SLAB_ALLOCATORS;

      /* Divide the size order range among slab managers. */
      for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
         unsigned min_order = min_slab_order;
         unsigned max_order = MIN2(min_order + num_slab_orders_per_allocator,
                                   max_slab_order);

         if (!pb_slabs_init(&aws->bo_slabs[i],
                            min_order, max_order,
                            RADEON_MAX_SLAB_HEAPS,
                            aws,
                            amdgpu_bo_can_reclaim_slab,
                            amdgpu_bo_slab_alloc,
                            amdgpu_bo_slab_free)) {
            amdgpu_winsys_destroy(&ws->base);
            simple_mtx_unlock(&dev_tab_mutex);
            return NULL;
         }

         min_slab_order = max_order + 1;
      }

      aws->info.min_alloc_size = 1 << aws->bo_slabs[0].min_order;

      /* init reference */
      pipe_reference_init(&aws->reference, 1);

      list_inithead(&aws->global_bo_list);
      aws->bo_export_table = util_hash_table_create_ptr_keys();

      (void) simple_mtx_init(&aws->sws_list_lock, mtx_plain);
      (void) simple_mtx_init(&aws->global_bo_list_lock, mtx_plain);
      (void) simple_mtx_init(&aws->bo_fence_lock, mtx_plain);
      (void) simple_mtx_init(&aws->bo_export_table_lock, mtx_plain);

      if (!util_queue_init(&aws->cs_queue, "cs", 8, 1,
                           UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
         amdgpu_winsys_destroy(&ws->base);
         simple_mtx_unlock(&dev_tab_mutex);
         return NULL;
      }

      util_hash_table_set(dev_tab, dev, aws);

      if (aws->reserve_vmid) {
         r = amdgpu_vm_reserve_vmid(dev, 0);
         if (r) {
            amdgpu_winsys_destroy(&ws->base);
            simple_mtx_unlock(&dev_tab_mutex);
            return NULL;
         }
      }
   }

   ws->aws = aws;

   /* Set functions. */
   ws->base.unref = amdgpu_winsys_unref;
   ws->base.destroy = amdgpu_winsys_destroy;
   ws->base.query_info = amdgpu_winsys_query_info;
   ws->base.cs_request_feature = amdgpu_cs_request_feature;
   ws->base.query_value = amdgpu_query_value;
   ws->base.read_registers = amdgpu_read_registers;
   ws->base.pin_threads_to_L3_cache = amdgpu_pin_threads_to_L3_cache;

   amdgpu_bo_init_functions(ws);
   amdgpu_cs_init_functions(ws);
   amdgpu_surface_init_functions(ws);

   /* Create the screen at the end. The winsys must be initialized
    * completely.
    *
    * Alternatively, we could create the screen based on "ws->gen"
    * and link all drivers into one binary blob. */
   ws->base.screen = screen_create(&ws->base, config);
   if (!ws->base.screen) {
      amdgpu_winsys_destroy(&ws->base);
      simple_mtx_unlock(&dev_tab_mutex);
      return NULL;
   }

   simple_mtx_lock(&aws->sws_list_lock);
   ws->next = aws->sws_list;
   aws->sws_list = ws;
   simple_mtx_unlock(&aws->sws_list_lock);

unlock:
   /* We must unlock the mutex once the winsys is fully initialized, so that
    * other threads attempting to create the winsys from the same fd will
    * get a fully initialized winsys and not just half-way initialized. */
   simple_mtx_unlock(&dev_tab_mutex);

   return &ws->base;

fail_alloc:
   FREE(aws);
fail:
   if (ws->kms_handles)
      _mesa_hash_table_destroy(ws->kms_handles, NULL);
   close(ws->fd);
   FREE(ws);
   simple_mtx_unlock(&dev_tab_mutex);
   return NULL;
}