/*
 * Copyright © 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
 * Copyright © 2009 Joakim Sindholt <opensource@zhasha.com>
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
#include "amdgpu_cs.h"
#include "amdgpu_public.h"

#include "util/os_file.h"
#include "util/os_misc.h"
#include "util/u_cpu_detect.h"
#include "util/u_hash_table.h"
#include "util/hash_table.h"
#include "util/xmlconfig.h"
#include "drm-uapi/amdgpu_drm.h"
#include <xf86drm.h>
#include <stdio.h>
#include "ac_llvm_util.h"
static struct hash_table *dev_tab = NULL;
static simple_mtx_t dev_tab_mutex = _SIMPLE_MTX_INITIALIZER_NP;

DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)
static void handle_env_var_force_family(struct amdgpu_winsys *ws)
{
   const char *family = debug_get_option("SI_FORCE_FAMILY", NULL);
   unsigned i;

   if (!family)
      return;

   for (i = CHIP_TAHITI; i < CHIP_LAST; i++) {
      if (!strcmp(family, ac_get_llvm_processor_name(i))) {
         /* Override family and chip_class. */
         ws->info.family = i;
         ws->info.name = "GCN-NOOP";

         if (i >= CHIP_SIENNA_CICHLID)
            ws->info.chip_class = GFX10_3;
         else if (i >= CHIP_NAVI10)
            ws->info.chip_class = GFX10;
         else if (i >= CHIP_VEGA10)
            ws->info.chip_class = GFX9;
         else if (i >= CHIP_TONGA)
            ws->info.chip_class = GFX8;
         else if (i >= CHIP_BONAIRE)
            ws->info.chip_class = GFX7;
         else
            ws->info.chip_class = GFX6;

         /* Don't submit any IBs. */
         setenv("RADEON_NOOP", "1", 1);
         return;
      }
   }

   fprintf(stderr, "radeonsi: Unknown family: %s\n", family);
   exit(1);
}
/* Helper function to do the ioctls needed for setup and init. */
static bool do_winsys_init(struct amdgpu_winsys *ws,
                           const struct pipe_screen_config *config,
                           int fd)
{
   if (!ac_query_gpu_info(fd, ws->dev, &ws->info, &ws->amdinfo))
      goto fail;

   /* TODO: Enable this once the kernel handles it efficiently. */
   if (ws->info.has_dedicated_vram)
      ws->info.has_local_buffers = false;

   handle_env_var_force_family(ws);

   ws->addrlib = ac_addrlib_create(&ws->info, &ws->amdinfo, &ws->info.max_alignment);
   if (!ws->addrlib) {
      fprintf(stderr, "amdgpu: Cannot create addrlib.\n");
      goto fail;
   }

   ws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL ||
                  strstr(debug_get_option("AMD_DEBUG", ""), "check_vm") != NULL;
   ws->debug_all_bos = debug_get_option_all_bos();
   ws->reserve_vmid = strstr(debug_get_option("R600_DEBUG", ""), "reserve_vmid") != NULL ||
                      strstr(debug_get_option("AMD_DEBUG", ""), "reserve_vmid") != NULL;
   ws->zero_all_vram_allocs = strstr(debug_get_option("R600_DEBUG", ""), "zerovram") != NULL ||
                              strstr(debug_get_option("AMD_DEBUG", ""), "zerovram") != NULL ||
                              driQueryOptionb(config->options, "radeonsi_zerovram");
   ws->secure = strstr(debug_get_option("AMD_DEBUG", ""), "tmz");
   if (ws->secure)
      fprintf(stderr, "=== TMZ usage enabled ===\n");

   return true;

fail:
   amdgpu_device_deinitialize(ws->dev);
   ws->dev = NULL;
   return false;
}
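
/* Undo everything do_winsys_init and amdgpu_winsys_create set up: the VMID
 * reservation, the CS submission queue, the slab and cache allocators, the
 * hash tables and mutexes, addrlib, and the device handle itself. */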
static void do_winsys_deinit(struct amdgpu_winsys *ws)
{
   if (ws->reserve_vmid)
      amdgpu_vm_unreserve_vmid(ws->dev, 0);

   if (util_queue_is_initialized(&ws->cs_queue))
      util_queue_destroy(&ws->cs_queue);

   simple_mtx_destroy(&ws->bo_fence_lock);
   for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
      if (ws->bo_slabs[i].groups)
         pb_slabs_deinit(&ws->bo_slabs[i]);
   }
   pb_cache_deinit(&ws->bo_cache);
   _mesa_hash_table_destroy(ws->bo_export_table, NULL);
   simple_mtx_destroy(&ws->sws_list_lock);
   simple_mtx_destroy(&ws->global_bo_list_lock);
   simple_mtx_destroy(&ws->bo_export_table_lock);

   ac_addrlib_destroy(ws->addrlib);
   amdgpu_device_deinitialize(ws->dev);
   FREE(ws);
}
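
/* Destructor for a screen winsys. The underlying amdgpu_winsys is shared and
 * reference-counted; it is only deinitialized when the last reference on the
 * device is dropped. */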
static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
{
   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
   struct amdgpu_winsys *ws = sws->aws;
   bool destroy;

   /* When the reference counter drops to zero, remove the device pointer
    * from the table.
    *
    * This must happen while the mutex is locked, so that
    * amdgpu_winsys_create in another thread doesn't get the winsys
    * from the table when the counter drops to 0.
    */
   simple_mtx_lock(&dev_tab_mutex);

   destroy = pipe_reference(&ws->reference, NULL);
   if (destroy && dev_tab) {
      _mesa_hash_table_remove_key(dev_tab, ws->dev);
      if (_mesa_hash_table_num_entries(dev_tab) == 0) {
         _mesa_hash_table_destroy(dev_tab, NULL);
         dev_tab = NULL;
      }
   }

   simple_mtx_unlock(&dev_tab_mutex);

   if (destroy)
      do_winsys_deinit(ws);

   close(sws->fd);
   FREE(rws);
}
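
/* Copy out the device information gathered by do_winsys_init. */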
static void amdgpu_winsys_query_info(struct radeon_winsys *rws,
                                     struct radeon_info *info)
{
   *info = amdgpu_winsys(rws)->info;
}
/* No optional command-stream features are supported; report them all as
 * unavailable. */
static bool amdgpu_cs_request_feature(struct radeon_cmdbuf *rcs,
                                      enum radeon_feature_id fid,
                                      bool enable)
{
   return false;
}
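
/* Return driver statistics: allocation/mapping counters tracked by the
 * winsys itself, plus memory usage and sensor values queried from the
 * kernel through libdrm. */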
static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
                                   enum radeon_value_id value)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_heap_info heap;
   uint64_t retval = 0;

   switch (value) {
   case RADEON_REQUESTED_VRAM_MEMORY:
      return ws->allocated_vram;
   case RADEON_REQUESTED_GTT_MEMORY:
      return ws->allocated_gtt;
   case RADEON_MAPPED_VRAM:
      return ws->mapped_vram;
   case RADEON_MAPPED_GTT:
      return ws->mapped_gtt;
   case RADEON_BUFFER_WAIT_TIME_NS:
      return ws->buffer_wait_time;
   case RADEON_NUM_MAPPED_BUFFERS:
      return ws->num_mapped_buffers;
   case RADEON_TIMESTAMP:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
      return retval;
   case RADEON_NUM_GFX_IBS:
      return ws->num_gfx_IBs;
   case RADEON_NUM_SDMA_IBS:
      return ws->num_sdma_IBs;
   case RADEON_GFX_BO_LIST_COUNTER:
      return ws->gfx_bo_list_counter;
   case RADEON_GFX_IB_SIZE_COUNTER:
      return ws->gfx_ib_size_counter;
   case RADEON_NUM_BYTES_MOVED:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
      return retval;
   case RADEON_NUM_EVICTIONS:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
      return retval;
   case RADEON_NUM_VRAM_CPU_PAGE_FAULTS:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS, 8, &retval);
      return retval;
   case RADEON_VRAM_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
      return heap.heap_usage;
   case RADEON_VRAM_VIS_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &heap);
      return heap.heap_usage;
   case RADEON_GTT_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
      return heap.heap_usage;
   case RADEON_GPU_TEMPERATURE:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &retval);
      return retval;
   case RADEON_CURRENT_SCLK:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_SCLK, 4, &retval);
      return retval;
   case RADEON_CURRENT_MCLK:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_MCLK, 4, &retval);
      return retval;
   case RADEON_CS_THREAD_TIME:
      return util_queue_get_thread_time_nano(&ws->cs_queue, 0);
   }
   return 0;
}
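
/* Read MMIO registers through the kernel. Note that reg_offset is a byte
 * offset, while the libdrm interface takes a dword offset. */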
static bool amdgpu_read_registers(struct radeon_winsys *rws,
                                  unsigned reg_offset,
                                  unsigned num_registers, uint32_t *out)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);

   return amdgpu_read_mm_registers(ws->dev, reg_offset / 4, num_registers,
                                   0xffffffff, 0, out) == 0;
}
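
/* Drop one reference to a screen winsys. On the last unref, unlink it from
 * the shared winsys' list so amdgpu_winsys_create can't hand it out again,
 * and close all KMS handles it exported for its DRM fd. */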
static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
{
   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
   struct amdgpu_winsys *aws = sws->aws;
   bool ret;

   simple_mtx_lock(&aws->sws_list_lock);

   ret = pipe_reference(&sws->reference, NULL);
   if (ret) {
      struct amdgpu_screen_winsys **sws_iter;
      struct amdgpu_winsys *aws = sws->aws;

      /* Remove this amdgpu_screen_winsys from amdgpu_winsys' list, so that
       * amdgpu_winsys_create can't re-use it anymore.
       */
      for (sws_iter = &aws->sws_list; *sws_iter; sws_iter = &(*sws_iter)->next) {
         if (*sws_iter == sws) {
            *sws_iter = sws->next;
            break;
         }
      }
   }

   simple_mtx_unlock(&aws->sws_list_lock);

   if (ret && sws->kms_handles) {
      struct drm_gem_close args;

      hash_table_foreach(sws->kms_handles, entry) {
         args.handle = (uintptr_t)entry->data;
         drmIoctl(sws->fd, DRM_IOCTL_GEM_CLOSE, &args);
      }
      _mesa_hash_table_destroy(sws->kms_handles, NULL);
   }

   return ret;
}
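
/* Pin the CS submission thread to the given L3 cache complex; on CPUs with
 * multiple L3 caches this presumably keeps it close to the app's threads. */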
static void amdgpu_pin_threads_to_L3_cache(struct radeon_winsys *rws,
                                           unsigned cache)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);

   util_pin_thread_to_L3(ws->cs_queue.threads[0], cache,
                         util_cpu_caps.cores_per_L3);
}
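
/* Hash table callbacks for the per-fd table that maps buffer objects to the
 * KMS handles exported for them. */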
static uint32_t kms_handle_hash(const void *key)
{
   const struct amdgpu_winsys_bo *bo = key;

   return bo->u.real.kms_handle;
}
static bool kms_handle_equals(const void *a, const void *b)
{
   return a == b;
}
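
/* TMZ (Trusted Memory Zone) hooks: whether the winsys was created with
 * secure-submission support, and per-CS query/set of the secure flag. */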
static bool amdgpu_ws_is_secure(struct radeon_winsys *rws)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);

   return ws->secure;
}
static bool amdgpu_cs_is_secure(struct radeon_cmdbuf *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   return cs->csc->secure;
}
static void amdgpu_cs_set_secure(struct radeon_cmdbuf *rcs, bool secure)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   cs->csc->secure = secure;
}
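
/* Entry point: create or reuse the winsys for a DRM fd, then create the
 * driver screen on top of it. All fds referring to the same device share one
 * amdgpu_winsys; each distinct file description gets its own
 * amdgpu_screen_winsys wrapper. */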
PUBLIC struct radeon_winsys *
amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
                     radeon_screen_create_t screen_create)
{
   struct amdgpu_screen_winsys *ws;
   struct amdgpu_winsys *aws;
   amdgpu_device_handle dev;
   uint32_t drm_major, drm_minor;
   int r;

   ws = CALLOC_STRUCT(amdgpu_screen_winsys);
   if (!ws)
      return NULL;

   pipe_reference_init(&ws->reference, 1);
   ws->fd = os_dupfd_cloexec(fd);

   /* Look up the winsys from the dev table. */
   simple_mtx_lock(&dev_tab_mutex);
   if (!dev_tab)
      dev_tab = util_hash_table_create_ptr_keys();

   /* Initialize the amdgpu device. This should always return the same pointer
    * for the same fd. */
   r = amdgpu_device_initialize(ws->fd, &drm_major, &drm_minor, &dev);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
      goto fail;
   }

   /* Lookup a winsys if we have already created one for this device. */
   aws = util_hash_table_get(dev_tab, dev);
   if (aws) {
      struct amdgpu_screen_winsys *sws_iter;

      /* Release the device handle, because we don't need it anymore.
       * This function is returning an existing winsys instance, which
       * has its own device handle.
       */
      amdgpu_device_deinitialize(dev);

      simple_mtx_lock(&aws->sws_list_lock);
      for (sws_iter = aws->sws_list; sws_iter; sws_iter = sws_iter->next) {
         r = os_same_file_description(sws_iter->fd, ws->fd);

         if (r == 0) {
            /* Same file description: reuse the existing screen winsys. */
            close(ws->fd);
            FREE(ws);
            ws = sws_iter;
            pipe_reference(NULL, &ws->reference);
            simple_mtx_unlock(&aws->sws_list_lock);
            goto unlock;
         } else if (r < 0) {
            static bool logged;

            if (!logged) {
               os_log_message("amdgpu: os_same_file_description couldn't "
                              "determine if two DRM fds reference the same "
                              "file description.\n"
                              "If they do, bad things may happen!\n");
               logged = true;
            }
         }
      }
      simple_mtx_unlock(&aws->sws_list_lock);

      ws->kms_handles = _mesa_hash_table_create(NULL, kms_handle_hash,
                                                kms_handle_equals);
      if (!ws->kms_handles)
         goto fail;

      pipe_reference(NULL, &aws->reference);
   } else {
      /* Create a new winsys. */
      aws = CALLOC_STRUCT(amdgpu_winsys);
      if (!aws)
         goto fail;

      aws->dev = dev;
      aws->info.drm_major = drm_major;
      aws->info.drm_minor = drm_minor;

      if (!do_winsys_init(aws, config, fd))
         goto fail_alloc;

      /* Create managers. */
      pb_cache_init(&aws->bo_cache, RADEON_MAX_CACHED_HEAPS,
                    500000, aws->check_vm ? 1.0f : 2.0f, 0,
                    (aws->info.vram_size + aws->info.gart_size) / 8,
                    amdgpu_bo_destroy, amdgpu_bo_can_reclaim);

      unsigned min_slab_order = 9;  /* 512 bytes */
      unsigned max_slab_order = 18; /* 256 KB - higher numbers increase memory usage */
      unsigned num_slab_orders_per_allocator = (max_slab_order - min_slab_order) /
                                               NUM_SLAB_ALLOCATORS;

      /* Divide the size order range among slab managers. */
      for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
         unsigned min_order = min_slab_order;
         unsigned max_order = MIN2(min_order + num_slab_orders_per_allocator,
                                   max_slab_order);

         if (!pb_slabs_init(&aws->bo_slabs[i],
                            min_order, max_order,
                            RADEON_MAX_SLAB_HEAPS,
                            aws,
                            amdgpu_bo_can_reclaim_slab,
                            amdgpu_bo_slab_alloc_normal,
                            amdgpu_bo_slab_free)) {
            amdgpu_winsys_destroy(&ws->base);
            simple_mtx_unlock(&dev_tab_mutex);
            return NULL;
         }

         if (aws->secure && !pb_slabs_init(&aws->bo_slabs_encrypted[i],
                                           min_order, max_order,
                                           RADEON_MAX_SLAB_HEAPS,
                                           aws,
                                           amdgpu_bo_can_reclaim_slab,
                                           amdgpu_bo_slab_alloc_encrypted,
                                           amdgpu_bo_slab_free)) {
            amdgpu_winsys_destroy(&ws->base);
            simple_mtx_unlock(&dev_tab_mutex);
            return NULL;
         }

         min_slab_order = max_order + 1;
      }

      aws->info.min_alloc_size = 1 << aws->bo_slabs[0].min_order;

      pipe_reference_init(&aws->reference, 1);

      list_inithead(&aws->global_bo_list);
      aws->bo_export_table = util_hash_table_create_ptr_keys();

      (void) simple_mtx_init(&aws->sws_list_lock, mtx_plain);
      (void) simple_mtx_init(&aws->global_bo_list_lock, mtx_plain);
      (void) simple_mtx_init(&aws->bo_fence_lock, mtx_plain);
      (void) simple_mtx_init(&aws->bo_export_table_lock, mtx_plain);

      if (!util_queue_init(&aws->cs_queue, "cs", 8, 1,
                           UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
         amdgpu_winsys_destroy(&ws->base);
         simple_mtx_unlock(&dev_tab_mutex);
         return NULL;
      }

      _mesa_hash_table_insert(dev_tab, dev, aws);

      if (aws->reserve_vmid) {
         r = amdgpu_vm_reserve_vmid(dev, 0);
         if (r) {
            amdgpu_winsys_destroy(&ws->base);
            simple_mtx_unlock(&dev_tab_mutex);
            return NULL;
         }
      }
   }

   ws->aws = aws;

   /* Set functions. */
   ws->base.unref = amdgpu_winsys_unref;
   ws->base.destroy = amdgpu_winsys_destroy;
   ws->base.query_info = amdgpu_winsys_query_info;
   ws->base.cs_request_feature = amdgpu_cs_request_feature;
   ws->base.query_value = amdgpu_query_value;
   ws->base.read_registers = amdgpu_read_registers;
   ws->base.pin_threads_to_L3_cache = amdgpu_pin_threads_to_L3_cache;
   ws->base.ws_is_secure = amdgpu_ws_is_secure;
   ws->base.cs_is_secure = amdgpu_cs_is_secure;
   ws->base.cs_set_secure = amdgpu_cs_set_secure;

   amdgpu_bo_init_functions(ws);
   amdgpu_cs_init_functions(ws);
   amdgpu_surface_init_functions(ws);

   /* Create the screen at the end. The winsys must be initialized
    * completely.
    *
    * Alternatively, we could create the screen based on "ws->gen"
    * and link all drivers into one binary blob. */
   ws->base.screen = screen_create(&ws->base, config);
   if (!ws->base.screen) {
      amdgpu_winsys_destroy(&ws->base);
      simple_mtx_unlock(&dev_tab_mutex);
      return NULL;
   }

   simple_mtx_lock(&aws->sws_list_lock);
   ws->next = aws->sws_list;
   aws->sws_list = ws;
   simple_mtx_unlock(&aws->sws_list_lock);

unlock:
   /* We must unlock the mutex once the winsys is fully initialized, so that
    * other threads attempting to create the winsys from the same fd will
    * get a fully initialized winsys and not just half-way initialized. */
   simple_mtx_unlock(&dev_tab_mutex);

   return &ws->base;

fail_alloc:
   FREE(aws);
fail:
   if (ws->kms_handles)
      _mesa_hash_table_destroy(ws->kms_handles, NULL);
   close(ws->fd);
   FREE(ws);
   simple_mtx_unlock(&dev_tab_mutex);
   return NULL;
}