winsys/amdgpu: use a better hash_pointer function
mesa.git: src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
/*
 * Copyright © 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
 * Copyright © 2009 Joakim Sindholt <opensource@zhasha.com>
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "amdgpu_cs.h"
#include "amdgpu_public.h"

#include "util/u_hash_table.h"
#include "util/hash_table.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
#include <sys/stat.h>
#include "amd/common/sid.h"
#include "amd/common/gfx9d.h"

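/* Fallback for older amdgpu_drm.h headers that don't define this query yet. */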
#ifndef AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#endif

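/* Table mapping amdgpu_device_handle -> amdgpu_winsys, so that all screens
 * created for the same device share a single winsys.  Guarded by
 * dev_tab_mutex. */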
static struct util_hash_table *dev_tab = NULL;
static simple_mtx_t dev_tab_mutex = _SIMPLE_MTX_INITIALIZER_NP;

DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)

/* Helper function to do the ioctls needed for setup and init. */
static bool do_winsys_init(struct amdgpu_winsys *ws, int fd)
{
   if (!ac_query_gpu_info(fd, ws->dev, &ws->info, &ws->amdinfo))
      goto fail;

   ws->addrlib = amdgpu_addr_create(&ws->info, &ws->amdinfo, &ws->info.max_alignment);
   if (!ws->addrlib) {
      fprintf(stderr, "amdgpu: Cannot create addrlib.\n");
      goto fail;
   }

   ws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL;
   ws->debug_all_bos = debug_get_option_all_bos();
   ws->reserve_vmid = strstr(debug_get_option("R600_DEBUG", ""), "reserve_vmid") != NULL;
   ws->zero_all_vram_allocs = strstr(debug_get_option("R600_DEBUG", ""), "zerovram") != NULL;

   return true;

fail:
   amdgpu_device_deinitialize(ws->dev);
   ws->dev = NULL;
   return false;
}

static void do_winsys_deinit(struct amdgpu_winsys *ws)
{
   AddrDestroy(ws->addrlib);
   amdgpu_device_deinitialize(ws->dev);
}

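/* Full teardown; called once amdgpu_winsys_unref() has dropped the last
 * reference, or directly on an initialization error. */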
static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;

   if (ws->reserve_vmid)
      amdgpu_vm_unreserve_vmid(ws->dev, 0);

   if (util_queue_is_initialized(&ws->cs_queue))
      util_queue_destroy(&ws->cs_queue);

   simple_mtx_destroy(&ws->bo_fence_lock);
   pb_slabs_deinit(&ws->bo_slabs);
   pb_cache_deinit(&ws->bo_cache);
   simple_mtx_destroy(&ws->global_bo_list_lock);
   do_winsys_deinit(ws);
   FREE(rws);
}

static void amdgpu_winsys_query_info(struct radeon_winsys *rws,
                                     struct radeon_info *info)
{
   *info = ((struct amdgpu_winsys *)rws)->info;
}

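/* There are no per-CS features to toggle on amdgpu, so every request is
 * reported as unsupported. */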
static bool amdgpu_cs_request_feature(struct radeon_cmdbuf *rcs,
                                      enum radeon_feature_id fid,
                                      bool enable)
{
   return false;
}

static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
                                   enum radeon_value_id value)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
   struct amdgpu_heap_info heap;
   uint64_t retval = 0;

   switch (value) {
   case RADEON_REQUESTED_VRAM_MEMORY:
      return ws->allocated_vram;
   case RADEON_REQUESTED_GTT_MEMORY:
      return ws->allocated_gtt;
   case RADEON_MAPPED_VRAM:
      return ws->mapped_vram;
   case RADEON_MAPPED_GTT:
      return ws->mapped_gtt;
   case RADEON_BUFFER_WAIT_TIME_NS:
      return ws->buffer_wait_time;
   case RADEON_NUM_MAPPED_BUFFERS:
      return ws->num_mapped_buffers;
   case RADEON_TIMESTAMP:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
      return retval;
   case RADEON_NUM_GFX_IBS:
      return ws->num_gfx_IBs;
   case RADEON_NUM_SDMA_IBS:
      return ws->num_sdma_IBs;
   case RADEON_GFX_BO_LIST_COUNTER:
      return ws->gfx_bo_list_counter;
   case RADEON_GFX_IB_SIZE_COUNTER:
      return ws->gfx_ib_size_counter;
   case RADEON_NUM_BYTES_MOVED:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
      return retval;
   case RADEON_NUM_EVICTIONS:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
      return retval;
   case RADEON_NUM_VRAM_CPU_PAGE_FAULTS:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS, 8, &retval);
      return retval;
   case RADEON_VRAM_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
      return heap.heap_usage;
   case RADEON_VRAM_VIS_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &heap);
      return heap.heap_usage;
   case RADEON_GTT_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
      return heap.heap_usage;
   case RADEON_GPU_TEMPERATURE:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &retval);
      return retval;
   case RADEON_CURRENT_SCLK:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_SCLK, 4, &retval);
      return retval;
   case RADEON_CURRENT_MCLK:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_MCLK, 4, &retval);
      return retval;
   case RADEON_GPU_RESET_COUNTER:
      assert(0);
      return 0;
   case RADEON_CS_THREAD_TIME:
      return util_queue_get_thread_time_nano(&ws->cs_queue, 0);
   }
   return 0;
}

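/* Read GPU registers through the kernel's MM register interface; used by the
 * driver's debug code (e.g. register dumps). */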
static bool amdgpu_read_registers(struct radeon_winsys *rws,
                                  unsigned reg_offset,
                                  unsigned num_registers, uint32_t *out)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;

   return amdgpu_read_mm_registers(ws->dev, reg_offset / 4, num_registers,
                                   0xffffffff, 0, out) == 0;
}

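/* Hash/compare callbacks for dev_tab.  _mesa_hash_pointer() mixes several
 * shifted copies of the pointer value, which avoids clustering when the low
 * pointer bits are all zero due to allocation alignment; the compare callback
 * follows the util_hash_table convention of returning 0 for equal keys. */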
static unsigned hash_pointer(void *key)
{
   return _mesa_hash_pointer(key);
}

static int compare_pointers(void *key1, void *key2)
{
   return key1 != key2;
}

static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
   bool destroy;

   /* When the reference counter drops to zero, remove the device pointer
    * from the table.
    * This must happen while the mutex is locked, so that
    * amdgpu_winsys_create in another thread doesn't get the winsys
    * from the table when the counter drops to 0. */
   simple_mtx_lock(&dev_tab_mutex);

   destroy = pipe_reference(&ws->reference, NULL);
   if (destroy && dev_tab) {
      util_hash_table_remove(dev_tab, ws->dev);
      if (util_hash_table_count(dev_tab) == 0) {
         util_hash_table_destroy(dev_tab);
         dev_tab = NULL;
      }
   }

   simple_mtx_unlock(&dev_tab_mutex);
   return destroy;
}

static const char* amdgpu_get_chip_name(struct radeon_winsys *ws)
{
   amdgpu_device_handle dev = ((struct amdgpu_winsys *)ws)->dev;
   return amdgpu_get_marketing_name(dev);
}

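/* Public entry point: create (or find and reference) the winsys for a DRM fd
 * and create the driver screen on top of it. */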
PUBLIC struct radeon_winsys *
amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
                     radeon_screen_create_t screen_create)
{
   struct amdgpu_winsys *ws;
   drmVersionPtr version = drmGetVersion(fd);
   amdgpu_device_handle dev;
   uint32_t drm_major, drm_minor, r;

   /* The DRM driver version of amdgpu is 3.x.x. */
   if (version->version_major != 3) {
      drmFreeVersion(version);
      return NULL;
   }
   drmFreeVersion(version);

   /* Look up the winsys from the dev table. */
   simple_mtx_lock(&dev_tab_mutex);
   if (!dev_tab)
      dev_tab = util_hash_table_create(hash_pointer, compare_pointers);

   /* Initialize the amdgpu device. This should always return the same pointer
    * for the same fd. */
   r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
   if (r) {
      simple_mtx_unlock(&dev_tab_mutex);
      fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
      return NULL;
   }

   /* Look up the winsys if we have already created one for this device. */
   ws = util_hash_table_get(dev_tab, dev);
   if (ws) {
      pipe_reference(NULL, &ws->reference);
      simple_mtx_unlock(&dev_tab_mutex);
      return &ws->base;
   }

   /* Create a new winsys. */
   ws = CALLOC_STRUCT(amdgpu_winsys);
   if (!ws)
      goto fail;

   ws->dev = dev;
   ws->info.drm_major = drm_major;
   ws->info.drm_minor = drm_minor;

   if (!do_winsys_init(ws, fd))
      goto fail_alloc;

   /* Create managers. */
   pb_cache_init(&ws->bo_cache, RADEON_MAX_CACHED_HEAPS,
                 500000, ws->check_vm ? 1.0f : 2.0f, 0,
                 (ws->info.vram_size + ws->info.gart_size) / 8,
                 amdgpu_bo_destroy, amdgpu_bo_can_reclaim);

   if (!pb_slabs_init(&ws->bo_slabs,
                      AMDGPU_SLAB_MIN_SIZE_LOG2, AMDGPU_SLAB_MAX_SIZE_LOG2,
                      RADEON_MAX_SLAB_HEAPS,
                      ws,
                      amdgpu_bo_can_reclaim_slab,
                      amdgpu_bo_slab_alloc,
                      amdgpu_bo_slab_free))
      goto fail_cache;

   ws->info.min_alloc_size = 1 << AMDGPU_SLAB_MIN_SIZE_LOG2;

   /* init reference */
   pipe_reference_init(&ws->reference, 1);

   /* Set functions. */
   ws->base.unref = amdgpu_winsys_unref;
   ws->base.destroy = amdgpu_winsys_destroy;
   ws->base.query_info = amdgpu_winsys_query_info;
   ws->base.cs_request_feature = amdgpu_cs_request_feature;
   ws->base.query_value = amdgpu_query_value;
   ws->base.read_registers = amdgpu_read_registers;
   ws->base.get_chip_name = amdgpu_get_chip_name;

   amdgpu_bo_init_functions(ws);
   amdgpu_cs_init_functions(ws);
   amdgpu_surface_init_functions(ws);

   LIST_INITHEAD(&ws->global_bo_list);
   (void) simple_mtx_init(&ws->global_bo_list_lock, mtx_plain);
   (void) simple_mtx_init(&ws->bo_fence_lock, mtx_plain);

   if (!util_queue_init(&ws->cs_queue, "cs", 8, 1,
                        UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
      amdgpu_winsys_destroy(&ws->base);
      simple_mtx_unlock(&dev_tab_mutex);
      return NULL;
   }

   /* Create the screen at the end. The winsys must be initialized
    * completely.
    *
    * Alternatively, we could create the screen based on "ws->gen"
    * and link all drivers into one binary blob. */
   ws->base.screen = screen_create(&ws->base, config);
   if (!ws->base.screen) {
      amdgpu_winsys_destroy(&ws->base);
      simple_mtx_unlock(&dev_tab_mutex);
      return NULL;
   }

   util_hash_table_set(dev_tab, dev, ws);

   if (ws->reserve_vmid) {
      r = amdgpu_vm_reserve_vmid(dev, 0);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_vm_reserve_vmid failed. (%i)\n", r);
         goto fail_cache;
      }
   }

   /* We must unlock the mutex only after the winsys is fully initialized, so
    * that other threads attempting to create the winsys from the same fd will
    * get a fully initialized winsys and not a half-initialized one. */
   simple_mtx_unlock(&dev_tab_mutex);

   return &ws->base;

fail_cache:
   pb_cache_deinit(&ws->bo_cache);
   do_winsys_deinit(ws);
fail_alloc:
   FREE(ws);
fail:
   simple_mtx_unlock(&dev_tab_mutex);
   return NULL;
}