radeonsi/gfx9: add support for Raven
[mesa.git] / src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
/*
 * Copyright © 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
 * Copyright © 2009 Joakim Sindholt <opensource@zhasha.com>
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "amdgpu_cs.h"
#include "amdgpu_public.h"

#include "util/u_hash_table.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
#include <sys/stat.h>
#include "amd/common/amdgpu_id.h"
#include "amd/common/sid.h"
#include "amd/common/gfx9d.h"

#define CIK_TILE_MODE_COLOR_2D                       14

#define CIK__GB_TILE_MODE__PIPE_CONFIG(x)            (((x) >> 6) & 0x1f)
#define CIK__PIPE_CONFIG__ADDR_SURF_P2               0
#define CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16          4
#define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16         5
#define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32         6
#define CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32         7
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16    8
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16    9
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16    10
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16   11
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16   12
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32   13
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32   14
#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16   16
#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16  17

static struct util_hash_table *dev_tab = NULL;
static mtx_t dev_tab_mutex = _MTX_INITIALIZER_NP;

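/* Decode the number of tile pipes from the PIPE_CONFIG field (bits 10:6) of
 * the 2D color tile mode; the pipe count is not exposed directly by the GPU
 * info query used here, so it is recovered from the tiling configuration. */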
static unsigned cik_get_num_tile_pipes(struct amdgpu_gpu_info *info)
{
   unsigned mode2d = info->gb_tile_mode[CIK_TILE_MODE_COLOR_2D];

   switch (CIK__GB_TILE_MODE__PIPE_CONFIG(mode2d)) {
   case CIK__PIPE_CONFIG__ADDR_SURF_P2:
      return 2;
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32:
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32:
      return 4;
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32:
      return 8;
   case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16:
      return 16;
   default:
      fprintf(stderr, "Invalid CIK pipe configuration, assuming P2\n");
      assert(!"this should never occur");
      return 2;
   }
}

/* Helper function to do the ioctls needed for setup and init. */
static bool do_winsys_init(struct amdgpu_winsys *ws, int fd)
{
   struct amdgpu_buffer_size_alignments alignment_info = {};
   struct amdgpu_heap_info vram, vram_vis, gtt;
   struct drm_amdgpu_info_hw_ip dma = {}, uvd = {}, vce = {};
   uint32_t vce_version = 0, vce_feature = 0, uvd_version = 0, uvd_feature = 0;
   uint32_t unused_feature;
   int r, i, j;
   drmDevicePtr devinfo;

   /* Get PCI info. */
   r = drmGetDevice2(fd, 0, &devinfo);
   if (r) {
      fprintf(stderr, "amdgpu: drmGetDevice2 failed.\n");
      goto fail;
   }
   ws->info.pci_domain = devinfo->businfo.pci->domain;
   ws->info.pci_bus = devinfo->businfo.pci->bus;
   ws->info.pci_dev = devinfo->businfo.pci->dev;
   ws->info.pci_func = devinfo->businfo.pci->func;
   drmFreeDevice(&devinfo);

   /* Query hardware and driver information. */
   r = amdgpu_query_gpu_info(ws->dev, &ws->amdinfo);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_gpu_info failed.\n");
      goto fail;
   }

   r = amdgpu_query_buffer_size_alignment(ws->dev, &alignment_info);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_buffer_size_alignment failed.\n");
      goto fail;
   }

   r = amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &vram);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_heap_info(vram) failed.\n");
      goto fail;
   }

   r = amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM,
                              AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                              &vram_vis);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_heap_info(vram_vis) failed.\n");
      goto fail;
   }

   r = amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &gtt);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_heap_info(gtt) failed.\n");
      goto fail;
   }

   r = amdgpu_query_hw_ip_info(ws->dev, AMDGPU_HW_IP_DMA, 0, &dma);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(dma) failed.\n");
      goto fail;
   }

   r = amdgpu_query_hw_ip_info(ws->dev, AMDGPU_HW_IP_UVD, 0, &uvd);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(uvd) failed.\n");
      goto fail;
   }

   r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_GFX_ME, 0, 0,
                                     &ws->info.me_fw_version, &unused_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(me) failed.\n");
      goto fail;
   }

   r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_GFX_PFP, 0, 0,
                                     &ws->info.pfp_fw_version, &unused_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(pfp) failed.\n");
      goto fail;
   }

   r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_GFX_CE, 0, 0,
                                     &ws->info.ce_fw_version, &unused_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(ce) failed.\n");
      goto fail;
   }

   r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_UVD, 0, 0,
                                     &uvd_version, &uvd_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(uvd) failed.\n");
      goto fail;
   }

   r = amdgpu_query_hw_ip_info(ws->dev, AMDGPU_HW_IP_VCE, 0, &vce);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vce) failed.\n");
      goto fail;
   }

   r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_VCE, 0, 0,
                                     &vce_version, &vce_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(vce) failed.\n");
      goto fail;
   }

   /* Set chip identification. */
   ws->info.pci_id = ws->amdinfo.asic_id; /* TODO: is this correct? */
   ws->info.vce_harvest_config = ws->amdinfo.vce_harvest_config;

   switch (ws->info.pci_id) {
#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; break;
#include "pci_ids/radeonsi_pci_ids.h"
#undef CHIPSET

   default:
      fprintf(stderr, "amdgpu: Invalid PCI ID.\n");
      goto fail;
   }

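   /* The CHIP_* values are ordered by generation, so simple range checks are
    * enough to derive the chip class. */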
   if (ws->info.family >= CHIP_VEGA10)
      ws->info.chip_class = GFX9;
   else if (ws->info.family >= CHIP_TONGA)
      ws->info.chip_class = VI;
   else if (ws->info.family >= CHIP_BONAIRE)
      ws->info.chip_class = CIK;
   else if (ws->info.family >= CHIP_TAHITI)
      ws->info.chip_class = SI;
   else {
      fprintf(stderr, "amdgpu: Unknown family.\n");
      goto fail;
   }

   /* LLVM 5.0 is required for GFX9. */
   if (ws->info.chip_class >= GFX9 && HAVE_LLVM < 0x0500) {
      fprintf(stderr, "amdgpu: LLVM 5.0 is required, got LLVM %i.%i\n",
              HAVE_LLVM >> 8, HAVE_LLVM & 255);
      goto fail;
   }

   /* family and rev_id are for addrlib */
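   /* The FAMILY_* and *_A0 identifiers come from amd/common/amdgpu_id.h;
    * the A0 revision of each chip is used as the baseline for addrlib. */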
   switch (ws->info.family) {
   case CHIP_TAHITI:
      ws->family = FAMILY_SI;
      ws->rev_id = SI_TAHITI_P_A0;
      break;
   case CHIP_PITCAIRN:
      ws->family = FAMILY_SI;
      ws->rev_id = SI_PITCAIRN_PM_A0;
      break;
   case CHIP_VERDE:
      ws->family = FAMILY_SI;
      ws->rev_id = SI_CAPEVERDE_M_A0;
      break;
   case CHIP_OLAND:
      ws->family = FAMILY_SI;
      ws->rev_id = SI_OLAND_M_A0;
      break;
   case CHIP_HAINAN:
      ws->family = FAMILY_SI;
      ws->rev_id = SI_HAINAN_V_A0;
      break;
   case CHIP_BONAIRE:
      ws->family = FAMILY_CI;
      ws->rev_id = CI_BONAIRE_M_A0;
      break;
   case CHIP_KAVERI:
      ws->family = FAMILY_KV;
      ws->rev_id = KV_SPECTRE_A0;
      break;
   case CHIP_KABINI:
      ws->family = FAMILY_KV;
      ws->rev_id = KB_KALINDI_A0;
      break;
   case CHIP_HAWAII:
      ws->family = FAMILY_CI;
      ws->rev_id = CI_HAWAII_P_A0;
      break;
   case CHIP_MULLINS:
      ws->family = FAMILY_KV;
      ws->rev_id = ML_GODAVARI_A0;
      break;
   case CHIP_TONGA:
      ws->family = FAMILY_VI;
      ws->rev_id = VI_TONGA_P_A0;
      break;
   case CHIP_ICELAND:
      ws->family = FAMILY_VI;
      ws->rev_id = VI_ICELAND_M_A0;
      break;
   case CHIP_CARRIZO:
      ws->family = FAMILY_CZ;
      ws->rev_id = CARRIZO_A0;
      break;
   case CHIP_STONEY:
      ws->family = FAMILY_CZ;
      ws->rev_id = STONEY_A0;
      break;
   case CHIP_FIJI:
      ws->family = FAMILY_VI;
      ws->rev_id = VI_FIJI_P_A0;
      break;
   case CHIP_POLARIS10:
      ws->family = FAMILY_VI;
      ws->rev_id = VI_POLARIS10_P_A0;
      break;
   case CHIP_POLARIS11:
      ws->family = FAMILY_VI;
      ws->rev_id = VI_POLARIS11_M_A0;
      break;
   case CHIP_POLARIS12:
      ws->family = FAMILY_VI;
      ws->rev_id = VI_POLARIS12_V_A0;
      break;
   case CHIP_VEGA10:
      ws->family = FAMILY_AI;
      ws->rev_id = AI_VEGA10_P_A0;
      break;
   case CHIP_RAVEN:
      ws->family = FAMILY_RV;
      ws->rev_id = RAVEN_A0;
      break;
   default:
      fprintf(stderr, "amdgpu: Unknown family.\n");
      goto fail;
   }

   ws->addrlib = amdgpu_addr_create(ws);
   if (!ws->addrlib) {
      fprintf(stderr, "amdgpu: Cannot create addrlib.\n");
      goto fail;
   }

   /* Set which chips have dedicated VRAM. */
   ws->info.has_dedicated_vram =
      !(ws->amdinfo.ids_flags & AMDGPU_IDS_FLAGS_FUSION);

   /* Set hardware information. */
   ws->info.gart_size = gtt.heap_size;
   ws->info.vram_size = vram.heap_size;
   ws->info.vram_vis_size = vram_vis.heap_size;
   /* The kernel can split large buffers in VRAM but not in GTT, so large
    * allocations can fail or cause buffer movement failures in the kernel.
    */
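   /* For example (made-up sizes): with 4 GiB of VRAM and 3 GiB of GTT this
    * caps a single allocation at MIN2(3.6 GiB, 2.1 GiB) = 2.1 GiB. */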
   ws->info.max_alloc_size = MIN2(ws->info.vram_size * 0.9, ws->info.gart_size * 0.7);
   /* convert the shader clock from kHz to MHz */
   ws->info.max_shader_clock = ws->amdinfo.max_engine_clk / 1000;
   ws->info.max_se = ws->amdinfo.num_shader_engines;
   ws->info.max_sh_per_se = ws->amdinfo.num_shader_arrays_per_engine;
   ws->info.has_uvd = uvd.available_rings != 0;
   ws->info.uvd_fw_version =
      uvd.available_rings ? uvd_version : 0;
   ws->info.vce_fw_version =
      vce.available_rings ? vce_version : 0;
   ws->info.has_userptr = true;
   ws->info.num_render_backends = ws->amdinfo.rb_pipes;
   ws->info.clock_crystal_freq = ws->amdinfo.gpu_counter_freq;
   ws->info.tcc_cache_line_size = 64; /* TC L2 line size on GCN */
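   /* GFX9 reads the pipe count straight from GB_ADDR_CFG; on older chips this
    * code derives it from the 2D tile mode instead, and the pipe-interleave
    * field sits at a different position in the GFX6 vs. GFX9 register layout. */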
   if (ws->info.chip_class == GFX9) {
      ws->info.num_tile_pipes = 1 << G_0098F8_NUM_PIPES(ws->amdinfo.gb_addr_cfg);
      ws->info.pipe_interleave_bytes =
         256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(ws->amdinfo.gb_addr_cfg);
   } else {
      ws->info.num_tile_pipes = cik_get_num_tile_pipes(&ws->amdinfo);
      ws->info.pipe_interleave_bytes =
         256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX6(ws->amdinfo.gb_addr_cfg);
   }
   ws->info.has_virtual_memory = true;
   ws->info.has_sdma = dma.available_rings != 0;

   /* Get the number of good compute units. */
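   /* cu_bitmap[se][sh] holds a mask of the active compute units in each
    * shader array; counting the set bits gives the total CU count. */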
   ws->info.num_good_compute_units = 0;
   for (i = 0; i < ws->info.max_se; i++)
      for (j = 0; j < ws->info.max_sh_per_se; j++)
         ws->info.num_good_compute_units +=
            util_bitcount(ws->amdinfo.cu_bitmap[i][j]);

   memcpy(ws->info.si_tile_mode_array, ws->amdinfo.gb_tile_mode,
          sizeof(ws->amdinfo.gb_tile_mode));
   ws->info.enabled_rb_mask = ws->amdinfo.enabled_rb_pipes_mask;

   memcpy(ws->info.cik_macrotile_mode_array, ws->amdinfo.gb_macro_tile_mode,
          sizeof(ws->amdinfo.gb_macro_tile_mode));

   ws->info.gart_page_size = alignment_info.size_remote;

   if (ws->info.chip_class == SI)
      ws->info.gfx_ib_pad_with_type2 = TRUE;

   ws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL;

   return true;

fail:
   if (ws->addrlib)
      AddrDestroy(ws->addrlib);
   amdgpu_device_deinitialize(ws->dev);
   ws->dev = NULL;
   return false;
}

static void do_winsys_deinit(struct amdgpu_winsys *ws)
{
   AddrDestroy(ws->addrlib);
   amdgpu_device_deinitialize(ws->dev);
}

static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;

   if (util_queue_is_initialized(&ws->cs_queue))
      util_queue_destroy(&ws->cs_queue);

   mtx_destroy(&ws->bo_fence_lock);
   pb_slabs_deinit(&ws->bo_slabs);
   pb_cache_deinit(&ws->bo_cache);
   mtx_destroy(&ws->global_bo_list_lock);
   do_winsys_deinit(ws);
   FREE(rws);
}

static void amdgpu_winsys_query_info(struct radeon_winsys *rws,
                                     struct radeon_info *info)
{
   *info = ((struct amdgpu_winsys *)rws)->info;
}

static bool amdgpu_cs_request_feature(struct radeon_winsys_cs *rcs,
                                      enum radeon_feature_id fid,
                                      bool enable)
{
   return false;
}

static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
                                   enum radeon_value_id value)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
   struct amdgpu_heap_info heap;
   uint64_t retval = 0;

   switch (value) {
   case RADEON_REQUESTED_VRAM_MEMORY:
      return ws->allocated_vram;
   case RADEON_REQUESTED_GTT_MEMORY:
      return ws->allocated_gtt;
   case RADEON_MAPPED_VRAM:
      return ws->mapped_vram;
   case RADEON_MAPPED_GTT:
      return ws->mapped_gtt;
   case RADEON_BUFFER_WAIT_TIME_NS:
      return ws->buffer_wait_time;
   case RADEON_NUM_MAPPED_BUFFERS:
      return ws->num_mapped_buffers;
   case RADEON_TIMESTAMP:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
      return retval;
   case RADEON_NUM_GFX_IBS:
      return ws->num_gfx_IBs;
   case RADEON_NUM_SDMA_IBS:
      return ws->num_sdma_IBs;
   case RADEON_NUM_BYTES_MOVED:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
      return retval;
   case RADEON_NUM_EVICTIONS:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
      return retval;
   case RADEON_VRAM_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
      return heap.heap_usage;
   case RADEON_VRAM_VIS_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &heap);
      return heap.heap_usage;
   case RADEON_GTT_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
      return heap.heap_usage;
   case RADEON_GPU_TEMPERATURE:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &retval);
      return retval;
   case RADEON_CURRENT_SCLK:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_SCLK, 4, &retval);
      return retval;
   case RADEON_CURRENT_MCLK:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_MCLK, 4, &retval);
      return retval;
   case RADEON_GPU_RESET_COUNTER:
      assert(0);
      return 0;
   case RADEON_CS_THREAD_TIME:
      return util_queue_get_thread_time_nano(&ws->cs_queue, 0);
   }
   return 0;
}

static bool amdgpu_read_registers(struct radeon_winsys *rws,
                                  unsigned reg_offset,
                                  unsigned num_registers, uint32_t *out)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;

   return amdgpu_read_mm_registers(ws->dev, reg_offset / 4, num_registers,
                                   0xffffffff, 0, out) == 0;
}

static unsigned hash_dev(void *key)
{
#if defined(PIPE_ARCH_X86_64)
   return pointer_to_intptr(key) ^ (pointer_to_intptr(key) >> 32);
#else
   return pointer_to_intptr(key);
#endif
}

static int compare_dev(void *key1, void *key2)
{
   return key1 != key2;
}

static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
   bool destroy;

   /* When the reference counter drops to zero, remove the device pointer
    * from the table.
    * This must happen while the mutex is locked, so that
    * amdgpu_winsys_create in another thread doesn't get the winsys
    * from the table when the counter drops to 0. */
   mtx_lock(&dev_tab_mutex);

   destroy = pipe_reference(&ws->reference, NULL);
   if (destroy && dev_tab)
      util_hash_table_remove(dev_tab, ws->dev);

   mtx_unlock(&dev_tab_mutex);
   return destroy;
}

PUBLIC struct radeon_winsys *
amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
{
   struct amdgpu_winsys *ws;
   drmVersionPtr version = drmGetVersion(fd);
   amdgpu_device_handle dev;
   uint32_t drm_major, drm_minor, r;

   /* The DRM driver version of amdgpu is 3.x.x. */
   if (version->version_major != 3) {
      drmFreeVersion(version);
      return NULL;
   }
   drmFreeVersion(version);

   /* Look up the winsys from the dev table. */
   mtx_lock(&dev_tab_mutex);
   if (!dev_tab)
      dev_tab = util_hash_table_create(hash_dev, compare_dev);

   /* Initialize the amdgpu device. This should always return the same pointer
    * for the same fd. */
   r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
   if (r) {
      mtx_unlock(&dev_tab_mutex);
      fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
      return NULL;
   }

   /* Look up a winsys if we have already created one for this device. */
   ws = util_hash_table_get(dev_tab, dev);
   if (ws) {
      pipe_reference(NULL, &ws->reference);
      mtx_unlock(&dev_tab_mutex);
      return &ws->base;
   }

   /* Create a new winsys. */
   ws = CALLOC_STRUCT(amdgpu_winsys);
   if (!ws)
      goto fail;

   ws->dev = dev;
   ws->info.drm_major = drm_major;
   ws->info.drm_minor = drm_minor;

   if (!do_winsys_init(ws, fd))
      goto fail_alloc;

   /* Create managers. */
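   /* Reclaim parameters (assumed semantics): cached buffers linger for up to
    * 500000 us and the cache is capped at 1/8 of VRAM+GTT; under
    * R600_DEBUG=check_vm the reuse size factor drops to 1.0, so only
    * exact-size buffers are reused. */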
   pb_cache_init(&ws->bo_cache, 500000, ws->check_vm ? 1.0f : 2.0f, 0,
                 (ws->info.vram_size + ws->info.gart_size) / 8,
                 amdgpu_bo_destroy, amdgpu_bo_can_reclaim);

   if (!pb_slabs_init(&ws->bo_slabs,
                      AMDGPU_SLAB_MIN_SIZE_LOG2, AMDGPU_SLAB_MAX_SIZE_LOG2,
                      12, /* number of heaps (domain/flags combinations) */
                      ws,
                      amdgpu_bo_can_reclaim_slab,
                      amdgpu_bo_slab_alloc,
                      amdgpu_bo_slab_free))
      goto fail_cache;

   ws->info.min_alloc_size = 1 << AMDGPU_SLAB_MIN_SIZE_LOG2;

   /* init reference */
   pipe_reference_init(&ws->reference, 1);

   /* Set functions. */
   ws->base.unref = amdgpu_winsys_unref;
   ws->base.destroy = amdgpu_winsys_destroy;
   ws->base.query_info = amdgpu_winsys_query_info;
   ws->base.cs_request_feature = amdgpu_cs_request_feature;
   ws->base.query_value = amdgpu_query_value;
   ws->base.read_registers = amdgpu_read_registers;

   amdgpu_bo_init_functions(ws);
   amdgpu_cs_init_functions(ws);
   amdgpu_surface_init_functions(ws);

   LIST_INITHEAD(&ws->global_bo_list);
   (void) mtx_init(&ws->global_bo_list_lock, mtx_plain);
   (void) mtx_init(&ws->bo_fence_lock, mtx_plain);

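   /* A single background thread (queue depth 8) used for asynchronous
    * command-stream submission. */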
   if (!util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1)) {
      amdgpu_winsys_destroy(&ws->base);
      mtx_unlock(&dev_tab_mutex);
      return NULL;
   }

   /* Create the screen at the end. The winsys must be initialized
    * completely.
    *
    * Alternatively, we could create the screen based on "ws->gen"
    * and link all drivers into one binary blob. */
   ws->base.screen = screen_create(&ws->base);
   if (!ws->base.screen) {
      amdgpu_winsys_destroy(&ws->base);
      mtx_unlock(&dev_tab_mutex);
      return NULL;
   }

   util_hash_table_set(dev_tab, dev, ws);

   /* We must unlock the mutex once the winsys is fully initialized, so that
    * other threads attempting to create the winsys from the same fd will
    * get a fully initialized winsys and not just half-way initialized. */
   mtx_unlock(&dev_tab_mutex);

   return &ws->base;

fail_cache:
   pb_cache_deinit(&ws->bo_cache);
   do_winsys_deinit(ws);
fail_alloc:
   FREE(ws);
fail:
   mtx_unlock(&dev_tab_mutex);
   return NULL;
}