/*
 * Copyright © 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
 * Copyright © 2009 Joakim Sindholt <opensource@zhasha.com>
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "amdgpu_cs.h"
#include "amdgpu_public.h"

#include "util/u_hash_table.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
#include <sys/stat.h>
#include "amd/common/amdgpu_id.h"

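/* Index of the 2D color tile mode in gb_tile_mode[] and decoding of its
 * PIPE_CONFIG field, used below to derive the number of tile pipes on CIK. */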
#define CIK_TILE_MODE_COLOR_2D           14

#define CIK__GB_TILE_MODE__PIPE_CONFIG(x)            (((x) >> 6) & 0x1f)
#define     CIK__PIPE_CONFIG__ADDR_SURF_P2               0
#define     CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16          4
#define     CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16         5
#define     CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32         6
#define     CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32         7
#define     CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16    8
#define     CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16    9
#define     CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16    10
#define     CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16   11
#define     CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16   12
#define     CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32   13
#define     CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32   14
#define     CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16   16
#define     CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16  17

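/* Fallback so the eviction-count query below still builds with older
 * amdgpu_drm.h headers that don't define it. */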
#ifndef AMDGPU_INFO_NUM_EVICTIONS
#define AMDGPU_INFO_NUM_EVICTIONS   0x18
#endif

static struct util_hash_table *dev_tab = NULL;
pipe_static_mutex(dev_tab_mutex);

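/* Derive the number of tile pipes from the PIPE_CONFIG field of the
 * 2D color tile mode. */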
static unsigned cik_get_num_tile_pipes(struct amdgpu_gpu_info *info)
{
   unsigned mode2d = info->gb_tile_mode[CIK_TILE_MODE_COLOR_2D];

   switch (CIK__GB_TILE_MODE__PIPE_CONFIG(mode2d)) {
   case CIK__PIPE_CONFIG__ADDR_SURF_P2:
      return 2;
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32:
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32:
      return 4;
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32:
      return 8;
   case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16:
      return 16;
   default:
      fprintf(stderr, "Invalid CIK pipe configuration, assuming P2\n");
      assert(!"this should never occur");
      return 2;
   }
}

/* Helper function to do the ioctls needed for setup and init. */
static bool do_winsys_init(struct amdgpu_winsys *ws, int fd)
{
   struct amdgpu_buffer_size_alignments alignment_info = {};
   struct amdgpu_heap_info vram, vram_vis, gtt;
   struct drm_amdgpu_info_hw_ip dma = {}, uvd = {}, vce = {};
   uint32_t vce_version = 0, vce_feature = 0, uvd_version = 0, uvd_feature = 0;
   uint32_t unused_feature;
   int r, i, j;
   drmDevicePtr devinfo;

   /* Get PCI info. */
   r = drmGetDevice(fd, &devinfo);
   if (r) {
      fprintf(stderr, "amdgpu: drmGetDevice failed.\n");
      goto fail;
   }
   ws->info.pci_domain = devinfo->businfo.pci->domain;
   ws->info.pci_bus = devinfo->businfo.pci->bus;
   ws->info.pci_dev = devinfo->businfo.pci->dev;
   ws->info.pci_func = devinfo->businfo.pci->func;
   drmFreeDevice(&devinfo);

   /* Query hardware and driver information. */
   r = amdgpu_query_gpu_info(ws->dev, &ws->amdinfo);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_gpu_info failed.\n");
      goto fail;
   }

   r = amdgpu_query_buffer_size_alignment(ws->dev, &alignment_info);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_buffer_size_alignment failed.\n");
      goto fail;
   }

   r = amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &vram);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_heap_info(vram) failed.\n");
      goto fail;
   }

   r = amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM,
                              AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                              &vram_vis);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_heap_info(vram_vis) failed.\n");
      goto fail;
   }

   r = amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &gtt);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_heap_info(gtt) failed.\n");
      goto fail;
   }

   r = amdgpu_query_hw_ip_info(ws->dev, AMDGPU_HW_IP_DMA, 0, &dma);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(dma) failed.\n");
      goto fail;
   }

   r = amdgpu_query_hw_ip_info(ws->dev, AMDGPU_HW_IP_UVD, 0, &uvd);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(uvd) failed.\n");
      goto fail;
   }

   r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_GFX_ME, 0, 0,
                                     &ws->info.me_fw_version, &unused_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(me) failed.\n");
      goto fail;
   }

   r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_GFX_PFP, 0, 0,
                                     &ws->info.pfp_fw_version, &unused_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(pfp) failed.\n");
      goto fail;
   }

   r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_GFX_CE, 0, 0,
                                     &ws->info.ce_fw_version, &unused_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(ce) failed.\n");
      goto fail;
   }

   r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_UVD, 0, 0,
                                     &uvd_version, &uvd_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(uvd) failed.\n");
      goto fail;
   }

   r = amdgpu_query_hw_ip_info(ws->dev, AMDGPU_HW_IP_VCE, 0, &vce);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vce) failed.\n");
      goto fail;
   }

   r = amdgpu_query_firmware_version(ws->dev, AMDGPU_INFO_FW_VCE, 0, 0,
                                     &vce_version, &vce_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(vce) failed.\n");
      goto fail;
   }

   /* Set chip identification. */
   ws->info.pci_id = ws->amdinfo.asic_id; /* TODO: is this correct? */
   ws->info.vce_harvest_config = ws->amdinfo.vce_harvest_config;

   switch (ws->info.pci_id) {
#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; break;
#include "pci_ids/radeonsi_pci_ids.h"
#undef CHIPSET

   default:
      fprintf(stderr, "amdgpu: Invalid PCI ID.\n");
      goto fail;
   }

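   /* Derive the chip class from the family. */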
   if (ws->info.family >= CHIP_TONGA)
      ws->info.chip_class = VI;
   else if (ws->info.family >= CHIP_BONAIRE)
      ws->info.chip_class = CIK;
   else if (ws->info.family >= CHIP_TAHITI)
      ws->info.chip_class = SI;
   else {
      fprintf(stderr, "amdgpu: Unknown family.\n");
      goto fail;
   }

   /* LLVM 3.6.1 is required for VI. */
   if (ws->info.chip_class >= VI &&
       HAVE_LLVM == 0x0306 && MESA_LLVM_VERSION_PATCH < 1) {
      fprintf(stderr, "amdgpu: LLVM 3.6.1 is required, got LLVM %i.%i.%i\n",
              HAVE_LLVM >> 8, HAVE_LLVM & 255, MESA_LLVM_VERSION_PATCH);
      goto fail;
   }

   /* family and rev_id are for addrlib */
   switch (ws->info.family) {
   case CHIP_TAHITI:
      ws->family = FAMILY_SI;
      ws->rev_id = SI_TAHITI_P_A0;
      break;
   case CHIP_PITCAIRN:
      ws->family = FAMILY_SI;
      ws->rev_id = SI_PITCAIRN_PM_A0;
      break;
   case CHIP_VERDE:
      ws->family = FAMILY_SI;
      ws->rev_id = SI_CAPEVERDE_M_A0;
      break;
   case CHIP_OLAND:
      ws->family = FAMILY_SI;
      ws->rev_id = SI_OLAND_M_A0;
      break;
   case CHIP_HAINAN:
      ws->family = FAMILY_SI;
      ws->rev_id = SI_HAINAN_V_A0;
      break;
   case CHIP_BONAIRE:
      ws->family = FAMILY_CI;
      ws->rev_id = CI_BONAIRE_M_A0;
      break;
   case CHIP_KAVERI:
      ws->family = FAMILY_KV;
      ws->rev_id = KV_SPECTRE_A0;
      break;
   case CHIP_KABINI:
      ws->family = FAMILY_KV;
      ws->rev_id = KB_KALINDI_A0;
      break;
   case CHIP_HAWAII:
      ws->family = FAMILY_CI;
      ws->rev_id = CI_HAWAII_P_A0;
      break;
   case CHIP_MULLINS:
      ws->family = FAMILY_KV;
      ws->rev_id = ML_GODAVARI_A0;
      break;
   case CHIP_TONGA:
      ws->family = FAMILY_VI;
      ws->rev_id = VI_TONGA_P_A0;
      break;
   case CHIP_ICELAND:
      ws->family = FAMILY_VI;
      ws->rev_id = VI_ICELAND_M_A0;
      break;
   case CHIP_CARRIZO:
      ws->family = FAMILY_CZ;
      ws->rev_id = CARRIZO_A0;
      break;
   case CHIP_STONEY:
      ws->family = FAMILY_CZ;
      ws->rev_id = STONEY_A0;
      break;
   case CHIP_FIJI:
      ws->family = FAMILY_VI;
      ws->rev_id = VI_FIJI_P_A0;
      break;
   case CHIP_POLARIS10:
      ws->family = FAMILY_VI;
      ws->rev_id = VI_POLARIS10_P_A0;
      break;
   case CHIP_POLARIS11:
      ws->family = FAMILY_VI;
      ws->rev_id = VI_POLARIS11_M_A0;
      break;
   case CHIP_POLARIS12:
      ws->family = FAMILY_VI;
      ws->rev_id = VI_POLARIS12_V_A0;
      break;
   default:
      fprintf(stderr, "amdgpu: Unknown family.\n");
      goto fail;
   }

   ws->addrlib = amdgpu_addr_create(ws);
   if (!ws->addrlib) {
      fprintf(stderr, "amdgpu: Cannot create addrlib.\n");
      goto fail;
   }

   /* Set which chips have dedicated VRAM. */
   ws->info.has_dedicated_vram =
      !(ws->amdinfo.ids_flags & AMDGPU_IDS_FLAGS_FUSION);

   /* Set hardware information. */
   ws->info.gart_size = gtt.heap_size;
   ws->info.vram_size = vram.heap_size;
   ws->info.vram_vis_size = vram_vis.heap_size;
   /* The kernel can split large buffers, so we can do large allocations. */
   ws->info.max_alloc_size = MAX2(ws->info.vram_size, ws->info.gart_size) * 0.9;
   /* convert the shader clock from KHz to MHz */
   ws->info.max_shader_clock = ws->amdinfo.max_engine_clk / 1000;
   ws->info.max_se = ws->amdinfo.num_shader_engines;
   ws->info.max_sh_per_se = ws->amdinfo.num_shader_arrays_per_engine;
   ws->info.has_uvd = uvd.available_rings != 0;
   ws->info.uvd_fw_version =
      uvd.available_rings ? uvd_version : 0;
   ws->info.vce_fw_version =
      vce.available_rings ? vce_version : 0;
   ws->info.has_userptr = true;
   ws->info.num_render_backends = ws->amdinfo.rb_pipes;
   ws->info.clock_crystal_freq = ws->amdinfo.gpu_counter_freq;
   ws->info.tcc_cache_line_size = 64; /* TC L2 line size on GCN */
   ws->info.num_tile_pipes = cik_get_num_tile_pipes(&ws->amdinfo);
   ws->info.pipe_interleave_bytes = 256 << ((ws->amdinfo.gb_addr_cfg >> 4) & 0x7);
   ws->info.has_virtual_memory = true;
   ws->info.has_sdma = dma.available_rings != 0;

   /* Get the number of good compute units. */
   ws->info.num_good_compute_units = 0;
   for (i = 0; i < ws->info.max_se; i++)
      for (j = 0; j < ws->info.max_sh_per_se; j++)
         ws->info.num_good_compute_units +=
            util_bitcount(ws->amdinfo.cu_bitmap[i][j]);

   memcpy(ws->info.si_tile_mode_array, ws->amdinfo.gb_tile_mode,
          sizeof(ws->amdinfo.gb_tile_mode));
   ws->info.enabled_rb_mask = ws->amdinfo.enabled_rb_pipes_mask;

   memcpy(ws->info.cik_macrotile_mode_array, ws->amdinfo.gb_macro_tile_mode,
          sizeof(ws->amdinfo.gb_macro_tile_mode));

   ws->info.gart_page_size = alignment_info.size_remote;

   if (ws->info.chip_class == SI)
      ws->info.gfx_ib_pad_with_type2 = TRUE;

   ws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL;

   return true;

fail:
   if (ws->addrlib)
      AddrDestroy(ws->addrlib);
   amdgpu_device_deinitialize(ws->dev);
   ws->dev = NULL;
   return false;
}

static void do_winsys_deinit(struct amdgpu_winsys *ws)
{
   AddrDestroy(ws->addrlib);
   amdgpu_device_deinitialize(ws->dev);
}

static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;

   if (util_queue_is_initialized(&ws->cs_queue))
      util_queue_destroy(&ws->cs_queue);

   pipe_mutex_destroy(ws->bo_fence_lock);
   pb_slabs_deinit(&ws->bo_slabs);
   pb_cache_deinit(&ws->bo_cache);
   pipe_mutex_destroy(ws->global_bo_list_lock);
   do_winsys_deinit(ws);
   FREE(rws);
}

static void amdgpu_winsys_query_info(struct radeon_winsys *rws,
                                     struct radeon_info *info)
{
   *info = ((struct amdgpu_winsys *)rws)->info;
}

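/* No CS features can be requested on amdgpu; this hook always fails. */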
static bool amdgpu_cs_request_feature(struct radeon_winsys_cs *rcs,
                                      enum radeon_feature_id fid,
                                      bool enable)
{
   return false;
}

static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
                                   enum radeon_value_id value)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
   struct amdgpu_heap_info heap;
   uint64_t retval = 0;

   switch (value) {
   case RADEON_REQUESTED_VRAM_MEMORY:
      return ws->allocated_vram;
   case RADEON_REQUESTED_GTT_MEMORY:
      return ws->allocated_gtt;
   case RADEON_MAPPED_VRAM:
      return ws->mapped_vram;
   case RADEON_MAPPED_GTT:
      return ws->mapped_gtt;
   case RADEON_BUFFER_WAIT_TIME_NS:
      return ws->buffer_wait_time;
   case RADEON_NUM_MAPPED_BUFFERS:
      return ws->num_mapped_buffers;
   case RADEON_TIMESTAMP:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
      return retval;
   case RADEON_NUM_GFX_IBS:
      return ws->num_gfx_IBs;
   case RADEON_NUM_SDMA_IBS:
      return ws->num_sdma_IBs;
   case RADEON_NUM_BYTES_MOVED:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
      return retval;
   case RADEON_NUM_EVICTIONS:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
      return retval;
   case RADEON_VRAM_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
      return heap.heap_usage;
   case RADEON_VRAM_VIS_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &heap);
      return heap.heap_usage;
   case RADEON_GTT_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
      return heap.heap_usage;
   case RADEON_GPU_TEMPERATURE:
   case RADEON_CURRENT_SCLK:
   case RADEON_CURRENT_MCLK:
      return 0;
   case RADEON_GPU_RESET_COUNTER:
      assert(0);
      return 0;
   case RADEON_CS_THREAD_TIME:
      return util_queue_get_thread_time_nano(&ws->cs_queue, 0);
   }
   return 0;
}

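/* Read memory-mapped registers through the kernel. reg_offset is in bytes;
 * the libdrm interface takes a dword offset. */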
static bool amdgpu_read_registers(struct radeon_winsys *rws,
                                  unsigned reg_offset,
                                  unsigned num_registers, uint32_t *out)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;

   return amdgpu_read_mm_registers(ws->dev, reg_offset / 4, num_registers,
                                   0xffffffff, 0, out) == 0;
}

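/* dev_tab is keyed by the amdgpu_device_handle pointer; the hash and compare
 * callbacks operate on the pointer value itself. */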
static unsigned hash_dev(void *key)
{
#if defined(PIPE_ARCH_X86_64)
   return pointer_to_intptr(key) ^ (pointer_to_intptr(key) >> 32);
#else
   return pointer_to_intptr(key);
#endif
}

static int compare_dev(void *key1, void *key2)
{
   return key1 != key2;
}

static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
{
   struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
   bool destroy;

   /* When the reference counter drops to zero, remove the device pointer
    * from the table.
    * This must happen while the mutex is locked, so that
    * amdgpu_winsys_create in another thread doesn't get the winsys
    * from the table when the counter drops to 0. */
   pipe_mutex_lock(dev_tab_mutex);

   destroy = pipe_reference(&ws->reference, NULL);
   if (destroy && dev_tab)
      util_hash_table_remove(dev_tab, ws->dev);

   pipe_mutex_unlock(dev_tab_mutex);
   return destroy;
}

PUBLIC struct radeon_winsys *
amdgpu_winsys_create(int fd, radeon_screen_create_t screen_create)
{
   struct amdgpu_winsys *ws;
   drmVersionPtr version = drmGetVersion(fd);
   amdgpu_device_handle dev;
   uint32_t drm_major, drm_minor, r;

   /* drmGetVersion may fail; bail out to avoid dereferencing NULL. */
   if (!version)
      return NULL;

   /* The DRM driver version of amdgpu is 3.x.x. */
   if (version->version_major != 3) {
      drmFreeVersion(version);
      return NULL;
   }
   drmFreeVersion(version);

   /* Look up the winsys from the dev table. */
   pipe_mutex_lock(dev_tab_mutex);
   if (!dev_tab)
      dev_tab = util_hash_table_create(hash_dev, compare_dev);

   /* Initialize the amdgpu device. This should always return the same pointer
    * for the same fd. */
   r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
   if (r) {
      pipe_mutex_unlock(dev_tab_mutex);
      fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
      return NULL;
   }

   /* Lookup a winsys if we have already created one for this device. */
   ws = util_hash_table_get(dev_tab, dev);
   if (ws) {
      pipe_reference(NULL, &ws->reference);
      pipe_mutex_unlock(dev_tab_mutex);
      return &ws->base;
   }

   /* Create a new winsys. */
   ws = CALLOC_STRUCT(amdgpu_winsys);
   if (!ws)
      goto fail;

   ws->dev = dev;
   ws->info.drm_major = drm_major;
   ws->info.drm_minor = drm_minor;

   if (!do_winsys_init(ws, fd))
      goto fail_alloc;

   /* Create managers. */
   pb_cache_init(&ws->bo_cache, 500000, ws->check_vm ? 1.0f : 2.0f, 0,
                 (ws->info.vram_size + ws->info.gart_size) / 8,
                 amdgpu_bo_destroy, amdgpu_bo_can_reclaim);

   if (!pb_slabs_init(&ws->bo_slabs,
                      AMDGPU_SLAB_MIN_SIZE_LOG2, AMDGPU_SLAB_MAX_SIZE_LOG2,
                      12, /* number of heaps (domain/flags combinations) */
                      ws,
                      amdgpu_bo_can_reclaim_slab,
                      amdgpu_bo_slab_alloc,
                      amdgpu_bo_slab_free))
      goto fail_cache;

   ws->info.min_alloc_size = 1 << AMDGPU_SLAB_MIN_SIZE_LOG2;

   /* init reference */
   pipe_reference_init(&ws->reference, 1);

   /* Set functions. */
   ws->base.unref = amdgpu_winsys_unref;
   ws->base.destroy = amdgpu_winsys_destroy;
   ws->base.query_info = amdgpu_winsys_query_info;
   ws->base.cs_request_feature = amdgpu_cs_request_feature;
   ws->base.query_value = amdgpu_query_value;
   ws->base.read_registers = amdgpu_read_registers;

   amdgpu_bo_init_functions(ws);
   amdgpu_cs_init_functions(ws);
   amdgpu_surface_init_functions(ws);

   LIST_INITHEAD(&ws->global_bo_list);
   pipe_mutex_init(ws->global_bo_list_lock);
   pipe_mutex_init(ws->bo_fence_lock);

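   /* Start the CS submission queue: 1 worker thread, up to 8 queued jobs. */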
   if (!util_queue_init(&ws->cs_queue, "amdgpu_cs", 8, 1)) {
      amdgpu_winsys_destroy(&ws->base);
      pipe_mutex_unlock(dev_tab_mutex);
      return NULL;
   }

   /* Create the screen at the end. The winsys must be initialized
    * completely.
    *
    * Alternatively, we could create the screen based on "ws->gen"
    * and link all drivers into one binary blob. */
   ws->base.screen = screen_create(&ws->base);
   if (!ws->base.screen) {
      amdgpu_winsys_destroy(&ws->base);
      pipe_mutex_unlock(dev_tab_mutex);
      return NULL;
   }

   util_hash_table_set(dev_tab, dev, ws);

   /* We must unlock the mutex once the winsys is fully initialized, so that
    * other threads attempting to create the winsys from the same fd will
    * get a fully initialized winsys and not just half-way initialized. */
   pipe_mutex_unlock(dev_tab_mutex);

   return &ws->base;

fail_cache:
   pb_cache_deinit(&ws->bo_cache);
   do_winsys_deinit(ws);
fail_alloc:
   FREE(ws);
fail:
   pipe_mutex_unlock(dev_tab_mutex);
   return NULL;
638 }