/*
 * Copyright © 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
 * Copyright © 2009 Joakim Sindholt <opensource@zhasha.com>
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */
34 #include "amdgpu_cs.h"
35 #include "amdgpu_public.h"
37 #include "util/u_hash_table.h"
38 #include <amdgpu_drm.h>
42 #include "amdgpu_id.h"
/* Decoding of the CIK (Sea Islands) GB_TILE_MODE register: the index of the
 * 2D color tiling mode, and the PIPE_CONFIG field values used to derive the
 * number of tile pipes. */
#define CIK_TILE_MODE_COLOR_2D                           14

#define CIK__GB_TILE_MODE__PIPE_CONFIG(x)        (((x) >> 6) & 0x1f)
#define     CIK__PIPE_CONFIG__ADDR_SURF_P2               0
#define     CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16          4
#define     CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16         5
#define     CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32         6
#define     CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32         7
#define     CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16    8
#define     CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16    9
#define     CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16    10
#define     CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16   11
#define     CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16   12
#define     CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32   13
#define     CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32   14
#define     CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16   16
#define     CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16  17
62 static struct util_hash_table
*dev_tab
= NULL
;
63 pipe_static_mutex(dev_tab_mutex
);
65 static unsigned cik_get_num_tile_pipes(struct amdgpu_gpu_info
*info
)
67 unsigned mode2d
= info
->gb_tile_mode
[CIK_TILE_MODE_COLOR_2D
];
69 switch (CIK__GB_TILE_MODE__PIPE_CONFIG(mode2d
)) {
70 case CIK__PIPE_CONFIG__ADDR_SURF_P2
:
73 case CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16
:
74 case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16
:
75 case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32
:
76 case CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32
:
78 case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16
:
79 case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16
:
80 case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16
:
81 case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16
:
82 case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16
:
83 case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32
:
84 case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32
:
86 case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16
:
87 case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16
:
92 /* Convert Sea Islands register values GB_ADDR_CFG and MC_ADDR_CFG
93 * into GB_TILING_CONFIG register which is only present on R600-R700. */
94 static unsigned r600_get_gb_tiling_config(struct amdgpu_gpu_info
*info
)
96 unsigned num_pipes
= info
->gb_addr_cfg
& 0x7;
97 unsigned num_banks
= info
->mc_arb_ramcfg
& 0x3;
98 unsigned pipe_interleave_bytes
= (info
->gb_addr_cfg
>> 4) & 0x7;
99 unsigned row_size
= (info
->gb_addr_cfg
>> 28) & 0x3;
101 return num_pipes
| (num_banks
<< 4) |
102 (pipe_interleave_bytes
<< 8) |
106 /* Helper function to do the ioctls needed for setup and init. */
107 static boolean
do_winsys_init(struct amdgpu_winsys
*ws
)
109 struct amdgpu_buffer_size_alignments alignment_info
= {};
110 struct amdgpu_heap_info vram
, gtt
;
111 struct drm_amdgpu_info_hw_ip dma
= {}, uvd
= {}, vce
= {};
112 uint32_t vce_version
= 0, vce_feature
= 0;
115 /* Query hardware and driver information. */
116 r
= amdgpu_query_gpu_info(ws
->dev
, &ws
->amdinfo
);
118 fprintf(stderr
, "amdgpu: amdgpu_query_gpu_info failed.\n");
122 r
= amdgpu_query_buffer_size_alignment(ws
->dev
, &alignment_info
);
124 fprintf(stderr
, "amdgpu: amdgpu_query_buffer_size_alignment failed.\n");
128 r
= amdgpu_query_heap_info(ws
->dev
, AMDGPU_GEM_DOMAIN_VRAM
, 0, &vram
);
130 fprintf(stderr
, "amdgpu: amdgpu_query_heap_info(vram) failed.\n");
134 r
= amdgpu_query_heap_info(ws
->dev
, AMDGPU_GEM_DOMAIN_GTT
, 0, >t
);
136 fprintf(stderr
, "amdgpu: amdgpu_query_heap_info(gtt) failed.\n");
140 r
= amdgpu_query_hw_ip_info(ws
->dev
, AMDGPU_HW_IP_DMA
, 0, &dma
);
142 fprintf(stderr
, "amdgpu: amdgpu_query_hw_ip_info(dma) failed.\n");
146 r
= amdgpu_query_hw_ip_info(ws
->dev
, AMDGPU_HW_IP_UVD
, 0, &uvd
);
148 fprintf(stderr
, "amdgpu: amdgpu_query_hw_ip_info(uvd) failed.\n");
152 r
= amdgpu_query_hw_ip_info(ws
->dev
, AMDGPU_HW_IP_VCE
, 0, &vce
);
154 fprintf(stderr
, "amdgpu: amdgpu_query_hw_ip_info(vce) failed.\n");
158 r
= amdgpu_query_firmware_version(ws
->dev
, AMDGPU_INFO_FW_VCE
, 0, 0,
159 &vce_version
, &vce_feature
);
161 fprintf(stderr
, "amdgpu: amdgpu_query_firmware_version(vce) failed.\n");
165 /* Set chip identification. */
166 ws
->info
.pci_id
= ws
->amdinfo
.asic_id
; /* TODO: is this correct? */
167 ws
->info
.vce_harvest_config
= ws
->amdinfo
.vce_harvest_config
;
169 switch (ws
->info
.pci_id
) {
170 #define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; break;
171 #include "pci_ids/radeonsi_pci_ids.h"
175 fprintf(stderr
, "amdgpu: Invalid PCI ID.\n");
179 if (ws
->info
.family
>= CHIP_TONGA
)
180 ws
->info
.chip_class
= VI
;
181 else if (ws
->info
.family
>= CHIP_BONAIRE
)
182 ws
->info
.chip_class
= CIK
;
184 fprintf(stderr
, "amdgpu: Unknown family.\n");
188 /* LLVM 3.6 is required for VI. */
189 if (ws
->info
.chip_class
>= VI
&&
190 (HAVE_LLVM
< 0x0306 ||
191 (HAVE_LLVM
== 0x0306 && MESA_LLVM_VERSION_PATCH
< 1))) {
192 fprintf(stderr
, "amdgpu: LLVM 3.6.1 is required, got LLVM %i.%i.%i\n",
193 HAVE_LLVM
>> 8, HAVE_LLVM
& 255, MESA_LLVM_VERSION_PATCH
);
197 /* family and rev_id are for addrlib */
198 switch (ws
->info
.family
) {
200 ws
->family
= FAMILY_CI
;
201 ws
->rev_id
= CI_BONAIRE_M_A0
;
204 ws
->family
= FAMILY_KV
;
205 ws
->rev_id
= KV_SPECTRE_A0
;
208 ws
->family
= FAMILY_KV
;
209 ws
->rev_id
= KB_KALINDI_A0
;
212 ws
->family
= FAMILY_CI
;
213 ws
->rev_id
= CI_HAWAII_P_A0
;
216 ws
->family
= FAMILY_KV
;
217 ws
->rev_id
= ML_GODAVARI_A0
;
220 ws
->family
= FAMILY_VI
;
221 ws
->rev_id
= VI_TONGA_P_A0
;
224 ws
->family
= FAMILY_VI
;
225 ws
->rev_id
= VI_ICELAND_M_A0
;
228 ws
->family
= FAMILY_CZ
;
229 ws
->rev_id
= CZ_CARRIZO_A0
;
232 ws
->family
= FAMILY_VI
;
233 ws
->rev_id
= VI_FIJI_P_A0
;
236 fprintf(stderr
, "amdgpu: Unknown family.\n");
240 ws
->addrlib
= amdgpu_addr_create(ws
);
242 fprintf(stderr
, "amdgpu: Cannot create addrlib.\n");
246 /* Set hardware information. */
247 ws
->info
.gart_size
= gtt
.heap_size
;
248 ws
->info
.vram_size
= vram
.heap_size
;
249 /* convert the shader clock from KHz to MHz */
250 ws
->info
.max_sclk
= ws
->amdinfo
.max_engine_clk
/ 1000;
251 ws
->info
.max_se
= ws
->amdinfo
.num_shader_engines
;
252 ws
->info
.max_sh_per_se
= ws
->amdinfo
.num_shader_arrays_per_engine
;
253 ws
->info
.has_uvd
= uvd
.available_rings
!= 0;
254 ws
->info
.vce_fw_version
=
255 vce
.available_rings
? vce_version
: 0;
256 ws
->info
.has_userptr
= TRUE
;
257 ws
->info
.r600_num_backends
= ws
->amdinfo
.rb_pipes
;
258 ws
->info
.r600_clock_crystal_freq
= ws
->amdinfo
.gpu_counter_freq
;
259 ws
->info
.r600_tiling_config
= r600_get_gb_tiling_config(&ws
->amdinfo
);
260 ws
->info
.r600_num_tile_pipes
= cik_get_num_tile_pipes(&ws
->amdinfo
);
261 ws
->info
.r600_max_pipes
= ws
->amdinfo
.max_quad_shader_pipes
; /* TODO: is this correct? */
262 ws
->info
.r600_virtual_address
= TRUE
;
263 ws
->info
.r600_has_dma
= dma
.available_rings
!= 0;
265 /* Guess what the maximum compute unit number is by looking at the mask
268 for (i
= 0; i
< ws
->info
.max_se
; i
++)
269 for (j
= 0; j
< ws
->info
.max_sh_per_se
; j
++) {
270 unsigned max
= util_last_bit(ws
->amdinfo
.cu_bitmap
[i
][j
]);
272 if (ws
->info
.max_compute_units
< max
)
273 ws
->info
.max_compute_units
= max
;
275 ws
->info
.max_compute_units
*= ws
->info
.max_se
* ws
->info
.max_sh_per_se
;
277 memcpy(ws
->info
.si_tile_mode_array
, ws
->amdinfo
.gb_tile_mode
,
278 sizeof(ws
->amdinfo
.gb_tile_mode
));
279 ws
->info
.si_tile_mode_array_valid
= TRUE
;
280 ws
->info
.si_backend_enabled_mask
= ws
->amdinfo
.enabled_rb_pipes_mask
;
282 memcpy(ws
->info
.cik_macrotile_mode_array
, ws
->amdinfo
.gb_macro_tile_mode
,
283 sizeof(ws
->amdinfo
.gb_macro_tile_mode
));
284 ws
->info
.cik_macrotile_mode_array_valid
= TRUE
;
286 ws
->gart_page_size
= alignment_info
.size_remote
;
292 AddrDestroy(ws
->addrlib
);
293 amdgpu_device_deinitialize(ws
->dev
);
298 static void amdgpu_winsys_destroy(struct radeon_winsys
*rws
)
300 struct amdgpu_winsys
*ws
= (struct amdgpu_winsys
*)rws
;
302 pipe_mutex_destroy(ws
->bo_fence_lock
);
304 ws
->cman
->destroy(ws
->cman
);
305 ws
->kman
->destroy(ws
->kman
);
306 AddrDestroy(ws
->addrlib
);
308 amdgpu_device_deinitialize(ws
->dev
);
312 static void amdgpu_winsys_query_info(struct radeon_winsys
*rws
,
313 struct radeon_info
*info
)
315 *info
= ((struct amdgpu_winsys
*)rws
)->info
;
318 static boolean
amdgpu_cs_request_feature(struct radeon_winsys_cs
*rcs
,
319 enum radeon_feature_id fid
,
325 static uint64_t amdgpu_query_value(struct radeon_winsys
*rws
,
326 enum radeon_value_id value
)
328 struct amdgpu_winsys
*ws
= (struct amdgpu_winsys
*)rws
;
329 struct amdgpu_heap_info heap
;
333 case RADEON_REQUESTED_VRAM_MEMORY
:
334 return ws
->allocated_vram
;
335 case RADEON_REQUESTED_GTT_MEMORY
:
336 return ws
->allocated_gtt
;
337 case RADEON_BUFFER_WAIT_TIME_NS
:
338 return ws
->buffer_wait_time
;
339 case RADEON_TIMESTAMP
:
340 amdgpu_query_info(ws
->dev
, AMDGPU_INFO_TIMESTAMP
, 8, &retval
);
342 case RADEON_NUM_CS_FLUSHES
:
343 return ws
->num_cs_flushes
;
344 case RADEON_NUM_BYTES_MOVED
:
345 amdgpu_query_info(ws
->dev
, AMDGPU_INFO_NUM_BYTES_MOVED
, 8, &retval
);
347 case RADEON_VRAM_USAGE
:
348 amdgpu_query_heap_info(ws
->dev
, AMDGPU_GEM_DOMAIN_VRAM
, 0, &heap
);
349 return heap
.heap_usage
;
350 case RADEON_GTT_USAGE
:
351 amdgpu_query_heap_info(ws
->dev
, AMDGPU_GEM_DOMAIN_GTT
, 0, &heap
);
352 return heap
.heap_usage
;
353 case RADEON_GPU_TEMPERATURE
:
354 case RADEON_CURRENT_SCLK
:
355 case RADEON_CURRENT_MCLK
:
357 case RADEON_GPU_RESET_COUNTER
:
364 static bool amdgpu_read_registers(struct radeon_winsys
*rws
,
366 unsigned num_registers
, uint32_t *out
)
368 struct amdgpu_winsys
*ws
= (struct amdgpu_winsys
*)rws
;
370 return amdgpu_read_mm_registers(ws
->dev
, reg_offset
/ 4, num_registers
,
371 0xffffffff, 0, out
) == 0;
/* Hash callback for dev_tab: hash the device handle pointer itself. */
static unsigned hash_dev(void *key)
{
#if defined(PIPE_ARCH_X86_64)
   /* Fold the upper 32 pointer bits into the lower ones on 64-bit. */
   return pointer_to_intptr(key) ^ (pointer_to_intptr(key) >> 32);
#else
   return pointer_to_intptr(key);
#endif
}
/* Key-compare callback for dev_tab: device handles are compared by pointer
 * identity. Returns 0 when the keys are equal (util_hash_table convention). */
static int compare_dev(void *key1, void *key2)
{
   return key1 != key2;
}
388 static bool amdgpu_winsys_unref(struct radeon_winsys
*ws
)
390 struct amdgpu_winsys
*rws
= (struct amdgpu_winsys
*)ws
;
393 /* When the reference counter drops to zero, remove the device pointer
395 * This must happen while the mutex is locked, so that
396 * amdgpu_winsys_create in another thread doesn't get the winsys
397 * from the table when the counter drops to 0. */
398 pipe_mutex_lock(dev_tab_mutex
);
400 destroy
= pipe_reference(&rws
->reference
, NULL
);
401 if (destroy
&& dev_tab
)
402 util_hash_table_remove(dev_tab
, rws
->dev
);
404 pipe_mutex_unlock(dev_tab_mutex
);
408 PUBLIC
struct radeon_winsys
*
409 amdgpu_winsys_create(int fd
, radeon_screen_create_t screen_create
)
411 struct amdgpu_winsys
*ws
;
412 drmVersionPtr version
= drmGetVersion(fd
);
413 amdgpu_device_handle dev
;
414 uint32_t drm_major
, drm_minor
, r
;
416 /* The DRM driver version of amdgpu is 3.x.x. */
417 if (version
->version_major
!= 3) {
418 drmFreeVersion(version
);
421 drmFreeVersion(version
);
423 /* Look up the winsys from the dev table. */
424 pipe_mutex_lock(dev_tab_mutex
);
426 dev_tab
= util_hash_table_create(hash_dev
, compare_dev
);
428 /* Initialize the amdgpu device. This should always return the same pointer
429 * for the same fd. */
430 r
= amdgpu_device_initialize(fd
, &drm_major
, &drm_minor
, &dev
);
432 pipe_mutex_unlock(dev_tab_mutex
);
433 fprintf(stderr
, "amdgpu: amdgpu_device_initialize failed.\n");
437 /* Lookup a winsys if we have already created one for this device. */
438 ws
= util_hash_table_get(dev_tab
, dev
);
440 pipe_reference(NULL
, &ws
->reference
);
441 pipe_mutex_unlock(dev_tab_mutex
);
445 /* Create a new winsys. */
446 ws
= CALLOC_STRUCT(amdgpu_winsys
);
448 pipe_mutex_unlock(dev_tab_mutex
);
453 ws
->info
.drm_major
= drm_major
;
454 ws
->info
.drm_minor
= drm_minor
;
456 if (!do_winsys_init(ws
))
459 /* Create managers. */
460 ws
->kman
= amdgpu_bomgr_create(ws
);
463 ws
->cman
= pb_cache_manager_create(ws
->kman
, 500000, 2.0f
, 0,
464 (ws
->info
.vram_size
+ ws
->info
.gart_size
) / 8);
469 pipe_reference_init(&ws
->reference
, 1);
472 ws
->base
.unref
= amdgpu_winsys_unref
;
473 ws
->base
.destroy
= amdgpu_winsys_destroy
;
474 ws
->base
.query_info
= amdgpu_winsys_query_info
;
475 ws
->base
.cs_request_feature
= amdgpu_cs_request_feature
;
476 ws
->base
.query_value
= amdgpu_query_value
;
477 ws
->base
.read_registers
= amdgpu_read_registers
;
479 amdgpu_bomgr_init_functions(ws
);
480 amdgpu_cs_init_functions(ws
);
481 amdgpu_surface_init_functions(ws
);
483 pipe_mutex_init(ws
->bo_fence_lock
);
485 /* Create the screen at the end. The winsys must be initialized
488 * Alternatively, we could create the screen based on "ws->gen"
489 * and link all drivers into one binary blob. */
490 ws
->base
.screen
= screen_create(&ws
->base
);
491 if (!ws
->base
.screen
) {
492 amdgpu_winsys_destroy(&ws
->base
);
493 pipe_mutex_unlock(dev_tab_mutex
);
497 util_hash_table_set(dev_tab
, dev
, ws
);
499 /* We must unlock the mutex once the winsys is fully initialized, so that
500 * other threads attempting to create the winsys from the same fd will
501 * get a fully initialized winsys and not just half-way initialized. */
502 pipe_mutex_unlock(dev_tab_mutex
);
507 pipe_mutex_unlock(dev_tab_mutex
);
509 ws
->cman
->destroy(ws
->cman
);
511 ws
->kman
->destroy(ws
->kman
);