2 * Copyright © 2017 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
13 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
14 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
15 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
16 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
26 #include "ac_gpu_info.h"
28 #include "addrlib/src/amdgpu_asic_addr.h"
29 #include "drm-uapi/amdgpu_drm.h"
31 #include "util/macros.h"
32 #include "util/u_math.h"
38 #define CIK_TILE_MODE_COLOR_2D 14
40 #define CIK__GB_TILE_MODE__PIPE_CONFIG(x) (((x) >> 6) & 0x1f)
41 #define CIK__PIPE_CONFIG__ADDR_SURF_P2 0
42 #define CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16 4
43 #define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16 5
44 #define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32 6
45 #define CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32 7
46 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16 8
47 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16 9
48 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16 10
49 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16 11
50 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16 12
51 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32 13
52 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32 14
53 #define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16 16
54 #define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16 17
56 static unsigned cik_get_num_tile_pipes(struct amdgpu_gpu_info
*info
)
58 unsigned mode2d
= info
->gb_tile_mode
[CIK_TILE_MODE_COLOR_2D
];
60 switch (CIK__GB_TILE_MODE__PIPE_CONFIG(mode2d
)) {
61 case CIK__PIPE_CONFIG__ADDR_SURF_P2
:
63 case CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16
:
64 case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16
:
65 case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32
:
66 case CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32
:
68 case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16
:
69 case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16
:
70 case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16
:
71 case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16
:
72 case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16
:
73 case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32
:
74 case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32
:
76 case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16
:
77 case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16
:
80 fprintf(stderr
, "Invalid GFX7 pipe configuration, assuming P2\n");
81 assert(!"this should never occur");
86 static bool has_syncobj(int fd
)
89 if (drmGetCap(fd
, DRM_CAP_SYNCOBJ
, &value
))
91 return value
? true : false;
94 static bool has_timeline_syncobj(int fd
)
97 if (drmGetCap(fd
, DRM_CAP_SYNCOBJ_TIMELINE
, &value
))
99 return value
? true : false;
/* Round a reported VRAM size up to a multiple of 256 MiB.
 *
 * The VRAM size is underreported by the kernel, so we need to fix it,
 * because it's used to compute the number of memory modules for harvesting.
 */
static uint64_t fix_vram_size(uint64_t size)
{
   return align64(size, 256 * 1024 * 1024);
}
/* Return the L2 cache size in bytes for the given GPU family.
 * NOTE(review): only the signature is visible here — the per-family body
 * appears to have been lost from this view of the file; confirm against
 * the full source before relying on specific values. */
110 static uint32_t get_l2_cache_size(enum radeon_family family
)
143 bool ac_query_gpu_info(int fd
, void *dev_p
, struct radeon_info
*info
,
144 struct amdgpu_gpu_info
*amdinfo
)
146 struct drm_amdgpu_info_device device_info
= {};
147 struct amdgpu_buffer_size_alignments alignment_info
= {};
148 struct drm_amdgpu_info_hw_ip dma
= {}, compute
= {}, uvd
= {};
149 struct drm_amdgpu_info_hw_ip uvd_enc
= {}, vce
= {}, vcn_dec
= {}, vcn_jpeg
= {};
150 struct drm_amdgpu_info_hw_ip vcn_enc
= {}, gfx
= {};
151 struct amdgpu_gds_resource_info gds
= {};
152 uint32_t vce_version
= 0, vce_feature
= 0, uvd_version
= 0, uvd_feature
= 0;
154 amdgpu_device_handle dev
= dev_p
;
155 drmDevicePtr devinfo
;
158 r
= drmGetDevice2(fd
, 0, &devinfo
);
160 fprintf(stderr
, "amdgpu: drmGetDevice2 failed.\n");
163 info
->pci_domain
= devinfo
->businfo
.pci
->domain
;
164 info
->pci_bus
= devinfo
->businfo
.pci
->bus
;
165 info
->pci_dev
= devinfo
->businfo
.pci
->dev
;
166 info
->pci_func
= devinfo
->businfo
.pci
->func
;
167 drmFreeDevice(&devinfo
);
169 assert(info
->drm_major
== 3);
170 info
->is_amdgpu
= true;
172 /* Query hardware and driver information. */
173 r
= amdgpu_query_gpu_info(dev
, amdinfo
);
175 fprintf(stderr
, "amdgpu: amdgpu_query_gpu_info failed.\n");
179 r
= amdgpu_query_info(dev
, AMDGPU_INFO_DEV_INFO
, sizeof(device_info
), &device_info
);
181 fprintf(stderr
, "amdgpu: amdgpu_query_info(dev_info) failed.\n");
185 r
= amdgpu_query_buffer_size_alignment(dev
, &alignment_info
);
187 fprintf(stderr
, "amdgpu: amdgpu_query_buffer_size_alignment failed.\n");
191 r
= amdgpu_query_hw_ip_info(dev
, AMDGPU_HW_IP_DMA
, 0, &dma
);
193 fprintf(stderr
, "amdgpu: amdgpu_query_hw_ip_info(dma) failed.\n");
197 r
= amdgpu_query_hw_ip_info(dev
, AMDGPU_HW_IP_GFX
, 0, &gfx
);
199 fprintf(stderr
, "amdgpu: amdgpu_query_hw_ip_info(gfx) failed.\n");
203 r
= amdgpu_query_hw_ip_info(dev
, AMDGPU_HW_IP_COMPUTE
, 0, &compute
);
205 fprintf(stderr
, "amdgpu: amdgpu_query_hw_ip_info(compute) failed.\n");
209 r
= amdgpu_query_hw_ip_info(dev
, AMDGPU_HW_IP_UVD
, 0, &uvd
);
211 fprintf(stderr
, "amdgpu: amdgpu_query_hw_ip_info(uvd) failed.\n");
215 if (info
->drm_minor
>= 17) {
216 r
= amdgpu_query_hw_ip_info(dev
, AMDGPU_HW_IP_UVD_ENC
, 0, &uvd_enc
);
218 fprintf(stderr
, "amdgpu: amdgpu_query_hw_ip_info(uvd_enc) failed.\n");
223 if (info
->drm_minor
>= 17) {
224 r
= amdgpu_query_hw_ip_info(dev
, AMDGPU_HW_IP_VCN_DEC
, 0, &vcn_dec
);
226 fprintf(stderr
, "amdgpu: amdgpu_query_hw_ip_info(vcn_dec) failed.\n");
231 if (info
->drm_minor
>= 17) {
232 r
= amdgpu_query_hw_ip_info(dev
, AMDGPU_HW_IP_VCN_ENC
, 0, &vcn_enc
);
234 fprintf(stderr
, "amdgpu: amdgpu_query_hw_ip_info(vcn_enc) failed.\n");
239 if (info
->drm_minor
>= 27) {
240 r
= amdgpu_query_hw_ip_info(dev
, AMDGPU_HW_IP_VCN_JPEG
, 0, &vcn_jpeg
);
242 fprintf(stderr
, "amdgpu: amdgpu_query_hw_ip_info(vcn_jpeg) failed.\n");
247 r
= amdgpu_query_firmware_version(dev
, AMDGPU_INFO_FW_GFX_ME
, 0, 0, &info
->me_fw_version
,
248 &info
->me_fw_feature
);
250 fprintf(stderr
, "amdgpu: amdgpu_query_firmware_version(me) failed.\n");
254 r
= amdgpu_query_firmware_version(dev
, AMDGPU_INFO_FW_GFX_PFP
, 0, 0, &info
->pfp_fw_version
,
255 &info
->pfp_fw_feature
);
257 fprintf(stderr
, "amdgpu: amdgpu_query_firmware_version(pfp) failed.\n");
261 r
= amdgpu_query_firmware_version(dev
, AMDGPU_INFO_FW_GFX_CE
, 0, 0, &info
->ce_fw_version
,
262 &info
->ce_fw_feature
);
264 fprintf(stderr
, "amdgpu: amdgpu_query_firmware_version(ce) failed.\n");
268 r
= amdgpu_query_firmware_version(dev
, AMDGPU_INFO_FW_UVD
, 0, 0, &uvd_version
, &uvd_feature
);
270 fprintf(stderr
, "amdgpu: amdgpu_query_firmware_version(uvd) failed.\n");
274 r
= amdgpu_query_hw_ip_info(dev
, AMDGPU_HW_IP_VCE
, 0, &vce
);
276 fprintf(stderr
, "amdgpu: amdgpu_query_hw_ip_info(vce) failed.\n");
280 r
= amdgpu_query_firmware_version(dev
, AMDGPU_INFO_FW_VCE
, 0, 0, &vce_version
, &vce_feature
);
282 fprintf(stderr
, "amdgpu: amdgpu_query_firmware_version(vce) failed.\n");
286 r
= amdgpu_query_sw_info(dev
, amdgpu_sw_info_address32_hi
, &info
->address32_hi
);
288 fprintf(stderr
, "amdgpu: amdgpu_query_sw_info(address32_hi) failed.\n");
292 r
= amdgpu_query_gds_info(dev
, &gds
);
294 fprintf(stderr
, "amdgpu: amdgpu_query_gds_info failed.\n");
298 if (info
->drm_minor
>= 9) {
299 struct drm_amdgpu_memory_info meminfo
= {};
301 r
= amdgpu_query_info(dev
, AMDGPU_INFO_MEMORY
, sizeof(meminfo
), &meminfo
);
303 fprintf(stderr
, "amdgpu: amdgpu_query_info(memory) failed.\n");
307 /* Note: usable_heap_size values can be random and can't be relied on. */
308 info
->gart_size
= meminfo
.gtt
.total_heap_size
;
309 info
->vram_size
= fix_vram_size(meminfo
.vram
.total_heap_size
);
310 info
->vram_vis_size
= meminfo
.cpu_accessible_vram
.total_heap_size
;
312 /* This is a deprecated interface, which reports usable sizes
313 * (total minus pinned), but the pinned size computation is
314 * buggy, so the values returned from these functions can be
317 struct amdgpu_heap_info vram
, vram_vis
, gtt
;
319 r
= amdgpu_query_heap_info(dev
, AMDGPU_GEM_DOMAIN_VRAM
, 0, &vram
);
321 fprintf(stderr
, "amdgpu: amdgpu_query_heap_info(vram) failed.\n");
325 r
= amdgpu_query_heap_info(dev
, AMDGPU_GEM_DOMAIN_VRAM
, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
,
328 fprintf(stderr
, "amdgpu: amdgpu_query_heap_info(vram_vis) failed.\n");
332 r
= amdgpu_query_heap_info(dev
, AMDGPU_GEM_DOMAIN_GTT
, 0, >t
);
334 fprintf(stderr
, "amdgpu: amdgpu_query_heap_info(gtt) failed.\n");
338 info
->gart_size
= gtt
.heap_size
;
339 info
->vram_size
= fix_vram_size(vram
.heap_size
);
340 info
->vram_vis_size
= vram_vis
.heap_size
;
343 /* Set chip identification. */
344 info
->pci_id
= amdinfo
->asic_id
; /* TODO: is this correct? */
345 info
->pci_rev_id
= amdinfo
->pci_rev_id
;
346 info
->vce_harvest_config
= amdinfo
->vce_harvest_config
;
348 #define identify_chip2(asic, chipname) \
349 if (ASICREV_IS(amdinfo->chip_external_rev, asic)) { \
350 info->family = CHIP_##chipname; \
351 info->name = #chipname; \
353 #define identify_chip(chipname) identify_chip2(chipname, chipname)
355 switch (amdinfo
->family_id
) {
357 identify_chip(TAHITI
);
358 identify_chip(PITCAIRN
);
359 identify_chip2(CAPEVERDE
, VERDE
);
360 identify_chip(OLAND
);
361 identify_chip(HAINAN
);
364 identify_chip(BONAIRE
);
365 identify_chip(HAWAII
);
368 identify_chip2(SPECTRE
, KAVERI
);
369 identify_chip2(SPOOKY
, KAVERI
);
370 identify_chip2(KALINDI
, KABINI
);
371 identify_chip2(GODAVARI
, KABINI
);
374 identify_chip(ICELAND
);
375 identify_chip(TONGA
);
377 identify_chip(POLARIS10
);
378 identify_chip(POLARIS11
);
379 identify_chip(POLARIS12
);
380 identify_chip(VEGAM
);
383 identify_chip(CARRIZO
);
384 identify_chip(STONEY
);
387 identify_chip(VEGA10
);
388 identify_chip(VEGA12
);
389 identify_chip(VEGA20
);
390 identify_chip(ARCTURUS
);
393 identify_chip(RAVEN
);
394 identify_chip(RAVEN2
);
395 identify_chip(RENOIR
);
398 identify_chip(NAVI10
);
399 identify_chip(NAVI12
);
400 identify_chip(NAVI14
);
401 identify_chip(SIENNA_CICHLID
);
402 identify_chip(NAVY_FLOUNDER
);
407 fprintf(stderr
, "amdgpu: unknown (family_id, chip_external_rev): (%u, %u)\n",
408 amdinfo
->family_id
, amdinfo
->chip_external_rev
);
412 if (info
->family
>= CHIP_SIENNA_CICHLID
)
413 info
->chip_class
= GFX10_3
;
414 else if (info
->family
>= CHIP_NAVI10
)
415 info
->chip_class
= GFX10
;
416 else if (info
->family
>= CHIP_VEGA10
)
417 info
->chip_class
= GFX9
;
418 else if (info
->family
>= CHIP_TONGA
)
419 info
->chip_class
= GFX8
;
420 else if (info
->family
>= CHIP_BONAIRE
)
421 info
->chip_class
= GFX7
;
422 else if (info
->family
>= CHIP_TAHITI
)
423 info
->chip_class
= GFX6
;
425 fprintf(stderr
, "amdgpu: Unknown family.\n");
429 info
->family_id
= amdinfo
->family_id
;
430 info
->chip_external_rev
= amdinfo
->chip_external_rev
;
431 info
->marketing_name
= amdgpu_get_marketing_name(dev
);
432 info
->is_pro_graphics
= info
->marketing_name
&& (!strcmp(info
->marketing_name
, "Pro") ||
433 !strcmp(info
->marketing_name
, "PRO") ||
434 !strcmp(info
->marketing_name
, "Frontier"));
436 /* Set which chips have dedicated VRAM. */
437 info
->has_dedicated_vram
= !(amdinfo
->ids_flags
& AMDGPU_IDS_FLAGS_FUSION
);
439 /* The kernel can split large buffers in VRAM but not in GTT, so large
440 * allocations can fail or cause buffer movement failures in the kernel.
442 if (info
->has_dedicated_vram
)
443 info
->max_alloc_size
= info
->vram_size
* 0.8;
445 info
->max_alloc_size
= info
->gart_size
* 0.7;
447 info
->vram_type
= amdinfo
->vram_type
;
448 info
->vram_bit_width
= amdinfo
->vram_bit_width
;
449 info
->ce_ram_size
= amdinfo
->ce_ram_size
;
451 info
->l2_cache_size
= get_l2_cache_size(info
->family
);
452 info
->l1_cache_size
= 16384;
454 /* Set which chips have uncached device memory. */
455 info
->has_l2_uncached
= info
->chip_class
>= GFX9
;
457 /* Set hardware information. */
458 info
->gds_size
= gds
.gds_total_size
;
459 info
->gds_gfx_partition_size
= gds
.gds_gfx_partition_size
;
460 /* convert the shader/memory clocks from KHz to MHz */
461 info
->max_shader_clock
= amdinfo
->max_engine_clk
/ 1000;
462 info
->max_memory_clock
= amdinfo
->max_memory_clk
/ 1000;
463 info
->num_tcc_blocks
= device_info
.num_tcc_blocks
;
464 info
->max_se
= amdinfo
->num_shader_engines
;
465 info
->max_sh_per_se
= amdinfo
->num_shader_arrays_per_engine
;
466 info
->has_hw_decode
= (uvd
.available_rings
!= 0) || (vcn_dec
.available_rings
!= 0) ||
467 (vcn_jpeg
.available_rings
!= 0);
468 info
->uvd_fw_version
= uvd
.available_rings
? uvd_version
: 0;
469 info
->vce_fw_version
= vce
.available_rings
? vce_version
: 0;
470 info
->uvd_enc_supported
= uvd_enc
.available_rings
? true : false;
471 info
->has_userptr
= true;
472 info
->has_syncobj
= has_syncobj(fd
);
473 info
->has_timeline_syncobj
= has_timeline_syncobj(fd
);
474 info
->has_syncobj_wait_for_submit
= info
->has_syncobj
&& info
->drm_minor
>= 20;
475 info
->has_fence_to_handle
= info
->has_syncobj
&& info
->drm_minor
>= 21;
476 info
->has_ctx_priority
= info
->drm_minor
>= 22;
477 info
->has_local_buffers
= info
->drm_minor
>= 20;
478 info
->kernel_flushes_hdp_before_ib
= true;
479 info
->htile_cmask_support_1d_tiling
= true;
480 info
->si_TA_CS_BC_BASE_ADDR_allowed
= true;
481 info
->has_bo_metadata
= true;
482 info
->has_gpu_reset_status_query
= true;
483 info
->has_eqaa_surface_allocator
= true;
484 info
->has_format_bc1_through_bc7
= true;
485 /* DRM 3.1.0 doesn't flush TC for GFX8 correctly. */
486 info
->kernel_flushes_tc_l2_after_ib
= info
->chip_class
!= GFX8
|| info
->drm_minor
>= 2;
487 info
->has_indirect_compute_dispatch
= true;
488 /* GFX6 doesn't support unaligned loads. */
489 info
->has_unaligned_shader_loads
= info
->chip_class
!= GFX6
;
490 /* Disable sparse mappings on GFX6 due to VM faults in CP DMA. Enable them once
491 * these faults are mitigated in software.
493 info
->has_sparse_vm_mappings
= info
->chip_class
>= GFX7
&& info
->drm_minor
>= 13;
494 info
->has_2d_tiling
= true;
495 info
->has_read_registers_query
= true;
496 info
->has_scheduled_fence_dependency
= info
->drm_minor
>= 28;
497 info
->mid_command_buffer_preemption_enabled
= amdinfo
->ids_flags
& AMDGPU_IDS_FLAGS_PREEMPTION
;
499 info
->pa_sc_tile_steering_override
= device_info
.pa_sc_tile_steering_override
;
500 info
->num_render_backends
= amdinfo
->rb_pipes
;
501 /* The value returned by the kernel driver was wrong. */
502 if (info
->family
== CHIP_KAVERI
)
503 info
->num_render_backends
= 2;
505 info
->clock_crystal_freq
= amdinfo
->gpu_counter_freq
;
506 if (!info
->clock_crystal_freq
) {
507 fprintf(stderr
, "amdgpu: clock crystal frequency is 0, timestamps will be wrong\n");
508 info
->clock_crystal_freq
= 1;
510 if (info
->chip_class
>= GFX10
) {
511 info
->tcc_cache_line_size
= 128;
513 if (info
->drm_minor
>= 35) {
514 info
->tcc_harvested
= device_info
.tcc_disabled_mask
!= 0;
516 /* This is a hack, but it's all we can do without a kernel upgrade. */
517 info
->tcc_harvested
= (info
->vram_size
/ info
->num_tcc_blocks
) != 512 * 1024 * 1024;
520 info
->tcc_cache_line_size
= 64;
522 info
->gb_addr_config
= amdinfo
->gb_addr_cfg
;
523 if (info
->chip_class
>= GFX9
) {
524 info
->num_tile_pipes
= 1 << G_0098F8_NUM_PIPES(amdinfo
->gb_addr_cfg
);
525 info
->pipe_interleave_bytes
= 256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(amdinfo
->gb_addr_cfg
);
527 info
->num_tile_pipes
= cik_get_num_tile_pipes(amdinfo
);
528 info
->pipe_interleave_bytes
= 256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX6(amdinfo
->gb_addr_cfg
);
530 info
->r600_has_virtual_memory
= true;
532 /* LDS is 64KB per CU (4 SIMDs), which is 16KB per SIMD (usage above
533 * 16KB makes some SIMDs unoccupied).
535 * LDS is 128KB in WGP mode and 64KB in CU mode. Assume the WGP mode is used.
537 info
->lds_size_per_workgroup
= info
->chip_class
>= GFX10
? 128 * 1024 : 64 * 1024;
538 info
->lds_granularity
= info
->chip_class
>= GFX7
? 128 * 4 : 64 * 4;
540 assert(util_is_power_of_two_or_zero(dma
.available_rings
+ 1));
541 assert(util_is_power_of_two_or_zero(compute
.available_rings
+ 1));
543 info
->has_graphics
= gfx
.available_rings
> 0;
544 info
->num_rings
[RING_GFX
] = util_bitcount(gfx
.available_rings
);
545 info
->num_rings
[RING_COMPUTE
] = util_bitcount(compute
.available_rings
);
546 info
->num_rings
[RING_DMA
] = util_bitcount(dma
.available_rings
);
547 info
->num_rings
[RING_UVD
] = util_bitcount(uvd
.available_rings
);
548 info
->num_rings
[RING_VCE
] = util_bitcount(vce
.available_rings
);
549 info
->num_rings
[RING_UVD_ENC
] = util_bitcount(uvd_enc
.available_rings
);
550 info
->num_rings
[RING_VCN_DEC
] = util_bitcount(vcn_dec
.available_rings
);
551 info
->num_rings
[RING_VCN_ENC
] = util_bitcount(vcn_enc
.available_rings
);
552 info
->num_rings
[RING_VCN_JPEG
] = util_bitcount(vcn_jpeg
.available_rings
);
554 /* This is "align_mask" copied from the kernel, maximums of all IP versions. */
555 info
->ib_pad_dw_mask
[RING_GFX
] = 0xff;
556 info
->ib_pad_dw_mask
[RING_COMPUTE
] = 0xff;
557 info
->ib_pad_dw_mask
[RING_DMA
] = 0xf;
558 info
->ib_pad_dw_mask
[RING_UVD
] = 0xf;
559 info
->ib_pad_dw_mask
[RING_VCE
] = 0x3f;
560 info
->ib_pad_dw_mask
[RING_UVD_ENC
] = 0x3f;
561 info
->ib_pad_dw_mask
[RING_VCN_DEC
] = 0xf;
562 info
->ib_pad_dw_mask
[RING_VCN_ENC
] = 0x3f;
563 info
->ib_pad_dw_mask
[RING_VCN_JPEG
] = 0xf;
565 /* The mere presence of CLEAR_STATE in the IB causes random GPU hangs
566 * on GFX6. Some CLEAR_STATE cause asic hang on radeon kernel, etc.
567 * SPI_VS_OUT_CONFIG. So only enable GFX7 CLEAR_STATE on amdgpu kernel.
569 info
->has_clear_state
= info
->chip_class
>= GFX7
;
571 info
->has_distributed_tess
=
572 info
->chip_class
>= GFX10
|| (info
->chip_class
>= GFX8
&& info
->max_se
>= 2);
574 info
->has_dcc_constant_encode
=
575 info
->family
== CHIP_RAVEN2
|| info
->family
== CHIP_RENOIR
|| info
->chip_class
>= GFX10
;
577 info
->has_rbplus
= info
->family
== CHIP_STONEY
|| info
->chip_class
>= GFX9
;
579 /* Some chips have RB+ registers, but don't support RB+. Those must
582 info
->rbplus_allowed
=
584 (info
->family
== CHIP_STONEY
|| info
->family
== CHIP_VEGA12
|| info
->family
== CHIP_RAVEN
||
585 info
->family
== CHIP_RAVEN2
|| info
->family
== CHIP_RENOIR
|| info
->chip_class
>= GFX10_3
);
587 info
->has_out_of_order_rast
=
588 info
->chip_class
>= GFX8
&& info
->chip_class
<= GFX9
&& info
->max_se
>= 2;
590 /* Whether chips support double rate packed math instructions. */
591 info
->has_packed_math_16bit
= info
->chip_class
>= GFX9
;
593 /* TODO: Figure out how to use LOAD_CONTEXT_REG on GFX6-GFX7. */
594 info
->has_load_ctx_reg_pkt
=
595 info
->chip_class
>= GFX9
|| (info
->chip_class
>= GFX8
&& info
->me_fw_feature
>= 41);
597 info
->cpdma_prefetch_writes_memory
= info
->chip_class
<= GFX8
;
599 info
->has_gfx9_scissor_bug
= info
->family
== CHIP_VEGA10
|| info
->family
== CHIP_RAVEN
;
601 info
->has_tc_compat_zrange_bug
= info
->chip_class
>= GFX8
&& info
->chip_class
<= GFX9
;
603 info
->has_msaa_sample_loc_bug
=
604 (info
->family
>= CHIP_POLARIS10
&& info
->family
<= CHIP_POLARIS12
) ||
605 info
->family
== CHIP_VEGA10
|| info
->family
== CHIP_RAVEN
;
607 info
->has_ls_vgpr_init_bug
= info
->family
== CHIP_VEGA10
|| info
->family
== CHIP_RAVEN
;
609 /* Get the number of good compute units. */
610 info
->num_good_compute_units
= 0;
611 for (i
= 0; i
< info
->max_se
; i
++) {
612 for (j
= 0; j
< info
->max_sh_per_se
; j
++) {
614 * The cu bitmap in amd gpu info structure is
615 * 4x4 size array, and it's usually suitable for Vega
616 * ASICs which has 4*2 SE/SH layout.
617 * But for Arcturus, SE/SH layout is changed to 8*1.
618 * To mostly reduce the impact, we make it compatible
619 * with current bitmap array as below:
620 * SE4,SH0 --> cu_bitmap[0][1]
621 * SE5,SH0 --> cu_bitmap[1][1]
622 * SE6,SH0 --> cu_bitmap[2][1]
623 * SE7,SH0 --> cu_bitmap[3][1]
625 info
->cu_mask
[i
% 4][j
+ i
/ 4] = amdinfo
->cu_bitmap
[i
% 4][j
+ i
/ 4];
626 info
->num_good_compute_units
+= util_bitcount(info
->cu_mask
[i
][j
]);
630 /* On GFX10, only whole WGPs (in units of 2 CUs) can be disabled,
631 * and max - min <= 2.
633 unsigned cu_group
= info
->chip_class
>= GFX10
? 2 : 1;
634 info
->max_good_cu_per_sa
=
635 DIV_ROUND_UP(info
->num_good_compute_units
, (info
->max_se
* info
->max_sh_per_se
* cu_group
)) *
637 info
->min_good_cu_per_sa
=
638 (info
->num_good_compute_units
/ (info
->max_se
* info
->max_sh_per_se
* cu_group
)) * cu_group
;
640 memcpy(info
->si_tile_mode_array
, amdinfo
->gb_tile_mode
, sizeof(amdinfo
->gb_tile_mode
));
641 info
->enabled_rb_mask
= amdinfo
->enabled_rb_pipes_mask
;
643 memcpy(info
->cik_macrotile_mode_array
, amdinfo
->gb_macro_tile_mode
,
644 sizeof(amdinfo
->gb_macro_tile_mode
));
646 info
->pte_fragment_size
= alignment_info
.size_local
;
647 info
->gart_page_size
= alignment_info
.size_remote
;
649 if (info
->chip_class
== GFX6
)
650 info
->gfx_ib_pad_with_type2
= true;
652 unsigned ib_align
= 0;
653 ib_align
= MAX2(ib_align
, gfx
.ib_start_alignment
);
654 ib_align
= MAX2(ib_align
, gfx
.ib_size_alignment
);
655 ib_align
= MAX2(ib_align
, compute
.ib_start_alignment
);
656 ib_align
= MAX2(ib_align
, compute
.ib_size_alignment
);
657 ib_align
= MAX2(ib_align
, dma
.ib_start_alignment
);
658 ib_align
= MAX2(ib_align
, dma
.ib_size_alignment
);
659 ib_align
= MAX2(ib_align
, uvd
.ib_start_alignment
);
660 ib_align
= MAX2(ib_align
, uvd
.ib_size_alignment
);
661 ib_align
= MAX2(ib_align
, uvd_enc
.ib_start_alignment
);
662 ib_align
= MAX2(ib_align
, uvd_enc
.ib_size_alignment
);
663 ib_align
= MAX2(ib_align
, vce
.ib_start_alignment
);
664 ib_align
= MAX2(ib_align
, vce
.ib_size_alignment
);
665 ib_align
= MAX2(ib_align
, vcn_dec
.ib_start_alignment
);
666 ib_align
= MAX2(ib_align
, vcn_dec
.ib_size_alignment
);
667 ib_align
= MAX2(ib_align
, vcn_enc
.ib_start_alignment
);
668 ib_align
= MAX2(ib_align
, vcn_enc
.ib_size_alignment
);
669 ib_align
= MAX2(ib_align
, vcn_jpeg
.ib_start_alignment
);
670 ib_align
= MAX2(ib_align
, vcn_jpeg
.ib_size_alignment
);
671 /* GFX10 and maybe GFX9 need this alignment for cache coherency. */
672 if (info
->chip_class
>= GFX9
)
673 ib_align
= MAX2(ib_align
, info
->tcc_cache_line_size
);
674 /* The kernel pads gfx and compute IBs to 256 dwords since:
675 * 66f3b2d527154bd258a57c8815004b5964aa1cf5
678 ib_align
= MAX2(ib_align
, 1024);
679 info
->ib_alignment
= ib_align
;
681 if ((info
->drm_minor
>= 31 && (info
->family
== CHIP_RAVEN
|| info
->family
== CHIP_RAVEN2
||
682 info
->family
== CHIP_RENOIR
)) ||
683 (info
->drm_minor
>= 34 && (info
->family
== CHIP_NAVI12
|| info
->family
== CHIP_NAVI14
)) ||
684 info
->chip_class
>= GFX10_3
) {
685 if (info
->num_render_backends
== 1)
686 info
->use_display_dcc_unaligned
= true;
688 info
->use_display_dcc_with_retile_blit
= true;
691 info
->has_gds_ordered_append
= info
->chip_class
>= GFX7
&& info
->drm_minor
>= 29;
693 if (info
->chip_class
>= GFX9
) {
694 unsigned pc_lines
= 0;
696 switch (info
->family
) {
707 case CHIP_SIENNA_CICHLID
:
708 case CHIP_NAVY_FLOUNDER
:
720 info
->pc_lines
= pc_lines
;
722 if (info
->chip_class
>= GFX10
) {
723 info
->pbb_max_alloc_count
= pc_lines
/ 3;
725 info
->pbb_max_alloc_count
= MIN2(128, pc_lines
/ (4 * info
->max_se
));
729 /* The number of SDPs is the same as the number of TCCs for now. */
730 if (info
->chip_class
>= GFX10
)
731 info
->num_sdp_interfaces
= device_info
.num_tcc_blocks
;
733 if (info
->chip_class
>= GFX10_3
)
734 info
->max_wave64_per_simd
= 16;
735 else if (info
->chip_class
== GFX10
)
736 info
->max_wave64_per_simd
= 20;
737 else if (info
->family
>= CHIP_POLARIS10
&& info
->family
<= CHIP_VEGAM
)
738 info
->max_wave64_per_simd
= 8;
740 info
->max_wave64_per_simd
= 10;
742 if (info
->chip_class
>= GFX10
) {
743 info
->num_physical_sgprs_per_simd
= 128 * info
->max_wave64_per_simd
;
744 info
->min_sgpr_alloc
= 128;
745 info
->sgpr_alloc_granularity
= 128;
746 /* Don't use late alloc on small chips. */
747 info
->use_late_alloc
= info
->num_render_backends
> 4;
748 } else if (info
->chip_class
>= GFX8
) {
749 info
->num_physical_sgprs_per_simd
= 800;
750 info
->min_sgpr_alloc
= 16;
751 info
->sgpr_alloc_granularity
= 16;
752 info
->use_late_alloc
= true;
754 info
->num_physical_sgprs_per_simd
= 512;
755 info
->min_sgpr_alloc
= 8;
756 info
->sgpr_alloc_granularity
= 8;
757 /* Potential hang on Kabini: */
758 info
->use_late_alloc
= info
->family
!= CHIP_KABINI
;
761 info
->max_sgpr_alloc
= info
->family
== CHIP_TONGA
|| info
->family
== CHIP_ICELAND
? 96 : 104;
763 info
->min_wave64_vgpr_alloc
= 4;
764 info
->max_vgpr_alloc
= 256;
765 info
->wave64_vgpr_alloc_granularity
= 4;
767 info
->num_physical_wave64_vgprs_per_simd
= info
->chip_class
>= GFX10
? 512 : 256;
768 info
->num_simd_per_compute_unit
= info
->chip_class
>= GFX10
? 2 : 4;
/* Fill 'uuid' with the Mesa AMD driver identification string.
 *
 * The buffer is zeroed first, so the result is always NUL-terminated and any
 * bytes past the string are deterministic (zero). 'size' must be at least
 * sizeof("AMD-MESA-DRV") (13 bytes), which the assert enforces.
 */
void ac_compute_driver_uuid(char *uuid, size_t size)
{
   char amd_uuid[] = "AMD-MESA-DRV";

   assert(size >= sizeof(amd_uuid));

   /* memset first: strncpy alone would not zero-pad if it were ever called
    * with size < strlen(amd_uuid), and callers may compare full buffers. */
   memset(uuid, 0, size);
   strncpy(uuid, amd_uuid, size);
}
783 void ac_compute_device_uuid(struct radeon_info
*info
, char *uuid
, size_t size
)
785 uint32_t *uint_uuid
= (uint32_t *)uuid
;
787 assert(size
>= sizeof(uint32_t) * 4);
790 * Use the device info directly instead of using a sha1. GL/VK UUIDs
791 * are 16 byte vs 20 byte for sha1, and the truncation that would be
792 * required would get rid of part of the little entropy we have.
794 memset(uuid
, 0, size
);
795 uint_uuid
[0] = info
->pci_domain
;
796 uint_uuid
[1] = info
->pci_bus
;
797 uint_uuid
[2] = info
->pci_dev
;
798 uint_uuid
[3] = info
->pci_func
;
801 void ac_print_gpu_info(struct radeon_info
*info
)
803 printf("Device info:\n");
804 printf(" pci (domain:bus:dev.func): %04x:%02x:%02x.%x\n", info
->pci_domain
, info
->pci_bus
,
805 info
->pci_dev
, info
->pci_func
);
807 printf(" name = %s\n", info
->name
);
808 printf(" marketing_name = %s\n", info
->marketing_name
);
809 printf(" is_pro_graphics = %u\n", info
->is_pro_graphics
);
810 printf(" pci_id = 0x%x\n", info
->pci_id
);
811 printf(" pci_rev_id = 0x%x\n", info
->pci_rev_id
);
812 printf(" family = %i\n", info
->family
);
813 printf(" chip_class = %i\n", info
->chip_class
);
814 printf(" family_id = %i\n", info
->family_id
);
815 printf(" chip_external_rev = %i\n", info
->chip_external_rev
);
816 printf(" clock_crystal_freq = %i\n", info
->clock_crystal_freq
);
818 printf("Features:\n");
819 printf(" has_graphics = %i\n", info
->has_graphics
);
820 printf(" num_rings[RING_GFX] = %i\n", info
->num_rings
[RING_GFX
]);
821 printf(" num_rings[RING_DMA] = %i\n", info
->num_rings
[RING_DMA
]);
822 printf(" num_rings[RING_COMPUTE] = %u\n", info
->num_rings
[RING_COMPUTE
]);
823 printf(" num_rings[RING_UVD] = %i\n", info
->num_rings
[RING_UVD
]);
824 printf(" num_rings[RING_VCE] = %i\n", info
->num_rings
[RING_VCE
]);
825 printf(" num_rings[RING_UVD_ENC] = %i\n", info
->num_rings
[RING_UVD_ENC
]);
826 printf(" num_rings[RING_VCN_DEC] = %i\n", info
->num_rings
[RING_VCN_DEC
]);
827 printf(" num_rings[RING_VCN_ENC] = %i\n", info
->num_rings
[RING_VCN_ENC
]);
828 printf(" num_rings[RING_VCN_JPEG] = %i\n", info
->num_rings
[RING_VCN_JPEG
]);
829 printf(" has_clear_state = %u\n", info
->has_clear_state
);
830 printf(" has_distributed_tess = %u\n", info
->has_distributed_tess
);
831 printf(" has_dcc_constant_encode = %u\n", info
->has_dcc_constant_encode
);
832 printf(" has_rbplus = %u\n", info
->has_rbplus
);
833 printf(" rbplus_allowed = %u\n", info
->rbplus_allowed
);
834 printf(" has_load_ctx_reg_pkt = %u\n", info
->has_load_ctx_reg_pkt
);
835 printf(" has_out_of_order_rast = %u\n", info
->has_out_of_order_rast
);
836 printf(" cpdma_prefetch_writes_memory = %u\n", info
->cpdma_prefetch_writes_memory
);
837 printf(" has_gfx9_scissor_bug = %i\n", info
->has_gfx9_scissor_bug
);
838 printf(" has_tc_compat_zrange_bug = %i\n", info
->has_tc_compat_zrange_bug
);
839 printf(" has_msaa_sample_loc_bug = %i\n", info
->has_msaa_sample_loc_bug
);
840 printf(" has_ls_vgpr_init_bug = %i\n", info
->has_ls_vgpr_init_bug
);
842 printf("Display features:\n");
843 printf(" use_display_dcc_unaligned = %u\n", info
->use_display_dcc_unaligned
);
844 printf(" use_display_dcc_with_retile_blit = %u\n", info
->use_display_dcc_with_retile_blit
);
846 printf("Memory info:\n");
847 printf(" pte_fragment_size = %u\n", info
->pte_fragment_size
);
848 printf(" gart_page_size = %u\n", info
->gart_page_size
);
849 printf(" gart_size = %i MB\n", (int)DIV_ROUND_UP(info
->gart_size
, 1024 * 1024));
850 printf(" vram_size = %i MB\n", (int)DIV_ROUND_UP(info
->vram_size
, 1024 * 1024));
851 printf(" vram_vis_size = %i MB\n", (int)DIV_ROUND_UP(info
->vram_vis_size
, 1024 * 1024));
852 printf(" vram_type = %i\n", info
->vram_type
);
853 printf(" vram_bit_width = %i\n", info
->vram_bit_width
);
854 printf(" gds_size = %u kB\n", info
->gds_size
/ 1024);
855 printf(" gds_gfx_partition_size = %u kB\n", info
->gds_gfx_partition_size
/ 1024);
856 printf(" max_alloc_size = %i MB\n", (int)DIV_ROUND_UP(info
->max_alloc_size
, 1024 * 1024));
857 printf(" min_alloc_size = %u\n", info
->min_alloc_size
);
858 printf(" address32_hi = %u\n", info
->address32_hi
);
859 printf(" has_dedicated_vram = %u\n", info
->has_dedicated_vram
);
860 printf(" num_sdp_interfaces = %u\n", info
->num_sdp_interfaces
);
861 printf(" num_tcc_blocks = %i\n", info
->num_tcc_blocks
);
862 printf(" tcc_cache_line_size = %u\n", info
->tcc_cache_line_size
);
863 printf(" tcc_harvested = %u\n", info
->tcc_harvested
);
864 printf(" pc_lines = %u\n", info
->pc_lines
);
865 printf(" lds_size_per_workgroup = %u\n", info
->lds_size_per_workgroup
);
866 printf(" lds_granularity = %i\n", info
->lds_granularity
);
867 printf(" max_memory_clock = %i\n", info
->max_memory_clock
);
868 printf(" ce_ram_size = %i\n", info
->ce_ram_size
);
869 printf(" l1_cache_size = %i\n", info
->l1_cache_size
);
870 printf(" l2_cache_size = %i\n", info
->l2_cache_size
);
872 printf("CP info:\n");
873 printf(" gfx_ib_pad_with_type2 = %i\n", info
->gfx_ib_pad_with_type2
);
874 printf(" ib_alignment = %u\n", info
->ib_alignment
);
875 printf(" me_fw_version = %i\n", info
->me_fw_version
);
876 printf(" me_fw_feature = %i\n", info
->me_fw_feature
);
877 printf(" pfp_fw_version = %i\n", info
->pfp_fw_version
);
878 printf(" pfp_fw_feature = %i\n", info
->pfp_fw_feature
);
879 printf(" ce_fw_version = %i\n", info
->ce_fw_version
);
880 printf(" ce_fw_feature = %i\n", info
->ce_fw_feature
);
882 printf("Multimedia info:\n");
883 printf(" has_hw_decode = %u\n", info
->has_hw_decode
);
884 printf(" uvd_enc_supported = %u\n", info
->uvd_enc_supported
);
885 printf(" uvd_fw_version = %u\n", info
->uvd_fw_version
);
886 printf(" vce_fw_version = %u\n", info
->vce_fw_version
);
887 printf(" vce_harvest_config = %i\n", info
->vce_harvest_config
);
889 printf("Kernel & winsys capabilities:\n");
890 printf(" drm = %i.%i.%i\n", info
->drm_major
, info
->drm_minor
, info
->drm_patchlevel
);
891 printf(" has_userptr = %i\n", info
->has_userptr
);
892 printf(" has_syncobj = %u\n", info
->has_syncobj
);
893 printf(" has_syncobj_wait_for_submit = %u\n", info
->has_syncobj_wait_for_submit
);
894 printf(" has_timeline_syncobj = %u\n", info
->has_timeline_syncobj
);
895 printf(" has_fence_to_handle = %u\n", info
->has_fence_to_handle
);
896 printf(" has_ctx_priority = %u\n", info
->has_ctx_priority
);
897 printf(" has_local_buffers = %u\n", info
->has_local_buffers
);
898 printf(" kernel_flushes_hdp_before_ib = %u\n", info
->kernel_flushes_hdp_before_ib
);
899 printf(" htile_cmask_support_1d_tiling = %u\n", info
->htile_cmask_support_1d_tiling
);
900 printf(" si_TA_CS_BC_BASE_ADDR_allowed = %u\n", info
->si_TA_CS_BC_BASE_ADDR_allowed
);
901 printf(" has_bo_metadata = %u\n", info
->has_bo_metadata
);
902 printf(" has_gpu_reset_status_query = %u\n", info
->has_gpu_reset_status_query
);
903 printf(" has_eqaa_surface_allocator = %u\n", info
->has_eqaa_surface_allocator
);
904 printf(" has_format_bc1_through_bc7 = %u\n", info
->has_format_bc1_through_bc7
);
905 printf(" kernel_flushes_tc_l2_after_ib = %u\n", info
->kernel_flushes_tc_l2_after_ib
);
906 printf(" has_indirect_compute_dispatch = %u\n", info
->has_indirect_compute_dispatch
);
907 printf(" has_unaligned_shader_loads = %u\n", info
->has_unaligned_shader_loads
);
908 printf(" has_sparse_vm_mappings = %u\n", info
->has_sparse_vm_mappings
);
909 printf(" has_2d_tiling = %u\n", info
->has_2d_tiling
);
910 printf(" has_read_registers_query = %u\n", info
->has_read_registers_query
);
911 printf(" has_gds_ordered_append = %u\n", info
->has_gds_ordered_append
);
912 printf(" has_scheduled_fence_dependency = %u\n", info
->has_scheduled_fence_dependency
);
913 printf(" mid_command_buffer_preemption_enabled = %u\n",
914 info
->mid_command_buffer_preemption_enabled
);
916 printf("Shader core info:\n");
917 printf(" max_shader_clock = %i\n", info
->max_shader_clock
);
918 printf(" num_good_compute_units = %i\n", info
->num_good_compute_units
);
919 printf(" max_good_cu_per_sa = %i\n", info
->max_good_cu_per_sa
);
920 printf(" min_good_cu_per_sa = %i\n", info
->min_good_cu_per_sa
);
921 printf(" max_se = %i\n", info
->max_se
);
922 printf(" max_sh_per_se = %i\n", info
->max_sh_per_se
);
923 printf(" max_wave64_per_simd = %i\n", info
->max_wave64_per_simd
);
924 printf(" num_physical_sgprs_per_simd = %i\n", info
->num_physical_sgprs_per_simd
);
925 printf(" num_physical_wave64_vgprs_per_simd = %i\n",
926 info
->num_physical_wave64_vgprs_per_simd
);
927 printf(" num_simd_per_compute_unit = %i\n", info
->num_simd_per_compute_unit
);
928 printf(" min_sgpr_alloc = %i\n", info
->min_sgpr_alloc
);
929 printf(" max_sgpr_alloc = %i\n", info
->max_sgpr_alloc
);
930 printf(" sgpr_alloc_granularity = %i\n", info
->sgpr_alloc_granularity
);
931 printf(" min_wave64_vgpr_alloc = %i\n", info
->min_wave64_vgpr_alloc
);
932 printf(" max_vgpr_alloc = %i\n", info
->max_vgpr_alloc
);
933 printf(" wave64_vgpr_alloc_granularity = %i\n", info
->wave64_vgpr_alloc_granularity
);
935 printf("Render backend info:\n");
936 printf(" pa_sc_tile_steering_override = 0x%x\n", info
->pa_sc_tile_steering_override
);
937 printf(" num_render_backends = %i\n", info
->num_render_backends
);
938 printf(" num_tile_pipes = %i\n", info
->num_tile_pipes
);
939 printf(" pipe_interleave_bytes = %i\n", info
->pipe_interleave_bytes
);
940 printf(" enabled_rb_mask = 0x%x\n", info
->enabled_rb_mask
);
941 printf(" max_alignment = %u\n", (unsigned)info
->max_alignment
);
942 printf(" pbb_max_alloc_count = %u\n", info
->pbb_max_alloc_count
);
944 printf("GB_ADDR_CONFIG: 0x%08x\n", info
->gb_addr_config
);
945 if (info
->chip_class
>= GFX10
) {
946 printf(" num_pipes = %u\n", 1 << G_0098F8_NUM_PIPES(info
->gb_addr_config
));
947 printf(" pipe_interleave_size = %u\n",
948 256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info
->gb_addr_config
));
949 printf(" max_compressed_frags = %u\n",
950 1 << G_0098F8_MAX_COMPRESSED_FRAGS(info
->gb_addr_config
));
951 } else if (info
->chip_class
== GFX9
) {
952 printf(" num_pipes = %u\n", 1 << G_0098F8_NUM_PIPES(info
->gb_addr_config
));
953 printf(" pipe_interleave_size = %u\n",
954 256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info
->gb_addr_config
));
955 printf(" max_compressed_frags = %u\n",
956 1 << G_0098F8_MAX_COMPRESSED_FRAGS(info
->gb_addr_config
));
957 printf(" bank_interleave_size = %u\n",
958 1 << G_0098F8_BANK_INTERLEAVE_SIZE(info
->gb_addr_config
));
959 printf(" num_banks = %u\n", 1 << G_0098F8_NUM_BANKS(info
->gb_addr_config
));
960 printf(" shader_engine_tile_size = %u\n",
961 16 << G_0098F8_SHADER_ENGINE_TILE_SIZE(info
->gb_addr_config
));
962 printf(" num_shader_engines = %u\n",
963 1 << G_0098F8_NUM_SHADER_ENGINES_GFX9(info
->gb_addr_config
));
964 printf(" num_gpus = %u (raw)\n", G_0098F8_NUM_GPUS_GFX9(info
->gb_addr_config
));
965 printf(" multi_gpu_tile_size = %u (raw)\n",
966 G_0098F8_MULTI_GPU_TILE_SIZE(info
->gb_addr_config
));
967 printf(" num_rb_per_se = %u\n", 1 << G_0098F8_NUM_RB_PER_SE(info
->gb_addr_config
));
968 printf(" row_size = %u\n", 1024 << G_0098F8_ROW_SIZE(info
->gb_addr_config
));
969 printf(" num_lower_pipes = %u (raw)\n", G_0098F8_NUM_LOWER_PIPES(info
->gb_addr_config
));
970 printf(" se_enable = %u (raw)\n", G_0098F8_SE_ENABLE(info
->gb_addr_config
));
972 printf(" num_pipes = %u\n", 1 << G_0098F8_NUM_PIPES(info
->gb_addr_config
));
973 printf(" pipe_interleave_size = %u\n",
974 256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX6(info
->gb_addr_config
));
975 printf(" bank_interleave_size = %u\n",
976 1 << G_0098F8_BANK_INTERLEAVE_SIZE(info
->gb_addr_config
));
977 printf(" num_shader_engines = %u\n",
978 1 << G_0098F8_NUM_SHADER_ENGINES_GFX6(info
->gb_addr_config
));
979 printf(" shader_engine_tile_size = %u\n",
980 16 << G_0098F8_SHADER_ENGINE_TILE_SIZE(info
->gb_addr_config
));
981 printf(" num_gpus = %u (raw)\n", G_0098F8_NUM_GPUS_GFX6(info
->gb_addr_config
));
982 printf(" multi_gpu_tile_size = %u (raw)\n",
983 G_0098F8_MULTI_GPU_TILE_SIZE(info
->gb_addr_config
));
984 printf(" row_size = %u\n", 1024 << G_0098F8_ROW_SIZE(info
->gb_addr_config
));
985 printf(" num_lower_pipes = %u (raw)\n", G_0098F8_NUM_LOWER_PIPES(info
->gb_addr_config
));
/* Returns a per-chip GS table depth (per the function name).
 * NOTE(review): this view of the file is fragmented — the opening brace,
 * most switch case labels and the return statements are not visible here;
 * confirm the full body against the original source before relying on
 * any of these notes.
 */
989 int ac_get_gs_table_depth(enum chip_class chip_class
, enum radeon_family family
)
/* GFX9 and newer take an early path (body not visible in this view). */
991 if (chip_class
>= GFX9
)
/* Polaris family members share one branch of the switch. */
1010 case CHIP_POLARIS10
:
1011 case CHIP_POLARIS11
:
1012 case CHIP_POLARIS12
:
/* Any family not handled above is a programming error. */
1016 unreachable("Unknown GPU");
/* Computes the PA_SC_RASTER_CONFIG / RASTER_CONFIG_1 register values for
 * pre-harvest rasterizer setup, plus an SE tile-repeat estimate.
 *
 * info:               GPU description (family, max_se, macrotile array, ...).
 * raster_config_p:    out — RASTER_CONFIG value.
 * raster_config_1_p:  out — RASTER_CONFIG_1 value.
 * se_tile_repeat_p:   optional out — may be NULL; only written when non-NULL.
 *
 * NOTE(review): this view of the file is fragmented — the case labels and
 * break statements between the per-family assignments below are not
 * visible, so the family→value mapping cannot be fully read here.
 */
1020 void ac_get_raster_config(struct radeon_info
*info
, uint32_t *raster_config_p
,
1021 uint32_t *raster_config_1_p
, uint32_t *se_tile_repeat_p
)
1023 unsigned raster_config
, raster_config_1
, se_tile_repeat
;
/* Per-family hardcoded register values. */
1025 switch (info
->family
) {
1030 raster_config
= 0x00000000;
1031 raster_config_1
= 0x00000000;
1035 raster_config
= 0x0000124a;
1036 raster_config_1
= 0x00000000;
1038 /* 1 SE / 2 RBs (Oland is special) */
1040 raster_config
= 0x00000082;
1041 raster_config_1
= 0x00000000;
1047 raster_config
= 0x00000002;
1048 raster_config_1
= 0x00000000;
1052 case CHIP_POLARIS11
:
1053 case CHIP_POLARIS12
:
1054 raster_config
= 0x16000012;
1055 raster_config_1
= 0x00000000;
1060 raster_config
= 0x2a00126a;
1061 raster_config_1
= 0x00000000;
1065 case CHIP_POLARIS10
:
1066 raster_config
= 0x16000012;
1067 raster_config_1
= 0x0000002a;
1069 /* 4 SEs / 16 RBs */
1073 raster_config
= 0x3a00161a;
1074 raster_config_1
= 0x0000002e;
/* Fallback: warn and use safe zero values rather than crashing. */
1077 fprintf(stderr
, "ac: Unknown GPU, using 0 for raster_config\n");
1078 raster_config
= 0x00000000;
1079 raster_config_1
= 0x00000000;
1083 /* drm/radeon on Kaveri is buggy, so disable 1 RB to work around it.
1084 * This decreases performance by up to 50% when the RB is the bottleneck.
1086 if (info
->family
== CHIP_KAVERI
&& !info
->is_amdgpu
)
1087 raster_config
= 0x00000000;
1089 /* Fiji: Old kernels have incorrect tiling config. This decreases
1090 * RB performance by 25%. (it disables 1 RB in the second packer)
/* Detect the broken kernel by its macrotile mode array signature. */
1092 if (info
->family
== CHIP_FIJI
&& info
->cik_macrotile_mode_array
[0] == 0x000000e8) {
1093 raster_config
= 0x16000012;
1094 raster_config_1
= 0x0000002a;
/* SE grid dimensions decoded from the chosen raster_config fields. */
1097 unsigned se_width
= 8 << G_028350_SE_XSEL_GFX6(raster_config
);
1098 unsigned se_height
= 8 << G_028350_SE_YSEL_GFX6(raster_config
);
1100 /* I don't know how to calculate this, though this is probably a good guess. */
1101 se_tile_repeat
= MAX2(se_width
, se_height
) * info
->max_se
;
/* Write results; se_tile_repeat is optional for callers. */
1103 *raster_config_p
= raster_config
;
1104 *raster_config_1_p
= raster_config_1
;
1105 if (se_tile_repeat_p
)
1106 *se_tile_repeat_p
= se_tile_repeat
;
/* Adjusts raster configuration for GPUs with harvested (disabled) render
 * backends: remaps SE/PKR/RB map fields so work only targets enabled RBs.
 *
 * info:                 GPU description (max_se, max_sh_per_se, enabled_rb_mask, ...).
 * raster_config:        the unharvested RASTER_CONFIG base value.
 * cik_raster_config_1_p: in/out — RASTER_CONFIG_1, updated on GFX7+ when an
 *                        entire SE pair is disabled.
 * raster_config_se:     out array — one adjusted RASTER_CONFIG per SE.
 *
 * NOTE(review): this view of the file is fragmented — several braces and
 * else-branches between the visible statements are not shown here.
 */
1109 void ac_get_harvested_configs(struct radeon_info
*info
, unsigned raster_config
,
1110 unsigned *cik_raster_config_1_p
, unsigned *raster_config_se
)
/* Clamp topology parameters to the ranges the remap logic supports. */
1112 unsigned sh_per_se
= MAX2(info
->max_sh_per_se
, 1);
1113 unsigned num_se
= MAX2(info
->max_se
, 1);
1114 unsigned rb_mask
= info
->enabled_rb_mask
;
1115 unsigned num_rb
= MIN2(info
->num_render_backends
, 16);
1116 unsigned rb_per_pkr
= MIN2(num_rb
/ num_se
/ sh_per_se
, 2);
1117 unsigned rb_per_se
= num_rb
/ num_se
;
1118 unsigned se_mask
[4];
/* Per-SE slice of the enabled-RB mask: consecutive rb_per_se bits each. */
1121 se_mask
[0] = ((1 << rb_per_se
) - 1) & rb_mask
;
1122 se_mask
[1] = (se_mask
[0] << rb_per_se
) & rb_mask
;
1123 se_mask
[2] = (se_mask
[1] << rb_per_se
) & rb_mask
;
1124 se_mask
[3] = (se_mask
[2] << rb_per_se
) & rb_mask
;
/* The remapping below only handles these hardware topologies. */
1126 assert(num_se
== 1 || num_se
== 2 || num_se
== 4);
1127 assert(sh_per_se
== 1 || sh_per_se
== 2);
1128 assert(rb_per_pkr
== 1 || rb_per_pkr
== 2);
/* GFX7+: if a whole SE pair is dead, repoint SE_PAIR_MAP at the live pair. */
1130 if (info
->chip_class
>= GFX7
) {
1131 unsigned raster_config_1
= *cik_raster_config_1_p
;
1132 if ((num_se
> 2) && ((!se_mask
[0] && !se_mask
[1]) || (!se_mask
[2] && !se_mask
[3]))) {
/* Clear the field before OR-ing in the replacement mapping. */
1133 raster_config_1
&= C_028354_SE_PAIR_MAP
;
1135 if (!se_mask
[0] && !se_mask
[1]) {
1136 raster_config_1
|= S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_3
);
1138 raster_config_1
|= S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_0
);
1140 *cik_raster_config_1_p
= raster_config_1
;
/* Per-SE pass: fix up SE, packer, and RB maps against the enabled mask. */
1144 for (se
= 0; se
< num_se
; se
++) {
1145 unsigned pkr0_mask
= ((1 << rb_per_pkr
) - 1) << (se
* rb_per_se
);
1146 unsigned pkr1_mask
= pkr0_mask
<< rb_per_pkr
;
/* idx = first SE of the pair this SE belongs to. */
1147 int idx
= (se
/ 2) * 2;
1149 raster_config_se
[se
] = raster_config
;
/* If one SE of the pair is fully harvested, remap SE_MAP to the live one. */
1150 if ((num_se
> 1) && (!se_mask
[idx
] || !se_mask
[idx
+ 1])) {
1151 raster_config_se
[se
] &= C_028350_SE_MAP
;
1153 if (!se_mask
[idx
]) {
1154 raster_config_se
[se
] |= S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_3
);
1156 raster_config_se
[se
] |= S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_0
);
/* Restrict packer masks to RBs that actually exist. */
1160 pkr0_mask
&= rb_mask
;
1161 pkr1_mask
&= rb_mask
;
/* If one packer has no live RBs, remap PKR_MAP to the live packer. */
1162 if (rb_per_se
> 2 && (!pkr0_mask
|| !pkr1_mask
)) {
1163 raster_config_se
[se
] &= C_028350_PKR_MAP
;
1166 raster_config_se
[se
] |= S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_3
);
1168 raster_config_se
[se
] |= S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_0
);
/* Packer 0: remap RB_MAP_PKR0 when one of its two RBs is harvested. */
1172 if (rb_per_se
>= 2) {
1173 unsigned rb0_mask
= 1 << (se
* rb_per_se
);
1174 unsigned rb1_mask
= rb0_mask
<< 1;
1176 rb0_mask
&= rb_mask
;
1177 rb1_mask
&= rb_mask
;
1178 if (!rb0_mask
|| !rb1_mask
) {
1179 raster_config_se
[se
] &= C_028350_RB_MAP_PKR0
;
1182 raster_config_se
[se
] |= S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_3
);
1184 raster_config_se
[se
] |= S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_0
);
/* Packer 1: same treatment for its RB pair (offset by rb_per_pkr). */
1188 if (rb_per_se
> 2) {
1189 rb0_mask
= 1 << (se
* rb_per_se
+ rb_per_pkr
);
1190 rb1_mask
= rb0_mask
<< 1;
1191 rb0_mask
&= rb_mask
;
1192 rb1_mask
&= rb_mask
;
1193 if (!rb0_mask
|| !rb1_mask
) {
1194 raster_config_se
[se
] &= C_028350_RB_MAP_PKR1
;
1197 raster_config_se
[se
] |= S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_3
);
1199 raster_config_se
[se
] |= S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_0
);
/* Builds the COMPUTE_RESOURCE_LIMITS register value for a compute dispatch.
 *
 * info:                  GPU description (chip_class, CU counts).
 * waves_per_threadgroup: waves per workgroup; enables SIMD_DEST_CNTL when a
 *                        multiple of 4.
 * max_waves_per_sh:      wave limit per SH (0 = unlimited).
 * threadgroups_per_cu:   desired thread groups per CU, must be in [1, 8]
 *                        (asserted below on the GFX7+ path).
 *
 * Returns the packed register value.
 * NOTE(review): this view of the file is fragmented — the else-branch braces
 * separating the GFX7+ and pre-GFX7 paths are not visible here.
 */
1207 unsigned ac_get_compute_resource_limits(struct radeon_info
*info
, unsigned waves_per_threadgroup
,
1208 unsigned max_waves_per_sh
, unsigned threadgroups_per_cu
)
/* SIMD_DEST_CNTL is only set when the group is a whole number of wave quads. */
1210 unsigned compute_resource_limits
= S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup
% 4 == 0);
1212 if (info
->chip_class
>= GFX7
) {
1213 unsigned num_cu_per_se
= info
->num_good_compute_units
/ info
->max_se
;
1215 /* Force even distribution on all SIMDs in CU if the workgroup
1216 * size is 64. This has shown some good improvements if # of CUs
1217 * per SE is not a multiple of 4.
1219 if (num_cu_per_se
% 4 && waves_per_threadgroup
== 1)
1220 compute_resource_limits
|= S_00B854_FORCE_SIMD_DIST(1);
/* CU_GROUP_COUNT is encoded as (groups - 1); hardware supports 1..8. */
1222 assert(threadgroups_per_cu
>= 1 && threadgroups_per_cu
<= 8);
1223 compute_resource_limits
|=
1224 S_00B854_WAVES_PER_SH(max_waves_per_sh
) | S_00B854_CU_GROUP_COUNT(threadgroups_per_cu
- 1);
/* Pre-GFX7 encodes the wave limit in units of 16 waves. */
1227 if (max_waves_per_sh
) {
1228 unsigned limit_div16
= DIV_ROUND_UP(max_waves_per_sh
, 16);
1229 compute_resource_limits
|= S_00B854_WAVES_PER_SH_GFX6(limit_div16
);
1232 return compute_resource_limits
;