ac: import ac_get_compute_resource_limits() from RadeonSI
[mesa.git] / src / amd / common / ac_gpu_info.c
/*
 * Copyright © 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "ac_gpu_info.h"
#include "sid.h"

#include "util/u_math.h"

#include <assert.h>
#include <stdio.h>
#include <string.h>

#include <xf86drm.h>
#include <amdgpu_drm.h>

#include <amdgpu.h>

#define CIK_TILE_MODE_COLOR_2D 14

#define CIK__GB_TILE_MODE__PIPE_CONFIG(x)      (((x) >> 6) & 0x1f)
#define CIK__PIPE_CONFIG__ADDR_SURF_P2               0
#define CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16          4
#define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16         5
#define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32         6
#define CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32         7
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16    8
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16    9
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16    10
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16   11
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16   12
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32   13
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32   14
#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16   16
#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16  17

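/* Derive the number of tile pipes from the PIPE_CONFIG field of the 2D color
 * tile mode that the kernel reports in gb_tile_mode[].
 */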
static unsigned cik_get_num_tile_pipes(struct amdgpu_gpu_info *info)
{
   unsigned mode2d = info->gb_tile_mode[CIK_TILE_MODE_COLOR_2D];

   switch (CIK__GB_TILE_MODE__PIPE_CONFIG(mode2d)) {
   case CIK__PIPE_CONFIG__ADDR_SURF_P2:
      return 2;
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32:
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32:
      return 4;
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32:
      return 8;
   case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16:
      return 16;
   default:
      fprintf(stderr, "Invalid GFX7 pipe configuration, assuming P2\n");
      assert(!"this should never occur");
      return 2;
   }
}

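/* Check whether the kernel exposes DRM sync objects for this device. */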
static bool has_syncobj(int fd)
{
   uint64_t value;
   if (drmGetCap(fd, DRM_CAP_SYNCOBJ, &value))
      return false;
   return value ? true : false;
}

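/* Query device, memory, HW IP, and firmware information from the amdgpu
 * kernel driver and use it to fill in the common radeon_info structure.
 * Returns false if any required query fails.
 */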
bool ac_query_gpu_info(int fd, void *dev_p,
                       struct radeon_info *info,
                       struct amdgpu_gpu_info *amdinfo)
{
   struct drm_amdgpu_info_device device_info = {};
   struct amdgpu_buffer_size_alignments alignment_info = {};
   struct drm_amdgpu_info_hw_ip dma = {}, compute = {}, uvd = {};
   struct drm_amdgpu_info_hw_ip uvd_enc = {}, vce = {}, vcn_dec = {}, vcn_jpeg = {};
   struct drm_amdgpu_info_hw_ip vcn_enc = {}, gfx = {};
   struct amdgpu_gds_resource_info gds = {};
   uint32_t vce_version = 0, vce_feature = 0, uvd_version = 0, uvd_feature = 0;
   int r, i, j;
   amdgpu_device_handle dev = dev_p;
   drmDevicePtr devinfo;

   /* Get PCI info. */
   r = drmGetDevice2(fd, 0, &devinfo);
   if (r) {
      fprintf(stderr, "amdgpu: drmGetDevice2 failed.\n");
      return false;
   }
   info->pci_domain = devinfo->businfo.pci->domain;
   info->pci_bus = devinfo->businfo.pci->bus;
   info->pci_dev = devinfo->businfo.pci->dev;
   info->pci_func = devinfo->businfo.pci->func;
   drmFreeDevice(&devinfo);

   assert(info->drm_major == 3);
   info->is_amdgpu = true;

   /* Query hardware and driver information. */
   r = amdgpu_query_gpu_info(dev, amdinfo);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_gpu_info failed.\n");
      return false;
   }

   r = amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(device_info),
                         &device_info);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_info(dev_info) failed.\n");
      return false;
   }

   r = amdgpu_query_buffer_size_alignment(dev, &alignment_info);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_buffer_size_alignment failed.\n");
      return false;
   }

   r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_DMA, 0, &dma);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(dma) failed.\n");
      return false;
   }

   r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_GFX, 0, &gfx);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(gfx) failed.\n");
      return false;
   }

   r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_COMPUTE, 0, &compute);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(compute) failed.\n");
      return false;
   }

   r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_UVD, 0, &uvd);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(uvd) failed.\n");
      return false;
   }

   if (info->drm_minor >= 17) {
      r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_UVD_ENC, 0, &uvd_enc);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(uvd_enc) failed.\n");
         return false;
      }
   }

   if (info->drm_minor >= 17) {
      r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCN_DEC, 0, &vcn_dec);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vcn_dec) failed.\n");
         return false;
      }
   }

   if (info->drm_minor >= 17) {
      r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCN_ENC, 0, &vcn_enc);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vcn_enc) failed.\n");
         return false;
      }
   }

   if (info->drm_minor >= 27) {
      r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCN_JPEG, 0, &vcn_jpeg);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vcn_jpeg) failed.\n");
         return false;
      }
   }

   r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_ME, 0, 0,
                                     &info->me_fw_version,
                                     &info->me_fw_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(me) failed.\n");
      return false;
   }

   r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_PFP, 0, 0,
                                     &info->pfp_fw_version,
                                     &info->pfp_fw_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(pfp) failed.\n");
      return false;
   }

   r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_CE, 0, 0,
                                     &info->ce_fw_version,
                                     &info->ce_fw_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(ce) failed.\n");
      return false;
   }

   r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_UVD, 0, 0,
                                     &uvd_version, &uvd_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(uvd) failed.\n");
      return false;
   }

   r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCE, 0, &vce);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vce) failed.\n");
      return false;
   }

   r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_VCE, 0, 0,
                                     &vce_version, &vce_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(vce) failed.\n");
      return false;
   }

   r = amdgpu_query_sw_info(dev, amdgpu_sw_info_address32_hi, &info->address32_hi);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_sw_info(address32_hi) failed.\n");
      return false;
   }

   r = amdgpu_query_gds_info(dev, &gds);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_gds_info failed.\n");
      return false;
   }

   if (info->drm_minor >= 9) {
      struct drm_amdgpu_memory_info meminfo = {};

      r = amdgpu_query_info(dev, AMDGPU_INFO_MEMORY, sizeof(meminfo), &meminfo);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_info(memory) failed.\n");
         return false;
      }

      /* Note: usable_heap_size values can be random and can't be relied on. */
      info->gart_size = meminfo.gtt.total_heap_size;
      info->vram_size = meminfo.vram.total_heap_size;
      info->vram_vis_size = meminfo.cpu_accessible_vram.total_heap_size;
   } else {
      /* This is a deprecated interface, which reports usable sizes
       * (total minus pinned), but the pinned size computation is
       * buggy, so the values returned from these functions can be
       * random.
       */
      struct amdgpu_heap_info vram, vram_vis, gtt;

      r = amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &vram);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_heap_info(vram) failed.\n");
         return false;
      }

      r = amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_VRAM,
                                 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                                 &vram_vis);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_heap_info(vram_vis) failed.\n");
         return false;
      }

      r = amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_GTT, 0, &gtt);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_heap_info(gtt) failed.\n");
         return false;
      }

      info->gart_size = gtt.heap_size;
      info->vram_size = vram.heap_size;
      info->vram_vis_size = vram_vis.heap_size;
   }

   /* Set chip identification. */
   info->pci_id = amdinfo->asic_id; /* TODO: is this correct? */
   info->vce_harvest_config = amdinfo->vce_harvest_config;

   switch (info->pci_id) {
#define CHIPSET(pci_id, cfamily) \
   case pci_id: \
      info->family = CHIP_##cfamily; \
      info->name = #cfamily; \
      break;
#include "pci_ids/radeonsi_pci_ids.h"
#undef CHIPSET

   default:
      fprintf(stderr, "amdgpu: Invalid PCI ID.\n");
      return false;
   }

   /* Raven2 uses the same PCI IDs as Raven1, but different revision IDs. */
   if (info->family == CHIP_RAVEN && amdinfo->chip_rev >= 0x8) {
      info->family = CHIP_RAVEN2;
      info->name = "RAVEN2";
   }

   if (info->family >= CHIP_NAVI10)
      info->chip_class = GFX10;
   else if (info->family >= CHIP_VEGA10)
      info->chip_class = GFX9;
   else if (info->family >= CHIP_TONGA)
      info->chip_class = GFX8;
   else if (info->family >= CHIP_BONAIRE)
      info->chip_class = GFX7;
   else if (info->family >= CHIP_TAHITI)
      info->chip_class = GFX6;
   else {
      fprintf(stderr, "amdgpu: Unknown family.\n");
      return false;
   }

   info->family_id = amdinfo->family_id;
   info->chip_external_rev = amdinfo->chip_external_rev;
   info->marketing_name = amdgpu_get_marketing_name(dev);
   /* Detect professional parts by substring, since marketing names are full
    * product strings (e.g. they contain "Pro"), not the bare word.
    */
   info->is_pro_graphics = info->marketing_name &&
                           (strstr(info->marketing_name, "Pro") ||
                            strstr(info->marketing_name, "PRO") ||
                            strstr(info->marketing_name, "Frontier"));

   /* Set which chips have dedicated VRAM. */
   info->has_dedicated_vram =
      !(amdinfo->ids_flags & AMDGPU_IDS_FLAGS_FUSION);

   /* The kernel can split large buffers in VRAM but not in GTT, so large
    * allocations can fail or cause buffer movement failures in the kernel.
    */
   if (info->has_dedicated_vram)
      info->max_alloc_size = info->vram_size * 0.8;
   else
      info->max_alloc_size = info->gart_size * 0.7;

   /* Set hardware information. */
   info->gds_size = gds.gds_total_size;
   info->gds_gfx_partition_size = gds.gds_gfx_partition_size;
   /* convert the shader clock from KHz to MHz */
   info->max_shader_clock = amdinfo->max_engine_clk / 1000;
   info->num_tcc_blocks = device_info.num_tcc_blocks;
   info->max_se = amdinfo->num_shader_engines;
   info->max_sh_per_se = amdinfo->num_shader_arrays_per_engine;
   info->has_hw_decode =
      (uvd.available_rings != 0) || (vcn_dec.available_rings != 0) ||
      (vcn_jpeg.available_rings != 0);
   info->uvd_fw_version =
      uvd.available_rings ? uvd_version : 0;
   info->vce_fw_version =
      vce.available_rings ? vce_version : 0;
   info->uvd_enc_supported =
      uvd_enc.available_rings ? true : false;
   info->has_userptr = true;
   info->has_syncobj = has_syncobj(fd);
   info->has_syncobj_wait_for_submit = info->has_syncobj && info->drm_minor >= 20;
   info->has_fence_to_handle = info->has_syncobj && info->drm_minor >= 21;
   info->has_ctx_priority = info->drm_minor >= 22;
   info->has_local_buffers = info->drm_minor >= 20;
   info->kernel_flushes_hdp_before_ib = true;
   info->htile_cmask_support_1d_tiling = true;
   info->si_TA_CS_BC_BASE_ADDR_allowed = true;
   info->has_bo_metadata = true;
   info->has_gpu_reset_status_query = true;
   info->has_eqaa_surface_allocator = true;
   info->has_format_bc1_through_bc7 = true;
   /* DRM 3.1.0 doesn't flush TC for GFX8 correctly. */
   info->kernel_flushes_tc_l2_after_ib = info->chip_class != GFX8 ||
                                         info->drm_minor >= 2;
   info->has_indirect_compute_dispatch = true;
   /* GFX6 doesn't support unaligned loads. */
   info->has_unaligned_shader_loads = info->chip_class != GFX6;
   /* Disable sparse mappings on GFX6 due to VM faults in CP DMA. Enable them once
    * these faults are mitigated in software.
    * Disable sparse mappings on GFX9 due to hangs.
    */
   info->has_sparse_vm_mappings =
      info->chip_class >= GFX7 && info->chip_class <= GFX8 &&
      info->drm_minor >= 13;
   info->has_2d_tiling = true;
   info->has_read_registers_query = true;
   info->has_scheduled_fence_dependency = info->drm_minor >= 28;

   info->pa_sc_tile_steering_override = device_info.pa_sc_tile_steering_override;
   info->num_render_backends = amdinfo->rb_pipes;
   /* The value returned by the kernel driver was wrong. */
   if (info->family == CHIP_KAVERI)
      info->num_render_backends = 2;

   info->clock_crystal_freq = amdinfo->gpu_counter_freq;
   if (!info->clock_crystal_freq) {
      fprintf(stderr, "amdgpu: clock crystal frequency is 0, timestamps will be wrong\n");
      info->clock_crystal_freq = 1;
   }
   if (info->chip_class >= GFX10) {
      info->tcc_cache_line_size = 128;
   } else {
      info->tcc_cache_line_size = 64;
   }
   info->gb_addr_config = amdinfo->gb_addr_cfg;
   if (info->chip_class == GFX9) {
      info->num_tile_pipes = 1 << G_0098F8_NUM_PIPES(amdinfo->gb_addr_cfg);
      info->pipe_interleave_bytes =
         256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(amdinfo->gb_addr_cfg);
   } else {
      info->num_tile_pipes = cik_get_num_tile_pipes(amdinfo);
      info->pipe_interleave_bytes =
         256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX6(amdinfo->gb_addr_cfg);
   }
   info->r600_has_virtual_memory = true;

   assert(util_is_power_of_two_or_zero(dma.available_rings + 1));
   assert(util_is_power_of_two_or_zero(compute.available_rings + 1));

   info->num_sdma_rings = util_bitcount(dma.available_rings);
   info->num_compute_rings = util_bitcount(compute.available_rings);

   /* Get the number of good compute units. */
   info->num_good_compute_units = 0;
   for (i = 0; i < info->max_se; i++)
      for (j = 0; j < info->max_sh_per_se; j++)
         info->num_good_compute_units +=
            util_bitcount(amdinfo->cu_bitmap[i][j]);
   info->num_good_cu_per_sh = info->num_good_compute_units /
                              (info->max_se * info->max_sh_per_se);

   memcpy(info->si_tile_mode_array, amdinfo->gb_tile_mode,
          sizeof(amdinfo->gb_tile_mode));
   info->enabled_rb_mask = amdinfo->enabled_rb_pipes_mask;

   memcpy(info->cik_macrotile_mode_array, amdinfo->gb_macro_tile_mode,
          sizeof(amdinfo->gb_macro_tile_mode));

   info->pte_fragment_size = alignment_info.size_local;
   info->gart_page_size = alignment_info.size_remote;

   if (info->chip_class == GFX6)
      info->gfx_ib_pad_with_type2 = TRUE;

   unsigned ib_align = 0;
   ib_align = MAX2(ib_align, gfx.ib_start_alignment);
   ib_align = MAX2(ib_align, compute.ib_start_alignment);
   ib_align = MAX2(ib_align, dma.ib_start_alignment);
   ib_align = MAX2(ib_align, uvd.ib_start_alignment);
   ib_align = MAX2(ib_align, uvd_enc.ib_start_alignment);
   ib_align = MAX2(ib_align, vce.ib_start_alignment);
   ib_align = MAX2(ib_align, vcn_dec.ib_start_alignment);
   ib_align = MAX2(ib_align, vcn_enc.ib_start_alignment);
   ib_align = MAX2(ib_align, vcn_jpeg.ib_start_alignment);
   assert(ib_align);
   info->ib_start_alignment = ib_align;

   if (info->drm_minor >= 31 &&
       (info->family == CHIP_RAVEN ||
        info->family == CHIP_RAVEN2)) {
      if (info->num_render_backends == 1)
         info->use_display_dcc_unaligned = true;
      else
         info->use_display_dcc_with_retile_blit = true;
   }

   info->has_gds_ordered_append = info->chip_class >= GFX7 &&
                                  info->drm_minor >= 29 &&
                                  HAVE_LLVM >= 0x0800;
   return true;
}

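/* Fill 'uuid' with a driver identifier shared by all AMD Mesa drivers. */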
void ac_compute_driver_uuid(char *uuid, size_t size)
{
   char amd_uuid[] = "AMD-MESA-DRV";

   assert(size >= sizeof(amd_uuid));

   memset(uuid, 0, size);
   strncpy(uuid, amd_uuid, size);
}

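/* Fill 'uuid' with a device identifier derived from the PCI bus location. */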
void ac_compute_device_uuid(struct radeon_info *info, char *uuid, size_t size)
{
   uint32_t *uint_uuid = (uint32_t*)uuid;

   assert(size >= sizeof(uint32_t)*4);

   /* Use the device info directly instead of using a sha1. GL/VK UUIDs
    * are 16 byte vs 20 byte for sha1, and the truncation that would be
    * required would get rid of part of the little entropy we have.
    */
   memset(uuid, 0, size);
   uint_uuid[0] = info->pci_domain;
   uint_uuid[1] = info->pci_bus;
   uint_uuid[2] = info->pci_dev;
   uint_uuid[3] = info->pci_func;
}

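/* Dump the contents of radeon_info to stdout for debugging. */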
void ac_print_gpu_info(struct radeon_info *info)
{
   printf("Device info:\n");
   printf("    pci (domain:bus:dev.func): %04x:%02x:%02x.%x\n",
          info->pci_domain, info->pci_bus,
          info->pci_dev, info->pci_func);
   printf("    pci_id = 0x%x\n", info->pci_id);
   printf("    family = %i\n", info->family);
   printf("    chip_class = %i\n", info->chip_class);
   printf("    chip_external_rev = %i\n", info->chip_external_rev);
   printf("    num_compute_rings = %u\n", info->num_compute_rings);
   printf("    num_sdma_rings = %i\n", info->num_sdma_rings);
   printf("    clock_crystal_freq = %i\n", info->clock_crystal_freq);
   printf("    tcc_cache_line_size = %u\n", info->tcc_cache_line_size);

   printf("    use_display_dcc_unaligned = %u\n", info->use_display_dcc_unaligned);
   printf("    use_display_dcc_with_retile_blit = %u\n", info->use_display_dcc_with_retile_blit);

   printf("Memory info:\n");
   printf("    pte_fragment_size = %u\n", info->pte_fragment_size);
   printf("    gart_page_size = %u\n", info->gart_page_size);
   printf("    gart_size = %i MB\n", (int)DIV_ROUND_UP(info->gart_size, 1024*1024));
   printf("    vram_size = %i MB\n", (int)DIV_ROUND_UP(info->vram_size, 1024*1024));
   printf("    vram_vis_size = %i MB\n", (int)DIV_ROUND_UP(info->vram_vis_size, 1024*1024));
   printf("    gds_size = %u kB\n", info->gds_size / 1024);
   printf("    gds_gfx_partition_size = %u kB\n", info->gds_gfx_partition_size / 1024);
   printf("    max_alloc_size = %i MB\n",
          (int)DIV_ROUND_UP(info->max_alloc_size, 1024*1024));
   printf("    min_alloc_size = %u\n", info->min_alloc_size);
   printf("    address32_hi = %u\n", info->address32_hi);
   printf("    has_dedicated_vram = %u\n", info->has_dedicated_vram);

   printf("CP info:\n");
   printf("    gfx_ib_pad_with_type2 = %i\n", info->gfx_ib_pad_with_type2);
   printf("    ib_start_alignment = %u\n", info->ib_start_alignment);
   printf("    me_fw_version = %i\n", info->me_fw_version);
   printf("    me_fw_feature = %i\n", info->me_fw_feature);
   printf("    pfp_fw_version = %i\n", info->pfp_fw_version);
   printf("    pfp_fw_feature = %i\n", info->pfp_fw_feature);
   printf("    ce_fw_version = %i\n", info->ce_fw_version);
   printf("    ce_fw_feature = %i\n", info->ce_fw_feature);

   printf("Multimedia info:\n");
   printf("    has_hw_decode = %u\n", info->has_hw_decode);
   printf("    uvd_enc_supported = %u\n", info->uvd_enc_supported);
   printf("    uvd_fw_version = %u\n", info->uvd_fw_version);
   printf("    vce_fw_version = %u\n", info->vce_fw_version);
   printf("    vce_harvest_config = %i\n", info->vce_harvest_config);

   printf("Kernel & winsys capabilities:\n");
   printf("    drm = %i.%i.%i\n", info->drm_major,
          info->drm_minor, info->drm_patchlevel);
   printf("    has_userptr = %i\n", info->has_userptr);
   printf("    has_syncobj = %u\n", info->has_syncobj);
   printf("    has_syncobj_wait_for_submit = %u\n", info->has_syncobj_wait_for_submit);
   printf("    has_fence_to_handle = %u\n", info->has_fence_to_handle);
   printf("    has_ctx_priority = %u\n", info->has_ctx_priority);
   printf("    has_local_buffers = %u\n", info->has_local_buffers);
   printf("    kernel_flushes_hdp_before_ib = %u\n", info->kernel_flushes_hdp_before_ib);
   printf("    htile_cmask_support_1d_tiling = %u\n", info->htile_cmask_support_1d_tiling);
   printf("    si_TA_CS_BC_BASE_ADDR_allowed = %u\n", info->si_TA_CS_BC_BASE_ADDR_allowed);
   printf("    has_bo_metadata = %u\n", info->has_bo_metadata);
   printf("    has_gpu_reset_status_query = %u\n", info->has_gpu_reset_status_query);
   printf("    has_eqaa_surface_allocator = %u\n", info->has_eqaa_surface_allocator);
   printf("    has_format_bc1_through_bc7 = %u\n", info->has_format_bc1_through_bc7);
   printf("    kernel_flushes_tc_l2_after_ib = %u\n", info->kernel_flushes_tc_l2_after_ib);
   printf("    has_indirect_compute_dispatch = %u\n", info->has_indirect_compute_dispatch);
   printf("    has_unaligned_shader_loads = %u\n", info->has_unaligned_shader_loads);
   printf("    has_sparse_vm_mappings = %u\n", info->has_sparse_vm_mappings);
   printf("    has_2d_tiling = %u\n", info->has_2d_tiling);
   printf("    has_read_registers_query = %u\n", info->has_read_registers_query);
   printf("    has_gds_ordered_append = %u\n", info->has_gds_ordered_append);
   printf("    has_scheduled_fence_dependency = %u\n", info->has_scheduled_fence_dependency);

   printf("Shader core info:\n");
   printf("    max_shader_clock = %i\n", info->max_shader_clock);
   printf("    num_good_compute_units = %i\n", info->num_good_compute_units);
   printf("    num_good_cu_per_sh = %i\n", info->num_good_cu_per_sh);
   printf("    num_tcc_blocks = %i\n", info->num_tcc_blocks);
   printf("    max_se = %i\n", info->max_se);
   printf("    max_sh_per_se = %i\n", info->max_sh_per_se);

   printf("Render backend info:\n");
   printf("    pa_sc_tile_steering_override = 0x%x\n", info->pa_sc_tile_steering_override);
   printf("    num_render_backends = %i\n", info->num_render_backends);
   printf("    num_tile_pipes = %i\n", info->num_tile_pipes);
   printf("    pipe_interleave_bytes = %i\n", info->pipe_interleave_bytes);
   printf("    enabled_rb_mask = 0x%x\n", info->enabled_rb_mask);
   printf("    max_alignment = %u\n", (unsigned)info->max_alignment);

   printf("GB_ADDR_CONFIG:\n");
   if (info->chip_class >= GFX9) {
      printf("    num_pipes = %u\n",
             1 << G_0098F8_NUM_PIPES(info->gb_addr_config));
      printf("    pipe_interleave_size = %u\n",
             256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config));
      printf("    max_compressed_frags = %u\n",
             1 << G_0098F8_MAX_COMPRESSED_FRAGS(info->gb_addr_config));
      printf("    bank_interleave_size = %u\n",
             1 << G_0098F8_BANK_INTERLEAVE_SIZE(info->gb_addr_config));
      printf("    num_banks = %u\n",
             1 << G_0098F8_NUM_BANKS(info->gb_addr_config));
      printf("    shader_engine_tile_size = %u\n",
             16 << G_0098F8_SHADER_ENGINE_TILE_SIZE(info->gb_addr_config));
      printf("    num_shader_engines = %u\n",
             1 << G_0098F8_NUM_SHADER_ENGINES_GFX9(info->gb_addr_config));
      printf("    num_gpus = %u (raw)\n",
             G_0098F8_NUM_GPUS_GFX9(info->gb_addr_config));
      printf("    multi_gpu_tile_size = %u (raw)\n",
             G_0098F8_MULTI_GPU_TILE_SIZE(info->gb_addr_config));
      printf("    num_rb_per_se = %u\n",
             1 << G_0098F8_NUM_RB_PER_SE(info->gb_addr_config));
      printf("    row_size = %u\n",
             1024 << G_0098F8_ROW_SIZE(info->gb_addr_config));
      printf("    num_lower_pipes = %u (raw)\n",
             G_0098F8_NUM_LOWER_PIPES(info->gb_addr_config));
      printf("    se_enable = %u (raw)\n",
             G_0098F8_SE_ENABLE(info->gb_addr_config));
   } else {
      printf("    num_pipes = %u\n",
             1 << G_0098F8_NUM_PIPES(info->gb_addr_config));
      printf("    pipe_interleave_size = %u\n",
             256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX6(info->gb_addr_config));
      printf("    bank_interleave_size = %u\n",
             1 << G_0098F8_BANK_INTERLEAVE_SIZE(info->gb_addr_config));
      printf("    num_shader_engines = %u\n",
             1 << G_0098F8_NUM_SHADER_ENGINES_GFX6(info->gb_addr_config));
      printf("    shader_engine_tile_size = %u\n",
             16 << G_0098F8_SHADER_ENGINE_TILE_SIZE(info->gb_addr_config));
      printf("    num_gpus = %u (raw)\n",
             G_0098F8_NUM_GPUS_GFX6(info->gb_addr_config));
      printf("    multi_gpu_tile_size = %u (raw)\n",
             G_0098F8_MULTI_GPU_TILE_SIZE(info->gb_addr_config));
      printf("    row_size = %u\n",
             1024 << G_0098F8_ROW_SIZE(info->gb_addr_config));
      printf("    num_lower_pipes = %u (raw)\n",
             G_0098F8_NUM_LOWER_PIPES(info->gb_addr_config));
   }
}

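/* Per-family GS table depth. Not applicable on GFX9+, where -1 is returned. */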
int
ac_get_gs_table_depth(enum chip_class chip_class, enum radeon_family family)
{
   if (chip_class >= GFX9)
      return -1;

   switch (family) {
   case CHIP_OLAND:
   case CHIP_HAINAN:
   case CHIP_KAVERI:
   case CHIP_KABINI:
   case CHIP_ICELAND:
   case CHIP_CARRIZO:
   case CHIP_STONEY:
      return 16;
   case CHIP_TAHITI:
   case CHIP_PITCAIRN:
   case CHIP_VERDE:
   case CHIP_BONAIRE:
   case CHIP_HAWAII:
   case CHIP_TONGA:
   case CHIP_FIJI:
   case CHIP_POLARIS10:
   case CHIP_POLARIS11:
   case CHIP_POLARIS12:
   case CHIP_VEGAM:
      return 32;
   default:
      unreachable("Unknown GPU");
   }
}

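/* Compute the PA_SC_RASTER_CONFIG and PA_SC_RASTER_CONFIG_1 values for the
 * given family, plus the SE tile repeat distance if requested.
 */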
void
ac_get_raster_config(struct radeon_info *info,
                     uint32_t *raster_config_p,
                     uint32_t *raster_config_1_p,
                     uint32_t *se_tile_repeat_p)
{
   unsigned raster_config, raster_config_1, se_tile_repeat;

   switch (info->family) {
   /* 1 SE / 1 RB */
   case CHIP_HAINAN:
   case CHIP_KABINI:
   case CHIP_STONEY:
      raster_config = 0x00000000;
      raster_config_1 = 0x00000000;
      break;
   /* 1 SE / 4 RBs */
   case CHIP_VERDE:
      raster_config = 0x0000124a;
      raster_config_1 = 0x00000000;
      break;
   /* 1 SE / 2 RBs (Oland is special) */
   case CHIP_OLAND:
      raster_config = 0x00000082;
      raster_config_1 = 0x00000000;
      break;
   /* 1 SE / 2 RBs */
   case CHIP_KAVERI:
   case CHIP_ICELAND:
   case CHIP_CARRIZO:
      raster_config = 0x00000002;
      raster_config_1 = 0x00000000;
      break;
   /* 2 SEs / 4 RBs */
   case CHIP_BONAIRE:
   case CHIP_POLARIS11:
   case CHIP_POLARIS12:
      raster_config = 0x16000012;
      raster_config_1 = 0x00000000;
      break;
   /* 2 SEs / 8 RBs */
   case CHIP_TAHITI:
   case CHIP_PITCAIRN:
      raster_config = 0x2a00126a;
      raster_config_1 = 0x00000000;
      break;
   /* 4 SEs / 8 RBs */
   case CHIP_TONGA:
   case CHIP_POLARIS10:
      raster_config = 0x16000012;
      raster_config_1 = 0x0000002a;
      break;
   /* 4 SEs / 16 RBs */
   case CHIP_HAWAII:
   case CHIP_FIJI:
   case CHIP_VEGAM:
      raster_config = 0x3a00161a;
      raster_config_1 = 0x0000002e;
      break;
   default:
      fprintf(stderr,
              "ac: Unknown GPU, using 0 for raster_config\n");
      raster_config = 0x00000000;
      raster_config_1 = 0x00000000;
      break;
   }

   /* drm/radeon on Kaveri is buggy, so disable 1 RB to work around it.
    * This decreases performance by up to 50% when the RB is the bottleneck.
    */
   if (info->family == CHIP_KAVERI && !info->is_amdgpu)
      raster_config = 0x00000000;

   /* Fiji: Old kernels have incorrect tiling config. This decreases
    * RB performance by 25%. (it disables 1 RB in the second packer)
    */
   if (info->family == CHIP_FIJI &&
       info->cik_macrotile_mode_array[0] == 0x000000e8) {
      raster_config = 0x16000012;
      raster_config_1 = 0x0000002a;
   }

   unsigned se_width = 8 << G_028350_SE_XSEL_GFX6(raster_config);
   unsigned se_height = 8 << G_028350_SE_YSEL_GFX6(raster_config);

   /* I don't know how to calculate this, though this is probably a good guess. */
   se_tile_repeat = MAX2(se_width, se_height) * info->max_se;

   *raster_config_p = raster_config;
   *raster_config_1_p = raster_config_1;
   if (se_tile_repeat_p)
      *se_tile_repeat_p = se_tile_repeat;
}

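/* Adjust the raster config for harvested chips: remap SE, packer, and RB
 * assignments away from disabled render backends based on enabled_rb_mask,
 * producing one raster config per shader engine.
 */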
void
ac_get_harvested_configs(struct radeon_info *info,
                         unsigned raster_config,
                         unsigned *cik_raster_config_1_p,
                         unsigned *raster_config_se)
{
   unsigned sh_per_se = MAX2(info->max_sh_per_se, 1);
   unsigned num_se = MAX2(info->max_se, 1);
   unsigned rb_mask = info->enabled_rb_mask;
   unsigned num_rb = MIN2(info->num_render_backends, 16);
   unsigned rb_per_pkr = MIN2(num_rb / num_se / sh_per_se, 2);
   unsigned rb_per_se = num_rb / num_se;
   unsigned se_mask[4];
   unsigned se;

   se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
   se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
   se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
   se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;

   assert(num_se == 1 || num_se == 2 || num_se == 4);
   assert(sh_per_se == 1 || sh_per_se == 2);
   assert(rb_per_pkr == 1 || rb_per_pkr == 2);

   if (info->chip_class >= GFX7) {
      unsigned raster_config_1 = *cik_raster_config_1_p;
      if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
                           (!se_mask[2] && !se_mask[3]))) {
         raster_config_1 &= C_028354_SE_PAIR_MAP;

         if (!se_mask[0] && !se_mask[1]) {
            raster_config_1 |=
               S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_3);
         } else {
            raster_config_1 |=
               S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_0);
         }
         *cik_raster_config_1_p = raster_config_1;
      }
   }

   for (se = 0; se < num_se; se++) {
      unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
      unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
      int idx = (se / 2) * 2;

      raster_config_se[se] = raster_config;
      if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
         raster_config_se[se] &= C_028350_SE_MAP;

         if (!se_mask[idx]) {
            raster_config_se[se] |=
               S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_3);
         } else {
            raster_config_se[se] |=
               S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_0);
         }
      }

      pkr0_mask &= rb_mask;
      pkr1_mask &= rb_mask;
      if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
         raster_config_se[se] &= C_028350_PKR_MAP;

         if (!pkr0_mask) {
            raster_config_se[se] |=
               S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_3);
         } else {
            raster_config_se[se] |=
               S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_0);
         }
      }

      if (rb_per_se >= 2) {
         unsigned rb0_mask = 1 << (se * rb_per_se);
         unsigned rb1_mask = rb0_mask << 1;

         rb0_mask &= rb_mask;
         rb1_mask &= rb_mask;
         if (!rb0_mask || !rb1_mask) {
            raster_config_se[se] &= C_028350_RB_MAP_PKR0;

            if (!rb0_mask) {
               raster_config_se[se] |=
                  S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_3);
            } else {
               raster_config_se[se] |=
                  S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_0);
            }
         }

         if (rb_per_se > 2) {
            rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
            rb1_mask = rb0_mask << 1;
            rb0_mask &= rb_mask;
            rb1_mask &= rb_mask;
            if (!rb0_mask || !rb1_mask) {
               raster_config_se[se] &= C_028350_RB_MAP_PKR1;

               if (!rb0_mask) {
                  raster_config_se[se] |=
                     S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_3);
               } else {
                  raster_config_se[se] |=
                     S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_0);
               }
            }
         }
      }
   }
}

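/* Compute the COMPUTE_RESOURCE_LIMITS register value for a compute dispatch,
 * limiting waves per SH and threadgroups per CU as requested. Imported from
 * RadeonSI so that it can be shared with other drivers.
 */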
unsigned ac_get_compute_resource_limits(struct radeon_info *info,
                                        unsigned waves_per_threadgroup,
                                        unsigned max_waves_per_sh,
                                        unsigned threadgroups_per_cu)
{
   unsigned compute_resource_limits =
      S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);

   if (info->chip_class >= GFX7) {
      unsigned num_cu_per_se = info->num_good_compute_units /
                               info->max_se;

      /* Force even distribution on all SIMDs in CU if the workgroup
       * size is 64. This has shown some good improvements if # of CUs
       * per SE is not a multiple of 4.
       */
      if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
         compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);

      assert(threadgroups_per_cu >= 1 && threadgroups_per_cu <= 8);
      compute_resource_limits |= S_00B854_WAVES_PER_SH(max_waves_per_sh) |
                                 S_00B854_CU_GROUP_COUNT(threadgroups_per_cu - 1);
   } else {
      /* GFX6 */
      if (max_waves_per_sh) {
         unsigned limit_div16 = DIV_ROUND_UP(max_waves_per_sh, 16);
         compute_resource_limits |= S_00B854_WAVES_PER_SH_SI(limit_div16);
      }
   }
   return compute_resource_limits;
}