radeonsi: enable displayable DCC on Ravens
[mesa.git] / src/amd/common/ac_gpu_info.c
/*
 * Copyright © 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "ac_gpu_info.h"
#include "sid.h"
#include "gfx9d.h"

#include "util/u_math.h"

#include <stdio.h>

#include <xf86drm.h>
#include <amdgpu_drm.h>

#include <amdgpu.h>

#define CIK_TILE_MODE_COLOR_2D 14

#define CIK__GB_TILE_MODE__PIPE_CONFIG(x) (((x) >> 6) & 0x1f)
#define CIK__PIPE_CONFIG__ADDR_SURF_P2 0
#define CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16 4
#define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16 5
#define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32 6
#define CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32 7
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16 8
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16 9
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16 10
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16 11
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16 12
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32 13
#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32 14
#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16 16
#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16 17

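/* Map the PIPE_CONFIG field of the 2D color tile mode to a pipe count
 * (pre-GFX9 tiling only). */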
static unsigned cik_get_num_tile_pipes(struct amdgpu_gpu_info *info)
{
   unsigned mode2d = info->gb_tile_mode[CIK_TILE_MODE_COLOR_2D];

   switch (CIK__GB_TILE_MODE__PIPE_CONFIG(mode2d)) {
   case CIK__PIPE_CONFIG__ADDR_SURF_P2:
      return 2;
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32:
   case CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32:
      return 4;
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32:
   case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32:
      return 8;
   case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16:
   case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16:
      return 16;
   default:
      fprintf(stderr, "Invalid CIK pipe configuration, assuming P2\n");
      assert(!"this should never occur");
      return 2;
   }
}

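/* Check whether the kernel exposes DRM sync objects (DRM_CAP_SYNCOBJ). */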
static bool has_syncobj(int fd)
{
   uint64_t value;
   if (drmGetCap(fd, DRM_CAP_SYNCOBJ, &value))
      return false;
   return value ? true : false;
}

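/* Query device, memory, firmware, and HW IP information from the amdgpu
 * kernel driver and fill *info. Returns false if any required query fails. */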
bool ac_query_gpu_info(int fd, amdgpu_device_handle dev,
                       struct radeon_info *info,
                       struct amdgpu_gpu_info *amdinfo)
{
   struct drm_amdgpu_info_device device_info = {};
   struct amdgpu_buffer_size_alignments alignment_info = {};
   struct drm_amdgpu_info_hw_ip dma = {}, compute = {}, uvd = {};
   struct drm_amdgpu_info_hw_ip uvd_enc = {}, vce = {}, vcn_dec = {}, vcn_jpeg = {};
   struct drm_amdgpu_info_hw_ip vcn_enc = {}, gfx = {};
   struct amdgpu_gds_resource_info gds = {};
   uint32_t vce_version = 0, vce_feature = 0, uvd_version = 0, uvd_feature = 0;
   int r, i, j;
   drmDevicePtr devinfo;

   /* Get PCI info. */
   r = drmGetDevice2(fd, 0, &devinfo);
   if (r) {
      fprintf(stderr, "amdgpu: drmGetDevice2 failed.\n");
      return false;
   }
   info->pci_domain = devinfo->businfo.pci->domain;
   info->pci_bus = devinfo->businfo.pci->bus;
   info->pci_dev = devinfo->businfo.pci->dev;
   info->pci_func = devinfo->businfo.pci->func;
   drmFreeDevice(&devinfo);

   /* Query hardware and driver information. */
   r = amdgpu_query_gpu_info(dev, amdinfo);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_gpu_info failed.\n");
      return false;
   }

   r = amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(device_info),
                         &device_info);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_info(dev_info) failed.\n");
      return false;
   }

   r = amdgpu_query_buffer_size_alignment(dev, &alignment_info);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_buffer_size_alignment failed.\n");
      return false;
   }

   r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_DMA, 0, &dma);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(dma) failed.\n");
      return false;
   }

   r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_GFX, 0, &gfx);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(gfx) failed.\n");
      return false;
   }

   r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_COMPUTE, 0, &compute);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(compute) failed.\n");
      return false;
   }

   r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_UVD, 0, &uvd);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(uvd) failed.\n");
      return false;
   }

   if (info->drm_major == 3 && info->drm_minor >= 17) {
      r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_UVD_ENC, 0, &uvd_enc);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(uvd_enc) failed.\n");
         return false;
      }
   }

   if (info->drm_major == 3 && info->drm_minor >= 17) {
      r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCN_DEC, 0, &vcn_dec);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vcn_dec) failed.\n");
         return false;
      }
   }

   if (info->drm_major == 3 && info->drm_minor >= 17) {
      r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCN_ENC, 0, &vcn_enc);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vcn_enc) failed.\n");
         return false;
      }
   }

   if (info->drm_major == 3 && info->drm_minor >= 27) {
      r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCN_JPEG, 0, &vcn_jpeg);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vcn_jpeg) failed.\n");
         return false;
      }
   }

   r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_ME, 0, 0,
                                     &info->me_fw_version,
                                     &info->me_fw_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(me) failed.\n");
      return false;
   }

   r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_PFP, 0, 0,
                                     &info->pfp_fw_version,
                                     &info->pfp_fw_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(pfp) failed.\n");
      return false;
   }

   r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_CE, 0, 0,
                                     &info->ce_fw_version,
                                     &info->ce_fw_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(ce) failed.\n");
      return false;
   }

   r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_UVD, 0, 0,
                                     &uvd_version, &uvd_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(uvd) failed.\n");
      return false;
   }

   r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCE, 0, &vce);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vce) failed.\n");
      return false;
   }

   r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_VCE, 0, 0,
                                     &vce_version, &vce_feature);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(vce) failed.\n");
      return false;
   }

   r = amdgpu_query_sw_info(dev, amdgpu_sw_info_address32_hi, &info->address32_hi);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_sw_info(address32_hi) failed.\n");
      return false;
   }

   r = amdgpu_query_gds_info(dev, &gds);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_query_gds_info failed.\n");
      return false;
   }

   if (info->drm_minor >= 9) {
      struct drm_amdgpu_memory_info meminfo = {};

      r = amdgpu_query_info(dev, AMDGPU_INFO_MEMORY, sizeof(meminfo), &meminfo);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_info(memory) failed.\n");
         return false;
      }

      /* Note: usable_heap_size values can be random and can't be relied on. */
      info->gart_size = meminfo.gtt.total_heap_size;
      info->vram_size = meminfo.vram.total_heap_size;
      info->vram_vis_size = meminfo.cpu_accessible_vram.total_heap_size;
   } else {
      /* This is a deprecated interface, which reports usable sizes
       * (total minus pinned), but the pinned size computation is
       * buggy, so the values returned from these functions can be
       * random.
       */
      struct amdgpu_heap_info vram, vram_vis, gtt;

      r = amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &vram);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_heap_info(vram) failed.\n");
         return false;
      }

      r = amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_VRAM,
                                 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                                 &vram_vis);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_heap_info(vram_vis) failed.\n");
         return false;
      }

      r = amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_GTT, 0, &gtt);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_query_heap_info(gtt) failed.\n");
         return false;
      }

      info->gart_size = gtt.heap_size;
      info->vram_size = vram.heap_size;
      info->vram_vis_size = vram_vis.heap_size;
   }

   /* Set chip identification. */
   info->pci_id = amdinfo->asic_id; /* TODO: is this correct? */
   info->vce_harvest_config = amdinfo->vce_harvest_config;

   switch (info->pci_id) {
#define CHIPSET(pci_id, cfamily) \
   case pci_id: \
      info->family = CHIP_##cfamily; \
      info->name = #cfamily; \
      break;
#include "pci_ids/radeonsi_pci_ids.h"
#undef CHIPSET

   default:
      fprintf(stderr, "amdgpu: Invalid PCI ID.\n");
      return false;
   }

   /* Raven2 uses the same PCI IDs as Raven1, but different revision IDs. */
   if (info->family == CHIP_RAVEN && amdinfo->chip_rev >= 0x8) {
      info->family = CHIP_RAVEN2;
      info->name = "RAVEN2";
   }

   if (info->family >= CHIP_VEGA10)
      info->chip_class = GFX9;
   else if (info->family >= CHIP_TONGA)
      info->chip_class = VI;
   else if (info->family >= CHIP_BONAIRE)
      info->chip_class = CIK;
   else if (info->family >= CHIP_TAHITI)
      info->chip_class = SI;
   else {
      fprintf(stderr, "amdgpu: Unknown family.\n");
      return false;
   }

   /* Set which chips have dedicated VRAM. */
   info->has_dedicated_vram =
      !(amdinfo->ids_flags & AMDGPU_IDS_FLAGS_FUSION);

   /* The kernel can split large buffers in VRAM but not in GTT, so large
    * allocations can fail or cause buffer movement failures in the kernel.
    */
   if (info->has_dedicated_vram)
      info->max_alloc_size = info->vram_size * 0.8;
   else
      info->max_alloc_size = info->gart_size * 0.7;

   /* Set hardware information. */
   info->gds_size = gds.gds_total_size;
   info->gds_gfx_partition_size = gds.gds_gfx_partition_size;
   /* convert the shader clock from KHz to MHz */
   info->max_shader_clock = amdinfo->max_engine_clk / 1000;
   info->num_tcc_blocks = device_info.num_tcc_blocks;
   info->max_se = amdinfo->num_shader_engines;
   info->max_sh_per_se = amdinfo->num_shader_arrays_per_engine;
   info->has_hw_decode =
      (uvd.available_rings != 0) || (vcn_dec.available_rings != 0) ||
      (vcn_jpeg.available_rings != 0);
   info->uvd_fw_version =
      uvd.available_rings ? uvd_version : 0;
   info->vce_fw_version =
      vce.available_rings ? vce_version : 0;
   info->uvd_enc_supported =
      uvd_enc.available_rings ? true : false;
   info->has_userptr = true;
   info->has_syncobj = has_syncobj(fd);
   info->has_syncobj_wait_for_submit = info->has_syncobj && info->drm_minor >= 20;
   info->has_fence_to_handle = info->has_syncobj && info->drm_minor >= 21;
   info->has_ctx_priority = info->drm_minor >= 22;
   /* TODO: Enable this once the kernel handles it efficiently. */
   info->has_local_buffers = info->drm_minor >= 20 &&
                             !info->has_dedicated_vram;
   info->kernel_flushes_hdp_before_ib = true;
   info->htile_cmask_support_1d_tiling = true;
   info->si_TA_CS_BC_BASE_ADDR_allowed = true;
   info->has_bo_metadata = true;
   info->has_gpu_reset_status_query = true;
   info->has_gpu_reset_counter_query = false;
   info->has_eqaa_surface_allocator = true;
   info->has_format_bc1_through_bc7 = true;
   /* DRM 3.1.0 doesn't flush TC for VI correctly. */
   info->kernel_flushes_tc_l2_after_ib = info->chip_class != VI ||
                                         info->drm_minor >= 2;
   info->has_indirect_compute_dispatch = true;
   /* SI doesn't support unaligned loads. */
   info->has_unaligned_shader_loads = info->chip_class != SI;
   /* Disable sparse mappings on SI due to VM faults in CP DMA. Enable them once
    * these faults are mitigated in software.
    * Disable sparse mappings on GFX9 due to hangs.
    */
   info->has_sparse_vm_mappings =
      info->chip_class >= CIK && info->chip_class <= VI &&
      info->drm_minor >= 13;
   info->has_2d_tiling = true;
   info->has_read_registers_query = true;

   info->num_render_backends = amdinfo->rb_pipes;
   /* The value returned by the kernel driver was wrong. */
   if (info->family == CHIP_KAVERI)
      info->num_render_backends = 2;

   info->clock_crystal_freq = amdinfo->gpu_counter_freq;
   if (!info->clock_crystal_freq) {
      fprintf(stderr, "amdgpu: clock crystal frequency is 0, timestamps will be wrong\n");
      info->clock_crystal_freq = 1;
   }
   info->tcc_cache_line_size = 64; /* TC L2 line size on GCN */
   info->gb_addr_config = amdinfo->gb_addr_cfg;
   if (info->chip_class == GFX9) {
      info->num_tile_pipes = 1 << G_0098F8_NUM_PIPES(amdinfo->gb_addr_cfg);
      info->pipe_interleave_bytes =
         256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(amdinfo->gb_addr_cfg);
   } else {
      info->num_tile_pipes = cik_get_num_tile_pipes(amdinfo);
      info->pipe_interleave_bytes =
         256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX6(amdinfo->gb_addr_cfg);
   }
   info->r600_has_virtual_memory = true;

   assert(util_is_power_of_two_or_zero(dma.available_rings + 1));
   assert(util_is_power_of_two_or_zero(compute.available_rings + 1));

   info->num_sdma_rings = util_bitcount(dma.available_rings);
   info->num_compute_rings = util_bitcount(compute.available_rings);

   /* Get the number of good compute units. */
   info->num_good_compute_units = 0;
   for (i = 0; i < info->max_se; i++)
      for (j = 0; j < info->max_sh_per_se; j++)
         info->num_good_compute_units +=
            util_bitcount(amdinfo->cu_bitmap[i][j]);
   info->num_good_cu_per_sh = info->num_good_compute_units /
                              (info->max_se * info->max_sh_per_se);

   memcpy(info->si_tile_mode_array, amdinfo->gb_tile_mode,
          sizeof(amdinfo->gb_tile_mode));
   info->enabled_rb_mask = amdinfo->enabled_rb_pipes_mask;

   memcpy(info->cik_macrotile_mode_array, amdinfo->gb_macro_tile_mode,
          sizeof(amdinfo->gb_macro_tile_mode));

   info->pte_fragment_size = alignment_info.size_local;
   info->gart_page_size = alignment_info.size_remote;

   if (info->chip_class == SI)
      info->gfx_ib_pad_with_type2 = TRUE;

   unsigned ib_align = 0;
   ib_align = MAX2(ib_align, gfx.ib_start_alignment);
   ib_align = MAX2(ib_align, compute.ib_start_alignment);
   ib_align = MAX2(ib_align, dma.ib_start_alignment);
   ib_align = MAX2(ib_align, uvd.ib_start_alignment);
   ib_align = MAX2(ib_align, uvd_enc.ib_start_alignment);
   ib_align = MAX2(ib_align, vce.ib_start_alignment);
   ib_align = MAX2(ib_align, vcn_dec.ib_start_alignment);
   ib_align = MAX2(ib_align, vcn_enc.ib_start_alignment);
   ib_align = MAX2(ib_align, vcn_jpeg.ib_start_alignment);
   assert(ib_align);
   info->ib_start_alignment = ib_align;

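   /* Displayable DCC (the change this commit enables on Raven/Raven2):
    * with DRM 3.31+, chips with a single RB can scan out unaligned DCC
    * directly, while multi-RB chips fall back to a retile blit. */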
   if (info->drm_minor >= 31 &&
       (info->family == CHIP_RAVEN ||
        info->family == CHIP_RAVEN2)) {
      if (info->num_render_backends == 1)
         info->use_display_dcc_unaligned = true;
      else
         info->use_display_dcc_with_retile_blit = true;
   }
   return true;
}

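/* The driver UUID is a fixed string, zero-padded to the requested size. */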
void ac_compute_driver_uuid(char *uuid, size_t size)
{
   char amd_uuid[] = "AMD-MESA-DRV";

   assert(size >= sizeof(amd_uuid));

   memset(uuid, 0, size);
   strncpy(uuid, amd_uuid, size);
}

void ac_compute_device_uuid(struct radeon_info *info, char *uuid, size_t size)
{
   uint32_t *uint_uuid = (uint32_t*)uuid;

   assert(size >= sizeof(uint32_t)*4);

   /**
    * Use the device info directly instead of using a sha1. GL/VK UUIDs
    * are 16 byte vs 20 byte for sha1, and the truncation that would be
    * required would get rid of part of the little entropy we have.
    */
   memset(uuid, 0, size);
   uint_uuid[0] = info->pci_domain;
   uint_uuid[1] = info->pci_bus;
   uint_uuid[2] = info->pci_dev;
   uint_uuid[3] = info->pci_func;
}

void ac_print_gpu_info(struct radeon_info *info)
{
   printf("Device info:\n");
   printf("    pci (domain:bus:dev.func): %04x:%02x:%02x.%x\n",
          info->pci_domain, info->pci_bus,
          info->pci_dev, info->pci_func);
   printf("    pci_id = 0x%x\n", info->pci_id);
   printf("    family = %i\n", info->family);
   printf("    chip_class = %i\n", info->chip_class);
   printf("    num_compute_rings = %u\n", info->num_compute_rings);
   printf("    num_sdma_rings = %i\n", info->num_sdma_rings);
   printf("    clock_crystal_freq = %i\n", info->clock_crystal_freq);
   printf("    tcc_cache_line_size = %u\n", info->tcc_cache_line_size);

   printf("    use_display_dcc_unaligned = %u\n", info->use_display_dcc_unaligned);
   printf("    use_display_dcc_with_retile_blit = %u\n", info->use_display_dcc_with_retile_blit);

   printf("Memory info:\n");
   printf("    pte_fragment_size = %u\n", info->pte_fragment_size);
   printf("    gart_page_size = %u\n", info->gart_page_size);
   printf("    gart_size = %i MB\n", (int)DIV_ROUND_UP(info->gart_size, 1024*1024));
   printf("    vram_size = %i MB\n", (int)DIV_ROUND_UP(info->vram_size, 1024*1024));
   printf("    vram_vis_size = %i MB\n", (int)DIV_ROUND_UP(info->vram_vis_size, 1024*1024));
   printf("    gds_size = %u kB\n", info->gds_size / 1024);
   printf("    gds_gfx_partition_size = %u kB\n", info->gds_gfx_partition_size / 1024);
   printf("    max_alloc_size = %i MB\n",
          (int)DIV_ROUND_UP(info->max_alloc_size, 1024*1024));
   printf("    min_alloc_size = %u\n", info->min_alloc_size);
   printf("    address32_hi = %u\n", info->address32_hi);
   printf("    has_dedicated_vram = %u\n", info->has_dedicated_vram);

   printf("CP info:\n");
   printf("    gfx_ib_pad_with_type2 = %i\n", info->gfx_ib_pad_with_type2);
   printf("    ib_start_alignment = %u\n", info->ib_start_alignment);
   printf("    me_fw_version = %i\n", info->me_fw_version);
   printf("    me_fw_feature = %i\n", info->me_fw_feature);
   printf("    pfp_fw_version = %i\n", info->pfp_fw_version);
   printf("    pfp_fw_feature = %i\n", info->pfp_fw_feature);
   printf("    ce_fw_version = %i\n", info->ce_fw_version);
   printf("    ce_fw_feature = %i\n", info->ce_fw_feature);

   printf("Multimedia info:\n");
   printf("    has_hw_decode = %u\n", info->has_hw_decode);
   printf("    uvd_enc_supported = %u\n", info->uvd_enc_supported);
   printf("    uvd_fw_version = %u\n", info->uvd_fw_version);
   printf("    vce_fw_version = %u\n", info->vce_fw_version);
   printf("    vce_harvest_config = %i\n", info->vce_harvest_config);

   printf("Kernel & winsys capabilities:\n");
   printf("    drm = %i.%i.%i\n", info->drm_major,
          info->drm_minor, info->drm_patchlevel);
   printf("    has_userptr = %i\n", info->has_userptr);
   printf("    has_syncobj = %u\n", info->has_syncobj);
   printf("    has_syncobj_wait_for_submit = %u\n", info->has_syncobj_wait_for_submit);
   printf("    has_fence_to_handle = %u\n", info->has_fence_to_handle);
   printf("    has_ctx_priority = %u\n", info->has_ctx_priority);
   printf("    has_local_buffers = %u\n", info->has_local_buffers);
   printf("    kernel_flushes_hdp_before_ib = %u\n", info->kernel_flushes_hdp_before_ib);
   printf("    htile_cmask_support_1d_tiling = %u\n", info->htile_cmask_support_1d_tiling);
   printf("    si_TA_CS_BC_BASE_ADDR_allowed = %u\n", info->si_TA_CS_BC_BASE_ADDR_allowed);
   printf("    has_bo_metadata = %u\n", info->has_bo_metadata);
   printf("    has_gpu_reset_status_query = %u\n", info->has_gpu_reset_status_query);
   printf("    has_gpu_reset_counter_query = %u\n", info->has_gpu_reset_counter_query);
   printf("    has_eqaa_surface_allocator = %u\n", info->has_eqaa_surface_allocator);
   printf("    has_format_bc1_through_bc7 = %u\n", info->has_format_bc1_through_bc7);
   printf("    kernel_flushes_tc_l2_after_ib = %u\n", info->kernel_flushes_tc_l2_after_ib);
   printf("    has_indirect_compute_dispatch = %u\n", info->has_indirect_compute_dispatch);
   printf("    has_unaligned_shader_loads = %u\n", info->has_unaligned_shader_loads);
   printf("    has_sparse_vm_mappings = %u\n", info->has_sparse_vm_mappings);
   printf("    has_2d_tiling = %u\n", info->has_2d_tiling);
   printf("    has_read_registers_query = %u\n", info->has_read_registers_query);

   printf("Shader core info:\n");
   printf("    max_shader_clock = %i\n", info->max_shader_clock);
   printf("    num_good_compute_units = %i\n", info->num_good_compute_units);
   printf("    num_good_cu_per_sh = %i\n", info->num_good_cu_per_sh);
   printf("    num_tcc_blocks = %i\n", info->num_tcc_blocks);
   printf("    max_se = %i\n", info->max_se);
   printf("    max_sh_per_se = %i\n", info->max_sh_per_se);

   printf("Render backend info:\n");
   printf("    num_render_backends = %i\n", info->num_render_backends);
   printf("    num_tile_pipes = %i\n", info->num_tile_pipes);
   printf("    pipe_interleave_bytes = %i\n", info->pipe_interleave_bytes);
   printf("    enabled_rb_mask = 0x%x\n", info->enabled_rb_mask);
   printf("    max_alignment = %u\n", (unsigned)info->max_alignment);

   printf("GB_ADDR_CONFIG:\n");
   if (info->chip_class >= GFX9) {
      printf("    num_pipes = %u\n",
             1 << G_0098F8_NUM_PIPES(info->gb_addr_config));
      printf("    pipe_interleave_size = %u\n",
             256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config));
      printf("    max_compressed_frags = %u\n",
             1 << G_0098F8_MAX_COMPRESSED_FRAGS(info->gb_addr_config));
      printf("    bank_interleave_size = %u\n",
             1 << G_0098F8_BANK_INTERLEAVE_SIZE(info->gb_addr_config));
      printf("    num_banks = %u\n",
             1 << G_0098F8_NUM_BANKS(info->gb_addr_config));
      printf("    shader_engine_tile_size = %u\n",
             16 << G_0098F8_SHADER_ENGINE_TILE_SIZE(info->gb_addr_config));
      printf("    num_shader_engines = %u\n",
             1 << G_0098F8_NUM_SHADER_ENGINES_GFX9(info->gb_addr_config));
      printf("    num_gpus = %u (raw)\n",
             G_0098F8_NUM_GPUS_GFX9(info->gb_addr_config));
      printf("    multi_gpu_tile_size = %u (raw)\n",
             G_0098F8_MULTI_GPU_TILE_SIZE(info->gb_addr_config));
      printf("    num_rb_per_se = %u\n",
             1 << G_0098F8_NUM_RB_PER_SE(info->gb_addr_config));
      printf("    row_size = %u\n",
             1024 << G_0098F8_ROW_SIZE(info->gb_addr_config));
      printf("    num_lower_pipes = %u (raw)\n",
             G_0098F8_NUM_LOWER_PIPES(info->gb_addr_config));
      printf("    se_enable = %u (raw)\n",
             G_0098F8_SE_ENABLE(info->gb_addr_config));
   } else {
      printf("    num_pipes = %u\n",
             1 << G_0098F8_NUM_PIPES(info->gb_addr_config));
      printf("    pipe_interleave_size = %u\n",
             256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX6(info->gb_addr_config));
      printf("    bank_interleave_size = %u\n",
             1 << G_0098F8_BANK_INTERLEAVE_SIZE(info->gb_addr_config));
      printf("    num_shader_engines = %u\n",
             1 << G_0098F8_NUM_SHADER_ENGINES_GFX6(info->gb_addr_config));
      printf("    shader_engine_tile_size = %u\n",
             16 << G_0098F8_SHADER_ENGINE_TILE_SIZE(info->gb_addr_config));
      printf("    num_gpus = %u (raw)\n",
             G_0098F8_NUM_GPUS_GFX6(info->gb_addr_config));
      printf("    multi_gpu_tile_size = %u (raw)\n",
             G_0098F8_MULTI_GPU_TILE_SIZE(info->gb_addr_config));
      printf("    row_size = %u\n",
             1024 << G_0098F8_ROW_SIZE(info->gb_addr_config));
      printf("    num_lower_pipes = %u (raw)\n",
             G_0098F8_NUM_LOWER_PIPES(info->gb_addr_config));
   }
}

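/* Per-family GS table depth; returns -1 on GFX9 and newer. */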
int
ac_get_gs_table_depth(enum chip_class chip_class, enum radeon_family family)
{
   if (chip_class >= GFX9)
      return -1;

   switch (family) {
   case CHIP_OLAND:
   case CHIP_HAINAN:
   case CHIP_KAVERI:
   case CHIP_KABINI:
   case CHIP_MULLINS:
   case CHIP_ICELAND:
   case CHIP_CARRIZO:
   case CHIP_STONEY:
      return 16;
   case CHIP_TAHITI:
   case CHIP_PITCAIRN:
   case CHIP_VERDE:
   case CHIP_BONAIRE:
   case CHIP_HAWAII:
   case CHIP_TONGA:
   case CHIP_FIJI:
   case CHIP_POLARIS10:
   case CHIP_POLARIS11:
   case CHIP_POLARIS12:
   case CHIP_VEGAM:
      return 32;
   default:
      unreachable("Unknown GPU");
   }
}

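/* Return the default PA_SC_RASTER_CONFIG(_1) values for the family, plus an
 * estimated SE tile repeat, with workarounds for old radeon/amdgpu kernels. */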
void
ac_get_raster_config(struct radeon_info *info,
                     uint32_t *raster_config_p,
                     uint32_t *raster_config_1_p,
                     uint32_t *se_tile_repeat_p)
{
   unsigned raster_config, raster_config_1, se_tile_repeat;

   switch (info->family) {
   /* 1 SE / 1 RB */
   case CHIP_HAINAN:
   case CHIP_KABINI:
   case CHIP_MULLINS:
   case CHIP_STONEY:
      raster_config = 0x00000000;
      raster_config_1 = 0x00000000;
      break;
   /* 1 SE / 4 RBs */
   case CHIP_VERDE:
      raster_config = 0x0000124a;
      raster_config_1 = 0x00000000;
      break;
   /* 1 SE / 2 RBs (Oland is special) */
   case CHIP_OLAND:
      raster_config = 0x00000082;
      raster_config_1 = 0x00000000;
      break;
   /* 1 SE / 2 RBs */
   case CHIP_KAVERI:
   case CHIP_ICELAND:
   case CHIP_CARRIZO:
      raster_config = 0x00000002;
      raster_config_1 = 0x00000000;
      break;
   /* 2 SEs / 4 RBs */
   case CHIP_BONAIRE:
   case CHIP_POLARIS11:
   case CHIP_POLARIS12:
      raster_config = 0x16000012;
      raster_config_1 = 0x00000000;
      break;
   /* 2 SEs / 8 RBs */
   case CHIP_TAHITI:
   case CHIP_PITCAIRN:
      raster_config = 0x2a00126a;
      raster_config_1 = 0x00000000;
      break;
   /* 4 SEs / 8 RBs */
   case CHIP_TONGA:
   case CHIP_POLARIS10:
      raster_config = 0x16000012;
      raster_config_1 = 0x0000002a;
      break;
   /* 4 SEs / 16 RBs */
   case CHIP_HAWAII:
   case CHIP_FIJI:
   case CHIP_VEGAM:
      raster_config = 0x3a00161a;
      raster_config_1 = 0x0000002e;
      break;
   default:
      fprintf(stderr,
              "ac: Unknown GPU, using 0 for raster_config\n");
      raster_config = 0x00000000;
      raster_config_1 = 0x00000000;
      break;
   }

   /* drm/radeon on Kaveri is buggy, so disable 1 RB to work around it.
    * This decreases performance by up to 50% when the RB is the bottleneck.
    */
   if (info->family == CHIP_KAVERI && info->drm_major == 2)
      raster_config = 0x00000000;

   /* Fiji: Old kernels have incorrect tiling config. This decreases
    * RB performance by 25%. (it disables 1 RB in the second packer)
    */
   if (info->family == CHIP_FIJI &&
       info->cik_macrotile_mode_array[0] == 0x000000e8) {
      raster_config = 0x16000012;
      raster_config_1 = 0x0000002a;
   }

   unsigned se_width = 8 << G_028350_SE_XSEL_GFX6(raster_config);
   unsigned se_height = 8 << G_028350_SE_YSEL_GFX6(raster_config);

   /* I don't know how to calculate this, though this is probably a good guess. */
   se_tile_repeat = MAX2(se_width, se_height) * info->max_se;

   *raster_config_p = raster_config;
   *raster_config_1_p = raster_config_1;
   if (se_tile_repeat_p)
      *se_tile_repeat_p = se_tile_repeat;
}

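/* Compute per-SE raster configs (and SE_PAIR_MAP on CIK+) for chips with
 * harvested (disabled) RBs, based on the enabled RB mask. */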
void
ac_get_harvested_configs(struct radeon_info *info,
                         unsigned raster_config,
                         unsigned *cik_raster_config_1_p,
                         unsigned *raster_config_se)
{
   unsigned sh_per_se = MAX2(info->max_sh_per_se, 1);
   unsigned num_se = MAX2(info->max_se, 1);
   unsigned rb_mask = info->enabled_rb_mask;
   unsigned num_rb = MIN2(info->num_render_backends, 16);
   unsigned rb_per_pkr = MIN2(num_rb / num_se / sh_per_se, 2);
   unsigned rb_per_se = num_rb / num_se;
   unsigned se_mask[4];
   unsigned se;

   se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
   se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
   se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
   se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;

   assert(num_se == 1 || num_se == 2 || num_se == 4);
   assert(sh_per_se == 1 || sh_per_se == 2);
   assert(rb_per_pkr == 1 || rb_per_pkr == 2);


   if (info->chip_class >= CIK) {
      unsigned raster_config_1 = *cik_raster_config_1_p;
      if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
            (!se_mask[2] && !se_mask[3]))) {
         raster_config_1 &= C_028354_SE_PAIR_MAP;

         if (!se_mask[0] && !se_mask[1]) {
            raster_config_1 |=
               S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_3);
         } else {
            raster_config_1 |=
               S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_0);
         }
         *cik_raster_config_1_p = raster_config_1;
      }
   }

   for (se = 0; se < num_se; se++) {
      unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
      unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
      int idx = (se / 2) * 2;

      raster_config_se[se] = raster_config;
      if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
         raster_config_se[se] &= C_028350_SE_MAP;

         if (!se_mask[idx]) {
            raster_config_se[se] |=
               S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_3);
         } else {
            raster_config_se[se] |=
               S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_0);
         }
      }

      pkr0_mask &= rb_mask;
      pkr1_mask &= rb_mask;
      if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
         raster_config_se[se] &= C_028350_PKR_MAP;

         if (!pkr0_mask) {
            raster_config_se[se] |=
               S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_3);
         } else {
            raster_config_se[se] |=
               S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_0);
         }
      }

      if (rb_per_se >= 2) {
         unsigned rb0_mask = 1 << (se * rb_per_se);
         unsigned rb1_mask = rb0_mask << 1;

         rb0_mask &= rb_mask;
         rb1_mask &= rb_mask;
         if (!rb0_mask || !rb1_mask) {
            raster_config_se[se] &= C_028350_RB_MAP_PKR0;

            if (!rb0_mask) {
               raster_config_se[se] |=
                  S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_3);
            } else {
               raster_config_se[se] |=
                  S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_0);
            }
         }

         if (rb_per_se > 2) {
            rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
            rb1_mask = rb0_mask << 1;
            rb0_mask &= rb_mask;
            rb1_mask &= rb_mask;
            if (!rb0_mask || !rb1_mask) {
               raster_config_se[se] &= C_028350_RB_MAP_PKR1;

               if (!rb0_mask) {
                  raster_config_se[se] |=
                     S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_3);
               } else {
                  raster_config_se[se] |=
                     S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_0);
               }
            }
         }
      }
   }
}