ac: add more ac_gpu_info related shader fields
[mesa.git] / src / amd / common / ac_gpu_info.c
1 /*
2 * Copyright © 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
13 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
14 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
15 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
16 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
18 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 */
25
26 #include "ac_gpu_info.h"
27 #include "addrlib/src/amdgpu_asic_addr.h"
28 #include "sid.h"
29
30 #include "util/macros.h"
31 #include "util/u_math.h"
32
33 #include <stdio.h>
34
35 #include <xf86drm.h>
36 #include <amdgpu_drm.h>
37
38 #include <amdgpu.h>
39
40 #define CIK_TILE_MODE_COLOR_2D 14
41
42 #define CIK__GB_TILE_MODE__PIPE_CONFIG(x) (((x) >> 6) & 0x1f)
43 #define CIK__PIPE_CONFIG__ADDR_SURF_P2 0
44 #define CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16 4
45 #define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16 5
46 #define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32 6
47 #define CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32 7
48 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16 8
49 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16 9
50 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16 10
51 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16 11
52 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16 12
53 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32 13
54 #define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32 14
55 #define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16 16
56 #define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16 17
57
58 static unsigned cik_get_num_tile_pipes(struct amdgpu_gpu_info *info)
59 {
60 unsigned mode2d = info->gb_tile_mode[CIK_TILE_MODE_COLOR_2D];
61
62 switch (CIK__GB_TILE_MODE__PIPE_CONFIG(mode2d)) {
63 case CIK__PIPE_CONFIG__ADDR_SURF_P2:
64 return 2;
65 case CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16:
66 case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16:
67 case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32:
68 case CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32:
69 return 4;
70 case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16:
71 case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16:
72 case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16:
73 case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16:
74 case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16:
75 case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32:
76 case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32:
77 return 8;
78 case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16:
79 case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16:
80 return 16;
81 default:
82 fprintf(stderr, "Invalid GFX7 pipe configuration, assuming P2\n");
83 assert(!"this should never occur");
84 return 2;
85 }
86 }
87
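/* Check whether the kernel exposes DRM sync objects (DRM_CAP_SYNCOBJ). */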
88 static bool has_syncobj(int fd)
89 {
90 uint64_t value;
91 if (drmGetCap(fd, DRM_CAP_SYNCOBJ, &value))
92 return false;
93 return value ? true : false;
94 }
95
96 static uint64_t fix_vram_size(uint64_t size)
97 {
98 /* The VRAM size is underreported, so we need to fix it, because
99 * it's used to compute the number of memory modules for harvesting.
100 */
101 return align64(size, 256*1024*1024);
102 }
103
104 bool ac_query_gpu_info(int fd, void *dev_p,
105 struct radeon_info *info,
106 struct amdgpu_gpu_info *amdinfo)
107 {
108 struct drm_amdgpu_info_device device_info = {};
109 struct amdgpu_buffer_size_alignments alignment_info = {};
110 struct drm_amdgpu_info_hw_ip dma = {}, compute = {}, uvd = {};
111 struct drm_amdgpu_info_hw_ip uvd_enc = {}, vce = {}, vcn_dec = {}, vcn_jpeg = {};
112 struct drm_amdgpu_info_hw_ip vcn_enc = {}, gfx = {};
113 struct amdgpu_gds_resource_info gds = {};
114 uint32_t vce_version = 0, vce_feature = 0, uvd_version = 0, uvd_feature = 0;
115 int r, i, j;
116 amdgpu_device_handle dev = dev_p;
117 drmDevicePtr devinfo;
118
119 /* Get PCI info. */
120 r = drmGetDevice2(fd, 0, &devinfo);
121 if (r) {
122 fprintf(stderr, "amdgpu: drmGetDevice2 failed.\n");
123 return false;
124 }
125 info->pci_domain = devinfo->businfo.pci->domain;
126 info->pci_bus = devinfo->businfo.pci->bus;
127 info->pci_dev = devinfo->businfo.pci->dev;
128 info->pci_func = devinfo->businfo.pci->func;
129 drmFreeDevice(&devinfo);
130
131 assert(info->drm_major == 3);
132 info->is_amdgpu = true;
133
134 /* Query hardware and driver information. */
135 r = amdgpu_query_gpu_info(dev, amdinfo);
136 if (r) {
137 fprintf(stderr, "amdgpu: amdgpu_query_gpu_info failed.\n");
138 return false;
139 }
140
141 r = amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(device_info),
142 &device_info);
143 if (r) {
144 fprintf(stderr, "amdgpu: amdgpu_query_info(dev_info) failed.\n");
145 return false;
146 }
147
148 r = amdgpu_query_buffer_size_alignment(dev, &alignment_info);
149 if (r) {
150 fprintf(stderr, "amdgpu: amdgpu_query_buffer_size_alignment failed.\n");
151 return false;
152 }
153
154 r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_DMA, 0, &dma);
155 if (r) {
156 fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(dma) failed.\n");
157 return false;
158 }
159
160 r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_GFX, 0, &gfx);
161 if (r) {
162 fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(gfx) failed.\n");
163 return false;
164 }
165
166 r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_COMPUTE, 0, &compute);
167 if (r) {
168 fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(compute) failed.\n");
169 return false;
170 }
171
172 r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_UVD, 0, &uvd);
173 if (r) {
174 fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(uvd) failed.\n");
175 return false;
176 }
177
178 if (info->drm_minor >= 17) {
179 r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_UVD_ENC, 0, &uvd_enc);
180 if (r) {
181 fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(uvd_enc) failed.\n");
182 return false;
183 }
184 }
185
186 if (info->drm_minor >= 17) {
187 r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCN_DEC, 0, &vcn_dec);
188 if (r) {
189 fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vcn_dec) failed.\n");
190 return false;
191 }
192 }
193
194 if (info->drm_minor >= 17) {
195 r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCN_ENC, 0, &vcn_enc);
196 if (r) {
197 fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vcn_enc) failed.\n");
198 return false;
199 }
200 }
201
202 if (info->drm_minor >= 27) {
203 r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCN_JPEG, 0, &vcn_jpeg);
204 if (r) {
205 fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vcn_jpeg) failed.\n");
206 return false;
207 }
208 }
209
210 r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_ME, 0, 0,
211 &info->me_fw_version,
212 &info->me_fw_feature);
213 if (r) {
214 fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(me) failed.\n");
215 return false;
216 }
217
218 r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_PFP, 0, 0,
219 &info->pfp_fw_version,
220 &info->pfp_fw_feature);
221 if (r) {
222 fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(pfp) failed.\n");
223 return false;
224 }
225
226 r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_GFX_CE, 0, 0,
227 &info->ce_fw_version,
228 &info->ce_fw_feature);
229 if (r) {
230 fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(ce) failed.\n");
231 return false;
232 }
233
234 r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_UVD, 0, 0,
235 &uvd_version, &uvd_feature);
236 if (r) {
237 fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(uvd) failed.\n");
238 return false;
239 }
240
241 r = amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_VCE, 0, &vce);
242 if (r) {
243 fprintf(stderr, "amdgpu: amdgpu_query_hw_ip_info(vce) failed.\n");
244 return false;
245 }
246
247 r = amdgpu_query_firmware_version(dev, AMDGPU_INFO_FW_VCE, 0, 0,
248 &vce_version, &vce_feature);
249 if (r) {
250 fprintf(stderr, "amdgpu: amdgpu_query_firmware_version(vce) failed.\n");
251 return false;
252 }
253
254 r = amdgpu_query_sw_info(dev, amdgpu_sw_info_address32_hi, &info->address32_hi);
255 if (r) {
256 fprintf(stderr, "amdgpu: amdgpu_query_sw_info(address32_hi) failed.\n");
257 return false;
258 }
259
260 r = amdgpu_query_gds_info(dev, &gds);
261 if (r) {
262 fprintf(stderr, "amdgpu: amdgpu_query_gds_info failed.\n");
263 return false;
264 }
265
266 if (info->drm_minor >= 9) {
267 struct drm_amdgpu_memory_info meminfo = {};
268
269 r = amdgpu_query_info(dev, AMDGPU_INFO_MEMORY, sizeof(meminfo), &meminfo);
270 if (r) {
271 fprintf(stderr, "amdgpu: amdgpu_query_info(memory) failed.\n");
272 return false;
273 }
274
275 /* Note: usable_heap_size values can be random and can't be relied on. */
276 info->gart_size = meminfo.gtt.total_heap_size;
277 info->vram_size = fix_vram_size(meminfo.vram.total_heap_size);
278 info->vram_vis_size = meminfo.cpu_accessible_vram.total_heap_size;
279 } else {
280 /* This is a deprecated interface, which reports usable sizes
281 * (total minus pinned), but the pinned size computation is
282 * buggy, so the values returned from these functions can be
283 * random.
284 */
285 struct amdgpu_heap_info vram, vram_vis, gtt;
286
287 r = amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &vram);
288 if (r) {
289 fprintf(stderr, "amdgpu: amdgpu_query_heap_info(vram) failed.\n");
290 return false;
291 }
292
293 r = amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_VRAM,
294 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
295 &vram_vis);
296 if (r) {
297 fprintf(stderr, "amdgpu: amdgpu_query_heap_info(vram_vis) failed.\n");
298 return false;
299 }
300
301 r = amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_GTT, 0, &gtt);
302 if (r) {
303 fprintf(stderr, "amdgpu: amdgpu_query_heap_info(gtt) failed.\n");
304 return false;
305 }
306
307 info->gart_size = gtt.heap_size;
308 info->vram_size = fix_vram_size(vram.heap_size);
309 info->vram_vis_size = vram_vis.heap_size;
310 }
311
312 /* Set chip identification. */
313 info->pci_id = amdinfo->asic_id; /* TODO: is this correct? */
314 info->vce_harvest_config = amdinfo->vce_harvest_config;
315
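/* Map the queried family and chip_external_rev to a CHIP_* enum value and a name string. */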
316 #define identify_chip2(asic, chipname) \
317 if (ASICREV_IS(amdinfo->chip_external_rev, asic)) { \
318 info->family = CHIP_##chipname; \
319 info->name = #chipname; \
320 }
321 #define identify_chip(chipname) identify_chip2(chipname, chipname)
322
323 switch (amdinfo->family_id) {
324 case FAMILY_SI:
325 identify_chip(TAHITI);
326 identify_chip(PITCAIRN);
327 identify_chip2(CAPEVERDE, VERDE);
328 identify_chip(OLAND);
329 identify_chip(HAINAN);
330 break;
331 case FAMILY_CI:
332 identify_chip(BONAIRE);
333 identify_chip(HAWAII);
334 break;
335 case FAMILY_KV:
336 identify_chip2(SPECTRE, KAVERI);
337 identify_chip2(SPOOKY, KAVERI);
338 identify_chip2(KALINDI, KABINI);
339 identify_chip2(GODAVARI, KABINI);
340 break;
341 case FAMILY_VI:
342 identify_chip(ICELAND);
343 identify_chip(TONGA);
344 identify_chip(FIJI);
345 identify_chip(POLARIS10);
346 identify_chip(POLARIS11);
347 identify_chip(POLARIS12);
348 identify_chip(VEGAM);
349 break;
350 case FAMILY_CZ:
351 identify_chip(CARRIZO);
352 identify_chip(STONEY);
353 break;
354 case FAMILY_AI:
355 identify_chip(VEGA10);
356 identify_chip(VEGA12);
357 identify_chip(VEGA20);
358 identify_chip(ARCTURUS);
359 break;
360 case FAMILY_RV:
361 identify_chip(RAVEN);
362 identify_chip(RAVEN2);
363 identify_chip(RENOIR);
364 break;
365 case FAMILY_NV:
366 identify_chip(NAVI10);
367 identify_chip(NAVI12);
368 identify_chip(NAVI14);
369 break;
370 }
371
372 if (!info->name) {
373 fprintf(stderr, "amdgpu: unknown (family_id, chip_external_rev): (%u, %u)\n",
374 amdinfo->family_id, amdinfo->chip_external_rev);
375 return false;
376 }
377
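/* CHIP_* values are ordered by hardware generation, so range checks on the
 * family are enough to derive the gfx level (chip_class).
 */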
378 if (info->family >= CHIP_NAVI10)
379 info->chip_class = GFX10;
380 else if (info->family >= CHIP_VEGA10)
381 info->chip_class = GFX9;
382 else if (info->family >= CHIP_TONGA)
383 info->chip_class = GFX8;
384 else if (info->family >= CHIP_BONAIRE)
385 info->chip_class = GFX7;
386 else if (info->family >= CHIP_TAHITI)
387 info->chip_class = GFX6;
388 else {
389 fprintf(stderr, "amdgpu: Unknown family.\n");
390 return false;
391 }
392
393 info->family_id = amdinfo->family_id;
394 info->chip_external_rev = amdinfo->chip_external_rev;
395 info->marketing_name = amdgpu_get_marketing_name(dev);
396 info->is_pro_graphics = info->marketing_name &&
397 (!strcmp(info->marketing_name, "Pro") ||
398 !strcmp(info->marketing_name, "PRO") ||
399 !strcmp(info->marketing_name, "Frontier"));
400
401 /* Set which chips have dedicated VRAM. */
402 info->has_dedicated_vram =
403 !(amdinfo->ids_flags & AMDGPU_IDS_FLAGS_FUSION);
404
405 /* The kernel can split large buffers in VRAM but not in GTT, so large
406 * allocations can fail or cause buffer movement failures in the kernel.
407 */
408 if (info->has_dedicated_vram)
409 info->max_alloc_size = info->vram_size * 0.8;
410 else
411 info->max_alloc_size = info->gart_size * 0.7;
412
413 /* Set which chips have uncached device memory. */
414 info->has_l2_uncached = info->chip_class >= GFX9;
415
416 /* Set hardware information. */
417 info->gds_size = gds.gds_total_size;
418 info->gds_gfx_partition_size = gds.gds_gfx_partition_size;
419 /* Convert the shader clock from kHz to MHz. */
420 info->max_shader_clock = amdinfo->max_engine_clk / 1000;
421 info->num_tcc_blocks = device_info.num_tcc_blocks;
422 info->max_se = amdinfo->num_shader_engines;
423 info->max_sh_per_se = amdinfo->num_shader_arrays_per_engine;
424 info->has_hw_decode =
425 (uvd.available_rings != 0) || (vcn_dec.available_rings != 0) ||
426 (vcn_jpeg.available_rings != 0);
427 info->uvd_fw_version =
428 uvd.available_rings ? uvd_version : 0;
429 info->vce_fw_version =
430 vce.available_rings ? vce_version : 0;
431 info->uvd_enc_supported =
432 uvd_enc.available_rings ? true : false;
433 info->has_userptr = true;
434 info->has_syncobj = has_syncobj(fd);
435 info->has_syncobj_wait_for_submit = info->has_syncobj && info->drm_minor >= 20;
436 info->has_fence_to_handle = info->has_syncobj && info->drm_minor >= 21;
437 info->has_ctx_priority = info->drm_minor >= 22;
438 info->has_local_buffers = info->drm_minor >= 20;
439 info->kernel_flushes_hdp_before_ib = true;
440 info->htile_cmask_support_1d_tiling = true;
441 info->si_TA_CS_BC_BASE_ADDR_allowed = true;
442 info->has_bo_metadata = true;
443 info->has_gpu_reset_status_query = true;
444 info->has_eqaa_surface_allocator = true;
445 info->has_format_bc1_through_bc7 = true;
446 /* DRM 3.1.0 doesn't flush TC for GFX8 correctly. */
447 info->kernel_flushes_tc_l2_after_ib = info->chip_class != GFX8 ||
448 info->drm_minor >= 2;
449 info->has_indirect_compute_dispatch = true;
450 /* GFX6 doesn't support unaligned loads. */
451 info->has_unaligned_shader_loads = info->chip_class != GFX6;
452 /* Disable sparse mappings on GFX6 due to VM faults in CP DMA. Enable them once
453 * these faults are mitigated in software.
454 * Disable sparse mappings on GFX9 due to hangs.
455 */
456 info->has_sparse_vm_mappings =
457 info->chip_class >= GFX7 && info->chip_class <= GFX8 &&
458 info->drm_minor >= 13;
459 info->has_2d_tiling = true;
460 info->has_read_registers_query = true;
461 info->has_scheduled_fence_dependency = info->drm_minor >= 28;
462
463 info->pa_sc_tile_steering_override = device_info.pa_sc_tile_steering_override;
464 info->num_render_backends = amdinfo->rb_pipes;
465 /* The value returned by the kernel driver was wrong. */
466 if (info->family == CHIP_KAVERI)
467 info->num_render_backends = 2;
468
469 info->clock_crystal_freq = amdinfo->gpu_counter_freq;
470 if (!info->clock_crystal_freq) {
471 fprintf(stderr, "amdgpu: clock crystal frequency is 0, timestamps will be wrong\n");
472 info->clock_crystal_freq = 1;
473 }
474 if (info->chip_class >= GFX10) {
475 info->tcc_cache_line_size = 128;
476
477 if (info->drm_minor >= 35) {
478 info->tcc_harvested = device_info.tcc_disabled_mask != 0;
479 } else {
480 /* This is a hack, but it's all we can do without a kernel upgrade. */
481 info->tcc_harvested =
482 (info->vram_size / info->num_tcc_blocks) != 512*1024*1024;
483 }
484 } else {
485 info->tcc_cache_line_size = 64;
486 }
487 info->gb_addr_config = amdinfo->gb_addr_cfg;
488 if (info->chip_class == GFX9) {
489 info->num_tile_pipes = 1 << G_0098F8_NUM_PIPES(amdinfo->gb_addr_cfg);
490 info->pipe_interleave_bytes =
491 256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(amdinfo->gb_addr_cfg);
492 } else {
493 info->num_tile_pipes = cik_get_num_tile_pipes(amdinfo);
494 info->pipe_interleave_bytes =
495 256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX6(amdinfo->gb_addr_cfg);
496 }
497 info->r600_has_virtual_memory = true;
498
499 /* On GFX6-GFX9, LDS is 64KB per CU (4 SIMDs), which is 16KB per SIMD
500 * (usage above 16KB makes some SIMDs unoccupied).
501 *
502 * On GFX10, LDS is 128KB in WGP mode and 64KB in CU mode. Assume the WGP mode is used.
503 */
504 info->lds_size_per_cu = info->chip_class >= GFX10 ? 128 * 1024 : 64 * 1024;
505
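/* available_rings is a bitmask of usable rings per IP type; it must be
 * contiguous from bit 0, and its popcount is the number of rings.
 */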
506 assert(util_is_power_of_two_or_zero(dma.available_rings + 1));
507 assert(util_is_power_of_two_or_zero(compute.available_rings + 1));
508
509 info->has_graphics = gfx.available_rings > 0;
510 info->num_rings[RING_GFX] = util_bitcount(gfx.available_rings);
511 info->num_rings[RING_COMPUTE] = util_bitcount(compute.available_rings);
512 info->num_rings[RING_DMA] = util_bitcount(dma.available_rings);
513 info->num_rings[RING_UVD] = util_bitcount(uvd.available_rings);
514 info->num_rings[RING_VCE] = util_bitcount(vce.available_rings);
515 info->num_rings[RING_UVD_ENC] = util_bitcount(uvd_enc.available_rings);
516 info->num_rings[RING_VCN_DEC] = util_bitcount(vcn_dec.available_rings);
517 info->num_rings[RING_VCN_ENC] = util_bitcount(vcn_enc.available_rings);
518 info->num_rings[RING_VCN_JPEG] = util_bitcount(vcn_jpeg.available_rings);
519
520 /* The mere presence of CLEAR_STATE in the IB causes random GPU hangs on
521 * GFX6, and on the radeon kernel some registers cleared by CLEAR_STATE
522 * (e.g. SPI_VS_OUT_CONFIG) hang the ASIC, so only enable CLEAR_STATE
523 * on GFX7+ with the amdgpu kernel driver. */
524 info->has_clear_state = info->chip_class >= GFX7;
525
526 info->has_distributed_tess = info->chip_class >= GFX10 ||
527 (info->chip_class >= GFX8 && info->max_se >= 2);
528
529 info->has_dcc_constant_encode = info->family == CHIP_RAVEN2 ||
530 info->family == CHIP_RENOIR ||
531 info->chip_class >= GFX10;
532
533 info->has_rbplus = info->family == CHIP_STONEY ||
534 info->chip_class >= GFX9;
535
536 /* Some chips have RB+ registers, but don't support RB+. Those must
537 * always disable it.
538 */
539 info->rbplus_allowed = info->has_rbplus &&
540 (info->family == CHIP_STONEY ||
541 info->family == CHIP_VEGA12 ||
542 info->family == CHIP_RAVEN ||
543 info->family == CHIP_RAVEN2 ||
544 info->family == CHIP_RENOIR);
545
546 info->has_out_of_order_rast = info->chip_class >= GFX8 &&
547 info->max_se >= 2;
548
549 /* TODO: Figure out how to use LOAD_CONTEXT_REG on GFX6-GFX7. */
550 info->has_load_ctx_reg_pkt = info->chip_class >= GFX9 ||
551 (info->chip_class >= GFX8 &&
552 info->me_fw_feature >= 41);
553
554 info->cpdma_prefetch_writes_memory = info->chip_class <= GFX8;
555
556 info->has_gfx9_scissor_bug = info->family == CHIP_VEGA10 ||
557 info->family == CHIP_RAVEN;
558
559 info->has_tc_compat_zrange_bug = info->chip_class >= GFX8 &&
560 info->chip_class <= GFX9;
561
562 info->has_msaa_sample_loc_bug = (info->family >= CHIP_POLARIS10 &&
563 info->family <= CHIP_POLARIS12) ||
564 info->family == CHIP_VEGA10 ||
565 info->family == CHIP_RAVEN;
566
567 info->has_ls_vgpr_init_bug = info->family == CHIP_VEGA10 ||
568 info->family == CHIP_RAVEN;
569
570 /* Get the number of good compute units. */
571 info->num_good_compute_units = 0;
572 for (i = 0; i < info->max_se; i++)
573 for (j = 0; j < info->max_sh_per_se; j++)
574 info->num_good_compute_units +=
575 util_bitcount(amdinfo->cu_bitmap[i][j]);
576 info->num_good_cu_per_sh = info->num_good_compute_units /
577 (info->max_se * info->max_sh_per_se);
578
579 /* Round down to the nearest multiple of 2, because the hw can't
580 * disable CUs. It can only disable whole WGPs (dual-CUs).
581 */
582 if (info->chip_class >= GFX10)
583 info->num_good_cu_per_sh -= info->num_good_cu_per_sh % 2;
584
585 memcpy(info->si_tile_mode_array, amdinfo->gb_tile_mode,
586 sizeof(amdinfo->gb_tile_mode));
587 info->enabled_rb_mask = amdinfo->enabled_rb_pipes_mask;
588
589 memcpy(info->cik_macrotile_mode_array, amdinfo->gb_macro_tile_mode,
590 sizeof(amdinfo->gb_macro_tile_mode));
591
592 info->pte_fragment_size = alignment_info.size_local;
593 info->gart_page_size = alignment_info.size_remote;
594
595 if (info->chip_class == GFX6)
596 info->gfx_ib_pad_with_type2 = true;
597
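/* The IB start alignment has to satisfy every IP block that may receive IBs,
 * so use the maximum of all queried alignments.
 */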
598 unsigned ib_align = 0;
599 ib_align = MAX2(ib_align, gfx.ib_start_alignment);
600 ib_align = MAX2(ib_align, compute.ib_start_alignment);
601 ib_align = MAX2(ib_align, dma.ib_start_alignment);
602 ib_align = MAX2(ib_align, uvd.ib_start_alignment);
603 ib_align = MAX2(ib_align, uvd_enc.ib_start_alignment);
604 ib_align = MAX2(ib_align, vce.ib_start_alignment);
605 ib_align = MAX2(ib_align, vcn_dec.ib_start_alignment);
606 ib_align = MAX2(ib_align, vcn_enc.ib_start_alignment);
607 ib_align = MAX2(ib_align, vcn_jpeg.ib_start_alignment);
608 assert(ib_align);
609 info->ib_start_alignment = ib_align;
610
611 if (info->drm_minor >= 31 &&
612 (info->family == CHIP_RAVEN ||
613 info->family == CHIP_RAVEN2 ||
614 info->family == CHIP_RENOIR)) {
615 if (info->num_render_backends == 1)
616 info->use_display_dcc_unaligned = true;
617 else
618 info->use_display_dcc_with_retile_blit = true;
619 }
620
621 info->has_gds_ordered_append = info->chip_class >= GFX7 &&
622 info->drm_minor >= 29;
623
624 if (info->chip_class >= GFX9) {
625 unsigned pc_lines = 0;
626
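/* pc_lines is the parameter cache size in cache lines; it bounds the
 * primitive binning (PBB) max alloc count computed below.
 */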
627 switch (info->family) {
628 case CHIP_VEGA10:
629 case CHIP_VEGA12:
630 case CHIP_VEGA20:
631 pc_lines = 2048;
632 break;
633 case CHIP_RAVEN:
634 case CHIP_RAVEN2:
635 case CHIP_RENOIR:
636 case CHIP_NAVI10:
637 case CHIP_NAVI12:
638 pc_lines = 1024;
639 break;
640 case CHIP_NAVI14:
641 pc_lines = 512;
642 break;
643 case CHIP_ARCTURUS:
644 break;
645 default:
646 assert(0);
647 }
648
649 info->pc_lines = pc_lines;
650
651 if (info->chip_class >= GFX10) {
652 info->pbb_max_alloc_count = pc_lines / 3;
653 } else {
654 info->pbb_max_alloc_count =
655 MIN2(128, pc_lines / (4 * info->max_se));
656 }
657 }
658
659 /* The number of SDPs is the same as the number of TCCs for now. */
660 if (info->chip_class >= GFX10)
661 info->num_sdp_interfaces = device_info.num_tcc_blocks;
662
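/* Polaris10 through VegaM run at most 8 Wave64 waves per SIMD; other chips allow 10. */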
663 info->max_wave64_per_simd = info->family >= CHIP_POLARIS10 &&
664 info->family <= CHIP_VEGAM ? 8 : 10;
665
666 /* The number is per SIMD. There are enough SGPRs for the maximum number
667 * of Wave32 waves, which is double the number for Wave64.
668 */
669 if (info->chip_class >= GFX10) {
670 info->num_physical_sgprs_per_simd = 128 * info->max_wave64_per_simd * 2;
671 info->min_sgpr_alloc = 128;
672 info->sgpr_alloc_granularity = 128;
673 } else if (info->chip_class >= GFX8) {
674 info->num_physical_sgprs_per_simd = 800;
675 info->min_sgpr_alloc = 16;
676 info->sgpr_alloc_granularity = 16;
677 } else {
678 info->num_physical_sgprs_per_simd = 512;
679 info->min_sgpr_alloc = 8;
680 info->sgpr_alloc_granularity = 8;
681 }
682
683 info->max_sgpr_alloc = info->family == CHIP_TONGA ||
684 info->family == CHIP_ICELAND ? 96 : 104;
685
686 info->min_vgpr_alloc = 4;
687 info->max_vgpr_alloc = 256;
688 info->vgpr_alloc_granularity = info->chip_class >= GFX10 ? 8 : 4;
689
690 info->num_physical_wave64_vgprs_per_simd = info->chip_class >= GFX10 ? 512 : 256;
691 info->num_simd_per_compute_unit = info->chip_class >= GFX10 ? 2 : 4;
692
693 return true;
694 }
695
696 void ac_compute_driver_uuid(char *uuid, size_t size)
697 {
698 char amd_uuid[] = "AMD-MESA-DRV";
699
700 assert(size >= sizeof(amd_uuid));
701
702 memset(uuid, 0, size);
703 strncpy(uuid, amd_uuid, size);
704 }
705
706 void ac_compute_device_uuid(struct radeon_info *info, char *uuid, size_t size)
707 {
708 uint32_t *uint_uuid = (uint32_t*)uuid;
709
710 assert(size >= sizeof(uint32_t)*4);
711
712 /*
713 * Use the device info directly instead of using a sha1. GL/VK UUIDs
714 * are 16 bytes vs 20 bytes for sha1, and the truncation that would be
715 * required would get rid of part of the little entropy we have.
716 */
717 memset(uuid, 0, size);
718 uint_uuid[0] = info->pci_domain;
719 uint_uuid[1] = info->pci_bus;
720 uint_uuid[2] = info->pci_dev;
721 uint_uuid[3] = info->pci_func;
722 }
723
724 void ac_print_gpu_info(struct radeon_info *info)
725 {
726 printf("Device info:\n");
727 printf(" pci (domain:bus:dev.func): %04x:%02x:%02x.%x\n",
728 info->pci_domain, info->pci_bus,
729 info->pci_dev, info->pci_func);
730
731 printf(" name = %s\n", info->name);
732 printf(" marketing_name = %s\n", info->marketing_name);
733 printf(" is_pro_graphics = %u\n", info->is_pro_graphics);
734 printf(" pci_id = 0x%x\n", info->pci_id);
735 printf(" family = %i\n", info->family);
736 printf(" chip_class = %i\n", info->chip_class);
737 printf(" family_id = %i\n", info->family_id);
738 printf(" chip_external_rev = %i\n", info->chip_external_rev);
739 printf(" clock_crystal_freq = %i\n", info->clock_crystal_freq);
740
741 printf("Features:\n");
742 printf(" has_graphics = %i\n", info->has_graphics);
743 printf(" num_rings[RING_GFX] = %i\n", info->num_rings[RING_GFX]);
744 printf(" num_rings[RING_DMA] = %i\n", info->num_rings[RING_DMA]);
745 printf(" num_rings[RING_COMPUTE] = %u\n", info->num_rings[RING_COMPUTE]);
746 printf(" num_rings[RING_UVD] = %i\n", info->num_rings[RING_UVD]);
747 printf(" num_rings[RING_VCE] = %i\n", info->num_rings[RING_VCE]);
748 printf(" num_rings[RING_UVD_ENC] = %i\n", info->num_rings[RING_UVD_ENC]);
749 printf(" num_rings[RING_VCN_DEC] = %i\n", info->num_rings[RING_VCN_DEC]);
750 printf(" num_rings[RING_VCN_ENC] = %i\n", info->num_rings[RING_VCN_ENC]);
751 printf(" num_rings[RING_VCN_JPEG] = %i\n", info->num_rings[RING_VCN_JPEG]);
752 printf(" has_clear_state = %u\n", info->has_clear_state);
753 printf(" has_distributed_tess = %u\n", info->has_distributed_tess);
754 printf(" has_dcc_constant_encode = %u\n", info->has_dcc_constant_encode);
755 printf(" has_rbplus = %u\n", info->has_rbplus);
756 printf(" rbplus_allowed = %u\n", info->rbplus_allowed);
757 printf(" has_load_ctx_reg_pkt = %u\n", info->has_load_ctx_reg_pkt);
758 printf(" has_out_of_order_rast = %u\n", info->has_out_of_order_rast);
759 printf(" cpdma_prefetch_writes_memory = %u\n", info->cpdma_prefetch_writes_memory);
760 printf(" has_gfx9_scissor_bug = %i\n", info->has_gfx9_scissor_bug);
761 printf(" has_tc_compat_zrange_bug = %i\n", info->has_tc_compat_zrange_bug);
762 printf(" has_msaa_sample_loc_bug = %i\n", info->has_msaa_sample_loc_bug);
763 printf(" has_ls_vgpr_init_bug = %i\n", info->has_ls_vgpr_init_bug);
764
765 printf("Display features:\n");
766 printf(" use_display_dcc_unaligned = %u\n", info->use_display_dcc_unaligned);
767 printf(" use_display_dcc_with_retile_blit = %u\n", info->use_display_dcc_with_retile_blit);
768
769 printf("Memory info:\n");
770 printf(" pte_fragment_size = %u\n", info->pte_fragment_size);
771 printf(" gart_page_size = %u\n", info->gart_page_size);
772 printf(" gart_size = %i MB\n", (int)DIV_ROUND_UP(info->gart_size, 1024*1024));
773 printf(" vram_size = %i MB\n", (int)DIV_ROUND_UP(info->vram_size, 1024*1024));
774 printf(" vram_vis_size = %i MB\n", (int)DIV_ROUND_UP(info->vram_vis_size, 1024*1024));
775 printf(" gds_size = %u kB\n", info->gds_size / 1024);
776 printf(" gds_gfx_partition_size = %u kB\n", info->gds_gfx_partition_size / 1024);
777 printf(" max_alloc_size = %i MB\n",
778 (int)DIV_ROUND_UP(info->max_alloc_size, 1024*1024));
779 printf(" min_alloc_size = %u\n", info->min_alloc_size);
780 printf(" address32_hi = %u\n", info->address32_hi);
781 printf(" has_dedicated_vram = %u\n", info->has_dedicated_vram);
782 printf(" num_sdp_interfaces = %u\n", info->num_sdp_interfaces);
783 printf(" num_tcc_blocks = %i\n", info->num_tcc_blocks);
784 printf(" tcc_cache_line_size = %u\n", info->tcc_cache_line_size);
785 printf(" tcc_harvested = %u\n", info->tcc_harvested);
786 printf(" pc_lines = %u\n", info->pc_lines);
787 printf(" lds_size_per_cu = %u\n", info->lds_size_per_cu);
788
789 printf("CP info:\n");
790 printf(" gfx_ib_pad_with_type2 = %i\n", info->gfx_ib_pad_with_type2);
791 printf(" ib_start_alignment = %u\n", info->ib_start_alignment);
792 printf(" me_fw_version = %i\n", info->me_fw_version);
793 printf(" me_fw_feature = %i\n", info->me_fw_feature);
794 printf(" pfp_fw_version = %i\n", info->pfp_fw_version);
795 printf(" pfp_fw_feature = %i\n", info->pfp_fw_feature);
796 printf(" ce_fw_version = %i\n", info->ce_fw_version);
797 printf(" ce_fw_feature = %i\n", info->ce_fw_feature);
798
799 printf("Multimedia info:\n");
800 printf(" has_hw_decode = %u\n", info->has_hw_decode);
801 printf(" uvd_enc_supported = %u\n", info->uvd_enc_supported);
802 printf(" uvd_fw_version = %u\n", info->uvd_fw_version);
803 printf(" vce_fw_version = %u\n", info->vce_fw_version);
804 printf(" vce_harvest_config = %i\n", info->vce_harvest_config);
805
806 printf("Kernel & winsys capabilities:\n");
807 printf(" drm = %i.%i.%i\n", info->drm_major,
808 info->drm_minor, info->drm_patchlevel);
809 printf(" has_userptr = %i\n", info->has_userptr);
810 printf(" has_syncobj = %u\n", info->has_syncobj);
811 printf(" has_syncobj_wait_for_submit = %u\n", info->has_syncobj_wait_for_submit);
812 printf(" has_fence_to_handle = %u\n", info->has_fence_to_handle);
813 printf(" has_ctx_priority = %u\n", info->has_ctx_priority);
814 printf(" has_local_buffers = %u\n", info->has_local_buffers);
815 printf(" kernel_flushes_hdp_before_ib = %u\n", info->kernel_flushes_hdp_before_ib);
816 printf(" htile_cmask_support_1d_tiling = %u\n", info->htile_cmask_support_1d_tiling);
817 printf(" si_TA_CS_BC_BASE_ADDR_allowed = %u\n", info->si_TA_CS_BC_BASE_ADDR_allowed);
818 printf(" has_bo_metadata = %u\n", info->has_bo_metadata);
819 printf(" has_gpu_reset_status_query = %u\n", info->has_gpu_reset_status_query);
820 printf(" has_eqaa_surface_allocator = %u\n", info->has_eqaa_surface_allocator);
821 printf(" has_format_bc1_through_bc7 = %u\n", info->has_format_bc1_through_bc7);
822 printf(" kernel_flushes_tc_l2_after_ib = %u\n", info->kernel_flushes_tc_l2_after_ib);
823 printf(" has_indirect_compute_dispatch = %u\n", info->has_indirect_compute_dispatch);
824 printf(" has_unaligned_shader_loads = %u\n", info->has_unaligned_shader_loads);
825 printf(" has_sparse_vm_mappings = %u\n", info->has_sparse_vm_mappings);
826 printf(" has_2d_tiling = %u\n", info->has_2d_tiling);
827 printf(" has_read_registers_query = %u\n", info->has_read_registers_query);
828 printf(" has_gds_ordered_append = %u\n", info->has_gds_ordered_append);
829 printf(" has_scheduled_fence_dependency = %u\n", info->has_scheduled_fence_dependency);
830
831 printf("Shader core info:\n");
832 printf(" max_shader_clock = %i\n", info->max_shader_clock);
833 printf(" num_good_compute_units = %i\n", info->num_good_compute_units);
834 printf(" num_good_cu_per_sh = %i\n", info->num_good_cu_per_sh);
835 printf(" max_se = %i\n", info->max_se);
836 printf(" max_sh_per_se = %i\n", info->max_sh_per_se);
837 printf(" max_wave64_per_simd = %i\n", info->max_wave64_per_simd);
838 printf(" num_physical_sgprs_per_simd = %i\n", info->num_physical_sgprs_per_simd);
839 printf(" num_physical_wave64_vgprs_per_simd = %i\n", info->num_physical_wave64_vgprs_per_simd);
840 printf(" num_simd_per_compute_unit = %i\n", info->num_simd_per_compute_unit);
841 printf(" min_sgpr_alloc = %i\n", info->min_sgpr_alloc);
842 printf(" max_sgpr_alloc = %i\n", info->max_sgpr_alloc);
843 printf(" sgpr_alloc_granularity = %i\n", info->sgpr_alloc_granularity);
844 printf(" min_vgpr_alloc = %i\n", info->min_vgpr_alloc);
845 printf(" max_vgpr_alloc = %i\n", info->max_vgpr_alloc);
846 printf(" vgpr_alloc_granularity = %i\n", info->vgpr_alloc_granularity);
847
848 printf("Render backend info:\n");
849 printf(" pa_sc_tile_steering_override = 0x%x\n", info->pa_sc_tile_steering_override);
850 printf(" num_render_backends = %i\n", info->num_render_backends);
851 printf(" num_tile_pipes = %i\n", info->num_tile_pipes);
852 printf(" pipe_interleave_bytes = %i\n", info->pipe_interleave_bytes);
853 printf(" enabled_rb_mask = 0x%x\n", info->enabled_rb_mask);
854 printf(" max_alignment = %u\n", (unsigned)info->max_alignment);
855 printf(" pbb_max_alloc_count = %u\n", info->pbb_max_alloc_count);
856
857 printf("GB_ADDR_CONFIG: 0x%08x\n", info->gb_addr_config);
858 if (info->chip_class >= GFX10) {
859 printf(" num_pipes = %u\n",
860 1 << G_0098F8_NUM_PIPES(info->gb_addr_config));
861 printf(" pipe_interleave_size = %u\n",
862 256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config));
863 printf(" max_compressed_frags = %u\n",
864 1 << G_0098F8_MAX_COMPRESSED_FRAGS(info->gb_addr_config));
865 } else if (info->chip_class == GFX9) {
866 printf(" num_pipes = %u\n",
867 1 << G_0098F8_NUM_PIPES(info->gb_addr_config));
868 printf(" pipe_interleave_size = %u\n",
869 256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config));
870 printf(" max_compressed_frags = %u\n",
871 1 << G_0098F8_MAX_COMPRESSED_FRAGS(info->gb_addr_config));
872 printf(" bank_interleave_size = %u\n",
873 1 << G_0098F8_BANK_INTERLEAVE_SIZE(info->gb_addr_config));
874 printf(" num_banks = %u\n",
875 1 << G_0098F8_NUM_BANKS(info->gb_addr_config));
876 printf(" shader_engine_tile_size = %u\n",
877 16 << G_0098F8_SHADER_ENGINE_TILE_SIZE(info->gb_addr_config));
878 printf(" num_shader_engines = %u\n",
879 1 << G_0098F8_NUM_SHADER_ENGINES_GFX9(info->gb_addr_config));
880 printf(" num_gpus = %u (raw)\n",
881 G_0098F8_NUM_GPUS_GFX9(info->gb_addr_config));
882 printf(" multi_gpu_tile_size = %u (raw)\n",
883 G_0098F8_MULTI_GPU_TILE_SIZE(info->gb_addr_config));
884 printf(" num_rb_per_se = %u\n",
885 1 << G_0098F8_NUM_RB_PER_SE(info->gb_addr_config));
886 printf(" row_size = %u\n",
887 1024 << G_0098F8_ROW_SIZE(info->gb_addr_config));
888 printf(" num_lower_pipes = %u (raw)\n",
889 G_0098F8_NUM_LOWER_PIPES(info->gb_addr_config));
890 printf(" se_enable = %u (raw)\n",
891 G_0098F8_SE_ENABLE(info->gb_addr_config));
892 } else {
893 printf(" num_pipes = %u\n",
894 1 << G_0098F8_NUM_PIPES(info->gb_addr_config));
895 printf(" pipe_interleave_size = %u\n",
896 256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX6(info->gb_addr_config));
897 printf(" bank_interleave_size = %u\n",
898 1 << G_0098F8_BANK_INTERLEAVE_SIZE(info->gb_addr_config));
899 printf(" num_shader_engines = %u\n",
900 1 << G_0098F8_NUM_SHADER_ENGINES_GFX6(info->gb_addr_config));
901 printf(" shader_engine_tile_size = %u\n",
902 16 << G_0098F8_SHADER_ENGINE_TILE_SIZE(info->gb_addr_config));
903 printf(" num_gpus = %u (raw)\n",
904 G_0098F8_NUM_GPUS_GFX6(info->gb_addr_config));
905 printf(" multi_gpu_tile_size = %u (raw)\n",
906 G_0098F8_MULTI_GPU_TILE_SIZE(info->gb_addr_config));
907 printf(" row_size = %u\n",
908 1024 << G_0098F8_ROW_SIZE(info->gb_addr_config));
909 printf(" num_lower_pipes = %u (raw)\n",
910 G_0098F8_NUM_LOWER_PIPES(info->gb_addr_config));
911 }
912 }
913
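/* Per-chip GS table depth; GFX9 and newer don't use it and return -1. */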
914 int
915 ac_get_gs_table_depth(enum chip_class chip_class, enum radeon_family family)
916 {
917 if (chip_class >= GFX9)
918 return -1;
919
920 switch (family) {
921 case CHIP_OLAND:
922 case CHIP_HAINAN:
923 case CHIP_KAVERI:
924 case CHIP_KABINI:
925 case CHIP_ICELAND:
926 case CHIP_CARRIZO:
927 case CHIP_STONEY:
928 return 16;
929 case CHIP_TAHITI:
930 case CHIP_PITCAIRN:
931 case CHIP_VERDE:
932 case CHIP_BONAIRE:
933 case CHIP_HAWAII:
934 case CHIP_TONGA:
935 case CHIP_FIJI:
936 case CHIP_POLARIS10:
937 case CHIP_POLARIS11:
938 case CHIP_POLARIS12:
939 case CHIP_VEGAM:
940 return 32;
941 default:
942 unreachable("Unknown GPU");
943 }
944 }
945
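/* Return the fixed PA_SC_RASTER_CONFIG / PA_SC_RASTER_CONFIG_1 values for
 * GFX6-GFX8 chips, and optionally the SE tile repeat derived from them.
 */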
946 void
947 ac_get_raster_config(struct radeon_info *info,
948 uint32_t *raster_config_p,
949 uint32_t *raster_config_1_p,
950 uint32_t *se_tile_repeat_p)
951 {
952 unsigned raster_config, raster_config_1, se_tile_repeat;
953
954 switch (info->family) {
955 /* 1 SE / 1 RB */
956 case CHIP_HAINAN:
957 case CHIP_KABINI:
958 case CHIP_STONEY:
959 raster_config = 0x00000000;
960 raster_config_1 = 0x00000000;
961 break;
962 /* 1 SE / 4 RBs */
963 case CHIP_VERDE:
964 raster_config = 0x0000124a;
965 raster_config_1 = 0x00000000;
966 break;
967 /* 1 SE / 2 RBs (Oland is special) */
968 case CHIP_OLAND:
969 raster_config = 0x00000082;
970 raster_config_1 = 0x00000000;
971 break;
972 /* 1 SE / 2 RBs */
973 case CHIP_KAVERI:
974 case CHIP_ICELAND:
975 case CHIP_CARRIZO:
976 raster_config = 0x00000002;
977 raster_config_1 = 0x00000000;
978 break;
979 /* 2 SEs / 4 RBs */
980 case CHIP_BONAIRE:
981 case CHIP_POLARIS11:
982 case CHIP_POLARIS12:
983 raster_config = 0x16000012;
984 raster_config_1 = 0x00000000;
985 break;
986 /* 2 SEs / 8 RBs */
987 case CHIP_TAHITI:
988 case CHIP_PITCAIRN:
989 raster_config = 0x2a00126a;
990 raster_config_1 = 0x00000000;
991 break;
992 /* 4 SEs / 8 RBs */
993 case CHIP_TONGA:
994 case CHIP_POLARIS10:
995 raster_config = 0x16000012;
996 raster_config_1 = 0x0000002a;
997 break;
998 /* 4 SEs / 16 RBs */
999 case CHIP_HAWAII:
1000 case CHIP_FIJI:
1001 case CHIP_VEGAM:
1002 raster_config = 0x3a00161a;
1003 raster_config_1 = 0x0000002e;
1004 break;
1005 default:
1006 fprintf(stderr,
1007 "ac: Unknown GPU, using 0 for raster_config\n");
1008 raster_config = 0x00000000;
1009 raster_config_1 = 0x00000000;
1010 break;
1011 }
1012
1013 /* drm/radeon on Kaveri is buggy, so disable 1 RB to work around it.
1014 * This decreases performance by up to 50% when the RB is the bottleneck.
1015 */
1016 if (info->family == CHIP_KAVERI && !info->is_amdgpu)
1017 raster_config = 0x00000000;
1018
1019 /* Fiji: Old kernels have incorrect tiling config. This decreases
1020 * RB performance by 25%. (it disables 1 RB in the second packer)
1021 */
1022 if (info->family == CHIP_FIJI &&
1023 info->cik_macrotile_mode_array[0] == 0x000000e8) {
1024 raster_config = 0x16000012;
1025 raster_config_1 = 0x0000002a;
1026 }
1027
1028 unsigned se_width = 8 << G_028350_SE_XSEL_GFX6(raster_config);
1029 unsigned se_height = 8 << G_028350_SE_YSEL_GFX6(raster_config);
1030
1031 /* I don't know how to calculate this, though this is probably a good guess. */
1032 se_tile_repeat = MAX2(se_width, se_height) * info->max_se;
1033
1034 *raster_config_p = raster_config;
1035 *raster_config_1_p = raster_config_1;
1036 if (se_tile_repeat_p)
1037 *se_tile_repeat_p = se_tile_repeat;
1038 }
1039
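/* Adjust the per-SE raster configs for chips with harvested (disabled) RBs. */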
1040 void
1041 ac_get_harvested_configs(struct radeon_info *info,
1042 unsigned raster_config,
1043 unsigned *cik_raster_config_1_p,
1044 unsigned *raster_config_se)
1045 {
1046 unsigned sh_per_se = MAX2(info->max_sh_per_se, 1);
1047 unsigned num_se = MAX2(info->max_se, 1);
1048 unsigned rb_mask = info->enabled_rb_mask;
1049 unsigned num_rb = MIN2(info->num_render_backends, 16);
1050 unsigned rb_per_pkr = MIN2(num_rb / num_se / sh_per_se, 2);
1051 unsigned rb_per_se = num_rb / num_se;
1052 unsigned se_mask[4];
1053 unsigned se;
1054
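/* Compute the mask of enabled RBs that belong to each shader engine. */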
1055 se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
1056 se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
1057 se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
1058 se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
1059
1060 assert(num_se == 1 || num_se == 2 || num_se == 4);
1061 assert(sh_per_se == 1 || sh_per_se == 2);
1062 assert(rb_per_pkr == 1 || rb_per_pkr == 2);
1063
1064
1065 if (info->chip_class >= GFX7) {
1066 unsigned raster_config_1 = *cik_raster_config_1_p;
1067 if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
1068 (!se_mask[2] && !se_mask[3]))) {
1069 raster_config_1 &= C_028354_SE_PAIR_MAP;
1070
1071 if (!se_mask[0] && !se_mask[1]) {
1072 raster_config_1 |=
1073 S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_3);
1074 } else {
1075 raster_config_1 |=
1076 S_028354_SE_PAIR_MAP(V_028354_RASTER_CONFIG_SE_PAIR_MAP_0);
1077 }
1078 *cik_raster_config_1_p = raster_config_1;
1079 }
1080 }
1081
1082 for (se = 0; se < num_se; se++) {
1083 unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
1084 unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
1085 int idx = (se / 2) * 2;
1086
1087 raster_config_se[se] = raster_config;
1088 if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
1089 raster_config_se[se] &= C_028350_SE_MAP;
1090
1091 if (!se_mask[idx]) {
1092 raster_config_se[se] |=
1093 S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_3);
1094 } else {
1095 raster_config_se[se] |=
1096 S_028350_SE_MAP(V_028350_RASTER_CONFIG_SE_MAP_0);
1097 }
1098 }
1099
1100 pkr0_mask &= rb_mask;
1101 pkr1_mask &= rb_mask;
1102 if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
1103 raster_config_se[se] &= C_028350_PKR_MAP;
1104
1105 if (!pkr0_mask) {
1106 raster_config_se[se] |=
1107 S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_3);
1108 } else {
1109 raster_config_se[se] |=
1110 S_028350_PKR_MAP(V_028350_RASTER_CONFIG_PKR_MAP_0);
1111 }
1112 }
1113
1114 if (rb_per_se >= 2) {
1115 unsigned rb0_mask = 1 << (se * rb_per_se);
1116 unsigned rb1_mask = rb0_mask << 1;
1117
1118 rb0_mask &= rb_mask;
1119 rb1_mask &= rb_mask;
1120 if (!rb0_mask || !rb1_mask) {
1121 raster_config_se[se] &= C_028350_RB_MAP_PKR0;
1122
1123 if (!rb0_mask) {
1124 raster_config_se[se] |=
1125 S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_3);
1126 } else {
1127 raster_config_se[se] |=
1128 S_028350_RB_MAP_PKR0(V_028350_RASTER_CONFIG_RB_MAP_0);
1129 }
1130 }
1131
1132 if (rb_per_se > 2) {
1133 rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
1134 rb1_mask = rb0_mask << 1;
1135 rb0_mask &= rb_mask;
1136 rb1_mask &= rb_mask;
1137 if (!rb0_mask || !rb1_mask) {
1138 raster_config_se[se] &= C_028350_RB_MAP_PKR1;
1139
1140 if (!rb0_mask) {
1141 raster_config_se[se] |=
1142 S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_3);
1143 } else {
1144 raster_config_se[se] |=
1145 S_028350_RB_MAP_PKR1(V_028350_RASTER_CONFIG_RB_MAP_0);
1146 }
1147 }
1148 }
1149 }
1150 }
1151 }
1152
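/* Build the COMPUTE_RESOURCE_LIMITS register value for a compute dispatch. */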
1153 unsigned ac_get_compute_resource_limits(struct radeon_info *info,
1154 unsigned waves_per_threadgroup,
1155 unsigned max_waves_per_sh,
1156 unsigned threadgroups_per_cu)
1157 {
1158 unsigned compute_resource_limits =
1159 S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);
1160
1161 if (info->chip_class >= GFX7) {
1162 unsigned num_cu_per_se = info->num_good_compute_units /
1163 info->max_se;
1164
1165 /* Force even distribution on all SIMDs in CU if the workgroup
1166 * size is 64. This has shown some good improvements if # of CUs
1167 * per SE is not a multiple of 4.
1168 */
1169 if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
1170 compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);
1171
1172 assert(threadgroups_per_cu >= 1 && threadgroups_per_cu <= 8);
1173 compute_resource_limits |= S_00B854_WAVES_PER_SH(max_waves_per_sh) |
1174 S_00B854_CU_GROUP_COUNT(threadgroups_per_cu - 1);
1175 } else {
1176 /* GFX6 */
1177 if (max_waves_per_sh) {
1178 unsigned limit_div16 = DIV_ROUND_UP(max_waves_per_sh, 16);
1179 compute_resource_limits |= S_00B854_WAVES_PER_SH_SI(limit_div16);
1180 }
1181 }
1182 return compute_resource_limits;
1183 }