unsigned
radv_nir_get_max_workgroup_size(enum chip_class chip_class,
+                                gl_shader_stage stage,
                                const struct nir_shader *nir)
{
-       switch (nir->info.stage) {
+       switch (stage) {
        case MESA_SHADER_TESS_CTRL:
                return chip_class >= GFX7 ? 128 : 64;
        case MESA_SHADER_GEOMETRY:
                return chip_class >= GFX9 ? 128 : 64;
        case MESA_SHADER_COMPUTE:
                break;
        default:
                return 0;
        }

+       if (!nir)
+               return chip_class >= GFX9 ? 128 : 64;
+
        unsigned max_workgroup_size = nir->info.cs.local_size[0] *
                                      nir->info.cs.local_size[1] *
                                      nir->info.cs.local_size[2];
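/*
 * Illustrative sketch, not part of the patch: with the stage passed
 * explicitly, the limit can be queried before any NIR shader exists.
 * Assuming the radv/amd headers are in scope, a hypothetical caller could do:
 *
 *      unsigned max_cs = radv_nir_get_max_workgroup_size(GFX10,
 *                                                        MESA_SHADER_COMPUTE,
 *                                                        NULL);
 *
 * which takes the new !nir path and returns 128 on GFX9 and newer (64 before).
 */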
        for (int i = 0; i < shader_count; ++i) {
                ctx.max_workgroup_size = MAX2(ctx.max_workgroup_size,
                                              radv_nir_get_max_workgroup_size(ctx.options->chip_class,
-                                                                             shaders[i]));
+                                                                             shaders[i]->info.stage,
+                                                                             shaders[i]));
        }

        if (ctx.ac.chip_class >= GFX10) {
                                         const struct radv_nir_compiler_options *options);

unsigned radv_nir_get_max_workgroup_size(enum chip_class chip_class,
+                                        gl_shader_stage stage,
                                         const struct nir_shader *nir);

/* radv_shader_info.h */
                               lds_increment);
        } else if (stage == MESA_SHADER_COMPUTE) {
                unsigned max_workgroup_size =
-                       radv_nir_get_max_workgroup_size(chip_class, variant->nir);
+                       radv_nir_get_max_workgroup_size(chip_class, stage, variant->nir);
                lds_per_wave = (conf->lds_size * lds_increment) /
                               DIV_ROUND_UP(max_workgroup_size, 64);
        }
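/*
 * Worked example with assumed numbers, not taken from the patch: a 16x16x1
 * compute shader gives max_workgroup_size = 256, so it occupies
 * DIV_ROUND_UP(256, 64) = 4 waves of 64 lanes, and the LDS bytes declared by
 * the workgroup (conf->lds_size * lds_increment) are divided by 4 to get the
 * per-wave share used for the occupancy estimate:
 *
 *      unsigned max_workgroup_size = 16 * 16 * 1;
 *      unsigned waves = DIV_ROUND_UP(max_workgroup_size, 64);
 *      lds_per_wave = (conf->lds_size * lds_increment) / waves;
 */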