if (size == 0) {
/* We own the lock. Allocate a buffer */
- struct anv_physical_device *physical_device =
+ const struct anv_physical_device *physical_device =
&device->instance->physicalDevice;
+ const struct gen_device_info *devinfo = &physical_device->info;
/* WaCSScratchSize:hsw
 *
 * Haswell's scratch space addressing is sparse rather than tightly
 * packed: the thread ID encodes the EU in 4 bits and the thread within
 * an EU in 3 bits, even though there are only 10 EUs per subslice and
 * 7 threads per EU.  We therefore size scratch for 16 * 8 thread IDs
 * per subslice rather than 10 * 7.
 */
const unsigned subslices = MAX2(physical_device->subslice_total, 1);
const unsigned scratch_ids_per_subslice =
- device->info.is_haswell ? 16 * 8 : physical_device->max_cs_threads;
+ device->info.is_haswell ? 16 * 8 : devinfo->max_cs_threads;
uint32_t max_threads[] = {
- [MESA_SHADER_VERTEX] = physical_device->max_vs_threads,
- [MESA_SHADER_TESS_CTRL] = physical_device->max_hs_threads,
- [MESA_SHADER_TESS_EVAL] = physical_device->max_ds_threads,
- [MESA_SHADER_GEOMETRY] = physical_device->max_gs_threads,
- [MESA_SHADER_FRAGMENT] = physical_device->max_wm_threads,
+ [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
+ [MESA_SHADER_TESS_CTRL] = devinfo->max_hs_threads,
+ [MESA_SHADER_TESS_EVAL] = devinfo->max_ds_threads,
+ [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
+ [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
[MESA_SHADER_COMPUTE] = scratch_ids_per_subslice * subslices,
};
bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);
- device->max_vs_threads = device->info.max_vs_threads;
- device->max_hs_threads = device->info.max_hs_threads;
- device->max_ds_threads = device->info.max_ds_threads;
- device->max_gs_threads = device->info.max_gs_threads;
- device->max_wm_threads = device->info.max_wm_threads;
-
/* GENs prior to 8 do not support EU/Subslice info */
if (device->info.gen >= 8) {
device->subslice_total = anv_gem_get_param(fd, I915_PARAM_SUBSLICE_TOTAL);
if (device->info.is_cherryview &&
device->subslice_total > 0 && device->eu_total > 0) {
/* Logical CS threads = EUs per subslice * 7 threads per EU */
- device->max_cs_threads = device->eu_total / device->subslice_total * 7;
+ uint32_t max_cs_threads = device->eu_total / device->subslice_total * 7;
/* Fuse configurations may give more threads than expected, never less. */
- if (device->max_cs_threads < device->info.max_cs_threads)
- device->max_cs_threads = device->info.max_cs_threads;
- } else {
- device->max_cs_threads = device->info.max_cs_threads;
+ if (max_cs_threads > device->info.max_cs_threads)
+ device->info.max_cs_threads = max_cs_threads;
}
close(fd);
.maxFragmentCombinedOutputResources = 8,
.maxComputeSharedMemorySize = 32768,
.maxComputeWorkGroupCount = { 65535, 65535, 65535 },
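+ /* Each compute hardware thread is assumed to cover at most 16
+  * invocations (SIMD16 dispatch), hence the 16 * max_cs_threads
+  * limits below.
+  */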
- .maxComputeWorkGroupInvocations = 16 * pdevice->max_cs_threads,
+ .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
.maxComputeWorkGroupSize = {
- 16 * pdevice->max_cs_threads,
- 16 * pdevice->max_cs_threads,
- 16 * pdevice->max_cs_threads,
+ 16 * devinfo->max_cs_threads,
+ 16 * devinfo->max_cs_threads,
+ 16 * devinfo->max_cs_threads,
},
.subPixelPrecisionBits = 4 /* FIXME */,
.subTexelPrecisionBits = 4 /* FIXME */,
uint32_t eu_total;
uint32_t subslice_total;
- /**
- * Platform specific constants containing the maximum number of threads
- * for each pipeline stage.
- */
- uint32_t max_vs_threads;
- uint32_t max_hs_threads;
- uint32_t max_ds_threads;
- uint32_t max_gs_threads;
- uint32_t max_wm_threads;
- uint32_t max_cs_threads;
-
struct anv_wsi_interface * wsi[VK_ICD_WSI_PLATFORM_MAX];
};
{
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
- struct anv_physical_device *physical_device =
+ const struct anv_physical_device *physical_device =
&device->instance->physicalDevice;
+ const struct gen_device_info *devinfo = &physical_device->info;
struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
struct anv_pipeline *pipeline;
VkResult result;
vs.VertexURBEntryReadLength = vs_prog_data->base.urb_read_length;
vs.VertexURBEntryReadOffset = 0;
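+ /* The MaximumNumberofThreads fields take a zero-based value, i.e.
+  * the thread count minus one.
+  */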
- vs.MaximumNumberofThreads = physical_device->max_vs_threads - 1;
+ vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
vs.StatisticsEnable = true;
vs.VSFunctionEnable = true;
}
gs.DispatchGRFStartRegisterforURBData =
gs_prog_data->base.base.dispatch_grf_start_reg;
- gs.MaximumNumberofThreads = physical_device->max_gs_threads - 1;
+ gs.MaximumNumberofThreads = devinfo->max_gs_threads - 1;
/* This is in the next dword on HSW. */
gs.ControlDataFormat = gs_prog_data->control_data_format;
gs.ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords;
* don't at least set the maximum number of threads.
*/
anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
- ps.MaximumNumberofThreads = physical_device->max_wm_threads - 1;
+ ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
}
} else {
const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
.offset = 0,
};
ps.PerThreadScratchSpace = scratch_space(&wm_prog_data->base);
- ps.MaximumNumberofThreads = physical_device->max_wm_threads - 1;
+ ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
ps.PushConstantEnable = wm_prog_data->base.nr_params > 0;
ps.AttributeEnable = wm_prog_data->num_varying_inputs > 0;
ps.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
{
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
- struct anv_physical_device *physical_device =
+ const struct anv_physical_device *physical_device =
&device->instance->physicalDevice;
+ const struct gen_device_info *devinfo = &physical_device->info;
struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
struct anv_pipeline *pipeline;
VkResult result;
gs.DispatchGRFStartRegisterForURBData =
gs_prog_data->base.base.dispatch_grf_start_reg;
- gs.MaximumNumberofThreads = physical_device->max_gs_threads / 2 - 1;
+ gs.MaximumNumberofThreads = devinfo->max_gs_threads / 2 - 1;
gs.ControlDataHeaderSize = gs_prog_data->control_data_header_size_hwords;
gs.DispatchMode = gs_prog_data->base.dispatch_mode;
gs.StatisticsEnable = true;
vs.VertexURBEntryReadLength = vs_prog_data->base.urb_read_length;
vs.VertexURBEntryReadOffset = 0;
- vs.MaximumNumberofThreads = physical_device->max_vs_threads - 1;
+ vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
vs.StatisticsEnable = false;
vs.SIMD8DispatchEnable = pipeline->vs_simd8 != NO_KERNEL;
vs.VertexCacheDisable = false;
VkPipeline* pPipeline)
{
ANV_FROM_HANDLE(anv_device, device, _device);
- struct anv_physical_device *physical_device =
+ const struct anv_physical_device *physical_device =
&device->instance->physicalDevice;
+ const struct gen_device_info *devinfo = &physical_device->info;
struct anv_pipeline *pipeline;
VkResult result;
vfe.GPGPUMode = true;
#endif
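+ /* max_cs_threads appears to be a per-subslice limit, so the total
+  * programmed here scales with the number of subslices.
+  */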
vfe.MaximumNumberofThreads =
- physical_device->max_cs_threads * subslices - 1;
+ devinfo->max_cs_threads * subslices - 1;
vfe.NumberofURBEntries = GEN_GEN <= 7 ? 0 : 2;
vfe.ResetGatewayTimer = true;
#if GEN_GEN <= 8