return anv_descriptor_data_size(anv_descriptor_data_for_type(pdevice, type));
}
+/* Returns whether descriptors with the given data flags could be accessed
+ * bindlessly (i.e. without occupying a binding-table slot) on this
+ * physical device.
+ *
+ * Currently a stub that always returns false: no descriptor data is
+ * treated as bindless-capable yet.  The pdevice/data/sampler parameters
+ * are unused for now but keep the signature stable so per-type support
+ * can be added later without touching callers.
+ */
+static bool
+anv_descriptor_data_supports_bindless(const struct anv_physical_device *pdevice,
+ enum anv_descriptor_data data,
+ bool sampler)
+{
+ return false;
+}
+
+/* Returns whether the descriptors in this set-layout binding could be
+ * accessed bindlessly.  Thin wrapper that forwards the binding's packed
+ * data flags to anv_descriptor_data_supports_bindless(); `sampler`
+ * selects whether the sampler half of a combined image/sampler is being
+ * asked about.
+ */
+bool
+anv_descriptor_supports_bindless(const struct anv_physical_device *pdevice,
+ const struct anv_descriptor_set_binding_layout *binding,
+ bool sampler)
+{
+ return anv_descriptor_data_supports_bindless(pdevice, binding->data,
+ sampler);
+}
+
+/* Returns whether this binding MUST be handled bindlessly.  Only true
+ * when the device is forced into bindless mode (always_use_bindless,
+ * driven by the ANV_ALWAYS_BINDLESS environment variable) AND the
+ * binding actually supports bindless access; otherwise a binding-table
+ * slot remains an option and this returns false.
+ */
+bool
+anv_descriptor_requires_bindless(const struct anv_physical_device *pdevice,
+ const struct anv_descriptor_set_binding_layout *binding,
+ bool sampler)
+{
+ if (pdevice->always_use_bindless)
+ return anv_descriptor_supports_bindless(pdevice, binding, sampler);
+
+ return false;
+}
+
void anv_GetDescriptorSetLayoutSupport(
- VkDevice device,
+ VkDevice _device,
const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
VkDescriptorSetLayoutSupport* pSupport)
{
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ const struct anv_physical_device *pdevice =
+ &device->instance->physicalDevice;
+
uint32_t surface_count[MESA_SHADER_STAGES] = { 0, };
for (uint32_t b = 0; b < pCreateInfo->bindingCount; b++) {
const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[b];
+ enum anv_descriptor_data desc_data =
+ anv_descriptor_data_for_type(pdevice, binding->descriptorType);
+
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
/* There is no real limit on samplers */
break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ if (anv_descriptor_data_supports_bindless(pdevice, desc_data, false))
+ break;
+
if (binding->pImmutableSamplers) {
for (uint32_t i = 0; i < binding->descriptorCount; i++) {
ANV_FROM_HANDLE(anv_sampler, sampler,
break;
default:
+ if (anv_descriptor_data_supports_bindless(pdevice, desc_data, false))
+ break;
+
anv_foreach_stage(s, binding->stageFlags)
surface_count[s] += binding->descriptorCount;
break;
_mesa_sha1_update(&sha1_ctx, build_id_data(note), build_id_len);
_mesa_sha1_update(&sha1_ctx, &device->chipset_id,
sizeof(device->chipset_id));
+ _mesa_sha1_update(&sha1_ctx, &device->always_use_bindless,
+ sizeof(device->always_use_bindless));
_mesa_sha1_final(&sha1_ctx, sha1);
memcpy(device->pipeline_cache_uuid, sha1, VK_UUID_SIZE);
device->has_context_isolation =
anv_gem_get_param(fd, I915_PARAM_HAS_CONTEXT_ISOLATION);
+ device->always_use_bindless =
+ env_var_as_boolean("ANV_ALWAYS_BINDLESS", false);
+
+
/* Starting with Gen10, the timestamp frequency of the command streamer may
* vary from one part to another. We can query the value from the kernel.
*/
#include "nir/nir_builder.h"
#include "compiler/brw_nir.h"
+/* Sampler tables don't actually have a maximum size but we pick one just so
+ * that we don't end up emitting too much state on-the-fly.
+ */
+#define MAX_SAMPLER_TABLE_SIZE 128
+#define BINDLESS_OFFSET 255
+
struct apply_pipeline_layout_state {
const struct anv_physical_device *pdevice;
&layout->set[set].layout->binding[b];
/* Do a fixed-point calculation to generate a score based on the
- * number of uses and the binding array size.
+ * number of uses and the binding array size. We shift by 7 instead
+ * of 8 because we're going to use the top bit below to make
+ * everything which does not support bindless a higher priority
+ * than things which do.
*/
uint16_t score = ((uint16_t)state.set[set].use_count[b] << 7) /
binding->array_size;
+ /* If the descriptor type doesn't support bindless then put it at the
+ * beginning so we guarantee it gets a slot.
+ */
+ if (!anv_descriptor_supports_bindless(pdevice, binding, true) ||
+ !anv_descriptor_supports_bindless(pdevice, binding, false))
+ score |= 1 << 15;
+
infos[used_binding_count++] = (struct binding_info) {
.set = set,
.binding = b,
struct anv_descriptor_set_binding_layout *binding =
&layout->set[set].layout->binding[b];
+ const uint32_t array_size = binding->array_size;
+
if (binding->data & ANV_DESCRIPTOR_SURFACE_STATE) {
- state.set[set].surface_offsets[b] = map->surface_count;
- struct anv_sampler **samplers = binding->immutable_samplers;
- for (unsigned i = 0; i < binding->array_size; i++) {
- uint8_t planes = samplers ? samplers[i]->n_planes : 1;
- for (uint8_t p = 0; p < planes; p++) {
- map->surface_to_descriptor[map->surface_count++] =
- (struct anv_pipeline_binding) {
- .set = set,
- .binding = b,
- .index = i,
- .plane = p,
- };
+ if (map->surface_count + array_size > MAX_BINDING_TABLE_SIZE ||
+ anv_descriptor_requires_bindless(pdevice, binding, false)) {
+ /* If this descriptor doesn't fit in the binding table or if it
+ * requires bindless for some reason, flag it as bindless.
+ */
+ assert(anv_descriptor_supports_bindless(pdevice, binding, false));
+ state.set[set].surface_offsets[b] = BINDLESS_OFFSET;
+ } else {
+ state.set[set].surface_offsets[b] = map->surface_count;
+ struct anv_sampler **samplers = binding->immutable_samplers;
+ for (unsigned i = 0; i < binding->array_size; i++) {
+ uint8_t planes = samplers ? samplers[i]->n_planes : 1;
+ for (uint8_t p = 0; p < planes; p++) {
+ map->surface_to_descriptor[map->surface_count++] =
+ (struct anv_pipeline_binding) {
+ .set = set,
+ .binding = b,
+ .index = i,
+ .plane = p,
+ };
+ }
}
}
+ assert(map->surface_count <= MAX_BINDING_TABLE_SIZE);
}
- assert(map->surface_count <= MAX_BINDING_TABLE_SIZE);
if (binding->data & ANV_DESCRIPTOR_SAMPLER_STATE) {
- state.set[set].sampler_offsets[b] = map->sampler_count;
- struct anv_sampler **samplers = binding->immutable_samplers;
- for (unsigned i = 0; i < binding->array_size; i++) {
- uint8_t planes = samplers ? samplers[i]->n_planes : 1;
- for (uint8_t p = 0; p < planes; p++) {
- map->sampler_to_descriptor[map->sampler_count++] =
- (struct anv_pipeline_binding) {
- .set = set,
- .binding = b,
- .index = i,
- .plane = p,
- };
+ if (map->sampler_count + array_size > MAX_SAMPLER_TABLE_SIZE ||
+ anv_descriptor_requires_bindless(pdevice, binding, true)) {
+ /* If this descriptor doesn't fit in the sampler table or if it
+ * requires bindless for some reason, flag it as bindless.
+ */
+ assert(anv_descriptor_supports_bindless(pdevice, binding, true));
+ state.set[set].sampler_offsets[b] = BINDLESS_OFFSET;
+ } else {
+ state.set[set].sampler_offsets[b] = map->sampler_count;
+ struct anv_sampler **samplers = binding->immutable_samplers;
+ for (unsigned i = 0; i < binding->array_size; i++) {
+ uint8_t planes = samplers ? samplers[i]->n_planes : 1;
+ for (uint8_t p = 0; p < planes; p++) {
+ map->sampler_to_descriptor[map->sampler_count++] =
+ (struct anv_pipeline_binding) {
+ .set = set,
+ .binding = b,
+ .index = i,
+ .plane = p,
+ };
+ }
}
}
}
if (state.set[set].use_count[binding] == 0)
continue;
+ if (state.set[set].surface_offsets[binding] >= MAX_BINDING_TABLE_SIZE)
+ continue;
+
struct anv_pipeline_binding *pipe_binding =
&map->surface_to_descriptor[state.set[set].surface_offsets[binding]];
for (unsigned i = 0; i < array_size; i++) {