                                      const struct gen_device_info *devinfo);
void brw_nir_rewrite_image_intrinsic(nir_intrinsic_instr *intrin,
                                     nir_ssa_def *index);
+void brw_nir_rewrite_bindless_image_intrinsic(nir_intrinsic_instr *intrin,
+                                              nir_ssa_def *handle);
bool brw_nir_lower_mem_access_bit_sizes(nir_shader *shader);
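The body of the new helper is not part of this excerpt. A minimal sketch, assuming it mirrors brw_nir_rewrite_image_intrinsic and simply forwards to NIR's core rewrite helper with the bindless flag set (the lowering code further down calls nir_rewrite_image_intrinsic(intrin, handle, true) directly, so this is the expected shape):

void
brw_nir_rewrite_bindless_image_intrinsic(nir_intrinsic_instr *intrin,
                                         nir_ssa_def *handle)
{
   /* bindless = true tells NIR to treat the first source as a bindless
    * handle rather than a binding-table index.
    */
   nir_rewrite_image_intrinsic(intrin, handle, true);
}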
      data = ANV_DESCRIPTOR_SURFACE_STATE;
      if (device->info.gen < 9)
         data |= ANV_DESCRIPTOR_IMAGE_PARAM;
+      if (device->has_bindless_images)
+         data |= ANV_DESCRIPTOR_STORAGE_IMAGE;
      break;
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   if (data & ANV_DESCRIPTOR_SAMPLED_IMAGE)
      size += sizeof(struct anv_sampled_image_descriptor);
+   if (data & ANV_DESCRIPTOR_STORAGE_IMAGE)
+      size += sizeof(struct anv_storage_image_descriptor);
+
   if (data & ANV_DESCRIPTOR_IMAGE_PARAM)
      size += BRW_IMAGE_PARAM_SIZE * 4;
                       pdevice->has_bindless_images;
   }
+   if (data & ANV_DESCRIPTOR_STORAGE_IMAGE) {
+      assert(pdevice->has_bindless_images);
+      return true;
+   }
+
   return false;
}
             MAX2(1, bind_layout->max_plane_count) * sizeof(desc_data[0]));
   }
+   if (bind_layout->data & ANV_DESCRIPTOR_STORAGE_IMAGE) {
+      assert(!(bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM));
+      assert(image_view->n_planes == 1);
+      struct anv_storage_image_descriptor desc_data = {
+         .read_write = anv_surface_state_to_handle(
+                           image_view->planes[0].storage_surface_state.state),
+         .write_only = anv_surface_state_to_handle(
+                           image_view->planes[0].writeonly_storage_surface_state.state),
+      };
+      memcpy(desc_map, &desc_data, sizeof(desc_data));
+   }
+
   if (bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM) {
      /* Storage images can only ever have one plane */
      assert(image_view->n_planes == 1);
      anv_descriptor_set_write_image_param(desc_map,
         &image_view->planes[0].storage_image_param);
   }
+   if (bind_layout->data & ANV_DESCRIPTOR_STORAGE_IMAGE) {
+      assert(!(bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM));
+      struct anv_storage_image_descriptor desc_data = {
+         .read_write = anv_surface_state_to_handle(
+                           buffer_view->storage_surface_state),
+         .write_only = anv_surface_state_to_handle(
+                           buffer_view->writeonly_storage_surface_state),
+      };
+      memcpy(desc_map, &desc_data, sizeof(desc_data));
+   }
+
   if (bind_layout->data & ANV_DESCRIPTOR_IMAGE_PARAM) {
      anv_descriptor_set_write_image_param(desc_map,
                                           &buffer_view->storage_image_param);
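Both descriptor-write paths funnel the surface state through anv_surface_state_to_handle(), which is not shown in this excerpt. Going by the comment on anv_storage_image_descriptor below (the 20-bit SURFACE_STATE table index lives in the top 20 bits of the handle), a sketch of what such a helper has to do, assuming 64-byte-aligned surface states allocated within a 64 MB block:

static uint32_t
anv_surface_state_to_handle(struct anv_state state)
{
   /* Assumed invariants: the state is 64-byte aligned and its byte offset
    * fits in 26 bits, so offset >> 6 is a 20-bit table index. Shifting the
    * offset left by 6 places that index in the top 20 bits:
    * (offset >> 6) << 12 == offset << 6.
    */
   uint32_t offset = state.offset;
   assert((offset & 0x3f) == 0 && offset < (1 << 26));
   return offset << 6;
}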
   const uint32_t max_samplers =
      pdevice->has_bindless_samplers ? UINT16_MAX :
      (devinfo->gen >= 8 || devinfo->is_haswell) ? 128 : 16;
+   const uint32_t max_images =
+      pdevice->has_bindless_images ? UINT16_MAX : MAX_IMAGES;

   /* The moment we have anything bindless, claim a high per-stage limit */
   const uint32_t max_per_stage =

      .maxPerStageDescriptorUniformBuffers      = 64,
      .maxPerStageDescriptorStorageBuffers      = max_ssbos,
      .maxPerStageDescriptorSampledImages       = max_textures,
-      .maxPerStageDescriptorStorageImages       = MAX_IMAGES,
+      .maxPerStageDescriptorStorageImages       = max_images,
      .maxPerStageDescriptorInputAttachments    = 64,
      .maxPerStageResources                     = max_per_stage,
      .maxDescriptorSetSamplers                 = 6 * max_samplers, /* number of stages * maxPerStageDescriptorSamplers */
      .maxDescriptorSetStorageBuffers           = 6 * max_ssbos,    /* number of stages * maxPerStageDescriptorStorageBuffers */
      .maxDescriptorSetStorageBuffersDynamic    = MAX_DYNAMIC_BUFFERS / 2,
      .maxDescriptorSetSampledImages            = 6 * max_textures, /* number of stages * maxPerStageDescriptorSampledImages */
-      .maxDescriptorSetStorageImages            = 6 * MAX_IMAGES,   /* number of stages * maxPerStageDescriptorStorageImages */
+      .maxDescriptorSetStorageImages            = 6 * max_images,   /* number of stages * maxPerStageDescriptorStorageImages */
      .maxDescriptorSetInputAttachments         = 256,
      .maxVertexInputAttributes                 = MAX_VBS,
      .maxVertexInputBindings                   = MAX_VBS,
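The effect of the two limit changes above is visible to applications through the standard properties query: with has_bindless_images set, the driver advertises a 16-bit limit instead of the small binding-table-based MAX_IMAGES. A quick way to observe it (plain Vulkan API; physical_device is assumed to be an already-enumerated VkPhysicalDevice):

#include <stdio.h>
#include <vulkan/vulkan.h>

void print_storage_image_limit(VkPhysicalDevice physical_device)
{
   VkPhysicalDeviceProperties props;
   vkGetPhysicalDeviceProperties(physical_device, &props);
   /* With bindless images this reports UINT16_MAX (65535); without,
    * the old binding-table-sized MAX_IMAGES value.
    */
   printf("maxPerStageDescriptorStorageImages = %u\n",
          props.limits.maxPerStageDescriptorStorageImages);
}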
                      struct apply_pipeline_layout_state *state)
{
   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
+   nir_variable *var = nir_deref_instr_get_variable(deref);

   nir_builder *b = &state->builder;
   b->cursor = nir_before_instr(&intrin->instr);

+   const bool use_bindless = state->pdevice->has_bindless_images;
+
   if (intrin->intrinsic == nir_intrinsic_image_deref_load_param_intel) {
      b->cursor = nir_instr_remove(&intrin->instr);

+      assert(!use_bindless); /* Otherwise our offsets would be wrong */
      const unsigned param = nir_intrinsic_base(intrin);
      nir_ssa_def *desc =
         build_descriptor_load(deref, param * 16,
                               intrin->dest.ssa.num_components,
                               intrin->dest.ssa.bit_size, state);
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(desc));
+   } else if (use_bindless) {
+      /* The descriptor is a (read_write, write_only) pair of bindless
+       * handles; pick the one matching the image's declared access.
+       */
+      const bool write_only =
+         (var->data.image.access & ACCESS_NON_READABLE) != 0;
+      nir_ssa_def *desc =
+         build_descriptor_load(deref, 0, 2, 32, state);
+      nir_ssa_def *handle = nir_channel(b, desc, write_only ? 1 : 0);
+      nir_rewrite_image_intrinsic(intrin, handle, true);
   } else {
-      nir_variable *var = nir_deref_instr_get_variable(deref);
-
      unsigned set = var->data.descriptor_set;
      unsigned binding = var->data.binding;
      unsigned binding_offset = state->set[set].surface_offsets[binding];
   uint32_t sampler;
};

+/** Struct representing a storage image descriptor */
+struct anv_storage_image_descriptor {
+   /** Bindless image handles
+    *
+    * These are expected to already be shifted such that the 20-bit
+    * SURFACE_STATE table index is in the top 20 bits.
+    */
+   uint32_t read_write;
+   uint32_t write_only;
+};
+
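The lowering pass earlier in this patch loads this struct as two consecutive 32-bit channels (build_descriptor_load(deref, 0, 2, 32, state)) and selects channel 0 for read/write access or channel 1 for write-only, so the layout must stay exactly two tightly packed dwords in this order. A compile-time guard one could add, shown here as a sketch using C11 _Static_assert rather than anything from the patch itself:

#include <stddef.h>

_Static_assert(sizeof(struct anv_storage_image_descriptor) == 8,
               "descriptor must be exactly two 32-bit handles");
_Static_assert(offsetof(struct anv_storage_image_descriptor, write_only) == 4,
               "write_only must be the second dword");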
/** Struct representing an address/range descriptor
 *
 * The fields of this struct correspond directly to the data layout of
   ANV_DESCRIPTOR_ADDRESS_RANGE  = (1 << 5),
   /** Bindless surface handle */
   ANV_DESCRIPTOR_SAMPLED_IMAGE  = (1 << 6),
+   /** Storage image handles */
+   ANV_DESCRIPTOR_STORAGE_IMAGE  = (1 << 7),
};

struct anv_descriptor_set_binding_layout {