+ /* NOTE(review): diff hunk — interior of a sampler-view insert function.
+  * 'free', 'sv', 'views', 'view', 'st' and 'stObj' are declared in the
+  * surrounding context not shown here; presumably 'free' points at a
+  * previously vacated slot found by an earlier scan — confirm in caller.
+  */
+ if (free) {
+ /* Reuse the vacated slot instead of growing the container. */
+ sv = free;
+ } else {
+ if (views->count >= views->max) {
+ /* Allocate a larger container. */
+ unsigned new_max = 2 * views->max;
+ /* new_size is computed before the overflow guard below; if the
+  * multiplication wraps (defined behavior for unsigned), the guard
+  * rejects new_max before new_size is ever passed to malloc.
+  */
+ unsigned new_size = sizeof(*views) + new_max * sizeof(views->views[0]);
+
+ /* Guard both failure modes: the doubling itself wrapping
+  * (new_max < views->max) and the total byte size overflowing
+  * (new_max elements would not fit in UINT_MAX alongside the header).
+  */
+ if (new_max < views->max ||
+ new_max > (UINT_MAX - sizeof(*views)) / sizeof(views->views[0])) {
+ pipe_sampler_view_release(st->pipe, &view);
+ goto out;
+ }
+
+ struct st_sampler_views *new_views = malloc(new_size);
+ if (!new_views) {
+ /* Allocation failed: drop the view we were about to insert. */
+ pipe_sampler_view_release(st->pipe, &view);
+ goto out;
+ }
+
+ /* Copy the live entries; count/max first so readers of the new
+  * container see a consistent header.
+  */
+ new_views->count = views->count;
+ new_views->max = new_max;
+ memcpy(&new_views->views[0], &views->views[0],
+ views->count * sizeof(views->views[0]));
+
+ /* Initialize the pipe_sampler_view pointers to zero so that we don't
+  * have to worry about racing against readers when incrementing
+  * views->count.
+  */
+ memset(&new_views->views[views->count], 0,
+ (new_max - views->count) * sizeof(views->views[0]));
+
+ /* Use memory release semantics to ensure that concurrent readers will
+  * get the correct contents of the new container.
+  *
+  * Also, the write should be atomic, but that's guaranteed anyway on
+  * all supported platforms.
+  */
+ p_atomic_set(&stObj->sampler_views, new_views);
+
+ /* We keep the old container around until the texture object is
+  * deleted, because another thread may still be reading from it. We
+  * double the size of the container each time, so we end up with
+  * at most twice the total memory allocation.
+  */
+ views->next = stObj->sampler_views_old;
+ stObj->sampler_views_old = views;
+
+ views = new_views;
+ }
+
+ /* First unused slot; presumably views->count is incremented after the
+  * slot is populated (past the end of this hunk) so lock-free readers
+  * never observe a partially written entry — confirm in full function.
+  */
+ sv = &views->views[views->count];