+bool
+virgl_can_rebind_resource(struct virgl_context *vctx,
+                          struct pipe_resource *res)
+{
+   /* Host-side objects keep references to the resource, namely
+    *
+    * - VIRGL_OBJECT_SURFACE
+    * - VIRGL_OBJECT_SAMPLER_VIEW
+    * - VIRGL_OBJECT_STREAMOUT_TARGET
+    *
+    * so a resource bound through any of them cannot be rebound.  Surfaces
+    * can never be created from buffers, which is why we insist on a buffer
+    * here instead of tracking VIRGL_OBJECT_SURFACE binds.
+    */
+   const unsigned blocking_binds = PIPE_BIND_SAMPLER_VIEW |
+                                   PIPE_BIND_STREAM_OUTPUT;
+
+   if (res->target != PIPE_BUFFER)
+      return false;
+
+   return !(virgl_resource(res)->bind_history & blocking_binds);
+}
+
+void
+virgl_rebind_resource(struct virgl_context *vctx,
+ struct pipe_resource *res)
+{
+ /* Walk every binding point recorded in the resource's bind_history and
+ * re-emit it: vertex buffers are merely flagged dirty (re-sent at the
+ * next draw), while atomic buffers, UBOs, SSBOs and shader images are
+ * re-encoded immediately via the virgl_encode_* helpers.
+ */
+ /* Queries use internally created buffers and do not go through transfers.
+ * Index buffers are not bindable. They are not tracked.
+ */
+ ASSERTED const unsigned tracked_bind = (PIPE_BIND_VERTEX_BUFFER |
+ PIPE_BIND_CONSTANT_BUFFER |
+ PIPE_BIND_SHADER_BUFFER |
+ PIPE_BIND_SHADER_IMAGE);
+ const unsigned bind_history = virgl_resource(res)->bind_history;
+ unsigned i;
+
+ /* The caller must only hand us rebindable resources, and every bind the
+ * resource has ever seen must be one of the kinds handled below.
+ */
+ assert(virgl_can_rebind_resource(vctx, res) &&
+ (bind_history & tracked_bind) == bind_history);
+
+ /* Vertex buffers: no immediate re-encode; setting vertex_array_dirty
+ * defers the re-emit, so one match is enough and we can stop scanning.
+ */
+ if (bind_history & PIPE_BIND_VERTEX_BUFFER) {
+ for (i = 0; i < vctx->num_vertex_buffers; i++) {
+ if (vctx->vertex_buffer[i].buffer.resource == res) {
+ vctx->vertex_array_dirty = true;
+ break;
+ }
+ }
+ }
+
+ /* Atomic counter buffers share the PIPE_BIND_SHADER_BUFFER bit with
+ * SSBOs, so they are checked under the same history flag.
+ */
+ if (bind_history & PIPE_BIND_SHADER_BUFFER) {
+ uint32_t remaining_mask = vctx->atomic_buffer_enabled_mask;
+ while (remaining_mask) {
+ int i = u_bit_scan(&remaining_mask);
+ if (vctx->atomic_buffers[i].buffer == res) {
+ const struct pipe_shader_buffer *abo = &vctx->atomic_buffers[i];
+ virgl_encode_set_hw_atomic_buffers(vctx, i, 1, abo);
+ }
+ }
+ }
+
+ /* check per-stage shader bindings */
+ if (bind_history & (PIPE_BIND_CONSTANT_BUFFER |
+ PIPE_BIND_SHADER_BUFFER |
+ PIPE_BIND_SHADER_IMAGE)) {
+ enum pipe_shader_type shader_type;
+ for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
+ const struct virgl_shader_binding_state *binding =
+ &vctx->shader_bindings[shader_type];
+
+ /* Re-send each enabled UBO slot that references "res", preserving
+ * the slot's offset and size.
+ */
+ if (bind_history & PIPE_BIND_CONSTANT_BUFFER) {
+ uint32_t remaining_mask = binding->ubo_enabled_mask;
+ while (remaining_mask) {
+ int i = u_bit_scan(&remaining_mask);
+ if (binding->ubos[i].buffer == res) {
+ const struct pipe_constant_buffer *ubo = &binding->ubos[i];
+ virgl_encoder_set_uniform_buffer(vctx, shader_type, i,
+ ubo->buffer_offset,
+ ubo->buffer_size,
+ virgl_resource(res));
+ }
+ }
+ }
+
+ /* Re-send each enabled SSBO slot that references "res". */
+ if (bind_history & PIPE_BIND_SHADER_BUFFER) {
+ uint32_t remaining_mask = binding->ssbo_enabled_mask;
+ while (remaining_mask) {
+ int i = u_bit_scan(&remaining_mask);
+ if (binding->ssbos[i].buffer == res) {
+ const struct pipe_shader_buffer *ssbo = &binding->ssbos[i];
+ virgl_encode_set_shader_buffers(vctx, shader_type, i, 1,
+ ssbo);
+ }
+ }
+ }
+
+ /* Re-send each enabled shader-image slot that references "res". */
+ if (bind_history & PIPE_BIND_SHADER_IMAGE) {
+ uint32_t remaining_mask = binding->image_enabled_mask;
+ while (remaining_mask) {
+ int i = u_bit_scan(&remaining_mask);
+ if (binding->images[i].resource == res) {
+ const struct pipe_image_view *image = &binding->images[i];
+ virgl_encode_set_shader_images(vctx, shader_type, i, 1,
+ image);
+ }
+ }
+ }
+ }
+ }
+}
+