}
}
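+
+/* Re-emit the image resources bound to a shader stage into the current
+ * command buffer, so the host retains them across a flush. */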
+static void virgl_attach_res_shader_images(struct virgl_context *vctx,
+ enum pipe_shader_type shader_type)
+{
+ struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+ struct virgl_resource *res;
+ unsigned i;
+ for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
+ res = virgl_resource(vctx->images[shader_type][i]);
+ if (res) {
+ vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
+ }
+ }
+}
+
/*
* after flushing, the hw context still has a bunch of
* resources bound, so we need to rebind those here.
virgl_attach_res_sampler_views(vctx, shader_type);
virgl_attach_res_uniform_buffers(vctx, shader_type);
virgl_attach_res_shader_buffers(vctx, shader_type);
+ virgl_attach_res_shader_images(vctx, shader_type);
}
virgl_attach_res_vertex_buffers(vctx);
virgl_attach_res_so_targets(vctx);
virgl_encode_set_shader_buffers(vctx, shader, start_slot, count, buffers);
}
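+
+/* pipe_context::set_shader_images hook: keep local references to the bound
+ * image resources (needed to rebind them after a flush) and encode the new
+ * bindings for the host. */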
+static void virgl_set_shader_images(struct pipe_context *ctx,
+ enum pipe_shader_type shader,
+ unsigned start_slot, unsigned count,
+ const struct pipe_image_view *images)
+{
+ struct virgl_context *vctx = virgl_context(ctx);
+ struct virgl_screen *rs = virgl_screen(ctx->screen);
+
+ for (unsigned i = 0; i < count; i++) {
+ unsigned idx = start_slot + i;
+
+ if (images && images[i].resource)
+ pipe_resource_reference(&vctx->images[shader][idx], images[i].resource);
+ else
+ pipe_resource_reference(&vctx->images[shader][idx], NULL);
+ }
+
+ uint32_t max_shader_images =
+ (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE) ?
+ rs->caps.caps.v2.max_shader_image_frag_compute :
+ rs->caps.caps.v2.max_shader_image_other_stages;
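+ /* The host reports no image support for this stage group, so there is
+ * nothing to encode. */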
+ if (!max_shader_images)
+ return;
+ virgl_encode_set_shader_images(vctx, shader, start_slot, count, images);
+}
+
static void
virgl_context_destroy( struct pipe_context *ctx )
{
vctx->base.blit = virgl_blit;
vctx->base.set_shader_buffers = virgl_set_shader_buffers;
+ vctx->base.set_shader_images = virgl_set_shader_images;
virgl_init_context_resource_functions(&vctx->base);
virgl_init_query_functions(vctx);
virgl_init_so_functions(vctx);
struct pipe_resource *ubos[PIPE_SHADER_TYPES][PIPE_MAX_CONSTANT_BUFFERS];
struct pipe_resource *ssbos[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS];
+ struct pipe_resource *images[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_IMAGES];
int num_transfers;
int num_draws;
struct list_head to_flush_bufs;
}
return 0;
}
+
+int virgl_encode_set_shader_images(struct virgl_context *ctx,
+ enum pipe_shader_type shader,
+ unsigned start_slot, unsigned count,
+ const struct pipe_image_view *images)
+{
+ unsigned i;
+ virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_IMAGES, 0,
+ VIRGL_SET_SHADER_IMAGE_SIZE(count)));
+
+ virgl_encoder_write_dword(ctx->cbuf, shader);
+ virgl_encoder_write_dword(ctx->cbuf, start_slot);
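+ /* The u.buf dwords below alias the u.tex fields of the pipe_image_view
+ * union, so texture views carry first/last layer and mip level in the
+ * same two slots. */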
+ for (i = 0; i < count; i++) {
+ if (images) {
+ struct virgl_resource *res = virgl_resource(images[i].resource);
+ virgl_encoder_write_dword(ctx->cbuf, images[i].format);
+ virgl_encoder_write_dword(ctx->cbuf, images[i].access);
+ virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.offset);
+ virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.size);
+ virgl_encoder_write_res(ctx, res);
+ } else {
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ virgl_encoder_write_dword(ctx->cbuf, 0);
+ }
+ }
+ return 0;
+}
enum pipe_shader_type shader,
unsigned start_slot, unsigned count,
const struct pipe_shader_buffer *buffers);
+int virgl_encode_set_shader_images(struct virgl_context *ctx,
+ enum pipe_shader_type shader,
+ unsigned start_slot, unsigned count,
+ const struct pipe_image_view *images);
#endif
uint32_t max_vertex_attrib_stride;
uint32_t max_shader_buffer_frag_compute;
uint32_t max_shader_buffer_other_stages;
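+ /* Image limits mirror the shader-buffer split above: fragment and compute
+ * share one cap, all other stages share the other. */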
+ uint32_t max_shader_image_frag_compute;
+ uint32_t max_shader_image_other_stages;
+ uint32_t max_image_samples;
};
union virgl_caps {
VIRGL_CCMD_SET_TESS_STATE,
VIRGL_CCMD_SET_MIN_SAMPLES,
VIRGL_CCMD_SET_SHADER_BUFFERS,
+ VIRGL_CCMD_SET_SHADER_IMAGES,
};
/*
#define VIRGL_SET_SHADER_BUFFER_LENGTH(x) ((x) * VIRGL_SET_SHADER_BUFFER_ELEMENT_SIZE + 4)
#define VIRGL_SET_SHADER_BUFFER_RES_HANDLE(x) ((x) * VIRGL_SET_SHADER_BUFFER_ELEMENT_SIZE + 5)
+/* set shader images */
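+/* Payload layout: dword 1 = shader type, dword 2 = start slot, then five
+ * dwords per image. LAYER_OFFSET and LEVEL_SIZE are dual-use: layer range
+ * and mip level for texture views, byte offset and size for buffer views. */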
+#define VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE 5
+#define VIRGL_SET_SHADER_IMAGE_SIZE(x) ((x) * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 2)
+#define VIRGL_SET_SHADER_IMAGE_SHADER_TYPE 1
+#define VIRGL_SET_SHADER_IMAGE_START_SLOT 2
+#define VIRGL_SET_SHADER_IMAGE_FORMAT(x) ((x) * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 3)
+#define VIRGL_SET_SHADER_IMAGE_ACCESS(x) ((x) * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 4)
+#define VIRGL_SET_SHADER_IMAGE_LAYER_OFFSET(x) ((x) * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 5)
+#define VIRGL_SET_SHADER_IMAGE_LEVEL_SIZE(x) ((x) * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 6)
+#define VIRGL_SET_SHADER_IMAGE_RES_HANDLE(x) ((x) * VIRGL_SET_SHADER_IMAGE_ELEMENT_SIZE + 7)
+
#endif
return vscreen->caps.caps.v2.max_shader_buffer_frag_compute;
else
return vscreen->caps.caps.v2.max_shader_buffer_other_stages;
+ case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
+ if (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE)
+ return vscreen->caps.caps.v2.max_shader_image_frag_compute;
+ else
+ return vscreen->caps.caps.v2.max_shader_image_other_stages;
case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:
case PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS:
case PIPE_SHADER_CAP_INT64_ATOMICS:
if (sample_count > 1) {
if (!vscreen->caps.caps.v1.bset.texture_multisample)
return FALSE;
+
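+ /* Multisampled shader images have their own sample-count limit. */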
+ if (bind & PIPE_BIND_SHADER_IMAGE) {
+ if (sample_count > vscreen->caps.caps.v2.max_image_samples)
+ return FALSE;
+ }
+
if (sample_count > vscreen->caps.caps.v1.max_samples)
return FALSE;
}
caps->caps.v2.shader_buffer_offset_alignment = 32;
caps->caps.v2.capability_bits = 0;
caps->caps.v2.max_vertex_attrib_stride = 0;
+ caps->caps.v2.max_image_samples = 0;
}
#endif