}
}
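+/* Re-attach the resource behind each bound hw atomic buffer so it is
+ * referenced again in the new command buffer after a flush. */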
+static void virgl_attach_res_atomic_buffers(struct virgl_context *vctx)
+{
+   struct virgl_winsys *vws = virgl_screen(vctx->base.screen)->vws;
+   struct virgl_resource *res;
+   unsigned i;
+   for (i = 0; i < PIPE_MAX_HW_ATOMIC_BUFFERS; i++) {
+      res = virgl_resource(vctx->atomic_buffers[i]);
+      if (res) {
+         vws->emit_res(vws, vctx->cbuf, res->hw_res, FALSE);
+      }
+   }
+}
+
/*
* after flushing, the hw context still has a bunch of
* resources bound, so we need to rebind those here.
virgl_attach_res_shader_buffers(vctx, shader_type);
virgl_attach_res_shader_images(vctx, shader_type);
}
+ virgl_attach_res_atomic_buffers(vctx);
virgl_attach_res_vertex_buffers(vctx);
virgl_attach_res_so_targets(vctx);
}
blit);
}
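+/* Pin the bound hw atomic buffers on the context (so they can be
+ * re-attached after a flush) and mirror the binding to the host. */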
+static void virgl_set_hw_atomic_buffers(struct pipe_context *ctx,
+                                        unsigned start_slot,
+                                        unsigned count,
+                                        const struct pipe_shader_buffer *buffers)
+{
+   struct virgl_context *vctx = virgl_context(ctx);
+
+   for (unsigned i = 0; i < count; i++) {
+      unsigned idx = start_slot + i;
+
+      if (buffers) {
+         if (buffers[i].buffer) {
+            pipe_resource_reference(&vctx->atomic_buffers[idx],
+                                    buffers[i].buffer);
+            continue;
+         }
+      }
+      pipe_resource_reference(&vctx->atomic_buffers[idx], NULL);
+   }
+   virgl_encode_set_hw_atomic_buffers(vctx, start_slot, count, buffers);
+}
+
static void virgl_set_shader_buffers(struct pipe_context *ctx,
enum pipe_shader_type shader,
unsigned start_slot, unsigned count,
vctx->base.blit = virgl_blit;
vctx->base.set_shader_buffers = virgl_set_shader_buffers;
+ vctx->base.set_hw_atomic_buffers = virgl_set_hw_atomic_buffers;
vctx->base.set_shader_images = virgl_set_shader_images;
vctx->base.memory_barrier = virgl_memory_barrier;
int num_draws;
struct list_head to_flush_bufs;
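+ /* hw atomic buffers currently bound; re-attached after each flush */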
+ struct pipe_resource *atomic_buffers[PIPE_MAX_HW_ATOMIC_BUFFERS];
+
struct primconvert_context *primconvert;
uint32_t hw_sub_ctx_id;
};
return 0;
}
+int virgl_encode_set_hw_atomic_buffers(struct virgl_context *ctx,
+                                       unsigned start_slot, unsigned count,
+                                       const struct pipe_shader_buffer *buffers)
+{
+   int i;
+   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_ATOMIC_BUFFERS, 0, VIRGL_SET_ATOMIC_BUFFER_SIZE(count)));
+
+   virgl_encoder_write_dword(ctx->cbuf, start_slot);
+   for (i = 0; i < count; i++) {
+      if (buffers && buffers[i].buffer) {
+         struct virgl_resource *res = virgl_resource(buffers[i].buffer);
+         virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
+         virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
+         virgl_encoder_write_res(ctx, res);
+      } else {
+         /* unbound slot: offset, length and resource handle are all zero */
+         virgl_encoder_write_dword(ctx->cbuf, 0);
+         virgl_encoder_write_dword(ctx->cbuf, 0);
+         virgl_encoder_write_dword(ctx->cbuf, 0);
+      }
+   }
+   return 0;
+}
+
int virgl_encode_set_shader_images(struct virgl_context *ctx,
enum pipe_shader_type shader,
unsigned start_slot, unsigned count,
enum pipe_shader_type shader,
unsigned start_slot, unsigned count,
const struct pipe_image_view *images);
+int virgl_encode_set_hw_atomic_buffers(struct virgl_context *ctx,
+                                       unsigned start_slot, unsigned count,
+                                       const struct pipe_shader_buffer *buffers);
int virgl_encode_memory_barrier(struct virgl_context *ctx,
unsigned flags);
int virgl_encode_launch_grid(struct virgl_context *ctx,
uint32_t max_texture_2d_size;
uint32_t max_texture_3d_size;
uint32_t max_texture_cube_size;
+ uint32_t max_combined_shader_buffers;
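+ /* per shader stage limits, indexed by pipe_shader_type */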
+ uint32_t max_atomic_counters[6];
+ uint32_t max_atomic_counter_buffers[6];
+ uint32_t max_combined_atomic_counters;
+ uint32_t max_combined_atomic_counter_buffers;
};
union virgl_caps {
VIRGL_CCMD_LAUNCH_GRID,
VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH,
VIRGL_CCMD_TEXTURE_BARRIER,
+ VIRGL_CCMD_SET_ATOMIC_BUFFERS,
};
/*
#define VIRGL_TEXTURE_BARRIER_SIZE 1
#define VIRGL_TEXTURE_BARRIER_FLAGS 1
+/* hw atomics */
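+/* payload: one dword for the start slot, then 3 dwords (offset, length, resource handle) per buffer */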
+#define VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE 3
+#define VIRGL_SET_ATOMIC_BUFFER_SIZE(x) ((VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE * (x)) + 1)
+#define VIRGL_SET_ATOMIC_BUFFER_START_SLOT 1
+#define VIRGL_SET_ATOMIC_BUFFER_OFFSET(x) ((x) * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 2)
+#define VIRGL_SET_ATOMIC_BUFFER_LENGTH(x) ((x) * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 3)
+#define VIRGL_SET_ATOMIC_BUFFER_RES_HANDLE(x) ((x) * VIRGL_SET_ATOMIC_BUFFER_ELEMENT_SIZE + 4)
+
#endif
return vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_SHADER_CLOCK;
case PIPE_CAP_TGSI_ARRAY_COMPONENTS:
return vscreen->caps.caps.v2.capability_bits & VIRGL_CAP_TGSI_COMPONENTS;
+ case PIPE_CAP_MAX_COMBINED_SHADER_BUFFERS:
+ return vscreen->caps.caps.v2.max_combined_shader_buffers;
+ case PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTERS:
+ return vscreen->caps.caps.v2.max_combined_atomic_counters;
+ case PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTER_BUFFERS:
+ return vscreen->caps.caps.v2.max_combined_atomic_counter_buffers;
case PIPE_CAP_TEXTURE_GATHER_SM5:
case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
case PIPE_CAP_FAKE_SW_MSAA:
return vscreen->caps.caps.v2.max_shader_image_other_stages;
case PIPE_SHADER_CAP_SUPPORTED_IRS:
return (1 << PIPE_SHADER_IR_TGSI);
+ case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
+ return vscreen->caps.caps.v2.max_atomic_counters[shader];
+ case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
+ return vscreen->caps.caps.v2.max_atomic_counter_buffers[shader];
case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:
case PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS:
case PIPE_SHADER_CAP_INT64_ATOMICS:
case PIPE_SHADER_CAP_FP16:
- case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
- case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
return 0;
case PIPE_SHADER_CAP_SCALAR_ISA:
return 1;