/*
* Copyright (C) 2019 Collabora, Ltd.
 * Copyright (C) 2019 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
*/
#include "pan_context.h"
#include "pan_cmdstream.h"
#include "pan_bo.h"
#include "util/u_memory.h"
#include "nir_serialize.h"

+/* Compute CSOs are tracked like graphics shader CSOs, but are
+ * considerably simpler. We do not implement multiple
+ * variants/keying. So the CSO create function just goes ahead and
+ * compiles the thing. */
static void *
panfrost_create_compute_state(
struct pipe_context *pctx,
const struct pipe_compute_state *cso)
{
- return mem_dup(cso, sizeof(*cso));
+ struct panfrost_context *ctx = pan_context(pctx);
+
+ struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
+ so->cbase = *cso;
+ so->is_compute = true;
+
+ struct panfrost_shader_state *v = calloc(1, sizeof(*v));
+ so->variants = v;
+
+ so->variant_count = 1;
+ so->active_variant = 0;
+
+ if (cso->ir_type == PIPE_SHADER_IR_NIR_SERIALIZED) {
+ struct blob_reader reader;
+ const struct pipe_binary_program_header *hdr = cso->prog;
+
+ blob_reader_init(&reader, hdr->blob, hdr->num_bytes);
+ so->cbase.prog = nir_deserialize(NULL, &midgard_nir_options, &reader);
+ so->cbase.ir_type = PIPE_SHADER_IR_NIR;
+ }
+
+ panfrost_shader_compile(ctx, so->cbase.ir_type, so->cbase.prog,
+ MESA_SHADER_COMPUTE, v, NULL);
+
+ return so;
}
static void
panfrost_bind_compute_state(struct pipe_context *pipe, void *cso)
{
- struct pipe_compute_state *state = (struct pipe_compute_state *) cso;
+ struct panfrost_context *ctx = pan_context(pipe);
- printf("Binding compute %p\n", state);
- /* Stub */
+ struct panfrost_shader_variants *variants =
+ (struct panfrost_shader_variants *) cso;
+
+ ctx->shader[PIPE_SHADER_COMPUTE] = variants;
}
static void
{
struct panfrost_context *ctx = pan_context(pipe);
- struct mali_job_descriptor_header job = {
- .job_type = JOB_TYPE_COMPUTE,
- .job_descriptor_size = 1,
- .job_barrier = 1
- };
+ /* TODO: Do we want a special compute-only batch? */
+ struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);
+
+ ctx->compute_grid = info;
/* TODO: Stub */
- struct midgard_payload_vertex_tiler *payload = &ctx->payloads[PIPE_SHADER_COMPUTE];
+ struct midgard_payload_vertex_tiler payload;
- struct panfrost_transfer transfer = panfrost_allocate_transient(ctx, sizeof(job) + sizeof(*payload));
- memcpy(transfer.cpu, &job, sizeof(job));
- memcpy(transfer.cpu + sizeof(job), payload, sizeof(*payload));
+ /* We implement OpenCL inputs as uniforms (or a UBO -- same thing), so
+ * reuse the graphics path for this by lowering to Gallium */
- /* TODO: Do we want a special compute-only batch? */
- struct panfrost_job *batch = panfrost_get_job_for_fbo(ctx);
+ struct pipe_constant_buffer ubuf = {
+ .buffer = NULL,
+ .buffer_offset = 0,
+ .buffer_size = ctx->shader[PIPE_SHADER_COMPUTE]->cbase.req_input_mem,
+ .user_buffer = info->input
+ };
+
+ if (info->input)
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_COMPUTE, 0, &ubuf);
+
+ panfrost_vt_init(ctx, PIPE_SHADER_COMPUTE, &payload.prefix, &payload.postfix);
- /* Queue the job */
- panfrost_scoreboard_queue_compute_job(batch, transfer);
+ panfrost_emit_shader_meta(batch, PIPE_SHADER_COMPUTE, &payload.postfix);
+ panfrost_emit_const_buf(batch, PIPE_SHADER_COMPUTE, &payload.postfix);
+ panfrost_emit_shared_memory(batch, info, &payload);
+
+ /* Invoke according to the grid info */
+
+ panfrost_pack_work_groups_compute(&payload.prefix,
+ info->grid[0], info->grid[1],
+ info->grid[2],
+ info->block[0], info->block[1],
+ info->block[2],
+ false);
+
+ panfrost_new_job(batch, JOB_TYPE_COMPUTE, true, 0, &payload,
+ sizeof(payload), false);
+ panfrost_flush_all_batches(ctx, true);
+}
+
/* Gallium set_compute_resources hook. Not yet implemented; installed so the
 * state tracker has a valid (no-op) entry point. */
static void
panfrost_set_compute_resources(struct pipe_context *pctx,
                               unsigned start, unsigned count,
                               struct pipe_surface **resources)
{
        /* TODO */
}
+
/* Gallium set_global_binding hook (OpenCL global buffers). Not yet
 * implemented; installed so the state tracker has a valid (no-op) entry
 * point. */
static void
panfrost_set_global_binding(struct pipe_context *pctx,
                            unsigned first, unsigned count,
                            struct pipe_resource **resources,
                            uint32_t **handles)
{
        /* TODO */
}
+
/* Gallium memory_barrier hook. Currently a no-op: launch_grid flushes all
 * batches synchronously, so there is nothing further to order yet. */
static void
panfrost_memory_barrier(struct pipe_context *pctx, unsigned flags)
{
        /* TODO */
}
void
pctx->delete_compute_state = panfrost_delete_compute_state;
pctx->launch_grid = panfrost_launch_grid;
-}
+ pctx->set_compute_resources = panfrost_set_compute_resources;
+ pctx->set_global_binding = panfrost_set_global_binding;
+ pctx->memory_barrier = panfrost_memory_barrier;
+}