X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fpanfrost%2Fpan_compute.c;h=f8184b65d83f79db223c433e811a96d07792b820;hb=a0578998a45e7b398011dbe6f20d38b5608ec706;hp=f4c28c3642998537f0f6b8cb201ec4b3b5b5adea;hpb=6dc105555b43695e1de49baf946c6179987f2f4a;p=mesa.git

diff --git a/src/gallium/drivers/panfrost/pan_compute.c b/src/gallium/drivers/panfrost/pan_compute.c
index f4c28c36429..f8184b65d83 100644
--- a/src/gallium/drivers/panfrost/pan_compute.c
+++ b/src/gallium/drivers/panfrost/pan_compute.c
@@ -27,6 +27,8 @@
  */
 
 #include "pan_context.h"
+#include "pan_cmdstream.h"
+#include "pan_bo.h"
 #include "util/u_memory.h"
 #include "nir_serialize.h"
 
@@ -52,9 +54,6 @@ panfrost_create_compute_state(
         so->variant_count = 1;
         so->active_variant = 0;
 
-        /* calloc, instead of malloc - to zero unused fields */
-        v->tripipe = CALLOC_STRUCT(mali_shader_meta);
-
         if (cso->ir_type == PIPE_SHADER_IR_NIR_SERIALIZED) {
                 struct blob_reader reader;
                 const struct pipe_binary_program_header *hdr = cso->prog;
@@ -64,9 +63,8 @@ panfrost_create_compute_state(
                 so->cbase.ir_type = PIPE_SHADER_IR_NIR;
         }
 
-        panfrost_shader_compile(ctx, v->tripipe,
-                        so->cbase.ir_type, so->cbase.prog,
-                        MESA_SHADER_COMPUTE, v, NULL);
+        panfrost_shader_compile(ctx, so->cbase.ir_type, so->cbase.prog,
+                                MESA_SHADER_COMPUTE, v, NULL);
 
         return so;
 }
@@ -103,14 +101,8 @@ panfrost_launch_grid(struct pipe_context *pipe,
 
         ctx->compute_grid = info;
 
-        struct mali_job_descriptor_header job = {
-                .job_type = JOB_TYPE_COMPUTE,
-                .job_descriptor_size = 1,
-                .job_barrier = 1
-        };
-
         /* TODO: Stub */
-        struct midgard_payload_vertex_tiler *payload = &ctx->payloads[PIPE_SHADER_COMPUTE];
+        struct midgard_payload_vertex_tiler payload;
 
         /* We implement OpenCL inputs as uniforms (or a UBO -- same thing), so
          * reuse the graphics path for this by lowering to Gallium */
@@ -125,30 +117,23 @@ panfrost_launch_grid(struct pipe_context *pipe,
         if (info->input)
                 pipe->set_constant_buffer(pipe, PIPE_SHADER_COMPUTE, 0, &ubuf);
 
-        panfrost_emit_for_draw(ctx, false);
-
-        struct mali_shared_memory shared = {
-                .shared_workgroup_count = ~0
-        };
+        panfrost_vt_init(ctx, PIPE_SHADER_COMPUTE, &payload.prefix, &payload.postfix);
 
-        payload->postfix.shared_memory =
-                panfrost_upload_transient(batch, &shared, sizeof(shared));
+        panfrost_emit_shader_meta(batch, PIPE_SHADER_COMPUTE, &payload.postfix);
+        panfrost_emit_const_buf(batch, PIPE_SHADER_COMPUTE, &payload.postfix);
+        panfrost_emit_shared_memory(batch, info, &payload);
 
         /* Invoke according to the grid info */
 
-        panfrost_pack_work_groups_compute(&payload->prefix,
-                        info->grid[0], info->grid[1], info->grid[2],
-                        info->block[0], info->block[1], info->block[2], false);
-
-        /* Upload the payload */
-
-        struct panfrost_transfer transfer = panfrost_allocate_transient(batch, sizeof(job) + sizeof(*payload));
-        memcpy(transfer.cpu, &job, sizeof(job));
-        memcpy(transfer.cpu + sizeof(job), payload, sizeof(*payload));
-
-        /* Queue the job */
-        panfrost_scoreboard_queue_compute_job(batch, transfer);
+        panfrost_pack_work_groups_compute(&payload.prefix,
+                                          info->grid[0], info->grid[1],
+                                          info->grid[2],
+                                          info->block[0], info->block[1],
+                                          info->block[2],
+                                          false);
 
+        panfrost_new_job(batch, JOB_TYPE_COMPUTE, true, 0, &payload,
+                         sizeof(payload), false);
         panfrost_flush_all_batches(ctx, true);
 }
 
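Read as a whole, the "+" side of the hunks above turns panfrost_launch_grid() into a short linear sequence: build the payload on the stack, fill it in with the pan_cmdstream helpers, pack the grid dimensions, and queue one compute job. The sketch below stitches that sequence back together purely as a reading aid; the pieces that fall between hunks (the context/batch lookup and the pipe_constant_buffer used to lower the OpenCL input block to a UBO) are not part of this diff, so the pan_context()/panfrost_get_batch_for_fbo() calls and the elided UBO setup shown here are assumptions about the surrounding file, not part of the change.

static void
panfrost_launch_grid(struct pipe_context *pipe,
                     const struct pipe_grid_info *info)
{
        /* Assumed context from the unshown parts of pan_compute.c */
        struct panfrost_context *ctx = pan_context(pipe);
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        ctx->compute_grid = info;

        /* The payload is now stack-allocated instead of living in ctx->payloads[] */
        struct midgard_payload_vertex_tiler payload;

        /* ... OpenCL inputs lowered to a constant buffer here (hunk context, elided) ... */

        panfrost_vt_init(ctx, PIPE_SHADER_COMPUTE, &payload.prefix, &payload.postfix);

        /* Shader descriptor, uniforms/UBOs and shared memory are emitted by the
         * shared pan_cmdstream helpers introduced by this series */
        panfrost_emit_shader_meta(batch, PIPE_SHADER_COMPUTE, &payload.postfix);
        panfrost_emit_const_buf(batch, PIPE_SHADER_COMPUTE, &payload.postfix);
        panfrost_emit_shared_memory(batch, info, &payload);

        /* Invoke according to the grid info */
        panfrost_pack_work_groups_compute(&payload.prefix,
                                          info->grid[0], info->grid[1], info->grid[2],
                                          info->block[0], info->block[1], info->block[2],
                                          false);

        /* One compute job per launch, queued through the common job helper
         * instead of a hand-rolled header + transient upload + scoreboard call */
        panfrost_new_job(batch, JOB_TYPE_COMPUTE, true, 0, &payload,
                         sizeof(payload), false);
        panfrost_flush_all_batches(ctx, true);
}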