/*
 * Copyright (C) 2019 Collabora, Ltd.
 * Copyright (C) 2019 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *   Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 *
 */

#include "pan_context.h"
#include "pan_cmdstream.h"
#include "pan_bo.h"
#include "util/u_memory.h"
#include "nir_serialize.h"

/* Compute CSOs are tracked like graphics shader CSOs, but are considerably
 * simpler: we do not implement multiple variants or keying, so the create
 * function compiles the single variant up-front. A usage sketch follows. */

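/* Illustrative sketch only: a hypothetical helper (not part of the driver)
 * showing how a Gallium frontend is expected to drive the hooks below. The
 * NIR shader and input size are placeholders supplied by the caller. */

static inline void *
panfrost_example_create_and_bind(struct pipe_context *pctx,
                                 const void *nir, unsigned input_mem)
{
        struct pipe_compute_state cs = {
                .ir_type = PIPE_SHADER_IR_NIR,
                .prog = nir,
                .req_input_mem = input_mem,
        };

        /* Compiles immediately -- there is only one variant */
        void *cso = pctx->create_compute_state(pctx, &cs);
        pctx->bind_compute_state(pctx, cso);
        return cso;
}
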
static void *
panfrost_create_compute_state(
        struct pipe_context *pctx,
        const struct pipe_compute_state *cso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
        so->cbase = *cso;
        so->is_compute = true;

        /* Exactly one variant, compiled up-front */
        struct panfrost_shader_state *v = calloc(1, sizeof(*v));
        so->variants = v;

        so->variant_count = 1;
        so->active_variant = 0;

        /* calloc, instead of malloc - to zero unused fields */
        v->tripipe = CALLOC_STRUCT(mali_shader_meta);

        /* If the frontend handed us serialized NIR, deserialize it so the
         * compiler only ever sees plain NIR */
        if (cso->ir_type == PIPE_SHADER_IR_NIR_SERIALIZED) {
                struct blob_reader reader;
                const struct pipe_binary_program_header *hdr = cso->prog;

                blob_reader_init(&reader, hdr->blob, hdr->num_bytes);
                so->cbase.prog = nir_deserialize(NULL, &midgard_nir_options, &reader);
                so->cbase.ir_type = PIPE_SHADER_IR_NIR;
        }

        panfrost_shader_compile(ctx, v->tripipe,
                        so->cbase.ir_type, so->cbase.prog,
                        MESA_SHADER_COMPUTE, v, NULL);

        return so;
}

static void
panfrost_bind_compute_state(struct pipe_context *pipe, void *cso)
{
        struct panfrost_context *ctx = pan_context(pipe);

        struct panfrost_shader_variants *variants =
                (struct panfrost_shader_variants *) cso;

        ctx->shader[PIPE_SHADER_COMPUTE] = variants;
}

static void
panfrost_delete_compute_state(struct pipe_context *pipe, void *cso)
{
        struct panfrost_shader_variants *so =
                (struct panfrost_shader_variants *) cso;

        /* Free the single variant's descriptor and the variant itself before
         * the CSO; freeing only the CSO would leak both */
        free(so->variants->tripipe);
        free(so->variants);
        free(so);
}

/* Launch grid is the compute equivalent of draw_vbo, so in this routine, we
 * construct the COMPUTE job and some of its payload.
 */

static void
panfrost_launch_grid(struct pipe_context *pipe,
                const struct pipe_grid_info *info)
{
        struct panfrost_context *ctx = pan_context(pipe);

        /* TODO: Do we want a special compute-only batch? */
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        ctx->compute_grid = info;

        /* TODO: Stub -- compute reuses the vertex/tiler payload layout for now */
        struct midgard_payload_vertex_tiler *payload = &ctx->payloads[PIPE_SHADER_COMPUTE];

        /* We implement OpenCL inputs as uniforms (or a UBO -- same thing), so
         * we reuse the graphics path by lowering the inputs to a Gallium
         * constant buffer */

        struct pipe_constant_buffer ubuf = {
                .buffer = NULL,
                .buffer_offset = 0,
                .buffer_size = ctx->shader[PIPE_SHADER_COMPUTE]->cbase.req_input_mem,
                .user_buffer = info->input
        };

        if (info->input)
                pipe->set_constant_buffer(pipe, PIPE_SHADER_COMPUTE, 0, &ubuf);

        panfrost_patch_shader_state(ctx, PIPE_SHADER_COMPUTE);
        panfrost_emit_shader_meta(batch, PIPE_SHADER_COMPUTE, payload);
        panfrost_emit_const_buf(batch, PIPE_SHADER_COMPUTE, payload);
        panfrost_emit_shared_memory(batch, info, payload);

        /* Invoke according to the grid info: grid[] gives the number of
         * workgroups per dimension and block[] the workgroup size */

        panfrost_pack_work_groups_compute(&payload->prefix,
                        info->grid[0], info->grid[1], info->grid[2],
                        info->block[0], info->block[1], info->block[2], false);

        panfrost_new_job(batch, JOB_TYPE_COMPUTE, true, 0, payload, sizeof(*payload), false);
        panfrost_flush_all_batches(ctx, true);
}

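/* Illustrative sketch only: a hypothetical helper (not called by the driver)
 * dispatching a 16x16 grid of 8x8 workgroups through the hook above. The
 * inputs pointer stands in for OpenCL-style kernel arguments and may be
 * NULL. */

static inline void
panfrost_example_dispatch(struct pipe_context *pctx, const void *inputs)
{
        struct pipe_grid_info grid = {
                .work_dim = 2,          /* 2D dispatch */
                .block = { 8, 8, 1 },   /* workgroup (local) size */
                .grid  = { 16, 16, 1 }, /* number of workgroups */
                .input = inputs,        /* lowered to a constant buffer above */
        };

        pctx->launch_grid(pctx, &grid);
}
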
static void
panfrost_set_compute_resources(struct pipe_context *pctx,
                unsigned start, unsigned count,
                struct pipe_surface **resources)
{
        /* TODO */
}

static void
panfrost_set_global_binding(struct pipe_context *pctx,
                unsigned first, unsigned count,
                struct pipe_resource **resources,
                uint32_t **handles)
{
        /* TODO */
}

static void
panfrost_memory_barrier(struct pipe_context *pctx, unsigned flags)
{
        /* TODO */
}

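/* Wires the compute hooks into the Gallium context; intended to be called
 * once at context-creation time (in panfrost, presumably from
 * panfrost_create_context()). */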
void
panfrost_compute_context_init(struct pipe_context *pctx)
{
        pctx->create_compute_state = panfrost_create_compute_state;
        pctx->bind_compute_state = panfrost_bind_compute_state;
        pctx->delete_compute_state = panfrost_delete_compute_state;

        pctx->launch_grid = panfrost_launch_grid;

        pctx->set_compute_resources = panfrost_set_compute_resources;
        pctx->set_global_binding = panfrost_set_global_binding;

        pctx->memory_barrier = panfrost_memory_barrier;
}