gallium: make handles of set_global_binding 64 bit
[mesa.git] src/gallium/drivers/panfrost/pan_compute.c
/*
 * Copyright (C) 2019 Collabora, Ltd.
 * Copyright (C) 2019 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *   Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 *
 */

#include "pan_context.h"
#include "pan_cmdstream.h"
#include "pan_bo.h"
#include "util/u_memory.h"
#include "nir_serialize.h"

/* Compute CSOs are tracked like graphics shader CSOs, but are
 * considerably simpler. We do not implement multiple
 * variants/keying. So the CSO create function just goes ahead and
 * compiles the thing. */

static void *
panfrost_create_compute_state(
        struct pipe_context *pctx,
        const struct pipe_compute_state *cso)
{
        struct panfrost_context *ctx = pan_context(pctx);

        struct panfrost_shader_variants *so = CALLOC_STRUCT(panfrost_shader_variants);
        so->cbase = *cso;
        so->is_compute = true;

        /* There is exactly one variant, compiled eagerly below */
        struct panfrost_shader_state *v = calloc(1, sizeof(*v));
        so->variants = v;

        so->variant_count = 1;
        so->active_variant = 0;

        if (cso->ir_type == PIPE_SHADER_IR_NIR_SERIALIZED) {
                /* Deserialize the NIR blob the frontend handed us so we can
                 * compile from NIR as usual */
                struct blob_reader reader;
                const struct pipe_binary_program_header *hdr = cso->prog;

                blob_reader_init(&reader, hdr->blob, hdr->num_bytes);
                so->cbase.prog = nir_deserialize(NULL, &midgard_nir_options, &reader);
                so->cbase.ir_type = PIPE_SHADER_IR_NIR;
        }

        panfrost_shader_compile(ctx, so->cbase.ir_type, so->cbase.prog,
                                MESA_SHADER_COMPUTE, v, NULL);

        return so;
}
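
/* Illustrative only, not part of the driver: a sketch of how a frontend
 * might package a NIR shader into the PIPE_SHADER_IR_NIR_SERIALIZED form
 * accepted above. The function name is hypothetical and the sketch is
 * compiled out with #if 0. */
#if 0
static void *
example_create_serialized_cso(struct pipe_context *pctx, nir_shader *nir)
{
        struct blob blob;
        blob_init(&blob);
        nir_serialize(&blob, nir, false);

        /* pipe_binary_program_header is a u32 byte count followed by the
         * serialized blob itself */
        struct pipe_binary_program_header *hdr =
                malloc(sizeof(*hdr) + blob.size);
        hdr->num_bytes = blob.size;
        memcpy(hdr->blob, blob.data, blob.size);
        blob_finish(&blob);

        struct pipe_compute_state cso = {
                .ir_type = PIPE_SHADER_IR_NIR_SERIALIZED,
                .prog = hdr,
        };

        /* create_compute_state deserializes eagerly, so the header can be
         * freed as soon as it returns */
        void *so = pctx->create_compute_state(pctx, &cso);
        free(hdr);
        return so;
}
#endif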

static void
panfrost_bind_compute_state(struct pipe_context *pipe, void *cso)
{
        struct panfrost_context *ctx = pan_context(pipe);

        struct panfrost_shader_variants *variants =
                (struct panfrost_shader_variants *) cso;

        ctx->shader[PIPE_SHADER_COMPUTE] = variants;
}

static void
panfrost_delete_compute_state(struct pipe_context *pipe, void *cso)
{
        struct panfrost_shader_variants *so =
                (struct panfrost_shader_variants *) cso;

        /* Free the variant array allocated at create time as well as the
         * CSO itself, so neither leaks */
        free(so->variants);
        free(so);
}

/* Launch grid is the compute equivalent of draw_vbo, so in this routine, we
 * construct the COMPUTE job and some of its payload.
 */

static void
panfrost_launch_grid(struct pipe_context *pipe,
                     const struct pipe_grid_info *info)
{
        struct panfrost_context *ctx = pan_context(pipe);

        /* TODO: Do we want a special compute-only batch? */
        struct panfrost_batch *batch = panfrost_get_batch_for_fbo(ctx);

        ctx->compute_grid = info;

        /* TODO: Stub */
        struct midgard_payload_vertex_tiler payload;

        /* We implement OpenCL inputs as uniforms (or a UBO -- same thing), so
         * reuse the graphics path for this by lowering to Gallium */

        struct pipe_constant_buffer ubuf = {
                .buffer = NULL,
                .buffer_offset = 0,
                .buffer_size = ctx->shader[PIPE_SHADER_COMPUTE]->cbase.req_input_mem,
                .user_buffer = info->input
        };

        if (info->input)
                pipe->set_constant_buffer(pipe, PIPE_SHADER_COMPUTE, 0, &ubuf);

        panfrost_vt_init(ctx, PIPE_SHADER_COMPUTE, &payload);

        panfrost_emit_shader_meta(batch, PIPE_SHADER_COMPUTE, &payload);
        panfrost_emit_const_buf(batch, PIPE_SHADER_COMPUTE, &payload);
        panfrost_emit_shared_memory(batch, info, &payload);

        /* Invoke according to the grid info */

        panfrost_pack_work_groups_compute(&payload.prefix,
                                          info->grid[0], info->grid[1],
                                          info->grid[2],
                                          info->block[0], info->block[1],
                                          info->block[2],
                                          false);

        panfrost_new_job(batch, JOB_TYPE_COMPUTE, true, 0, &payload,
                         sizeof(payload), false);
        panfrost_flush_all_batches(ctx, true);
}
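
/* Illustrative only: what a dispatch through launch_grid looks like from the
 * frontend side. block[] is the workgroup size in threads and grid[] the
 * number of workgroups; the sizes here are made up for the sketch, and
 * `input` is the flat kernel-argument buffer lowered to a UBO above.
 * Compiled out with #if 0. */
#if 0
static void
example_dispatch(struct pipe_context *pipe, void *input)
{
        struct pipe_grid_info info = {
                .block = { 64, 1, 1 },  /* 64 threads per workgroup */
                .grid  = { 16, 16, 1 }, /* 16x16x1 workgroups */
                .input = input,
        };

        pipe->launch_grid(pipe, &info);
}
#endif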

static void
panfrost_set_compute_resources(struct pipe_context *pctx,
                               unsigned start, unsigned count,
                               struct pipe_surface **resources)
{
        /* TODO */
}

static void
panfrost_set_global_binding(struct pipe_context *pctx,
                            unsigned first, unsigned count,
                            struct pipe_resource **resources,
                            uint64_t **handles)
{
        /* TODO */
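
        /* A minimal sketch of what an implementation could do now that each
         * handle is a full 64 bits wide (the point of this interface
         * change): rebase the byte offset the frontend pre-loads into
         * *handles[i] onto the backing BO's GPU virtual address, bound at
         * slot `first + i`. Assumes the pan_resource() cast helper and the
         * bo->gpu address used elsewhere in the driver; compiled out until
         * the driver grows real support. */
#if 0
        for (unsigned i = 0; i < count; ++i) {
                struct panfrost_resource *rsrc = pan_resource(resources[i]);

                *(handles[i]) += rsrc->bo->gpu;
        }
#endif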
}

static void
panfrost_memory_barrier(struct pipe_context *pctx, unsigned flags)
{
        /* TODO */
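
        /* One plausible, if heavy-handed, sketch: Panfrost already orders
         * work by draining batches, so flushing everything, as launch_grid
         * does above, would cover any barrier regardless of `flags`.
         * Compiled out until the driver needs it. */
#if 0
        panfrost_flush_all_batches(pan_context(pctx), true);
#endif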
}

void
panfrost_compute_context_init(struct pipe_context *pctx)
{
        pctx->create_compute_state = panfrost_create_compute_state;
        pctx->bind_compute_state = panfrost_bind_compute_state;
        pctx->delete_compute_state = panfrost_delete_compute_state;

        pctx->launch_grid = panfrost_launch_grid;

        pctx->set_compute_resources = panfrost_set_compute_resources;
        pctx->set_global_binding = panfrost_set_global_binding;

        pctx->memory_barrier = panfrost_memory_barrier;
}