freedreno: update generated headers
[mesa.git] src/gallium/drivers/freedreno/a5xx/fd5_compute.c
/*
 * Copyright (C) 2017 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"

#include "freedreno_resource.h"

#include "fd5_compute.h"
#include "fd5_context.h"
#include "fd5_emit.h"

struct fd5_compute_stateobj {
	struct ir3_shader *shader;
};


static void *
fd5_create_compute_state(struct pipe_context *pctx,
		const struct pipe_compute_state *cso)
{
	struct fd_context *ctx = fd_context(pctx);

	/* req_input_mem will only be non-zero for cl kernels (i.e. clover).
	 * This isn't a perfect test because it is possible (but uncommon)
	 * for none of the kernel parameters to be a global, but
	 * ctx->set_global_bindings() can't fail, so this is the next best
	 * place to fail if we need a newer version of the kernel driver:
	 */
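	/* (The iova requirement presumably comes from the global buffer
	 * addresses being emitted into the const state as raw GPU addresses;
	 * see the comment around ir3_emit_cs_consts() in fd5_launch_grid()
	 * below.)
	 */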
	if ((cso->req_input_mem > 0) &&
			fd_device_version(ctx->dev) < FD_VERSION_BO_IOVA) {
		return NULL;
	}

	struct ir3_compiler *compiler = ctx->screen->compiler;
	struct fd5_compute_stateobj *so = CALLOC_STRUCT(fd5_compute_stateobj);
	so->shader = ir3_shader_create_compute(compiler, cso, &ctx->debug);
	return so;
}

static void
fd5_delete_compute_state(struct pipe_context *pctx, void *hwcso)
{
	struct fd5_compute_stateobj *so = hwcso;
	ir3_shader_destroy(so->shader);
	free(so);
}

/* maybe move to fd5_program? */
static void
cs_program_emit(struct fd_ringbuffer *ring, struct ir3_shader_variant *v,
		const struct pipe_grid_info *info)
{
	const unsigned *local_size = info->block;
	const struct ir3_info *i = &v->info;
	enum a3xx_threadsize thrsz;
	unsigned instrlen = v->instrlen;

	/* If the shader is more than 32*16 instructions, don't preload it.
	 * Similar to the combined restriction of 64*16 for VS+FS:
	 */
	if (instrlen > 32)
		instrlen = 0;
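	/* (With instrlen forced to zero, the fd5_emit_shader() preload at the
	 * end of this function is skipped; presumably the SP then fetches the
	 * instructions from the buffer at SP_CS_OBJ_START instead.)
	 */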

	/* maybe the limit should be 1024.. basically if we can't have full
	 * occupancy, use TWO_QUAD mode to reduce divergence penalty.
	 */
	if ((local_size[0] * local_size[1] * local_size[2]) < 512) {
		thrsz = TWO_QUADS;
	} else {
		thrsz = FOUR_QUADS;
	}

	OUT_PKT4(ring, REG_A5XX_SP_SP_CNTL, 1);
	OUT_RING(ring, 0x00000000); /* SP_SP_CNTL */

	OUT_PKT4(ring, REG_A5XX_HLSQ_CONTROL_0_REG, 1);
	OUT_RING(ring, A5XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(TWO_QUADS) |
		A5XX_HLSQ_CONTROL_0_REG_CSTHREADSIZE(thrsz) |
		0x00000880 /* XXX */);

	OUT_PKT4(ring, REG_A5XX_SP_CS_CTRL_REG0, 1);
	OUT_RING(ring, A5XX_SP_CS_CTRL_REG0_THREADSIZE(thrsz) |
		A5XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(i->max_half_reg + 1) |
		A5XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(i->max_reg + 1) |
		A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(0x3) | // XXX need to figure this out somehow..
		0x6 /* XXX */);

	OUT_PKT4(ring, REG_A5XX_HLSQ_CS_CONFIG, 1);
	OUT_RING(ring, A5XX_HLSQ_CS_CONFIG_CONSTOBJECTOFFSET(0) |
		A5XX_HLSQ_CS_CONFIG_SHADEROBJOFFSET(0) |
		A5XX_HLSQ_CS_CONFIG_ENABLED);

	OUT_PKT4(ring, REG_A5XX_HLSQ_CS_CNTL, 1);
	OUT_RING(ring, A5XX_HLSQ_CS_CNTL_INSTRLEN(instrlen) |
		COND(v->has_ssbo, A5XX_HLSQ_CS_CNTL_SSBO_ENABLE));

	OUT_PKT4(ring, REG_A5XX_SP_CS_CONFIG, 1);
	OUT_RING(ring, A5XX_SP_CS_CONFIG_CONSTOBJECTOFFSET(0) |
		A5XX_SP_CS_CONFIG_SHADEROBJOFFSET(0) |
		A5XX_SP_CS_CONFIG_ENABLED);

	unsigned constlen = align(v->constlen, 4) / 4;
	OUT_PKT4(ring, REG_A5XX_HLSQ_CS_CONSTLEN, 2);
	OUT_RING(ring, constlen); /* HLSQ_CS_CONSTLEN */
	OUT_RING(ring, instrlen); /* HLSQ_CS_INSTRLEN */

	OUT_PKT4(ring, REG_A5XX_SP_CS_OBJ_START_LO, 2);
	OUT_RELOC(ring, v->bo, 0, 0, 0); /* SP_CS_OBJ_START_LO/HI */

	OUT_PKT4(ring, REG_A5XX_HLSQ_UPDATE_CNTL, 1);
	OUT_RING(ring, 0x1f00000);

	uint32_t local_invocation_id, work_group_id;
	local_invocation_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
	work_group_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_WORK_GROUP_ID);
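	/* (ir3_find_sysval_regid() returns regid(63, 0) for sysvals the shader
	 * doesn't use; r63.x is ir3's "invalid/unused" register id, which is
	 * also what the UNK0/UNK1 fields below are set to.)
	 */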

	OUT_PKT4(ring, REG_A5XX_HLSQ_CS_CNTL_0, 2);
	OUT_RING(ring, A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
		A5XX_HLSQ_CS_CNTL_0_UNK0(regid(63, 0)) |
		A5XX_HLSQ_CS_CNTL_0_UNK1(regid(63, 0)) |
		A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
	OUT_RING(ring, 0x1); /* HLSQ_CS_CNTL_1 */

	if (instrlen > 0)
		fd5_emit_shader(ring, v);
}

static void
emit_setup(struct fd_context *ctx)
{
	struct fd_ringbuffer *ring = ctx->batch->draw;

	fd5_emit_restore(ctx->batch, ring);
	fd5_emit_lrz_flush(ring);

	OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
	OUT_RING(ring, 0x0);

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, PC_CCU_INVALIDATE_COLOR);

	OUT_PKT4(ring, REG_A5XX_PC_POWER_CNTL, 1);
	OUT_RING(ring, 0x00000003); /* PC_POWER_CNTL */

	OUT_PKT4(ring, REG_A5XX_VFD_POWER_CNTL, 1);
	OUT_RING(ring, 0x00000003); /* VFD_POWER_CNTL */

	/* 0x10000000 for BYPASS.. 0x7c13c080 for GMEM: */
	fd_wfi(ctx->batch, ring);
	OUT_PKT4(ring, REG_A5XX_RB_CCU_CNTL, 1);
	OUT_RING(ring, 0x10000000); /* RB_CCU_CNTL */
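	/* (Compute is dispatched in bypass (sysmem) mode rather than GMEM,
	 * hence the BYPASS CCU value above and A5XX_RB_CNTL_BYPASS below.)
	 */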

	OUT_PKT4(ring, REG_A5XX_RB_CNTL, 1);
	OUT_RING(ring, A5XX_RB_CNTL_BYPASS);
}

static void
fd5_launch_grid(struct fd_context *ctx, const struct pipe_grid_info *info)
{
	struct fd5_compute_stateobj *so = ctx->compute;
	struct ir3_shader_key key = {};
	struct ir3_shader_variant *v;
	struct fd_ringbuffer *ring = ctx->batch->draw;
	unsigned i, nglobal = 0;

	emit_setup(ctx);

	v = ir3_shader_variant(so->shader, key, &ctx->debug);
	if (!v)
		return;

	if (ctx->dirty_shader[PIPE_SHADER_COMPUTE] & FD_DIRTY_SHADER_PROG)
		cs_program_emit(ring, v, info);

	fd5_emit_cs_state(ctx, ring, v);
	ir3_emit_cs_consts(v, ring, ctx, info);

	foreach_bit(i, ctx->global_bindings.enabled_mask)
		nglobal++;

	if (nglobal > 0) {
		/* Global resources don't otherwise get an OUT_RELOC(), since the
		 * raw pointer address is emitted in ir3_emit_cs_consts().  So to
		 * make the kernel aware that these buffers are referenced by the
		 * batch, emit dummy relocs as part of a no-op packet payload:
		 */
		OUT_PKT7(ring, CP_NOP, 2 * nglobal);
		foreach_bit(i, ctx->global_bindings.enabled_mask) {
			struct pipe_resource *prsc = ctx->global_bindings.buf[i];
			OUT_RELOCW(ring, fd_resource(prsc)->bo, 0, 0, 0);
		}
	}

	const unsigned *local_size = info->block; // v->shader->nir->info->cs.local_size;
	const unsigned *num_groups = info->grid;
	/* for some reason, mesa/st doesn't set info->work_dim, so just assume 3: */
	const unsigned work_dim = info->work_dim ? info->work_dim : 3;
	OUT_PKT4(ring, REG_A5XX_HLSQ_CS_NDRANGE_0, 7);
	OUT_RING(ring, A5XX_HLSQ_CS_NDRANGE_0_KERNELDIM(work_dim) |
		A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(local_size[0] - 1) |
		A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(local_size[1] - 1) |
		A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(local_size[2] - 1));
	OUT_RING(ring, A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(local_size[0] * num_groups[0]));
	OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_2_GLOBALOFF_X */
	OUT_RING(ring, A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(local_size[1] * num_groups[1]));
	OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_4_GLOBALOFF_Y */
	OUT_RING(ring, A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(local_size[2] * num_groups[2]));
	OUT_RING(ring, 0); /* HLSQ_CS_NDRANGE_6_GLOBALOFF_Z */
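	/* (Note the GLOBALSIZE fields above are in units of invocations, ie.
	 * local_size * num_groups, not in workgroups.)
	 */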

	OUT_PKT4(ring, REG_A5XX_HLSQ_CS_KERNEL_GROUP_X, 3);
	OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_X */
	OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_Y */
	OUT_RING(ring, 1); /* HLSQ_CS_KERNEL_GROUP_Z */

	if (info->indirect) {
		struct fd_resource *rsc = fd_resource(info->indirect);

		fd5_emit_flush(ctx, ring);
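		/* (The flush is presumably to make sure any in-flight GPU writes
		 * to the indirect buffer land before the CP reads the dispatch
		 * dimensions from it.)
		 */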

		OUT_PKT7(ring, CP_EXEC_CS_INDIRECT, 4);
		OUT_RING(ring, 0x00000000);
		OUT_RELOC(ring, rsc->bo, info->indirect_offset, 0, 0); /* ADDR_LO/HI */
		OUT_RING(ring, A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
			A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
			A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
	} else {
		OUT_PKT7(ring, CP_EXEC_CS, 4);
		OUT_RING(ring, 0x00000000);
		OUT_RING(ring, CP_EXEC_CS_1_NGROUPS_X(info->grid[0]));
		OUT_RING(ring, CP_EXEC_CS_2_NGROUPS_Y(info->grid[1]));
		OUT_RING(ring, CP_EXEC_CS_3_NGROUPS_Z(info->grid[2]));
	}
}

void
fd5_compute_init(struct pipe_context *pctx)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->launch_grid = fd5_launch_grid;
	pctx->create_compute_state = fd5_create_compute_state;
	pctx->delete_compute_state = fd5_delete_compute_state;
}