freedreno/computerator: add computerator
[mesa.git] / src / freedreno / computerator / a6xx.c
/*
 * Copyright © 2020 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ir3/ir3_compiler.h"

#include "util/u_math.h"

#include "registers/adreno_pm4.xml.h"
#include "registers/adreno_common.xml.h"
#include "registers/a6xx.xml.h"

#include "main.h"
#include "ir3_asm.h"

struct a6xx_backend {
	struct backend base;

	struct ir3_compiler *compiler;
	struct fd_device *dev;

	unsigned seqno;
	struct fd_bo *control_mem;
};
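
/* define_cast() (from main.h) generates the to_a6xx_backend() downcast
 * helper used below:
 */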
define_cast(backend, a6xx_backend);

/* This struct mirrors the layout of the control buffer (the gallium
 * driver's fd6_context::control):
 */
struct fd6_control {
	uint32_t seqno;          /* seqno for async CP_EVENT_WRITE, etc */
	uint32_t _pad0;
	/* flag set from cmdstream when VSC overflow detected: */
	volatile uint32_t vsc_overflow;
	uint32_t _pad1;
	uint32_t vsc_scratch;
	uint32_t _pad2;
	uint32_t _pad3;
	uint32_t _pad4;

	/* scratch space for VPC_SO[i].FLUSH_BASE_LO/HI, start on 32 byte boundary. */
	struct {
		uint32_t offset;
		uint32_t pad[7];
	} flush_base[4];
};

#define control_ptr(a6xx_backend, member) \
	(a6xx_backend)->control_mem, offsetof(struct fd6_control, member), 0, 0
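
/* control_ptr() expands to the (bo, offset, or, shift) argument tuple
 * expected by OUT_RELOC()/OUT_RELOCW(), so callers can refer to a field
 * of the control buffer by name, e.g.:
 *
 *    OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
 */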

static struct kernel *
a6xx_assemble(struct backend *b, FILE *in)
{
	struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);
	struct ir3_kernel *ir3_kernel =
		ir3_asm_assemble(a6xx_backend->compiler, in);
	ir3_kernel->backend = b;
	return &ir3_kernel->base;
}

static void
a6xx_disassemble(struct kernel *kernel, FILE *out)
{
	ir3_asm_disassemble(to_ir3_kernel(kernel), out);
}

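/* Emit the state needed to load and configure the compute shader.
 * This roughly mirrors the gallium driver's fd6 compute path: an
 * HLSQ_UPDATE_CNTL write (0xff appears to invalidate all previously
 * loaded shader state), the shader config/control registers, the
 * sysval register assignments, and finally a CP_LOAD_STATE6 to load
 * the instructions from the shader BO.
 */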
static void
cs_program_emit(struct fd_ringbuffer *ring, struct kernel *kernel)
{
	struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
	struct ir3_shader_variant *v = ir3_kernel->v;
	const struct ir3_info *i = &v->info;
	enum a3xx_threadsize thrsz = FOUR_QUADS;

	OUT_PKT4(ring, REG_A6XX_HLSQ_UPDATE_CNTL, 1);
	OUT_RING(ring, 0xff);

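	/* constlen is in units of vec4 constants; the hw appears to want
	 * it aligned to 4 (i.e. 16 dwords):
	 */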
	unsigned constlen = align(v->constlen, 4);
	OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL, 1);
	OUT_RING(ring, A6XX_HLSQ_CS_CNTL_CONSTLEN(constlen) |
		A6XX_HLSQ_CS_CNTL_ENABLED);

	OUT_PKT4(ring, REG_A6XX_SP_CS_CONFIG, 2);
	OUT_RING(ring, A6XX_SP_CS_CONFIG_ENABLED |
		A6XX_SP_CS_CONFIG_NIBO(kernel->num_bufs) |
		A6XX_SP_CS_CONFIG_NTEX(v->num_samp) |
		A6XX_SP_CS_CONFIG_NSAMP(v->num_samp));    /* SP_CS_CONFIG */
	OUT_RING(ring, v->instrlen);                  /* SP_CS_INSTRLEN */

	OUT_PKT4(ring, REG_A6XX_SP_CS_CTRL_REG0, 1);
	OUT_RING(ring, A6XX_SP_CS_CTRL_REG0_THREADSIZE(thrsz) |
		A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(i->max_reg + 1) |
		A6XX_SP_CS_CTRL_REG0_MERGEDREGS |
		A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(v->branchstack) |
		COND(v->need_pixlod, A6XX_SP_CS_CTRL_REG0_PIXLODENABLE));

	OUT_PKT4(ring, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
	OUT_RING(ring, 0x41);

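	/* Registers in which the shader expects to find the
	 * local-invocation-id and work-group-id sysvals; regid(63, 0)
	 * means "unused":
	 */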
	uint32_t local_invocation_id, work_group_id;
	local_invocation_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
	work_group_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_WORK_GROUP_ID);

	OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL_0, 2);
	OUT_RING(ring, A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
		A6XX_HLSQ_CS_CNTL_0_UNK0(regid(63, 0)) |
		A6XX_HLSQ_CS_CNTL_0_UNK1(regid(63, 0)) |
		A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
	OUT_RING(ring, 0x2fc);               /* HLSQ_CS_UNKNOWN_B998 */

	OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START_LO, 2);
	OUT_RELOC(ring, v->bo, 0, 0, 0);     /* SP_CS_OBJ_START_LO/HI */

	OUT_PKT4(ring, REG_A6XX_SP_CS_INSTRLEN, 1);
	OUT_RING(ring, v->instrlen);

	OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
	OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
		CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
		CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
		CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
		CP_LOAD_STATE6_0_NUM_UNIT(v->instrlen));
	OUT_RELOCD(ring, v->bo, 0, 0, 0);
}

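/* Emit a block of constants via CP_LOAD_STATE6.  regid and sizedwords
 * are in units of dwords; the packet payload must be padded out to a
 * whole number of vec4s (4 dwords).
 */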
static void
emit_const(struct fd_ringbuffer *ring, uint32_t regid,
		uint32_t sizedwords, const uint32_t *dwords)
{
	uint32_t align_sz;

	debug_assert((regid % 4) == 0);

	align_sz = align(sizedwords, 4);

	OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3 + align_sz);
	OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(regid/4) |
		CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
		CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
		CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
		CP_LOAD_STATE6_0_NUM_UNIT(DIV_ROUND_UP(sizedwords, 4)));
	OUT_RING(ring, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
	OUT_RING(ring, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

	for (uint32_t i = 0; i < sizedwords; i++) {
		OUT_RING(ring, dwords[i]);
	}

	/* Zero-pad to multiple of 4 dwords */
	for (uint32_t i = sizedwords; i < align_sz; i++) {
		OUT_RING(ring, 0);
	}
}

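/* Emit the shader's immediate constants.  If the shader uses the
 * number-of-workgroups sysval (numwg), patch the grid size into the
 * immediates before uploading them.
 */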
static void
cs_const_emit(struct fd_ringbuffer *ring, struct kernel *kernel, uint32_t grid[3])
{
	struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
	struct ir3_shader_variant *v = ir3_kernel->v;

	const struct ir3_const_state *const_state = &v->shader->const_state;
	uint32_t base = const_state->offsets.immediate;
	int size = const_state->immediates_count;

	if (ir3_kernel->numwg != INVALID_REG) {
		assert((ir3_kernel->numwg & 0x3) == 0);
		int idx = ir3_kernel->numwg >> 2;
		const_state->immediates[idx].val[0] = grid[0];
		const_state->immediates[idx].val[1] = grid[1];
		const_state->immediates[idx].val[2] = grid[2];
	}

	/* truncate size to avoid writing constants that the shader
	 * does not use:
	 */
	size = MIN2(size + base, v->constlen) - base;

	/* convert out of vec4: */
	base *= 4;
	size *= 4;

	if (size > 0) {
		emit_const(ring, base, size, const_state->immediates[0].val);
	}
}

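/* Emit the IBO (image/SSBO descriptor) state for each buffer bound to
 * the kernel.  Each descriptor is 16 dwords; the buffers are exposed
 * as untyped 32-bit UINT "1D" images.
 */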
static void
cs_ibo_emit(struct fd_ringbuffer *ring, struct fd_submit *submit,
		struct kernel *kernel)
{
	struct fd_ringbuffer *state =
		fd_submit_new_ringbuffer(submit,
			kernel->num_bufs * 16 * 4,
			FD_RINGBUFFER_STREAMING);

	for (unsigned i = 0; i < kernel->num_bufs; i++) {
		/* size is encoded with low 15b in WIDTH and high bits in HEIGHT,
		 * in units of elements:
		 */
		unsigned sz = kernel->buf_sizes[i];
		unsigned width = sz & MASK(15);
		unsigned height = sz >> 15;

		OUT_RING(state, A6XX_IBO_0_FMT(FMT6_32_UINT) |
			A6XX_IBO_0_TILE_MODE(0));
		OUT_RING(state, A6XX_IBO_1_WIDTH(width) |
			A6XX_IBO_1_HEIGHT(height));
		OUT_RING(state, A6XX_IBO_2_PITCH(0) |
			A6XX_IBO_2_UNK4 | A6XX_IBO_2_UNK31 |
			A6XX_IBO_2_TYPE(A6XX_TEX_1D));
		OUT_RING(state, A6XX_IBO_3_ARRAY_PITCH(0));
		OUT_RELOCW(state, kernel->bufs[i], 0, 0, 0);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
		OUT_RING(state, 0x00000000);
	}

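	/* The descriptors are referenced in two places: loaded into HLSQ
	 * state via CP_LOAD_STATE6, and pointed at directly by
	 * SP_CS_IBO_LO/HI; presumably both consumers need them:
	 */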
	OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
	OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
		CP_LOAD_STATE6_0_STATE_TYPE(ST6_IBO) |
		CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
		CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
		CP_LOAD_STATE6_0_NUM_UNIT(kernel->num_bufs));
	OUT_RB(ring, state);

	OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_LO, 2);
	OUT_RB(ring, state);

	OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_COUNT, 1);
	OUT_RING(ring, kernel->num_bufs);

	fd_ringbuffer_del(state);
}

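/* Emit a CP_EVENT_WRITE.  If timestamp is set, the CP also writes an
 * incrementing seqno into the control buffer when the event completes,
 * which gives us something to wait on; the seqno used is returned.
 */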
static inline unsigned
event_write(struct fd_ringbuffer *ring, struct kernel *kernel,
		enum vgt_event_type evt, bool timestamp)
{
	unsigned seqno = 0;

	OUT_PKT7(ring, CP_EVENT_WRITE, timestamp ? 4 : 1);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(evt));
	if (timestamp) {
		struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
		struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
		seqno = ++a6xx_backend->seqno;
		OUT_RELOCW(ring, control_ptr(a6xx_backend, seqno));   /* ADDR_LO/HI */
		OUT_RING(ring, seqno);
	}

	return seqno;
}

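/* Flush and invalidate the caches after the grid executes, and stall
 * until the flush lands, so that buffer contents can be read back on
 * the CPU.  This is similar to the cache-flush sequence the gallium
 * driver emits at the end of a batch.
 */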
static inline void
cache_flush(struct fd_ringbuffer *ring, struct kernel *kernel)
{
	struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
	struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
	unsigned seqno;

	seqno = event_write(ring, kernel, CACHE_FLUSH_AND_INV_EVENT, true);

	OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
	OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
		CP_WAIT_REG_MEM_0_POLL_MEMORY);
	OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
	OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(seqno));
	OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(~0));
	OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

	seqno = event_write(ring, kernel, CACHE_FLUSH_TS, true);

	OUT_PKT7(ring, CP_WAIT_MEM_GTE, 4);
	OUT_RING(ring, CP_WAIT_MEM_GTE_0_RESERVED(0));
	OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
	OUT_RING(ring, CP_WAIT_MEM_GTE_3_REF(seqno));
}

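/* Emit the full cmdstream to dispatch the kernel: program state,
 * constants, and IBOs, followed by the CS NDRANGE/workgroup setup,
 * the CP_EXEC_CS dispatch itself, and a trailing cache flush.
 */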
static void
a6xx_emit_grid(struct kernel *kernel, uint32_t grid[3], struct fd_submit *submit)
{
	struct fd_ringbuffer *ring = fd_submit_new_ringbuffer(submit, 0,
		FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);

	cs_program_emit(ring, kernel);
	cs_const_emit(ring, kernel, grid);
	cs_ibo_emit(ring, submit, kernel);

	OUT_PKT7(ring, CP_SET_MARKER, 1);
	OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

	const unsigned *local_size = kernel->local_size;
	const unsigned *num_groups = grid;

	unsigned work_dim = 0;
	for (int i = 0; i < 3; i++) {
		if (!grid[i])
			break;
		work_dim++;
	}

	OUT_PKT4(ring, REG_A6XX_HLSQ_CS_NDRANGE_0, 7);
	OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(work_dim) |
		A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(local_size[0] - 1) |
		A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(local_size[1] - 1) |
		A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(local_size[2] - 1));
	OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(local_size[0] * num_groups[0]));
	OUT_RING(ring, 0);            /* HLSQ_CS_NDRANGE_2_GLOBALOFF_X */
	OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(local_size[1] * num_groups[1]));
	OUT_RING(ring, 0);            /* HLSQ_CS_NDRANGE_4_GLOBALOFF_Y */
	OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(local_size[2] * num_groups[2]));
	OUT_RING(ring, 0);            /* HLSQ_CS_NDRANGE_6_GLOBALOFF_Z */

	OUT_PKT4(ring, REG_A6XX_HLSQ_CS_KERNEL_GROUP_X, 3);
	OUT_RING(ring, 1);            /* HLSQ_CS_KERNEL_GROUP_X */
	OUT_RING(ring, 1);            /* HLSQ_CS_KERNEL_GROUP_Y */
	OUT_RING(ring, 1);            /* HLSQ_CS_KERNEL_GROUP_Z */

	OUT_PKT7(ring, CP_EXEC_CS, 4);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, CP_EXEC_CS_1_NGROUPS_X(grid[0]));
	OUT_RING(ring, CP_EXEC_CS_2_NGROUPS_Y(grid[1]));
	OUT_RING(ring, CP_EXEC_CS_3_NGROUPS_Z(grid[2]));

	OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);

	cache_flush(ring, kernel);
}

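/* Construct the a6xx backend.  The returned vtable is what drives the
 * overall flow; roughly (names illustrative):
 *
 *    struct backend *b = a6xx_init(dev, gpu_id);
 *    struct kernel *kernel = b->assemble(b, in);
 *    b->emit_grid(kernel, grid, submit);
 */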
struct backend *
a6xx_init(struct fd_device *dev, uint32_t gpu_id)
{
	struct a6xx_backend *a6xx_backend = calloc(1, sizeof(*a6xx_backend));

	a6xx_backend->base = (struct backend) {
		.assemble = a6xx_assemble,
		.disassemble = a6xx_disassemble,
		.emit_grid = a6xx_emit_grid,
	};

	a6xx_backend->compiler = ir3_compiler_create(dev, gpu_id);
	a6xx_backend->dev = dev;

	a6xx_backend->control_mem = fd_bo_new(dev, 0x1000,
		DRM_FREEDRENO_GEM_TYPE_KMEM, "control");

	return &a6xx_backend->base;
}