freedreno/registers: split header build into subdirs
[mesa.git] / src / freedreno / computerator / a6xx.c
/*
 * Copyright © 2020 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ir3/ir3_compiler.h"

#include "util/u_math.h"

#include "adreno_pm4.xml.h"
#include "adreno_common.xml.h"
#include "a6xx.xml.h"

#include "main.h"
#include "ir3_asm.h"

struct a6xx_backend {
   struct backend base;

   struct ir3_compiler *compiler;
   struct fd_device *dev;

   unsigned seqno;
   struct fd_bo *control_mem;

   struct fd_bo *query_mem;
   const struct perfcntr *perfcntrs;
   unsigned num_perfcntrs;
};
define_cast(backend, a6xx_backend);

/*
 * Data structures shared with GPU:
 */

/* This struct defines the layout of the fd6_context::control buffer: */
struct fd6_control {
   uint32_t seqno;   /* seqno for async CP_EVENT_WRITE, etc */
   uint32_t _pad0;
   /* flag set from cmdstream when VSC overflow detected: */
   volatile uint32_t vsc_overflow;
   uint32_t _pad1;
   uint32_t vsc_scratch;
   uint32_t _pad2;
   uint32_t _pad3;
   uint32_t _pad4;

   /* scratch space for VPC_SO[i].FLUSH_BASE_LO/HI, start on 32 byte boundary. */
   struct {
      uint32_t offset;
      uint32_t pad[7];
   } flush_base[4];
};

#define control_ptr(a6xx_backend, member) \
   (a6xx_backend)->control_mem, offsetof(struct fd6_control, member), 0, 0
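
/* Note: control_ptr() (and query_sample_idx() below) expand to the trailing
 * OUT_RELOC() arguments: the bo, the byte offset of the member within it,
 * and two zeros (presumably the reloc's or/shift fields), so they can be
 * passed directly as the relocation target.
 */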

struct PACKED fd6_query_sample {
   uint64_t start;
   uint64_t result;
   uint64_t stop;
};

/* offset of a single field of an array of fd6_query_sample: */
#define query_sample_idx(a6xx_backend, idx, field) \
   (a6xx_backend)->query_mem, \
   (idx * sizeof(struct fd6_query_sample)) + \
   offsetof(struct fd6_query_sample, field), \
   0, 0

/*
 * Backend implementation:
 */

static struct kernel *
a6xx_assemble(struct backend *b, FILE *in)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);
   struct ir3_kernel *ir3_kernel =
      ir3_asm_assemble(a6xx_backend->compiler, in);
   ir3_kernel->backend = b;
   return &ir3_kernel->base;
}

static void
a6xx_disassemble(struct kernel *kernel, FILE *out)
{
   ir3_asm_disassemble(to_ir3_kernel(kernel), out);
}

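/*
 * Emit the compute program state: invalidate stale shader state, program
 * the HLSQ/SP_CS registers (constlen, register footprint, threadsize,
 * IBO/texture counts, and the regids for the workgroup-id and
 * local-invocation-id sysvals), and point the CP at the shader instructions
 * with an indirect CP_LOAD_STATE6.
 */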
static void
cs_program_emit(struct fd_ringbuffer *ring, struct kernel *kernel)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct ir3_shader_variant *v = ir3_kernel->v;
   const struct ir3_info *i = &v->info;
   enum a3xx_threadsize thrsz = FOUR_QUADS;

   OUT_PKT4(ring, REG_A6XX_HLSQ_INVALIDATE_CMD, 1);
   OUT_RING(ring, A6XX_HLSQ_INVALIDATE_CMD_VS_STATE |
            A6XX_HLSQ_INVALIDATE_CMD_HS_STATE |
            A6XX_HLSQ_INVALIDATE_CMD_DS_STATE |
            A6XX_HLSQ_INVALIDATE_CMD_GS_STATE |
            A6XX_HLSQ_INVALIDATE_CMD_FS_STATE |
            A6XX_HLSQ_INVALIDATE_CMD_CS_STATE |
            A6XX_HLSQ_INVALIDATE_CMD_CS_IBO |
            A6XX_HLSQ_INVALIDATE_CMD_GFX_IBO);

   unsigned constlen = align(v->constlen, 4);
   OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL, 1);
   OUT_RING(ring, A6XX_HLSQ_CS_CNTL_CONSTLEN(constlen) |
            A6XX_HLSQ_CS_CNTL_ENABLED);

   OUT_PKT4(ring, REG_A6XX_SP_CS_CONFIG, 2);
   OUT_RING(ring, A6XX_SP_CS_CONFIG_ENABLED |
            A6XX_SP_CS_CONFIG_NIBO(kernel->num_bufs) |
            A6XX_SP_CS_CONFIG_NTEX(v->num_samp) |
            A6XX_SP_CS_CONFIG_NSAMP(v->num_samp));   /* SP_CS_CONFIG */
   OUT_RING(ring, v->instrlen);                      /* SP_CS_INSTRLEN */

   OUT_PKT4(ring, REG_A6XX_SP_CS_CTRL_REG0, 1);
   OUT_RING(ring, A6XX_SP_CS_CTRL_REG0_THREADSIZE(thrsz) |
            A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(i->max_reg + 1) |
            A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(i->max_half_reg + 1) |
            COND(v->mergedregs, A6XX_SP_CS_CTRL_REG0_MERGEDREGS) |
            A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(v->branchstack) |
            COND(v->need_pixlod, A6XX_SP_CS_CTRL_REG0_PIXLODENABLE));

   OUT_PKT4(ring, REG_A6XX_SP_CS_UNKNOWN_A9B1, 1);
   OUT_RING(ring, 0x41);

   uint32_t local_invocation_id, work_group_id;
   local_invocation_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_LOCAL_INVOCATION_ID);
   work_group_id = ir3_find_sysval_regid(v, SYSTEM_VALUE_WORK_GROUP_ID);

   OUT_PKT4(ring, REG_A6XX_HLSQ_CS_CNTL_0, 2);
   OUT_RING(ring, A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(work_group_id) |
            A6XX_HLSQ_CS_CNTL_0_UNK0(regid(63, 0)) |
            A6XX_HLSQ_CS_CNTL_0_UNK1(regid(63, 0)) |
            A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(local_invocation_id));
   OUT_RING(ring, 0x2fc);   /* HLSQ_CS_UNKNOWN_B998 */

   OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START_LO, 2);
   OUT_RELOC(ring, v->bo, 0, 0, 0);   /* SP_CS_OBJ_START_LO/HI */

   OUT_PKT4(ring, REG_A6XX_SP_CS_INSTRLEN, 1);
   OUT_RING(ring, v->instrlen);

   OUT_PKT4(ring, REG_A6XX_SP_CS_OBJ_START_LO, 2);
   OUT_RELOC(ring, v->bo, 0, 0, 0);

   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
            CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
            CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
            CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
            CP_LOAD_STATE6_0_NUM_UNIT(v->instrlen));
   OUT_RELOC(ring, v->bo, 0, 0, 0);
}

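/*
 * Upload a run of constant dwords inline in the cmdstream (SS6_DIRECT).
 * The destination offset and size are in vec4 units, so the payload is
 * zero-padded out to a multiple of four dwords.
 */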
static void
emit_const(struct fd_ringbuffer *ring, uint32_t regid,
           uint32_t sizedwords, const uint32_t *dwords)
{
   uint32_t align_sz;

   debug_assert((regid % 4) == 0);

   align_sz = align(sizedwords, 4);

   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3 + align_sz);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(regid/4) |
            CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
            CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
            CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
            CP_LOAD_STATE6_0_NUM_UNIT(DIV_ROUND_UP(sizedwords, 4)));
   OUT_RING(ring, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
   OUT_RING(ring, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));

   for (uint32_t i = 0; i < sizedwords; i++) {
      OUT_RING(ring, dwords[i]);
   }

   /* Zero-pad to multiple of 4 dwords */
   for (uint32_t i = sizedwords; i < align_sz; i++) {
      OUT_RING(ring, 0);
   }
}

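/*
 * Emit the shader's immediate constants.  If the assembler reserved a const
 * for the workgroup count (info.numwg != INVALID_REG), patch the grid size
 * into the immediates before uploading them.
 */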
static void
cs_const_emit(struct fd_ringbuffer *ring, struct kernel *kernel, uint32_t grid[3])
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct ir3_shader_variant *v = ir3_kernel->v;

   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t base = const_state->offsets.immediate;
   int size = const_state->immediates_count;

   if (ir3_kernel->info.numwg != INVALID_REG) {
      assert((ir3_kernel->info.numwg & 0x3) == 0);
      int idx = ir3_kernel->info.numwg >> 2;
      const_state->immediates[idx].val[0] = grid[0];
      const_state->immediates[idx].val[1] = grid[1];
      const_state->immediates[idx].val[2] = grid[2];
   }

   /* truncate size to avoid writing constants that shader
    * does not use:
    */
   size = MIN2(size + base, v->constlen) - base;

   /* convert out of vec4: */
   base *= 4;
   size *= 4;

   if (size > 0) {
      emit_const(ring, base, size, const_state->immediates[0].val);
   }
}

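/*
 * Build an IBO descriptor for each of the kernel's buffers in a streaming
 * state ringbuffer (16 dwords per buffer, encoded as 1D 32_UINT images with
 * the element count split between WIDTH and HEIGHT), then load them into
 * the CS state block and point SP_CS_IBO / SP_CS_IBO_COUNT at them.
 */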
static void
cs_ibo_emit(struct fd_ringbuffer *ring, struct fd_submit *submit,
            struct kernel *kernel)
{
   struct fd_ringbuffer *state =
      fd_submit_new_ringbuffer(submit,
            kernel->num_bufs * 16 * 4,
            FD_RINGBUFFER_STREAMING);

   for (unsigned i = 0; i < kernel->num_bufs; i++) {
      /* size is encoded with low 15b in WIDTH and high bits in HEIGHT,
       * in units of elements:
       */
      unsigned sz = kernel->buf_sizes[i];
      unsigned width = sz & MASK(15);
      unsigned height = sz >> 15;

      OUT_RING(state, A6XX_IBO_0_FMT(FMT6_32_UINT) |
               A6XX_IBO_0_TILE_MODE(0));
      OUT_RING(state, A6XX_IBO_1_WIDTH(width) |
               A6XX_IBO_1_HEIGHT(height));
      OUT_RING(state, A6XX_IBO_2_PITCH(0) |
               A6XX_IBO_2_UNK4 | A6XX_IBO_2_UNK31 |
               A6XX_IBO_2_TYPE(A6XX_TEX_1D));
      OUT_RING(state, A6XX_IBO_3_ARRAY_PITCH(0));
      OUT_RELOC(state, kernel->bufs[i], 0, 0, 0);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
      OUT_RING(state, 0x00000000);
   }

   OUT_PKT7(ring, CP_LOAD_STATE6_FRAG, 3);
   OUT_RING(ring, CP_LOAD_STATE6_0_DST_OFF(0) |
            CP_LOAD_STATE6_0_STATE_TYPE(ST6_IBO) |
            CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
            CP_LOAD_STATE6_0_STATE_BLOCK(SB6_CS_SHADER) |
            CP_LOAD_STATE6_0_NUM_UNIT(kernel->num_bufs));
   OUT_RB(ring, state);

   OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_LO, 2);
   OUT_RB(ring, state);

   OUT_PKT4(ring, REG_A6XX_SP_CS_IBO_COUNT, 1);
   OUT_RING(ring, kernel->num_bufs);

   fd_ringbuffer_del(state);
}

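/*
 * Emit a CP_EVENT_WRITE for the given event.  If 'timestamp' is set, the CP
 * also writes an incrementing seqno into the control buffer when the event
 * completes, and the seqno is returned so callers can wait on it.
 */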
static inline unsigned
event_write(struct fd_ringbuffer *ring, struct kernel *kernel,
            enum vgt_event_type evt, bool timestamp)
{
   unsigned seqno = 0;

   OUT_PKT7(ring, CP_EVENT_WRITE, timestamp ? 4 : 1);
   OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(evt));
   if (timestamp) {
      struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
      struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
      seqno = ++a6xx_backend->seqno;
      OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));   /* ADDR_LO/HI */
      OUT_RING(ring, seqno);
   }

   return seqno;
}

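/*
 * Wait for the dispatch to finish and its writes to land: emit a fenced
 * RB_DONE_TS and poll the seqno with CP_WAIT_REG_MEM, then a fenced
 * CACHE_FLUSH_TS followed by CP_WAIT_MEM_GTE, so buffer contents should be
 * visible to the CPU afterwards.
 */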
static inline void
cache_flush(struct fd_ringbuffer *ring, struct kernel *kernel)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
   unsigned seqno;

   seqno = event_write(ring, kernel, RB_DONE_TS, true);

   OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
   OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
            CP_WAIT_REG_MEM_0_POLL_MEMORY);
   OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
   OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(seqno));
   OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(~0));
   OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(16));

   seqno = event_write(ring, kernel, CACHE_FLUSH_TS, true);

   OUT_PKT7(ring, CP_WAIT_MEM_GTE, 4);
   OUT_RING(ring, CP_WAIT_MEM_GTE_0_RESERVED(0));
   OUT_RELOC(ring, control_ptr(a6xx_backend, seqno));
   OUT_RING(ring, CP_WAIT_MEM_GTE_3_REF(seqno));
}

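/*
 * Emit a single grid launch: program state, constants and IBOs, the
 * NDRANGE (local size plus global size derived from grid * local_size),
 * optional perfcounter setup and sampling around the dispatch, CP_EXEC_CS
 * with the workgroup counts, and a final cache flush.
 */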
static void
a6xx_emit_grid(struct kernel *kernel, uint32_t grid[3], struct fd_submit *submit)
{
   struct ir3_kernel *ir3_kernel = to_ir3_kernel(kernel);
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(ir3_kernel->backend);
   struct fd_ringbuffer *ring = fd_submit_new_ringbuffer(submit, 0,
         FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);

   cs_program_emit(ring, kernel);
   cs_const_emit(ring, kernel, grid);
   cs_ibo_emit(ring, submit, kernel);

   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));

   const unsigned *local_size = kernel->local_size;
   const unsigned *num_groups = grid;

   unsigned work_dim = 0;
   for (int i = 0; i < 3; i++) {
      if (!grid[i])
         break;
      work_dim++;
   }

   OUT_PKT4(ring, REG_A6XX_HLSQ_CS_NDRANGE_0, 7);
   OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(work_dim) |
            A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(local_size[0] - 1) |
            A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(local_size[1] - 1) |
            A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(local_size[2] - 1));
   OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(local_size[0] * num_groups[0]));
   OUT_RING(ring, 0);   /* HLSQ_CS_NDRANGE_2_GLOBALOFF_X */
   OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(local_size[1] * num_groups[1]));
   OUT_RING(ring, 0);   /* HLSQ_CS_NDRANGE_4_GLOBALOFF_Y */
   OUT_RING(ring, A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(local_size[2] * num_groups[2]));
   OUT_RING(ring, 0);   /* HLSQ_CS_NDRANGE_6_GLOBALOFF_Z */

   OUT_PKT4(ring, REG_A6XX_HLSQ_CS_KERNEL_GROUP_X, 3);
   OUT_RING(ring, 1);   /* HLSQ_CS_KERNEL_GROUP_X */
   OUT_RING(ring, 1);   /* HLSQ_CS_KERNEL_GROUP_Y */
   OUT_RING(ring, 1);   /* HLSQ_CS_KERNEL_GROUP_Z */

   if (a6xx_backend->num_perfcntrs > 0) {
      a6xx_backend->query_mem = fd_bo_new(a6xx_backend->dev,
            a6xx_backend->num_perfcntrs * sizeof(struct fd6_query_sample),
            DRM_FREEDRENO_GEM_TYPE_KMEM, "query");

      /* configure the performance counters to count the requested
       * countables:
       */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT4(ring, counter->select_reg, 1);
         OUT_RING(ring, counter->selector);
      }

      OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);

      /* and snapshot the start values: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT7(ring, CP_REG_TO_MEM, 3);
         OUT_RING(ring, CP_REG_TO_MEM_0_64B |
                  CP_REG_TO_MEM_0_REG(counter->counter_reg_lo));
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));
      }
   }

   OUT_PKT7(ring, CP_EXEC_CS, 4);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, CP_EXEC_CS_1_NGROUPS_X(grid[0]));
   OUT_RING(ring, CP_EXEC_CS_2_NGROUPS_Y(grid[1]));
   OUT_RING(ring, CP_EXEC_CS_3_NGROUPS_Z(grid[2]));

   OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);

   if (a6xx_backend->num_perfcntrs > 0) {
      /* snapshot the end values: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         const struct perfcntr *counter = &a6xx_backend->perfcntrs[i];

         OUT_PKT7(ring, CP_REG_TO_MEM, 3);
         OUT_RING(ring, CP_REG_TO_MEM_0_64B |
                  CP_REG_TO_MEM_0_REG(counter->counter_reg_lo));
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, stop));
      }

      /* and compute the result: */
      for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
         /* result += stop - start: */
         OUT_PKT7(ring, CP_MEM_TO_MEM, 9);
         OUT_RING(ring, CP_MEM_TO_MEM_0_DOUBLE |
                  CP_MEM_TO_MEM_0_NEG_C);
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, result));   /* dst */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, result));   /* srcA */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, stop));     /* srcB */
         OUT_RELOC(ring, query_sample_idx(a6xx_backend, i, start));    /* srcC */
      }
   }

   cache_flush(ring, kernel);
}

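/*
 * Stash the requested perfcounters; they are programmed and sampled around
 * the dispatch in a6xx_emit_grid().
 */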
static void
a6xx_set_perfcntrs(struct backend *b, const struct perfcntr *perfcntrs,
                   unsigned num_perfcntrs)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);

   a6xx_backend->perfcntrs = perfcntrs;
   a6xx_backend->num_perfcntrs = num_perfcntrs;
}

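/*
 * Read back the accumulated counter results.  fd_bo_cpu_prep() should block
 * until the GPU has finished writing the query buffer before it is mapped.
 */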
static void
a6xx_read_perfcntrs(struct backend *b, uint64_t *results)
{
   struct a6xx_backend *a6xx_backend = to_a6xx_backend(b);

   fd_bo_cpu_prep(a6xx_backend->query_mem, NULL, DRM_FREEDRENO_PREP_READ);
   struct fd6_query_sample *samples = fd_bo_map(a6xx_backend->query_mem);

   for (unsigned i = 0; i < a6xx_backend->num_perfcntrs; i++) {
      results[i] = samples[i].result;
   }
}

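/*
 * Create the a6xx backend: set up the ir3 compiler for the given GPU and
 * allocate the small "control" buffer used for fence seqnos.
 */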
struct backend *
a6xx_init(struct fd_device *dev, uint32_t gpu_id)
{
   struct a6xx_backend *a6xx_backend = calloc(1, sizeof(*a6xx_backend));

   a6xx_backend->base = (struct backend) {
      .assemble = a6xx_assemble,
      .disassemble = a6xx_disassemble,
      .emit_grid = a6xx_emit_grid,
      .set_perfcntrs = a6xx_set_perfcntrs,
      .read_perfcntrs = a6xx_read_perfcntrs,
   };

   a6xx_backend->compiler = ir3_compiler_create(dev, gpu_id);
   a6xx_backend->dev = dev;

   a6xx_backend->control_mem = fd_bo_new(dev, 0x1000,
         DRM_FREEDRENO_GEM_TYPE_KMEM, "control");

   return &a6xx_backend->base;
}