panfrost: Print errors from kernel
[mesa.git] / src/gallium/drivers/panfrost/pan_drm.c
/*
 * © Copyright 2019 Collabora, Ltd.
 * Copyright 2019 Alyssa Rosenzweig
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <xf86drm.h>

#include "drm-uapi/panfrost_drm.h"

#include "util/u_memory.h"
#include "util/os_time.h"
#include "os/os_mman.h"

#include "pan_screen.h"
#include "pan_resource.h"
#include "pan_context.h"
#include "pan_util.h"
#include "pandecode/decode.h"

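/* Map a BO into the CPU's address space. Mapping is two-step on panfrost:
 * the MMAP_BO ioctl returns a fake offset into the DRM file, which is then
 * handed to mmap(2) on the DRM fd itself to get the actual CPU pointer. The
 * mapping is cached on the BO, so mapping an already-mapped BO is a no-op. */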
void
panfrost_drm_mmap_bo(struct panfrost_screen *screen, struct panfrost_bo *bo)
{
        struct drm_panfrost_mmap_bo mmap_bo = { .handle = bo->gem_handle };
        int ret;

        if (bo->cpu)
                return;

        ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_MMAP_BO, &mmap_bo);
        if (ret) {
                fprintf(stderr, "DRM_IOCTL_PANFROST_MMAP_BO failed: %m\n");
                assert(0);
        }

        bo->cpu = os_mmap(NULL, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                          screen->fd, mmap_bo.offset);
        if (bo->cpu == MAP_FAILED) {
                fprintf(stderr, "mmap failed: %p %m\n", bo->cpu);
                assert(0);
        }

        /* Record the mmap if we're tracing */
        if (pan_debug & PAN_DBG_TRACE)
                pandecode_inject_mmap(bo->gpu, bo->cpu, bo->size, NULL);
}

static void
panfrost_drm_munmap_bo(struct panfrost_screen *screen, struct panfrost_bo *bo)
{
        if (!bo->cpu)
                return;

        if (os_munmap((void *) (uintptr_t) bo->cpu, bo->size)) {
                perror("munmap");
                abort();
        }

        bo->cpu = NULL;
}

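/* Allocate a BO, preferring to recycle one from the BO cache over asking the
 * kernel for a fresh allocation. The HEAP and NOEXEC flags only exist on
 * kernel driver version 1.1 and up, so flag translation is gated on the
 * version reported at screen creation time. */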
struct panfrost_bo *
panfrost_drm_create_bo(struct panfrost_screen *screen, size_t size,
                       uint32_t flags)
{
        struct panfrost_bo *bo;

        /* Kernel will fail (confusingly) with EPERM otherwise */
        assert(size > 0);

        /* To maximize BO cache usage, don't allocate tiny BOs */
        size = MAX2(size, 4096);

        /* GROWABLE BOs cannot be mmapped */
        if (flags & PAN_ALLOCATE_GROWABLE)
                assert(flags & PAN_ALLOCATE_INVISIBLE);

        unsigned translated_flags = 0;

        if (screen->kernel_version->version_major > 1 ||
            screen->kernel_version->version_minor >= 1) {
                if (flags & PAN_ALLOCATE_GROWABLE)
                        translated_flags |= PANFROST_BO_HEAP;
                if (!(flags & PAN_ALLOCATE_EXECUTE))
                        translated_flags |= PANFROST_BO_NOEXEC;
        }

        struct drm_panfrost_create_bo create_bo = {
                .size = size,
                .flags = translated_flags,
        };

        /* Before creating a BO, we first want to check the cache */

        bo = panfrost_bo_cache_fetch(screen, size, flags);

        if (bo == NULL) {
                /* The cache missed, so we have to allocate a BO fresh from
                 * the kernel */

                int ret;

                ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_CREATE_BO, &create_bo);
                if (ret) {
                        fprintf(stderr, "DRM_IOCTL_PANFROST_CREATE_BO failed: %m\n");
                        assert(0);
                }

                /* We have a BO allocated from the kernel; fill in the
                 * userspace version */

                bo = rzalloc(screen, struct panfrost_bo);
                bo->size = create_bo.size;
                bo->gpu = create_bo.offset;
                bo->gem_handle = create_bo.handle;
                bo->flags = flags;
        }

        /* Only mmap now if we know we need to. For CPU-invisible buffers, we
         * never map since we don't care about their contents; they're purely
         * for GPU-internal use. But we do trace them anyway. */

        if (!(flags & (PAN_ALLOCATE_INVISIBLE | PAN_ALLOCATE_DELAY_MMAP)))
                panfrost_drm_mmap_bo(screen, bo);
        else if (flags & PAN_ALLOCATE_INVISIBLE) {
                if (pan_debug & PAN_DBG_TRACE)
                        pandecode_inject_mmap(bo->gpu, NULL, bo->size, NULL);
        }

        pipe_reference_init(&bo->reference, 1);
        return bo;
}

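/* Free a BO, stashing it in the BO cache for later reuse when permitted;
 * otherwise unmap it and close its GEM handle immediately. */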
void
panfrost_drm_release_bo(struct panfrost_screen *screen, struct panfrost_bo *bo, bool cacheable)
{
        int ret;

        if (!bo)
                return;

        /* Rather than freeing the BO now, we'll cache the BO for later
         * allocations if we're allowed to */

        if (cacheable) {
                bool cached = panfrost_bo_cache_put(screen, bo);

                if (cached)
                        return;
        }

        /* Otherwise, if the BO wasn't cached, we'll legitimately free the BO */

        panfrost_drm_munmap_bo(screen, bo);

        struct drm_gem_close gem_close = { .handle = bo->gem_handle };
        ret = drmIoctl(screen->fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
        if (ret) {
                fprintf(stderr, "DRM_IOCTL_GEM_CLOSE failed: %m\n");
                assert(0);
        }

        ralloc_free(bo);
}

void
panfrost_drm_allocate_slab(struct panfrost_screen *screen,
                           struct panfrost_memory *mem,
                           size_t pages,
                           bool same_va,
                           int extra_flags,
                           int commit_count,
                           int extent)
{
        // TODO cache allocations
        // TODO properly handle errors
        // TODO take into account extra_flags
        mem->bo = panfrost_drm_create_bo(screen, pages * 4096, extra_flags);
        mem->stack_bottom = 0;
}

void
panfrost_drm_free_slab(struct panfrost_screen *screen, struct panfrost_memory *mem)
{
        panfrost_bo_unreference(&screen->base, mem->bo);
        mem->bo = NULL;
}

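/* Import a BO shared via a dma-buf fd: convert the prime fd to a GEM handle,
 * query the BO's GPU address with GET_BO_OFFSET, and recover its size by
 * seeking to the end of the fd. */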
struct panfrost_bo *
panfrost_drm_import_bo(struct panfrost_screen *screen, int fd)
{
        struct panfrost_bo *bo = rzalloc(screen, struct panfrost_bo);
        struct drm_panfrost_get_bo_offset get_bo_offset = {0,};
        ASSERTED int ret;
        unsigned gem_handle;

        ret = drmPrimeFDToHandle(screen->fd, fd, &gem_handle);
        assert(!ret);

        get_bo_offset.handle = gem_handle;
        ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_BO_OFFSET, &get_bo_offset);
        assert(!ret);

        bo->gem_handle = gem_handle;
        bo->gpu = (mali_ptr) get_bo_offset.offset;
        bo->size = lseek(fd, 0, SEEK_END);
        assert(bo->size > 0);
        pipe_reference_init(&bo->reference, 1);

        // TODO map and unmap on demand?
        panfrost_drm_mmap_bo(screen, bo);
        return bo;
}

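/* Export a BO as a dma-buf fd for sharing with another process or device,
 * returning the fd on success or -1 on failure. */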
int
panfrost_drm_export_bo(struct panfrost_screen *screen, const struct panfrost_bo *bo)
{
        struct drm_prime_handle args = {
                .handle = bo->gem_handle,
                .flags = DRM_CLOEXEC,
        };

        int ret = drmIoctl(screen->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
        if (ret == -1)
                return -1;

        return args.fd;
}

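/* Submit a job chain to the kernel. The context's syncobj is passed as both
 * the in-sync and the out-sync, so each submission waits for the previous
 * one to signal before running. Every BO the job touches must be listed in
 * bo_handles so the kernel keeps a reference to it for the job's duration. */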
static int
panfrost_drm_submit_job(struct panfrost_context *ctx, u64 job_desc, int reqs)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);
        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);
        struct drm_panfrost_submit submit = {0,};
        int *bo_handles, ret;

        submit.in_syncs = (u64) (uintptr_t) &ctx->out_sync;
        submit.in_sync_count = 1;

        submit.out_sync = ctx->out_sync;

        submit.jc = job_desc;
        submit.requirements = reqs;

        bo_handles = calloc(job->bos->entries, sizeof(*bo_handles));
        assert(bo_handles);

        set_foreach(job->bos, entry) {
                struct panfrost_bo *bo = (struct panfrost_bo *) entry->key;
                assert(bo->gem_handle > 0);
                bo_handles[submit.bo_handle_count++] = bo->gem_handle;
        }

        submit.bo_handles = (u64) (uintptr_t) bo_handles;
        ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
        free(bo_handles);
        if (ret) {
                fprintf(stderr, "Error submitting: %m\n");
                return errno;
        }

        /* Trace the job if we're doing that */
        if (pan_debug & PAN_DBG_TRACE) {
                /* Wait so we can get errors reported back */
                drmSyncobjWait(screen->fd, &ctx->out_sync, 1, INT64_MAX, 0, NULL);
                pandecode_jc(submit.jc, FALSE);
        }

        return 0;
}

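/* Submit the vertex/tiler chain and the fragment job for the current FBO,
 * after attaching the BOs they reference. The fragment job is only submitted
 * when there is tiling output or a clear to write out. */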
int
panfrost_drm_submit_vs_fs_job(struct panfrost_context *ctx, bool has_draws, bool is_scanout)
{
        int ret = 0;

        struct panfrost_job *job = panfrost_get_job_for_fbo(ctx);

        /* TODO: Add the transient pools here */
        panfrost_job_add_bo(job, ctx->scratchpad.bo);
        panfrost_job_add_bo(job, ctx->tiler_heap.bo);
        panfrost_job_add_bo(job, job->polygon_list);

        if (job->first_job.gpu) {
                ret = panfrost_drm_submit_job(ctx, job->first_job.gpu, 0);
                assert(!ret);
        }

        if (job->first_tiler.gpu || job->clear) {
                ret = panfrost_drm_submit_job(ctx, panfrost_fragment_job(ctx, has_draws), PANFROST_JD_REQ_FS);
                assert(!ret);
        }

        return ret;
}

static struct panfrost_fence *
panfrost_fence_create(struct panfrost_context *ctx)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);
        struct panfrost_fence *f = calloc(1, sizeof(*f));
        if (!f)
                return NULL;

        /* Snapshot the out fence of the last Panfrost rendering. We'd rather
         * have another syncobj instead of a sync file, but this is all we
         * get. (HandleToFD/FDToHandle just gives you another syncobj ID for
         * the same syncobj).
         */
        int ret = drmSyncobjExportSyncFile(screen->fd, ctx->out_sync, &f->fd);
        if (ret || f->fd == -1) {
                fprintf(stderr, "export failed: %m\n");
                free(f);
                return NULL;
        }

        pipe_reference_init(&f->reference, 1);

        return f;
}

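/* Wait for the last fragment job to land if it hasn't already, then
 * optionally hand back a fence snapshotting the rendering that just
 * finished. */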
void
panfrost_drm_force_flush_fragment(struct panfrost_context *ctx,
                                  struct pipe_fence_handle **fence)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);

        if (!screen->last_fragment_flushed) {
                drmSyncobjWait(screen->fd, &ctx->out_sync, 1, INT64_MAX, 0, NULL);
                screen->last_fragment_flushed = true;

                /* The job finished up, so we're safe to clean it up now */
                panfrost_free_job(ctx, screen->last_job);
        }

        if (fence) {
                struct panfrost_fence *f = panfrost_fence_create(ctx);
                gallium->screen->fence_reference(gallium->screen, fence, NULL);
                *fence = (struct pipe_fence_handle *) f;
        }
}

unsigned
panfrost_drm_query_gpu_version(struct panfrost_screen *screen)
{
        struct drm_panfrost_get_param get_param = {0,};
        ASSERTED int ret;

        get_param.param = DRM_PANFROST_PARAM_GPU_PROD_ID;
        ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_PARAM, &get_param);
        assert(!ret);

        return get_param.value;
}

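/* The per-context syncobj is created already signaled; the first submission
 * waits on it as an in-sync (see panfrost_drm_submit_job), so an unsignaled
 * syncobj would stall forever. */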
int
panfrost_drm_init_context(struct panfrost_context *ctx)
{
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);

        return drmSyncobjCreate(screen->fd, DRM_SYNCOBJ_CREATE_SIGNALED,
                                &ctx->out_sync);
}

void
panfrost_drm_fence_reference(struct pipe_screen *screen,
                             struct pipe_fence_handle **ptr,
                             struct pipe_fence_handle *fence)
{
        struct panfrost_fence **p = (struct panfrost_fence **) ptr;
        struct panfrost_fence *f = (struct panfrost_fence *) fence;
        struct panfrost_fence *old = *p;

        if (pipe_reference(&(*p)->reference, &f->reference)) {
                close(old->fd);
                free(old);
        }
        *p = f;
}

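/* Wait on a fence with a timeout. The fence wraps a sync file, so we import
 * it into a throwaway syncobj, wait on that, and destroy the syncobj when
 * we're done. */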
boolean
panfrost_drm_fence_finish(struct pipe_screen *pscreen,
                          struct pipe_context *ctx,
                          struct pipe_fence_handle *fence,
                          uint64_t timeout)
{
        struct panfrost_screen *screen = pan_screen(pscreen);
        struct panfrost_fence *f = (struct panfrost_fence *) fence;
        int ret;

        unsigned syncobj;
        ret = drmSyncobjCreate(screen->fd, 0, &syncobj);
        if (ret) {
                fprintf(stderr, "Failed to create syncobj to wait on: %m\n");
                return false;
        }

        ret = drmSyncobjImportSyncFile(screen->fd, syncobj, f->fd);
        if (ret) {
                fprintf(stderr, "Failed to import fence to syncobj: %m\n");
                drmSyncobjDestroy(screen->fd, syncobj);
                return false;
        }

        uint64_t abs_timeout = os_time_get_absolute_timeout(timeout);
        if (abs_timeout == OS_TIMEOUT_INFINITE)
                abs_timeout = INT64_MAX;

        ret = drmSyncobjWait(screen->fd, &syncobj, 1, abs_timeout, 0, NULL);

        drmSyncobjDestroy(screen->fd, syncobj);

        return ret >= 0;
}