2 * Copyright (C) 2008 VMware, Inc.
3 * Copyright (C) 2014 Broadcom
4 * Copyright (C) 2018 Alyssa Rosenzweig
5 * Copyright (C) 2019 Collabora, Ltd.
6 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
24 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
29 #include "util/u_debug.h"
30 #include "util/u_memory.h"
31 #include "util/format/u_format.h"
32 #include "util/format/u_format_s3tc.h"
33 #include "util/u_video.h"
34 #include "util/u_screen.h"
35 #include "util/os_time.h"
36 #include "util/u_process.h"
37 #include "pipe/p_defines.h"
38 #include "pipe/p_screen.h"
39 #include "draw/draw_context.h"
43 #include "drm-uapi/drm_fourcc.h"
44 #include "drm-uapi/panfrost_drm.h"
47 #include "pan_screen.h"
48 #include "pan_resource.h"
49 #include "pan_public.h"
51 #include "pandecode/decode.h"
53 #include "pan_context.h"
54 #include "midgard/midgard_compile.h"
55 #include "panfrost-quirks.h"
57 static const struct debug_named_value debug_options
[] = {
58 {"msgs", PAN_DBG_MSGS
, "Print debug messages"},
59 {"trace", PAN_DBG_TRACE
, "Trace the command stream"},
60 {"deqp", PAN_DBG_DEQP
, "Hacks for dEQP"},
61 {"afbc", PAN_DBG_AFBC
, "Enable non-conformant AFBC impl"},
62 {"sync", PAN_DBG_SYNC
, "Wait for each job's completion and check for any GPU fault"},
63 {"precompile", PAN_DBG_PRECOMPILE
, "Precompile shaders for shader-db"},
64 {"gles3", PAN_DBG_GLES3
, "Enable experimental GLES3 implementation"},
68 DEBUG_GET_ONCE_FLAGS_OPTION(pan_debug
, "PAN_MESA_DEBUG", debug_options
, 0)
73 panfrost_get_name(struct pipe_screen
*screen
)
75 return panfrost_model_name(pan_screen(screen
)->gpu_id
);
/* pipe_screen::get_vendor hook.
 * NOTE(review): the return type, braces and body of this function are
 * missing from this chunk — upstream it returns a constant driver
 * vendor string; confirm against the complete file. */
79 panfrost_get_vendor(struct pipe_screen
*screen
)
/* pipe_screen::get_device_vendor hook.
 * NOTE(review): the return type, braces and body of this function are
 * missing from this chunk — upstream it returns a constant hardware
 * vendor string; confirm against the complete file. */
85 panfrost_get_device_vendor(struct pipe_screen
*screen
)
/* pipe_screen::get_param hook: integer capability queries.
 * NOTE(review): the switch header, the function braces and most
 * per-case return values are missing from this chunk — only the case
 * labels and a handful of returns survive.  Verify concrete cap values
 * against the complete file before relying on them. */
91 panfrost_get_param(struct pipe_screen
*screen
, enum pipe_cap param
)
93 /* We expose in-dev stuff for dEQP that we don't want apps to use yet */
94 bool is_deqp
= pan_debug
& PAN_DBG_DEQP
;
96 /* Our GLES3 implementation is WIP */
97 bool is_gles3
= pan_debug
& PAN_DBG_GLES3
;
101 case PIPE_CAP_NPOT_TEXTURES
:
102 case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES
:
103 case PIPE_CAP_MIXED_COLOR_DEPTH_BITS
:
104 case PIPE_CAP_FRAGMENT_SHADER_TEXTURE_LOD
:
105 case PIPE_CAP_VERTEX_SHADER_SATURATE
:
106 case PIPE_CAP_POINT_SPRITE
:
/* MRT count is raised to 4 only on the experimental GLES3 path. */
109 case PIPE_CAP_MAX_RENDER_TARGETS
:
110 return is_gles3
? 4 : 1;
112 /* Throttling frames breaks pipelining */
113 case PIPE_CAP_THROTTLE
:
116 case PIPE_CAP_OCCLUSION_QUERY
:
118 case PIPE_CAP_QUERY_TIME_ELAPSED
:
119 case PIPE_CAP_QUERY_PIPELINE_STATISTICS
:
120 case PIPE_CAP_QUERY_TIMESTAMP
:
121 case PIPE_CAP_QUERY_SO_OVERFLOW
:
124 case PIPE_CAP_TEXTURE_SWIZZLE
:
127 case PIPE_CAP_TEXTURE_MIRROR_CLAMP
:
128 case PIPE_CAP_TEXTURE_MIRROR_CLAMP_TO_EDGE
:
131 case PIPE_CAP_TGSI_INSTANCEID
:
132 case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR
:
133 case PIPE_CAP_PRIMITIVE_RESTART
:
/* Transform feedback limits are nonzero only with PAN_DBG_GLES3. */
136 case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS
:
137 return is_gles3
? 4 : 0;
138 case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS
:
139 case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS
:
140 return is_gles3
? 64 : 0;
141 case PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS
:
144 case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS
:
/* GLSL 1.40 when the experimental GLES3 path is on, else GLSL 1.20;
 * ESSL 3.00 vs 1.20 likewise. */
147 case PIPE_CAP_GLSL_FEATURE_LEVEL
:
148 case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY
:
149 return is_gles3
? 140 : 120;
150 case PIPE_CAP_ESSL_FEATURE_LEVEL
:
151 return is_gles3
? 300 : 120;
153 case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT
:
158 case PIPE_CAP_TEXTURE_MULTISAMPLE
:
161 /* For faking GLES 3.1 for dEQP-GLES31 */
162 case PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTERS
:
163 case PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTER_BUFFERS
:
164 case PIPE_CAP_IMAGE_LOAD_FORMATTED
:
165 case PIPE_CAP_CUBE_MAP_ARRAY
:
168 /* For faking compute shaders */
169 case PIPE_CAP_COMPUTE
:
172 /* TODO: Where does this req come from in practice? */
173 case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY
:
176 case PIPE_CAP_MAX_TEXTURE_2D_SIZE
:
178 case PIPE_CAP_MAX_TEXTURE_3D_LEVELS
:
179 case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS
:
182 case PIPE_CAP_BLEND_EQUATION_SEPARATE
:
183 case PIPE_CAP_INDEP_BLEND_ENABLE
:
184 case PIPE_CAP_INDEP_BLEND_FUNC
:
187 case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT
:
188 /* Hardware is natively upper left */
191 case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT
:
192 case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER
:
193 case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER
:
194 case PIPE_CAP_GENERATE_MIPMAP
:
197 /* We would prefer varyings */
198 case PIPE_CAP_TGSI_FS_FACE_IS_INTEGER_SYSVAL
:
199 case PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL
:
202 /* I really don't want to set this CAP but let's not swim against the
204 case PIPE_CAP_TGSI_TEXCOORD
:
207 case PIPE_CAP_SEAMLESS_CUBE_MAP
:
208 case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE
:
211 case PIPE_CAP_MAX_VERTEX_ELEMENT_SRC_OFFSET
:
214 case PIPE_CAP_TEXTURE_BUFFER_OBJECTS
:
217 case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE
:
220 case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER
:
223 case PIPE_CAP_ENDIANNESS
:
224 return PIPE_ENDIAN_NATIVE
;
226 case PIPE_CAP_SAMPLER_VIEW_TARGET
:
229 case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET
:
232 case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET
:
235 case PIPE_CAP_VENDOR_ID
:
236 case PIPE_CAP_DEVICE_ID
:
239 case PIPE_CAP_ACCELERATED
:
241 case PIPE_CAP_TEXTURE_FLOAT_LINEAR
:
242 case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR
:
243 case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS
:
244 case PIPE_CAP_TGSI_ARRAY_COMPONENTS
:
/* UMA GPU: report total physical RAM, converted to MiB, as "video
 * memory". */
247 case PIPE_CAP_VIDEO_MEMORY
: {
248 uint64_t system_memory
;
250 if (!os_get_total_physical_memory(&system_memory
))
253 return (int)(system_memory
>> 20);
256 case PIPE_CAP_SHADER_STENCIL_EXPORT
:
259 case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT
:
262 case PIPE_CAP_MAX_VARYINGS
:
265 case PIPE_CAP_ALPHA_TEST
:
266 case PIPE_CAP_FLATSHADE
:
267 case PIPE_CAP_TWO_SIDED_COLOR
:
268 case PIPE_CAP_CLIP_PLANES
:
/* Anything not handled above falls back to the shared gallium
 * defaults. */
272 return u_pipe_screen_get_param_defaults(screen
, param
);
/* pipe_screen::get_shader_param hook: per-stage shader limits.
 * Only vertex and fragment stages are supported; compute is admitted
 * solely when the PAN_DBG_DEQP hack flag is set.
 * NOTE(review): the switch header, function braces and several
 * per-case returns are missing from this chunk — verify values against
 * the complete file. */
277 panfrost_get_shader_param(struct pipe_screen
*screen
,
278 enum pipe_shader_type shader
,
279 enum pipe_shader_cap param
)
281 bool is_deqp
= pan_debug
& PAN_DBG_DEQP
;
/* Reject any stage other than VS/FS (and CS under the dEQP hack). */
283 if (shader
!= PIPE_SHADER_VERTEX
&&
284 shader
!= PIPE_SHADER_FRAGMENT
&&
285 !(shader
== PIPE_SHADER_COMPUTE
&& is_deqp
))
288 /* this is probably not totally correct.. but it's a start: */
290 case PIPE_SHADER_CAP_MAX_INSTRUCTIONS
:
291 case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS
:
292 case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS
:
293 case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS
:
296 case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH
:
299 case PIPE_SHADER_CAP_MAX_INPUTS
:
/* Fragment shaders are limited to 4 outputs (render targets); other
 * stages get 16. */
302 case PIPE_SHADER_CAP_MAX_OUTPUTS
:
303 return shader
== PIPE_SHADER_FRAGMENT
? 4 : 16;
305 case PIPE_SHADER_CAP_MAX_TEMPS
:
306 return 256; /* GL_MAX_PROGRAM_TEMPORARIES_ARB */
308 case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE
:
309 return 16 * 1024 * sizeof(float);
311 case PIPE_SHADER_CAP_MAX_CONST_BUFFERS
:
312 return PAN_MAX_CONST_BUFFERS
;
314 case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED
:
317 case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR
:
319 case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR
:
322 case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR
:
325 case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR
:
328 case PIPE_SHADER_CAP_SUBROUTINES
:
331 case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED
:
334 case PIPE_SHADER_CAP_INTEGERS
:
337 case PIPE_SHADER_CAP_INT64_ATOMICS
:
338 case PIPE_SHADER_CAP_FP16
:
339 case PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED
:
340 case PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED
:
341 case PIPE_SHADER_CAP_TGSI_LDEXP_SUPPORTED
:
342 case PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED
:
343 case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE
:
346 case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS
:
347 case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS
:
348 return 16; /* XXX: How many? */
/* Shaders are consumed as NIR (optionally serialized). */
350 case PIPE_SHADER_CAP_PREFERRED_IR
:
351 return PIPE_SHADER_IR_NIR
;
353 case PIPE_SHADER_CAP_SUPPORTED_IRS
:
354 return (1 << PIPE_SHADER_IR_NIR
) | (1 << PIPE_SHADER_IR_NIR_SERIALIZED
);
356 case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT
:
/* SSBOs and images are advertised only under the dEQP hack flag. */
359 case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS
:
360 case PIPE_SHADER_CAP_MAX_SHADER_IMAGES
:
361 return is_deqp
? 8 : 0;
362 case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS
:
363 case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS
:
366 case PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS
:
367 case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD
:
/* Unhandled params are reported in debug builds. */
371 DBG("unknown shader param %d\n", param
);
/* pipe_screen::get_paramf hook: floating-point capability limits.
 * NOTE(review): the switch header, function braces and several
 * per-case returns are missing from this chunk — e.g. the point-width
 * and anisotropy values are not visible; confirm against the complete
 * file. */
379 panfrost_get_paramf(struct pipe_screen
*screen
, enum pipe_capf param
)
382 case PIPE_CAPF_MAX_LINE_WIDTH
:
385 case PIPE_CAPF_MAX_LINE_WIDTH_AA
:
386 return 255.0; /* arbitrary */
388 case PIPE_CAPF_MAX_POINT_WIDTH
:
391 case PIPE_CAPF_MAX_POINT_WIDTH_AA
:
394 case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY
:
397 case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS
:
398 return 16.0; /* arbitrary */
400 case PIPE_CAPF_MIN_CONSERVATIVE_RASTER_DILATE
:
401 case PIPE_CAPF_MAX_CONSERVATIVE_RASTER_DILATE
:
402 case PIPE_CAPF_CONSERVATIVE_RASTER_DILATE_GRANULARITY
:
/* Unexpected caps are logged rather than silently answered. */
406 debug_printf("Unexpected PIPE_CAPF %d query\n", param
);
/*
 * pipe_screen::is_format_supported hook.
 * NOTE(review): the return type, braces and many return statements are
 * missing from this chunk; only the guard conditions and case labels
 * survive.  Confirm accept/reject outcomes against the complete file.
 */
412 * Query format support for creating a texture, drawing surface, etc.
413 * \param format the format to test
414 * \param type one of PIPE_TEXTURE, PIPE_SURFACE
417 panfrost_is_format_supported( struct pipe_screen
*screen
,
418 enum pipe_format format
,
419 enum pipe_texture_target target
,
420 unsigned sample_count
,
421 unsigned storage_sample_count
,
424 const struct util_format_description
*format_desc
;
/* Only these texture targets are ever passed in by the state tracker. */
426 assert(target
== PIPE_BUFFER
||
427 target
== PIPE_TEXTURE_1D
||
428 target
== PIPE_TEXTURE_1D_ARRAY
||
429 target
== PIPE_TEXTURE_2D
||
430 target
== PIPE_TEXTURE_2D_ARRAY
||
431 target
== PIPE_TEXTURE_RECT
||
432 target
== PIPE_TEXTURE_3D
||
433 target
== PIPE_TEXTURE_CUBE
||
434 target
== PIPE_TEXTURE_CUBE_ARRAY
);
436 format_desc
= util_format_description(format
);
441 /* MSAA 4x supported, but no more. Technically some revisions of the
442 * hardware can go up to 16x but we don't support higher modes yet. */
/* MSAA is gated behind the dEQP hack flag, and capped at 4x. */
444 if (sample_count
> 1 && !(pan_debug
& PAN_DBG_DEQP
))
447 if (sample_count
> 4)
450 if (MAX2(sample_count
, 1) != MAX2(storage_sample_count
, 1))
453 /* Format wishlist */
454 if (format
== PIPE_FORMAT_X8Z24_UNORM
)
457 if (format
== PIPE_FORMAT_A1B5G5R5_UNORM
|| format
== PIPE_FORMAT_X1B5G5R5_UNORM
)
461 if (format
== PIPE_FORMAT_B5G5R5A1_UNORM
)
464 /* Don't confuse poorly written apps (workaround dEQP bug) that expect
465 * more alpha than they ask for */
/* Scanout/shared render targets must be an RGBA8 variant. */
467 bool scanout
= bind
& (PIPE_BIND_SCANOUT
| PIPE_BIND_SHARED
| PIPE_BIND_DISPLAY_TARGET
);
468 bool renderable
= bind
& PIPE_BIND_RENDER_TARGET
;
470 if (scanout
&& renderable
&& !util_format_is_rgba8_variant(format_desc
))
/* Only plain/other/ETC/ASTC format layouts are supported. */
473 switch (format_desc
->layout
) {
474 case UTIL_FORMAT_LAYOUT_PLAIN
:
475 case UTIL_FORMAT_LAYOUT_OTHER
:
477 case UTIL_FORMAT_LAYOUT_ETC
:
478 case UTIL_FORMAT_LAYOUT_ASTC
:
484 /* Internally, formats that are depth/stencil renderable are limited.
486 * In particular: Z16, Z24, Z24S8, S8 are all identical from the GPU
487 * rendering perspective. That is, we render to Z24S8 (which we can
488 * AFBC compress), ignore the different when texturing (who cares?),
489 * and then in the off-chance there's a CPU read we blit back to
492 * ...alternatively, we can make the state tracker deal with that. */
494 if (bind
& PIPE_BIND_DEPTH_STENCIL
) {
496 case PIPE_FORMAT_Z24_UNORM_S8_UINT
:
497 case PIPE_FORMAT_Z24X8_UNORM
:
498 case PIPE_FORMAT_Z32_UNORM
:
499 case PIPE_FORMAT_Z32_FLOAT
:
500 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
:
/* pipe_screen::get_compute_param hook.  Compute support is a stub:
 * it is advertised only under the PAN_DBG_DEQP hack flag, and most of
 * the limits below are placeholders (see the TODO markers).
 * NOTE(review): the switch header, braces and the RET macro's
 * return statement are missing from this chunk. */
512 panfrost_get_compute_param(struct pipe_screen
*pscreen
, enum pipe_shader_ir ir_type
,
513 enum pipe_compute_cap param
, void *ret
)
515 const char * const ir
= "panfrost";
/* Hide compute entirely unless the dEQP hacks are enabled. */
517 if (!(pan_debug
& PAN_DBG_DEQP
))
/* RET(x) copies the literal array x into the caller's buffer. */
520 #define RET(x) do { \
522 memcpy(ret, x, sizeof(x)); \
527 case PIPE_COMPUTE_CAP_ADDRESS_BITS
:
528 RET((uint32_t []){ 64 });
530 case PIPE_COMPUTE_CAP_IR_TARGET
:
532 sprintf(ret
, "%s", ir
);
533 return strlen(ir
) * sizeof(char);
535 case PIPE_COMPUTE_CAP_GRID_DIMENSION
:
536 RET((uint64_t []) { 3 });
538 case PIPE_COMPUTE_CAP_MAX_GRID_SIZE
:
539 RET(((uint64_t []) { 65535, 65535, 65535 }));
541 case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE
:
542 RET(((uint64_t []) { 1024, 1024, 64 }));
544 case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK
:
545 RET((uint64_t []) { 1024 });
547 case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE
:
548 RET((uint64_t []) { 1024*1024*512 /* Maybe get memory */ });
550 case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE
:
551 RET((uint64_t []) { 32768 });
553 case PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE
:
554 case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE
:
555 RET((uint64_t []) { 4096 });
557 case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE
:
558 RET((uint64_t []) { 1024*1024*512 /* Maybe get memory */ });
560 case PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY
:
561 RET((uint32_t []) { 800 /* MHz -- TODO */ });
563 case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS
:
564 RET((uint32_t []) { 9999 }); // TODO
566 case PIPE_COMPUTE_CAP_IMAGES_SUPPORTED
:
567 RET((uint32_t []) { 1 }); // TODO
569 case PIPE_COMPUTE_CAP_SUBGROUP_SIZE
:
570 RET((uint32_t []) { 32 }); // TODO
572 case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK
:
573 RET((uint64_t []) { 1024 }); // TODO
/* pipe_screen::destroy hook: tears down the screen — evicts the BO
 * cache, destroys its locks and frees the cached kernel version info.
 * NOTE(review): the return type, braces and any trailing cleanup
 * (e.g. freeing the screen allocation itself) are missing from this
 * chunk — confirm against the complete file. */
580 panfrost_destroy_screen(struct pipe_screen
*pscreen
)
582 struct panfrost_screen
*screen
= pan_screen(pscreen
);
583 panfrost_bo_cache_evict_all(screen
);
584 pthread_mutex_destroy(&screen
->bo_cache
.lock
);
585 pthread_mutex_destroy(&screen
->active_bos_lock
);
586 drmFreeVersion(screen
->kernel_version
);
591 panfrost_get_timestamp(struct pipe_screen
*_screen
)
593 return os_time_get_nano();
/* pipe_screen::fence_reference hook: retargets *ptr to fence with
 * refcounting.  When the old fence's count drops to zero, its exported
 * sync-file fds are iterated and its dynarray destroyed.
 * NOTE(review): the return type, braces, the per-fd close() inside the
 * foreach, the free of the old fence and the final pointer assignment
 * are missing from this chunk — confirm against the complete file. */
597 panfrost_fence_reference(struct pipe_screen
*pscreen
,
598 struct pipe_fence_handle
**ptr
,
599 struct pipe_fence_handle
*fence
)
601 struct panfrost_fence
**p
= (struct panfrost_fence
**)ptr
;
602 struct panfrost_fence
*f
= (struct panfrost_fence
*)fence
;
603 struct panfrost_fence
*old
= *p
;
/* pipe_reference returns true when the old reference must be
 * destroyed. */
605 if (pipe_reference(&(*p
)->reference
, &f
->reference
)) {
606 util_dynarray_foreach(&old
->syncfds
, int, fd
)
608 util_dynarray_fini(&old
->syncfds
);
/* pipe_screen::fence_finish hook: waits (up to `timeout` ns) for all
 * sync-file fds carried by the fence.  Each fd is imported into a
 * fresh DRM syncobj, then all syncobjs are waited on together and
 * destroyed afterwards.
 * NOTE(review): the return type, braces, early-return for the
 * already-signaled case, error handling after the drmSyncobj* calls
 * and the final return value are missing from this chunk — confirm
 * against the complete file. */
615 panfrost_fence_finish(struct pipe_screen
*pscreen
,
616 struct pipe_context
*ctx
,
617 struct pipe_fence_handle
*fence
,
620 struct panfrost_screen
*screen
= pan_screen(pscreen
);
621 struct panfrost_fence
*f
= (struct panfrost_fence
*)fence
;
622 struct util_dynarray syncobjs
;
625 /* All fences were already signaled */
626 if (!util_dynarray_num_elements(&f
->syncfds
, int))
/* Import each exported sync-file fd into a temporary syncobj. */
629 util_dynarray_init(&syncobjs
, NULL
);
630 util_dynarray_foreach(&f
->syncfds
, int, fd
) {
633 ret
= drmSyncobjCreate(screen
->fd
, 0, &syncobj
);
636 ret
= drmSyncobjImportSyncFile(screen
->fd
, syncobj
, *fd
);
638 util_dynarray_append(&syncobjs
, uint32_t, syncobj
);
/* Convert the relative timeout to absolute; INT64_MAX encodes an
 * infinite wait for drmSyncobjWait. */
641 uint64_t abs_timeout
= os_time_get_absolute_timeout(timeout
);
642 if (abs_timeout
== OS_TIMEOUT_INFINITE
)
643 abs_timeout
= INT64_MAX
;
/* Wait for ALL syncobjs at once. */
645 ret
= drmSyncobjWait(screen
->fd
, util_dynarray_begin(&syncobjs
),
646 util_dynarray_num_elements(&syncobjs
, uint32_t),
647 abs_timeout
, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
,
/* Clean up the temporary syncobjs regardless of the wait outcome. */
650 util_dynarray_foreach(&syncobjs
, uint32_t, syncobj
)
651 drmSyncobjDestroy(screen
->fd
, *syncobj
);
/* Builds a panfrost_fence from a list of batch fences: every pending
 * (unsignaled) batch fence has its syncobj exported as a sync-file fd,
 * and the fds are collected into the new fence, which starts with one
 * reference.  Caller takes ownership of the returned fence.
 * NOTE(review): the NULL-check after calloc, the fd declaration,
 * export error handling and the final return are missing from this
 * chunk — confirm against the complete file. */
656 struct panfrost_fence
*
657 panfrost_fence_create(struct panfrost_context
*ctx
,
658 struct util_dynarray
*fences
)
660 struct panfrost_screen
*screen
= pan_screen(ctx
->base
.screen
);
661 struct panfrost_fence
*f
= calloc(1, sizeof(*f
));
665 util_dynarray_init(&f
->syncfds
, NULL
);
667 /* Export fences from all pending batches. */
668 util_dynarray_foreach(fences
, struct panfrost_batch_fence
*, fence
) {
671 /* The fence is already signaled, no need to export it. */
672 if ((*fence
)->signaled
)
675 drmSyncobjExportSyncFile(screen
->fd
, (*fence
)->syncobj
, &fd
);
677 fprintf(stderr
, "export failed: %m\n");
680 util_dynarray_append(&f
->syncfds
, int, fd
);
683 pipe_reference_init(&f
->reference
, 1);
689 panfrost_screen_get_compiler_options(struct pipe_screen
*pscreen
,
690 enum pipe_shader_ir ir
,
691 enum pipe_shader_type shader
)
693 return &midgard_nir_options
;
697 panfrost_active_bos_hash(const void *key
)
699 const struct panfrost_bo
*bo
= key
;
701 return _mesa_hash_data(&bo
->gem_handle
, sizeof(bo
->gem_handle
));
705 panfrost_active_bos_cmp(const void *keya
, const void *keyb
)
707 const struct panfrost_bo
*a
= keya
, *b
= keyb
;
709 return a
->gem_handle
== b
->gem_handle
;
/* Public entry point: probes the DRM fd, allocates and wires up the
 * panfrost pipe_screen.  Returns &screen->base on success.
 * NOTE(review): the return type, braces, the blacklist contents, the
 * fd/ro assignment, several early-return failure paths and the
 * supported-model return values are missing from this chunk — confirm
 * against the complete file. */
713 panfrost_create_screen(int fd
, struct renderonly
*ro
)
/* Parse PAN_MESA_DEBUG into the global flag word first, so the rest of
 * screen creation can consult it. */
715 pan_debug
= debug_get_option_pan_debug();
717 /* Blacklist apps known to be buggy under Panfrost */
718 const char *proc
= util_get_process_name();
719 const char *blacklist
[] = {
724 for (unsigned i
= 0; i
< ARRAY_SIZE(blacklist
); ++i
) {
725 if ((strcmp(blacklist
[i
], proc
) == 0))
729 /* Create the screen */
730 struct panfrost_screen
*screen
= rzalloc(NULL
, struct panfrost_screen
);
/* Optional render-only (KMS display + separate render node) setup. */
736 screen
->ro
= renderonly_dup(ro
);
738 DBG("Failed to dup renderonly object\n");
/* Query static device properties from the kernel once, up front. */
746 screen
->gpu_id
= panfrost_query_gpu_version(screen
->fd
);
747 screen
->core_count
= panfrost_query_core_count(screen
->fd
);
748 screen
->thread_tls_alloc
= panfrost_query_thread_tls_alloc(screen
->fd
);
749 screen
->quirks
= panfrost_get_quirks(screen
->gpu_id
);
750 screen
->kernel_version
= drmGetVersion(fd
);
752 /* Check if we're loading against a supported GPU model. */
754 switch (screen
->gpu_id
) {
755 case 0x720: /* T720 */
756 case 0x750: /* T760 */
757 case 0x820: /* T820 */
758 case 0x860: /* T860 */
761 /* Fail to load against untested models */
762 debug_printf("panfrost: Unsupported model %X", screen
->gpu_id
);
/* Active-BO set tracks live GEM handles, guarded by its own lock. */
766 pthread_mutex_init(&screen
->active_bos_lock
, NULL
);
767 screen
->active_bos
= _mesa_set_create(screen
, panfrost_active_bos_hash
,
768 panfrost_active_bos_cmp
);
/* BO cache: LRU list plus size-bucketed free lists. */
770 pthread_mutex_init(&screen
->bo_cache
.lock
, NULL
);
771 list_inithead(&screen
->bo_cache
.lru
);
772 for (unsigned i
= 0; i
< ARRAY_SIZE(screen
->bo_cache
.buckets
); ++i
)
773 list_inithead(&screen
->bo_cache
.buckets
[i
]);
/* pandecode is needed for both trace and sync-fault debugging; only
 * TRACE makes it verbose. */
775 if (pan_debug
& (PAN_DBG_TRACE
| PAN_DBG_SYNC
))
776 pandecode_initialize(!(pan_debug
& PAN_DBG_TRACE
));
/* Hook up the pipe_screen vtable. */
778 screen
->base
.destroy
= panfrost_destroy_screen
;
780 screen
->base
.get_name
= panfrost_get_name
;
781 screen
->base
.get_vendor
= panfrost_get_vendor
;
782 screen
->base
.get_device_vendor
= panfrost_get_device_vendor
;
783 screen
->base
.get_param
= panfrost_get_param
;
784 screen
->base
.get_shader_param
= panfrost_get_shader_param
;
785 screen
->base
.get_compute_param
= panfrost_get_compute_param
;
786 screen
->base
.get_paramf
= panfrost_get_paramf
;
787 screen
->base
.get_timestamp
= panfrost_get_timestamp
;
788 screen
->base
.is_format_supported
= panfrost_is_format_supported
;
789 screen
->base
.context_create
= panfrost_create_context
;
790 screen
->base
.get_compiler_options
= panfrost_screen_get_compiler_options
;
791 screen
->base
.fence_reference
= panfrost_fence_reference
;
792 screen
->base
.fence_finish
= panfrost_fence_finish
;
793 screen
->base
.set_damage_region
= panfrost_resource_set_damage_region
;
795 panfrost_resource_screen_init(screen
);
797 return &screen
->base
;