/*
 * Copyright (C) 2008 VMware, Inc.
 * Copyright (C) 2014 Broadcom
 * Copyright (C) 2018 Alyssa Rosenzweig
 * Copyright (C) 2019 Collabora, Ltd.
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
29 #include "util/u_debug.h"
30 #include "util/u_memory.h"
31 #include "util/format/u_format.h"
32 #include "util/format/u_format_s3tc.h"
33 #include "util/u_video.h"
34 #include "util/u_screen.h"
35 #include "util/os_time.h"
36 #include "util/u_process.h"
37 #include "pipe/p_defines.h"
38 #include "pipe/p_screen.h"
39 #include "draw/draw_context.h"
43 #include "drm-uapi/drm_fourcc.h"
44 #include "drm-uapi/panfrost_drm.h"
47 #include "pan_screen.h"
48 #include "pan_resource.h"
49 #include "pan_public.h"
51 #include "pandecode/decode.h"
53 #include "pan_context.h"
54 #include "midgard/midgard_compile.h"
55 #include "panfrost-quirks.h"
57 static const struct debug_named_value debug_options
[] = {
58 {"msgs", PAN_DBG_MSGS
, "Print debug messages"},
59 {"trace", PAN_DBG_TRACE
, "Trace the command stream"},
60 {"deqp", PAN_DBG_DEQP
, "Hacks for dEQP"},
61 {"afbc", PAN_DBG_AFBC
, "Enable non-conformant AFBC impl"},
62 {"sync", PAN_DBG_SYNC
, "Wait for each job's completion and check for any GPU fault"},
66 DEBUG_GET_ONCE_FLAGS_OPTION(pan_debug
, "PAN_MESA_DEBUG", debug_options
, 0)
/* pipe_screen::get_name callback: returns the renderer string reported
 * to the API user.
 * NOTE(review): the return type line (const char *) and the entire body
 * were lost in this copy of the file; only the signature survives.
 * Restore from the upstream source before building. */
panfrost_get_name(struct pipe_screen *screen)
/* pipe_screen::get_vendor callback: returns the driver vendor string.
 * NOTE(review): return type (const char *) and body lost in this copy;
 * only the signature survives. Restore from upstream before building. */
panfrost_get_vendor(struct pipe_screen *screen)
/* pipe_screen::get_device_vendor callback: returns the hardware vendor
 * string.
 * NOTE(review): return type (const char *) and body lost in this copy;
 * only the signature survives. Restore from upstream before building. */
panfrost_get_device_vendor(struct pipe_screen *screen)
/* pipe_screen::get_param: answers integer PIPE_CAP_* capability queries
 * from the state tracker. Caps without an explicit case fall through to
 * u_pipe_screen_get_param_defaults() at the bottom. Several caps are
 * gated on the dEQP hacks flag so in-development features stay hidden
 * from real applications.
 *
 * NOTE(review): this copy of the file is truncated — the return type
 * (int), the "switch (param)" statement, braces and a number of the
 * per-case "return" lines were lost. The surviving tokens are preserved
 * verbatim below; restore the missing lines from upstream before
 * building. */
panfrost_get_param(struct pipe_screen *screen, enum pipe_cap param)
        /* We expose in-dev stuff for dEQP that we don't want apps to use yet */
        bool is_deqp = pan_debug & PAN_DBG_DEQP;

        case PIPE_CAP_NPOT_TEXTURES:
        case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
        case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
        case PIPE_CAP_FRAGMENT_SHADER_TEXTURE_LOD:
        case PIPE_CAP_VERTEX_SHADER_SATURATE:
        case PIPE_CAP_POINT_SPRITE:

        case PIPE_CAP_MAX_RENDER_TARGETS:
                return is_deqp ? 4 : 1;

        /* Throttling frames breaks pipelining */
        case PIPE_CAP_THROTTLE:

        case PIPE_CAP_OCCLUSION_QUERY:

        case PIPE_CAP_QUERY_TIME_ELAPSED:
        case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
        case PIPE_CAP_QUERY_TIMESTAMP:
        case PIPE_CAP_QUERY_SO_OVERFLOW:

        case PIPE_CAP_TEXTURE_SWIZZLE:

        case PIPE_CAP_TGSI_INSTANCEID:
        case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
                return is_deqp ? 1 : 0;

        case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
                return is_deqp ? 4 : 0;
        case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
        case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
                return is_deqp ? 64 : 0;
        case PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS:

        case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
                return is_deqp ? 256 : 0; /* for GL3 */

        case PIPE_CAP_GLSL_FEATURE_LEVEL:
        case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY:
                return is_deqp ? 140 : 120;
        case PIPE_CAP_ESSL_FEATURE_LEVEL:
                return is_deqp ? 300 : 120;

        case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
                return is_deqp ? 16 : 0;

        case PIPE_CAP_CUBE_MAP_ARRAY:

        /* For faking GLES 3.1 for dEQP-GLES31 */
        case PIPE_CAP_TEXTURE_MULTISAMPLE:
        case PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTERS:
        case PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTER_BUFFERS:
        case PIPE_CAP_IMAGE_LOAD_FORMATTED:

        /* For faking compute shaders */
        case PIPE_CAP_COMPUTE:

        /* TODO: Where does this req come from in practice? */
        case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:

        case PIPE_CAP_MAX_TEXTURE_2D_SIZE:

        case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
        case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:

        case PIPE_CAP_BLEND_EQUATION_SEPARATE:
        case PIPE_CAP_INDEP_BLEND_ENABLE:
        case PIPE_CAP_INDEP_BLEND_FUNC:

        case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:

        /* Hardware is natively upper left */
        case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
        case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
        case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
        case PIPE_CAP_GENERATE_MIPMAP:

        /* We would prefer varyings */
        case PIPE_CAP_TGSI_FS_FACE_IS_INTEGER_SYSVAL:
        case PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL:

        /* I really don't want to set this CAP but let's not swim against the
         * current (rest of this comment was lost in this copy). */
        case PIPE_CAP_TGSI_TEXCOORD:

        case PIPE_CAP_SEAMLESS_CUBE_MAP:
        case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:

        case PIPE_CAP_MAX_VERTEX_ELEMENT_SRC_OFFSET:

        case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:

        case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:

        case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:

        case PIPE_CAP_ENDIANNESS:
                return PIPE_ENDIAN_NATIVE;

        case PIPE_CAP_SAMPLER_VIEW_TARGET:

        case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:

        case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:

        case PIPE_CAP_VENDOR_ID:
        case PIPE_CAP_DEVICE_ID:

        case PIPE_CAP_ACCELERATED:

        case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
        case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:

        case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS:

        case PIPE_CAP_TGSI_ARRAY_COMPONENTS:

        /* Reported in MiB (the >> 20 below) for GL memory-info queries. */
        case PIPE_CAP_VIDEO_MEMORY: {
                uint64_t system_memory;

                if (!os_get_total_physical_memory(&system_memory))

                return (int)(system_memory >> 20);

        case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:

        case PIPE_CAP_MAX_VARYINGS:

        case PIPE_CAP_ALPHA_TEST:

        /* Anything not handled explicitly above gets the Gallium default. */
        return u_pipe_screen_get_param_defaults(screen, param);
/* pipe_screen::get_shader_param: per-stage PIPE_SHADER_CAP_* queries.
 * Only vertex and fragment stages are supported; compute is additionally
 * allowed when the dEQP hacks flag is set.
 *
 * NOTE(review): this copy is truncated — the return type (int), the
 * early return for unsupported stages, the "switch (param)" statement,
 * braces and several per-case "return" lines were lost. Surviving
 * tokens are preserved verbatim; restore from upstream before
 * building. */
panfrost_get_shader_param(struct pipe_screen *screen,
                          enum pipe_shader_type shader,
                          enum pipe_shader_cap param)
        bool is_deqp = pan_debug & PAN_DBG_DEQP;

        /* Reject stages this driver does not implement (compute is only
         * reachable behind the dEQP flag). */
        if (shader != PIPE_SHADER_VERTEX &&
            shader != PIPE_SHADER_FRAGMENT &&
            !(shader == PIPE_SHADER_COMPUTE && is_deqp))

        /* this is probably not totally correct.. but it's a start: */
        case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
        case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
        case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
        case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:

        case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:

        case PIPE_SHADER_CAP_MAX_INPUTS:

        case PIPE_SHADER_CAP_MAX_OUTPUTS:
                return shader == PIPE_SHADER_FRAGMENT ? 4 : 8;

        case PIPE_SHADER_CAP_MAX_TEMPS:
                return 256; /* GL_MAX_PROGRAM_TEMPORARIES_ARB */

        case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE:
                return 16 * 1024 * sizeof(float);

        case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
                return PAN_MAX_CONST_BUFFERS;

        case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:

        case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:

        case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:

        case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:

        case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:

        case PIPE_SHADER_CAP_SUBROUTINES:

        case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:

        case PIPE_SHADER_CAP_INTEGERS:

        case PIPE_SHADER_CAP_INT64_ATOMICS:
        case PIPE_SHADER_CAP_FP16:
        case PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED:
        case PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED:
        case PIPE_SHADER_CAP_TGSI_LDEXP_SUPPORTED:
        case PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED:
        case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:

        case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
        case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
                return 16; /* XXX: How many? */

        case PIPE_SHADER_CAP_PREFERRED_IR:
                return PIPE_SHADER_IR_NIR;

        case PIPE_SHADER_CAP_SUPPORTED_IRS:
                return (1 << PIPE_SHADER_IR_NIR) | (1 << PIPE_SHADER_IR_NIR_SERIALIZED);

        case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:

        case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
        case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
                return is_deqp ? 4 : 0;
        case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
        case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:

        case PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS:
        case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:

        /* Loudly flag caps nobody has thought about yet. */
        fprintf(stderr, "unknown shader param %d\n", param);
/* pipe_screen::get_paramf: floating-point capability queries.
 *
 * NOTE(review): this copy is truncated — the return type (float), the
 * "switch (param)" statement, braces and some per-case returns were
 * lost. Surviving tokens preserved verbatim. */
panfrost_get_paramf(struct pipe_screen *screen, enum pipe_capf param)
        case PIPE_CAPF_MAX_LINE_WIDTH:

        case PIPE_CAPF_MAX_LINE_WIDTH_AA:
                return 255.0; /* arbitrary */

        case PIPE_CAPF_MAX_POINT_WIDTH:

        case PIPE_CAPF_MAX_POINT_WIDTH_AA:

        case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:

        case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
                return 16.0; /* arbitrary */

        case PIPE_CAPF_MIN_CONSERVATIVE_RASTER_DILATE:
        case PIPE_CAPF_MAX_CONSERVATIVE_RASTER_DILATE:
        case PIPE_CAPF_CONSERVATIVE_RASTER_DILATE_GRANULARITY:

        /* Flag caps no one has considered yet. */
        debug_printf("Unexpected PIPE_CAPF %d query\n", param);
/**
 * Query format support for creating a texture, drawing surface, etc.
 * \param format the format to test
 * \param type one of PIPE_TEXTURE, PIPE_SURFACE
 *
 * NOTE(review): this copy is truncated — the return type (bool), the
 * final "unsigned bind" parameter (referenced below), braces, return
 * statements and the depth/stencil "switch (format)" line were lost.
 * Surviving tokens preserved verbatim; restore from upstream before
 * building.
 */
panfrost_is_format_supported( struct pipe_screen *screen,
                              enum pipe_format format,
                              enum pipe_texture_target target,
                              unsigned sample_count,
                              unsigned storage_sample_count,
        const struct util_format_description *format_desc;

        assert(target == PIPE_BUFFER ||
               target == PIPE_TEXTURE_1D ||
               target == PIPE_TEXTURE_1D_ARRAY ||
               target == PIPE_TEXTURE_2D ||
               target == PIPE_TEXTURE_2D_ARRAY ||
               target == PIPE_TEXTURE_RECT ||
               target == PIPE_TEXTURE_3D ||
               target == PIPE_TEXTURE_CUBE ||
               target == PIPE_TEXTURE_CUBE_ARRAY);

        format_desc = util_format_description(format);

        /* NOTE(review): the statement taken when multisampled was lost. */
        if (sample_count > 1)

        /* Format wishlist */
        if (format == PIPE_FORMAT_X8Z24_UNORM)

        if (format == PIPE_FORMAT_A1B5G5R5_UNORM || format == PIPE_FORMAT_X1B5G5R5_UNORM)

        if (format == PIPE_FORMAT_B5G5R5A1_UNORM)

        /* Don't confuse poorly written apps (workaround dEQP bug) that expect
         * more alpha than they ask for */
        bool scanout = bind & (PIPE_BIND_SCANOUT | PIPE_BIND_SHARED | PIPE_BIND_DISPLAY_TARGET);
        bool renderable = bind & PIPE_BIND_RENDER_TARGET;

        if (scanout && renderable && !util_format_is_rgba8_variant(format_desc))

        if (format_desc->layout != UTIL_FORMAT_LAYOUT_PLAIN &&
            format_desc->layout != UTIL_FORMAT_LAYOUT_OTHER) {
                /* Compressed formats not yet hooked up. */

        /* Internally, formats that are depth/stencil renderable are limited.
         *
         * In particular: Z16, Z24, Z24S8, S8 are all identical from the GPU
         * rendering perspective. That is, we render to Z24S8 (which we can
         * AFBC compress), ignore the different when texturing (who cares?),
         * and then in the off-chance there's a CPU read we blit back to
         * (rest of this comment was lost in this copy)
         *
         * ...alternatively, we can make the state tracker deal with that. */

        if (bind & PIPE_BIND_DEPTH_STENCIL) {
                case PIPE_FORMAT_Z24_UNORM_S8_UINT:
                case PIPE_FORMAT_Z24X8_UNORM:
                case PIPE_FORMAT_Z32_UNORM:
                case PIPE_FORMAT_Z32_FLOAT:
                case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
/* pipe_screen::get_compute_param: copy the requested compute capability
 * into *ret. Compute is only exposed when the dEQP hacks flag is set.
 *
 * NOTE(review): this copy is truncated — the return type (int), the
 * "switch (param)" statement, the tail of the RET() macro (the part
 * after the memcpy — likely a "return sizeof(x); } while (0)"), the
 * "if (ret)" guard, and closing braces were lost. Surviving tokens
 * preserved verbatim. */
panfrost_get_compute_param(struct pipe_screen *pscreen, enum pipe_shader_ir ir_type,
                           enum pipe_compute_cap param, void *ret)
        const char * const ir = "panfrost";

        /* Compute caps are hidden unless the dEQP hacks are enabled. */
        if (!(pan_debug & PAN_DBG_DEQP))

/* Copies a compound-literal answer into *ret (macro tail lost here). */
#define RET(x) do { \
        memcpy(ret, x, sizeof(x)); \

        case PIPE_COMPUTE_CAP_ADDRESS_BITS:
                RET((uint32_t []){ 64 });

        case PIPE_COMPUTE_CAP_IR_TARGET:
                sprintf(ret, "%s", ir);
                return strlen(ir) * sizeof(char);

        case PIPE_COMPUTE_CAP_GRID_DIMENSION:
                RET((uint64_t []) { 3 });

        case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
                RET(((uint64_t []) { 65535, 65535, 65535 }));

        case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
                RET(((uint64_t []) { 1024, 1024, 64 }));

        case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
                RET((uint64_t []) { 1024 });

        case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
                RET((uint64_t []) { 1024*1024*512 /* Maybe get memory */ });

        case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE:
                RET((uint64_t []) { 32768 });

        case PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE:
        case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE:
                RET((uint64_t []) { 4096 });

        case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
                RET((uint64_t []) { 1024*1024*512 /* Maybe get memory */ });

        case PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY:
                RET((uint32_t []) { 800 /* MHz -- TODO */ });

        case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS:
                RET((uint32_t []) { 9999 }); // TODO

        case PIPE_COMPUTE_CAP_IMAGES_SUPPORTED:
                RET((uint32_t []) { 1 }); // TODO

        case PIPE_COMPUTE_CAP_SUBGROUP_SIZE:
                RET((uint32_t []) { 32 }); // TODO

        case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK:
                RET((uint64_t []) { 1024 }); // TODO
547 panfrost_destroy_screen(struct pipe_screen
*pscreen
)
549 struct panfrost_screen
*screen
= pan_screen(pscreen
);
550 panfrost_bo_cache_evict_all(screen
);
551 pthread_mutex_destroy(&screen
->bo_cache
.lock
);
552 pthread_mutex_destroy(&screen
->active_bos_lock
);
553 drmFreeVersion(screen
->kernel_version
);
/* pipe_screen::flush_frontbuffer: present a resource to the display.
 * Currently a stub (see TODO below).
 *
 * NOTE(review): the return type (void) and braces were lost in this
 * copy; surviving tokens preserved verbatim. */
panfrost_flush_frontbuffer(struct pipe_screen *_screen,
                           struct pipe_resource *resource,
                           unsigned level, unsigned layer,
                           void *context_private,
                           struct pipe_box *sub_box)
        /* TODO: Display target integration */
/* pipe_screen::get_timestamp: monotonic timestamp in nanoseconds, used
 * for PIPE_QUERY_TIMESTAMP-style queries. (Restored the storage class,
 * uint64_t return type and braces that were lost in this copy; the
 * Gallium get_timestamp hook returns uint64_t.) */
static uint64_t
panfrost_get_timestamp(struct pipe_screen *_screen)
{
        return os_time_get_nano();
}
/* pipe_screen::fence_reference: point *ptr at `fence`, releasing the
 * reference previously held there. When the old fence's refcount drops
 * to zero its sync-fd array is torn down.
 *
 * NOTE(review): this copy is truncated — the return type (void), braces,
 * the statement inside the foreach (presumably closing each fd), the
 * free of `old` and the final assignment of the new fence into *p were
 * lost. Surviving tokens preserved verbatim. */
panfrost_fence_reference(struct pipe_screen *pscreen,
                         struct pipe_fence_handle **ptr,
                         struct pipe_fence_handle *fence)
        struct panfrost_fence **p = (struct panfrost_fence **)ptr;
        struct panfrost_fence *f = (struct panfrost_fence *)fence;
        struct panfrost_fence *old = *p;

        /* pipe_reference() returns true when the old object's refcount
         * reached zero and it must be destroyed. */
        if (pipe_reference(&(*p)->reference, &f->reference)) {
                util_dynarray_foreach(&old->syncfds, int, fd)

                util_dynarray_fini(&old->syncfds);
/* pipe_screen::fence_finish: wait for a fence to signal. Each sync fd
 * carried by the fence is imported into a freshly created DRM syncobj;
 * all syncobjs are then waited on together (WAIT_ALL) and destroyed.
 *
 * NOTE(review): this copy is truncated — the return type (bool), the
 * trailing parameters (a `timeout` is clearly expected, see the
 * os_time_get_absolute_timeout(timeout) call below), the declarations
 * of `ret`/`syncobj`, error handling after the ioctls, the last
 * drmSyncobjWait argument(s) and the final return were all lost.
 * Surviving tokens preserved verbatim. */
panfrost_fence_finish(struct pipe_screen *pscreen,
                      struct pipe_context *ctx,
                      struct pipe_fence_handle *fence,
        struct panfrost_screen *screen = pan_screen(pscreen);
        struct panfrost_fence *f = (struct panfrost_fence *)fence;
        struct util_dynarray syncobjs;

        /* All fences were already signaled */
        if (!util_dynarray_num_elements(&f->syncfds, int))

        util_dynarray_init(&syncobjs, NULL);
        util_dynarray_foreach(&f->syncfds, int, fd) {

                ret = drmSyncobjCreate(screen->fd, 0, &syncobj);

                ret = drmSyncobjImportSyncFile(screen->fd, syncobj, *fd);

                util_dynarray_append(&syncobjs, uint32_t, syncobj);

        /* Convert the relative timeout into the absolute form drmSyncobjWait
         * expects, clamping "forever" to INT64_MAX. */
        uint64_t abs_timeout = os_time_get_absolute_timeout(timeout);
        if (abs_timeout == OS_TIMEOUT_INFINITE)
                abs_timeout = INT64_MAX;

        ret = drmSyncobjWait(screen->fd, util_dynarray_begin(&syncobjs),
                             util_dynarray_num_elements(&syncobjs, uint32_t),
                             abs_timeout, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,

        /* The temporary syncobjs are only needed for the wait. */
        util_dynarray_foreach(&syncobjs, uint32_t, syncobj)
                drmSyncobjDestroy(screen->fd, *syncobj);
/* Build a panfrost_fence aggregating sync fds exported from every
 * still-unsignaled batch fence in `fences`. The caller owns the single
 * reference the returned fence starts with.
 *
 * NOTE(review): this copy is truncated — the braces, the `int fd`
 * declaration, NULL/error checks around calloc and the syncobj export,
 * the loop's closing brace and the final "return f;" were lost.
 * Surviving tokens preserved verbatim. */
struct panfrost_fence *
panfrost_fence_create(struct panfrost_context *ctx,
                      struct util_dynarray *fences)
        struct panfrost_screen *screen = pan_screen(ctx->base.screen);
        struct panfrost_fence *f = calloc(1, sizeof(*f));

        util_dynarray_init(&f->syncfds, NULL);

        /* Export fences from all pending batches. */
        util_dynarray_foreach(fences, struct panfrost_batch_fence *, fence) {

                /* The fence is already signaled, no need to export it. */
                if ((*fence)->signaled)

                drmSyncobjExportSyncFile(screen->fd, (*fence)->syncobj, &fd);

                fprintf(stderr, "export failed: %m\n");

                util_dynarray_append(&f->syncfds, int, fd);

        pipe_reference_init(&f->reference, 1);
666 panfrost_screen_get_compiler_options(struct pipe_screen
*pscreen
,
667 enum pipe_shader_ir ir
,
668 enum pipe_shader_type shader
)
670 return &midgard_nir_options
;
675 struct panfrost_screen
*screen
,
676 enum drm_panfrost_param param
,
679 struct drm_panfrost_get_param get_param
= {0,};
682 get_param
.param
= DRM_PANFROST_PARAM_GPU_PROD_ID
;
683 ret
= drmIoctl(screen
->fd
, DRM_IOCTL_PANFROST_GET_PARAM
, &get_param
);
685 assert(!(ret
&& required
));
687 return get_param
.value
;
691 panfrost_query_gpu_version(struct panfrost_screen
*screen
)
693 return panfrost_query_raw(screen
, DRM_PANFROST_PARAM_GPU_PROD_ID
, true);
697 panfrost_active_bos_hash(const void *key
)
699 const struct panfrost_bo
*bo
= key
;
701 return _mesa_hash_data(&bo
->gem_handle
, sizeof(bo
->gem_handle
));
705 panfrost_active_bos_cmp(const void *keya
, const void *keyb
)
707 const struct panfrost_bo
*a
= keya
, *b
= keyb
;
709 return a
->gem_handle
== b
->gem_handle
;
/* Public entry point: create a panfrost pipe_screen for the given DRM
 * fd, optionally wrapping a render-only display device (`ro`).
 *
 * NOTE(review): this copy is truncated — the return type (presumably
 * struct pipe_screen *), braces, the blacklist string entries, the
 * early-return taken when a blacklisted process is detected, the error
 * path after renderonly_dup() fails, the "break"s / extra model cases
 * and failure return in the GPU-model switch, and the final closing
 * brace (past the end of this view) were all lost. Surviving tokens
 * preserved verbatim; restore from upstream before building. */
panfrost_create_screen(int fd, struct renderonly *ro)
        /* Resolve PAN_MESA_DEBUG before anything can consult pan_debug. */
        pan_debug = debug_get_option_pan_debug();

        /* Blacklist apps known to be buggy under Panfrost */
        const char *proc = util_get_process_name();
        const char *blacklist[] = {

        for (unsigned i = 0; i < ARRAY_SIZE(blacklist); ++i) {
                if ((strcmp(blacklist[i], proc) == 0))

        /* Create the screen */
        struct panfrost_screen *screen = rzalloc(NULL, struct panfrost_screen);

        screen->ro = renderonly_dup(ro);

        fprintf(stderr, "Failed to dup renderonly object\n");

        screen->gpu_id = panfrost_query_gpu_version(screen);
        screen->quirks = panfrost_get_quirks(screen->gpu_id);
        screen->kernel_version = drmGetVersion(fd);

        /* Check if we're loading against a supported GPU model. */

        switch (screen->gpu_id) {
        case 0x720: /* T720 */
        case 0x750: /* T760 */
        case 0x820: /* T820 */
        case 0x860: /* T860 */

        /* Fail to load against untested models */
        debug_printf("panfrost: Unsupported model %X", screen->gpu_id);

        /* Set of BOs currently mapped, keyed/compared by GEM handle. */
        pthread_mutex_init(&screen->active_bos_lock, NULL);
        screen->active_bos = _mesa_set_create(screen, panfrost_active_bos_hash,
                                              panfrost_active_bos_cmp);

        /* BO cache: global LRU plus per-size buckets. */
        pthread_mutex_init(&screen->bo_cache.lock, NULL);
        list_inithead(&screen->bo_cache.lru);
        for (unsigned i = 0; i < ARRAY_SIZE(screen->bo_cache.buckets); ++i)
                list_inithead(&screen->bo_cache.buckets[i]);

        if (pan_debug & PAN_DBG_TRACE)
                pandecode_initialize();

        /* Wire up the pipe_screen vtable. */
        screen->base.destroy = panfrost_destroy_screen;

        screen->base.get_name = panfrost_get_name;
        screen->base.get_vendor = panfrost_get_vendor;
        screen->base.get_device_vendor = panfrost_get_device_vendor;
        screen->base.get_param = panfrost_get_param;
        screen->base.get_shader_param = panfrost_get_shader_param;
        screen->base.get_compute_param = panfrost_get_compute_param;
        screen->base.get_paramf = panfrost_get_paramf;
        screen->base.get_timestamp = panfrost_get_timestamp;
        screen->base.is_format_supported = panfrost_is_format_supported;
        screen->base.context_create = panfrost_create_context;
        screen->base.flush_frontbuffer = panfrost_flush_frontbuffer;
        screen->base.get_compiler_options = panfrost_screen_get_compiler_options;
        screen->base.fence_reference = panfrost_fence_reference;
        screen->base.fence_finish = panfrost_fence_finish;
        screen->base.set_damage_region = panfrost_resource_set_damage_region;

        panfrost_resource_screen_init(screen);

        return &screen->base;