-/**************************************************************************
- *
- * Copyright 2008 VMware, Inc.
- * Copyright 2014 Broadcom
- * Copyright 2018 Alyssa Rosenzweig
- * Copyright 2019 Collabora
- * All Rights Reserved.
+/*
+ * Copyright (C) 2008 VMware, Inc.
+ * Copyright (C) 2014 Broadcom
+ * Copyright (C) 2018 Alyssa Rosenzweig
+ * Copyright (C) 2019 Collabora, Ltd.
+ * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
*
* Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*
- **************************************************************************/
-
+ */
#include "util/u_debug.h"
#include "util/u_memory.h"
-#include "util/u_format.h"
-#include "util/u_format_s3tc.h"
+#include "util/format/u_format.h"
+#include "util/format/u_format_s3tc.h"
#include "util/u_video.h"
#include "util/u_screen.h"
#include "util/os_time.h"
+#include "util/u_process.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "draw/draw_context.h"
-#include <xf86drm.h>
#include <fcntl.h>
#include "drm-uapi/drm_fourcc.h"
+#include "drm-uapi/panfrost_drm.h"
+#include "pan_bo.h"
#include "pan_screen.h"
#include "pan_resource.h"
#include "pan_public.h"
#include "pan_context.h"
#include "midgard/midgard_compile.h"
+#include "panfrost-quirks.h"
static const struct debug_named_value debug_options[] = {
- {"msgs", PAN_DBG_MSGS, "Print debug messages"},
- {"trace", PAN_DBG_TRACE, "Trace the command stream"},
- DEBUG_NAMED_VALUE_END
+ {"msgs", PAN_DBG_MSGS, "Print debug messages"},
+ {"trace", PAN_DBG_TRACE, "Trace the command stream"},
+ {"deqp", PAN_DBG_DEQP, "Hacks for dEQP"},
+ {"afbc", PAN_DBG_AFBC, "Enable non-conformant AFBC impl"},
+ DEBUG_NAMED_VALUE_END
};
DEBUG_GET_ONCE_FLAGS_OPTION(pan_debug, "PAN_MESA_DEBUG", debug_options, 0)
int pan_debug = 0;
-struct panfrost_driver *panfrost_create_drm_driver(int fd);
-
static const char *
panfrost_get_name(struct pipe_screen *screen)
{
static int
panfrost_get_param(struct pipe_screen *screen, enum pipe_cap param)
{
+ /* We expose in-dev stuff for dEQP that we don't want apps to use yet */
+ bool is_deqp = pan_debug & PAN_DBG_DEQP;
+
switch (param) {
case PIPE_CAP_NPOT_TEXTURES:
case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
- return 1;
-
- case PIPE_CAP_SM3:
+ case PIPE_CAP_FRAGMENT_SHADER_TEXTURE_LOD:
+ case PIPE_CAP_VERTEX_SHADER_SATURATE:
case PIPE_CAP_POINT_SPRITE:
return 1;
case PIPE_CAP_MAX_RENDER_TARGETS:
- case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
- return 1;
+ return is_deqp ? 4 : 1;
+
+ /* Throttling frames breaks pipelining */
+ case PIPE_CAP_THROTTLE:
+ return 0;
case PIPE_CAP_OCCLUSION_QUERY:
+ return 1;
case PIPE_CAP_QUERY_TIME_ELAPSED:
case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
- return 1; /* TODO: Queries */
+ case PIPE_CAP_QUERY_TIMESTAMP:
+ case PIPE_CAP_QUERY_SO_OVERFLOW:
+ return 0;
- case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
case PIPE_CAP_TEXTURE_SWIZZLE:
return 1;
- /* TODO: ES3. We expose these caps so we can access higher dEQP
- * tests; in actuality they are nonfunctional */
- case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
- return 4;
case PIPE_CAP_TGSI_INSTANCEID:
case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
+ return is_deqp ? 1 : 0;
+
+ case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
+ return is_deqp ? 4 : 0;
+ case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
+ case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
+ return is_deqp ? 64 : 0;
+ case PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS:
+ return 1;
+
+ case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
+ return is_deqp ? 256 : 0; /* for GL3 */
+
+ case PIPE_CAP_GLSL_FEATURE_LEVEL:
+ case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY:
+ return is_deqp ? 140 : 120;
+ case PIPE_CAP_ESSL_FEATURE_LEVEL:
+ return is_deqp ? 300 : 120;
+
+ case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
+ return is_deqp ? 16 : 0;
+
+ case PIPE_CAP_CUBE_MAP_ARRAY:
+ return is_deqp;
+
+ /* For faking GLES 3.1 for dEQP-GLES31 */
+ case PIPE_CAP_TEXTURE_MULTISAMPLE:
+ case PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTERS:
+ case PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTER_BUFFERS:
+ case PIPE_CAP_IMAGE_LOAD_FORMATTED:
+ return is_deqp;
+
+ /* For faking compute shaders */
+ case PIPE_CAP_COMPUTE:
+ return is_deqp;
+
+ /* TODO: Where does this req come from in practice? */
+ case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
return 1;
case PIPE_CAP_MAX_TEXTURE_2D_SIZE:
return 13;
case PIPE_CAP_BLEND_EQUATION_SEPARATE:
- return 1;
-
case PIPE_CAP_INDEP_BLEND_ENABLE:
- return 1;
-
case PIPE_CAP_INDEP_BLEND_FUNC:
return 1;
return 0;
case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
- return 1;
case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
- return 1;
-
case PIPE_CAP_GENERATE_MIPMAP:
return 1;
- case PIPE_CAP_DEPTH_CLIP_DISABLE:
- return 1;
-
- case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
- case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
- return 16 * 4;
-
- case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
- case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
- return 1024;
-
- case PIPE_CAP_MAX_VERTEX_STREAMS:
- return 1;
+ /* We would prefer varyings */
+ case PIPE_CAP_TGSI_FS_FACE_IS_INTEGER_SYSVAL:
+ case PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL:
+ return 0;
- case PIPE_CAP_SHADER_STENCIL_EXPORT:
+ /* I really don't want to set this CAP but let's not swim against the
+ * tide.. */
+ case PIPE_CAP_TGSI_TEXCOORD:
return 1;
case PIPE_CAP_SEAMLESS_CUBE_MAP:
case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
return 1;
- case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
- return 256; /* for GL3 */
-
- case PIPE_CAP_CONDITIONAL_RENDER:
- return 1;
-
- case PIPE_CAP_FRAGMENT_COLOR_CLAMPED:
- case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
- case PIPE_CAP_VERTEX_COLOR_CLAMPED:
- return 1;
-
- case PIPE_CAP_GLSL_FEATURE_LEVEL:
- return 330;
-
- case PIPE_CAP_USER_VERTEX_BUFFERS: /* TODO */
- case PIPE_CAP_RESOURCE_FROM_USER_MEMORY:
- return 0;
-
- case PIPE_CAP_TGSI_VS_LAYER_VIEWPORT:
- case PIPE_CAP_DOUBLES:
- case PIPE_CAP_INT64:
- case PIPE_CAP_INT64_DIVMOD:
- return 1;
-
- case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
- return 16;
-
case PIPE_CAP_MAX_VERTEX_ELEMENT_SRC_OFFSET:
return 0xffff;
- case PIPE_CAP_QUERY_TIMESTAMP:
- case PIPE_CAP_CUBE_MAP_ARRAY:
- return 1;
-
case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
return 1;
case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
return 65536;
- case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
- return 0;
-
case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
return 0;
- case PIPE_CAP_MAX_VIEWPORTS:
- return PIPE_MAX_VIEWPORTS;
-
case PIPE_CAP_ENDIANNESS:
return PIPE_ENDIAN_NATIVE;
- case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
- return 4;
-
- case PIPE_CAP_TEXTURE_GATHER_SM5:
- case PIPE_CAP_TEXTURE_QUERY_LOD:
- case PIPE_CAP_TGSI_VS_WINDOW_SPACE_POSITION:
case PIPE_CAP_SAMPLER_VIEW_TARGET:
- case PIPE_CAP_FAKE_SW_MSAA:
return 1;
case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
- return -32;
+ return -8;
case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
- return 31;
-
- case PIPE_CAP_DRAW_INDIRECT:
- return 1;
-
- case PIPE_CAP_QUERY_SO_OVERFLOW:
- return 1;
+ return 7;
case PIPE_CAP_VENDOR_ID:
- return 0xFFFFFFFF;
-
case PIPE_CAP_DEVICE_ID:
return 0xFFFFFFFF;
case PIPE_CAP_ACCELERATED:
+ case PIPE_CAP_UMA:
+ case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
+ case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
+ case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS:
+ case PIPE_CAP_TGSI_ARRAY_COMPONENTS:
return 1;
case PIPE_CAP_VIDEO_MEMORY: {
return (int)(system_memory >> 20);
}
- case PIPE_CAP_UMA:
- return 1;
-
- case PIPE_CAP_CONDITIONAL_RENDER_INVERTED:
- case PIPE_CAP_CLIP_HALFZ:
- case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
- case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
- case PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT:
- case PIPE_CAP_CULL_DISTANCE:
- case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS:
- case PIPE_CAP_TGSI_ARRAY_COMPONENTS:
- case PIPE_CAP_CLEAR_TEXTURE:
- return 1;
-
case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:
return 4;
case PIPE_CAP_MAX_VARYINGS:
return 16;
+ case PIPE_CAP_ALPHA_TEST:
+ return 0;
+
default:
return u_pipe_screen_get_param_defaults(screen, param);
}
enum pipe_shader_type shader,
enum pipe_shader_cap param)
{
+ bool is_deqp = pan_debug & PAN_DBG_DEQP;
+
if (shader != PIPE_SHADER_VERTEX &&
- shader != PIPE_SHADER_FRAGMENT) {
+ shader != PIPE_SHADER_FRAGMENT &&
+ !(shader == PIPE_SHADER_COMPUTE && is_deqp))
return 0;
- }
/* this is probably not totally correct.. but it's a start: */
switch (param) {
- case PIPE_SHADER_CAP_SCALAR_ISA:
- return 0;
-
case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
- return 0;
case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
return 16;
case PIPE_SHADER_CAP_MAX_OUTPUTS:
- return shader == PIPE_SHADER_FRAGMENT ? 1 : 8;
+ return shader == PIPE_SHADER_FRAGMENT ? 4 : 8;
case PIPE_SHADER_CAP_MAX_TEMPS:
return 256; /* GL_MAX_PROGRAM_TEMPORARIES_ARB */
return 16 * 1024 * sizeof(float);
case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
- return 16;
+ return PAN_MAX_CONST_BUFFERS;
case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
return 0;
return PIPE_SHADER_IR_NIR;
case PIPE_SHADER_CAP_SUPPORTED_IRS:
- return 0;
+ return (1 << PIPE_SHADER_IR_NIR) | (1 << PIPE_SHADER_IR_NIR_SERIALIZED);
case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
return 32;
case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
- case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:
- case PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS:
+ return is_deqp ? 4 : 0;
case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
return 0;
+ case PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS:
+ case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:
+ return 0;
+
default:
fprintf(stderr, "unknown shader param %d\n", param);
return 0;
/* fall-through */
case PIPE_CAPF_MAX_POINT_WIDTH_AA:
- return 255.0; /* arbitrary */
+ return 1024.0;
case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
return 16.0;
* \param format the format to test
* \param type one of PIPE_TEXTURE, PIPE_SURFACE
*/
-static boolean
+static bool
panfrost_is_format_supported( struct pipe_screen *screen,
enum pipe_format format,
enum pipe_texture_target target,
format_desc = util_format_description(format);
if (!format_desc)
- return FALSE;
+ return false;
if (sample_count > 1)
- return FALSE;
+ return false;
/* Format wishlist */
- if (format == PIPE_FORMAT_Z24X8_UNORM || format == PIPE_FORMAT_X8Z24_UNORM)
- return FALSE;
+ if (format == PIPE_FORMAT_X8Z24_UNORM)
+ return false;
if (format == PIPE_FORMAT_A1B5G5R5_UNORM || format == PIPE_FORMAT_X1B5G5R5_UNORM)
- return FALSE;
+ return false;
- if (bind & PIPE_BIND_RENDER_TARGET) {
- if (format_desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS)
- return FALSE;
+ /* TODO */
+ if (format == PIPE_FORMAT_B5G5R5A1_UNORM)
+ return false;
- if (format == PIPE_FORMAT_B5G6R5_UNORM)
- return TRUE;
+ /* Don't confuse poorly written apps (workaround dEQP bug) that expect
+ * more alpha than they ask for */
- /* Check for vaguely 8UNORM formats. Looser than
- * util_format_is_rgba8_variant, since it permits R8 (for
- * instance) */
+ bool scanout = bind & (PIPE_BIND_SCANOUT | PIPE_BIND_SHARED | PIPE_BIND_DISPLAY_TARGET);
+ bool renderable = bind & PIPE_BIND_RENDER_TARGET;
- for (unsigned chan = 0; chan < 4; ++chan) {
- enum util_format_type t = format_desc->channel[chan].type;
- if (t == UTIL_FORMAT_TYPE_VOID) continue;
- if (t != UTIL_FORMAT_TYPE_UNSIGNED) return FALSE;
- if (!format_desc->channel[chan].normalized) return FALSE;
- if (format_desc->channel[chan].size != 8) return FALSE;
- }
+ if (scanout && renderable && !util_format_is_rgba8_variant(format_desc))
+ return false;
- /*
- * Although possible, it is unnatural to render into compressed or YUV
- * surfaces. So disable these here to avoid going into weird paths
- * inside the state trackers.
- */
- if (format_desc->block.width != 1 ||
- format_desc->block.height != 1)
- return FALSE;
+ if (format_desc->layout != UTIL_FORMAT_LAYOUT_PLAIN &&
+ format_desc->layout != UTIL_FORMAT_LAYOUT_OTHER) {
+ /* Compressed formats not yet hooked up. */
+ return false;
}
- if (bind & PIPE_BIND_DEPTH_STENCIL) {
- if (format_desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS)
- return FALSE;
- }
+ /* Internally, formats that are depth/stencil renderable are limited.
+ *
+ * In particular: Z16, Z24, Z24S8, S8 are all identical from the GPU
+ * rendering perspective. That is, we render to Z24S8 (which we can
+ * AFBC compress), ignore the different when texturing (who cares?),
+ * and then in the off-chance there's a CPU read we blit back to
+ * staging.
+ *
+ * ...alternatively, we can make the state tracker deal with that. */
- if (format_desc->layout == UTIL_FORMAT_LAYOUT_BPTC ||
- format_desc->layout == UTIL_FORMAT_LAYOUT_ASTC ||
- format_desc->layout == UTIL_FORMAT_LAYOUT_ETC) {
- /* Compressed formats not yet hooked up. */
- return FALSE;
+ if (bind & PIPE_BIND_DEPTH_STENCIL) {
+ switch (format) {
+ case PIPE_FORMAT_Z24_UNORM_S8_UINT:
+ case PIPE_FORMAT_Z24X8_UNORM:
+ case PIPE_FORMAT_Z32_UNORM:
+ case PIPE_FORMAT_Z32_FLOAT:
+ case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
+ return true;
+
+ default:
+ return false;
+ }
}
- return TRUE;
+ return true;
}
+/* Reports compute capabilities. Compute is only faked for dEQP (see the
+ * PIPE_CAP_COMPUTE handling guarded by PAN_DBG_DEQP), so when the "deqp"
+ * debug flag is unset every query returns 0. For each cap: if `ret` is
+ * non-NULL the answer is written into it, and the size of the answer in
+ * bytes is returned either way. Most values are placeholders, as the
+ * TODO markers note. */
+static int
+panfrost_get_compute_param(struct pipe_screen *pscreen, enum pipe_shader_ir ir_type,
+ enum pipe_compute_cap param, void *ret)
+{
+ const char * const ir = "panfrost";
+
+ if (!(pan_debug & PAN_DBG_DEQP))
+ return 0;
+
+/* Copies the compound-literal answer into `ret` (when requested) and
+ * returns its size in bytes. Multi-statement macro, hence do/while(0). */
+#define RET(x) do { \
+ if (ret) \
+ memcpy(ret, x, sizeof(x)); \
+ return sizeof(x); \
+} while (0)
+
+ switch (param) {
+ case PIPE_COMPUTE_CAP_ADDRESS_BITS:
+ RET((uint32_t []){ 64 });
+
+ case PIPE_COMPUTE_CAP_IR_TARGET:
+ if (ret)
+ sprintf(ret, "%s", ir);
+ return strlen(ir) * sizeof(char);
+
+ case PIPE_COMPUTE_CAP_GRID_DIMENSION:
+ RET((uint64_t []) { 3 });
+
+ case PIPE_COMPUTE_CAP_MAX_GRID_SIZE:
+ RET(((uint64_t []) { 65535, 65535, 65535 }));
+
+ case PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE:
+ RET(((uint64_t []) { 1024, 1024, 64 }));
+
+ case PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK:
+ RET((uint64_t []) { 1024 });
+
+ case PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE:
+ RET((uint64_t []) { 1024*1024*512 /* Maybe get memory */ });
+
+ case PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE:
+ RET((uint64_t []) { 32768 });
+
+ case PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE:
+ case PIPE_COMPUTE_CAP_MAX_INPUT_SIZE:
+ RET((uint64_t []) { 4096 });
+
+ case PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE:
+ RET((uint64_t []) { 1024*1024*512 /* Maybe get memory */ });
+
+ case PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY:
+ RET((uint32_t []) { 800 /* MHz -- TODO */ });
+
+ case PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS:
+ RET((uint32_t []) { 9999 }); // TODO
+
+ case PIPE_COMPUTE_CAP_IMAGES_SUPPORTED:
+ RET((uint32_t []) { 1 }); // TODO
+
+ case PIPE_COMPUTE_CAP_SUBGROUP_SIZE:
+ RET((uint32_t []) { 32 }); // TODO
+
+ case PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK:
+ RET((uint64_t []) { 1024 }); // TODO
+ }
+
+ /* Unknown/unhandled cap */
+ return 0;
+}
static void
panfrost_destroy_screen(struct pipe_screen *pscreen)
{
struct panfrost_screen *screen = pan_screen(pscreen);
- panfrost_resource_screen_deinit(screen);
+ panfrost_bo_cache_evict_all(screen);
+ pthread_mutex_destroy(&screen->bo_cache.lock);
+ pthread_mutex_destroy(&screen->active_bos_lock);
+ drmFreeVersion(screen->kernel_version);
ralloc_free(screen);
}
struct pipe_fence_handle **ptr,
struct pipe_fence_handle *fence)
{
- panfrost_drm_fence_reference(pscreen, ptr, fence);
+ struct panfrost_fence **p = (struct panfrost_fence **)ptr;
+ struct panfrost_fence *f = (struct panfrost_fence *)fence;
+ struct panfrost_fence *old = *p;
+
+ if (pipe_reference(&(*p)->reference, &f->reference)) {
+ util_dynarray_foreach(&old->syncfds, int, fd)
+ close(*fd);
+ util_dynarray_fini(&old->syncfds);
+ free(old);
+ }
+ *p = f;
}
-static boolean
+static bool
panfrost_fence_finish(struct pipe_screen *pscreen,
struct pipe_context *ctx,
struct pipe_fence_handle *fence,
uint64_t timeout)
{
- return panfrost_drm_fence_finish(pscreen, ctx, fence, timeout);
+ struct panfrost_screen *screen = pan_screen(pscreen);
+ struct panfrost_fence *f = (struct panfrost_fence *)fence;
+ struct util_dynarray syncobjs;
+ int ret;
+
+ /* All fences were already signaled */
+ if (!util_dynarray_num_elements(&f->syncfds, int))
+ return true;
+
+ util_dynarray_init(&syncobjs, NULL);
+ util_dynarray_foreach(&f->syncfds, int, fd) {
+ uint32_t syncobj;
+
+ ret = drmSyncobjCreate(screen->fd, 0, &syncobj);
+ assert(!ret);
+
+ ret = drmSyncobjImportSyncFile(screen->fd, syncobj, *fd);
+ assert(!ret);
+ util_dynarray_append(&syncobjs, uint32_t, syncobj);
+ }
+
+ uint64_t abs_timeout = os_time_get_absolute_timeout(timeout);
+ if (abs_timeout == OS_TIMEOUT_INFINITE)
+ abs_timeout = INT64_MAX;
+
+ ret = drmSyncobjWait(screen->fd, util_dynarray_begin(&syncobjs),
+ util_dynarray_num_elements(&syncobjs, uint32_t),
+ abs_timeout, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL,
+ NULL);
+
+ util_dynarray_foreach(&syncobjs, uint32_t, syncobj)
+ drmSyncobjDestroy(screen->fd, *syncobj);
+
+ return ret >= 0;
+}
+
+/* Builds a pipe-level fence from the given array of per-batch fences
+ * (struct panfrost_batch_fence *). Each batch fence that is not already
+ * signaled has its DRM syncobj exported to a sync-file fd, collected in
+ * f->syncfds; already-signaled batches contribute nothing. Returns a new
+ * fence with refcount 1 — ownership passes to the caller, who releases it
+ * through the screen's fence_reference hook — or NULL if allocation fails. */
+struct panfrost_fence *
+panfrost_fence_create(struct panfrost_context *ctx,
+ struct util_dynarray *fences)
+{
+ struct panfrost_screen *screen = pan_screen(ctx->base.screen);
+ struct panfrost_fence *f = calloc(1, sizeof(*f));
+ if (!f)
+ return NULL;
+
+ util_dynarray_init(&f->syncfds, NULL);
+
+ /* Export fences from all pending batches. */
+ util_dynarray_foreach(fences, struct panfrost_batch_fence *, fence) {
+ int fd = -1;
+
+ /* The fence is already signaled, no need to export it. */
+ if ((*fence)->signaled)
+ continue;
+
+ /* A failed export leaves fd == -1; report it (%m = errno string)
+ * and trip the assert in debug builds. */
+ drmSyncobjExportSyncFile(screen->fd, (*fence)->syncobj, &fd);
+ if (fd == -1)
+ fprintf(stderr, "export failed: %m\n");
+
+ assert(fd != -1);
+ util_dynarray_append(&f->syncfds, int, fd);
+ }
+
+ pipe_reference_init(&f->reference, 1);
+
+ return f;
+}
static const void *
return &midgard_nir_options;
}
+/* Reads the GPU product ID (e.g. 0x860 for a T860) from the kernel via the
+ * PANFROST_GET_PARAM ioctl. The ioctl is expected to succeed on a valid
+ * panfrost fd; failure is only caught by the assert in debug builds
+ * (ret is ASSERTED so release builds don't warn about it being unused). */
+static unsigned
+panfrost_query_gpu_version(struct panfrost_screen *screen)
+{
+ struct drm_panfrost_get_param get_param = {0,};
+ ASSERTED int ret;
+
+ get_param.param = DRM_PANFROST_PARAM_GPU_PROD_ID;
+ ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_GET_PARAM, &get_param);
+ assert(!ret);
+
+ return get_param.value;
+}
+
+/* Hash callback for the screen's active_bos set: a BO is keyed solely by
+ * its GEM handle. */
+static uint32_t
+panfrost_active_bos_hash(const void *key)
+{
+ const struct panfrost_bo *bo = key;
+
+ return _mesa_hash_data(&bo->gem_handle, sizeof(bo->gem_handle));
+}
+
+/* Equality callback for the screen's active_bos set: two entries match
+ * iff their GEM handles are equal. */
+static bool
+panfrost_active_bos_cmp(const void *keya, const void *keyb)
+{
+ const struct panfrost_bo *a = keya, *b = keyb;
+
+ return a->gem_handle == b->gem_handle;
+}
+
struct pipe_screen *
panfrost_create_screen(int fd, struct renderonly *ro)
{
- struct panfrost_screen *screen = rzalloc(NULL, struct panfrost_screen);
+ pan_debug = debug_get_option_pan_debug();
+
+ /* Blacklist apps known to be buggy under Panfrost */
+ const char *proc = util_get_process_name();
+ const char *blacklist[] = {
+ "chromium",
+ "chrome",
+ };
+
+ for (unsigned i = 0; i < ARRAY_SIZE(blacklist); ++i) {
+ if (proc && (strcmp(blacklist[i], proc) == 0))
+ return NULL;
+ }
- pan_debug = debug_get_option_pan_debug();
+ /* Create the screen */
+ struct panfrost_screen *screen = rzalloc(NULL, struct panfrost_screen);
if (!screen)
return NULL;
screen->fd = fd;
+ screen->gpu_id = panfrost_query_gpu_version(screen);
+ screen->quirks = panfrost_get_quirks(screen->gpu_id);
+ screen->kernel_version = drmGetVersion(fd);
+
+ /* Check if we're loading against a supported GPU model. */
+
+ switch (screen->gpu_id) {
+ case 0x720: /* T720 */
+ case 0x750: /* T760 */
+ case 0x820: /* T820 */
+ case 0x860: /* T860 */
+ break;
+ default:
+ /* Fail to load against untested models */
+ debug_printf("panfrost: Unsupported model %X\n", screen->gpu_id);
+ drmFreeVersion(screen->kernel_version);
+ ralloc_free(screen);
+ return NULL;
+ }
+
+ pthread_mutex_init(&screen->active_bos_lock, NULL);
+ screen->active_bos = _mesa_set_create(screen, panfrost_active_bos_hash,
+ panfrost_active_bos_cmp);
+
+ pthread_mutex_init(&screen->bo_cache.lock, NULL);
+ list_inithead(&screen->bo_cache.lru);
+ for (unsigned i = 0; i < ARRAY_SIZE(screen->bo_cache.buckets); ++i)
+ list_inithead(&screen->bo_cache.buckets[i]);
+
if (pan_debug & PAN_DBG_TRACE)
pandecode_initialize();
screen->base.get_device_vendor = panfrost_get_device_vendor;
screen->base.get_param = panfrost_get_param;
screen->base.get_shader_param = panfrost_get_shader_param;
+ screen->base.get_compute_param = panfrost_get_compute_param;
screen->base.get_paramf = panfrost_get_paramf;
screen->base.get_timestamp = panfrost_get_timestamp;
screen->base.is_format_supported = panfrost_is_format_supported;
screen->base.get_compiler_options = panfrost_screen_get_compiler_options;
screen->base.fence_reference = panfrost_fence_reference;
screen->base.fence_finish = panfrost_fence_finish;
-
- screen->last_fragment_flushed = true;
- screen->last_job = NULL;
+ screen->base.set_damage_region = panfrost_resource_set_damage_region;
panfrost_resource_screen_init(screen);