#include "util/u_memory.h"
#include "util/u_inlines.h"
-#include "util/u_format.h"
-#include "util/u_format_s3tc.h"
+#include "util/format/u_format.h"
+#include "util/format/u_format_s3tc.h"
#include "util/u_screen.h"
#include "util/u_string.h"
#include "util/u_debug.h"
#include "a5xx/fd5_screen.h"
#include "a6xx/fd6_screen.h"
+/* for fd_get_driver/device_uuid() */
+#include "common/freedreno_uuid.h"
#include "ir3/ir3_nir.h"
+#include "ir3/ir3_compiler.h"
#include "a2xx/ir2.h"
-/* XXX this should go away */
-#include "state_tracker/drm_driver.h"
-
static const struct debug_named_value debug_options[] = {
{"msgs", FD_DBG_MSGS, "Print debug messages"},
{"disasm", FD_DBG_DISASM, "Dump TGSI and adreno shader disassembly (a2xx only, see IR3_SHADER_DEBUG)"},
{"noscis", FD_DBG_NOSCIS, "Disable scissor optimization"},
{"direct", FD_DBG_DIRECT, "Force inline (SS_DIRECT) state loads"},
{"nobypass", FD_DBG_NOBYPASS, "Disable GMEM bypass"},
- {"fraghalf", FD_DBG_FRAGHALF, "Use half-precision in fragment shader"},
+ {"log", FD_DBG_LOG, "Enable GPU timestamp based logging (a6xx+)"},
{"nobin", FD_DBG_NOBIN, "Disable hw binning"},
- {"glsl120", FD_DBG_GLSL120,"Temporary flag to force GLSL 1.20 (rather than 1.30) on a3xx+"},
+ {"nogmem", FD_DBG_NOGMEM, "Disable GMEM rendering (bypass only)"},
+ /* BIT(10) */
{"shaderdb", FD_DBG_SHADERDB, "Enable shaderdb output"},
{"flush", FD_DBG_FLUSH, "Force flush after every draw"},
{"deqp", FD_DBG_DEQP, "Enable dEQP hacks"},
{"inorder", FD_DBG_INORDER,"Disable reordering for draws/blits"},
{"bstat", FD_DBG_BSTAT, "Print batch stats at context destroy"},
{"nogrow", FD_DBG_NOGROW, "Disable \"growable\" cmdstream buffers, even if kernel supports it"},
- {"lrz", FD_DBG_LRZ, "Enable experimental LRZ support (a5xx+)"},
+ {"lrz", FD_DBG_LRZ, "Enable experimental LRZ support (a5xx)"},
{"noindirect",FD_DBG_NOINDR, "Disable hw indirect draws (emulate on CPU)"},
{"noblit", FD_DBG_NOBLIT, "Disable blitter (fallback to generic blit path)"},
{"hiprio", FD_DBG_HIPRIO, "Force high-priority context"},
{"ttile", FD_DBG_TTILE, "Enable texture tiling (a2xx/a3xx/a5xx)"},
{"perfcntrs", FD_DBG_PERFC, "Expose performance counters"},
{"noubwc", FD_DBG_NOUBWC, "Disable UBWC for all internal buffers"},
+ {"nolrz", FD_DBG_NOLRZ, "Disable LRZ (a6xx)"},
+ {"notile", FD_DBG_NOTILE, "Disable tiling for all internal buffers"},
+ {"layout", FD_DBG_LAYOUT, "Dump resource layouts"},
+ {"nofp16", FD_DBG_NOFP16, "Disable mediump precision lowering"},
+ {"nohw", FD_DBG_NOHW, "Disable submitting commands to the HW"},
DEBUG_NAMED_VALUE_END
};
int fd_mesa_debug = 0;
bool fd_binning_enabled = true;
-static bool glsl120 = false;
static const char *
fd_screen_get_name(struct pipe_screen *pscreen)
FREE(screen->ro);
fd_bc_fini(&screen->batch_cache);
+ fd_gmem_screen_fini(pscreen);
slab_destroy_parent(&screen->transfer_pool);
- mtx_destroy(&screen->lock);
+ simple_mtx_destroy(&screen->lock);
+
+ if (screen->compiler)
+ ir3_compiler_destroy(screen->compiler);
- ralloc_free(screen->compiler);
+ ralloc_free(screen->live_batches);
free(screen->perfcntr_queries);
free(screen);
case PIPE_CAP_TEXTURE_SWIZZLE:
case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
- case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
case PIPE_CAP_SEAMLESS_CUBE_MAP:
case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
- case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
- case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
- case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
case PIPE_CAP_STRING_MARKER:
case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
case PIPE_CAP_TEXTURE_BARRIER:
case PIPE_CAP_INVALIDATE_BUFFER:
+ case PIPE_CAP_RGB_OVERRIDE_DST_ALPHA_BLEND:
+ case PIPE_CAP_GLSL_TESS_LEVELS_AS_INPUTS:
+ case PIPE_CAP_NIR_COMPACT_ARRAYS:
return 1;
+ case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
+ case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
+ case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
+ return !is_a2xx(screen);
+
+ case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
+ return is_a2xx(screen);
+ case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
+ return !is_a2xx(screen);
+
case PIPE_CAP_PACKED_UNIFORMS:
return !is_a2xx(screen);
case PIPE_CAP_FRAGMENT_SHADER_DERIVATIVES:
case PIPE_CAP_VERTEX_SHADER_SATURATE:
case PIPE_CAP_PRIMITIVE_RESTART:
+ case PIPE_CAP_PRIMITIVE_RESTART_FIXED_INDEX:
case PIPE_CAP_TGSI_INSTANCEID:
case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
case PIPE_CAP_INDEP_BLEND_ENABLE:
return is_a3xx(screen) || is_a4xx(screen);
case PIPE_CAP_POLYGON_OFFSET_CLAMP:
- return is_a5xx(screen) || is_a6xx(screen);
+ return is_a4xx(screen) || is_a5xx(screen) || is_a6xx(screen);
case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
if (is_a3xx(screen)) return 16;
case PIPE_CAP_GLSL_FEATURE_LEVEL:
case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY:
- if (glsl120)
- return 120;
return is_ir3(screen) ? 140 : 120;
case PIPE_CAP_ESSL_FEATURE_LEVEL:
if (is_a6xx(screen)) return 1;
return 0;
- case PIPE_CAP_ALLOW_MAPPED_BUFFERS_DURING_EXECUTION:
- return 0;
-
case PIPE_CAP_CONTEXT_PRIORITY_MASK:
return screen->priority_mask;
return 1;
return 0;
+ case PIPE_CAP_NIR_IMAGES_AS_DEREF:
+ return 0;
+
case PIPE_CAP_MAX_VIEWPORTS:
return 1;
case PIPE_CAP_MAX_VARYINGS:
return 16;
+ case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS:
+ /* We don't really have a limit on this, it all goes into the main
+ * memory buffer. Needs to be at least 120 / 4 (minimum requirement
+ * for GL_MAX_TESS_PATCH_COMPONENTS).
+ */
+ return 128;
+
+ case PIPE_CAP_MAX_TEXTURE_UPLOAD_MEMORY_BUDGET:
+ return 64 * 1024 * 1024;
+
case PIPE_CAP_SHAREABLE_SHADERS:
case PIPE_CAP_GLSL_OPTIMIZE_CONSERVATIVELY:
/* manage the variants for these ourself, to avoid breaking precompile: */
return 1;
return 0;
+ /* Geometry shaders.. */
+ case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
+ return 512;
+ case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
+ return 2048;
+ case PIPE_CAP_MAX_GS_INVOCATIONS:
+ return 32;
+
/* Stream output. */
case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
if (is_ir3(screen))
case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
case PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS:
case PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL:
+ case PIPE_CAP_TGSI_TEXCOORD:
if (is_ir3(screen))
return 1;
return 0;
case PIPE_CAP_TGSI_FS_FACE_IS_INTEGER_SYSVAL:
return 1;
+ case PIPE_CAP_TGSI_FS_POINT_IS_SYSVAL:
+ return is_a2xx(screen);
case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
if (is_ir3(screen))
/* Texturing. */
case PIPE_CAP_MAX_TEXTURE_2D_SIZE:
- return 1 << (MAX_MIP_LEVELS - 1);
+ if (is_a6xx(screen) || is_a5xx(screen) || is_a4xx(screen))
+ return 16384;
+ else
+ return 8192;
case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
- return MAX_MIP_LEVELS;
+ if (is_a6xx(screen) || is_a5xx(screen) || is_a4xx(screen))
+ return 15;
+ else
+ return 14;
case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
return 11;
return 10;
case PIPE_CAP_UMA:
return 1;
+ case PIPE_CAP_MEMOBJ:
+ return fd_device_version(screen->dev) >= FD_VERSION_MEMORY_FD;
case PIPE_CAP_NATIVE_FENCE_FD:
return fd_device_version(screen->dev) >= FD_VERSION_FENCE_FD;
default:
case PIPE_SHADER_FRAGMENT:
case PIPE_SHADER_VERTEX:
break;
+ case PIPE_SHADER_TESS_CTRL:
+ case PIPE_SHADER_TESS_EVAL:
+ case PIPE_SHADER_GEOMETRY:
+ if (is_a6xx(screen))
+ break;
+ return 0;
case PIPE_SHADER_COMPUTE:
if (has_compute(screen))
break;
return 0;
- case PIPE_SHADER_GEOMETRY:
- /* maye we could emulate.. */
- return 0;
default:
DBG("unknown shader type %d", shader);
return 0;
* everything is just normal registers. This is just temporary
* hack until load_input/store_output handle arrays in a similar
* way as load_var/store_var..
+ *
+ * For tessellation stages, inputs are loaded using ldlw or ldg, both
+ * of which support indirection.
*/
- return 0;
+ return shader == PIPE_SHADER_TESS_CTRL || shader == PIPE_SHADER_TESS_EVAL;
case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
/* a2xx compiler doesn't handle indirect: */
case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
return 1;
case PIPE_SHADER_CAP_INTEGERS:
- if (glsl120)
- return 0;
return is_ir3(screen) ? 1 : 0;
case PIPE_SHADER_CAP_INT64_ATOMICS:
+ case PIPE_SHADER_CAP_FP16_DERIVATIVES:
+ case PIPE_SHADER_CAP_INT16:
+ case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
return 0;
case PIPE_SHADER_CAP_FP16:
- return 0;
+ return ((is_a5xx(screen) || is_a6xx(screen)) &&
+ (shader == PIPE_SHADER_COMPUTE ||
+ shader == PIPE_SHADER_FRAGMENT) &&
+ !(fd_mesa_debug & FD_DBG_NOFP16));
case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
return 16;
return (1 << PIPE_SHADER_IR_NIR) | (1 << PIPE_SHADER_IR_TGSI);
case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
return 32;
- case PIPE_SHADER_CAP_SCALAR_ISA:
- return is_ir3(screen) ? 1 : 0;
case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
if (is_a5xx(screen) || is_a6xx(screen)) {
return ir2_get_compiler_options();
}
+/* pipe_screen::get_disk_shader_cache implementation.
+ *
+ * Returns the on-disk shader cache owned by the ir3 compiler so that
+ * compiled shader variants can be reused across runs.  Returns NULL for
+ * generations where is_ir3() is false (presumably a2xx, which uses the
+ * ir2 backend — confirm), meaning no disk caching is available there.
+ */
+static struct disk_cache *
+fd_get_disk_shader_cache(struct pipe_screen *pscreen)
+{
+ struct fd_screen *screen = fd_screen(pscreen);
+
+ if (is_ir3(screen)) {
+ /* Cache lives on the compiler object; screen does not own it. */
+ struct ir3_compiler *compiler = screen->compiler;
+ return compiler->disk_cache;
+ }
+
+ return NULL;
+}
+
bool
fd_screen_bo_get_handle(struct pipe_screen *pscreen,
struct fd_bo *bo,
fd_fence_ref(ptr, pfence);
}
+/* pipe_screen::get_device_uuid implementation.
+ *
+ * Writes a device-identifying UUID into 'uuid' (caller-provided buffer,
+ * PIPE_UUID_SIZE bytes by gallium convention — confirm against callers).
+ * Delegates to fd_get_device_uuid() from common/freedreno_uuid.h, keyed
+ * on the screen's gpu_id so the UUID matches the Vulkan driver's.
+ */
+static void
+fd_screen_get_device_uuid(struct pipe_screen *pscreen, char *uuid)
+{
+ struct fd_screen *screen = fd_screen(pscreen);
+
+ fd_get_device_uuid(uuid, screen->gpu_id);
+}
+
+/* pipe_screen::get_driver_uuid implementation.
+ *
+ * Writes the driver-build UUID into 'uuid' via fd_get_driver_uuid()
+ * (common/freedreno_uuid.h).  Unlike the device UUID, this does not
+ * depend on the GPU, so no screen state is consulted.
+ */
+static void
+fd_screen_get_driver_uuid(struct pipe_screen *pscreen, char *uuid)
+{
+ fd_get_driver_uuid(uuid);
+}
+
struct pipe_screen *
fd_screen_create(struct fd_device *dev, struct renderonly *ro)
{
if (fd_mesa_debug & FD_DBG_NOBIN)
fd_binning_enabled = false;
- glsl120 = !!(fd_mesa_debug & FD_DBG_GLSL120);
-
if (!screen)
return NULL;
screen->priority_mask = (1 << val) - 1;
}
- if ((fd_device_version(dev) >= FD_VERSION_ROBUSTNESS) &&
- (fd_pipe_get_param(screen->pipe, FD_PP_PGTABLE, &val) == 0)) {
- screen->has_robustness = val;
- }
+ if (fd_device_version(dev) >= FD_VERSION_ROBUSTNESS)
+ screen->has_robustness = true;
struct sysinfo si;
sysinfo(&si);
case 330:
fd3_screen_init(pscreen);
break;
+ case 405:
case 420:
case 430:
fd4_screen_init(pscreen);
break;
+ case 510:
case 530:
case 540:
fd5_screen_init(pscreen);
break;
+ case 618:
case 630:
+ case 640:
+ case 650:
fd6_screen_init(pscreen);
break;
default:
}
if (screen->gpu_id >= 600) {
- screen->gmem_alignw = 32;
- screen->gmem_alignh = 32;
+ screen->gmem_alignw = 16;
+ screen->gmem_alignh = 4;
+ screen->tile_alignw = is_a650(screen) ? 96 : 32;
+ screen->tile_alignh = 32;
screen->num_vsc_pipes = 32;
} else if (screen->gpu_id >= 500) {
- screen->gmem_alignw = 64;
- screen->gmem_alignh = 32;
+ screen->gmem_alignw = screen->tile_alignw = 64;
+ screen->gmem_alignh = screen->tile_alignh = 32;
screen->num_vsc_pipes = 16;
} else {
- screen->gmem_alignw = 32;
- screen->gmem_alignh = 32;
+ screen->gmem_alignw = screen->tile_alignw = 32;
+ screen->gmem_alignh = screen->tile_alignh = 32;
screen->num_vsc_pipes = 8;
}
+ if (fd_mesa_debug & FD_DBG_PERFC) {
+ screen->perfcntr_groups = fd_perfcntrs(screen->gpu_id,
+ &screen->num_perfcntr_groups);
+ }
+
/* NOTE: don't enable if we have too old of a kernel to support
* growable cmdstream buffers, since memory requirement for cmdstream
* buffers would be too much otherwise.
if (fd_device_version(dev) >= FD_VERSION_UNLIMITED_CMDS)
screen->reorder = !(fd_mesa_debug & FD_DBG_INORDER);
+ if (BATCH_DEBUG)
+ screen->live_batches = _mesa_pointer_set_create(NULL);
+
fd_bc_init(&screen->batch_cache);
- (void) mtx_init(&screen->lock, mtx_plain);
+ list_inithead(&screen->context_list);
+
+ (void) simple_mtx_init(&screen->lock, mtx_plain);
pscreen->destroy = fd_screen_destroy;
pscreen->get_param = fd_screen_get_param;
pscreen->get_shader_param = fd_screen_get_shader_param;
pscreen->get_compute_param = fd_get_compute_param;
pscreen->get_compiler_options = fd_get_compiler_options;
+ pscreen->get_disk_shader_cache = fd_get_disk_shader_cache;
fd_resource_screen_init(pscreen);
fd_query_screen_init(pscreen);
+ fd_gmem_screen_init(pscreen);
pscreen->get_name = fd_screen_get_name;
pscreen->get_vendor = fd_screen_get_vendor;
pscreen->query_dmabuf_modifiers = fd_screen_query_dmabuf_modifiers;
- if (!screen->supported_modifiers) {
- static const uint64_t supported_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- };
-
- screen->supported_modifiers = supported_modifiers;
- screen->num_supported_modifiers = ARRAY_SIZE(supported_modifiers);
- }
+ pscreen->get_device_uuid = fd_screen_get_device_uuid;
+ pscreen->get_driver_uuid = fd_screen_get_driver_uuid;
slab_create_parent(&screen->transfer_pool, sizeof(struct fd_transfer), 16);