vc4: Add support for 16-bit signed/unsigned norm/scaled vertex attrs.
diff --git a/src/gallium/drivers/freedreno/freedreno_screen.c b/src/gallium/drivers/freedreno/freedreno_screen.c
index bc6ff78aa734836f74f4d3cbc19f263a3c3352b2..19379a8b02418c7838335b3f68a4835ffd470d68 100644
--- a/src/gallium/drivers/freedreno/freedreno_screen.c
+++ b/src/gallium/drivers/freedreno/freedreno_screen.c
@@ -52,6 +52,7 @@
 
 #include "a2xx/fd2_screen.h"
 #include "a3xx/fd3_screen.h"
+#include "a4xx/fd4_screen.h"
 
 /* XXX this should go away */
 #include "state_tracker/drm_driver.h"
@@ -61,15 +62,16 @@ static const struct debug_named_value debug_options[] = {
                {"disasm",    FD_DBG_DISASM, "Dump TGSI and adreno shader disassembly"},
                {"dclear",    FD_DBG_DCLEAR, "Mark all state dirty after clear"},
                {"flush",     FD_DBG_FLUSH,  "Force flush after every draw"},
-               {"dscis",     FD_DBG_DSCIS,  "Disable scissor optimization"},
+               {"noscis",    FD_DBG_NOSCIS, "Disable scissor optimization"},
                {"direct",    FD_DBG_DIRECT, "Force inline (SS_DIRECT) state loads"},
-               {"dbypass",   FD_DBG_DBYPASS,"Disable GMEM bypass"},
+               {"nobypass",  FD_DBG_NOBYPASS, "Disable GMEM bypass"},
                {"fraghalf",  FD_DBG_FRAGHALF, "Use half-precision in fragment shader"},
                {"nobin",     FD_DBG_NOBIN,  "Disable hw binning"},
                {"noopt",     FD_DBG_NOOPT , "Disable optimization passes in compiler"},
                {"optmsgs",   FD_DBG_OPTMSGS,"Enable optimizater debug messages"},
                {"optdump",   FD_DBG_OPTDUMP,"Dump shader DAG to .dot files"},
                {"glsl130",   FD_DBG_GLSL130,"Temporary flag to enable GLSL 130 on a3xx+"},
+               {"nocp",      FD_DBG_NOCP,   "Disable copy-propagation"},
                DEBUG_NAMED_VALUE_END
 };
 
@@ -191,13 +193,13 @@ fd_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
 
        case PIPE_CAP_SM3:
        case PIPE_CAP_PRIMITIVE_RESTART:
-               return (screen->gpu_id >= 300) ? 1 : 0;
+               return is_a3xx(screen) || is_a4xx(screen);
 
        case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
                return 256;
 
        case PIPE_CAP_GLSL_FEATURE_LEVEL:
-               return ((screen->gpu_id >= 300) && glsl130) ? 130 : 120;
+               return ((is_a3xx(screen) || is_a4xx(screen)) && glsl130) ? 130 : 120;
 
        /* Unsupported features. */
        case PIPE_CAP_INDEP_BLEND_ENABLE:
@@ -225,6 +227,7 @@ fd_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
        case PIPE_CAP_TGSI_FS_FINE_DERIVATIVE:
        case PIPE_CAP_CONDITIONAL_RENDER_INVERTED:
        case PIPE_CAP_SAMPLER_VIEW_TARGET:
+       case PIPE_CAP_CLIP_HALFZ:
                return 0;
 
        case PIPE_CAP_MAX_VIEWPORTS:
@@ -254,7 +257,7 @@ fd_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
                return 11;
 
        case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
-               return (screen->gpu_id >= 300) ? 256 : 0;
+               return (is_a3xx(screen) || is_a4xx(screen)) ? 256 : 0;
 
        /* Render targets. */
        case PIPE_CAP_MAX_RENDER_TARGETS:
@@ -265,7 +268,10 @@ fd_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
        case PIPE_CAP_QUERY_TIMESTAMP:
                return 0;
        case PIPE_CAP_OCCLUSION_QUERY:
-               return (screen->gpu_id >= 300) ? 1 : 0;
+               /* TODO still missing on a4xx, but we lie to get gl2..
+                * it's not a feature, it's a bug!
+                */
+               return is_a3xx(screen) || is_a4xx(screen);
 
        case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
        case PIPE_CAP_MIN_TEXEL_OFFSET:
@@ -350,6 +356,7 @@ fd_screen_get_shader_param(struct pipe_screen *pscreen, unsigned shader,
        case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
                return 8; /* XXX */
        case PIPE_SHADER_CAP_MAX_INPUTS:
+       case PIPE_SHADER_CAP_MAX_OUTPUTS:
                return 16;
        case PIPE_SHADER_CAP_MAX_TEMPS:
                return 64; /* Max native temporaries. */
@@ -358,7 +365,7 @@ fd_screen_get_shader_param(struct pipe_screen *pscreen, unsigned shader,
                 * split between VS and FS.  Use lower limit of 256 to
                 * avoid getting into impossible situations:
                 */
-               return ((screen->gpu_id >= 300) ? 256 : 64) * sizeof(float[4]);
+               return ((is_a3xx(screen) || is_a4xx(screen)) ? 256 : 64) * sizeof(float[4]);
        case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
                return 1;
        case PIPE_SHADER_CAP_MAX_PREDS:
@@ -378,8 +385,11 @@ fd_screen_get_shader_param(struct pipe_screen *pscreen, unsigned shader,
        case PIPE_SHADER_CAP_INTEGERS:
                /* we should be able to support this on a3xx, but not
                 * implemented yet:
+                *
+                * TODO looks like a4xx will require some additional
+                * work for integer varying fetch..
                 */
-               return ((screen->gpu_id >= 300) && glsl130) ? 1 : 0;
+               return (is_a3xx(screen) && glsl130) ? 1 : 0;
        case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
        case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
                return 16;
@@ -511,7 +521,7 @@ fd_screen_create(struct fd_device *dev)
         * before enabling:
         *
         * If you have a different adreno version, feel free to add it to one
-        * of the two cases below and see what happens.  And if it works, please
+        * of the cases below and see what happens.  And if it works, please
         * send a patch ;-)
         */
        switch (screen->gpu_id) {
@@ -522,6 +532,9 @@ fd_screen_create(struct fd_device *dev)
        case 330:
                fd3_screen_init(pscreen);
                break;
+       case 420:
+               fd4_screen_init(pscreen);
+               break;
        default:
                debug_printf("unsupported GPU: a%03d\n", screen->gpu_id);
                goto fail;
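
The recurring is_a3xx(screen) || is_a4xx(screen) checks replace the old raw
screen->gpu_id >= 300 comparisons, which would have silently matched any
future generation as well. The helpers themselves are not part of this hunk;
a minimal sketch of what they could look like, assuming they live alongside
struct fd_screen (e.g. in freedreno_screen.h) and bucket gpu_id by hundreds
(the exact bounds below are an assumption, not taken from this diff):

	/* Sketch of the generation checks used above.  gpu_id is the
	 * numeric adreno model (e.g. 320, 330, 420), so a3xx is assumed
	 * to be the 300..399 range and a4xx the 400..499 range.
	 */
	static inline boolean
	is_a3xx(struct fd_screen *screen)
	{
		return (screen->gpu_id >= 300) && (screen->gpu_id < 400);
	}

	static inline boolean
	is_a4xx(struct fd_screen *screen)
	{
		return (screen->gpu_id >= 400) && (screen->gpu_id < 500);
	}

Splitting the generations out this way is what allows PIPE_SHADER_CAP_INTEGERS
to be narrowed to is_a3xx(screen) only (pending the integer varying fetch work
noted in the comment), while caps shared by both generations are spelled as the
explicit a3xx-or-a4xx union. The renamed and newly added debug options
(noscis, nobypass, nocp) are toggled at runtime like the existing ones,
e.g. via freedreno's FD_MESA_DEBUG environment variable
(FD_MESA_DEBUG=nocp,optmsgs).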