#include "util/u_memory.h"
#include "util/u_inlines.h"
-#include "util/u_format.h"
-#include "util/u_format_s3tc.h"
+#include "util/format/u_format.h"
+#include "util/format/u_format_s3tc.h"
#include "util/u_screen.h"
#include "util/u_string.h"
#include "util/u_debug.h"
{"nobypass", FD_DBG_NOBYPASS, "Disable GMEM bypass"},
{"fraghalf", FD_DBG_FRAGHALF, "Use half-precision in fragment shader"},
{"nobin", FD_DBG_NOBIN, "Disable hw binning"},
+ {"nogmem", FD_DBG_NOGMEM, "Disable GMEM rendering (bypass only)"},
{"glsl120", FD_DBG_GLSL120,"Temporary flag to force GLSL 1.20 (rather than 1.30) on a3xx+"},
{"shaderdb", FD_DBG_SHADERDB, "Enable shaderdb output"},
{"flush", FD_DBG_FLUSH, "Force flush after every draw"},
{"inorder", FD_DBG_INORDER,"Disable reordering for draws/blits"},
{"bstat", FD_DBG_BSTAT, "Print batch stats at context destroy"},
{"nogrow", FD_DBG_NOGROW, "Disable \"growable\" cmdstream buffers, even if kernel supports it"},
- {"lrz", FD_DBG_LRZ, "Enable experimental LRZ support (a5xx+)"},
+ {"lrz", FD_DBG_LRZ, "Enable experimental LRZ support (a5xx)"},
{"noindirect",FD_DBG_NOINDR, "Disable hw indirect draws (emulate on CPU)"},
{"noblit", FD_DBG_NOBLIT, "Disable blitter (fallback to generic blit path)"},
{"hiprio", FD_DBG_HIPRIO, "Force high-priority context"},
- {"ttile", FD_DBG_TTILE, "Enable texture tiling (a2xx/a5xx)"},
+ {"ttile", FD_DBG_TTILE, "Enable texture tiling (a2xx/a3xx/a5xx)"},
{"perfcntrs", FD_DBG_PERFC, "Expose performance counters"},
{"noubwc", FD_DBG_NOUBWC, "Disable UBWC for all internal buffers"},
+ {"nolrz", FD_DBG_NOLRZ, "Disable LRZ (a6xx)"},
+ {"notile", FD_DBG_NOTILE, "Disable tiling for all internal buffers"},
DEBUG_NAMED_VALUE_END
};
if (is_a6xx(screen)) return 1;
return 0;
- case PIPE_CAP_ALLOW_MAPPED_BUFFERS_DURING_EXECUTION:
- return 0;
-
case PIPE_CAP_CONTEXT_PRIORITY_MASK:
return screen->priority_mask;
case PIPE_CAP_MAX_VARYINGS:
return 16;
+ case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS:
+ /* We don't really have a limit on this, it all goes into the main
+ * memory buffer. Needs to be at least 120 / 4 (minimum requirement
+ * for GL_MAX_TESS_PATCH_COMPONENTS).
+ */
+ return 128;
+
+ case PIPE_CAP_MAX_TEXTURE_UPLOAD_MEMORY_BUDGET:
+ return 64 * 1024 * 1024;
+
case PIPE_CAP_SHAREABLE_SHADERS:
case PIPE_CAP_GLSL_OPTIMIZE_CONSERVATIVELY:
/* manage the variants for these ourself, to avoid breaking precompile: */
return 1;
return 0;
+ /* Geometry shaders.. */
+ case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
+ return 512;
+ case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
+ return 2048;
+ case PIPE_CAP_MAX_GS_INVOCATIONS:
+ return 32;
+
/* Stream output. */
case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
if (is_ir3(screen))
return 0;
case PIPE_CAP_TGSI_FS_FACE_IS_INTEGER_SYSVAL:
return 1;
+ case PIPE_CAP_TGSI_FS_POINT_IS_SYSVAL:
+ return is_a2xx(screen);
case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
if (is_ir3(screen))
case PIPE_SHADER_FRAGMENT:
case PIPE_SHADER_VERTEX:
break;
+ case PIPE_SHADER_TESS_CTRL:
+ case PIPE_SHADER_TESS_EVAL:
+ case PIPE_SHADER_GEOMETRY:
+ if (is_a6xx(screen))
+ break;
+ return 0;
case PIPE_SHADER_COMPUTE:
if (has_compute(screen))
break;
return 0;
- case PIPE_SHADER_GEOMETRY:
- /* maye we could emulate.. */
- return 0;
default:
DBG("unknown shader type %d", shader);
return 0;
* everything is just normal registers. This is just temporary
* hack until load_input/store_output handle arrays in a similar
* way as load_var/store_var..
+ *
+ * For tessellation stages, inputs are loaded using ldlw or ldg, both
+ * of which support indirection.
*/
- return 0;
+ return shader == PIPE_SHADER_TESS_CTRL || shader == PIPE_SHADER_TESS_EVAL;
case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
/* a2xx compiler doesn't handle indirect: */
return (1 << PIPE_SHADER_IR_NIR) | (1 << PIPE_SHADER_IR_TGSI);
case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
return 32;
- case PIPE_SHADER_CAP_SCALAR_ISA:
- return is_ir3(screen) ? 1 : 0;
case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
if (is_a5xx(screen) || is_a6xx(screen)) {
return bo;
}
+/* Thin adapter: pipe_screen::fence_reference takes a pipe_screen pointer,
+ * but fd_fence_ref does not need it, so ignore pscreen and forward the
+ * reference update. Installed as pscreen->fence_reference below.
+ */
+static void _fd_fence_ref(struct pipe_screen *pscreen,
+		struct pipe_fence_handle **ptr,
+		struct pipe_fence_handle *pfence)
+{
+	fd_fence_ref(ptr, pfence);
+}
+
struct pipe_screen *
fd_screen_create(struct fd_device *dev, struct renderonly *ro)
{
case 430:
fd4_screen_init(pscreen);
break;
+ case 510:
case 530:
case 540:
fd5_screen_init(pscreen);
break;
+ case 618:
case 630:
+ case 640:
fd6_screen_init(pscreen);
break;
default:
screen->num_vsc_pipes = 8;
}
+ if (fd_mesa_debug & FD_DBG_PERFC) {
+ screen->perfcntr_groups = fd_perfcntrs(screen->gpu_id,
+ &screen->num_perfcntr_groups);
+ }
+
/* NOTE: don't enable if we have too old of a kernel to support
* growable cmdstream buffers, since memory requirement for cmdstream
* buffers would be too much otherwise.
pscreen->get_timestamp = fd_screen_get_timestamp;
- pscreen->fence_reference = fd_fence_ref;
+ pscreen->fence_reference = _fd_fence_ref;
pscreen->fence_finish = fd_fence_finish;
pscreen->fence_get_fd = fd_fence_get_fd;