From: Eric Anholt
Date: Fri, 29 Mar 2019 22:38:15 +0000 (-0700)
Subject: v3d: Bump the maximum texture size to 4k for V3D 4.x.
X-Git-Url: https://git.libre-soc.org/?p=mesa.git;a=commitdiff_plain;h=62360e92ec97d59389330a5aeb070416523da774

v3d: Bump the maximum texture size to 4k for V3D 4.x.

4.1 and 4.2 both have the same 16k limit, but I'm seeing GPU hangs in
the CTS at 8k and 16k.  4k at least lets us get one 4k display working.

Cc: mesa-stable@lists.freedesktop.org
---
diff --git a/src/broadcom/cle/v3d_packet_v33.xml b/src/broadcom/cle/v3d_packet_v33.xml
index 754461dc067..06e8ddad7ec 100644
--- a/src/broadcom/cle/v3d_packet_v33.xml
+++ b/src/broadcom/cle/v3d_packet_v33.xml
@@ -820,8 +820,8 @@
-
-
+
+
diff --git a/src/broadcom/common/v3d_limits.h b/src/broadcom/common/v3d_limits.h
index ee7a3e6bc00..e21ee246eff 100644
--- a/src/broadcom/common/v3d_limits.h
+++ b/src/broadcom/common/v3d_limits.h
@@ -32,7 +32,8 @@
  */
 #define V3D_MAX_TEXTURE_SAMPLERS 16
 
-#define V3D_MAX_MIP_LEVELS 12
+/* The HW can do 16384 (15), but we run into hangs when we expose that. */
+#define V3D_MAX_MIP_LEVELS 13
 
 #define V3D_MAX_SAMPLES 4
 
diff --git a/src/gallium/drivers/v3d/v3d_screen.c b/src/gallium/drivers/v3d/v3d_screen.c
index 073bdf51449..b2fcde6edd2 100644
--- a/src/gallium/drivers/v3d/v3d_screen.c
+++ b/src/gallium/drivers/v3d/v3d_screen.c
@@ -189,7 +189,10 @@ v3d_screen_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
         case PIPE_CAP_MAX_TEXTURE_2D_LEVELS:
         case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
         case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
-                return V3D_MAX_MIP_LEVELS;
+                if (screen->devinfo.ver < 40)
+                        return 12;
+                else
+                        return V3D_MAX_MIP_LEVELS;
         case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
                 return 2048;
 
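The cap change above leans on the usual Gallium convention that the largest
texture dimension is 2^(levels - 1): 13 mip levels caps textures at 4096 (4k),
the hardware's nominal 16384 would need 15 levels, and the pre-4.x value of 12
corresponds to 2048. A minimal stand-alone sketch of that arithmetic follows;
it is not part of the patch, and the helper name is made up for illustration.

    #include <assert.h>

    #define V3D_MAX_MIP_LEVELS 13

    /* Hypothetical helper: largest dimension expressible with N mip levels. */
    static unsigned
    max_texture_dimension(unsigned mip_levels)
    {
            return 1u << (mip_levels - 1);
    }

    int
    main(void)
    {
            assert(max_texture_dimension(V3D_MAX_MIP_LEVELS) == 4096); /* 4k  */
            assert(max_texture_dimension(15) == 16384);                /* 16k */
            assert(max_texture_dimension(12) == 2048);  /* kept for ver < 40 */
            return 0;
    }
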
diff --git a/src/gallium/drivers/v3d/v3dx_draw.c b/src/gallium/drivers/v3d/v3dx_draw.c
index ebdc436318d..14e85e78485 100644
--- a/src/gallium/drivers/v3d/v3dx_draw.c
+++ b/src/gallium/drivers/v3d/v3dx_draw.c
@@ -55,7 +55,28 @@ v3d_start_draw(struct v3d_context *v3d)
         job->submit.bcl_start = job->bcl.bo->offset;
         v3d_job_add_bo(job, job->bcl.bo);
 
-        job->tile_alloc = v3d_bo_alloc(v3d->screen, 1024 * 1024, "tile_alloc");
+        /* The PTB will request the tile alloc initial size per tile at start
+         * of tile binning.
+         */
+        uint32_t tile_alloc_size = (job->draw_tiles_x *
+                                    job->draw_tiles_y) * 64;
+        /* The PTB allocates in aligned 4k chunks after the initial setup. */
+        tile_alloc_size = align(tile_alloc_size, 4096);
+        /* Include the first two chunk allocations that the PTB does so that
+         * we definitely clear the OOM condition before triggering one (the HW
+         * won't trigger OOM during the first allocations).
+         */
+        tile_alloc_size += 8192;
+        /* For performance, allocate some extra initial memory after the PTB's
+         * minimal allocations, so that we hopefully don't have to block the
+         * GPU on the kernel handling an OOM signal.
+         */
+        tile_alloc_size += 512 * 1024;
+
+        job->tile_alloc = v3d_bo_alloc(v3d->screen, tile_alloc_size,
+                                       "tile_alloc");
         uint32_t tsda_per_tile_size = v3d->screen->devinfo.ver >= 40 ?
                 256 : 64;
         job->tile_state = v3d_bo_alloc(v3d->screen,
                                        job->draw_tiles_y *
diff --git a/src/gallium/drivers/v3d/v3dx_state.c b/src/gallium/drivers/v3d/v3dx_state.c
index 78762a1b5ee..24f47cc3a0c 100644
--- a/src/gallium/drivers/v3d/v3dx_state.c
+++ b/src/gallium/drivers/v3d/v3dx_state.c
@@ -846,6 +846,9 @@ v3d_setup_texture_shader_state(struct V3DX(TEXTURE_SHADER_STATE) *tex,
             prsc->target == PIPE_TEXTURE_1D_ARRAY) {
                 tex->image_height = tex->image_width >> 14;
         }
+
+        tex->image_width &= (1 << 14) - 1;
+        tex->image_height &= (1 << 14) - 1;
 #endif
 
         if (prsc->target == PIPE_TEXTURE_3D) {
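For a rough sense of what the new sizing in v3d_start_draw() works out to,
here is a minimal stand-alone sketch of the same arithmetic. The 30x17 tile
grid is purely an illustrative assumption (roughly a 1920x1080 target if the
bin tiles are 64x64); nothing below is driver code.

    #include <stdint.h>
    #include <stdio.h>

    /* Round v up to the next multiple of the power-of-two a. */
    static uint32_t
    align(uint32_t v, uint32_t a)
    {
            return (v + a - 1) & ~(a - 1);
    }

    int
    main(void)
    {
            /* Assumed tile grid, for illustration only. */
            uint32_t draw_tiles_x = 30, draw_tiles_y = 17;

            /* Initial allocation the PTB requests: 64 bytes per tile. */
            uint32_t tile_alloc_size = draw_tiles_x * draw_tiles_y * 64;

            /* The PTB then allocates in 4k chunks, so round up. */
            tile_alloc_size = align(tile_alloc_size, 4096);

            /* Cover the PTB's first two chunk allocations up front. */
            tile_alloc_size += 8192;

            /* Extra slack so the kernel OOM path is hopefully never taken. */
            tile_alloc_size += 512 * 1024;

            /* Prints 565248 bytes (552 KiB), versus the fixed 1 MiB before. */
            printf("tile_alloc size: %u bytes\n", tile_alloc_size);
            return 0;
    }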