if (!can_do_blit(info))
                return false;
 
-       fd_fence_ref(ctx->base.screen, &ctx->last_fence, NULL);
+       fd_fence_ref(&ctx->last_fence, NULL);
 
        batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
 
 
        /* in case batch wasn't flushed but fence was created: */
        fd_fence_populate(batch->fence, 0, -1);
 
-       fd_fence_ref(NULL, &batch->fence, NULL);
+       fd_fence_ref(&batch->fence, NULL);
 
        fd_ringbuffer_del(batch->draw);
        if (!batch->nondraw) {
 
 fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond, bool discard,
                enum fd_render_stage stage)
 {
-       fd_fence_ref(ctx->base.screen, &ctx->last_fence, NULL);
+       fd_fence_ref(&ctx->last_fence, NULL);
 
        util_blitter_save_fragment_constant_buffer_slot(ctx->blitter,
                        ctx->constbuf[PIPE_SHADER_FRAGMENT].cb);
 
         * a fence, re-use the last one:
         */
        if (ctx->last_fence) {
-               fd_fence_ref(pctx->screen, &fence, ctx->last_fence);
+               fd_fence_ref(&fence, ctx->last_fence);
                goto out;
        }
 
                return;
 
        /* Take a ref to the batch's fence (batch can be unref'd when flushed: */
-       fd_fence_ref(pctx->screen, &fence, batch->fence);
+       fd_fence_ref(&fence, batch->fence);
 
        /* TODO is it worth trying to figure out if app is using fence-fd's, to
         * avoid requesting one every batch?
 
 out:
        if (fencep)
-               fd_fence_ref(pctx->screen, fencep, fence);
+               fd_fence_ref(fencep, fence);
 
-       fd_fence_ref(pctx->screen, &ctx->last_fence, fence);
+       fd_fence_ref(&ctx->last_fence, fence);
 
-       fd_fence_ref(pctx->screen, &fence, NULL);
+       fd_fence_ref(&fence, NULL);
 }
 
 static void
 
        DBG("");
 
-       fd_fence_ref(pctx->screen, &ctx->last_fence, NULL);
+       fd_fence_ref(&ctx->last_fence, NULL);
 
        if (ctx->screen->reorder && util_queue_is_initialized(&ctx->flush_queue))
                util_queue_destroy(&ctx->flush_queue);
 
                return;
        }
 
-       fd_fence_ref(pctx->screen, &ctx->last_fence, NULL);
+       fd_fence_ref(&ctx->last_fence, NULL);
 
        /* Upload a user index buffer. */
        struct pipe_resource *indexbuf = NULL;
        if (!fd_render_condition_check(pctx))
                return;
 
-       fd_fence_ref(pctx->screen, &ctx->last_fence, NULL);
+       fd_fence_ref(&ctx->last_fence, NULL);
 
        if (ctx->in_blit) {
                fd_batch_reset(batch);
 
        FREE(fence);
 }
 
-void fd_fence_ref(struct pipe_screen *pscreen,
-               struct pipe_fence_handle **ptr,
+void fd_fence_ref(struct pipe_fence_handle **ptr,
                struct pipe_fence_handle *pfence)
 {
        if (pipe_reference(&(*ptr)->reference, &pfence->reference))
 
 
 void fd_fence_populate(struct pipe_fence_handle *fence,
                uint32_t timestamp, int fence_fd);
-void fd_fence_ref(struct pipe_screen *pscreen,
-               struct pipe_fence_handle **ptr,
+void fd_fence_ref(struct pipe_fence_handle **ptr,
                struct pipe_fence_handle *pfence);
-bool fd_fence_finish(struct pipe_screen *screen,
+bool fd_fence_finish(struct pipe_screen *pscreen,
                struct pipe_context *ctx,
                struct pipe_fence_handle *pfence,
                uint64_t timeout);
 
        return bo;
 }
 
+/* Adapter matching the pipe_screen::fence_reference callback signature.
+ * fd_fence_ref() no longer takes a screen argument, so this thin wrapper
+ * accepts (and ignores) the pscreen parameter the state-tracker passes in
+ * and forwards the rest; it is installed as pscreen->fence_reference in
+ * fd_screen_create().
+ */
+static void _fd_fence_ref(struct pipe_screen *pscreen,
+               struct pipe_fence_handle **ptr,
+               struct pipe_fence_handle *pfence)
+{
+       fd_fence_ref(ptr, pfence);
+}
+
 struct pipe_screen *
 fd_screen_create(struct fd_device *dev, struct renderonly *ro)
 {
 
        pscreen->get_timestamp = fd_screen_get_timestamp;
 
-       pscreen->fence_reference = fd_fence_ref;
+       pscreen->fence_reference = _fd_fence_ref;
        pscreen->fence_finish = fd_fence_finish;
        pscreen->fence_get_fd = fd_fence_get_fd;