Remove the quad pipeline: it had been reduced to a single fixed stage (fragment shading), so the setup code now calls shade_quads() directly, and the depth/stencil transfer is tracked on the llvmpipe context instead of on a quad stage.
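
For context, the shape of the change is an indirection removal: instead of
pushing blocks of quads through a stage list via a function pointer, the
setup code calls the shading routine directly. A minimal sketch of the two
dispatch patterns follows; the stand-in type (struct context) and the helper
names (emit_quads_old, emit_quads_new) are illustrative only and are not the
real llvmpipe declarations.

   struct quad_header;                     /* opaque 2x2 quad data */

   /* The stage abstraction being removed: a list that only ever held the
    * shading stage, invoked through a function pointer. */
   struct quad_stage {
      struct quad_stage *next;
      void (*run)(struct quad_stage *qs,
                  struct quad_header *quads[], unsigned nr);
   };

   struct context {                        /* stands in for llvmpipe_context */
      struct { struct quad_stage *first; } quad;
   };

   static void
   shade_quads(struct context *ctx, struct quad_header *quads[], unsigned nr)
   {
      /* ...run the fragment shader JIT over the quads... */
      (void)ctx; (void)quads; (void)nr;
   }

   /* Before: one indirect call per block of quads. */
   static void
   emit_quads_old(struct context *ctx, struct quad_header *quads[], unsigned nr)
   {
      ctx->quad.first->run(ctx->quad.first, quads, nr);
   }

   /* After: a plain, direct call from the setup code. */
   static void
   emit_quads_new(struct context *ctx, struct quad_header *quads[], unsigned nr)
   {
      shade_quads(ctx, quads, nr);
   }

The real call sites are in the lp_setup.c hunks below.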
lp_prim_setup.c \
lp_prim_vbuf.c \
lp_setup.c \
- lp_quad_pipe.c \
- lp_quad_fs.c \
lp_query.c \
lp_screen.c \
lp_state_blend.c \
'lp_prim_setup.c',
'lp_prim_vbuf.c',
'lp_setup.c',
- 'lp_quad_pipe.c',
- 'lp_quad_fs.c',
'lp_query.c',
'lp_screen.c',
'lp_state_blend.c',
void
llvmpipe_map_transfers(struct llvmpipe_context *lp)
{
+ struct pipe_screen *screen = lp->pipe.screen;
+ struct pipe_surface *zsbuf = lp->framebuffer.zsbuf;
unsigned i;
for (i = 0; i < lp->framebuffer.nr_cbufs; i++) {
lp_tile_cache_map_transfers(lp->cbuf_cache[i]);
}
+
+ if(zsbuf) {
+ if(!lp->zsbuf_transfer)
+ lp->zsbuf_transfer = screen->get_tex_transfer(screen, zsbuf->texture,
+ zsbuf->face, zsbuf->level, zsbuf->zslice,
+ PIPE_TRANSFER_READ_WRITE,
+ 0, 0, zsbuf->width, zsbuf->height);
+ if(lp->zsbuf_transfer && !lp->zsbuf_map)
+ lp->zsbuf_map = screen->transfer_map(screen, lp->zsbuf_transfer);
+
+ }
}
for (i = 0; i < lp->framebuffer.nr_cbufs; i++) {
lp_tile_cache_unmap_transfers(lp->cbuf_cache[i]);
}
+
+ if(lp->zsbuf_transfer) {
+ struct pipe_screen *screen = lp->pipe.screen;
+
+ if(lp->zsbuf_map) {
+ screen->transfer_unmap(screen, lp->zsbuf_transfer);
+ lp->zsbuf_map = NULL;
+ }
+ }
}
if (llvmpipe->draw)
draw_destroy( llvmpipe->draw );
- llvmpipe->quad.shade->destroy( llvmpipe->quad.shade );
-
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
lp_destroy_tile_cache(llvmpipe->cbuf_cache[i]);
/*
* Alloc caches for accessing drawing surfaces and textures.
- * Must be before quad stage setup!
*/
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
llvmpipe->cbuf_cache[i] = lp_create_tile_cache( screen );
llvmpipe->tex_cache[i] = lp_create_tex_tile_cache( screen );
- /* setup quad rendering stages */
- llvmpipe->quad.shade = lp_quad_shade_stage(llvmpipe);
-
/* vertex shader samplers */
for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
llvmpipe->tgsi.vert_samplers[i].base.get_samples = lp_get_samples;
#include "draw/draw_vertex.h"
-#include "lp_quad_pipe.h"
#include "lp_tex_sample.h"
unsigned line_stipple_counter;
- /** Software quad rendering pipeline */
- struct {
- struct quad_stage *shade;
-
- struct quad_stage *first; /**< points to one of the above stages */
- } quad;
-
/** TGSI exec things */
struct {
struct lp_shader_sampler vert_samplers[PIPE_MAX_SAMPLERS];
struct llvmpipe_tile_cache *cbuf_cache[PIPE_MAX_COLOR_BUFS];
+ /* TODO: we shouldn't be using external interfaces internally like this */
+ struct pipe_transfer *zsbuf_transfer;
+ uint8_t *zsbuf_map;
+
unsigned tex_timestamp;
struct llvmpipe_tex_tile_cache *tex_cache[PIPE_MAX_SAMPLERS];
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2008-2009 VMware, Inc.
- * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/* Vertices are just an array of floats, with all the attributes
- * packed. We currently assume a layout like:
- *
- * attr[0][0..3] - window position
- * attr[1..n][0..3] - remaining attributes.
- *
- * Attributes are assumed to be 4 floats wide but are packed so that
- * all the enabled attributes run contiguously.
- */
-
-#include "util/u_math.h"
-#include "util/u_memory.h"
-#include "pipe/p_defines.h"
-#include "pipe/p_screen.h"
-
-#include "lp_context.h"
-#include "lp_state.h"
-#include "lp_quad.h"
-#include "lp_quad_pipe.h"
-#include "lp_texture.h"
-#include "lp_tile_cache.h"
-#include "lp_tile_soa.h"
-
-
-struct quad_shade_stage
-{
- struct quad_stage stage; /**< base class */
-
- struct pipe_transfer *transfer;
- uint8_t *map;
-};
-
-
-/** cast wrapper */
-static INLINE struct quad_shade_stage *
-quad_shade_stage(struct quad_stage *qs)
-{
- return (struct quad_shade_stage *) qs;
-}
-
-
-
-/**
- * Execute fragment shader for the four fragments in the quad.
- */
-static void
-shade_quads(struct quad_stage *qs,
- struct quad_header *quads[],
- unsigned nr)
-{
- struct quad_shade_stage *qss = quad_shade_stage( qs );
- struct llvmpipe_context *llvmpipe = qs->llvmpipe;
- struct lp_fragment_shader *fs = llvmpipe->fs;
- void *constants;
- struct tgsi_sampler **samplers;
- struct quad_header *quad = quads[0];
- const unsigned x = quad->input.x0;
- const unsigned y = quad->input.y0;
- uint8_t *tile = lp_get_cached_tile(llvmpipe->cbuf_cache[0], x, y);
- uint8_t *color;
- void *depth;
- uint32_t ALIGN16_ATTRIB mask[4][NUM_CHANNELS];
- unsigned chan_index;
- unsigned q;
-
- assert(fs->current);
- if(!fs->current)
- return;
-
- /* Sanity checks */
- assert(nr * QUAD_SIZE == TILE_VECTOR_HEIGHT * TILE_VECTOR_WIDTH);
- assert(x % TILE_VECTOR_WIDTH == 0);
- assert(y % TILE_VECTOR_HEIGHT == 0);
- for (q = 0; q < nr; ++q) {
- assert(quads[q]->input.x0 == x + q*2);
- assert(quads[q]->input.y0 == y);
- }
-
- /* mask */
- for (q = 0; q < 4; ++q)
- for (chan_index = 0; chan_index < NUM_CHANNELS; ++chan_index)
- mask[q][chan_index] = quads[q]->inout.mask & (1 << chan_index) ? ~0 : 0;
-
- /* color buffer */
- color = &TILE_PIXEL(tile, x & (TILE_SIZE-1), y & (TILE_SIZE-1), 0);
-
- /* depth buffer */
- if(qss->map) {
- assert((x % 2) == 0);
- assert((y % 2) == 0);
- depth = qss->map +
- y*qss->transfer->stride +
- 2*x*qss->transfer->block.size;
- }
- else
- depth = NULL;
-
- constants = llvmpipe->mapped_constants[PIPE_SHADER_FRAGMENT];
- samplers = (struct tgsi_sampler **)llvmpipe->tgsi.frag_samplers_list;
- /* TODO: blend color */
-
- assert((((uintptr_t)mask) & 0xf) == 0);
- assert((((uintptr_t)depth) & 0xf) == 0);
- assert((((uintptr_t)color) & 0xf) == 0);
- assert((((uintptr_t)llvmpipe->blend_color) & 0xf) == 0);
-
- /* run shader */
- fs->current->jit_function( x,
- y,
- quad->coef->a0,
- quad->coef->dadx,
- quad->coef->dady,
- constants,
- &mask[0][0],
- color,
- depth,
- samplers);
-}
-
-
-
-/**
- * Per-primitive (or per-begin?) setup
- */
-static void
-shade_begin(struct quad_stage *qs)
-{
- struct quad_shade_stage *qss = quad_shade_stage( qs );
- struct llvmpipe_context *llvmpipe = qs->llvmpipe;
- struct pipe_screen *screen = llvmpipe->pipe.screen;
- struct pipe_surface *zsbuf = llvmpipe->framebuffer.zsbuf;
-
- if(qss->transfer) {
- if(qss->map) {
- screen->transfer_unmap(screen, qss->transfer);
- qss->map = NULL;
- }
-
- screen->tex_transfer_destroy(qss->transfer);
- qss->transfer = NULL;
- }
-
- if(zsbuf) {
- qss->transfer = screen->get_tex_transfer(screen, zsbuf->texture,
- zsbuf->face, zsbuf->level, zsbuf->zslice,
- PIPE_TRANSFER_READ_WRITE,
- 0, 0, zsbuf->width, zsbuf->height);
- if(qss->transfer)
- qss->map = screen->transfer_map(screen, qss->transfer);
-
- }
-
-}
-
-
-static void
-shade_destroy(struct quad_stage *qs)
-{
- struct quad_shade_stage *qss = quad_shade_stage( qs );
- struct llvmpipe_context *llvmpipe = qs->llvmpipe;
- struct pipe_screen *screen = llvmpipe->pipe.screen;
-
- if(qss->transfer) {
- if(qss->map) {
- screen->transfer_unmap(screen, qss->transfer);
- qss->map = NULL;
- }
-
- screen->tex_transfer_destroy(qss->transfer);
- qss->transfer = NULL;
- }
-
- align_free( qs );
-}
-
-
-struct quad_stage *
-lp_quad_shade_stage( struct llvmpipe_context *llvmpipe )
-{
- struct quad_shade_stage *qss;
-
- qss = align_malloc(sizeof(struct quad_shade_stage), 16);
- if (!qss)
- return NULL;
-
- memset(qss, 0, sizeof *qss);
-
- qss->stage.llvmpipe = llvmpipe;
- qss->stage.begin = shade_begin;
- qss->stage.run = shade_quads;
- qss->stage.destroy = shade_destroy;
-
- return &qss->stage;
-}
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-#include "lp_context.h"
-#include "lp_state.h"
-#include "pipe/p_shader_tokens.h"
-
-void
-lp_build_quad_pipeline(struct llvmpipe_context *lp)
-{
- lp->quad.first = lp->quad.shade;
-}
-
+++ /dev/null
-/**************************************************************************
- *
- * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/* Authors: Keith Whitwell <keith@tungstengraphics.com>
- */
-
-#ifndef LP_QUAD_PIPE_H
-#define LP_QUAD_PIPE_H
-
-
-struct llvmpipe_context;
-struct quad_header;
-
-
-/**
- * Fragment processing is performed on 2x2 blocks of pixels called "quads".
- * Quad processing is performed with a pipeline of stages represented by
- * this type.
- */
-struct quad_stage {
- struct llvmpipe_context *llvmpipe;
-
- struct quad_stage *next;
-
- void (*begin)(struct quad_stage *qs);
-
- /** the stage action */
- void (*run)(struct quad_stage *qs, struct quad_header *quad[], unsigned nr);
-
- void (*destroy)(struct quad_stage *qs);
-};
-
-
-struct quad_stage *lp_quad_shade_stage( struct llvmpipe_context *llvmpipe );
-
-void lp_build_quad_pipeline(struct llvmpipe_context *lp);
-
-#endif /* LP_QUAD_PIPE_H */
#include "lp_context.h"
#include "lp_prim_setup.h"
#include "lp_quad.h"
-#include "lp_quad_pipe.h"
#include "lp_setup.h"
#include "lp_state.h"
#include "draw/draw_context.h"
#include "pipe/p_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"
+#include "lp_tile_cache.h"
#include "lp_tile_soa.h"
+/**
+ * Execute fragment shader for the four fragments in the quad.
+ */
+static void
+shade_quads(struct llvmpipe_context *llvmpipe,
+ struct quad_header *quads[],
+ unsigned nr)
+{
+ struct lp_fragment_shader *fs = llvmpipe->fs;
+ void *constants;
+ struct tgsi_sampler **samplers;
+ struct quad_header *quad = quads[0];
+ const unsigned x = quad->input.x0;
+ const unsigned y = quad->input.y0;
+ uint8_t *tile = lp_get_cached_tile(llvmpipe->cbuf_cache[0], x, y);
+ uint8_t *color;
+ void *depth;
+ uint32_t ALIGN16_ATTRIB mask[4][NUM_CHANNELS];
+ unsigned chan_index;
+ unsigned q;
+
+ assert(fs->current);
+ if(!fs->current)
+ return;
+
+ /* Sanity checks */
+ assert(nr * QUAD_SIZE == TILE_VECTOR_HEIGHT * TILE_VECTOR_WIDTH);
+ assert(x % TILE_VECTOR_WIDTH == 0);
+ assert(y % TILE_VECTOR_HEIGHT == 0);
+ for (q = 0; q < nr; ++q) {
+ assert(quads[q]->input.x0 == x + q*2);
+ assert(quads[q]->input.y0 == y);
+ }
+
+ /* mask */
+ for (q = 0; q < 4; ++q)
+ for (chan_index = 0; chan_index < NUM_CHANNELS; ++chan_index)
+ mask[q][chan_index] = quads[q]->inout.mask & (1 << chan_index) ? ~0 : 0;
+
+ /* color buffer */
+ color = &TILE_PIXEL(tile, x & (TILE_SIZE-1), y & (TILE_SIZE-1), 0);
+
+ /* depth buffer */
+ if(llvmpipe->zsbuf_map) {
+ assert((x % 2) == 0);
+ assert((y % 2) == 0);
+ depth = llvmpipe->zsbuf_map +
+ y*llvmpipe->zsbuf_transfer->stride +
+ 2*x*llvmpipe->zsbuf_transfer->block.size;
+ }
+ else
+ depth = NULL;
+
+ constants = llvmpipe->mapped_constants[PIPE_SHADER_FRAGMENT];
+ samplers = (struct tgsi_sampler **)llvmpipe->tgsi.frag_samplers_list;
+ /* TODO: blend color */
+
+ assert((((uintptr_t)mask) & 0xf) == 0);
+ assert((((uintptr_t)depth) & 0xf) == 0);
+ assert((((uintptr_t)color) & 0xf) == 0);
+ assert((((uintptr_t)llvmpipe->blend_color) & 0xf) == 0);
+
+ /* run shader */
+ fs->current->jit_function( x,
+ y,
+ quad->coef->a0,
+ quad->coef->dadx,
+ quad->coef->dady,
+ constants,
+ &mask[0][0],
+ color,
+ depth,
+ samplers);
+}
+
+
+
/**
* Do triangle cull test using tri determinant (sign indicates orientation)
quad_ptrs[i] = &quads[i];
}
- lp->quad.first->run( lp->quad.first, quad_ptrs, nr_quads );
+ shade_quads( lp, quad_ptrs, nr_quads );
#else
- lp->quad.first->run( lp->quad.first, &quad, 1 );
+ shade_quads( lp, &quad, 1 );
#endif
}
}
const int xleft1 = setup->span.left[1];
const int xright0 = setup->span.right[0];
const int xright1 = setup->span.right[1];
- struct quad_stage *pipe = setup->llvmpipe->quad.first;
int minleft = block_x(MIN2(xleft0, xleft1));
}
assert(!(mask0 | mask1));
- pipe->run( pipe, setup->quad_ptrs, nr_quads );
+ shade_quads(setup->llvmpipe, setup->quad_ptrs, nr_quads );
}
}
llvmpipe_update_derived(lp);
}
- lp->quad.first->begin( lp->quad.first );
-
if (lp->reduced_api_prim == PIPE_PRIM_TRIANGLES &&
lp->rasterizer->fill_cw == PIPE_POLYGON_MODE_FILL &&
lp->rasterizer->fill_ccw == PIPE_POLYGON_MODE_FILL) {
llvmpipe_update_fs( llvmpipe );
- if (llvmpipe->dirty & (LP_NEW_FRAMEBUFFER |
- LP_NEW_FS))
- lp_build_quad_pipeline(llvmpipe);
-
llvmpipe->dirty = 0;
}
/* zbuf changing? */
if (lp->framebuffer.zsbuf != fb->zsbuf) {
+
+ if(lp->zsbuf_transfer) {
+ struct pipe_screen *screen = pipe->screen;
+
+ if(lp->zsbuf_map) {
+ screen->transfer_unmap(screen, lp->zsbuf_transfer);
+ lp->zsbuf_map = NULL;
+ }
+
+ screen->tex_transfer_destroy(lp->zsbuf_transfer);
+ lp->zsbuf_transfer = NULL;
+ }
+
/* assign new */
lp->framebuffer.zsbuf = fb->zsbuf;