1 /**************************************************************************
3 * Copyright 2008-2009 VMware, Inc.
4 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
29 /* Vertices are just an array of floats, with all the attributes
30 * packed. We currently assume a layout like:
32 * attr[0][0..3] - window position
33 * attr[1..n][0..3] - remaining attributes.
35 * Attributes are assumed to be 4 floats wide but are packed so that
36 * all the enabled attributes run contiguously.
 */
39 #include "util/u_math.h"
40 #include "util/u_memory.h"
41 #include "pipe/p_defines.h"
42 #include "pipe/p_shader_tokens.h"
44 #include "lp_context.h"
47 #include "lp_quad_pipe.h"
48 #include "lp_texture.h"
49 #include "lp_tex_sample.h"
/**
 * Quad shader stage: subclass of quad_stage that runs the fragment
 * shader on 2x2 quads of fragments.
 * NOTE(review): this extract is missing lines (embedded numbering jumps
 * 52 -> 54 -> 56); the struct braces and possibly further members are
 * not visible here.
 */
52 struct quad_shade_stage
54 struct quad_stage stage
; /**< base class */
/* Interpolated window position (x, y, z, w channels) for the four
 * fragments of the current quad; 16-byte aligned for the TGSI exec /
 * SIMD machinery. */
56 union tgsi_exec_channel ALIGN16_ATTRIB pos
[NUM_CHANNELS
];
/* Fragment shader output vectors, one slot per possible shader output. */
58 struct tgsi_exec_vector ALIGN16_ATTRIB outputs
[PIPE_MAX_ATTRIBS
];
/**
 * Downcast helper: convert a base quad_stage pointer to the derived
 * quad_shade_stage. Valid because quad_shade_stage embeds quad_stage
 * as its first member.
 * NOTE(review): braces elided in this extract (embedded numbering
 * jumps 64 -> 66).
 */
63 static INLINE
struct quad_shade_stage
*
64 quad_shade_stage(struct quad_stage
*qs
)
66 return (struct quad_shade_stage
*) qs
;
/**
 * Compute the window-space position vector (x, y, z, w) for the four
 * fragments of the current quad, storing results in qss->pos[].
 * X and Y are the quad's corner coordinate plus 0/+1 offsets; Z and W
 * are interpolated from the plane-equation coefficients in 'coef'.
 * NOTE(review): this extract has gaps (embedded numbering jumps); the
 * return type, the x/y parameters, the 'chan' declaration, and the
 * assignments for pos[0].f[0]/f[2] and pos[1].f[0]/f[1] are not
 * visible here.
 */
71 setup_pos_vector(struct quad_shade_stage
*qss
,
72 const struct tgsi_interp_coef
*coef
,
/* X for the right-hand fragments of the 2x2 quad: base x + 1 */
79 qss
->pos
[0].f
[1] = x
+ 1;
81 qss
->pos
[0].f
[3] = x
+ 1;
/* Y for the bottom fragments of the 2x2 quad: base y + 1 */
86 qss
->pos
[1].f
[2] = y
+ 1;
87 qss
->pos
[1].f
[3] = y
+ 1;
89 /* do Z and W for all fragments in the quad */
90 for (chan
= 2; chan
< 4; chan
++) {
/* plane-equation partial derivatives for this channel */
91 const float dadx
= coef
->dadx
[chan
];
92 const float dady
= coef
->dady
[chan
];
/* interpolated value at the quad's top-left fragment */
93 const float a0
= coef
->a0
[chan
] + dadx
* x
+ dady
* y
;
/* four fragments at (x,y), (x+1,y), (x,y+1), (x+1,y+1) */
94 qss
->pos
[chan
].f
[0] = a0
;
95 qss
->pos
[chan
].f
[1] = a0
+ dadx
;
96 qss
->pos
[chan
].f
[2] = a0
+ dady
;
97 qss
->pos
[chan
].f
[3] = a0
+ dadx
+ dady
;
103 * Execute fragment shader for the four fragments in the quad.
/*
 * NOTE(review): this extract has substantial gaps (embedded numbering
 * jumps, and the mask update at "134" contains a literal elision
 * "..."); declarations of 'constants', 'i', 'j', several braces, the
 * jit_function argument list, and the return statements are not
 * visible here.
 */
106 shade_quad(struct quad_stage
*qs
, struct quad_header
*quad
)
108 struct quad_shade_stage
*qss
= quad_shade_stage( qs
);
109 struct llvmpipe_context
*llvmpipe
= qs
->llvmpipe
;
111 struct tgsi_sampler
**samplers
;
114 /* Compute X, Y, Z, W vals for this quad */
115 setup_pos_vector(qss
,
117 (float)quad
->input
.x0
, (float)quad
->input
.y0
);
/* fetch the fragment-shader constant buffer and sampler list */
120 constants
= llvmpipe
->mapped_constants
[PIPE_SHADER_FRAGMENT
];
121 samplers
= (struct tgsi_sampler
**)llvmpipe
->tgsi
.frag_samplers_list
;
/* run the JIT-compiled fragment shader over the quad's positions */
124 llvmpipe
->fs
->jit_function( qss
->pos
,
/* mask off fragments discarded by the shader; if none survive, the
 * quad is dead (elided code at "..." and after the if) */
134 quad
->inout
.mask
&= ... ;
135 if (quad
->inout
.mask
== 0)
/* route shader outputs to color buffers / depth by output semantic */
142 const ubyte
*sem_name
= llvmpipe
->fs
->info
.output_semantic_name
;
143 const ubyte
*sem_index
= llvmpipe
->fs
->info
.output_semantic_index
;
144 const uint n
= qss
->stage
.llvmpipe
->fs
->info
.num_outputs
;
146 for (i
= 0; i
< n
; i
++) {
147 switch (sem_name
[i
]) {
148 case TGSI_SEMANTIC_COLOR
:
/* copy this color output to the color buffer chosen by its
 * semantic index */
150 uint cbuf
= sem_index
[i
];
151 memcpy(quad
->output
.color
[cbuf
],
152 &qss
->outputs
[i
].xyzw
[0].f
[0],
153 sizeof(quad
->output
.color
[0]) );
156 case TGSI_SEMANTIC_POSITION
:
/* shader-written depth: copy the Z channel for all four fragments.
 * NOTE(review): this reads outputs[0], not outputs[i] — appears to
 * assume position is output slot 0; confirm against the full file. */
159 for (j
= 0; j
< 4; j
++) {
160 quad
->output
.depth
[j
] = qss
->outputs
[0].xyzw
[2].f
[j
];
/**
 * Apply antialiasing coverage: scale each fragment's alpha (color
 * channel 3) by its coverage fraction, for every bound color buffer.
 * NOTE(review): this extract has gaps; the return type, braces, and
 * the declarations of 'cbuf' and 'j' are not visible here.
 */
175 coverage_quad(struct quad_stage
*qs
, struct quad_header
*quad
)
177 struct llvmpipe_context
*llvmpipe
= qs
->llvmpipe
;
180 /* loop over colorbuffer outputs */
181 for (cbuf
= 0; cbuf
< llvmpipe
->framebuffer
.nr_cbufs
; cbuf
++) {
182 float (*quadColor
)[4] = quad
->output
.color
[cbuf
];
184 for (j
= 0; j
< QUAD_SIZE
; j
++) {
/* coverage must be a normalized fraction in [0, 1] */
185 assert(quad
->input
.coverage
[j
] >= 0.0);
186 assert(quad
->input
.coverage
[j
] <= 1.0);
/* scale alpha by coverage */
187 quadColor
[3][j
] *= quad
->input
.coverage
[j
];
/**
 * Shade a batch of quads: run the fragment shader on each, compact
 * away quads whose fragments were all killed, then pass the survivors
 * on to the next stage in the quad pipeline.
 * NOTE(review): this extract has gaps; the return type, the 'nr'
 * parameter, and several braces are not visible here.
 */
195 shade_quads(struct quad_stage
*qs
,
196 struct quad_header
*quads
[],
199 struct quad_shade_stage
*qss
= quad_shade_stage( qs
);
200 unsigned i
, pass
= 0;
202 for (i
= 0; i
< nr
; i
++) {
/* skip quads fully discarded by the shader */
203 if (!shade_quad(qs
, quads
[i
]))
/* coverage application is currently compiled out */
206 if (/*do_coverage*/ 0)
207 coverage_quad( qs
, quads
[i
] );
/* compact surviving quads to the front of the array */
209 quads
[pass
++] = quads
[i
];
/* forward only the surviving quads downstream */
213 qs
->next
->run(qs
->next
, quads
, pass
);
221 * Per-primitive (or per-begin?) setup
/* This stage does no setup of its own; just forward the begin
 * notification to the next pipeline stage.
 * NOTE(review): return type and braces are elided in this extract. */
224 shade_begin(struct quad_stage
*qs
)
226 qs
->next
->begin(qs
->next
);
/* Destructor for the quad shade stage.
 * NOTE(review): the entire body (embedded lines 232+) is elided from
 * this extract; presumably it frees the stage — confirm in the full
 * file. */
231 shade_destroy(struct quad_stage
*qs
)
/**
 * Create the quad fragment-shading stage.
 * Allocates a zeroed quad_shade_stage and wires up its back-pointer to
 * the context and its vtable entries (begin / run / destroy).
 * NOTE(review): this extract has gaps; the return type, the return
 * statement, and any allocation-failure handling are not visible here.
 */
238 lp_quad_shade_stage( struct llvmpipe_context
*llvmpipe
)
240 struct quad_shade_stage
*qss
;
242 qss
= CALLOC_STRUCT(quad_shade_stage
);
246 qss
->stage
.llvmpipe
= llvmpipe
;
247 qss
->stage
.begin
= shade_begin
;
248 qss
->stage
.run
= shade_quads
;
249 qss
->stage
.destroy
= shade_destroy
;