/*
 * Copyright 2003 Tungsten Graphics, inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keithw@tungstengraphics.com>
 */


#include <stddef.h>

#include "pipe/p_compiler.h"
#include "pipe/p_util.h"
#include "rtasm/rtasm_execmem.h"

#include "draw_vf.h"


#define DRAW_VF_DBG 0

static boolean match_fastpath( struct draw_vertex_fetch *vf,
                               const struct draw_vf_fastpath *fp)
{
   unsigned j;

   if (vf->attr_count != fp->attr_count)
      return FALSE;

   for (j = 0; j < vf->attr_count; j++)
      if (vf->attr[j].format != fp->attr[j].format ||
          vf->attr[j].inputsize != fp->attr[j].size ||
          vf->attr[j].vertoffset != fp->attr[j].offset)
         return FALSE;

   if (fp->match_strides) {
      if (vf->vertex_stride != fp->vertex_stride)
         return FALSE;

      for (j = 0; j < vf->attr_count; j++)
         if (vf->attr[j].inputstride != fp->attr[j].stride)
            return FALSE;
   }

   return TRUE;
}

static boolean search_fastpath_emit( struct draw_vertex_fetch *vf )
{
   struct draw_vf_fastpath *fp = vf->fastpath;

   for ( ; fp ; fp = fp->next) {
      if (match_fastpath(vf, fp)) {
         vf->emit = fp->func;
         return TRUE;
      }
   }

   return FALSE;
}

void draw_vf_register_fastpath( struct draw_vertex_fetch *vf,
                                boolean match_strides )
{
   struct draw_vf_fastpath *fastpath = CALLOC_STRUCT(draw_vf_fastpath);
   unsigned i;

   fastpath->vertex_stride = vf->vertex_stride;
   fastpath->attr_count = vf->attr_count;
   fastpath->match_strides = match_strides;
   fastpath->func = vf->emit;
   fastpath->attr = (struct draw_vf_attr_type *)
      MALLOC(vf->attr_count * sizeof(fastpath->attr[0]));

   for (i = 0; i < vf->attr_count; i++) {
      fastpath->attr[i].format = vf->attr[i].format;
      fastpath->attr[i].stride = vf->attr[i].inputstride;
      fastpath->attr[i].size = vf->attr[i].inputsize;
      fastpath->attr[i].offset = vf->attr[i].vertoffset;
   }

   fastpath->next = vf->fastpath;
   vf->fastpath = fastpath;
}
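

/* A minimal sketch of how the fastpath cache above is meant to be used
 * by a codegen backend (illustrative only -- example_codegen_emit() and
 * example_generated_func are hypothetical names, not part of this
 * module; the real caller is the SSE codegen code):
 */
#if 0
static void
example_codegen_emit( struct draw_vertex_fetch *vf )
{
   vf->emit = example_generated_func;     /* hypothetical generated code */
   draw_vf_register_fastpath( vf, TRUE ); /* cache it for this layout */

   /* Registering while vf->emit is still NULL instead records a
    * known-bad layout, so choose_emit_func() won't retry codegen for it.
    */
}
#endif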



/***********************************************************************
 * Build codegen functions or return generic ones:
 */

/* Note: this function is installed as vf->emit by
 * draw_vf_set_vertex_attributes(), so the first emit call after a
 * layout change lands here; it picks (or builds) a real emit function,
 * installs it in vf->emit, and then forwards the call to it.
 */
static void choose_emit_func( struct draw_vertex_fetch *vf,
                              unsigned count,
                              uint8_t *dest)
{
   vf->emit = NULL;

   /* Does this match an existing (hardwired, codegen or known-bad)
    * fastpath?
    */
   if (search_fastpath_emit(vf)) {
      /* Use this result.  If it is null, then it is already known
       * that the current state will fail for codegen and there is no
       * point trying again.
       */
   }
   else if (vf->codegen_emit) {
      vf->codegen_emit( vf );
   }

   /* Otherwise attempt one of the hardwired fastpaths:
    */
   if (!vf->emit) {
      draw_vf_generate_hardwired_emit(vf);
   }

   /* Otherwise use the generic version:
    */
   if (!vf->emit)
      vf->emit = draw_vf_generic_emit;

   vf->emit( vf, count, dest );
}




/***********************************************************************
 * Public entrypoints, mostly dispatch to the above:
 */


static unsigned
draw_vf_set_vertex_attributes( struct draw_vertex_fetch *vf,
                               const struct draw_vf_attr_map *map,
                               unsigned nr,
                               unsigned vertex_stride )
{
   unsigned offset = 0;
   unsigned i, j;

   assert(nr < PIPE_ATTRIB_MAX);

   for (j = 0, i = 0; i < nr; i++) {
      const unsigned format = map[i].format;
      if (format == DRAW_EMIT_PAD) {
#if (DRAW_VF_DBG)
         debug_printf("%d: pad %d, offset %d\n", i,
                      map[i].offset, offset);
#endif

         offset += map[i].offset;

      }
      else {
         vf->attr[j].attrib = map[i].attrib;
         vf->attr[j].format = format;
         vf->attr[j].insert = draw_vf_format_info[format].insert;
         vf->attr[j].vertattrsize = draw_vf_format_info[format].attrsize;
         vf->attr[j].vertoffset = offset;
         vf->attr[j].isconst = draw_vf_format_info[format].isconst;
         if (vf->attr[j].isconst)
            memcpy(vf->attr[j].data, &map[i].data, vf->attr[j].vertattrsize);

#if (DRAW_VF_DBG)
         debug_printf("%d: %s, offset %d\n", i,
                      draw_vf_format_info[format].name,
                      vf->attr[j].vertoffset);
#endif

         offset += draw_vf_format_info[format].attrsize;
         j++;
      }
   }

   vf->attr_count = j;
   vf->vertex_stride = vertex_stride ? vertex_stride : offset;
   vf->emit = choose_emit_func;

   assert(vf->vertex_stride >= offset);
   return vf->vertex_stride;
}
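

/* Illustrative only (a sketch, not part of this module): building a
 * hypothetical two-attribute map by hand -- a float[4] position
 * followed by a packed BGRA color -- and letting the vertex stride be
 * derived from the attribute sizes (16 + 4 = 20 bytes here, since
 * vertex_stride is passed as 0).
 */
#if 0
static void
example_set_attributes( struct draw_vertex_fetch *vf )
{
   struct draw_vf_attr_map map[2];

   map[0].attrib = 0;
   map[0].format = DRAW_EMIT_4F;
   map[0].offset = 0;

   map[1].attrib = 1;
   map[1].format = DRAW_EMIT_4UB_4F_BGRA;
   map[1].offset = 0;

   draw_vf_set_vertex_attributes( vf, map, 2, 0 );
}
#endif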


void draw_vf_set_vertex_info( struct draw_vertex_fetch *vf,
                              const struct vertex_info *vinfo,
                              float point_size )
{
   unsigned i, j, k;
   struct draw_vf_attr *a = vf->attr;
   struct draw_vf_attr_map attrs[PIPE_MAX_SHADER_INPUTS];
   unsigned count = 0;  /* for debug/sanity */
   unsigned nr_attrs = 0;

   for (i = 0; i < vinfo->num_attribs; i++) {
      j = vinfo->src_index[i];
      switch (vinfo->emit[i]) {
      case EMIT_OMIT:
         /* no-op */
         break;
      case EMIT_ALL: {
         /* just copy the whole vertex as-is to the vbuf */
         unsigned s = vinfo->size;
         assert(i == 0);
         assert(j == 0);
         /* copy the vertex header */
         /* XXX: we actually don't copy the header, just pad it */
         attrs[nr_attrs].attrib = 0;
         attrs[nr_attrs].format = DRAW_EMIT_PAD;
         attrs[nr_attrs].offset = offsetof(struct vertex_header, data);
         s -= offsetof(struct vertex_header, data)/4;
         count += offsetof(struct vertex_header, data)/4;
         nr_attrs++;
         /* copy the vertex data */
         for (k = 0; k < (s & ~0x3); k += 4) {
            attrs[nr_attrs].attrib = k/4;
            attrs[nr_attrs].format = DRAW_EMIT_4F;
            attrs[nr_attrs].offset = 0;
            nr_attrs++;
            count += 4;
         }
         /* tail */
         /* XXX: actually, this shouldn't be needed */
         attrs[nr_attrs].attrib = k/4;
         attrs[nr_attrs].offset = 0;
         switch (s & 0x3) {
         case 0:
            break;
         case 1:
            attrs[nr_attrs].format = DRAW_EMIT_1F;
            nr_attrs++;
            count += 1;
            break;
         case 2:
            attrs[nr_attrs].format = DRAW_EMIT_2F;
            nr_attrs++;
            count += 2;
            break;
         case 3:
            attrs[nr_attrs].format = DRAW_EMIT_3F;
            nr_attrs++;
            count += 3;
            break;
         }
         break;
      }
      case EMIT_1F:
         attrs[nr_attrs].attrib = j;
         attrs[nr_attrs].format = DRAW_EMIT_1F;
         attrs[nr_attrs].offset = 0;
         nr_attrs++;
         count++;
         break;
      case EMIT_1F_PSIZE:
         attrs[nr_attrs].attrib = j;
         attrs[nr_attrs].format = DRAW_EMIT_1F_CONST;
         attrs[nr_attrs].offset = 0;
         attrs[nr_attrs].data.f[0] = point_size;
         nr_attrs++;
         count++;
         break;
      case EMIT_2F:
         attrs[nr_attrs].attrib = j;
         attrs[nr_attrs].format = DRAW_EMIT_2F;
         attrs[nr_attrs].offset = 0;
         nr_attrs++;
         count += 2;
         break;
      case EMIT_3F:
         attrs[nr_attrs].attrib = j;
         attrs[nr_attrs].format = DRAW_EMIT_3F;
         attrs[nr_attrs].offset = 0;
         nr_attrs++;
         count += 3;
         break;
      case EMIT_4F:
         attrs[nr_attrs].attrib = j;
         attrs[nr_attrs].format = DRAW_EMIT_4F;
         attrs[nr_attrs].offset = 0;
         nr_attrs++;
         count += 4;
         break;
      case EMIT_4UB:
         attrs[nr_attrs].attrib = j;
         attrs[nr_attrs].format = DRAW_EMIT_4UB_4F_BGRA;
         attrs[nr_attrs].offset = 0;
         nr_attrs++;
         count += 1;
         break;
      default:
         assert(0);
      }
   }

   assert(count == vinfo->size);

   draw_vf_set_vertex_attributes(vf,
                                 attrs,
                                 nr_attrs,
                                 vinfo->size * sizeof(float) );

   for (j = 0; j < vf->attr_count; j++) {
      a[j].inputsize = 4;
      a[j].do_insert = a[j].insert[4 - 1];
      if (a[j].isconst) {
         a[j].inputptr = a[j].data;
         a[j].inputstride = 0;
      }
   }
}
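
/* For example (illustrative numbers only, not tied to any particular
 * driver): a vinfo emitting EMIT_4F position, EMIT_4UB color and
 * EMIT_1F_PSIZE has vinfo->size == 4 + 1 + 1 == 6, so the code above
 * builds a 6-float (24-byte) hardware vertex, with the point size slot
 * filled in from the constant point_size argument.
 */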


#if 0
/* Set attribute pointers, adjusted for start position:
 */
void draw_vf_set_sources( struct draw_vertex_fetch *vf,
                          GLvector4f * const sources[],
                          unsigned start )
{
   struct draw_vf_attr *a = vf->attr;
   unsigned j;

   for (j = 0; j < vf->attr_count; j++) {
      const GLvector4f *vptr = sources[a[j].attrib];

      if ((a[j].inputstride != vptr->stride) ||
          (a[j].inputsize != vptr->size))
         vf->emit = choose_emit_func;

      a[j].inputstride = vptr->stride;
      a[j].inputsize = vptr->size;
      a[j].do_insert = a[j].insert[vptr->size - 1];
      a[j].inputptr = ((uint8_t *)vptr->data) + start * vptr->stride;
   }
}
#endif


/**
 * Emit a vertex to dest.
 */
void draw_vf_emit_vertex( struct draw_vertex_fetch *vf,
                          struct vertex_header *vertex,
                          void *dest )
{
   struct draw_vf_attr *a = vf->attr;
   unsigned j;

   for (j = 0; j < vf->attr_count; j++) {
      if (!a[j].isconst) {
         a[j].inputptr = (uint8_t *)&vertex->data[a[j].attrib][0];
         a[j].inputstride = 0;  /* XXX: one-vertex-max ATM */
      }
   }

   vf->emit( vf, 1, (uint8_t*) dest );
}



struct draw_vertex_fetch *draw_vf_create( void )
{
   struct draw_vertex_fetch *vf = CALLOC_STRUCT(draw_vertex_fetch);
   unsigned i;

   for (i = 0; i < PIPE_ATTRIB_MAX; i++)
      vf->attr[i].vf = vf;

   vf->identity[0] = 0.0;
   vf->identity[1] = 0.0;
   vf->identity[2] = 0.0;
   vf->identity[3] = 1.0;

   vf->codegen_emit = NULL;

#ifdef USE_SSE_ASM
   if (!GETENV("GALLIUM_NO_CODEGEN"))
      vf->codegen_emit = draw_vf_generate_sse_emit;
#endif

   return vf;
}


void draw_vf_destroy( struct draw_vertex_fetch *vf )
{
   struct draw_vf_fastpath *fp, *tmp;

   for (fp = vf->fastpath ; fp ; fp = tmp) {
      tmp = fp->next;
      FREE(fp->attr);

      /* KW: At the moment, fp->func is constrained to be allocated by
       * rtasm_exec_alloc(), as the hardwired fastpaths in
       * t_vertex_generic.c are handled specially.  It would be nice
       * to unify them, but this probably won't change until this
       * module gets another overhaul.
       */
      //rtasm_exec_free((void *) fp->func);
      FREE(fp);
   }

   vf->fastpath = NULL;
   FREE(vf);
}
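

/* A minimal usage sketch of this module (illustrative only; the real
 * caller is the vertex-emit code elsewhere in the draw module, and the
 * example_emit_vertices() helper below is hypothetical):
 */
#if 0
static void
example_emit_vertices( const struct vertex_info *vinfo,
                       struct vertex_header **vertices,
                       unsigned count,
                       void *buffer )
{
   struct draw_vertex_fetch *vf = draw_vf_create();
   unsigned i;

   /* Describe the hardware vertex layout once per state change: */
   draw_vf_set_vertex_info( vf, vinfo, /* point_size = */ 1.0f );

   /* Emit each post-transform vertex into the destination buffer,
    * advancing by the stride derived from vinfo->size:
    */
   for (i = 0; i < count; i++)
      draw_vf_emit_vertex( vf, vertices[i],
                           (uint8_t *)buffer + i * vf->vertex_stride );

   draw_vf_destroy( vf );
}
#endif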