1 /**************************************************************************
3 * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */
33 #include "util/u_memory.h"
34 #include "util/u_format.h"
35 #include "util/u_math.h"
36 #include "pipe/p_state.h"
37 #include "translate.h"
/* Fetch one attribute from a vertex in memory, converting it to float[4].
 * Matches the signature of util_format_description::fetch_rgba_float
 * (the (i, j) pair is a texel position, unused here and passed as 0, 0).
 */
typedef void (*fetch_func)(float *dst,
                           const uint8_t *src,
                           unsigned i, unsigned j);

/* Pack a float[4] attribute into the output vertex format at ptr. */
typedef void (*emit_func)(const float *attrib,
                          void *ptr);
49 struct translate_generic
{
50 struct translate translate
;
53 enum translate_element_type type
;
57 unsigned input_offset
;
58 unsigned instance_divisor
;
61 unsigned output_offset
;
63 const uint8_t *input_ptr
;
64 unsigned input_stride
;
67 } attrib
[PIPE_MAX_ATTRIBS
];
/* Downcast the public translate object to our private struct.
 * Safe because translate_generic_create() only ever hands out the
 * embedded translate member of a struct translate_generic.
 */
static struct translate_generic *translate_generic( struct translate *translate )
{
   return (struct translate_generic *)translate;
}
/**
 * Fetch a float[4] vertex attribute from memory, doing format/type
 * conversion as needed.
 *
 * This is probably needed/duplicated elsewhere, eg format
 * conversion, texture sampling etc.
 */
/* Generate a static emit function for one output format:
 * converts SZ float components with the TO() macro and stores them
 * as TYPE values.  NAME mirrors the PIPE_FORMAT_* suffix.
 */
#define ATTRIB( NAME, SZ, TYPE, TO )            \
static void                                     \
emit_##NAME(const float *attrib, void *ptr)     \
{                                               \
   unsigned i;                                  \
   TYPE *out = (TYPE *)ptr;                     \
                                                \
   for (i = 0; i < SZ; i++) {                   \
      out[i] = TO(attrib[i]);                   \
   }                                            \
}
/* Float -> output-type conversion helpers used by the ATTRIB table.
 * Arguments are parenthesized so expressions like TO_8_UNORM(a + b)
 * bind correctly (the cast/multiply otherwise mis-associates).
 */
#define TO_64_FLOAT(x)   ((double) (x))
#define TO_32_FLOAT(x)   (x)

#define TO_8_USCALED(x)  ((unsigned char) (x))
#define TO_16_USCALED(x) ((unsigned short) (x))
#define TO_32_USCALED(x) ((unsigned int) (x))

#define TO_8_SSCALED(x)  ((char) (x))
#define TO_16_SSCALED(x) ((short) (x))
#define TO_32_SSCALED(x) ((int) (x))

#define TO_8_UNORM(x)    ((unsigned char) ((x) * 255.0f))
#define TO_16_UNORM(x)   ((unsigned short) ((x) * 65535.0f))
#define TO_32_UNORM(x)   ((unsigned int) ((x) * 4294967295.0f))

#define TO_8_SNORM(x)    ((char) ((x) * 127.0f))
#define TO_16_SNORM(x)   ((short) ((x) * 32767.0f))
#define TO_32_SNORM(x)   ((int) ((x) * 2147483647.0f))

#define TO_32_FIXED(x)   ((int) ((x) * 65536.0f))
120 ATTRIB( R64G64B64A64_FLOAT
, 4, double, TO_64_FLOAT
)
121 ATTRIB( R64G64B64_FLOAT
, 3, double, TO_64_FLOAT
)
122 ATTRIB( R64G64_FLOAT
, 2, double, TO_64_FLOAT
)
123 ATTRIB( R64_FLOAT
, 1, double, TO_64_FLOAT
)
125 ATTRIB( R32G32B32A32_FLOAT
, 4, float, TO_32_FLOAT
)
126 ATTRIB( R32G32B32_FLOAT
, 3, float, TO_32_FLOAT
)
127 ATTRIB( R32G32_FLOAT
, 2, float, TO_32_FLOAT
)
128 ATTRIB( R32_FLOAT
, 1, float, TO_32_FLOAT
)
130 ATTRIB( R32G32B32A32_USCALED
, 4, unsigned, TO_32_USCALED
)
131 ATTRIB( R32G32B32_USCALED
, 3, unsigned, TO_32_USCALED
)
132 ATTRIB( R32G32_USCALED
, 2, unsigned, TO_32_USCALED
)
133 ATTRIB( R32_USCALED
, 1, unsigned, TO_32_USCALED
)
135 ATTRIB( R32G32B32A32_SSCALED
, 4, int, TO_32_SSCALED
)
136 ATTRIB( R32G32B32_SSCALED
, 3, int, TO_32_SSCALED
)
137 ATTRIB( R32G32_SSCALED
, 2, int, TO_32_SSCALED
)
138 ATTRIB( R32_SSCALED
, 1, int, TO_32_SSCALED
)
140 ATTRIB( R32G32B32A32_UNORM
, 4, unsigned, TO_32_UNORM
)
141 ATTRIB( R32G32B32_UNORM
, 3, unsigned, TO_32_UNORM
)
142 ATTRIB( R32G32_UNORM
, 2, unsigned, TO_32_UNORM
)
143 ATTRIB( R32_UNORM
, 1, unsigned, TO_32_UNORM
)
145 ATTRIB( R32G32B32A32_SNORM
, 4, int, TO_32_SNORM
)
146 ATTRIB( R32G32B32_SNORM
, 3, int, TO_32_SNORM
)
147 ATTRIB( R32G32_SNORM
, 2, int, TO_32_SNORM
)
148 ATTRIB( R32_SNORM
, 1, int, TO_32_SNORM
)
150 ATTRIB( R16G16B16A16_USCALED
, 4, ushort
, TO_16_USCALED
)
151 ATTRIB( R16G16B16_USCALED
, 3, ushort
, TO_16_USCALED
)
152 ATTRIB( R16G16_USCALED
, 2, ushort
, TO_16_USCALED
)
153 ATTRIB( R16_USCALED
, 1, ushort
, TO_16_USCALED
)
155 ATTRIB( R16G16B16A16_SSCALED
, 4, short, TO_16_SSCALED
)
156 ATTRIB( R16G16B16_SSCALED
, 3, short, TO_16_SSCALED
)
157 ATTRIB( R16G16_SSCALED
, 2, short, TO_16_SSCALED
)
158 ATTRIB( R16_SSCALED
, 1, short, TO_16_SSCALED
)
160 ATTRIB( R16G16B16A16_UNORM
, 4, ushort
, TO_16_UNORM
)
161 ATTRIB( R16G16B16_UNORM
, 3, ushort
, TO_16_UNORM
)
162 ATTRIB( R16G16_UNORM
, 2, ushort
, TO_16_UNORM
)
163 ATTRIB( R16_UNORM
, 1, ushort
, TO_16_UNORM
)
165 ATTRIB( R16G16B16A16_SNORM
, 4, short, TO_16_SNORM
)
166 ATTRIB( R16G16B16_SNORM
, 3, short, TO_16_SNORM
)
167 ATTRIB( R16G16_SNORM
, 2, short, TO_16_SNORM
)
168 ATTRIB( R16_SNORM
, 1, short, TO_16_SNORM
)
170 ATTRIB( R8G8B8A8_USCALED
, 4, ubyte
, TO_8_USCALED
)
171 ATTRIB( R8G8B8_USCALED
, 3, ubyte
, TO_8_USCALED
)
172 ATTRIB( R8G8_USCALED
, 2, ubyte
, TO_8_USCALED
)
173 ATTRIB( R8_USCALED
, 1, ubyte
, TO_8_USCALED
)
175 ATTRIB( R8G8B8A8_SSCALED
, 4, char, TO_8_SSCALED
)
176 ATTRIB( R8G8B8_SSCALED
, 3, char, TO_8_SSCALED
)
177 ATTRIB( R8G8_SSCALED
, 2, char, TO_8_SSCALED
)
178 ATTRIB( R8_SSCALED
, 1, char, TO_8_SSCALED
)
180 ATTRIB( R8G8B8A8_UNORM
, 4, ubyte
, TO_8_UNORM
)
181 ATTRIB( R8G8B8_UNORM
, 3, ubyte
, TO_8_UNORM
)
182 ATTRIB( R8G8_UNORM
, 2, ubyte
, TO_8_UNORM
)
183 ATTRIB( R8_UNORM
, 1, ubyte
, TO_8_UNORM
)
185 ATTRIB( R8G8B8A8_SNORM
, 4, char, TO_8_SNORM
)
186 ATTRIB( R8G8B8_SNORM
, 3, char, TO_8_SNORM
)
187 ATTRIB( R8G8_SNORM
, 2, char, TO_8_SNORM
)
188 ATTRIB( R8_SNORM
, 1, char, TO_8_SNORM
)
190 ATTRIB( A8R8G8B8_UNORM
, 4, ubyte
, TO_8_UNORM
)
191 /*ATTRIB( R8G8B8A8_UNORM, 4, ubyte, TO_8_UNORM )*/
195 emit_B8G8R8A8_UNORM( const float *attrib
, void *ptr
)
197 ubyte
*out
= (ubyte
*)ptr
;
198 out
[2] = TO_8_UNORM(attrib
[0]);
199 out
[1] = TO_8_UNORM(attrib
[1]);
200 out
[0] = TO_8_UNORM(attrib
[2]);
201 out
[3] = TO_8_UNORM(attrib
[3]);
/* Fallback emit function for unrecognized output formats. */
static void
emit_NULL( const float *attrib, void *ptr )
{
   /* do nothing is the only sensible option */
}
210 static emit_func
get_emit_func( enum pipe_format format
)
213 case PIPE_FORMAT_R64_FLOAT
:
214 return &emit_R64_FLOAT
;
215 case PIPE_FORMAT_R64G64_FLOAT
:
216 return &emit_R64G64_FLOAT
;
217 case PIPE_FORMAT_R64G64B64_FLOAT
:
218 return &emit_R64G64B64_FLOAT
;
219 case PIPE_FORMAT_R64G64B64A64_FLOAT
:
220 return &emit_R64G64B64A64_FLOAT
;
222 case PIPE_FORMAT_R32_FLOAT
:
223 return &emit_R32_FLOAT
;
224 case PIPE_FORMAT_R32G32_FLOAT
:
225 return &emit_R32G32_FLOAT
;
226 case PIPE_FORMAT_R32G32B32_FLOAT
:
227 return &emit_R32G32B32_FLOAT
;
228 case PIPE_FORMAT_R32G32B32A32_FLOAT
:
229 return &emit_R32G32B32A32_FLOAT
;
231 case PIPE_FORMAT_R32_UNORM
:
232 return &emit_R32_UNORM
;
233 case PIPE_FORMAT_R32G32_UNORM
:
234 return &emit_R32G32_UNORM
;
235 case PIPE_FORMAT_R32G32B32_UNORM
:
236 return &emit_R32G32B32_UNORM
;
237 case PIPE_FORMAT_R32G32B32A32_UNORM
:
238 return &emit_R32G32B32A32_UNORM
;
240 case PIPE_FORMAT_R32_USCALED
:
241 return &emit_R32_USCALED
;
242 case PIPE_FORMAT_R32G32_USCALED
:
243 return &emit_R32G32_USCALED
;
244 case PIPE_FORMAT_R32G32B32_USCALED
:
245 return &emit_R32G32B32_USCALED
;
246 case PIPE_FORMAT_R32G32B32A32_USCALED
:
247 return &emit_R32G32B32A32_USCALED
;
249 case PIPE_FORMAT_R32_SNORM
:
250 return &emit_R32_SNORM
;
251 case PIPE_FORMAT_R32G32_SNORM
:
252 return &emit_R32G32_SNORM
;
253 case PIPE_FORMAT_R32G32B32_SNORM
:
254 return &emit_R32G32B32_SNORM
;
255 case PIPE_FORMAT_R32G32B32A32_SNORM
:
256 return &emit_R32G32B32A32_SNORM
;
258 case PIPE_FORMAT_R32_SSCALED
:
259 return &emit_R32_SSCALED
;
260 case PIPE_FORMAT_R32G32_SSCALED
:
261 return &emit_R32G32_SSCALED
;
262 case PIPE_FORMAT_R32G32B32_SSCALED
:
263 return &emit_R32G32B32_SSCALED
;
264 case PIPE_FORMAT_R32G32B32A32_SSCALED
:
265 return &emit_R32G32B32A32_SSCALED
;
267 case PIPE_FORMAT_R16_UNORM
:
268 return &emit_R16_UNORM
;
269 case PIPE_FORMAT_R16G16_UNORM
:
270 return &emit_R16G16_UNORM
;
271 case PIPE_FORMAT_R16G16B16_UNORM
:
272 return &emit_R16G16B16_UNORM
;
273 case PIPE_FORMAT_R16G16B16A16_UNORM
:
274 return &emit_R16G16B16A16_UNORM
;
276 case PIPE_FORMAT_R16_USCALED
:
277 return &emit_R16_USCALED
;
278 case PIPE_FORMAT_R16G16_USCALED
:
279 return &emit_R16G16_USCALED
;
280 case PIPE_FORMAT_R16G16B16_USCALED
:
281 return &emit_R16G16B16_USCALED
;
282 case PIPE_FORMAT_R16G16B16A16_USCALED
:
283 return &emit_R16G16B16A16_USCALED
;
285 case PIPE_FORMAT_R16_SNORM
:
286 return &emit_R16_SNORM
;
287 case PIPE_FORMAT_R16G16_SNORM
:
288 return &emit_R16G16_SNORM
;
289 case PIPE_FORMAT_R16G16B16_SNORM
:
290 return &emit_R16G16B16_SNORM
;
291 case PIPE_FORMAT_R16G16B16A16_SNORM
:
292 return &emit_R16G16B16A16_SNORM
;
294 case PIPE_FORMAT_R16_SSCALED
:
295 return &emit_R16_SSCALED
;
296 case PIPE_FORMAT_R16G16_SSCALED
:
297 return &emit_R16G16_SSCALED
;
298 case PIPE_FORMAT_R16G16B16_SSCALED
:
299 return &emit_R16G16B16_SSCALED
;
300 case PIPE_FORMAT_R16G16B16A16_SSCALED
:
301 return &emit_R16G16B16A16_SSCALED
;
303 case PIPE_FORMAT_R8_UNORM
:
304 return &emit_R8_UNORM
;
305 case PIPE_FORMAT_R8G8_UNORM
:
306 return &emit_R8G8_UNORM
;
307 case PIPE_FORMAT_R8G8B8_UNORM
:
308 return &emit_R8G8B8_UNORM
;
309 case PIPE_FORMAT_R8G8B8A8_UNORM
:
310 return &emit_R8G8B8A8_UNORM
;
312 case PIPE_FORMAT_R8_USCALED
:
313 return &emit_R8_USCALED
;
314 case PIPE_FORMAT_R8G8_USCALED
:
315 return &emit_R8G8_USCALED
;
316 case PIPE_FORMAT_R8G8B8_USCALED
:
317 return &emit_R8G8B8_USCALED
;
318 case PIPE_FORMAT_R8G8B8A8_USCALED
:
319 return &emit_R8G8B8A8_USCALED
;
321 case PIPE_FORMAT_R8_SNORM
:
322 return &emit_R8_SNORM
;
323 case PIPE_FORMAT_R8G8_SNORM
:
324 return &emit_R8G8_SNORM
;
325 case PIPE_FORMAT_R8G8B8_SNORM
:
326 return &emit_R8G8B8_SNORM
;
327 case PIPE_FORMAT_R8G8B8A8_SNORM
:
328 return &emit_R8G8B8A8_SNORM
;
330 case PIPE_FORMAT_R8_SSCALED
:
331 return &emit_R8_SSCALED
;
332 case PIPE_FORMAT_R8G8_SSCALED
:
333 return &emit_R8G8_SSCALED
;
334 case PIPE_FORMAT_R8G8B8_SSCALED
:
335 return &emit_R8G8B8_SSCALED
;
336 case PIPE_FORMAT_R8G8B8A8_SSCALED
:
337 return &emit_R8G8B8A8_SSCALED
;
339 case PIPE_FORMAT_B8G8R8A8_UNORM
:
340 return &emit_B8G8R8A8_UNORM
;
342 case PIPE_FORMAT_A8R8G8B8_UNORM
:
343 return &emit_A8R8G8B8_UNORM
;
354 * Fetch vertex attributes for 'count' vertices.
356 static void PIPE_CDECL
generic_run_elts( struct translate
*translate
,
357 const unsigned *elts
,
359 unsigned instance_id
,
360 void *output_buffer
)
362 struct translate_generic
*tg
= translate_generic(translate
);
363 char *vert
= output_buffer
;
364 unsigned nr_attrs
= tg
->nr_attrib
;
368 /* loop over vertex attributes (vertex shader inputs)
370 for (i
= 0; i
< count
; i
++) {
371 const unsigned elt
= *elts
++;
373 for (attr
= 0; attr
< nr_attrs
; attr
++) {
375 char *dst
= vert
+ tg
->attrib
[attr
].output_offset
;
377 if (tg
->attrib
[attr
].type
== TRANSLATE_ELEMENT_NORMAL
) {
381 if (tg
->attrib
[attr
].instance_divisor
) {
382 index
= instance_id
/ tg
->attrib
[attr
].instance_divisor
;
387 /* clamp to void going out of bounds */
388 index
= MIN2(index
, tg
->attrib
[attr
].max_index
);
390 src
= tg
->attrib
[attr
].input_ptr
+
391 tg
->attrib
[attr
].input_stride
* index
;
393 tg
->attrib
[attr
].fetch( data
, src
, 0, 0 );
396 debug_printf("Fetch elt attr %d from %p stride %d div %u max %u index %d: "
397 " %f, %f, %f, %f \n",
399 tg
->attrib
[attr
].input_ptr
,
400 tg
->attrib
[attr
].input_stride
,
401 tg
->attrib
[attr
].instance_divisor
,
402 tg
->attrib
[attr
].max_index
,
404 data
[0], data
[1],data
[2], data
[3]);
406 data
[0] = (float)instance_id
;
410 debug_printf("vert %d/%d attr %d: %f %f %f %f\n",
411 i
, elt
, attr
, data
[0], data
[1], data
[2], data
[3]);
413 tg
->attrib
[attr
].emit( data
, dst
);
415 vert
+= tg
->translate
.key
.output_stride
;
421 static void PIPE_CDECL
generic_run( struct translate
*translate
,
424 unsigned instance_id
,
425 void *output_buffer
)
427 struct translate_generic
*tg
= translate_generic(translate
);
428 char *vert
= output_buffer
;
429 unsigned nr_attrs
= tg
->nr_attrib
;
433 /* loop over vertex attributes (vertex shader inputs)
435 for (i
= 0; i
< count
; i
++) {
436 unsigned elt
= start
+ i
;
438 for (attr
= 0; attr
< nr_attrs
; attr
++) {
440 char *dst
= vert
+ tg
->attrib
[attr
].output_offset
;
442 if (tg
->attrib
[attr
].type
== TRANSLATE_ELEMENT_NORMAL
) {
446 if (tg
->attrib
[attr
].instance_divisor
) {
447 index
= instance_id
/ tg
->attrib
[attr
].instance_divisor
;
453 /* clamp to void going out of bounds */
454 index
= MIN2(index
, tg
->attrib
[attr
].max_index
);
456 src
= tg
->attrib
[attr
].input_ptr
+
457 tg
->attrib
[attr
].input_stride
* index
;
459 tg
->attrib
[attr
].fetch( data
, src
, 0, 0 );
462 debug_printf("Fetch linear attr %d from %p stride %d index %d: "
463 " %f, %f, %f, %f \n",
465 tg
->attrib
[attr
].input_ptr
,
466 tg
->attrib
[attr
].input_stride
,
468 data
[0], data
[1],data
[2], data
[3]);
470 data
[0] = (float)instance_id
;
474 debug_printf("vert %d attr %d: %f %f %f %f\n",
475 i
, attr
, data
[0], data
[1], data
[2], data
[3]);
477 tg
->attrib
[attr
].emit( data
, dst
);
480 vert
+= tg
->translate
.key
.output_stride
;
486 static void generic_set_buffer( struct translate
*translate
,
492 struct translate_generic
*tg
= translate_generic(translate
);
495 for (i
= 0; i
< tg
->nr_attrib
; i
++) {
496 if (tg
->attrib
[i
].buffer
== buf
) {
497 tg
->attrib
[i
].input_ptr
= ((const uint8_t *)ptr
+
498 tg
->attrib
[i
].input_offset
);
499 tg
->attrib
[i
].input_stride
= stride
;
500 tg
->attrib
[i
].max_index
= max_index
;
/* Destructor: frees the translate_generic allocated by
 * translate_generic_create (translate is its first member).
 */
static void generic_release( struct translate *translate )
{
   /* Refcount?
    */
   FREE(translate);
}
513 struct translate
*translate_generic_create( const struct translate_key
*key
)
515 struct translate_generic
*tg
= CALLOC_STRUCT(translate_generic
);
521 tg
->translate
.key
= *key
;
522 tg
->translate
.release
= generic_release
;
523 tg
->translate
.set_buffer
= generic_set_buffer
;
524 tg
->translate
.run_elts
= generic_run_elts
;
525 tg
->translate
.run
= generic_run
;
527 for (i
= 0; i
< key
->nr_elements
; i
++) {
528 const struct util_format_description
*format_desc
=
529 util_format_description(key
->element
[i
].input_format
);
532 assert(format_desc
->fetch_rgba_float
);
534 tg
->attrib
[i
].type
= key
->element
[i
].type
;
536 tg
->attrib
[i
].fetch
= format_desc
->fetch_rgba_float
;
537 tg
->attrib
[i
].buffer
= key
->element
[i
].input_buffer
;
538 tg
->attrib
[i
].input_offset
= key
->element
[i
].input_offset
;
539 tg
->attrib
[i
].instance_divisor
= key
->element
[i
].instance_divisor
;
541 tg
->attrib
[i
].emit
= get_emit_func(key
->element
[i
].output_format
);
542 tg
->attrib
[i
].output_offset
= key
->element
[i
].output_offset
;
546 tg
->nr_attrib
= key
->nr_elements
;
549 return &tg
->translate
;