--- /dev/null
+/*
+ * This file is automatically generated from the Mesa internal type
+ * definitions. Do not edit directly.
+ */
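+
+/*
+ * Editorial sketch, not part of the generated output: offset tables like
+ * the ones below are typically produced by a small host program that
+ * prints offsetof() values for the real Mesa structures.  The include
+ * path and the GLvector4f field names used here are assumptions made
+ * for illustration only.
+ */
+#if 0
+#include <stdio.h>
+#include <stddef.h>
+#include "math/m_vector.h"	/* assumed home of GLvector4f */
+
+int main(void)
+{
+   printf("#define V4F_DATA %lu\n",   (unsigned long) offsetof(GLvector4f, data));
+   printf("#define V4F_START %lu\n",  (unsigned long) offsetof(GLvector4f, start));
+   printf("#define V4F_COUNT %lu\n",  (unsigned long) offsetof(GLvector4f, count));
+   printf("#define V4F_STRIDE %lu\n", (unsigned long) offsetof(GLvector4f, stride));
+   printf("#define V4F_SIZE %lu\n",   (unsigned long) offsetof(GLvector4f, size));
+   printf("#define V4F_FLAGS %lu\n",  (unsigned long) offsetof(GLvector4f, flags));
+   return 0;
+}
+#endif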
+
+#ifndef __ASM_TYPES_H__
+#define __ASM_TYPES_H__
+
+
+
+/* =============================================================
+ * Offsets for GLcontext
+ */
+
+#define CTX_DRIVER_CTX 904
+
+#define CTX_LIGHT_ENABLED 38592
+#define CTX_LIGHT_SHADE_MODEL 38596
+#define CTX_LIGHT_COLOR_MAT_FACE 38600
+#define CTX_LIGHT_COLOR_MAT_MODE 38604
+#define CTX_LIGHT_COLOR_MAT_MASK 38608
+#define CTX_LIGHT_COLOR_MAT_ENABLED 38612
+#define CTX_LIGHT_ENABLED_LIST 38616
+#define CTX_LIGHT_NEED_VERTS 42973
+#define CTX_LIGHT_FLAGS 42976
+#define CTX_LIGHT_BASE_COLOR 42980
+
+
+/* =============================================================
+ * Offsets for struct vertex_buffer
+ */
+
+#define VB_SIZE 0
+#define VB_COUNT 4
+
+#define VB_ELTS 8
+#define VB_OBJ_PTR 12
+#define VB_EYE_PTR 16
+#define VB_CLIP_PTR 20
+#define VB_PROJ_CLIP_PTR 24
+#define VB_CLIP_OR_MASK 28
+#define VB_CLIP_MASK 32
+#define VB_NORMAL_PTR 36
+#define VB_EDGE_FLAG 44
+#define VB_TEX0_COORD_PTR 48
+#define VB_TEX1_COORD_PTR 52
+#define VB_TEX2_COORD_PTR 56
+#define VB_TEX3_COORD_PTR 60
+#define VB_INDEX_PTR 80
+#define VB_COLOR_PTR 88
+#define VB_SECONDARY_COLOR_PTR 96
+#define VB_FOG_COORD_PTR 108
+#define VB_POINT_SIZE_PTR 104
+#define VB_PRIMITIVE 112
+
+#define VB_LAST_CLIPPED 244
+
+/*
+ * Flags for struct vertex_buffer
+ */
+
+#define VERT_BIT_OBJ 0x1
+#define VERT_BIT_NORM 0x4
+#define VERT_BIT_RGBA 0x8
+#define VERT_BIT_SPEC_RGB 0x10
+#define VERT_BIT_FOG_COORD 0x20
+#define VERT_BIT_TEX0 0x100
+#define VERT_BIT_TEX1 0x200
+#define VERT_BIT_TEX2 0x400
+#define VERT_BIT_TEX3 0x800
+
+
+/* =============================================================
+ * Offsets for GLvector4f
+ */
+
+#define V4F_DATA 0
+#define V4F_START 4
+#define V4F_COUNT 8
+#define V4F_STRIDE 12
+#define V4F_SIZE 16
+#define V4F_FLAGS 20
+
+/*
+ * Flags for GLvector4f
+ */
+
+#define VEC_MALLOC 0x10
+#define VEC_NOT_WRITEABLE 0x40
+#define VEC_BAD_STRIDE 0x100
+
+#define VEC_SIZE_1 0x1
+#define VEC_SIZE_2 0x3
+#define VEC_SIZE_3 0x7
+#define VEC_SIZE_4 0xf
+
+
+/* =============================================================
+ * Offsets for GLmatrix
+ */
+
+#define MATRIX_DATA 0
+#define MATRIX_INV 4
+#define MATRIX_FLAGS 8
+#define MATRIX_TYPE 12
+
+
+/* =============================================================
+ * Offsets for struct gl_light
+ */
+
+#define LIGHT_NEXT 0
+#define LIGHT_PREV 4
+
+#define LIGHT_AMBIENT 8
+#define LIGHT_DIFFUSE 24
+#define LIGHT_SPECULAR 40
+#define LIGHT_EYE_POSITION 56
+#define LIGHT_EYE_DIRECTION 72
+#define LIGHT_SPOT_EXPONENT 88
+#define LIGHT_SPOT_CUTOFF 92
+#define LIGHT_COS_CUTOFF 96
+#define LIGHT_CONST_ATTEN 100
+#define LIGHT_LINEAR_ATTEN 104
+#define LIGHT_QUADRATIC_ATTEN 108
+#define LIGHT_ENABLED 112
+
+#define LIGHT_FLAGS 116
+
+#define LIGHT_POSITION 120
+#define LIGHT_VP_INF_NORM 136
+#define LIGHT_H_INF_NORM 148
+#define LIGHT_NORM_DIRECTION 160
+#define LIGHT_VP_INF_SPOT_ATTEN 176
+
+#define LIGHT_SPOT_EXP_TABLE 180
+#define LIGHT_MAT_AMBIENT 4276
+#define LIGHT_MAT_DIFFUSE 4300
+#define LIGHT_MAT_SPECULAR 4324
+
+#define SIZEOF_GL_LIGHT 4356
+
+/*
+ * Flags for struct gl_light
+ */
+
+#define LIGHT_SPOT 0x1
+#define LIGHT_LOCAL_VIEWER 0x2
+#define LIGHT_POSITIONAL 0x4
+
+#define LIGHT_NEED_VERTICES 0x6
+
+
+/* =============================================================
+ * Offsets for struct gl_lightmodel
+ */
+
+#define LIGHT_MODEL_AMBIENT 0
+#define LIGHT_MODEL_LOCAL_VIEWER 16
+#define LIGHT_MODEL_TWO_SIDE 17
+#define LIGHT_MODEL_COLOR_CONTROL 20
+
+
+#endif /* __ASM_TYPES_H__ */
--- /dev/null
+/* $Id: x86-64.c,v 1.1 2005/05/07 16:59:59 brianp Exp $ */
+
+/*
+ * Mesa 3-D graphics library
+ * Version: 6.3
+ *
+ * Copyright (C) 1999-2003 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * x86-64 optimizations shamelessly converted from x86/sse/3dnow assembly by
+ * Mikko Tiihonen
+ */
+
+#ifdef USE_X86_64_ASM
+
+#include "glheader.h"
+#include "context.h"
+#include "math/m_xform.h"
+#include "tnl/t_context.h"
+#include "x86-64.h"
+#include "../x86/common_x86_macros.h"
+
+#ifdef DEBUG
+#include "math/m_debug.h"
+#endif
+
+DECLARE_XFORM_GROUP( x86_64, 4 )
+
+#endif
+
+/*
+extern void _mesa_x86_64_transform_points4_general( XFORM_ARGS );
+extern void _mesa_x86_64_transform_points4_identity( XFORM_ARGS );
+extern void _mesa_x86_64_transform_points4_perspective( XFORM_ARGS );
+extern void _mesa_x86_64_transform_points4_3d( XFORM_ARGS );
+extern void _mesa_x86_64_transform_points4_3d_no_rot( XFORM_ARGS );
+extern void _mesa_x86_64_transform_points4_2d_no_rot( XFORM_ARGS );
+extern void _mesa_x86_64_transform_points4_2d( XFORM_ARGS );
+*/
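+
+/*
+ * Editorial sketch, not part of the original change: how one of these
+ * entry points is invoked.  It assumes the transform_func signature from
+ * math/m_xform.h,
+ *
+ *    void func( GLvector4f *to, const GLfloat m[16], const GLvector4f *from );
+ *
+ * which is what puts the dest in rdi, the matrix in rsi and the source in
+ * rdx on x86-64.  The GLvector4f field names and the buffer setup below
+ * are illustrative assumptions; Mesa itself reaches these functions
+ * through _mesa_transform_tab.
+ */
+#if 0
+static void example_transform_call( void )
+{
+   static GLfloat in[4][4] __attribute__((aligned(16)));   /* source vertices  */
+   static GLfloat out[4][4] __attribute__((aligned(16)));  /* transformed data */
+   static GLfloat m[16] __attribute__((aligned(16)));      /* column-major 4x4 */
+   GLvector4f from, to;
+
+   from.start  = (GLfloat *) in;
+   from.count  = 4;                     /* number of vertices      */
+   from.stride = 4 * sizeof(GLfloat);   /* bytes between vertices  */
+   to.start    = (GLfloat *) out;
+   to.flags    = 0;
+
+   /* dest count, size and flags are written by the assembly itself */
+   _mesa_x86_64_transform_points4_general( &to, m, &from );
+}
+#endif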
+
+#ifdef USE_X86_64_ASM
+static void message( const char *msg )
+{
+ GLboolean debug;
+#ifdef DEBUG
+ debug = GL_TRUE;
+#else
+ if ( _mesa_getenv( "MESA_DEBUG" ) ) {
+ debug = GL_TRUE;
+ } else {
+ debug = GL_FALSE;
+ }
+#endif
+ if ( debug ) {
+ fprintf( stderr, "%s", msg );
+ }
+}
+#endif
+
+
+void _mesa_init_all_x86_64_transform_asm(void)
+{
+#ifdef USE_X86_64_ASM
+
+ if ( _mesa_getenv( "MESA_NO_ASM" ) ) {
+ return;
+ }
+
+ message("Initializing x86-64 optimizations\n");
+
+ ASSIGN_XFORM_GROUP( x86_64, 4 );
+
+ /*
+ _mesa_transform_tab[4][MATRIX_GENERAL] =
+ _mesa_x86_64_transform_points4_general;
+ _mesa_transform_tab[4][MATRIX_IDENTITY] =
+ _mesa_x86_64_transform_points4_identity;
+ _mesa_transform_tab[4][MATRIX_3D] =
+ _mesa_x86_64_transform_points4_3d;
+ _mesa_transform_tab[4][MATRIX_3D_NO_ROT] =
+ _mesa_x86_64_transform_points4_3d_no_rot;
+ _mesa_transform_tab[4][MATRIX_PERSPECTIVE] =
+ _mesa_x86_64_transform_points4_perspective;
+ _mesa_transform_tab[4][MATRIX_2D_NO_ROT] =
+ _mesa_x86_64_transform_points4_2d_no_rot;
+ _mesa_transform_tab[4][MATRIX_2D] =
+ _mesa_x86_64_transform_points4_2d;
+ */
+
+#ifdef DEBUG
+ _math_test_all_transform_functions("x86_64");
+ _math_test_all_cliptest_functions("x86_64");
+ _math_test_all_normal_transform_functions("x86_64");
+#endif
+
+#endif
+}
--- /dev/null
+/* $Id: xform4.S,v 1.1 2005/05/07 16:59:59 brianp Exp $ */
+
+/*
+ * Mesa 3-D graphics library
+ * Version: 3.5
+ *
+ * Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifdef USE_X86_64_ASM
+
+#include "matypes.h"
+
+.text
+
+.align 16
+
+.globl _mesa_x86_64_transform_points4_general
+_mesa_x86_64_transform_points4_general:
+/*
+ * rdi = dest
+ * rsi = matrix
+ * rdx = source
+ */
+ movl V4F_COUNT(%rdx), %ecx /* count */
+ movzx V4F_STRIDE(%rdx), %eax /* stride */
+
+ movl %ecx, V4F_COUNT(%rdi) /* set dest count */
+ movl $4, V4F_SIZE(%rdi) /* set dest size */
+	.byte 0x66, 0x66, 0x66, 0x90	/* manual align += 4 */
+ orl $VEC_SIZE_4, V4F_FLAGS(%rdi)/* set dest flags */
+
+ testl %ecx, %ecx /* verify non-zero count */
+ prefetchnta 64(%rsi)
+ jz p4_general_done
+
+ movq V4F_START(%rdx), %rdx /* ptr to first src vertex */
+ movq V4F_START(%rdi), %rdi /* ptr to first dest vertex */
+
+ prefetch 16(%rdx)
+
+ movaps 0(%rsi), %xmm4 /* m3 | m2 | m1 | m0 */
+ movaps 16(%rsi), %xmm5 /* m7 | m6 | m5 | m4 */
+ .byte 0x66, 0x66, 0x90 /* manual align += 3 */
+ movaps 32(%rsi), %xmm6 /* m11 | m10 | m9 | m8 */
+ movaps 48(%rsi), %xmm7 /* m15 | m14 | m13 | m12 */
+
+p4_general_loop:
+
+ movaps (%rdx), %xmm8 /* ox | oy | oz | ow */
+ prefetchw 16(%rdi)
+
+ pshufd $0x00, %xmm8, %xmm0 /* ox | ox | ox | ox */
+ addq %rax, %rdx
+ pshufd $0x55, %xmm8, %xmm1 /* oy | oy | oy | oy */
+ mulps %xmm4, %xmm0 /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
+	pshufd $0xAA, %xmm8, %xmm2	/* oz | oz | oz | oz */
+ mulps %xmm5, %xmm1 /* oy*m7 | oy*m6 | oy*m5 | oy*m4 */
+ pshufd $0xFF, %xmm8, %xmm3 /* ow | ow | ow | ow */
+ mulps %xmm6, %xmm2 /* oz*m11 | oz*m10 | oz*m9 | oz*m8 */
+ addps %xmm1, %xmm0 /* ox*m3+oy*m7 | ... */
+ mulps %xmm7, %xmm3 /* ow*m15 | ow*m14 | ow*m13 | ow*m12 */
+ addps %xmm2, %xmm0 /* ox*m3+oy*m7+oz*m11 | ... */
+ prefetch 16(%rdx)
+ addps %xmm3, %xmm0 /* ox*m3+oy*m7+oz*m11+ow*m15 | ... */
+
+ movaps %xmm0, (%rdi) /* ->D(3) | ->D(2) | ->D(1) | ->D(0) */
+ addq $16, %rdi
+
+ decl %ecx
+ jnz p4_general_loop
+
+p4_general_done:
+	.byte 0xf3			/* "rep ret" for AMD branch predictors */
+ ret
+
+.section .rodata
+
+.align 16
+p4_constants:
+.byte 0xff, 0xff, 0xff, 0xff
+.byte 0xff, 0xff, 0xff, 0xff
+.byte 0xff, 0xff, 0xff, 0xff
+.byte 0x00, 0x00, 0x00, 0x00
+
+.byte 0x00, 0x00, 0x00, 0x00
+.byte 0x00, 0x00, 0x00, 0x00
+.byte 0x00, 0x00, 0x00, 0x00
+.float 0f+1.0
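+
+/*
+ * (editorial note) The two 16-byte constants above are a mask that keeps
+ * the first three floats of an XMM register while zeroing the fourth, and
+ * a vector holding 1.0 in that fourth slot; they are used by
+ * _mesa_x86_64_transform_points4_3d below.
+ */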
+
+.text
+.align 16
+.globl _mesa_x86_64_transform_points4_3d
+/*
+ * This is slower than _mesa_x86_64_transform_points4_general because it
+ * first forces the last matrix elements m3, m7, m11 and m15 (the bottom
+ * row of the column-major matrix) to 0, 0, 0, 1.
+ */
+_mesa_x86_64_transform_points4_3d:
+
+ leaq p4_constants(%rip), %rax
+
+ prefetchnta 64(%rsi)
+
+ movaps (%rax), %xmm9
+ movaps 16(%rax), %xmm10
+
+ movl V4F_COUNT(%rdx), %ecx /* count */
+ movzx V4F_STRIDE(%rdx), %eax /* stride */
+
+ movl %ecx, V4F_COUNT(%rdi) /* set dest count */
+ movl $4, V4F_SIZE(%rdi) /* set dest size */
+ orl $VEC_SIZE_4, V4F_FLAGS(%rdi)/* set dest flags */
+
+ testl %ecx, %ecx /* verify non-zero count */
+ jz p4_3d_done
+
+ movq V4F_START(%rdx), %rdx /* ptr to first src vertex */
+ movq V4F_START(%rdi), %rdi /* ptr to first dest vertex */
+
+ prefetch 16(%rdx)
+
+ movaps 0(%rsi), %xmm4 /* m3 | m2 | m1 | m0 */
+ movaps 16(%rsi), %xmm5 /* m7 | m6 | m5 | m4 */
+ andps %xmm9, %xmm4 /* 0.0 | m2 | m1 | m0 */
+ movaps 32(%rsi), %xmm6 /* m11 | m10 | m9 | m8 */
+ andps %xmm9, %xmm5 /* 0.0 | m6 | m5 | m4 */
+ movaps 48(%rsi), %xmm7 /* m15 | m14 | m13 | m12 */
+ andps %xmm9, %xmm6 /* 0.0 | m10 | m9 | m8 */
+ andps %xmm9, %xmm7 /* 0.0 | m14 | m13 | m12 */
+ .byte 0x66, 0x66, 0x90 /* manual align += 3 */
+ orps %xmm10, %xmm7 /* 1.0 | m14 | m13 | m12 */
+
+p4_3d_loop:
+
+ movaps (%rdx), %xmm8 /* ox | oy | oz | ow */
+ prefetchw 16(%rdi)
+
+ pshufd $0x00, %xmm8, %xmm0 /* ox | ox | ox | ox */
+ addq %rax, %rdx
+ pshufd $0x55, %xmm8, %xmm1 /* oy | oy | oy | oy */
+ mulps %xmm4, %xmm0 /* ox*m3 | ox*m2 | ox*m1 | ox*m0 */
+	pshufd $0xAA, %xmm8, %xmm2	/* oz | oz | oz | oz */
+ mulps %xmm5, %xmm1 /* oy*m7 | oy*m6 | oy*m5 | oy*m4 */
+ pshufd $0xFF, %xmm8, %xmm3 /* ow | ow | ow | ow */
+ mulps %xmm6, %xmm2 /* oz*m11 | oz*m10 | oz*m9 | oz*m8 */
+ addps %xmm1, %xmm0 /* ox*m3+oy*m7 | ... */
+ mulps %xmm7, %xmm3 /* ow*m15 | ow*m14 | ow*m13 | ow*m12 */
+ addps %xmm2, %xmm0 /* ox*m3+oy*m7+oz*m11 | ... */
+ prefetch 16(%rdx)
+ addps %xmm3, %xmm0 /* ox*m3+oy*m7+oz*m11+ow*m15 | ... */
+
+ movaps %xmm0, (%rdi) /* ->D(3) | ->D(2) | ->D(1) | ->D(0) */
+ addq $16, %rdi
+
+ dec %ecx
+ jnz p4_3d_loop
+
+p4_3d_done:
+	.byte 0xf3			/* "rep ret" for AMD branch predictors */
+ ret
+
+
+.align 16
+.globl _mesa_x86_64_transform_points4_identity
+_mesa_x86_64_transform_points4_identity:
+
+ movl V4F_COUNT(%rdx), %ecx /* count */
+ movzx V4F_STRIDE(%rdx), %eax /* stride */
+
+ movl %ecx, V4F_COUNT(%rdi) /* set dest count */
+ movl $4, V4F_SIZE(%rdi) /* set dest size */
+ orl $VEC_SIZE_4, V4F_FLAGS(%rdi)/* set dest flags */
+
+ test %ecx, %ecx
+ jz p4_identity_done
+
+ movq V4F_START(%rdx), %rsi /* ptr to first src vertex */
+ movq V4F_START(%rdi), %rdi /* ptr to first dest vertex */
+ prefetch 64(%rsi)
+ prefetchw 64(%rdi)
+
+	add %ecx, %ecx			/* two quadwords per 16-byte vertex */
+
+	rep movsq			/* block-copy all vertices src -> dest */
+
+p4_identity_done:
+	.byte 0xf3			/* "rep ret" for AMD branch predictors */
+ ret
+
+
+.align 16
+.globl _mesa_x86_64_transform_points4_3d_no_rot
+_mesa_x86_64_transform_points4_3d_no_rot:
+
+ movl V4F_COUNT(%rdx), %ecx /* count */
+ movzx V4F_STRIDE(%rdx), %eax /* stride */
+
+ movl %ecx, V4F_COUNT(%rdi) /* set dest count */
+ movl $4, V4F_SIZE(%rdi) /* set dest size */
+ .byte 0x66, 0x66, 0x90 /* manual align += 3 */
+ orl $VEC_SIZE_4, V4F_FLAGS(%rdi)/* set dest flags */
+
+ test %ecx, %ecx
+ .byte 0x66, 0x66, 0x90 /* manual align += 3 */
+ jz p4_3d_no_rot_done
+
+ movq V4F_START(%rdx), %rdx /* ptr to first src vertex */
+ movq V4F_START(%rdi), %rdi /* ptr to first dest vertex */
+
+ prefetch (%rdx)
+
+ movd (%rsi), %mm0 /* | m00 */
+ .byte 0x66, 0x66, 0x90 /* manual align += 3 */
+ punpckldq 20(%rsi), %mm0 /* m11 | m00 */
+
+ movd 40(%rsi), %mm2 /* | m22 */
+ movq 48(%rsi), %mm1 /* m31 | m30 */
+
+	punpckldq 56(%rsi), %mm2	/* m32 | m22 */
+
+p4_3d_no_rot_loop:
+
+ prefetchw 32(%rdi)
+
+ movq (%rdx), %mm4 /* x1 | x0 */
+ movq 8(%rdx), %mm5 /* x3 | x2 */
+ movd 12(%rdx), %mm7 /* | x3 */
+
+ movq %mm5, %mm6 /* x3 | x2 */
+ pfmul %mm0, %mm4 /* x1*m11 | x0*m00 */
+
+ punpckhdq %mm6, %mm6 /* x3 | x3 */
+ pfmul %mm2, %mm5 /* x3*m32 | x2*m22 */
+
+ pfmul %mm1, %mm6 /* x3*m31 | x3*m30 */
+ pfacc %mm7, %mm5 /* x3 | x2*m22+x3*m32 */
+
+ pfadd %mm6, %mm4 /* x1*m11+x3*m31 | x0*m00+x3*m30 */
+
+ addq %rax, %rdx
+ movq %mm4, (%rdi) /* write r0, r1 */
+ movq %mm5, 8(%rdi) /* write r2, r3 */
+
+ addq $16, %rdi
+
+ decl %ecx
+ prefetch 32(%rdx)
+ jnz p4_3d_no_rot_loop
+
+p4_3d_no_rot_done:
+ femms
+ ret
+
+
+.align 16
+.globl _mesa_x86_64_transform_points4_perspective
+_mesa_x86_64_transform_points4_perspective:
+
+ movl V4F_COUNT(%rdx), %ecx /* count */
+ movzx V4F_STRIDE(%rdx), %eax /* stride */
+
+ movl %ecx, V4F_COUNT(%rdi) /* set dest count */
+ movl $4, V4F_SIZE(%rdi) /* set dest size */
+ orl $VEC_SIZE_4, V4F_FLAGS(%rdi)/* set dest flags */
+
+ test %ecx, %ecx
+ .byte 0x66, 0x66, 0x90 /* manual align += 3 */
+ jz p4_perspective_done
+
+ movq V4F_START(%rdx), %rdx /* ptr to first src vertex */
+ movq V4F_START(%rdi), %rdi /* ptr to first dest vertex */
+
+ movd (%rsi), %mm0 /* | m00 */
+ pxor %mm7, %mm7 /* 0 | 0 */
+ punpckldq 20(%rsi), %mm0 /* m11 | m00 */
+
+ movq 32(%rsi), %mm2 /* m21 | m20 */
+ prefetch (%rdx)
+
+ movd 40(%rsi), %mm1 /* | m22 */
+
+ .byte 0x66, 0x66, 0x90 /* manual align += 3 */
+ punpckldq 56(%rsi), %mm1 /* m32 | m22 */
+
+
+p4_perspective_loop:
+
+ prefetchw 32(%rdi) /* prefetch 2 vertices ahead */
+
+ movq (%rdx), %mm4 /* x1 | x0 */
+ movq 8(%rdx), %mm5 /* x3 | x2 */
+ movd 8(%rdx), %mm3 /* | x2 */
+
+ movq %mm5, %mm6 /* x3 | x2 */
+ pfmul %mm0, %mm4 /* x1*m11 | x0*m00 */
+
+ punpckldq %mm5, %mm5 /* x2 | x2 */
+
+ pfmul %mm2, %mm5 /* x2*m21 | x2*m20 */
+ pfsubr %mm7, %mm3 /* | -x2 */
+
+ pfmul %mm1, %mm6 /* x3*m32 | x2*m22 */
+ pfadd %mm4, %mm5 /* x1*m11+x2*m21 | x0*m00+x2*m20 */
+
+ pfacc %mm3, %mm6 /* -x2 | x2*m22+x3*m32 */
+
+ movq %mm5, (%rdi) /* write r0, r1 */
+ addq %rax, %rdx
+ movq %mm6, 8(%rdi) /* write r2, r3 */
+
+ addq $16, %rdi
+
+ decl %ecx
+	prefetch 32(%rdx)		/* hopefully the verts are packed (stride == 16) */
+ jnz p4_perspective_loop
+
+p4_perspective_done:
+ femms
+ ret
+
+.align 16
+.globl _mesa_x86_64_transform_points4_2d_no_rot
+_mesa_x86_64_transform_points4_2d_no_rot:
+
+ movl V4F_COUNT(%rdx), %ecx /* count */
+ movzx V4F_STRIDE(%rdx), %eax /* stride */
+
+ movl %ecx, V4F_COUNT(%rdi) /* set dest count */
+ movl $4, V4F_SIZE(%rdi) /* set dest size */
+ orl $VEC_SIZE_4, V4F_FLAGS(%rdi)/* set dest flags */
+
+ test %ecx, %ecx
+ .byte 0x90 /* manual align += 1 */
+ jz p4_2d_no_rot_done
+
+ movq V4F_START(%rdx), %rdx /* ptr to first src vertex */
+ movq V4F_START(%rdi), %rdi /* ptr to first dest vertex */
+
+ movd (%rsi), %mm0 /* | m00 */
+ prefetch (%rdx)
+ punpckldq 20(%rsi), %mm0 /* m11 | m00 */
+
+ movq 48(%rsi), %mm1 /* m31 | m30 */
+
+p4_2d_no_rot_loop:
+
+ prefetchw 32(%rdi) /* prefetch 2 vertices ahead */
+
+ movq (%rdx), %mm4 /* x1 | x0 */
+ movq 8(%rdx), %mm5 /* x3 | x2 */
+
+ pfmul %mm0, %mm4 /* x1*m11 | x0*m00 */
+ movq %mm5, %mm6 /* x3 | x2 */
+
+ punpckhdq %mm6, %mm6 /* x3 | x3 */
+
+ addq %rax, %rdx
+ pfmul %mm1, %mm6 /* x3*m31 | x3*m30 */
+
+	prefetch 32(%rdx)		/* hopefully the verts are packed (stride == 16) */
+ pfadd %mm4, %mm6 /* x1*m11+x3*m31 | x0*m00+x3*m30 */
+
+ movq %mm6, (%rdi) /* write r0, r1 */
+ movq %mm5, 8(%rdi) /* write r2, r3 */
+
+ addq $16, %rdi
+
+ decl %ecx
+ jnz p4_2d_no_rot_loop
+
+p4_2d_no_rot_done:
+ femms
+ ret
+
+
+.align 16
+.globl _mesa_x86_64_transform_points4_2d
+_mesa_x86_64_transform_points4_2d:
+
+ movl V4F_COUNT(%rdx), %ecx /* count */
+ movzx V4F_STRIDE(%rdx), %eax /* stride */
+
+ movl %ecx, V4F_COUNT(%rdi) /* set dest count */
+ movl $4, V4F_SIZE(%rdi) /* set dest size */
+	.byte 0x66, 0x66, 0x90		/* manual align += 3 */
+ orl $VEC_SIZE_4, V4F_FLAGS(%rdi)/* set dest flags */
+
+ test %ecx, %ecx
+	.byte 0x66, 0x66, 0x90		/* manual align += 3 */
+ jz p4_2d_done
+
+ movq V4F_START(%rdx), %rdx /* ptr to first src vertex */
+ movq V4F_START(%rdi), %rdi /* ptr to first dest vertex */
+
+ movd (%rsi), %mm0 /* | m00 */
+ movd 4(%rsi), %mm1 /* | m01 */
+
+ prefetch (%rdx)
+
+ punpckldq 16(%rsi), %mm0 /* m10 | m00 */
+	.byte 0x66, 0x66, 0x90		/* manual align += 3 */
+ punpckldq 20(%rsi), %mm1 /* m11 | m01 */
+
+ movq 48(%rsi), %mm2 /* m31 | m30 */
+
+p4_2d_loop:
+
+ prefetchw 32(%rdi) /* prefetch 2 vertices ahead */
+
+ movq (%rdx), %mm3 /* x1 | x0 */
+ movq 8(%rdx), %mm5 /* x3 | x2 */
+
+ movq %mm3, %mm4 /* x1 | x0 */
+ movq %mm5, %mm6 /* x3 | x2 */
+
+ pfmul %mm1, %mm4 /* x1*m11 | x0*m01 */
+ punpckhdq %mm6, %mm6 /* x3 | x3 */
+
+ pfmul %mm0, %mm3 /* x1*m10 | x0*m00 */
+
+ addq %rax, %rdx
+ pfacc %mm4, %mm3 /* x0*m01+x1*m11 | x0*m00+x1*m10 */
+
+ pfmul %mm2, %mm6 /* x3*m31 | x3*m30 */
+	prefetch 32(%rdx)		/* hopefully the verts are packed (stride == 16) */
+
+ pfadd %mm6, %mm3 /* r1 | r0 */
+
+ movq %mm3, (%rdi) /* write r0, r1 */
+ movq %mm5, 8(%rdi) /* write r2, r3 */
+
+ addq $16, %rdi
+
+ decl %ecx
+ jnz p4_2d_loop
+
+p4_2d_done:
+ femms
+ ret
+
+#endif