# gamma are missing because they have not been converted to use the new
# interface.
-DRI_DIRS = i810 i915 i965 mach64 mga r128 r200 r300 radeon s3v \
+DRI_DIRS = i810 i915tex i915 i965 mach64 mga r128 r200 r300 radeon s3v \
savage sis tdfx trident unichrome ffb
<h2>New Features</h2>
<ul>
+<li>New <code>minstall</code> script to replace normal install program
+<li>Faster fragment program execution in software
</ul>
<h2>Changes</h2>
<li>OPTION NV_position_invariant didn't work in NV vertex programs
<li>glDrawPixels into a user-created framebuffer object could crash Xlib driver
<li>Line clipping was broken in some circumstances
+<li>fragment.fogcoord register didn't always contain the correct value
</ul>
renormal \
shadowtex \
singlebuffer \
+ streaming_rect \
spectex \
spriteblast \
stex3d \
--- /dev/null
+
+/*
+ * Streaming texture upload demo, exercising pixel buffer objects and
+ * texture rectangles (derived from the GL_ARB_multitexture demo).
+ *
+ * Command line options:
+ *    -info      print GL implementation information
+ *    -w W       texture/window width
+ *    -h H       texture/window height
+ *
+ * Brian Paul, November 1998.  This program is in the public domain.
+ * Modified on 12 Feb 2002 for > 2 texture units.
+ */
+
+#define GL_GLEXT_PROTOTYPES
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <GL/glut.h>
+
+#include "readtex.h"
+
+
+#define ANIMATE 10
+#define PBO 11
+#define QUIT 100
+
+static GLboolean Animate = GL_TRUE;
+static GLboolean use_pbo = GL_TRUE;
+static GLboolean whole_rect = GL_TRUE;
+
+static GLfloat Drift = 0.0;
+static GLfloat drift_increment = 1/255.0;
+static GLfloat Xrot = 20.0, Yrot = 30.0;
+
+static GLuint Width = 1024;
+static GLuint Height = 512;
+
+
+static void Idle( void )
+{
+ if (Animate) {
+
+ Drift += drift_increment;
+ if (Drift >= 1.0)
+ Drift = 0.0;
+
+ glutPostRedisplay();
+ }
+}
+
+static int max( int a, int b ) { return a > b ? a : b; }
+static int min( int a, int b ) { return a < b ? a : b; }
+
+static void DrawObject()
+{
+ GLint size = Width * Height * 4;
+
+ if (use_pbo) {
+ /* XXX: This is extremely important - semantically makes the buffer
+ * contents undefined, but in practice means that the driver can
+ * release the old copy of the texture and allocate a new one
+ * without waiting for outstanding rendering to complete.
+ */
+ glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_EXT, size, NULL, GL_STREAM_DRAW_ARB);
+
+ {
+ char *image = glMapBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, GL_WRITE_ONLY_ARB);
+
+ printf("char %d\n", (unsigned char)(Drift * 255));
+
+ memset(image, (unsigned char)(Drift * 255), size);
+
+ glUnmapBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT);
+ }
+
+
+ /* BGRA is required for most hardware paths:
+ */
+ glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGBA, Width, Height, 0,
+ GL_BGRA, GL_UNSIGNED_BYTE, NULL);
+ }
+ else {
+ static char *image = NULL;
+
+ if (image == NULL)
+ image = malloc(size);
+
+ memset(image, (unsigned char)(Drift * 255), size);
+
+ /* BGRA should be the fast path for regular uploads as well.
+ */
+ glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGBA, Width, Height, 0,
+ GL_BGRA, GL_UNSIGNED_BYTE, image);
+ }
+
+ {
+ int x,y,w,h;
+
+ if (whole_rect) {
+ x = y = 0;
+ w = Width;
+ h = Height;
+ }
+ else {
+ x = y = 0;
+ w = min(10, Width);
+ h = min(10, Height);
+ }
+
+ glBegin(GL_QUADS);
+
+ glTexCoord2f( x, y);
+ glVertex2f( x, y );
+
+ glTexCoord2f( x, y + h);
+ glVertex2f( x, y + h);
+
+ glTexCoord2f( x + w, y + h);
+ glVertex2f( x + w, y + h );
+
+ glTexCoord2f( x + w, y);
+ glVertex2f( x + w, y );
+
+ glEnd();
+ }
+}
+
+
+
+static void Display( void )
+{
+ static GLint T0 = 0;
+ static GLint Frames = 0;
+ GLint t;
+
+ glClear( GL_COLOR_BUFFER_BIT );
+
+ glPushMatrix();
+ DrawObject();
+ glPopMatrix();
+
+ glutSwapBuffers();
+
+ Frames++;
+
+ t = glutGet(GLUT_ELAPSED_TIME);
+ if (t - T0 >= 1000) {
+ GLfloat seconds = (t - T0) / 1000.0;
+
+ GLfloat fps = Frames / seconds;
+ printf("%d frames in %6.3f seconds = %6.3f FPS\n", Frames, seconds, fps);
+
+ drift_increment = 2.2 * seconds / Frames;
+ T0 = t;
+ Frames = 0;
+ }
+}
+
+
+static void Reshape( int width, int height )
+{
+ glViewport( 0, 0, width, height );
+ glMatrixMode( GL_PROJECTION );
+ glLoadIdentity();
+/* glFrustum( -1.0, 1.0, -1.0, 1.0, 10.0, 100.0 ); */
+ gluOrtho2D( 0, width, height, 0 );
+ glMatrixMode( GL_MODELVIEW );
+ glLoadIdentity();
+ glTranslatef(0.375, 0.375, 0);
+}
+
+
+static void ModeMenu(int entry)
+{
+ if (entry==ANIMATE) {
+ Animate = !Animate;
+ }
+ else if (entry==PBO) {
+ use_pbo = !use_pbo;
+ }
+ else if (entry==QUIT) {
+ exit(0);
+ }
+
+ glutPostRedisplay();
+}
+
+
+static void Key( unsigned char key, int x, int y )
+{
+ (void) x;
+ (void) y;
+ switch (key) {
+ case 27:
+ exit(0);
+ break;
+ }
+ glutPostRedisplay();
+}
+
+
+static void SpecialKey( int key, int x, int y )
+{
+ float step = 3.0;
+ (void) x;
+ (void) y;
+
+ switch (key) {
+ case GLUT_KEY_UP:
+ Xrot += step;
+ break;
+ case GLUT_KEY_DOWN:
+ Xrot -= step;
+ break;
+ case GLUT_KEY_LEFT:
+ Yrot += step;
+ break;
+ case GLUT_KEY_RIGHT:
+ Yrot -= step;
+ break;
+ }
+ glutPostRedisplay();
+}
+
+
+static void Init( int argc, char *argv[] )
+{
+ const char *exten = (const char *) glGetString(GL_EXTENSIONS);
+ GLuint texObj, DrawPBO;
+ GLint size;
+
+
+ if (!strstr(exten, "GL_ARB_pixel_buffer_object") &&
+ !strstr(exten, "GL_EXT_pixel_buffer_object")) {
+ printf("Sorry, pixel buffer objects not supported by this renderer.\n");
+ exit(1);
+ }
+
+ glGetIntegerv(GL_MAX_TEXTURE_SIZE, &size);
+ printf("%d x %d max texture size\n", size, size);
+
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
+
+ /* allocate one texture object */
+ glGenTextures(1, &texObj);
+
+ /* setup the texture objects */
+ glActiveTextureARB(GL_TEXTURE0_ARB);
+ glBindTexture(GL_TEXTURE_RECTANGLE_ARB, texObj);
+
+ glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+
+ glGenBuffersARB(1, &DrawPBO);
+
+ glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, DrawPBO);
+ glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_EXT,
+ Width * Height * 4, NULL, GL_STREAM_DRAW);
+
+ glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
+
+ glEnable(GL_TEXTURE_RECTANGLE_ARB);
+
+ glShadeModel(GL_SMOOTH);
+ glClearColor(0.3, 0.3, 0.4, 1.0);
+
+ if (argc > 1 && strcmp(argv[1], "-info")==0) {
+ printf("GL_RENDERER = %s\n", (char *) glGetString(GL_RENDERER));
+ printf("GL_VERSION = %s\n", (char *) glGetString(GL_VERSION));
+ printf("GL_VENDOR = %s\n", (char *) glGetString(GL_VENDOR));
+ printf("GL_EXTENSIONS = %s\n", (char *) glGetString(GL_EXTENSIONS));
+ }
+}
+
+
+int main( int argc, char *argv[] )
+{
+ GLint i;
+
+ glutInit( &argc, argv );
+
+ for (i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-w") == 0) {
+ Width = atoi(argv[i+1]);
+ if (Width <= 0) {
+ printf("Error, bad width\n");
+ exit(1);
+ }
+ i++;
+ }
+ else if (strcmp(argv[i], "-h") == 0) {
+ Height = atoi(argv[i+1]);
+ if (Height <= 0) {
+ printf("Error, bad height\n");
+ exit(1);
+ }
+ i++;
+ }
+ }
+
+ glutInitWindowSize( Width, Height );
+ glutInitWindowPosition( 0, 0 );
+ glutInitDisplayMode( GLUT_RGB | GLUT_DOUBLE );
+ glutCreateWindow(argv[0] );
+
+ Init( argc, argv );
+
+ glutReshapeFunc( Reshape );
+ glutKeyboardFunc( Key );
+ glutSpecialFunc( SpecialKey );
+ glutDisplayFunc( Display );
+ glutIdleFunc( Idle );
+
+ glutCreateMenu(ModeMenu);
+ glutAddMenuEntry("Toggle Animation", ANIMATE);
+ glutAddMenuEntry("Toggle PBO", PBO);
+ glutAddMenuEntry("Quit", QUIT);
+ glutAttachMenu(GLUT_RIGHT_BUTTON);
+
+ glutMainLoop();
+ return 0;
+}
#include <GL/glut.h>
-static GLsizei MaxSize = 1024;
-static GLsizei TexWidth = 256, TexHeight = 256, TexBorder = 0;
+static GLsizei MaxSize = 2048;
+static GLsizei TexWidth = 1024, TexHeight = 1024, TexBorder = 0;
static GLboolean ScaleAndBias = GL_FALSE;
static GLboolean SubImage = GL_FALSE;
static GLdouble DownloadRate = 0.0; /* texels/sec */
static GLuint Mode = 0;
+/* Try and avoid L2 cache effects by cycling through a small number of
+ * textures.
+ *
+ * At the initial size of 1024x1024x4 == 4mbyte each, cycling through
+ * NR_TEXOBJ (4) of them keeps roughly 16mb in flight, enough to fall
+ * out of most caches.
+ *
+ * This turns into a fairly interesting question of what exactly you
+ * expect to be in cache in normal usage, and what you think should be
+ * outside. There are no rules for this, and no reason to favour one
+ * usage over another except what the application you care about
+ * happens to resemble most closely.
+ *
+ * - Should the client texture image be in L2 cache? Has it just been
+ * generated or read from disk?
+ * - Does the application really use >1 texture, or is it constantly
+ * updating one image in-place?
+ *
+ * Different answers will favour different texture upload mechanisms.
+ * To upload an image that is purely outside of cache, a DMA-based
+ * upload will probably win, whereas for small, in-cache textures,
+ * copying looks good.
+ */
+#define NR_TEXOBJ 4
+static GLuint TexObj[NR_TEXOBJ];
+
+
struct FormatRec {
GLenum Format;
GLenum Type;
}
}
+/* On x86, there is a performance cliff for memcpy to texture memory
+ * for sources below 64 byte alignment. We do our best with this in
+ * the driver, but it is better if the images are correctly aligned to
+ * start with:
+ */
+#define ALIGN (1<<12)
+
+static unsigned long align(unsigned long value, unsigned long a)
+{
+ return (value + a - 1) & ~(a-1);
+}
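+
+/* For example, align(5000, 4096) == 8192 and align(4096, 4096) == 4096.
+ * The bit trick only works when the alignment is a power of two.
+ */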
+
+static int MIN2(int a, int b)
+{
+ return a < b ? a : b;
+}
static void
MeasureDownloadRate(void)
{
const int w = TexWidth + 2 * TexBorder;
const int h = TexHeight + 2 * TexBorder;
- const int bytes = w * h * BytesPerTexel(Format);
+ const int image_bytes = align(w * h * BytesPerTexel(Format), ALIGN);
+ const int bytes = image_bytes * NR_TEXOBJ;
+ GLubyte *orig_texImage, *orig_getImage;
GLubyte *texImage, *getImage;
GLdouble t0, t1, time;
int count;
int i;
+ int offset = 0;
+ GLdouble total = 0; /* ints will tend to overflow */
+
+ printf("allocating %d bytes for %d %dx%d images\n",
+ bytes, NR_TEXOBJ, w, h);
- texImage = (GLubyte *) malloc(bytes);
- getImage = (GLubyte *) malloc(bytes);
- if (!texImage || !getImage) {
+ orig_texImage = (GLubyte *) malloc(bytes + ALIGN);
+ orig_getImage = (GLubyte *) malloc(image_bytes + ALIGN);
+ if (!orig_texImage || !orig_getImage) {
DownloadRate = 0.0;
return;
}
+ printf("alloc %p %p\n", orig_texImage, orig_getImage);
+
+ texImage = (GLubyte *)align((unsigned long)orig_texImage, ALIGN);
+ getImage = (GLubyte *)align((unsigned long)orig_getImage, ALIGN);
+
+ for (i = 1; !(((unsigned long)texImage) & i); i<<=1)
+ ;
+ printf("texture image alignment: %d bytes (%p)\n", i, texImage);
+
for (i = 0; i < bytes; i++) {
texImage[i] = i & 0xff;
}
count = 0;
t0 = glutGet(GLUT_ELAPSED_TIME) * 0.001;
do {
+ int img = count%NR_TEXOBJ;
+ GLubyte *img_ptr = texImage + img * image_bytes;
+
+ glBindTexture(GL_TEXTURE_2D, TexObj[img]);
+
if (SubImage && count > 0) {
- glTexSubImage2D(GL_TEXTURE_2D, 0, -TexBorder, -TexBorder, w, h,
+ /* Only update a portion of the image each iteration. This
+ * is presumably why you'd want to use texsubimage, otherwise
+ * you may as well just call teximage again.
+ *
+ * A bigger question is whether to use a pointer that moves
+ * with each call, ie does the incoming data come from L2
+ * cache under normal circumstances, or is it pulled from
+ * uncached memory?
+ *
+ * There's a good argument to say L2 cache, ie you'd expect
+ * the data to have been recently generated. It's possible
+ * that it could have come from a file read, which may or may
+ * not have gone through the cpu.
+ */
+ glTexSubImage2D(GL_TEXTURE_2D, 0,
+ -TexBorder,
+ -TexBorder + offset * h/8,
+ w,
+ h/8,
FormatTable[Format].Format,
- FormatTable[Format].Type, texImage);
+ FormatTable[Format].Type,
+#if 1
+ texImage /* likely in L2$ */
+#else
+ img_ptr + offset * bytes/8 /* unlikely in L2$ */
+#endif
+ );
+ offset += 1;
+ offset %= 8;
+ total += w * h / 8;
}
else {
glTexImage2D(GL_TEXTURE_2D, 0,
FormatTable[Format].IntFormat, w, h, TexBorder,
FormatTable[Format].Format,
- FormatTable[Format].Type, texImage);
+ FormatTable[Format].Type,
+ img_ptr);
+ total += w*h;
}
/* draw a tiny polygon to force texture into texram */
glDisable(GL_TEXTURE_2D);
- printf("w*h=%d count=%d time=%f\n", w*h, count, time);
- DownloadRate = w * h * count / time;
-
-#if 0
- if (!ScaleAndBias) {
- /* verify texture readback */
- glGetTexImage(GL_TEXTURE_2D, 0,
- FormatTable[Format].Format,
- FormatTable[Format].Type, getImage);
- for (i = 0; i < w * h; i++) {
- if (texImage[i] != getImage[i]) {
- printf("[%d] %d != %d\n", i, texImage[i], getImage[i]);
- }
- }
- }
-#endif
+ printf("total texels=%f time=%f\n", total, time);
+ DownloadRate = total / time;
+
- free(texImage);
- free(getImage);
+ free(orig_texImage);
+ free(orig_getImage);
{
GLint err = glGetError();
quad-offset-unfilled.c \
quad-unfilled.c \
quad-tex-2d.c \
+ quad-tex-pbo.c \
quad-tex-3d.c \
+ quad-tex-dep.c \
quad.c \
quads.c \
quadstrip.c \
--- /dev/null
+/*
+ * Copyright (c) 1991, 1992, 1993 Silicon Graphics, Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and
+ * its documentation for any purpose is hereby granted without fee, provided
+ * that (i) the above copyright notices and this permission notice appear in
+ * all copies of the software and related documentation, and (ii) the name of
+ * Silicon Graphics may not be used in any advertising or
+ * publicity relating to the software without the specific, prior written
+ * permission of Silicon Graphics.
+ *
+ * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF
+ * ANY KIND,
+ * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
+ * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * IN NO EVENT SHALL SILICON GRAPHICS BE LIABLE FOR
+ * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND,
+ * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+ * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF
+ * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#define GL_GLEXT_PROTOTYPES
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <GL/glut.h>
+
+
+#define CI_OFFSET_1 16
+#define CI_OFFSET_2 32
+
+GLenum doubleBuffer;
+
+static GLuint DrawPBO;
+
+static void Init(void)
+{
+ fprintf(stderr, "GL_RENDERER = %s\n", (char *) glGetString(GL_RENDERER));
+ fprintf(stderr, "GL_VERSION = %s\n", (char *) glGetString(GL_VERSION));
+ fprintf(stderr, "GL_VENDOR = %s\n", (char *) glGetString(GL_VENDOR));
+
+ glClearColor(0.0, 0.0, 1.0, 0.0);
+
+#define SIZE 16
+ {
+ GLubyte tex2d[SIZE][SIZE][4];
+ GLint s, t;
+
+ for (s = 0; s < SIZE; s++) {
+ for (t = 0; t < SIZE; t++) {
+ /* bgra:
+ */
+ tex2d[t][s][0] = 0x30;
+ tex2d[t][s][1] = t*255/(SIZE-1);
+ tex2d[t][s][2] = s*255/(SIZE-1);
+ tex2d[t][s][3] = 0xff;
+ }
+ }
+
+
+ /* put image into DrawPBO */
+ glGenBuffersARB(1, &DrawPBO);
+ glBindBufferARB(GL_PIXEL_PACK_BUFFER_EXT, DrawPBO);
+ glBufferDataARB(GL_PIXEL_PACK_BUFFER_EXT,
+ SIZE * SIZE * 4, tex2d, GL_STATIC_DRAW);
+ glBindBufferARB(GL_PIXEL_PACK_BUFFER_EXT, 0);
+
+
+ glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_R, GL_REPEAT);
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
+ glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, DrawPBO);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, SIZE, SIZE, 0,
+ GL_BGRA, GL_UNSIGNED_BYTE, NULL);
+ glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, 0);
+ glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
+ glEnable(GL_TEXTURE_2D);
+ }
+
+}
+
+static void Reshape(int width, int height)
+{
+
+ glViewport(0, 0, (GLint)width, (GLint)height);
+
+ glMatrixMode(GL_PROJECTION);
+ glLoadIdentity();
+ glOrtho(-1.0, 1.0, -1.0, 1.0, -0.5, 1000.0);
+ glMatrixMode(GL_MODELVIEW);
+}
+
+static void Key(unsigned char key, int x, int y)
+{
+
+ switch (key) {
+ case 27:
+ exit(1);
+ default:
+ return;
+ }
+
+ glutPostRedisplay();
+}
+
+static void Draw(void)
+{
+ glClear(GL_COLOR_BUFFER_BIT);
+
+ glBegin(GL_QUADS);
+ glTexCoord2f(1,0);
+ glVertex3f( 0.9, -0.9, -30.0);
+ glTexCoord2f(1,1);
+ glVertex3f( 0.9, 0.9, -30.0);
+ glTexCoord2f(0,1);
+ glVertex3f(-0.9, 0.9, -30.0);
+ glTexCoord2f(0,0);
+ glVertex3f(-0.9, -0.9, -30.0);
+ glEnd();
+
+ glFlush();
+
+ if (doubleBuffer) {
+ glutSwapBuffers();
+ }
+}
+
+static GLenum Args(int argc, char **argv)
+{
+ GLint i;
+
+ doubleBuffer = GL_FALSE;
+
+ for (i = 1; i < argc; i++) {
+ if (strcmp(argv[i], "-sb") == 0) {
+ doubleBuffer = GL_FALSE;
+ } else if (strcmp(argv[i], "-db") == 0) {
+ doubleBuffer = GL_TRUE;
+ } else {
+ fprintf(stderr, "%s (Bad option).\n", argv[i]);
+ return GL_FALSE;
+ }
+ }
+ return GL_TRUE;
+}
+
+int main(int argc, char **argv)
+{
+ GLenum type;
+
+ glutInit(&argc, argv);
+
+ if (Args(argc, argv) == GL_FALSE) {
+ exit(1);
+ }
+
+ glutInitWindowPosition(0, 0); glutInitWindowSize( 250, 250);
+
+ type = GLUT_RGB;
+ type |= (doubleBuffer) ? GLUT_DOUBLE : GLUT_SINGLE;
+ glutInitDisplayMode(type);
+
+ if (glutCreateWindow("First Tri") == GL_FALSE) {
+ exit(1);
+ }
+
+ Init();
+
+ glutReshapeFunc(Reshape);
+ glutKeyboardFunc(Key);
+ glutDisplayFunc(Draw);
+ glutMainLoop();
+ return 0;
+}
__glXGetMscRateOML,
};
+#define DRM_MAX_FDS 16
+static struct {
+ char *BusID;
+ int fd;
+ int refcount;
+} connection[DRM_MAX_FDS];
+
+static int nr_fds = 0;
+
+int drmOpenOnce(void *unused,
+ const char *BusID,
+ int *newlyopened)
+{
+ int i;
+ int fd;
+
+ for (i = 0; i < nr_fds; i++)
+ if (strcmp(BusID, connection[i].BusID) == 0) {
+ connection[i].refcount++;
+ *newlyopened = 0;
+ return connection[i].fd;
+ }
+
+ fd = drmOpen(unused, BusID);
+ if (fd <= 0 || nr_fds == DRM_MAX_FDS)
+ return fd;
+
+ connection[nr_fds].BusID = strdup(BusID);
+ connection[nr_fds].fd = fd;
+ connection[nr_fds].refcount = 1;
+ *newlyopened = 1;
+
+ if (0)
+ fprintf(stderr, "saved connection %d for %s %d\n",
+ nr_fds, connection[nr_fds].BusID,
+ strcmp(BusID, connection[nr_fds].BusID));
+
+ nr_fds++;
+
+ return fd;
+}
+
+void drmCloseOnce(int fd)
+{
+ int i;
+
+
+
+ for (i = 0; i < nr_fds; i++) {
+ if (fd == connection[i].fd) {
+ if (--connection[i].refcount == 0) {
+ drmClose(connection[i].fd);
+ free(connection[i].BusID);
+
+ if (i < --nr_fds)
+ connection[i] = connection[nr_fds];
+
+ return;
+ }
+ }
+ }
+}
+
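A minimal usage sketch for the new wrappers (illustrative only; the BusID
string is made up): repeated opens of the same BusID share one file
descriptor, and each open must be balanced by a drmCloseOnce() before the
connection is really closed.

   int newly1, newly2;
   int fd1 = drmOpenOnce(NULL, "PCI:1:0:0", &newly1);  /* newly1 == 1: real drmOpen()  */
   int fd2 = drmOpenOnce(NULL, "PCI:1:0:0", &newly2);  /* newly2 == 0, fd2 == fd1      */

   /* ... both DRI screens use the shared connection ... */

   drmCloseOnce(fd2);  /* refcount 2 -> 1, fd stays open        */
   drmCloseOnce(fd1);  /* refcount 1 -> 0, drmClose() is called */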
/**
* Perform the required libGL-side initialization and call the client-side
framebuffer.dev_priv = NULL;
if (XF86DRIOpenConnection(dpy, scrn, &hSAREA, &BusID)) {
- fd = drmOpen(NULL,BusID);
+ int newlyopened;
+ fd = drmOpenOnce(NULL,BusID, &newlyopened);
Xfree(BusID); /* No longer needed */
err_msg = "open DRM";
}
err_msg = "XF86DRIAuthConnection";
- if (XF86DRIAuthConnection(dpy, scrn, magic)) {
+ if (!newlyopened || XF86DRIAuthConnection(dpy, scrn, magic)) {
char *driverName;
/*
}
if ( fd >= 0 ) {
- (void)drmClose(fd);
+ (void)drmCloseOnce(fd);
}
(void)XF86DRICloseConnection(dpy, scrn);
}
static void
-dfbClear( GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint x, GLint y, GLint width, GLint height )
+dfbClear( GLcontext *ctx, GLbitfield mask )
{
IDirectFBGL_data *data = (IDirectFBGL_data*) ctx->DriverCtx;
+ int x = ctx->DrawBuffer->_Xmin;
+ int y = ctx->DrawBuffer->_Ymin;
+ int width = ctx->DrawBuffer->_Xmax - x;
+ int height = ctx->DrawBuffer->_Ymax - y;
+ GLboolean all = (width == ctx->DrawBuffer->Width &&
+ height == ctx->DrawBuffer->Height);
if (mask & BUFFER_BIT_FRONT_LEFT &&
ctx->Color.ColorMask[0] &&
}
if (mask)
- _swrast_Clear( ctx, mask, all, x, y, width, height );
+ _swrast_Clear( ctx, mask );
}
../common/vblank.c \
../common/dri_util.c \
../common/xmlconfig.c \
- ../common/drirenderbuffer.c
+ ../common/drirenderbuffer.c
+
+COMMON_BM_SOURCES = \
+ ../common/dri_bufmgr.c \
+ ../common/dri_drmpool.c
+
ifeq ($(WINDOW_SYSTEM),dri)
WINOBJ=
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ */
+
+#include <xf86drm.h>
+#include <stdlib.h>
+#include "glthread.h"
+#include "errno.h"
+#include "dri_bufmgr.h"
+#include "string.h"
+#include "imports.h"
+#include "dri_bufpool.h"
+
+_glthread_DECLARE_STATIC_MUTEX(bmMutex);
+
+/*
+ * TODO: Introduce fence pools in the same way as
+ * buffer object pools.
+ */
+
+
+
+typedef struct _DriFenceObject
+{
+ int fd;
+ _glthread_Mutex mutex;
+ int refCount;
+ const char *name;
+ drmFence fence;
+} DriFenceObject;
+
+typedef struct _DriBufferObject
+{
+ DriBufferPool *pool;
+ _glthread_Mutex mutex;
+ int refCount;
+ const char *name;
+ unsigned flags;
+ unsigned hint;
+ unsigned alignment;
+ void *private;
+} DriBufferObject;
+
+
+void
+bmError(int val, const char *file, const char *function, int line)
+{
+ _mesa_printf("Fatal video memory manager error \"%s\".\n"
+ "Check kernel logs or set the LIBGL_DEBUG\n"
+ "environment variable to \"verbose\" for more info.\n"
+ "Detected in file %s, line %d, function %s.\n",
+ strerror(-val), file, line, function);
+ abort();
+}
+
+DriFenceObject *
+driFenceBuffers(int fd, char *name, unsigned flags)
+{
+ DriFenceObject *fence = (DriFenceObject *) malloc(sizeof(*fence));
+ int ret;
+
+ if (!fence)
+ BM_CKFATAL(-EINVAL);
+
+ _glthread_LOCK_MUTEX(bmMutex);
+ fence->refCount = 1;
+ fence->name = name;
+ fence->fd = fd;
+ _glthread_INIT_MUTEX(fence->mutex);
+ ret = drmFenceBuffers(fd, flags, &fence->fence);
+ _glthread_UNLOCK_MUTEX(bmMutex);
+ if (ret) {
+ free(fence);
+ BM_CKFATAL(ret);
+ }
+ return fence;
+}
+
+
+unsigned
+driFenceType(DriFenceObject * fence)
+{
+ unsigned ret;
+
+ _glthread_LOCK_MUTEX(bmMutex);
+ ret = fence->fence.flags;
+ _glthread_UNLOCK_MUTEX(bmMutex);
+
+ return ret;
+}
+
+
+DriFenceObject *
+driFenceReference(DriFenceObject * fence)
+{
+ _glthread_LOCK_MUTEX(bmMutex);
+ ++fence->refCount;
+ _glthread_UNLOCK_MUTEX(bmMutex);
+ return fence;
+}
+
+void
+driFenceUnReference(DriFenceObject * fence)
+{
+ if (!fence)
+ return;
+
+ _glthread_LOCK_MUTEX(bmMutex);
+ if (--fence->refCount == 0) {
+ drmFenceDestroy(fence->fd, &fence->fence);
+ free(fence);
+ }
+ _glthread_UNLOCK_MUTEX(bmMutex);
+}
+
+void
+driFenceFinish(DriFenceObject * fence, unsigned type, int lazy)
+{
+ int ret;
+ unsigned flags = (lazy) ? DRM_FENCE_FLAG_WAIT_LAZY : 0;
+
+ _glthread_LOCK_MUTEX(fence->mutex);
+ ret = drmFenceWait(fence->fd, flags, &fence->fence, type);
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+ BM_CKFATAL(ret);
+}
+
+int
+driFenceSignaled(DriFenceObject * fence, unsigned type)
+{
+ int signaled;
+ int ret;
+
+ if (fence == NULL)
+ return GL_TRUE;
+
+ _glthread_LOCK_MUTEX(fence->mutex);
+ ret = drmFenceSignaled(fence->fd, &fence->fence, type, &signaled);
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+ BM_CKFATAL(ret);
+ return signaled;
+}
+
+
+extern drmBO *
+driBOKernel(struct _DriBufferObject *buf)
+{
+ drmBO *ret;
+
+ assert(buf->private != NULL);
+ ret = buf->pool->kernel(buf->pool, buf->private);
+ if (!ret)
+ BM_CKFATAL(-EINVAL);
+
+ return ret;
+}
+
+void
+driBOWaitIdle(struct _DriBufferObject *buf, int lazy)
+{
+ assert(buf->private != NULL);
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ BM_CKFATAL(buf->pool->waitIdle(buf->pool, buf->private, lazy));
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
+void *
+driBOMap(struct _DriBufferObject *buf, unsigned flags, unsigned hint)
+{
+ void *virtual;
+
+ assert(buf->private != NULL);
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ BM_CKFATAL(buf->pool->map(buf->pool, buf->private, flags, hint, &virtual));
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ return virtual;
+}
+
+void
+driBOUnmap(struct _DriBufferObject *buf)
+{
+ assert(buf->private != NULL);
+
+ buf->pool->unmap(buf->pool, buf->private);
+}
+
+unsigned long
+driBOOffset(struct _DriBufferObject *buf)
+{
+ unsigned long ret;
+
+ assert(buf->private != NULL);
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ ret = buf->pool->offset(buf->pool, buf->private);
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ return ret;
+}
+
+unsigned
+driBOFlags(struct _DriBufferObject *buf)
+{
+ unsigned ret;
+
+ assert(buf->private != NULL);
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ ret = buf->pool->flags(buf->pool, buf->private);
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ return ret;
+}
+
+struct _DriBufferObject *
+driBOReference(struct _DriBufferObject *buf)
+{
+ _glthread_LOCK_MUTEX(bmMutex);
+ if (++buf->refCount == 1) {
+ BM_CKFATAL(-EINVAL);
+ }
+ _glthread_UNLOCK_MUTEX(bmMutex);
+ return buf;
+}
+
+void
+driBOUnReference(struct _DriBufferObject *buf)
+{
+ int tmp;
+
+ if (!buf)
+ return;
+
+ _glthread_LOCK_MUTEX(bmMutex);
+ tmp = --buf->refCount;
+ _glthread_UNLOCK_MUTEX(bmMutex);
+ if (!tmp) {
+ buf->pool->destroy(buf->pool, buf->private);
+ free(buf);
+ }
+}
+
+void
+driBOData(struct _DriBufferObject *buf,
+ unsigned size, const void *data, unsigned flags)
+{
+ void *virtual;
+ int newBuffer;
+ struct _DriBufferPool *pool;
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ pool = buf->pool;
+ if (!pool->create) {
+ _mesa_error(NULL, GL_INVALID_OPERATION,
+ "driBOData called on invalid buffer\n");
+ BM_CKFATAL(-EINVAL);
+ }
+ newBuffer = !buf->private || (pool->size(pool, buf->private) < size) ||
+ pool->map(pool, buf->private, DRM_BO_FLAG_WRITE,
+ DRM_BO_HINT_DONT_BLOCK, &virtual);
+
+ if (newBuffer) {
+ if (buf->private)
+ pool->destroy(pool, buf->private);
+ if (!flags)
+ flags = buf->flags;
+ buf->private = pool->create(pool, size, flags, 0, buf->alignment);
+ if (!buf->private)
+ BM_CKFATAL(-ENOMEM);
+ BM_CKFATAL(pool->map(pool, buf->private,
+ DRM_BO_FLAG_WRITE,
+ DRM_BO_HINT_DONT_BLOCK, &virtual));
+ }
+
+ if (data != NULL)
+ memcpy(virtual, data, size);
+
+ BM_CKFATAL(pool->unmap(pool, buf->private));
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
+void
+driBOSubData(struct _DriBufferObject *buf,
+ unsigned long offset, unsigned long size, const void *data)
+{
+ void *virtual;
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ if (size && data) {
+ BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
+ DRM_BO_FLAG_WRITE, 0, &virtual));
+ memcpy((unsigned char *) virtual + offset, data, size);
+ BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
+ }
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
+void
+driBOGetSubData(struct _DriBufferObject *buf,
+ unsigned long offset, unsigned long size, void *data)
+{
+ void *virtual;
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ if (size && data) {
+ BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
+ DRM_BO_FLAG_READ, 0, &virtual));
+ memcpy(data, (unsigned char *) virtual + offset, size);
+ BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
+ }
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
+void
+driBOSetStatic(struct _DriBufferObject *buf,
+ unsigned long offset,
+ unsigned long size, void *virtual, unsigned flags)
+{
+ _glthread_LOCK_MUTEX(buf->mutex);
+ if (buf->private != NULL) {
+ _mesa_error(NULL, GL_INVALID_OPERATION,
+ "Invalid buffer for setStatic\n");
+ BM_CKFATAL(-EINVAL);
+ }
+ if (buf->pool->setstatic == NULL) {
+ _mesa_error(NULL, GL_INVALID_OPERATION,
+ "Invalid buffer pool for setStatic\n");
+ BM_CKFATAL(-EINVAL);
+ }
+
+ if (!flags)
+ flags = buf->flags;
+
+ buf->private = buf->pool->setstatic(buf->pool, offset, size,
+ virtual, flags);
+ if (!buf->private) {
+ _mesa_error(NULL, GL_OUT_OF_MEMORY,
+ "Out of memory in setStatic\n");
+ BM_CKFATAL(-ENOMEM);
+ }
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
+
+
+void
+driGenBuffers(struct _DriBufferPool *pool,
+ const char *name,
+ unsigned n,
+ struct _DriBufferObject *buffers[],
+ unsigned alignment, unsigned flags, unsigned hint)
+{
+ struct _DriBufferObject *buf;
+ int i;
+
+ flags = (flags) ? flags : DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM |
+ DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;
+
+
+ for (i = 0; i < n; ++i) {
+ buf = (struct _DriBufferObject *) calloc(1, sizeof(*buf));
+ if (!buf)
+ BM_CKFATAL(-ENOMEM);
+
+ _glthread_INIT_MUTEX(buf->mutex);
+ _glthread_LOCK_MUTEX(buf->mutex);
+ _glthread_LOCK_MUTEX(bmMutex);
+ buf->refCount = 1;
+ _glthread_UNLOCK_MUTEX(bmMutex);
+ buf->flags = flags;
+ buf->hint = hint;
+ buf->name = name;
+ buf->alignment = alignment;
+ buf->pool = pool;
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+ buffers[i] = buf;
+ }
+}
+
+void
+driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[])
+{
+ int i;
+
+ for (i = 0; i < n; ++i) {
+ driBOUnReference(buffers[i]);
+ }
+}
+
+
+void
+driInitBufMgr(int fd)
+{
+ ;
+}
+
+
+void
+driBOCreateList(int target, drmBOList * list)
+{
+ _glthread_LOCK_MUTEX(bmMutex);
+ BM_CKFATAL(drmBOCreateList(20, list));
+ _glthread_UNLOCK_MUTEX(bmMutex);
+}
+
+void
+driBOResetList(drmBOList * list)
+{
+ _glthread_LOCK_MUTEX(bmMutex);
+ BM_CKFATAL(drmBOResetList(list));
+ _glthread_UNLOCK_MUTEX(bmMutex);
+}
+
+void
+driBOAddListItem(drmBOList * list, struct _DriBufferObject *buf,
+ unsigned flags, unsigned mask)
+{
+ int newItem;
+
+ _glthread_LOCK_MUTEX(buf->mutex);
+ _glthread_LOCK_MUTEX(bmMutex);
+ BM_CKFATAL(drmAddValidateItem(list, driBOKernel(buf),
+ flags, mask, &newItem));
+ _glthread_UNLOCK_MUTEX(bmMutex);
+
+ /*
+ * Tell userspace pools to validate the buffer. This should be a
+ * noop if the pool is already validated.
+ * FIXME: We should have a list for this as well.
+ */
+
+ if (buf->pool->validate) {
+ BM_CKFATAL(buf->pool->validate(buf->pool, buf->private));
+ }
+
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+}
+
+void
+driBOFence(struct _DriBufferObject *buf, struct _DriFenceObject *fence)
+{
+ _glthread_LOCK_MUTEX(buf->mutex);
+ BM_CKFATAL(buf->pool->fence(buf->pool, buf->private, fence));
+ _glthread_UNLOCK_MUTEX(buf->mutex);
+
+}
+
+void
+driBOValidateList(int fd, drmBOList * list)
+{
+ _glthread_LOCK_MUTEX(bmMutex);
+ BM_CKFATAL(drmBOValidateList(fd, list));
+ _glthread_UNLOCK_MUTEX(bmMutex);
+}
+
+void
+driPoolTakeDown(struct _DriBufferPool *pool)
+{
+ pool->takeDown(pool);
+
+}
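
A rough sketch of the intended lifecycle of one of these buffer objects,
stitched together from the entry points above (the pool, fd, size and
vertex data are placeholders, and real drivers pick hardware-specific
flag/mask values):

   struct _DriBufferObject *buf;
   struct _DriFenceObject *fence;
   drmBOList list;
   void *virt;

   driGenBuffers(pool, "vertex data", 1, &buf, 64,
                 DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 0);
   driBOData(buf, size, NULL, 0);                /* allocate backing store     */

   virt = driBOMap(buf, DRM_BO_FLAG_WRITE, 0);   /* fill it on the CPU         */
   memcpy(virt, vertices, size);
   driBOUnmap(buf);

   driBOCreateList(20, &list);                   /* validate before submission */
   driBOAddListItem(&list, buf, DRM_BO_FLAG_MEM_TT, DRM_BO_FLAG_MEM_TT);
   driBOValidateList(fd, &list);

   /* ... emit commands that reference driBOOffset(buf) ... */

   fence = driFenceBuffers(fd, "after batch", 0);   /* fence the submission    */
   driBOFence(buf, fence);
   driFenceUnReference(fence);

   driBOUnReference(buf);                        /* drop the local reference   */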
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _DRI_BUFMGR_H_
+#define _DRI_BUFMGR_H_
+#include <xf86drm.h>
+
+
+struct _DriFenceObject;
+struct _DriBufferObject;
+struct _DriBufferPool;
+
+extern struct _DriFenceObject *driFenceBuffers(int fd, char *name,
+ unsigned flags);
+
+extern struct _DriFenceObject *driFenceReference(struct _DriFenceObject *fence);
+
+extern void driFenceUnReference(struct _DriFenceObject *fence);
+
+extern void
+driFenceFinish(struct _DriFenceObject *fence, unsigned type, int lazy);
+
+extern int driFenceSignaled(struct _DriFenceObject *fence, unsigned type);
+extern unsigned driFenceType(struct _DriFenceObject *fence);
+
+/*
+ * Return a pointer to the libdrm buffer object this DriBufferObject
+ * uses.
+ */
+
+extern drmBO *driBOKernel(struct _DriBufferObject *buf);
+extern void *driBOMap(struct _DriBufferObject *buf, unsigned flags,
+ unsigned hint);
+extern void driBOUnmap(struct _DriBufferObject *buf);
+extern unsigned long driBOOffset(struct _DriBufferObject *buf);
+extern unsigned driBOFlags(struct _DriBufferObject *buf);
+extern struct _DriBufferObject *driBOReference(struct _DriBufferObject *buf);
+extern void driBOUnReference(struct _DriBufferObject *buf);
+extern void driBOData(struct _DriBufferObject *r_buf,
+ unsigned size, const void *data, unsigned flags);
+extern void driBOSubData(struct _DriBufferObject *buf,
+ unsigned long offset, unsigned long size,
+ const void *data);
+extern void driBOGetSubData(struct _DriBufferObject *buf,
+ unsigned long offset, unsigned long size,
+ void *data);
+extern void driGenBuffers(struct _DriBufferPool *pool,
+ const char *name,
+ unsigned n,
+ struct _DriBufferObject *buffers[],
+ unsigned alignment, unsigned flags, unsigned hint);
+extern void driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[]);
+extern void driInitBufMgr(int fd);
+extern void driBOCreateList(int target, drmBOList * list);
+extern void driBOResetList(drmBOList * list);
+extern void driBOAddListItem(drmBOList * list, struct _DriBufferObject *buf,
+ unsigned flags, unsigned mask);
+extern void driBOValidateList(int fd, drmBOList * list);
+
+extern void driBOFence(struct _DriBufferObject *buf,
+ struct _DriFenceObject *fence);
+
+extern void driPoolTakeDown(struct _DriBufferPool *pool);
+extern void driBOSetStatic(struct _DriBufferObject *buf,
+ unsigned long offset,
+ unsigned long size, void *virtual, unsigned flags);
+extern void driBOWaitIdle(struct _DriBufferObject *buf, int lazy);
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _DRI_BUFPOOL_H_
+#define _DRI_BUFPOOL_H_
+
+#include <xf86drm.h>
+struct _DriFenceObject;
+
+typedef struct _DriBufferPool
+{
+ int fd;
+ int (*map) (struct _DriBufferPool * pool, void *private,
+ unsigned flags, int hint, void **virtual);
+ int (*unmap) (struct _DriBufferPool * pool, void *private);
+ int (*destroy) (struct _DriBufferPool * pool, void *private);
+ unsigned long (*offset) (struct _DriBufferPool * pool, void *private);
+ unsigned (*flags) (struct _DriBufferPool * pool, void *private);
+ unsigned long (*size) (struct _DriBufferPool * pool, void *private);
+ void *(*create) (struct _DriBufferPool * pool, unsigned long size,
+ unsigned flags, unsigned hint, unsigned alignment);
+ int (*fence) (struct _DriBufferPool * pool, void *private,
+ struct _DriFenceObject * fence);
+ drmBO *(*kernel) (struct _DriBufferPool * pool, void *private);
+ int (*validate) (struct _DriBufferPool * pool, void *private);
+ void *(*setstatic) (struct _DriBufferPool * pool, unsigned long offset,
+ unsigned long size, void *virtual, unsigned flags);
+ int (*waitIdle) (struct _DriBufferPool *pool, void *private,
+ int lazy);
+ void (*takeDown) (struct _DriBufferPool * pool);
+ void *data;
+} DriBufferPool;
+
+extern void bmError(int val, const char *file, const char *function,
+ int line);
+#define BM_CKFATAL(val) \
+ do{ \
+ int tstVal = (val); \
+ if (tstVal) \
+ bmError(tstVal, __FILE__, __FUNCTION__, __LINE__); \
+ } while(0)
+
+
+
+
+
+/*
+ * Builtin pools.
+ */
+
+/*
+ * Kernel buffer objects. Size in multiples of page size. Page size aligned.
+ */
+
+extern struct _DriBufferPool *driDRMPoolInit(int fd);
+extern struct _DriBufferPool *driDRMStaticPoolInit(int fd);
+
+#endif
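
A sketch of how a driver might wire up the two builtin pools at screen-init
time (the fd, front-buffer offset/size and map pointer are placeholders;
driGenBuffers/driBOSetStatic are declared in dri_bufmgr.h):

   struct _DriBufferPool *bufferPool = driDRMPoolInit(fd);        /* kernel-managed BOs */
   struct _DriBufferPool *staticPool = driDRMStaticPoolInit(fd);  /* fixed, pre-placed  */
   struct _DriBufferObject *frontBO;

   /* Wrap the already-placed front buffer so it can be referenced like
    * any other buffer object; the static pool has no create() hook,
    * only setstatic().
    */
   driGenBuffers(staticPool, "front buffer", 1, &frontBO, 0, 0, 0);
   driBOSetStatic(frontBO, front_offset, front_size, front_map,
                  DRM_BO_FLAG_MEM_VRAM);

   /* Dynamic buffers (batch buffers, textures, VBOs) come from the
    * kernel pool via driGenBuffers()/driBOData().
    */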
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <xf86drm.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include "dri_bufpool.h"
+
+/*
+ * Buffer pool implementation using DRM buffer objects as DRI buffer objects.
+ */
+
+static void *
+pool_create(struct _DriBufferPool *pool,
+ unsigned long size, unsigned flags, unsigned hint,
+ unsigned alignment)
+{
+ drmBO *buf = (drmBO *) malloc(sizeof(*buf));
+ int ret;
+ unsigned pageSize = getpagesize();
+
+ if (!buf)
+ return NULL;
+
+ if ((alignment > pageSize) && (alignment % pageSize)) {
+ return NULL;
+ }
+
+ ret = drmBOCreate(pool->fd, 0, size, alignment / pageSize,
+ NULL, drm_bo_type_dc,
+ flags, hint, buf);
+ if (ret) {
+ free(buf);
+ return NULL;
+ }
+
+ return (void *) buf;
+}
+
+static int
+pool_destroy(struct _DriBufferPool *pool, void *private)
+{
+ int ret;
+ drmBO *buf = (drmBO *) private;
+ ret = drmBODestroy(pool->fd, buf);
+ free(buf);
+ return ret;
+}
+
+static int
+pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
+ int hint, void **virtual)
+{
+ drmBO *buf = (drmBO *) private;
+
+ return drmBOMap(pool->fd, buf, flags, hint, virtual);
+}
+
+static int
+pool_unmap(struct _DriBufferPool *pool, void *private)
+{
+ drmBO *buf = (drmBO *) private;
+ return drmBOUnmap(pool->fd, buf);
+}
+
+static unsigned long
+pool_offset(struct _DriBufferPool *pool, void *private)
+{
+ drmBO *buf = (drmBO *) private;
+ return buf->offset;
+}
+
+static unsigned
+pool_flags(struct _DriBufferPool *pool, void *private)
+{
+ drmBO *buf = (drmBO *) private;
+ return buf->flags;
+}
+
+
+static unsigned long
+pool_size(struct _DriBufferPool *pool, void *private)
+{
+ drmBO *buf = (drmBO *) private;
+ return buf->size;
+}
+
+static int
+pool_fence(struct _DriBufferPool *pool, void *private,
+ struct _DriFenceObject *fence)
+{
+ /*
+ * Noop. The kernel handles all fencing.
+ */
+
+ return 0;
+}
+
+static drmBO *
+pool_kernel(struct _DriBufferPool *pool, void *private)
+{
+ return (drmBO *) private;
+}
+
+static int
+pool_waitIdle(struct _DriBufferPool *pool, void *private, int lazy)
+{
+ drmBO *buf = (drmBO *) private;
+ return drmBOWaitIdle(pool->fd, buf, (lazy) ? DRM_BO_HINT_WAIT_LAZY:0);
+}
+
+
+static void
+pool_takedown(struct _DriBufferPool *pool)
+{
+ free(pool);
+}
+
+
+struct _DriBufferPool *
+driDRMPoolInit(int fd)
+{
+ struct _DriBufferPool *pool;
+
+ pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
+
+ if (!pool)
+ return NULL;
+
+ pool->fd = fd;
+ pool->map = &pool_map;
+ pool->unmap = &pool_unmap;
+ pool->destroy = &pool_destroy;
+ pool->offset = &pool_offset;
+ pool->flags = &pool_flags;
+ pool->size = &pool_size;
+ pool->create = &pool_create;
+ pool->fence = &pool_fence;
+ pool->kernel = &pool_kernel;
+ pool->validate = NULL;
+ pool->setstatic = NULL;
+ pool->waitIdle = &pool_waitIdle;
+ pool->takeDown = &pool_takedown;
+ pool->data = NULL;
+ return pool;
+}
+
+
+static void *
+pool_setstatic(struct _DriBufferPool *pool, unsigned long offset,
+ unsigned long size, void *virtual, unsigned flags)
+{
+ drmBO *buf = (drmBO *) malloc(sizeof(*buf));
+ int ret;
+
+ if (!buf)
+ return NULL;
+
+ ret = drmBOCreate(pool->fd, offset, size, 0, NULL, drm_bo_type_fake,
+ flags, 0, buf);
+
+ if (ret) {
+ free(buf);
+ return NULL;
+ }
+
+ buf->virtual = virtual;
+
+ return (void *) buf;
+}
+
+
+struct _DriBufferPool *
+driDRMStaticPoolInit(int fd)
+{
+ struct _DriBufferPool *pool;
+
+ pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
+
+ if (!pool)
+ return NULL;
+
+ pool->fd = fd;
+ pool->map = &pool_map;
+ pool->unmap = &pool_unmap;
+ pool->destroy = &pool_destroy;
+ pool->offset = &pool_offset;
+ pool->flags = &pool_flags;
+ pool->size = &pool_size;
+ pool->create = NULL;
+ pool->fence = &pool_fence;
+ pool->kernel = &pool_kernel;
+ pool->validate = NULL;
+ pool->setstatic = &pool_setstatic;
+ pool->waitIdle = &pool_waitIdle;
+ pool->takeDown = &pool_takedown;
+ pool->data = NULL;
+ return pool;
+}
(void)drmUnmap((drmAddress)psp->pSAREA, SAREA_MAX);
(void)drmUnmap((drmAddress)psp->pFB, psp->fbSize);
_mesa_free(psp->pDevPriv);
- (void)drmClose(psp->fd);
+ (void)drmCloseOnce(psp->fd);
if ( psp->modes != NULL ) {
(*dri_interface->destroyContextModes)( psp->modes );
}
#define DRI_VALIDATE_DRAWABLE_INFO(psp, pdp) \
do { \
while (*(pdp->pStamp) != pdp->lastStamp) { \
- DRM_UNLOCK(psp->fd, &psp->pSAREA->lock, \
- pdp->driContextPriv->hHWContext); \
+ register unsigned int hwContext = psp->pSAREA->lock.lock & \
+ ~(DRM_LOCK_HELD | DRM_LOCK_CONT); \
+ DRM_UNLOCK(psp->fd, &psp->pSAREA->lock, hwContext); \
\
DRM_SPINLOCK(&psp->pSAREA->drawable_lock, psp->drawLockID); \
DRI_VALIDATE_DRAWABLE_INFO_ONCE(pdp); \
DRM_SPINUNLOCK(&psp->pSAREA->drawable_lock, psp->drawLockID); \
\
- DRM_LIGHT_LOCK(psp->fd, &psp->pSAREA->lock, \
- pdp->driContextPriv->hHWContext); \
+ DRM_LIGHT_LOCK(psp->fd, &psp->pSAREA->lock, hwContext); \
} \
} while (0)
}
}
-void ffbDDClear(GLcontext *ctx, GLbitfield mask, GLboolean allFoo,
- GLint cxFoo, GLint cyFoo, GLint cwidthFoo, GLint cheightFoo)
+void ffbDDClear(GLcontext *ctx, GLbitfield mask)
{
ffbContextPtr fmesa = FFB_CONTEXT(ctx);
__DRIdrawablePrivate *dPriv = fmesa->driDrawable;
}
if (mask)
- _swrast_Clear(ctx, mask, 0, 0, 0, 0, 0);
+ _swrast_Clear(ctx, mask);
}
-/* $XFree86: xc/lib/GL/mesa/src/drv/ffb/ffb_clear.h,v 1.2 2002/02/22 21:32:58 dawes Exp $ */
-
#ifndef _FFB_CLEAR_H
#define _FFB_CLEAR_H
-extern void ffbDDClear(GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint cx, GLint cy, GLint cwidth, GLint cheight);
+extern void ffbDDClear(GLcontext *ctx, GLbitfield mask);
#endif /* !(_FFB_CLEAR_H) */
*/
ffbDDClear(fmesa->glCtx,
(BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_BACK_LEFT |
- BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL),
- 1, 0, 0, 0, 0);
+ BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL));
}
} else {
_mesa_make_current(NULL, NULL, NULL);
* Buffer clear
*/
-static void gammaDDClear( GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint cx, GLint cy, GLint cw, GLint ch )
+static void gammaDDClear( GLcontext *ctx, GLbitfield mask )
{
gammaContextPtr gmesa = GAMMA_CONTEXT(ctx);
GLINTDRIPtr gDRIPriv = (GLINTDRIPtr)gmesa->driScreen->pDevPriv;
#endif
if ( mask )
- _swrast_Clear( ctx, mask, all, cx, cy, cw, ch );
+ _swrast_Clear( ctx, mask );
}
/* =============================================================
#define DEPTH_SCALE ((1<<16)-1)
-static void i810Clear( GLcontext *ctx, GLbitfield mask, GLboolean allFoo,
- GLint cxFoo, GLint cyFoo, GLint cwFoo, GLint chFoo )
+static void i810Clear( GLcontext *ctx, GLbitfield mask )
{
i810ContextPtr imesa = I810_CONTEXT( ctx );
__DRIdrawablePrivate *dPriv = imesa->driDrawable;
}
if (mask)
- _swrast_Clear( ctx, mask, 0, 0, 0, 0, 0 );
+ _swrast_Clear( ctx, mask );
}
void
i830ClearWithTris(intelContextPtr intel, GLbitfield mask,
- GLboolean all,
- GLint cx, GLint cy, GLint cw, GLint ch)
+ GLboolean allFoo,
+ GLint cxFoo, GLint cyFoo, GLint cwFoo, GLint chFoo)
{
i830ContextPtr i830 = I830_CONTEXT( intel );
__DRIdrawablePrivate *dPriv = intel->driDrawable;
intelScreenPrivate *screen = intel->intelScreen;
int x0, y0, x1, y1;
+ GLint cx, cy, cw, ch;
+ GLboolean all;
INTEL_FIREVERTICES(intel);
SET_STATE( i830, meta );
LOCK_HARDWARE(intel);
+ /* get clear bounds after locking */
+ cx = intel->ctx.DrawBuffer->_Xmin;
+ cy = intel->ctx.DrawBuffer->_Ymin;
+ cw = intel->ctx.DrawBuffer->_Xmax - cx;
+ ch = intel->ctx.DrawBuffer->_Ymax - cy;
+ all = (cw == intel->ctx.DrawBuffer->Width &&
+ ch == intel->ctx.DrawBuffer->Height);
+
if(!all) {
x0 = cx;
y0 = cy;
#include "macros.h"
#include "enums.h"
+#include "tnl/tnl.h"
#include "tnl/t_context.h"
#include "intel_batchbuffer.h"
ctx->Driver.Enable( ctx, GL_FRAGMENT_PROGRAM_ARB,
ctx->FragmentProgram.Enabled );
}
+
+ _tnl_program_string(ctx, target, prog);
}
void
-i915ClearWithTris(intelContextPtr intel, GLbitfield mask,
- GLboolean all,
- GLint cx, GLint cy, GLint cw, GLint ch)
+i915ClearWithTris(intelContextPtr intel, GLbitfield buffers,
+ GLboolean allFoo,
+ GLint cxFoo, GLint cyFoo, GLint cwFoo, GLint chFoo)
{
i915ContextPtr i915 = I915_CONTEXT( intel );
__DRIdrawablePrivate *dPriv = intel->driDrawable;
intelScreenPrivate *screen = intel->intelScreen;
int x0, y0, x1, y1;
+ GLint cx, cy, cw, ch;
+ GLboolean all;
SET_STATE( i915, meta );
set_initial_state( i915 );
LOCK_HARDWARE(intel);
+ /* get clear bounds after locking */
+ cx = intel->ctx.DrawBuffer->_Xmin;
+ cy = intel->ctx.DrawBuffer->_Ymin;
+ cw = intel->ctx.DrawBuffer->_Xmax - cx;
+ ch = intel->ctx.DrawBuffer->_Ymax - cy;
+ all = (cw == intel->ctx.DrawBuffer->Width &&
+ ch == intel->ctx.DrawBuffer->Height);
+
if (!all) {
x0 = cx;
y0 = cy;
* The active cliprects will be applied as for any other geometry.
*/
- if (mask & BUFFER_BIT_FRONT_LEFT) {
+ if (buffers & BUFFER_BIT_FRONT_LEFT) {
set_no_depth_stencil_write( i915 );
set_color_mask( i915, GL_TRUE );
set_draw_region( i915, &screen->front );
0, 0, 0, 0);
}
- if (mask & BUFFER_BIT_BACK_LEFT) {
+ if (buffers & BUFFER_BIT_BACK_LEFT) {
set_no_depth_stencil_write( i915 );
set_color_mask( i915, GL_TRUE );
set_draw_region( i915, &screen->back );
0, 0, 0, 0);
}
- if (mask & BUFFER_BIT_STENCIL) {
+ if (buffers & BUFFER_BIT_STENCIL) {
set_stencil_replace( i915,
intel->ctx.Stencil.WriteMask[0],
intel->ctx.Stencil.Clear);
-void intelClearWithBlit(GLcontext *ctx, GLbitfield flags, GLboolean all,
- GLint cx1, GLint cy1, GLint cw, GLint ch)
+void intelClearWithBlit(GLcontext *ctx, GLbitfield buffers, GLboolean allFoo,
+ GLint cx1Foo, GLint cy1Foo, GLint cwFoo, GLint chFoo)
{
intelContextPtr intel = INTEL_CONTEXT( ctx );
intelScreenPrivate *intelScreen = intel->intelScreen;
GLuint clear_depth, clear_color;
- GLint cx, cy;
+ GLint cx, cy, cw, ch;
+ GLboolean all;
GLint pitch;
GLint cpp = intelScreen->cpp;
GLint i;
intelFlush( &intel->ctx );
LOCK_HARDWARE( intel );
+ /* get clear bounds after locking */
+ cx = intel->ctx.DrawBuffer->_Xmin;
+ cy = intel->ctx.DrawBuffer->_Ymin;
+ cw = intel->ctx.DrawBuffer->_Xmax - cx;
+ ch = intel->ctx.DrawBuffer->_Ymax - cy;
+ all = (cw == intel->ctx.DrawBuffer->Width &&
+ ch == intel->ctx.DrawBuffer->Height);
+
pitch = intelScreen->front.pitch;
clear_color = intel->ClearColor;
clear_depth = 0;
- if (flags & BUFFER_BIT_DEPTH) {
+ if (buffers & BUFFER_BIT_DEPTH) {
clear_depth = (GLuint)(ctx->Depth.Clear * intel->ClearDepth);
}
- if (flags & BUFFER_BIT_STENCIL) {
+ if (buffers & BUFFER_BIT_STENCIL) {
clear_depth |= (ctx->Stencil.Clear & 0xff) << 24;
}
XY_COLOR_BLT_WRITE_ALPHA |
XY_COLOR_BLT_WRITE_RGB);
D_CMD = XY_COLOR_BLT_CMD;
- if (flags & BUFFER_BIT_DEPTH) D_CMD |= XY_COLOR_BLT_WRITE_RGB;
- if (flags & BUFFER_BIT_STENCIL) D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
+ if (buffers & BUFFER_BIT_DEPTH) D_CMD |= XY_COLOR_BLT_WRITE_RGB;
+ if (buffers & BUFFER_BIT_STENCIL) D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
break;
default:
BR13 = (0xF0 << 16) | (pitch) | (1<<24);
{
/* flip top to bottom */
- cy = intel->driDrawable->h-cy1-ch;
- cx = cx1 + intel->drawX;
+ cy = intel->driDrawable->h - cy - ch;
+ cx = cx + intel->drawX;
cy += intel->drawY;
/* adjust for page flipping */
if ( intel->sarea->pf_current_page == 1 ) {
- GLuint tmp = flags;
+ GLuint tmp = buffers;
- flags &= ~(BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_BACK_LEFT);
- if ( tmp & BUFFER_BIT_FRONT_LEFT ) flags |= BUFFER_BIT_BACK_LEFT;
- if ( tmp & BUFFER_BIT_BACK_LEFT ) flags |= BUFFER_BIT_FRONT_LEFT;
+ buffers &= ~(BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_BACK_LEFT);
+ if ( tmp & BUFFER_BIT_FRONT_LEFT ) buffers |= BUFFER_BIT_BACK_LEFT;
+ if ( tmp & BUFFER_BIT_BACK_LEFT ) buffers |= BUFFER_BIT_FRONT_LEFT;
}
for (i = 0 ; i < intel->numClipRects ; i++)
b.y2 > intelScreen->height)
continue;
- if ( flags & BUFFER_BIT_FRONT_LEFT ) {
+ if ( buffers & BUFFER_BIT_FRONT_LEFT ) {
BEGIN_BATCH( 6);
OUT_BATCH( CMD );
OUT_BATCH( BR13 );
ADVANCE_BATCH();
}
- if ( flags & BUFFER_BIT_BACK_LEFT ) {
+ if ( buffers & BUFFER_BIT_BACK_LEFT ) {
BEGIN_BATCH( 6);
OUT_BATCH( CMD );
OUT_BATCH( BR13 );
ADVANCE_BATCH();
}
- if ( flags & (BUFFER_BIT_STENCIL | BUFFER_BIT_DEPTH) ) {
+ if ( buffers & (BUFFER_BIT_STENCIL | BUFFER_BIT_DEPTH) ) {
BEGIN_BATCH( 6);
OUT_BATCH( D_CMD );
OUT_BATCH( BR13 );
}
-void intelClear(GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint cx, GLint cy, GLint cw, GLint ch)
+void intelClear(GLcontext *ctx, GLbitfield mask)
{
intelContextPtr intel = INTEL_CONTEXT( ctx );
const GLuint colorMask = *((GLuint *) &ctx->Color.ColorMask);
swrast_mask |= (mask & BUFFER_BIT_ACCUM);
if (blit_mask)
- intelClearWithBlit( ctx, blit_mask, all, cx, cy, cw, ch );
+ intelClearWithBlit( ctx, blit_mask, 0, 0, 0, 0, 0);
if (tri_mask)
- intel->vtbl.clear_with_tris( intel, tri_mask, all, cx, cy, cw, ch);
+ intel->vtbl.clear_with_tris( intel, tri_mask, 0, 0, 0, 0, 0);
if (swrast_mask)
- _swrast_Clear( ctx, swrast_mask, all, cx, cy, cw, ch );
+ _swrast_Clear( ctx, swrast_mask );
}
extern void intelWaitAgeLocked( intelContextPtr intel, int age, GLboolean unlock );
-extern void intelClear(GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint cx, GLint cy, GLint cw, GLint ch);
+extern void intelClear(GLcontext *ctx, GLbitfield mask);
extern void intelPageFlip( const __DRIdrawablePrivate *dpriv );
--- /dev/null
+
+TOP = ../../../../..
+include $(TOP)/configs/current
+
+LIBNAME = i915tex_dri.so
+
+MINIGLX_SOURCES = server/intel_dri.c
+
+DRIVER_SOURCES = \
+ i830_context.c \
+ i830_metaops.c \
+ i830_state.c \
+ i830_texblend.c \
+ i830_tex.c \
+ i830_texstate.c \
+ i830_vtbl.c \
+ intel_render.c \
+ intel_regions.c \
+ intel_buffer_objects.c \
+ intel_batchbuffer.c \
+ intel_mipmap_tree.c \
+ i915_tex_layout.c \
+ intel_tex_image.c \
+ intel_tex_subimage.c \
+ intel_tex_copy.c \
+ intel_tex_validate.c \
+ intel_tex_format.c \
+ intel_tex.c \
+ intel_pixel.c \
+ intel_pixel_copy.c \
+ intel_pixel_read.c \
+ intel_pixel_draw.c \
+ intel_buffers.c \
+ intel_blit.c \
+ i915_tex.c \
+ i915_texstate.c \
+ i915_context.c \
+ i915_debug.c \
+ i915_fragprog.c \
+ i915_metaops.c \
+ i915_program.c \
+ i915_state.c \
+ i915_vtbl.c \
+ intel_context.c \
+ intel_ioctl.c \
+ intel_rotate.c \
+ intel_screen.c \
+ intel_span.c \
+ intel_state.c \
+ intel_tris.c \
+ intel_fbo.c \
+ intel_depthstencil.c \
+ intel_batchpool.c
+
+C_SOURCES = \
+ $(COMMON_SOURCES) \
+ $(COMMON_BM_SOURCES) \
+ $(DRIVER_SOURCES)
+
+ASM_SOURCES =
+
+
+
+include ../Makefile.template
+
+symlinks:
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "i830_context.h"
+#include "imports.h"
+#include "texmem.h"
+#include "intel_tex.h"
+#include "tnl/tnl.h"
+#include "tnl/t_vertex.h"
+#include "tnl/t_context.h"
+#include "utils.h"
+
+/***************************************
+ * Mesa's Driver Functions
+ ***************************************/
+
+static const struct dri_extension i830_extensions[] = {
+ {"GL_ARB_texture_env_crossbar", NULL},
+ {NULL, NULL}
+};
+
+
+static void
+i830InitDriverFunctions(struct dd_function_table *functions)
+{
+ intelInitDriverFunctions(functions);
+ i830InitStateFuncs(functions);
+ i830InitTextureFuncs(functions);
+}
+
+
+GLboolean
+i830CreateContext(const __GLcontextModes * mesaVis,
+ __DRIcontextPrivate * driContextPriv,
+ void *sharedContextPrivate)
+{
+ struct dd_function_table functions;
+ struct i830_context *i830 = CALLOC_STRUCT(i830_context);
+ struct intel_context *intel = &i830->intel;
+ GLcontext *ctx = &intel->ctx;
+ if (!i830)
+ return GL_FALSE;
+
+ i830InitVtbl(i830);
+ i830InitDriverFunctions(&functions);
+
+ if (!intelInitContext(intel, mesaVis, driContextPriv,
+ sharedContextPrivate, &functions)) {
+ FREE(i830);
+ return GL_FALSE;
+ }
+
+ intel->ctx.Const.MaxTextureUnits = I830_TEX_UNITS;
+ intel->ctx.Const.MaxTextureImageUnits = I830_TEX_UNITS;
+ intel->ctx.Const.MaxTextureCoordUnits = I830_TEX_UNITS;
+
+ /* Advertise the full hardware capabilities. The new memory
+ * manager should cope much better with overload situations:
+ */
+ ctx->Const.MaxTextureLevels = 12;
+ ctx->Const.Max3DTextureLevels = 9;
+ ctx->Const.MaxCubeTextureLevels = 11;
+ ctx->Const.MaxTextureRectSize = (1 << 11);
+ ctx->Const.MaxTextureUnits = I830_TEX_UNITS;
+
+ _tnl_init_vertices(ctx, ctx->Const.MaxArrayLockSize + 12,
+ 18 * sizeof(GLfloat));
+
+ intel->verts = TNL_CONTEXT(ctx)->clipspace.vertex_buf;
+
+ driInitExtensions(ctx, i830_extensions, GL_FALSE);
+
+ i830InitState(i830);
+ i830InitMetaFuncs(i830);
+
+ _tnl_allow_vertex_fog(ctx, 1);
+ _tnl_allow_pixel_fog(ctx, 0);
+
+ return GL_TRUE;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef I830CONTEXT_INC
+#define I830CONTEXT_INC
+
+#include "intel_context.h"
+
+#define I830_FALLBACK_TEXTURE 0x1000
+#define I830_FALLBACK_COLORMASK 0x2000
+#define I830_FALLBACK_STENCIL 0x4000
+#define I830_FALLBACK_STIPPLE 0x8000
+#define I830_FALLBACK_LOGICOP 0x10000
+
+#define I830_UPLOAD_CTX 0x1
+#define I830_UPLOAD_BUFFERS 0x2
+#define I830_UPLOAD_STIPPLE 0x4
+#define I830_UPLOAD_INVARIENT 0x8
+#define I830_UPLOAD_TEX(i) (0x10<<(i))
+#define I830_UPLOAD_TEXBLEND(i) (0x100<<(i))
+#define I830_UPLOAD_TEX_ALL (0x0f0)
+#define I830_UPLOAD_TEXBLEND_ALL (0xf00)
+
+/* State structure offsets - these will probably disappear.
+ */
+#define I830_DESTREG_CBUFADDR0 0
+#define I830_DESTREG_CBUFADDR1 1
+#define I830_DESTREG_DBUFADDR0 2
+#define I830_DESTREG_DBUFADDR1 3
+#define I830_DESTREG_DV0 4
+#define I830_DESTREG_DV1 5
+#define I830_DESTREG_SENABLE 6
+#define I830_DESTREG_SR0 7
+#define I830_DESTREG_SR1 8
+#define I830_DESTREG_SR2 9
+#define I830_DEST_SETUP_SIZE 10
+
+#define I830_CTXREG_STATE1 0
+#define I830_CTXREG_STATE2 1
+#define I830_CTXREG_STATE3 2
+#define I830_CTXREG_STATE4 3
+#define I830_CTXREG_STATE5 4
+#define I830_CTXREG_IALPHAB 5
+#define I830_CTXREG_STENCILTST 6
+#define I830_CTXREG_ENABLES_1 7
+#define I830_CTXREG_ENABLES_2 8
+#define I830_CTXREG_AA 9
+#define I830_CTXREG_FOGCOLOR 10
+#define I830_CTXREG_BLENDCOLOR0 11
+#define I830_CTXREG_BLENDCOLOR1 12
+#define I830_CTXREG_VF 13
+#define I830_CTXREG_VF2 14
+#define I830_CTXREG_MCSB0 15
+#define I830_CTXREG_MCSB1 16
+#define I830_CTX_SETUP_SIZE 17
+
+#define I830_STPREG_ST0 0
+#define I830_STPREG_ST1 1
+#define I830_STP_SETUP_SIZE 2
+
+#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */
+#define I830_TEXREG_TM0S1 1
+#define I830_TEXREG_TM0S2 2
+#define I830_TEXREG_TM0S3 3
+#define I830_TEXREG_TM0S4 4
+#define I830_TEXREG_MCS 5 /* _3DSTATE_MAP_COORD_SETS */
+#define I830_TEXREG_CUBE 6 /* _3DSTATE_MAP_CUBE */
+#define I830_TEX_SETUP_SIZE 7
+
+#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
+
+struct i830_texture_object
+{
+ struct intel_texture_object intel;
+ GLuint Setup[I830_TEX_SETUP_SIZE];
+};
+
+#define I830_TEX_UNITS 4
+
+struct i830_hw_state
+{
+ GLuint Ctx[I830_CTX_SETUP_SIZE];
+ GLuint Buffer[I830_DEST_SETUP_SIZE];
+ GLuint Stipple[I830_STP_SETUP_SIZE];
+ GLuint Tex[I830_TEX_UNITS][I830_TEX_SETUP_SIZE];
+ GLuint TexBlend[I830_TEX_UNITS][I830_TEXBLEND_SIZE];
+ GLuint TexBlendWordsUsed[I830_TEX_UNITS];
+
+ struct intel_region *draw_region;
+ struct intel_region *depth_region;
+
+ /* Regions aren't actually that appropriate here as the memory may
+ * be from a PBO or FBO. Just use the buffer id. Will have to do
+ * this for draw and depth for FBO's...
+ */
+ struct _DriBufferObject *tex_buffer[I830_TEX_UNITS];
+ GLuint tex_offset[I830_TEX_UNITS];
+
+ GLuint emitted; /* I830_UPLOAD_* */
+ GLuint active;
+};
+
+struct i830_context
+{
+ struct intel_context intel;
+
+ GLuint lodbias_tm0s3[MAX_TEXTURE_UNITS];
+ DECLARE_RENDERINPUTS(last_index_bitset);
+
+ struct i830_hw_state meta, initial, state, *current;
+};
+
+
+
+
+#define I830_STATECHANGE(i830, flag) \
+do { \
+ INTEL_FIREVERTICES( &i830->intel ); \
+ i830->state.emitted &= ~flag; \
+} while (0)
+
+#define I830_ACTIVESTATE(i830, flag, mode) \
+do { \
+ INTEL_FIREVERTICES( &i830->intel ); \
+ if (mode) \
+ i830->state.active |= flag; \
+ else \
+ i830->state.active &= ~flag; \
+} while (0)
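
A minimal usage sketch (not part of the patch; condensed from the i830DepthFunc code later in this series, and relying on register defines from i830_reg.h): a Mesa state callback first fires any queued vertices and clears the relevant dirty bit via I830_STATECHANGE, then edits the cached register words, which are re-emitted lazily.

static void sketch_set_depth_func(struct i830_context *i830, int hw_func)
{
   /* Flush pending vertices and mark the CTX block for re-emit. */
   I830_STATECHANGE(i830, I830_UPLOAD_CTX);

   /* hw_func is assumed to be one of the COMPAREFUNC_* values. */
   i830->state.Ctx[I830_CTXREG_STATE3] &= ~DEPTH_TEST_FUNC_MASK;
   i830->state.Ctx[I830_CTXREG_STATE3] |= ENABLE_DEPTH_TEST_FUNC |
                                          DEPTH_TEST_FUNC(hw_func);
}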
+
+/* i830_vtbl.c
+ */
+extern void i830InitVtbl(struct i830_context *i830);
+
+/* i830_context.c
+ */
+extern GLboolean
+i830CreateContext(const __GLcontextModes * mesaVis,
+ __DRIcontextPrivate * driContextPriv,
+ void *sharedContextPrivate);
+
+/* i830_tex.c, i830_texstate.c
+ */
+extern void i830UpdateTextureState(struct intel_context *intel);
+
+extern void i830InitTextureFuncs(struct dd_function_table *functions);
+
+/* i830_texblend.c
+ */
+extern GLuint i830SetTexEnvCombine(struct i830_context *i830,
+ const struct gl_tex_env_combine_state
+ *combine, GLint blendUnit, GLuint texel_op,
+ GLuint * state, const GLfloat * factor);
+
+extern void i830EmitTextureBlend(struct i830_context *i830);
+
+
+/* i830_state.c
+ */
+extern void i830InitStateFuncs(struct dd_function_table *functions);
+
+extern void i830EmitState(struct i830_context *i830);
+
+extern void i830InitState(struct i830_context *i830);
+
+/* i830_metaops.c
+ */
+extern void i830InitMetaFuncs(struct i830_context *i830);
+
+extern void
+i830RotateWindow(struct intel_context *intel, __DRIdrawablePrivate * dPriv,
+ GLuint srcBuf);
+
+/*======================================================================
+ * Inline conversion functions. These are better-typed than the
+ * macros used previously:
+ */
+static INLINE struct i830_context *
+i830_context(GLcontext * ctx)
+{
+ return (struct i830_context *) ctx;
+}
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "enums.h"
+#include "mtypes.h"
+#include "macros.h"
+#include "utils.h"
+
+#include "intel_screen.h"
+#include "intel_batchbuffer.h"
+#include "intel_ioctl.h"
+#include "intel_regions.h"
+
+#include "i830_context.h"
+#include "i830_reg.h"
+
+/* A large amount of state doesn't need to be uploaded.
+ */
+#define ACTIVE (I830_UPLOAD_INVARIENT | \
+ I830_UPLOAD_CTX | \
+ I830_UPLOAD_BUFFERS | \
+ I830_UPLOAD_STIPPLE | \
+ I830_UPLOAD_TEXBLEND(0) | \
+ I830_UPLOAD_TEX(0))
+
+
+#define SET_STATE( i830, STATE ) \
+do { \
+ i830->current->emitted &= ~ACTIVE; \
+ i830->current = &i830->STATE; \
+ i830->current->emitted &= ~ACTIVE; \
+} while (0)
+
+
+static void
+set_no_stencil_write(struct intel_context *intel)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+
+ /* ctx->Driver.Enable( ctx, GL_STENCIL_TEST, GL_FALSE )
+ */
+ i830->meta.Ctx[I830_CTXREG_ENABLES_1] &= ~ENABLE_STENCIL_TEST;
+ i830->meta.Ctx[I830_CTXREG_ENABLES_2] &= ~ENABLE_STENCIL_WRITE;
+ i830->meta.Ctx[I830_CTXREG_ENABLES_1] |= DISABLE_STENCIL_TEST;
+ i830->meta.Ctx[I830_CTXREG_ENABLES_2] |= DISABLE_STENCIL_WRITE;
+
+ i830->meta.emitted &= ~I830_UPLOAD_CTX;
+}
+
+static void
+set_no_depth_write(struct intel_context *intel)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+
+ /* ctx->Driver.Enable( ctx, GL_DEPTH_TEST, GL_FALSE )
+ */
+ i830->meta.Ctx[I830_CTXREG_ENABLES_1] &= ~ENABLE_DIS_DEPTH_TEST_MASK;
+ i830->meta.Ctx[I830_CTXREG_ENABLES_2] &= ~ENABLE_DIS_DEPTH_WRITE_MASK;
+ i830->meta.Ctx[I830_CTXREG_ENABLES_1] |= DISABLE_DEPTH_TEST;
+ i830->meta.Ctx[I830_CTXREG_ENABLES_2] |= DISABLE_DEPTH_WRITE;
+
+ i830->meta.emitted &= ~I830_UPLOAD_CTX;
+}
+
+/* Set depth unit to replace.
+ */
+static void
+set_depth_replace(struct intel_context *intel)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+
+ /* ctx->Driver.Enable( ctx, GL_DEPTH_TEST, GL_FALSE )
+ * ctx->Driver.DepthMask( ctx, GL_TRUE )
+ */
+ i830->meta.Ctx[I830_CTXREG_ENABLES_1] &= ~ENABLE_DIS_DEPTH_TEST_MASK;
+ i830->meta.Ctx[I830_CTXREG_ENABLES_2] &= ~ENABLE_DIS_DEPTH_WRITE_MASK;
+ i830->meta.Ctx[I830_CTXREG_ENABLES_1] |= ENABLE_DEPTH_TEST;
+ i830->meta.Ctx[I830_CTXREG_ENABLES_2] |= ENABLE_DEPTH_WRITE;
+
+ /* ctx->Driver.DepthFunc( ctx, GL_ALWAYS )
+ */
+ i830->meta.Ctx[I830_CTXREG_STATE3] &= ~DEPTH_TEST_FUNC_MASK;
+   i830->meta.Ctx[I830_CTXREG_STATE3] |= (ENABLE_DEPTH_TEST_FUNC |
+                                          DEPTH_TEST_FUNC(COMPAREFUNC_ALWAYS));
+
+ i830->meta.emitted &= ~I830_UPLOAD_CTX;
+}
+
+
+/* Set stencil unit to replace always with the reference value.
+ */
+static void
+set_stencil_replace(struct intel_context *intel,
+ GLuint s_mask, GLuint s_clear)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+
+ /* ctx->Driver.Enable( ctx, GL_STENCIL_TEST, GL_TRUE )
+ */
+ i830->meta.Ctx[I830_CTXREG_ENABLES_1] |= ENABLE_STENCIL_TEST;
+ i830->meta.Ctx[I830_CTXREG_ENABLES_2] |= ENABLE_STENCIL_WRITE;
+
+ /* ctx->Driver.StencilMask( ctx, s_mask )
+ */
+ i830->meta.Ctx[I830_CTXREG_STATE4] &= ~MODE4_ENABLE_STENCIL_WRITE_MASK;
+ i830->meta.Ctx[I830_CTXREG_STATE4] |= (ENABLE_STENCIL_WRITE_MASK |
+ STENCIL_WRITE_MASK((s_mask &
+ 0xff)));
+
+ /* ctx->Driver.StencilOp( ctx, GL_REPLACE, GL_REPLACE, GL_REPLACE )
+ */
+ i830->meta.Ctx[I830_CTXREG_STENCILTST] &= ~(STENCIL_OPS_MASK);
+ i830->meta.Ctx[I830_CTXREG_STENCILTST] |=
+ (ENABLE_STENCIL_PARMS |
+ STENCIL_FAIL_OP(STENCILOP_REPLACE) |
+ STENCIL_PASS_DEPTH_FAIL_OP(STENCILOP_REPLACE) |
+ STENCIL_PASS_DEPTH_PASS_OP(STENCILOP_REPLACE));
+
+ /* ctx->Driver.StencilFunc( ctx, GL_ALWAYS, s_clear, ~0 )
+ */
+ i830->meta.Ctx[I830_CTXREG_STATE4] &= ~MODE4_ENABLE_STENCIL_TEST_MASK;
+ i830->meta.Ctx[I830_CTXREG_STATE4] |= (ENABLE_STENCIL_TEST_MASK |
+ STENCIL_TEST_MASK(0xff));
+
+ i830->meta.Ctx[I830_CTXREG_STENCILTST] &= ~(STENCIL_REF_VALUE_MASK |
+ ENABLE_STENCIL_TEST_FUNC_MASK);
+ i830->meta.Ctx[I830_CTXREG_STENCILTST] |=
+ (ENABLE_STENCIL_REF_VALUE |
+ ENABLE_STENCIL_TEST_FUNC |
+ STENCIL_REF_VALUE((s_clear & 0xff)) |
+ STENCIL_TEST_FUNC(COMPAREFUNC_ALWAYS));
+
+
+
+ i830->meta.emitted &= ~I830_UPLOAD_CTX;
+}
+
+
+static void
+set_color_mask(struct intel_context *intel, GLboolean state)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+
+ const GLuint mask = ((1 << WRITEMASK_RED_SHIFT) |
+ (1 << WRITEMASK_GREEN_SHIFT) |
+ (1 << WRITEMASK_BLUE_SHIFT) |
+ (1 << WRITEMASK_ALPHA_SHIFT));
+
+ i830->meta.Ctx[I830_CTXREG_ENABLES_2] &= ~mask;
+
+ if (state) {
+ i830->meta.Ctx[I830_CTXREG_ENABLES_2] |=
+ (i830->state.Ctx[I830_CTXREG_ENABLES_2] & mask);
+ }
+
+ i830->meta.emitted &= ~I830_UPLOAD_CTX;
+}
+
+/* Installs a one-stage passthrough texture blend pipeline. Is there
+ * more that can be done to turn off texturing?
+ */
+static void
+set_no_texture(struct intel_context *intel)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+ static const struct gl_tex_env_combine_state comb = {
+ GL_NONE, GL_NONE,
+ {GL_TEXTURE, 0, 0,}, {GL_TEXTURE, 0, 0,},
+ {GL_SRC_COLOR, 0, 0}, {GL_SRC_ALPHA, 0, 0},
+ 0, 0, 0, 0
+ };
+
+ i830->meta.TexBlendWordsUsed[0] =
+ i830SetTexEnvCombine(i830, &comb, 0, TEXBLENDARG_TEXEL0,
+ i830->meta.TexBlend[0], NULL);
+
+ i830->meta.TexBlend[0][0] |= TEXOP_LAST_STAGE;
+ i830->meta.emitted &= ~I830_UPLOAD_TEXBLEND(0);
+}
+
+/* Set up a single element blend stage for 'replace' texturing with no
+ * funny ops.
+ */
+static void
+set_texture_blend_replace(struct intel_context *intel)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+ static const struct gl_tex_env_combine_state comb = {
+ GL_REPLACE, GL_REPLACE,
+ {GL_TEXTURE, GL_TEXTURE, GL_TEXTURE,}, {GL_TEXTURE, GL_TEXTURE,
+ GL_TEXTURE,},
+ {GL_SRC_COLOR, GL_SRC_COLOR, GL_SRC_COLOR}, {GL_SRC_ALPHA, GL_SRC_ALPHA,
+ GL_SRC_ALPHA},
+ 0, 0, 1, 1
+ };
+
+ i830->meta.TexBlendWordsUsed[0] =
+ i830SetTexEnvCombine(i830, &comb, 0, TEXBLENDARG_TEXEL0,
+ i830->meta.TexBlend[0], NULL);
+
+ i830->meta.TexBlend[0][0] |= TEXOP_LAST_STAGE;
+ i830->meta.emitted &= ~I830_UPLOAD_TEXBLEND(0);
+
+/* fprintf(stderr, "%s: TexBlendWordsUsed[0]: %d\n", */
+/* __FUNCTION__, i830->meta.TexBlendWordsUsed[0]); */
+}
+
+
+
+/* Set up an arbitrary piece of memory as a rectangular texture
+ * (including the front or back buffer).
+ */
+static GLboolean
+set_tex_rect_source(struct intel_context *intel,
+ struct _DriBufferObject *buffer,
+ GLuint offset,
+ GLuint pitch, GLuint height, GLenum format, GLenum type)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+ GLuint *setup = i830->meta.Tex[0];
+ GLint numLevels = 1;
+ GLuint textureFormat;
+ GLuint cpp;
+
+ /* A full implementation of this would do the upload through
+ * glTexImage2D, and get all the conversion operations at that
+ * point. We are restricted, but still at least have access to the
+ * fragment program swizzle.
+ */
+ switch (format) {
+ case GL_BGRA:
+ switch (type) {
+ case GL_UNSIGNED_INT_8_8_8_8_REV:
+ case GL_UNSIGNED_BYTE:
+ textureFormat = (MAPSURF_32BIT | MT_32BIT_ARGB8888);
+ cpp = 4;
+ break;
+ default:
+ return GL_FALSE;
+ }
+ break;
+ case GL_RGBA:
+ switch (type) {
+ case GL_UNSIGNED_INT_8_8_8_8_REV:
+ case GL_UNSIGNED_BYTE:
+ textureFormat = (MAPSURF_32BIT | MT_32BIT_ABGR8888);
+ cpp = 4;
+ break;
+ default:
+ return GL_FALSE;
+ }
+ break;
+ case GL_BGR:
+ switch (type) {
+ case GL_UNSIGNED_SHORT_5_6_5_REV:
+ textureFormat = (MAPSURF_16BIT | MT_16BIT_RGB565);
+ cpp = 2;
+ break;
+ default:
+ return GL_FALSE;
+ }
+ break;
+ case GL_RGB:
+ switch (type) {
+ case GL_UNSIGNED_SHORT_5_6_5:
+ textureFormat = (MAPSURF_16BIT | MT_16BIT_RGB565);
+ cpp = 2;
+ break;
+ default:
+ return GL_FALSE;
+ }
+ break;
+
+ default:
+ return GL_FALSE;
+ }
+
+ i830->meta.tex_buffer[0] = buffer;
+ i830->meta.tex_offset[0] = offset;
+
+ setup[I830_TEXREG_TM0LI] = (_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
+ (LOAD_TEXTURE_MAP0 << 0) | 4);
+ setup[I830_TEXREG_TM0S1] = (((height - 1) << TM0S1_HEIGHT_SHIFT) |
+ ((pitch - 1) << TM0S1_WIDTH_SHIFT) |
+ textureFormat);
+   setup[I830_TEXREG_TM0S2] =
+      ((((pitch * cpp) / 4 - 1) << TM0S2_PITCH_SHIFT) | TM0S2_CUBE_FACE_ENA_MASK);
+
+   setup[I830_TEXREG_TM0S3] =
+      ((((numLevels - 1) * 4) << TM0S3_MIN_MIP_SHIFT) |
+       (FILTER_NEAREST << TM0S3_MIN_FILTER_SHIFT) |
+       (MIPFILTER_NONE << TM0S3_MIP_FILTER_SHIFT) |
+       (FILTER_NEAREST << TM0S3_MAG_FILTER_SHIFT));
+
+ setup[I830_TEXREG_CUBE] = (_3DSTATE_MAP_CUBE | MAP_UNIT(0));
+
+ setup[I830_TEXREG_MCS] = (_3DSTATE_MAP_COORD_SET_CMD |
+ MAP_UNIT(0) |
+ ENABLE_TEXCOORD_PARAMS |
+ TEXCOORDS_ARE_IN_TEXELUNITS |
+ TEXCOORDTYPE_CARTESIAN |
+ ENABLE_ADDR_V_CNTL |
+ TEXCOORD_ADDR_V_MODE(TEXCOORDMODE_WRAP) |
+ ENABLE_ADDR_U_CNTL |
+ TEXCOORD_ADDR_U_MODE(TEXCOORDMODE_WRAP));
+
+ i830->meta.emitted &= ~I830_UPLOAD_TEX(0);
+ return GL_TRUE;
+}
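
A hedged caller sketch (hypothetical, not part of the patch): the device-independent intel code is expected to hand this function a region's buffer, pitch and height, choosing a format/type pair that the switch above accepts. The intel_region field names (buffer, pitch, height, cpp) and the vtbl signature are assumed to mirror the static function above.

static GLboolean sketch_bind_region_as_source(struct intel_context *intel,
                                              struct intel_region *src)
{
   /* 16bpp regions map to 565, 32bpp regions to ARGB8888 (see switch above). */
   GLenum format = (src->cpp == 2) ? GL_BGR : GL_BGRA;
   GLenum type = (src->cpp == 2) ? GL_UNSIGNED_SHORT_5_6_5_REV
                                 : GL_UNSIGNED_BYTE;

   return intel->vtbl.meta_tex_rect_source(intel, src->buffer, 0,
                                           src->pitch, src->height,
                                           format, type);
}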
+
+
+static void
+set_vertex_format(struct intel_context *intel)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+ i830->meta.Ctx[I830_CTXREG_VF] = (_3DSTATE_VFT0_CMD |
+ VFT0_TEX_COUNT(1) |
+ VFT0_DIFFUSE | VFT0_XYZ);
+ i830->meta.Ctx[I830_CTXREG_VF2] = (_3DSTATE_VFT1_CMD |
+ VFT1_TEX0_FMT(TEXCOORDFMT_2D) |
+ VFT1_TEX1_FMT(TEXCOORDFMT_2D) |
+ VFT1_TEX2_FMT(TEXCOORDFMT_2D) |
+ VFT1_TEX3_FMT(TEXCOORDFMT_2D));
+ i830->meta.emitted &= ~I830_UPLOAD_CTX;
+}
+
+
+static void
+meta_import_pixel_state(struct intel_context *intel)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+
+ i830->meta.Ctx[I830_CTXREG_STATE1] = i830->state.Ctx[I830_CTXREG_STATE1];
+ i830->meta.Ctx[I830_CTXREG_STATE2] = i830->state.Ctx[I830_CTXREG_STATE2];
+ i830->meta.Ctx[I830_CTXREG_STATE3] = i830->state.Ctx[I830_CTXREG_STATE3];
+ i830->meta.Ctx[I830_CTXREG_STATE4] = i830->state.Ctx[I830_CTXREG_STATE4];
+ i830->meta.Ctx[I830_CTXREG_STATE5] = i830->state.Ctx[I830_CTXREG_STATE5];
+ i830->meta.Ctx[I830_CTXREG_IALPHAB] = i830->state.Ctx[I830_CTXREG_IALPHAB];
+ i830->meta.Ctx[I830_CTXREG_STENCILTST] =
+ i830->state.Ctx[I830_CTXREG_STENCILTST];
+ i830->meta.Ctx[I830_CTXREG_ENABLES_1] =
+ i830->state.Ctx[I830_CTXREG_ENABLES_1];
+ i830->meta.Ctx[I830_CTXREG_ENABLES_2] =
+ i830->state.Ctx[I830_CTXREG_ENABLES_2];
+ i830->meta.Ctx[I830_CTXREG_AA] = i830->state.Ctx[I830_CTXREG_AA];
+ i830->meta.Ctx[I830_CTXREG_FOGCOLOR] =
+ i830->state.Ctx[I830_CTXREG_FOGCOLOR];
+ i830->meta.Ctx[I830_CTXREG_BLENDCOLOR0] =
+ i830->state.Ctx[I830_CTXREG_BLENDCOLOR0];
+ i830->meta.Ctx[I830_CTXREG_BLENDCOLOR1] =
+ i830->state.Ctx[I830_CTXREG_BLENDCOLOR1];
+ i830->meta.Ctx[I830_CTXREG_MCSB0] = i830->state.Ctx[I830_CTXREG_MCSB0];
+ i830->meta.Ctx[I830_CTXREG_MCSB1] = i830->state.Ctx[I830_CTXREG_MCSB1];
+
+
+ i830->meta.Ctx[I830_CTXREG_STATE3] &= ~CULLMODE_MASK;
+ i830->meta.Stipple[I830_STPREG_ST1] &= ~ST1_ENABLE;
+ i830->meta.emitted &= ~I830_UPLOAD_CTX;
+
+
+ i830->meta.Buffer[I830_DESTREG_SENABLE] =
+ i830->state.Buffer[I830_DESTREG_SENABLE];
+ i830->meta.Buffer[I830_DESTREG_SR1] = i830->state.Buffer[I830_DESTREG_SR1];
+ i830->meta.Buffer[I830_DESTREG_SR2] = i830->state.Buffer[I830_DESTREG_SR2];
+ i830->meta.emitted &= ~I830_UPLOAD_BUFFERS;
+}
+
+
+
+/* Select between front and back draw buffers.
+ */
+static void
+meta_draw_region(struct intel_context *intel,
+ struct intel_region *draw_region,
+ struct intel_region *depth_region)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+ GLuint format;
+ GLuint depth_format = DEPTH_FRMT_16_FIXED;
+
+ intel_region_release(&i830->meta.draw_region);
+ intel_region_reference(&i830->meta.draw_region, draw_region);
+
+ intel_region_release(&i830->meta.depth_region);
+ intel_region_reference(&i830->meta.depth_region, depth_region);
+
+ /* XXX FBO: grab code from i915 meta_draw_region */
+
+ /* XXX: 555 support?
+ */
+ if (draw_region->cpp == 2)
+ format = DV_PF_565;
+ else
+ format = DV_PF_8888;
+
+ if (depth_region) {
+ if (depth_region->cpp == 2)
+ depth_format = DEPTH_FRMT_16_FIXED;
+ else
+ depth_format = DEPTH_FRMT_24_FIXED_8_OTHER;
+ }
+
+ i830->meta.Buffer[I830_DESTREG_DV1] = (DSTORG_HORT_BIAS(0x8) | /* .5 */
+ DSTORG_VERT_BIAS(0x8) | /* .5 */
+ format | DEPTH_IS_Z | depth_format);
+
+ i830->meta.emitted &= ~I830_UPLOAD_BUFFERS;
+}
+
+
+/* Operations where the 3D engine is decoupled temporarily from the
+ * current GL state and used for purposes other than simply rendering
+ * incoming triangles.
+ */
+static void
+install_meta_state(struct intel_context *intel)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+ memcpy(&i830->meta, &i830->initial, sizeof(i830->meta));
+
+ i830->meta.active = ACTIVE;
+ i830->meta.emitted = 0;
+
+ SET_STATE(i830, meta);
+ set_vertex_format(intel);
+ set_no_texture(intel);
+}
+
+static void
+leave_meta_state(struct intel_context *intel)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+ intel_region_release(&i830->meta.draw_region);
+ intel_region_release(&i830->meta.depth_region);
+/* intel_region_release(intel, &i830->meta.tex_region[0]); */
+ SET_STATE(i830, state);
+}
+
+
+
+void
+i830InitMetaFuncs(struct i830_context *i830)
+{
+ i830->intel.vtbl.install_meta_state = install_meta_state;
+ i830->intel.vtbl.leave_meta_state = leave_meta_state;
+ i830->intel.vtbl.meta_no_depth_write = set_no_depth_write;
+ i830->intel.vtbl.meta_no_stencil_write = set_no_stencil_write;
+ i830->intel.vtbl.meta_stencil_replace = set_stencil_replace;
+ i830->intel.vtbl.meta_depth_replace = set_depth_replace;
+ i830->intel.vtbl.meta_color_mask = set_color_mask;
+ i830->intel.vtbl.meta_no_texture = set_no_texture;
+ i830->intel.vtbl.meta_texture_blend_replace = set_texture_blend_replace;
+ i830->intel.vtbl.meta_tex_rect_source = set_tex_rect_source;
+ i830->intel.vtbl.meta_draw_region = meta_draw_region;
+ i830->intel.vtbl.meta_import_pixel_state = meta_import_pixel_state;
+}
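
For orientation, the generic intel code is expected to bracket these hooks roughly as sketched below (the real call sites live in the intel_pixel_* and blit paths and may differ): install_meta_state switches i830->current to the meta copy of the state, and leave_meta_state switches it back.

static void sketch_meta_depth_fill(struct intel_context *intel)
{
   intel->vtbl.install_meta_state(intel);   /* SET_STATE(i830, meta) */
   intel->vtbl.meta_no_texture(intel);
   intel->vtbl.meta_depth_replace(intel);

   /* ... emit a window-sized quad at the desired Z here ... */

   intel->vtbl.leave_meta_state(intel);     /* SET_STATE(i830, state) */
}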
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef _I830_REG_H_
+#define _I830_REG_H_
+
+
+#include "intel_reg.h"
+
+#define I830_SET_FIELD( var, mask, value ) (var &= ~(mask), var |= value)
+
+#define _3DSTATE_AA_CMD (CMD_3D | (0x06<<24))
+#define AA_LINE_ECAAR_WIDTH_ENABLE (1<<16)
+#define AA_LINE_ECAAR_WIDTH_0_5 0
+#define AA_LINE_ECAAR_WIDTH_1_0 (1<<14)
+#define AA_LINE_ECAAR_WIDTH_2_0 (2<<14)
+#define AA_LINE_ECAAR_WIDTH_4_0 (3<<14)
+#define AA_LINE_REGION_WIDTH_ENABLE (1<<8)
+#define AA_LINE_REGION_WIDTH_0_5 0
+#define AA_LINE_REGION_WIDTH_1_0 (1<<6)
+#define AA_LINE_REGION_WIDTH_2_0 (2<<6)
+#define AA_LINE_REGION_WIDTH_4_0 (3<<6)
+#define AA_LINE_ENABLE ((1<<1) | 1)
+#define AA_LINE_DISABLE (1<<1)
+
+#define _3DSTATE_BUF_INFO_CMD (CMD_3D | (0x1d<<24) | (0x8e<<16) | 1)
+/* Dword 1 */
+#define BUF_3D_ID_COLOR_BACK (0x3<<24)
+#define BUF_3D_ID_DEPTH (0x7<<24)
+#define BUF_3D_USE_FENCE (1<<23)
+#define BUF_3D_TILED_SURFACE (1<<22)
+#define BUF_3D_TILE_WALK_X 0
+#define BUF_3D_TILE_WALK_Y (1<<21)
+#define BUF_3D_PITCH(x) (((x)/4)<<2)
+/* Dword 2 */
+#define BUF_3D_ADDR(x) ((x) & ~0x3)
+
+
+#define _3DSTATE_COLOR_FACTOR_CMD (CMD_3D | (0x1d<<24) | (0x1<<16))
+
+#define _3DSTATE_COLOR_FACTOR_N_CMD(stage) (CMD_3D | (0x1d<<24) | \
+ ((0x90+(stage))<<16))
+
+#define _3DSTATE_CONST_BLEND_COLOR_CMD (CMD_3D | (0x1d<<24) | (0x88<<16))
+
+#define _3DSTATE_DFLT_DIFFUSE_CMD (CMD_3D | (0x1d<<24) | (0x99<<16))
+
+#define _3DSTATE_DFLT_SPEC_CMD (CMD_3D | (0x1d<<24) | (0x9a<<16))
+
+#define _3DSTATE_DFLT_Z_CMD (CMD_3D | (0x1d<<24) | (0x98<<16))
+
+
+#define _3DSTATE_DST_BUF_VARS_CMD (CMD_3D | (0x1d<<24) | (0x85<<16))
+/* Dword 1 */
+#define DSTORG_HORT_BIAS(x) ((x)<<20)
+#define DSTORG_VERT_BIAS(x) ((x)<<16)
+#define COLOR_4_2_2_CHNL_WRT_ALL 0
+#define COLOR_4_2_2_CHNL_WRT_Y (1<<12)
+#define COLOR_4_2_2_CHNL_WRT_CR (2<<12)
+#define COLOR_4_2_2_CHNL_WRT_CB (3<<12)
+#define COLOR_4_2_2_CHNL_WRT_CRCB (4<<12)
+#define COLR_BUF_8BIT 0
+#define COLR_BUF_RGB555 (1<<8)
+#define COLR_BUF_RGB565 (2<<8)
+#define COLR_BUF_ARGB8888 (3<<8)
+#define DEPTH_IS_Z 0
+#define DEPTH_IS_W (1<<6)
+#define DEPTH_FRMT_16_FIXED 0
+#define DEPTH_FRMT_16_FLOAT (1<<2)
+#define DEPTH_FRMT_24_FIXED_8_OTHER (2<<2)
+#define DEPTH_FRMT_24_FLOAT_8_OTHER (3<<2)
+#define VERT_LINE_STRIDE_1 (1<<1)
+#define VERT_LINE_STRIDE_0 0
+#define VERT_LINE_STRIDE_OFS_1 1
+#define VERT_LINE_STRIDE_OFS_0 0
+
+
+#define _3DSTATE_DRAW_RECT_CMD (CMD_3D|(0x1d<<24)|(0x80<<16)|3)
+/* Dword 1 */
+#define DRAW_RECT_DIS_DEPTH_OFS (1<<30)
+#define DRAW_DITHER_OFS_X(x) ((x)<<26)
+#define DRAW_DITHER_OFS_Y(x) ((x)<<24)
+/* Dword 2 */
+#define DRAW_YMIN(x) ((x)<<16)
+#define DRAW_XMIN(x) (x)
+/* Dword 3 */
+#define DRAW_YMAX(x) ((x)<<16)
+#define DRAW_XMAX(x) (x)
+/* Dword 4 */
+#define DRAW_YORG(x) ((x)<<16)
+#define DRAW_XORG(x) (x)
+
+
+#define _3DSTATE_ENABLES_1_CMD (CMD_3D|(0x3<<24))
+#define ENABLE_LOGIC_OP_MASK ((1<<23)|(1<<22))
+#define ENABLE_LOGIC_OP ((1<<23)|(1<<22))
+#define DISABLE_LOGIC_OP (1<<23)
+#define ENABLE_STENCIL_TEST ((1<<21)|(1<<20))
+#define DISABLE_STENCIL_TEST (1<<21)
+#define ENABLE_DEPTH_BIAS ((1<<11)|(1<<10))
+#define DISABLE_DEPTH_BIAS (1<<11)
+#define ENABLE_SPEC_ADD_MASK ((1<<9)|(1<<8))
+#define ENABLE_SPEC_ADD ((1<<9)|(1<<8))
+#define DISABLE_SPEC_ADD (1<<9)
+#define ENABLE_DIS_FOG_MASK ((1<<7)|(1<<6))
+#define ENABLE_FOG ((1<<7)|(1<<6))
+#define DISABLE_FOG (1<<7)
+#define ENABLE_DIS_ALPHA_TEST_MASK ((1<<5)|(1<<4))
+#define ENABLE_ALPHA_TEST ((1<<5)|(1<<4))
+#define DISABLE_ALPHA_TEST (1<<5)
+#define ENABLE_DIS_CBLEND_MASK ((1<<3)|(1<<2))
+#define ENABLE_COLOR_BLEND ((1<<3)|(1<<2))
+#define DISABLE_COLOR_BLEND (1<<3)
+#define ENABLE_DIS_DEPTH_TEST_MASK ((1<<1)|1)
+#define ENABLE_DEPTH_TEST ((1<<1)|1)
+#define DISABLE_DEPTH_TEST (1<<1)
+
+/* _3DSTATE_ENABLES_2, p138 */
+#define _3DSTATE_ENABLES_2_CMD (CMD_3D|(0x4<<24))
+#define ENABLE_STENCIL_WRITE ((1<<21)|(1<<20))
+#define DISABLE_STENCIL_WRITE (1<<21)
+#define ENABLE_TEX_CACHE ((1<<17)|(1<<16))
+#define DISABLE_TEX_CACHE (1<<17)
+#define ENABLE_DITHER ((1<<9)|(1<<8))
+#define DISABLE_DITHER (1<<9)
+#define ENABLE_COLOR_MASK (1<<10)
+#define WRITEMASK_ALPHA (1<<7)
+#define WRITEMASK_ALPHA_SHIFT 7
+#define WRITEMASK_RED (1<<6)
+#define WRITEMASK_RED_SHIFT 6
+#define WRITEMASK_GREEN (1<<5)
+#define WRITEMASK_GREEN_SHIFT 5
+#define WRITEMASK_BLUE (1<<4)
+#define WRITEMASK_BLUE_SHIFT 4
+#define WRITEMASK_MASK ((1<<4)|(1<<5)|(1<<6)|(1<<7))
+#define ENABLE_COLOR_WRITE ((1<<3)|(1<<2))
+#define DISABLE_COLOR_WRITE (1<<3)
+#define ENABLE_DIS_DEPTH_WRITE_MASK 0x3
+#define ENABLE_DEPTH_WRITE ((1<<1)|1)
+#define DISABLE_DEPTH_WRITE (1<<1)
+
+/* _3DSTATE_FOG_COLOR, p139 */
+#define _3DSTATE_FOG_COLOR_CMD (CMD_3D|(0x15<<24))
+#define FOG_COLOR_RED(x) ((x)<<16)
+#define FOG_COLOR_GREEN(x) ((x)<<8)
+#define FOG_COLOR_BLUE(x) (x)
+
+/* _3DSTATE_FOG_MODE, p140 */
+#define _3DSTATE_FOG_MODE_CMD (CMD_3D|(0x1d<<24)|(0x89<<16)|2)
+/* Dword 1 */
+#define FOGFUNC_ENABLE (1<<31)
+#define FOGFUNC_VERTEX 0
+#define FOGFUNC_PIXEL_EXP (1<<28)
+#define FOGFUNC_PIXEL_EXP2 (2<<28)
+#define FOGFUNC_PIXEL_LINEAR (3<<28)
+#define FOGSRC_INDEX_Z (1<<27)
+#define FOGSRC_INDEX_W ((1<<27)|(1<<25))
+#define FOG_LINEAR_CONST (1<<24)
+#define FOG_CONST_1(x) ((x)<<4)
+#define ENABLE_FOG_DENSITY (1<<23)
+/* Dword 2 */
+#define FOG_CONST_2(x) (x)
+/* Dword 3 */
+#define FOG_DENSITY(x) (x)
+
+/* _3DSTATE_INDEPENDENT_ALPHA_BLEND, p142 */
+#define _3DSTATE_INDPT_ALPHA_BLEND_CMD (CMD_3D|(0x0b<<24))
+#define ENABLE_INDPT_ALPHA_BLEND ((1<<23)|(1<<22))
+#define DISABLE_INDPT_ALPHA_BLEND (1<<23)
+#define ALPHA_BLENDFUNC_MASK 0x3f0000
+#define ENABLE_ALPHA_BLENDFUNC (1<<21)
+#define ABLENDFUNC_ADD 0
+#define ABLENDFUNC_SUB (1<<16)
+#define ABLENDFUNC_RVSE_SUB (2<<16)
+#define ABLENDFUNC_MIN (3<<16)
+#define ABLENDFUNC_MAX (4<<16)
+#define SRC_DST_ABLEND_MASK 0xfff
+#define ENABLE_SRC_ABLEND_FACTOR (1<<11)
+#define SRC_ABLEND_FACT(x) ((x)<<6)
+#define ENABLE_DST_ABLEND_FACTOR (1<<5)
+#define DST_ABLEND_FACT(x) (x)
+
+
+/* _3DSTATE_MAP_BLEND_ARG, p152 */
+#define _3DSTATE_MAP_BLEND_ARG_CMD(stage) (CMD_3D|(0x0e<<24)|((stage)<<20))
+
+#define TEXPIPE_COLOR 0
+#define TEXPIPE_ALPHA (1<<18)
+#define TEXPIPE_KILL (2<<18)
+#define TEXBLEND_ARG0 0
+#define TEXBLEND_ARG1 (1<<15)
+#define TEXBLEND_ARG2 (2<<15)
+#define TEXBLEND_ARG3 (3<<15)
+#define TEXBLENDARG_MODIFY_PARMS (1<<6)
+#define TEXBLENDARG_REPLICATE_ALPHA (1<<5)
+#define TEXBLENDARG_INV_ARG (1<<4)
+#define TEXBLENDARG_ONE 0
+#define TEXBLENDARG_FACTOR 0x01
+#define TEXBLENDARG_ACCUM 0x02
+#define TEXBLENDARG_DIFFUSE 0x03
+#define TEXBLENDARG_SPEC 0x04
+#define TEXBLENDARG_CURRENT 0x05
+#define TEXBLENDARG_TEXEL0 0x06
+#define TEXBLENDARG_TEXEL1 0x07
+#define TEXBLENDARG_TEXEL2 0x08
+#define TEXBLENDARG_TEXEL3 0x09
+#define TEXBLENDARG_FACTOR_N 0x0e
+
+/* _3DSTATE_MAP_BLEND_OP, p155 */
+#define _3DSTATE_MAP_BLEND_OP_CMD(stage) (CMD_3D|(0x0d<<24)|((stage)<<20))
+#if 0
+# define TEXPIPE_COLOR 0
+# define TEXPIPE_ALPHA (1<<18)
+# define TEXPIPE_KILL (2<<18)
+#endif
+#define ENABLE_TEXOUTPUT_WRT_SEL (1<<17)
+#define TEXOP_OUTPUT_CURRENT 0
+#define TEXOP_OUTPUT_ACCUM (1<<15)
+#define ENABLE_TEX_CNTRL_STAGE ((1<<12)|(1<<11))
+#define DISABLE_TEX_CNTRL_STAGE (1<<12)
+#define TEXOP_SCALE_SHIFT 9
+#define TEXOP_SCALE_1X (0 << TEXOP_SCALE_SHIFT)
+#define TEXOP_SCALE_2X (1 << TEXOP_SCALE_SHIFT)
+#define TEXOP_SCALE_4X (2 << TEXOP_SCALE_SHIFT)
+#define TEXOP_MODIFY_PARMS (1<<8)
+#define TEXOP_LAST_STAGE (1<<7)
+#define TEXBLENDOP_KILLPIXEL 0x02
+#define TEXBLENDOP_ARG1 0x01
+#define TEXBLENDOP_ARG2 0x02
+#define TEXBLENDOP_MODULATE 0x03
+#define TEXBLENDOP_ADD 0x06
+#define TEXBLENDOP_ADDSIGNED 0x07
+#define TEXBLENDOP_BLEND 0x08
+#define TEXBLENDOP_BLEND_AND_ADD 0x09
+#define TEXBLENDOP_SUBTRACT 0x0a
+#define TEXBLENDOP_DOT3 0x0b
+#define TEXBLENDOP_DOT4 0x0c
+#define TEXBLENDOP_MODULATE_AND_ADD 0x0d
+#define TEXBLENDOP_MODULATE_2X_AND_ADD 0x0e
+#define TEXBLENDOP_MODULATE_4X_AND_ADD 0x0f
+
+/* _3DSTATE_MAP_BUMP_TABLE, p160 TODO */
+/* _3DSTATE_MAP_COLOR_CHROMA_KEY, p161 TODO */
+
+#define _3DSTATE_MAP_COORD_TRANSFORM ((3<<29)|(0x1d<<24)|(0x8c<<16))
+#define DISABLE_TEX_TRANSFORM (1<<28)
+#define TEXTURE_SET(x) (x<<29)
+
+#define _3DSTATE_VERTEX_TRANSFORM ((3<<29)|(0x1d<<24)|(0x8b<<16))
+#define DISABLE_VIEWPORT_TRANSFORM (1<<31)
+#define DISABLE_PERSPECTIVE_DIVIDE (1<<29)
+
+
+/* _3DSTATE_MAP_COORD_SET_BINDINGS, p162 */
+#define _3DSTATE_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16))
+#define TEXBIND_MASK3 ((1<<15)|(1<<14)|(1<<13)|(1<<12))
+#define TEXBIND_MASK2 ((1<<11)|(1<<10)|(1<<9)|(1<<8))
+#define TEXBIND_MASK1 ((1<<7)|(1<<6)|(1<<5)|(1<<4))
+#define TEXBIND_MASK0 ((1<<3)|(1<<2)|(1<<1)|1)
+
+#define TEXBIND_SET3(x) ((x)<<12)
+#define TEXBIND_SET2(x) ((x)<<8)
+#define TEXBIND_SET1(x) ((x)<<4)
+#define TEXBIND_SET0(x) (x)
+
+#define TEXCOORDSRC_KEEP 0
+#define TEXCOORDSRC_DEFAULT 0x01
+#define TEXCOORDSRC_VTXSET_0 0x08
+#define TEXCOORDSRC_VTXSET_1 0x09
+#define TEXCOORDSRC_VTXSET_2 0x0a
+#define TEXCOORDSRC_VTXSET_3 0x0b
+#define TEXCOORDSRC_VTXSET_4 0x0c
+#define TEXCOORDSRC_VTXSET_5 0x0d
+#define TEXCOORDSRC_VTXSET_6 0x0e
+#define TEXCOORDSRC_VTXSET_7 0x0f
+
+#define MAP_UNIT(unit) ((unit)<<16)
+#define MAP_UNIT_MASK (0x7<<16)
+
+/* _3DSTATE_MAP_COORD_SETS, p164 */
+#define _3DSTATE_MAP_COORD_SET_CMD (CMD_3D|(0x1c<<24)|(0x01<<19))
+#define ENABLE_TEXCOORD_PARAMS (1<<15)
+#define TEXCOORDS_ARE_NORMAL (1<<14)
+#define TEXCOORDS_ARE_IN_TEXELUNITS 0
+#define TEXCOORDTYPE_CARTESIAN 0
+#define TEXCOORDTYPE_HOMOGENEOUS (1<<11)
+#define TEXCOORDTYPE_VECTOR (2<<11)
+#define TEXCOORDTYPE_MASK (0x7<<11)
+#define ENABLE_ADDR_V_CNTL (1<<7)
+#define ENABLE_ADDR_U_CNTL (1<<3)
+#define TEXCOORD_ADDR_V_MODE(x) ((x)<<4)
+#define TEXCOORD_ADDR_U_MODE(x) (x)
+#define TEXCOORDMODE_WRAP 0
+#define TEXCOORDMODE_MIRROR 1
+#define TEXCOORDMODE_CLAMP 2
+#define TEXCOORDMODE_WRAP_SHORTEST 3
+#define TEXCOORDMODE_CLAMP_BORDER 4
+#define TEXCOORD_ADDR_V_MASK 0x70
+#define TEXCOORD_ADDR_U_MASK 0x7
+
+/* _3DSTATE_MAP_CUBE, p168 TODO */
+#define _3DSTATE_MAP_CUBE (CMD_3D|(0x1c<<24)|(0x0a<<19))
+#define CUBE_NEGX_ENABLE (1<<5)
+#define CUBE_POSX_ENABLE (1<<4)
+#define CUBE_NEGY_ENABLE (1<<3)
+#define CUBE_POSY_ENABLE (1<<2)
+#define CUBE_NEGZ_ENABLE (1<<1)
+#define CUBE_POSZ_ENABLE (1<<0)
+
+
+/* _3DSTATE_MODES_1, p190 */
+#define _3DSTATE_MODES_1_CMD (CMD_3D|(0x08<<24))
+#define BLENDFUNC_MASK 0x3f0000
+#define ENABLE_COLR_BLND_FUNC (1<<21)
+#define BLENDFUNC_ADD 0
+#define BLENDFUNC_SUB (1<<16)
+#define BLENDFUNC_RVRSE_SUB (2<<16)
+#define BLENDFUNC_MIN (3<<16)
+#define BLENDFUNC_MAX (4<<16)
+#define SRC_DST_BLND_MASK 0xfff
+#define ENABLE_SRC_BLND_FACTOR (1<<11)
+#define ENABLE_DST_BLND_FACTOR (1<<5)
+#define SRC_BLND_FACT(x) ((x)<<6)
+#define DST_BLND_FACT(x) (x)
+
+
+/* _3DSTATE_MODES_2, p192 */
+#define _3DSTATE_MODES_2_CMD (CMD_3D|(0x0f<<24))
+#define ENABLE_GLOBAL_DEPTH_BIAS (1<<22)
+#define GLOBAL_DEPTH_BIAS(x) ((x)<<14)
+#define ENABLE_ALPHA_TEST_FUNC (1<<13)
+#define ENABLE_ALPHA_REF_VALUE (1<<8)
+#define ALPHA_TEST_FUNC(x) ((x)<<9)
+#define ALPHA_REF_VALUE(x) (x)
+
+#define ALPHA_TEST_REF_MASK 0x3fff
+
+/* _3DSTATE_MODES_3, p193 */
+#define _3DSTATE_MODES_3_CMD (CMD_3D|(0x02<<24))
+#define DEPTH_TEST_FUNC_MASK 0x1f0000
+#define ENABLE_DEPTH_TEST_FUNC (1<<20)
+/* Uses COMPAREFUNC */
+#define DEPTH_TEST_FUNC(x) ((x)<<16)
+#define ENABLE_ALPHA_SHADE_MODE (1<<11)
+#define ENABLE_FOG_SHADE_MODE (1<<9)
+#define ENABLE_SPEC_SHADE_MODE (1<<7)
+#define ENABLE_COLOR_SHADE_MODE (1<<5)
+#define ALPHA_SHADE_MODE(x) ((x)<<10)
+#define FOG_SHADE_MODE(x) ((x)<<8)
+#define SPEC_SHADE_MODE(x) ((x)<<6)
+#define COLOR_SHADE_MODE(x) ((x)<<4)
+#define CULLMODE_MASK 0xf
+#define ENABLE_CULL_MODE (1<<3)
+#define CULLMODE_BOTH 0
+#define CULLMODE_NONE 1
+#define CULLMODE_CW 2
+#define CULLMODE_CCW 3
+
+#define SHADE_MODE_LINEAR 0
+#define SHADE_MODE_FLAT 0x1
+
+/* _3DSTATE_MODES_4, p195 */
+#define _3DSTATE_MODES_4_CMD (CMD_3D|(0x16<<24))
+#define ENABLE_LOGIC_OP_FUNC (1<<23)
+#define LOGIC_OP_FUNC(x) ((x)<<18)
+#define LOGICOP_MASK ((1<<18)|(1<<19)|(1<<20)|(1<<21))
+#define LOGICOP_CLEAR 0
+#define LOGICOP_NOR 0x1
+#define LOGICOP_AND_INV 0x2
+#define LOGICOP_COPY_INV 0x3
+#define LOGICOP_AND_RVRSE 0x4
+#define LOGICOP_INV 0x5
+#define LOGICOP_XOR 0x6
+#define LOGICOP_NAND 0x7
+#define LOGICOP_AND 0x8
+#define LOGICOP_EQUIV 0x9
+#define LOGICOP_NOOP 0xa
+#define LOGICOP_OR_INV 0xb
+#define LOGICOP_COPY 0xc
+#define LOGICOP_OR_RVRSE 0xd
+#define LOGICOP_OR 0xe
+#define LOGICOP_SET 0xf
+#define MODE4_ENABLE_STENCIL_TEST_MASK ((1<<17)|(0xff00))
+#define ENABLE_STENCIL_TEST_MASK (1<<17)
+#define STENCIL_TEST_MASK(x) (((x)&0xff)<<8)
+#define MODE4_ENABLE_STENCIL_WRITE_MASK ((1<<16)|(0x00ff))
+#define ENABLE_STENCIL_WRITE_MASK (1<<16)
+#define STENCIL_WRITE_MASK(x) ((x)&0xff)
+
+/* _3DSTATE_MODES_5, p196 */
+#define _3DSTATE_MODES_5_CMD (CMD_3D|(0x0c<<24))
+#define ENABLE_SPRITE_POINT_TEX (1<<23)
+#define SPRITE_POINT_TEX_ON (1<<22)
+#define SPRITE_POINT_TEX_OFF 0
+#define FLUSH_RENDER_CACHE (1<<18)
+#define FLUSH_TEXTURE_CACHE (1<<16)
+#define FIXED_LINE_WIDTH_MASK 0xfc00
+#define ENABLE_FIXED_LINE_WIDTH (1<<15)
+#define FIXED_LINE_WIDTH(x) ((x)<<10)
+#define FIXED_POINT_WIDTH_MASK 0x3ff
+#define ENABLE_FIXED_POINT_WIDTH (1<<9)
+#define FIXED_POINT_WIDTH(x) (x)
+
+/* _3DSTATE_RASTERIZATION_RULES, p198 */
+#define _3DSTATE_RASTER_RULES_CMD (CMD_3D|(0x07<<24))
+#define ENABLE_POINT_RASTER_RULE (1<<15)
+#define OGL_POINT_RASTER_RULE (1<<13)
+#define ENABLE_LINE_STRIP_PROVOKE_VRTX (1<<8)
+#define ENABLE_TRI_FAN_PROVOKE_VRTX (1<<5)
+#define ENABLE_TRI_STRIP_PROVOKE_VRTX (1<<2)
+#define LINE_STRIP_PROVOKE_VRTX(x) ((x)<<6)
+#define TRI_FAN_PROVOKE_VRTX(x) ((x)<<3)
+#define TRI_STRIP_PROVOKE_VRTX(x) (x)
+
+/* _3DSTATE_SCISSOR_ENABLE, p200 */
+#define _3DSTATE_SCISSOR_ENABLE_CMD (CMD_3D|(0x1c<<24)|(0x10<<19))
+#define ENABLE_SCISSOR_RECT ((1<<1) | 1)
+#define DISABLE_SCISSOR_RECT (1<<1)
+
+/* _3DSTATE_SCISSOR_RECTANGLE_0, p201 */
+#define _3DSTATE_SCISSOR_RECT_0_CMD (CMD_3D|(0x1d<<24)|(0x81<<16)|1)
+/* Dword 1 */
+#define SCISSOR_RECT_0_YMIN(x) ((x)<<16)
+#define SCISSOR_RECT_0_XMIN(x) (x)
+/* Dword 2 */
+#define SCISSOR_RECT_0_YMAX(x) ((x)<<16)
+#define SCISSOR_RECT_0_XMAX(x) (x)
+
+/* _3DSTATE_STENCIL_TEST, p202 */
+#define _3DSTATE_STENCIL_TEST_CMD (CMD_3D|(0x09<<24))
+#define ENABLE_STENCIL_PARMS (1<<23)
+#define STENCIL_OPS_MASK (0xffc000)
+#define STENCIL_FAIL_OP(x) ((x)<<20)
+#define STENCIL_PASS_DEPTH_FAIL_OP(x) ((x)<<17)
+#define STENCIL_PASS_DEPTH_PASS_OP(x) ((x)<<14)
+
+#define ENABLE_STENCIL_TEST_FUNC_MASK ((1<<13)|(1<<12)|(1<<11)|(1<<10)|(1<<9))
+#define ENABLE_STENCIL_TEST_FUNC (1<<13)
+/* Uses COMPAREFUNC */
+#define STENCIL_TEST_FUNC(x) ((x)<<9)
+#define STENCIL_REF_VALUE_MASK ((1<<8)|0xff)
+#define ENABLE_STENCIL_REF_VALUE (1<<8)
+#define STENCIL_REF_VALUE(x) (x)
+
+/* _3DSTATE_VERTEX_FORMAT, p204 */
+#define _3DSTATE_VFT0_CMD (CMD_3D|(0x05<<24))
+#define VFT0_POINT_WIDTH (1<<12)
+#define VFT0_TEX_COUNT_MASK (7<<8)
+#define VFT0_TEX_COUNT_SHIFT 8
+#define VFT0_TEX_COUNT(x) ((x)<<8)
+#define VFT0_SPEC (1<<7)
+#define VFT0_DIFFUSE (1<<6)
+#define VFT0_DEPTH_OFFSET (1<<5)
+#define VFT0_XYZ (1<<1)
+#define VFT0_XYZW (2<<1)
+#define VFT0_XY (3<<1)
+#define VFT0_XYW (4<<1)
+#define VFT0_XYZW_MASK (7<<1)
+
+/* _3DSTATE_VERTEX_FORMAT_2, p206 */
+#define _3DSTATE_VFT1_CMD (CMD_3D|(0x0a<<24))
+#define VFT1_TEX7_FMT(x) ((x)<<14)
+#define VFT1_TEX6_FMT(x) ((x)<<12)
+#define VFT1_TEX5_FMT(x) ((x)<<10)
+#define VFT1_TEX4_FMT(x) ((x)<<8)
+#define VFT1_TEX3_FMT(x) ((x)<<6)
+#define VFT1_TEX2_FMT(x) ((x)<<4)
+#define VFT1_TEX1_FMT(x) ((x)<<2)
+#define VFT1_TEX0_FMT(x) (x)
+#define VFT1_TEX0_MASK 3
+#define VFT1_TEX1_SHIFT 2
+#define TEXCOORDFMT_2D 0
+#define TEXCOORDFMT_3D 1
+#define TEXCOORDFMT_4D 2
+#define TEXCOORDFMT_1D 3
+
+/* New stuff picked up along the way */
+
+#define MLC_LOD_BIAS_MASK ((1<<7)-1)
+
+
+/* _3DSTATE_VERTEX_TRANSFORM, p207 */
+#define _3DSTATE_VERTEX_TRANS_CMD (CMD_3D|(0x1d<<24)|(0x8b<<16)|0)
+#define _3DSTATE_VERTEX_TRANS_MTX_CMD (CMD_3D|(0x1d<<24)|(0x8b<<16)|6)
+/* Dword 1 */
+#define ENABLE_VIEWPORT_TRANSFORM ((1<<31)|(1<<30))
+#define DISABLE_VIEWPORT_TRANSFORM (1<<31)
+#define ENABLE_PERSP_DIVIDE ((1<<29)|(1<<28))
+#define DISABLE_PERSP_DIVIDE (1<<29)
+#define VRTX_TRANS_LOAD_MATRICES 0x7421
+#define VRTX_TRANS_NO_LOAD_MATRICES 0x0000
+/* Dword 2 -> 7 are matrix elements */
+
+/* _3DSTATE_W_STATE, p209 */
+#define _3DSTATE_W_STATE_CMD (CMD_3D|(0x1d<<24)|(0x8d<<16)|1)
+/* Dword 1 */
+#define MAGIC_W_STATE_DWORD1 0x00000008
+/* Dword 2 */
+#define WFAR_VALUE(x) (x)
+
+
+/* Stipple command, carried over from the i810, apparently:
+ */
+#define _3DSTATE_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define ST1_ENABLE (1<<16)
+#define ST1_MASK (0xffff)
+
+
+
+#define _3DSTATE_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16))
+#define LOAD_TEXTURE_MAP0 (1<<11)
+#define LOAD_GLOBAL_COLOR_FACTOR (1<<6)
+
+#define TM0S0_ADDRESS_MASK 0xfffffffc
+#define TM0S0_USE_FENCE (1<<1)
+
+#define TM0S1_HEIGHT_SHIFT 21
+#define TM0S1_WIDTH_SHIFT 10
+#define TM0S1_PALETTE_SELECT (1<<9)
+#define TM0S1_MAPSURF_FORMAT_MASK (0x7 << 6)
+#define TM0S1_MAPSURF_FORMAT_SHIFT 6
+#define MAPSURF_8BIT_INDEXED (0<<6)
+#define MAPSURF_8BIT (1<<6)
+#define MAPSURF_16BIT (2<<6)
+#define MAPSURF_32BIT (3<<6)
+#define MAPSURF_411 (4<<6)
+#define MAPSURF_422 (5<<6)
+#define MAPSURF_COMPRESSED (6<<6)
+#define MAPSURF_4BIT_INDEXED (7<<6)
+#define TM0S1_MT_FORMAT_MASK (0x7 << 3)
+#define TM0S1_MT_FORMAT_SHIFT 3
+#define MT_4BIT_IDX_ARGB8888 (7<<3) /* SURFACE_4BIT_INDEXED */
+#define MT_8BIT_IDX_RGB565 (0<<3) /* SURFACE_8BIT_INDEXED */
+#define MT_8BIT_IDX_ARGB1555 (1<<3)
+#define MT_8BIT_IDX_ARGB4444 (2<<3)
+#define MT_8BIT_IDX_AY88 (3<<3)
+#define MT_8BIT_IDX_ABGR8888 (4<<3)
+#define MT_8BIT_IDX_BUMP_88DVDU (5<<3)
+#define MT_8BIT_IDX_BUMP_655LDVDU (6<<3)
+#define MT_8BIT_IDX_ARGB8888 (7<<3)
+#define MT_8BIT_I8 (0<<3) /* SURFACE_8BIT */
+#define MT_8BIT_L8 (1<<3)
+#define MT_16BIT_RGB565 (0<<3) /* SURFACE_16BIT */
+#define MT_16BIT_ARGB1555 (1<<3)
+#define MT_16BIT_ARGB4444 (2<<3)
+#define MT_16BIT_AY88 (3<<3)
+#define MT_16BIT_DIB_ARGB1555_8888 (4<<3)
+#define MT_16BIT_BUMP_88DVDU (5<<3)
+#define MT_16BIT_BUMP_655LDVDU (6<<3)
+#define MT_16BIT_DIB_RGB565_8888 (7<<3)
+#define MT_32BIT_ARGB8888 (0<<3) /* SURFACE_32BIT */
+#define MT_32BIT_ABGR8888 (1<<3)
+#define MT_32BIT_BUMP_XLDVDU_8888 (6<<3)
+#define MT_32BIT_DIB_8888 (7<<3)
+#define MT_411_YUV411 (0<<3) /* SURFACE_411 */
+#define MT_422_YCRCB_SWAPY (0<<3) /* SURFACE_422 */
+#define MT_422_YCRCB_NORMAL (1<<3)
+#define MT_422_YCRCB_SWAPUV (2<<3)
+#define MT_422_YCRCB_SWAPUVY (3<<3)
+#define MT_COMPRESS_DXT1 (0<<3) /* SURFACE_COMPRESSED */
+#define MT_COMPRESS_DXT2_3 (1<<3)
+#define MT_COMPRESS_DXT4_5 (2<<3)
+#define MT_COMPRESS_FXT1 (3<<3)
+#define TM0S1_COLORSPACE_CONVERSION (1 << 2)
+#define TM0S1_TILED_SURFACE (1 << 1)
+#define TM0S1_TILE_WALK (1 << 0)
+
+#define TM0S2_PITCH_SHIFT 21
+#define TM0S2_CUBE_FACE_ENA_SHIFT 15
+#define TM0S2_CUBE_FACE_ENA_MASK (1<<15)
+#define TM0S2_MAP_FORMAT (1<<14)
+#define TM0S2_VERTICAL_LINE_STRIDE (1<<13)
+#define TM0S2_VERITCAL_LINE_STRIDE_OFF (1<<12)
+#define TM0S2_OUTPUT_CHAN_SHIFT 10
+#define TM0S2_OUTPUT_CHAN_MASK (3<<10)
+
+#define TM0S3_MIP_FILTER_MASK (0x3<<30)
+#define TM0S3_MIP_FILTER_SHIFT 30
+#define MIPFILTER_NONE 0
+#define MIPFILTER_NEAREST 1
+#define MIPFILTER_LINEAR 3
+#define TM0S3_MAG_FILTER_MASK (0x3<<28)
+#define TM0S3_MAG_FILTER_SHIFT 28
+#define TM0S3_MIN_FILTER_MASK (0x3<<26)
+#define TM0S3_MIN_FILTER_SHIFT 26
+#define FILTER_NEAREST 0
+#define FILTER_LINEAR 1
+#define FILTER_ANISOTROPIC 2
+
+#define TM0S3_LOD_BIAS_SHIFT 17
+#define TM0S3_LOD_BIAS_MASK (0x1ff<<17)
+#define TM0S3_MAX_MIP_SHIFT 9
+#define TM0S3_MAX_MIP_MASK (0xff<<9)
+#define TM0S3_MIN_MIP_SHIFT 3
+#define TM0S3_MIN_MIP_MASK (0x3f<<3)
+#define TM0S3_KILL_PIXEL (1<<2)
+#define TM0S3_KEYED_FILTER (1<<1)
+#define TM0S3_CHROMA_KEY (1<<0)
+
+
+/* _3DSTATE_MAP_TEXEL_STREAM, p188 */
+#define _3DSTATE_MAP_TEX_STREAM_CMD (CMD_3D|(0x1c<<24)|(0x05<<19))
+#define DISABLE_TEX_STREAM_BUMP (1<<12)
+#define ENABLE_TEX_STREAM_BUMP ((1<<12)|(1<<11))
+#define TEX_MODIFY_UNIT_0 0
+#define TEX_MODIFY_UNIT_1 (1<<8)
+#define ENABLE_TEX_STREAM_COORD_SET (1<<7)
+#define TEX_STREAM_COORD_SET(x) ((x)<<4)
+#define ENABLE_TEX_STREAM_MAP_IDX (1<<3)
+#define TEX_STREAM_MAP_IDX(x) (x)
+
+
+#define MI_FLUSH ((0<<29)|(4<<23))
+#define FLUSH_MAP_CACHE (1<<0)
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "glheader.h"
+#include "context.h"
+#include "macros.h"
+#include "enums.h"
+#include "dd.h"
+
+#include "texmem.h"
+
+#include "intel_screen.h"
+#include "intel_batchbuffer.h"
+#include "intel_fbo.h"
+
+#include "i830_context.h"
+#include "i830_reg.h"
+
+#define FILE_DEBUG_FLAG DEBUG_STATE
+
+static void
+i830StencilFuncSeparate(GLcontext * ctx, GLenum face, GLenum func, GLint ref,
+ GLuint mask)
+{
+ struct i830_context *i830 = i830_context(ctx);
+ int test = intel_translate_compare_func(func);
+
+ mask = mask & 0xff;
+
+ DBG("%s : func: %s, ref : 0x%x, mask: 0x%x\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(func), ref, mask);
+
+
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_STATE4] &= ~MODE4_ENABLE_STENCIL_TEST_MASK;
+ i830->state.Ctx[I830_CTXREG_STATE4] |= (ENABLE_STENCIL_TEST_MASK |
+ STENCIL_TEST_MASK(mask));
+ i830->state.Ctx[I830_CTXREG_STENCILTST] &= ~(STENCIL_REF_VALUE_MASK |
+ ENABLE_STENCIL_TEST_FUNC_MASK);
+ i830->state.Ctx[I830_CTXREG_STENCILTST] |= (ENABLE_STENCIL_REF_VALUE |
+ ENABLE_STENCIL_TEST_FUNC |
+ STENCIL_REF_VALUE(ref) |
+ STENCIL_TEST_FUNC(test));
+}
+
+static void
+i830StencilMaskSeparate(GLcontext * ctx, GLenum face, GLuint mask)
+{
+ struct i830_context *i830 = i830_context(ctx);
+
+ DBG("%s : mask 0x%x\n", __FUNCTION__, mask);
+
+ mask = mask & 0xff;
+
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_STATE4] &= ~MODE4_ENABLE_STENCIL_WRITE_MASK;
+ i830->state.Ctx[I830_CTXREG_STATE4] |= (ENABLE_STENCIL_WRITE_MASK |
+ STENCIL_WRITE_MASK(mask));
+}
+
+static void
+i830StencilOpSeparate(GLcontext * ctx, GLenum face, GLenum fail, GLenum zfail,
+ GLenum zpass)
+{
+ struct i830_context *i830 = i830_context(ctx);
+ int fop, dfop, dpop;
+
+ DBG("%s: fail : %s, zfail: %s, zpass : %s\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(fail),
+ _mesa_lookup_enum_by_nr(zfail),
+ _mesa_lookup_enum_by_nr(zpass));
+
+ fop = 0;
+ dfop = 0;
+ dpop = 0;
+
+ switch (fail) {
+ case GL_KEEP:
+ fop = STENCILOP_KEEP;
+ break;
+ case GL_ZERO:
+ fop = STENCILOP_ZERO;
+ break;
+ case GL_REPLACE:
+ fop = STENCILOP_REPLACE;
+ break;
+ case GL_INCR:
+ fop = STENCILOP_INCRSAT;
+ break;
+ case GL_DECR:
+ fop = STENCILOP_DECRSAT;
+ break;
+ case GL_INCR_WRAP:
+ fop = STENCILOP_INCR;
+ break;
+ case GL_DECR_WRAP:
+ fop = STENCILOP_DECR;
+ break;
+ case GL_INVERT:
+ fop = STENCILOP_INVERT;
+ break;
+ default:
+ break;
+ }
+ switch (zfail) {
+ case GL_KEEP:
+ dfop = STENCILOP_KEEP;
+ break;
+ case GL_ZERO:
+ dfop = STENCILOP_ZERO;
+ break;
+ case GL_REPLACE:
+ dfop = STENCILOP_REPLACE;
+ break;
+ case GL_INCR:
+ dfop = STENCILOP_INCRSAT;
+ break;
+ case GL_DECR:
+ dfop = STENCILOP_DECRSAT;
+ break;
+ case GL_INCR_WRAP:
+ dfop = STENCILOP_INCR;
+ break;
+ case GL_DECR_WRAP:
+ dfop = STENCILOP_DECR;
+ break;
+ case GL_INVERT:
+ dfop = STENCILOP_INVERT;
+ break;
+ default:
+ break;
+ }
+ switch (zpass) {
+ case GL_KEEP:
+ dpop = STENCILOP_KEEP;
+ break;
+ case GL_ZERO:
+ dpop = STENCILOP_ZERO;
+ break;
+ case GL_REPLACE:
+ dpop = STENCILOP_REPLACE;
+ break;
+ case GL_INCR:
+ dpop = STENCILOP_INCRSAT;
+ break;
+ case GL_DECR:
+ dpop = STENCILOP_DECRSAT;
+ break;
+ case GL_INCR_WRAP:
+ dpop = STENCILOP_INCR;
+ break;
+ case GL_DECR_WRAP:
+ dpop = STENCILOP_DECR;
+ break;
+ case GL_INVERT:
+ dpop = STENCILOP_INVERT;
+ break;
+ default:
+ break;
+ }
+
+
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_STENCILTST] &= ~(STENCIL_OPS_MASK);
+   i830->state.Ctx[I830_CTXREG_STENCILTST] |= (ENABLE_STENCIL_PARMS |
+                                               STENCIL_FAIL_OP(fop) |
+                                               STENCIL_PASS_DEPTH_FAIL_OP(dfop) |
+                                               STENCIL_PASS_DEPTH_PASS_OP(dpop));
+}
+
+static void
+i830AlphaFunc(GLcontext * ctx, GLenum func, GLfloat ref)
+{
+ struct i830_context *i830 = i830_context(ctx);
+ int test = intel_translate_compare_func(func);
+ GLubyte refByte;
+ GLuint refInt;
+
+ UNCLAMPED_FLOAT_TO_UBYTE(refByte, ref);
+ refInt = (GLuint) refByte;
+
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_STATE2] &= ~ALPHA_TEST_REF_MASK;
+ i830->state.Ctx[I830_CTXREG_STATE2] |= (ENABLE_ALPHA_TEST_FUNC |
+ ENABLE_ALPHA_REF_VALUE |
+ ALPHA_TEST_FUNC(test) |
+ ALPHA_REF_VALUE(refInt));
+}
+
+/**
+ * Makes sure that the proper enables are set for LogicOp, Independent Alpha
+ * Blend, and Blending. It needs to be called from numerous places where we
+ * could change the LogicOp or Independent Alpha Blend without subsequent
+ * calls to glEnable.
+ *
+ * \todo
+ * This function is substantially different from the old i830-specific driver.
+ * I'm not sure which is correct.
+ */
+static void
+i830EvalLogicOpBlendState(GLcontext * ctx)
+{
+ struct i830_context *i830 = i830_context(ctx);
+
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+
+ if (ctx->Color._LogicOpEnabled) {
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] &= ~(ENABLE_COLOR_BLEND |
+ ENABLE_LOGIC_OP_MASK);
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] |= (DISABLE_COLOR_BLEND |
+ ENABLE_LOGIC_OP);
+ }
+ else if (ctx->Color.BlendEnabled) {
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] &= ~(ENABLE_COLOR_BLEND |
+ ENABLE_LOGIC_OP_MASK);
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] |= (ENABLE_COLOR_BLEND |
+ DISABLE_LOGIC_OP);
+ }
+ else {
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] &= ~(ENABLE_COLOR_BLEND |
+ ENABLE_LOGIC_OP_MASK);
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] |= (DISABLE_COLOR_BLEND |
+ DISABLE_LOGIC_OP);
+ }
+}
+
+static void
+i830BlendColor(GLcontext * ctx, const GLfloat color[4])
+{
+ struct i830_context *i830 = i830_context(ctx);
+ GLubyte r, g, b, a;
+
+ DBG("%s\n", __FUNCTION__);
+
+ UNCLAMPED_FLOAT_TO_UBYTE(r, color[RCOMP]);
+ UNCLAMPED_FLOAT_TO_UBYTE(g, color[GCOMP]);
+ UNCLAMPED_FLOAT_TO_UBYTE(b, color[BCOMP]);
+ UNCLAMPED_FLOAT_TO_UBYTE(a, color[ACOMP]);
+
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_BLENDCOLOR1] =
+ (a << 24) | (r << 16) | (g << 8) | b;
+}
+
+/**
+ * Sets both the blend equation (called "function" in i830 docs) and the
+ * blend function (called "factor" in i830 docs). This is done in a single
+ * function because some blend equations (i.e., \c GL_MIN and \c GL_MAX)
+ * change the interpretation of the blend function.
+ */
+static void
+i830_set_blend_state(GLcontext * ctx)
+{
+ struct i830_context *i830 = i830_context(ctx);
+ int funcA;
+ int funcRGB;
+ int eqnA;
+ int eqnRGB;
+ int iab;
+ int s1;
+
+
+ funcRGB =
+ SRC_BLND_FACT(intel_translate_blend_factor(ctx->Color.BlendSrcRGB))
+ | DST_BLND_FACT(intel_translate_blend_factor(ctx->Color.BlendDstRGB));
+
+ switch (ctx->Color.BlendEquationRGB) {
+ case GL_FUNC_ADD:
+ eqnRGB = BLENDFUNC_ADD;
+ break;
+ case GL_MIN:
+ eqnRGB = BLENDFUNC_MIN;
+ funcRGB = SRC_BLND_FACT(BLENDFACT_ONE) | DST_BLND_FACT(BLENDFACT_ONE);
+ break;
+ case GL_MAX:
+ eqnRGB = BLENDFUNC_MAX;
+ funcRGB = SRC_BLND_FACT(BLENDFACT_ONE) | DST_BLND_FACT(BLENDFACT_ONE);
+ break;
+ case GL_FUNC_SUBTRACT:
+ eqnRGB = BLENDFUNC_SUB;
+ break;
+ case GL_FUNC_REVERSE_SUBTRACT:
+ eqnRGB = BLENDFUNC_RVRSE_SUB;
+ break;
+ default:
+ fprintf(stderr, "[%s:%u] Invalid RGB blend equation (0x%04x).\n",
+ __FUNCTION__, __LINE__, ctx->Color.BlendEquationRGB);
+ return;
+ }
+
+
+ funcA = SRC_ABLEND_FACT(intel_translate_blend_factor(ctx->Color.BlendSrcA))
+ | DST_ABLEND_FACT(intel_translate_blend_factor(ctx->Color.BlendDstA));
+
+ switch (ctx->Color.BlendEquationA) {
+ case GL_FUNC_ADD:
+ eqnA = BLENDFUNC_ADD;
+ break;
+ case GL_MIN:
+ eqnA = BLENDFUNC_MIN;
+ funcA = SRC_BLND_FACT(BLENDFACT_ONE) | DST_BLND_FACT(BLENDFACT_ONE);
+ break;
+ case GL_MAX:
+ eqnA = BLENDFUNC_MAX;
+ funcA = SRC_BLND_FACT(BLENDFACT_ONE) | DST_BLND_FACT(BLENDFACT_ONE);
+ break;
+ case GL_FUNC_SUBTRACT:
+ eqnA = BLENDFUNC_SUB;
+ break;
+ case GL_FUNC_REVERSE_SUBTRACT:
+ eqnA = BLENDFUNC_RVRSE_SUB;
+ break;
+ default:
+ fprintf(stderr, "[%s:%u] Invalid alpha blend equation (0x%04x).\n",
+ __FUNCTION__, __LINE__, ctx->Color.BlendEquationA);
+ return;
+ }
+
+ iab = eqnA | funcA
+ | _3DSTATE_INDPT_ALPHA_BLEND_CMD
+ | ENABLE_SRC_ABLEND_FACTOR | ENABLE_DST_ABLEND_FACTOR
+ | ENABLE_ALPHA_BLENDFUNC;
+ s1 = eqnRGB | funcRGB
+ | _3DSTATE_MODES_1_CMD
+ | ENABLE_SRC_BLND_FACTOR | ENABLE_DST_BLND_FACTOR
+ | ENABLE_COLR_BLND_FUNC;
+
+ if ((eqnA | funcA) != (eqnRGB | funcRGB))
+ iab |= ENABLE_INDPT_ALPHA_BLEND;
+ else
+ iab |= DISABLE_INDPT_ALPHA_BLEND;
+
+ if (iab != i830->state.Ctx[I830_CTXREG_IALPHAB] ||
+ s1 != i830->state.Ctx[I830_CTXREG_STATE1]) {
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_IALPHAB] = iab;
+ i830->state.Ctx[I830_CTXREG_STATE1] = s1;
+ }
+
+ /* This will catch a logicop blend equation. It will also ensure
+    * independent alpha blend is really in the correct state (either enabled
+ * or disabled) if blending is already enabled.
+ */
+
+ i830EvalLogicOpBlendState(ctx);
+
+ if (0) {
+ fprintf(stderr,
+ "[%s:%u] STATE1: 0x%08x IALPHAB: 0x%08x blend is %sabled\n",
+ __FUNCTION__, __LINE__, i830->state.Ctx[I830_CTXREG_STATE1],
+ i830->state.Ctx[I830_CTXREG_IALPHAB],
+ (ctx->Color.BlendEnabled) ? "en" : "dis");
+ }
+}
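
To make the GL_MIN/GL_MAX special case concrete: under those equations the GL spec ignores the blend factors entirely, which is why the code above substitutes BLENDFACT_ONE for both. A per-channel sketch of the arithmetic being programmed (illustrative only, not driver code):

static float sketch_blend_channel(GLenum eqn, float sf, float df,
                                  float cs, float cd)
{
   switch (eqn) {
   case GL_FUNC_ADD:              return cs * sf + cd * df;
   case GL_FUNC_SUBTRACT:         return cs * sf - cd * df;
   case GL_FUNC_REVERSE_SUBTRACT: return cd * df - cs * sf;
   case GL_MIN:                   return cs < cd ? cs : cd;  /* factors unused */
   case GL_MAX:                   return cs > cd ? cs : cd;  /* factors unused */
   default:                       return cs;
   }
}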
+
+
+static void
+i830BlendEquationSeparate(GLcontext * ctx, GLenum modeRGB, GLenum modeA)
+{
+ DBG("%s -> %s, %s\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(modeRGB),
+ _mesa_lookup_enum_by_nr(modeA));
+
+ (void) modeRGB;
+ (void) modeA;
+ i830_set_blend_state(ctx);
+}
+
+
+static void
+i830BlendFuncSeparate(GLcontext * ctx, GLenum sfactorRGB,
+ GLenum dfactorRGB, GLenum sfactorA, GLenum dfactorA)
+{
+ DBG("%s -> RGB(%s, %s) A(%s, %s)\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(sfactorRGB),
+ _mesa_lookup_enum_by_nr(dfactorRGB),
+ _mesa_lookup_enum_by_nr(sfactorA),
+ _mesa_lookup_enum_by_nr(dfactorA));
+
+ (void) sfactorRGB;
+ (void) dfactorRGB;
+ (void) sfactorA;
+ (void) dfactorA;
+ i830_set_blend_state(ctx);
+}
+
+
+
+static void
+i830DepthFunc(GLcontext * ctx, GLenum func)
+{
+ struct i830_context *i830 = i830_context(ctx);
+ int test = intel_translate_compare_func(func);
+
+ DBG("%s\n", __FUNCTION__);
+
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_STATE3] &= ~DEPTH_TEST_FUNC_MASK;
+ i830->state.Ctx[I830_CTXREG_STATE3] |= (ENABLE_DEPTH_TEST_FUNC |
+ DEPTH_TEST_FUNC(test));
+}
+
+static void
+i830DepthMask(GLcontext * ctx, GLboolean flag)
+{
+ struct i830_context *i830 = i830_context(ctx);
+
+ DBG("%s flag (%d)\n", __FUNCTION__, flag);
+
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+
+ i830->state.Ctx[I830_CTXREG_ENABLES_2] &= ~ENABLE_DIS_DEPTH_WRITE_MASK;
+
+ if (flag && ctx->Depth.Test)
+ i830->state.Ctx[I830_CTXREG_ENABLES_2] |= ENABLE_DEPTH_WRITE;
+ else
+ i830->state.Ctx[I830_CTXREG_ENABLES_2] |= DISABLE_DEPTH_WRITE;
+}
+
+/* =============================================================
+ * Polygon stipple
+ *
+ * The i830 supports a 4x4 stipple natively, GL wants 32x32.
+ * Fortunately stipple is usually a repeating pattern.
+ */
+static void
+i830PolygonStipple(GLcontext * ctx, const GLubyte * mask)
+{
+ struct i830_context *i830 = i830_context(ctx);
+ const GLubyte *m = mask;
+ GLubyte p[4];
+ int i, j, k;
+ int active = (ctx->Polygon.StippleFlag &&
+ i830->intel.reduced_primitive == GL_TRIANGLES);
+ GLuint newMask;
+
+ if (active) {
+ I830_STATECHANGE(i830, I830_UPLOAD_STIPPLE);
+ i830->state.Stipple[I830_STPREG_ST1] &= ~ST1_ENABLE;
+ }
+
+ p[0] = mask[12] & 0xf;
+ p[0] |= p[0] << 4;
+ p[1] = mask[8] & 0xf;
+ p[1] |= p[1] << 4;
+ p[2] = mask[4] & 0xf;
+ p[2] |= p[2] << 4;
+ p[3] = mask[0] & 0xf;
+ p[3] |= p[3] << 4;
+
+ for (k = 0; k < 8; k++)
+ for (j = 3; j >= 0; j--)
+ for (i = 0; i < 4; i++, m++)
+ if (*m != p[j]) {
+ i830->intel.hw_stipple = 0;
+ return;
+ }
+
+ newMask = (((p[0] & 0xf) << 0) |
+ ((p[1] & 0xf) << 4) |
+ ((p[2] & 0xf) << 8) | ((p[3] & 0xf) << 12));
+
+
+ if (newMask == 0xffff || newMask == 0x0) {
+ /* this is needed to make conform pass */
+ i830->intel.hw_stipple = 0;
+ return;
+ }
+
+ i830->state.Stipple[I830_STPREG_ST1] &= ~0xffff;
+ i830->state.Stipple[I830_STPREG_ST1] |= newMask;
+ i830->intel.hw_stipple = 1;
+
+ if (active)
+ i830->state.Stipple[I830_STPREG_ST1] |= ST1_ENABLE;
+}
+
+
+/* =============================================================
+ * Hardware clipping
+ */
+static void
+i830Scissor(GLcontext * ctx, GLint x, GLint y, GLsizei w, GLsizei h)
+{
+ struct i830_context *i830 = i830_context(ctx);
+ int x1, y1, x2, y2;
+
+ if (!ctx->DrawBuffer)
+ return;
+
+ DBG("%s %d,%d %dx%d\n", __FUNCTION__, x, y, w, h);
+
+ if (ctx->DrawBuffer->Name == 0) {
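+      /* Window system framebuffer: GL's origin is the lower left while
+       * the hardware scissor is specified from the top, so flip Y here.
+       */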
+ x1 = x;
+ y1 = ctx->DrawBuffer->Height - (y + h);
+ x2 = x + w - 1;
+ y2 = y1 + h - 1;
+ DBG("%s %d..%d,%d..%d (inverted)\n", __FUNCTION__, x1, x2, y1, y2);
+ }
+ else {
+ /* FBO - not inverted
+ */
+ x1 = x;
+ y1 = y;
+ x2 = x + w - 1;
+ y2 = y + h - 1;
+ DBG("%s %d..%d,%d..%d (not inverted)\n", __FUNCTION__, x1, x2, y1, y2);
+ }
+
+ x1 = CLAMP(x1, 0, ctx->DrawBuffer->Width - 1);
+ y1 = CLAMP(y1, 0, ctx->DrawBuffer->Height - 1);
+ x2 = CLAMP(x2, 0, ctx->DrawBuffer->Width - 1);
+ y2 = CLAMP(y2, 0, ctx->DrawBuffer->Height - 1);
+
+ DBG("%s %d..%d,%d..%d (clamped)\n", __FUNCTION__, x1, x2, y1, y2);
+
+ I830_STATECHANGE(i830, I830_UPLOAD_BUFFERS);
+ i830->state.Buffer[I830_DESTREG_SR1] = (y1 << 16) | (x1 & 0xffff);
+ i830->state.Buffer[I830_DESTREG_SR2] = (y2 << 16) | (x2 & 0xffff);
+}
+
+static void
+i830LogicOp(GLcontext * ctx, GLenum opcode)
+{
+ struct i830_context *i830 = i830_context(ctx);
+ int tmp = intel_translate_logic_op(opcode);
+
+ DBG("%s\n", __FUNCTION__);
+
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_STATE4] &= ~LOGICOP_MASK;
+ i830->state.Ctx[I830_CTXREG_STATE4] |= LOGIC_OP_FUNC(tmp);
+}
+
+
+
+static void
+i830CullFaceFrontFace(GLcontext * ctx, GLenum unused)
+{
+ struct i830_context *i830 = i830_context(ctx);
+ GLuint mode;
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (!ctx->Polygon.CullFlag) {
+ mode = CULLMODE_NONE;
+ }
+ else if (ctx->Polygon.CullFaceMode != GL_FRONT_AND_BACK) {
+ mode = CULLMODE_CW;
+
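+      /* Each of the two conditions below flips the winding sense, so for
+       * example culling GL_FRONT faces with a GL_CW front face flips twice
+       * and ends up back at clockwise.
+       */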
+ if (ctx->Polygon.CullFaceMode == GL_FRONT)
+ mode ^= (CULLMODE_CW ^ CULLMODE_CCW);
+ if (ctx->Polygon.FrontFace != GL_CCW)
+ mode ^= (CULLMODE_CW ^ CULLMODE_CCW);
+ }
+ else {
+ mode = CULLMODE_BOTH;
+ }
+
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_STATE3] &= ~CULLMODE_MASK;
+ i830->state.Ctx[I830_CTXREG_STATE3] |= ENABLE_CULL_MODE | mode;
+}
+
+static void
+i830LineWidth(GLcontext * ctx, GLfloat widthf)
+{
+ struct i830_context *i830 = i830_context(ctx);
+ int width;
+ int state5;
+
+ DBG("%s\n", __FUNCTION__);
+
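+   /* The hardware takes a 4-bit fixed line width in half-pixel units
+    * (a value of 2 means 1.0 pixel), so the usable range is 0.5 .. 7.5.
+    */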
+ width = (int) (widthf * 2);
+ CLAMP_SELF(width, 1, 15);
+
+ state5 = i830->state.Ctx[I830_CTXREG_STATE5] & ~FIXED_LINE_WIDTH_MASK;
+ state5 |= (ENABLE_FIXED_LINE_WIDTH | FIXED_LINE_WIDTH(width));
+
+ if (state5 != i830->state.Ctx[I830_CTXREG_STATE5]) {
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_STATE5] = state5;
+ }
+}
+
+static void
+i830PointSize(GLcontext * ctx, GLfloat size)
+{
+ struct i830_context *i830 = i830_context(ctx);
+ GLint point_size = (int) size;
+
+ DBG("%s\n", __FUNCTION__);
+
+ CLAMP_SELF(point_size, 1, 256);
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_STATE5] &= ~FIXED_POINT_WIDTH_MASK;
+ i830->state.Ctx[I830_CTXREG_STATE5] |= (ENABLE_FIXED_POINT_WIDTH |
+ FIXED_POINT_WIDTH(point_size));
+}
+
+
+/* =============================================================
+ * Color masks
+ */
+
+static void
+i830ColorMask(GLcontext * ctx,
+ GLboolean r, GLboolean g, GLboolean b, GLboolean a)
+{
+ struct i830_context *i830 = i830_context(ctx);
+ GLuint tmp = 0;
+
+ DBG("%s r(%d) g(%d) b(%d) a(%d)\n", __FUNCTION__, r, g, b, a);
+
+ tmp = ((i830->state.Ctx[I830_CTXREG_ENABLES_2] & ~WRITEMASK_MASK) |
+ ENABLE_COLOR_MASK |
+ ENABLE_COLOR_WRITE |
+ ((!r) << WRITEMASK_RED_SHIFT) |
+ ((!g) << WRITEMASK_GREEN_SHIFT) |
+ ((!b) << WRITEMASK_BLUE_SHIFT) | ((!a) << WRITEMASK_ALPHA_SHIFT));
+
+ if (tmp != i830->state.Ctx[I830_CTXREG_ENABLES_2]) {
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_ENABLES_2] = tmp;
+ }
+}
+
+static void
+update_specular(GLcontext * ctx)
+{
+ struct i830_context *i830 = i830_context(ctx);
+
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] &= ~ENABLE_SPEC_ADD_MASK;
+
+ if (NEED_SECONDARY_COLOR(ctx))
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] |= ENABLE_SPEC_ADD;
+ else
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] |= DISABLE_SPEC_ADD;
+}
+
+static void
+i830LightModelfv(GLcontext * ctx, GLenum pname, const GLfloat * param)
+{
+ DBG("%s\n", __FUNCTION__);
+
+ if (pname == GL_LIGHT_MODEL_COLOR_CONTROL) {
+ update_specular(ctx);
+ }
+}
+
+/* In Mesa 3.5 we can reliably do native flatshading.
+ */
+static void
+i830ShadeModel(GLcontext * ctx, GLenum mode)
+{
+ struct i830_context *i830 = i830_context(ctx);
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+
+
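+/* Combined mask of the alpha, fog, specular and color shade-mode fields
+ * in the MODES_3 register.
+ */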
+#define SHADE_MODE_MASK ((1<<10)|(1<<8)|(1<<6)|(1<<4))
+
+ i830->state.Ctx[I830_CTXREG_STATE3] &= ~SHADE_MODE_MASK;
+
+ if (mode == GL_FLAT) {
+ i830->state.Ctx[I830_CTXREG_STATE3] |=
+ (ALPHA_SHADE_MODE(SHADE_MODE_FLAT) | FOG_SHADE_MODE(SHADE_MODE_FLAT)
+ | SPEC_SHADE_MODE(SHADE_MODE_FLAT) |
+ COLOR_SHADE_MODE(SHADE_MODE_FLAT));
+ }
+ else {
+ i830->state.Ctx[I830_CTXREG_STATE3] |=
+ (ALPHA_SHADE_MODE(SHADE_MODE_LINEAR) |
+ FOG_SHADE_MODE(SHADE_MODE_LINEAR) |
+ SPEC_SHADE_MODE(SHADE_MODE_LINEAR) |
+ COLOR_SHADE_MODE(SHADE_MODE_LINEAR));
+ }
+}
+
+/* =============================================================
+ * Fog
+ */
+static void
+i830Fogfv(GLcontext * ctx, GLenum pname, const GLfloat * param)
+{
+ struct i830_context *i830 = i830_context(ctx);
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (pname == GL_FOG_COLOR) {
+ GLuint color = (((GLubyte) (ctx->Fog.Color[0] * 255.0F) << 16) |
+ ((GLubyte) (ctx->Fog.Color[1] * 255.0F) << 8) |
+ ((GLubyte) (ctx->Fog.Color[2] * 255.0F) << 0));
+
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_FOGCOLOR] =
+ (_3DSTATE_FOG_COLOR_CMD | color);
+ }
+}
+
+/* =============================================================
+ */
+
+static void
+i830Enable(GLcontext * ctx, GLenum cap, GLboolean state)
+{
+ struct i830_context *i830 = i830_context(ctx);
+
+ switch (cap) {
+ case GL_LIGHTING:
+ case GL_COLOR_SUM:
+ update_specular(ctx);
+ break;
+
+ case GL_ALPHA_TEST:
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] &= ~ENABLE_DIS_ALPHA_TEST_MASK;
+ if (state)
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] |= ENABLE_ALPHA_TEST;
+ else
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] |= DISABLE_ALPHA_TEST;
+
+ break;
+
+ case GL_BLEND:
+ i830EvalLogicOpBlendState(ctx);
+ break;
+
+ case GL_COLOR_LOGIC_OP:
+ i830EvalLogicOpBlendState(ctx);
+
+ /* Logicop doesn't seem to work at 16bpp:
+ */
+ if (i830->intel.intelScreen->cpp == 2)
+ FALLBACK(&i830->intel, I830_FALLBACK_LOGICOP, state);
+ break;
+
+ case GL_DITHER:
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_ENABLES_2] &= ~ENABLE_DITHER;
+
+ if (state)
+ i830->state.Ctx[I830_CTXREG_ENABLES_2] |= ENABLE_DITHER;
+ else
+ i830->state.Ctx[I830_CTXREG_ENABLES_2] |= DISABLE_DITHER;
+ break;
+
+ case GL_DEPTH_TEST:
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] &= ~ENABLE_DIS_DEPTH_TEST_MASK;
+
+ if (state)
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] |= ENABLE_DEPTH_TEST;
+ else
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] |= DISABLE_DEPTH_TEST;
+
+ /* Also turn off depth writes when GL_DEPTH_TEST is disabled:
+ */
+ i830DepthMask(ctx, ctx->Depth.Mask);
+ break;
+
+ case GL_SCISSOR_TEST:
+ I830_STATECHANGE(i830, I830_UPLOAD_BUFFERS);
+
+ if (state)
+ i830->state.Buffer[I830_DESTREG_SENABLE] =
+ (_3DSTATE_SCISSOR_ENABLE_CMD | ENABLE_SCISSOR_RECT);
+ else
+ i830->state.Buffer[I830_DESTREG_SENABLE] =
+ (_3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT);
+
+ break;
+
+ case GL_LINE_SMOOTH:
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+
+ i830->state.Ctx[I830_CTXREG_AA] &= ~AA_LINE_ENABLE;
+ if (state)
+ i830->state.Ctx[I830_CTXREG_AA] |= AA_LINE_ENABLE;
+ else
+ i830->state.Ctx[I830_CTXREG_AA] |= AA_LINE_DISABLE;
+ break;
+
+ case GL_FOG:
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] &= ~ENABLE_DIS_FOG_MASK;
+ if (state)
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] |= ENABLE_FOG;
+ else
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] |= DISABLE_FOG;
+ break;
+
+ case GL_CULL_FACE:
+ i830CullFaceFrontFace(ctx, 0);
+ break;
+
+ case GL_TEXTURE_2D:
+ break;
+
+ case GL_STENCIL_TEST:
+ {
+ GLboolean hw_stencil = GL_FALSE;
+ if (ctx->DrawBuffer) {
+ struct intel_renderbuffer *irbStencil
+ = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_STENCIL);
+ hw_stencil = (irbStencil && irbStencil->region);
+ }
+ if (hw_stencil) {
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+
+ if (state) {
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] |= ENABLE_STENCIL_TEST;
+ i830->state.Ctx[I830_CTXREG_ENABLES_2] |= ENABLE_STENCIL_WRITE;
+ }
+ else {
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] &= ~ENABLE_STENCIL_TEST;
+ i830->state.Ctx[I830_CTXREG_ENABLES_2] &=
+ ~ENABLE_STENCIL_WRITE;
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] |= DISABLE_STENCIL_TEST;
+ i830->state.Ctx[I830_CTXREG_ENABLES_2] |=
+ DISABLE_STENCIL_WRITE;
+ }
+ }
+ else {
+ FALLBACK(&i830->intel, I830_FALLBACK_STENCIL, state);
+ }
+ }
+ break;
+
+ case GL_POLYGON_STIPPLE:
+ /* The stipple command worked on my 855GM box, but not my 845G.
+ * I'll do more testing later to find out exactly which hardware
+ * supports it. Disabled for now.
+ */
+ if (i830->intel.hw_stipple &&
+ i830->intel.reduced_primitive == GL_TRIANGLES) {
+ I830_STATECHANGE(i830, I830_UPLOAD_STIPPLE);
+ i830->state.Stipple[I830_STPREG_ST1] &= ~ST1_ENABLE;
+ if (state)
+ i830->state.Stipple[I830_STPREG_ST1] |= ST1_ENABLE;
+ }
+ break;
+
+ default:
+ ;
+ }
+}
+
+
+static void
+i830_init_packets(struct i830_context *i830)
+{
+ intelScreenPrivate *screen = i830->intel.intelScreen;
+
+ /* Zero all state */
+ memset(&i830->state, 0, sizeof(i830->state));
+
+ /* Set default blend state */
+ i830->state.TexBlend[0][0] = (_3DSTATE_MAP_BLEND_OP_CMD(0) |
+ TEXPIPE_COLOR |
+ ENABLE_TEXOUTPUT_WRT_SEL |
+ TEXOP_OUTPUT_CURRENT |
+ DISABLE_TEX_CNTRL_STAGE |
+ TEXOP_SCALE_1X |
+ TEXOP_MODIFY_PARMS |
+ TEXOP_LAST_STAGE | TEXBLENDOP_ARG1);
+ i830->state.TexBlend[0][1] = (_3DSTATE_MAP_BLEND_OP_CMD(0) |
+ TEXPIPE_ALPHA |
+ ENABLE_TEXOUTPUT_WRT_SEL |
+ TEXOP_OUTPUT_CURRENT |
+ TEXOP_SCALE_1X |
+ TEXOP_MODIFY_PARMS | TEXBLENDOP_ARG1);
+ i830->state.TexBlend[0][2] = (_3DSTATE_MAP_BLEND_ARG_CMD(0) |
+ TEXPIPE_COLOR |
+ TEXBLEND_ARG1 |
+ TEXBLENDARG_MODIFY_PARMS |
+ TEXBLENDARG_DIFFUSE);
+ i830->state.TexBlend[0][3] = (_3DSTATE_MAP_BLEND_ARG_CMD(0) |
+ TEXPIPE_ALPHA |
+ TEXBLEND_ARG1 |
+ TEXBLENDARG_MODIFY_PARMS |
+ TEXBLENDARG_DIFFUSE);
+
+ i830->state.TexBlendWordsUsed[0] = 4;
+
+
+ i830->state.Ctx[I830_CTXREG_VF] = 0;
+ i830->state.Ctx[I830_CTXREG_VF2] = 0;
+
+ i830->state.Ctx[I830_CTXREG_AA] = (_3DSTATE_AA_CMD |
+ AA_LINE_ECAAR_WIDTH_ENABLE |
+ AA_LINE_ECAAR_WIDTH_1_0 |
+ AA_LINE_REGION_WIDTH_ENABLE |
+ AA_LINE_REGION_WIDTH_1_0 |
+ AA_LINE_DISABLE);
+
+ i830->state.Ctx[I830_CTXREG_ENABLES_1] = (_3DSTATE_ENABLES_1_CMD |
+ DISABLE_LOGIC_OP |
+ DISABLE_STENCIL_TEST |
+ DISABLE_DEPTH_BIAS |
+ DISABLE_SPEC_ADD |
+ DISABLE_FOG |
+ DISABLE_ALPHA_TEST |
+ DISABLE_COLOR_BLEND |
+ DISABLE_DEPTH_TEST);
+
+#if 000 /* XXX all the stencil enable state is set in i830Enable(), right? */
+ if (i830->intel.hw_stencil) {
+ i830->state.Ctx[I830_CTXREG_ENABLES_2] = (_3DSTATE_ENABLES_2_CMD |
+ ENABLE_STENCIL_WRITE |
+ ENABLE_TEX_CACHE |
+ ENABLE_DITHER |
+ ENABLE_COLOR_MASK |
+ /* set no color comps disabled */
+ ENABLE_COLOR_WRITE |
+ ENABLE_DEPTH_WRITE);
+ }
+ else
+#endif
+ {
+ i830->state.Ctx[I830_CTXREG_ENABLES_2] = (_3DSTATE_ENABLES_2_CMD |
+ DISABLE_STENCIL_WRITE |
+ ENABLE_TEX_CACHE |
+ ENABLE_DITHER |
+ ENABLE_COLOR_MASK |
+ /* set no color comps disabled */
+ ENABLE_COLOR_WRITE |
+ ENABLE_DEPTH_WRITE);
+ }
+
+ i830->state.Ctx[I830_CTXREG_STATE1] = (_3DSTATE_MODES_1_CMD |
+ ENABLE_COLR_BLND_FUNC |
+ BLENDFUNC_ADD |
+ ENABLE_SRC_BLND_FACTOR |
+ SRC_BLND_FACT(BLENDFACT_ONE) |
+ ENABLE_DST_BLND_FACTOR |
+ DST_BLND_FACT(BLENDFACT_ZERO));
+
+ i830->state.Ctx[I830_CTXREG_STATE2] = (_3DSTATE_MODES_2_CMD |
+ ENABLE_GLOBAL_DEPTH_BIAS |
+ GLOBAL_DEPTH_BIAS(0) |
+ ENABLE_ALPHA_TEST_FUNC |
+ ALPHA_TEST_FUNC(COMPAREFUNC_ALWAYS)
+ | ALPHA_REF_VALUE(0));
+
+ i830->state.Ctx[I830_CTXREG_STATE3] = (_3DSTATE_MODES_3_CMD |
+ ENABLE_DEPTH_TEST_FUNC |
+ DEPTH_TEST_FUNC(COMPAREFUNC_LESS) |
+ ENABLE_ALPHA_SHADE_MODE |
+ ALPHA_SHADE_MODE(SHADE_MODE_LINEAR)
+ | ENABLE_FOG_SHADE_MODE |
+ FOG_SHADE_MODE(SHADE_MODE_LINEAR) |
+ ENABLE_SPEC_SHADE_MODE |
+ SPEC_SHADE_MODE(SHADE_MODE_LINEAR) |
+ ENABLE_COLOR_SHADE_MODE |
+ COLOR_SHADE_MODE(SHADE_MODE_LINEAR)
+ | ENABLE_CULL_MODE | CULLMODE_NONE);
+
+ i830->state.Ctx[I830_CTXREG_STATE4] = (_3DSTATE_MODES_4_CMD |
+ ENABLE_LOGIC_OP_FUNC |
+ LOGIC_OP_FUNC(LOGICOP_COPY) |
+ ENABLE_STENCIL_TEST_MASK |
+ STENCIL_TEST_MASK(0xff) |
+ ENABLE_STENCIL_WRITE_MASK |
+ STENCIL_WRITE_MASK(0xff));
+
+ i830->state.Ctx[I830_CTXREG_STENCILTST] = (_3DSTATE_STENCIL_TEST_CMD |
+ ENABLE_STENCIL_PARMS |
+ STENCIL_FAIL_OP(STENCILOP_KEEP)
+ |
+ STENCIL_PASS_DEPTH_FAIL_OP
+ (STENCILOP_KEEP) |
+ STENCIL_PASS_DEPTH_PASS_OP
+ (STENCILOP_KEEP) |
+ ENABLE_STENCIL_TEST_FUNC |
+ STENCIL_TEST_FUNC
+ (COMPAREFUNC_ALWAYS) |
+ ENABLE_STENCIL_REF_VALUE |
+ STENCIL_REF_VALUE(0));
+
+   i830->state.Ctx[I830_CTXREG_STATE5] = (_3DSTATE_MODES_5_CMD |
+                                          FLUSH_TEXTURE_CACHE |
+                                          ENABLE_SPRITE_POINT_TEX |
+                                          SPRITE_POINT_TEX_OFF |
+                                          ENABLE_FIXED_LINE_WIDTH |
+                                          FIXED_LINE_WIDTH(0x2) | /* 1.0 */
+                                          ENABLE_FIXED_POINT_WIDTH |
+                                          FIXED_POINT_WIDTH(1));
+
+ i830->state.Ctx[I830_CTXREG_IALPHAB] = (_3DSTATE_INDPT_ALPHA_BLEND_CMD |
+ DISABLE_INDPT_ALPHA_BLEND |
+ ENABLE_ALPHA_BLENDFUNC |
+ ABLENDFUNC_ADD);
+
+ i830->state.Ctx[I830_CTXREG_FOGCOLOR] = (_3DSTATE_FOG_COLOR_CMD |
+ FOG_COLOR_RED(0) |
+ FOG_COLOR_GREEN(0) |
+ FOG_COLOR_BLUE(0));
+
+ i830->state.Ctx[I830_CTXREG_BLENDCOLOR0] = _3DSTATE_CONST_BLEND_COLOR_CMD;
+ i830->state.Ctx[I830_CTXREG_BLENDCOLOR1] = 0;
+
+ i830->state.Ctx[I830_CTXREG_MCSB0] = _3DSTATE_MAP_COORD_SETBIND_CMD;
+ i830->state.Ctx[I830_CTXREG_MCSB1] = (TEXBIND_SET3(TEXCOORDSRC_VTXSET_3) |
+ TEXBIND_SET2(TEXCOORDSRC_VTXSET_2) |
+ TEXBIND_SET1(TEXCOORDSRC_VTXSET_1) |
+ TEXBIND_SET0(TEXCOORDSRC_VTXSET_0));
+
+
+ i830->state.Stipple[I830_STPREG_ST0] = _3DSTATE_STIPPLE;
+
+ i830->state.Buffer[I830_DESTREG_CBUFADDR0] = _3DSTATE_BUF_INFO_CMD;
+ i830->state.Buffer[I830_DESTREG_CBUFADDR1] = (BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(screen->front.pitch) | /* pitch in bytes */
+ BUF_3D_USE_FENCE);
+
+
+ i830->state.Buffer[I830_DESTREG_DBUFADDR0] = _3DSTATE_BUF_INFO_CMD;
+ i830->state.Buffer[I830_DESTREG_DBUFADDR1] = (BUF_3D_ID_DEPTH | BUF_3D_PITCH(screen->depth.pitch) | /* pitch in bytes */
+ BUF_3D_USE_FENCE);
+
+ i830->state.Buffer[I830_DESTREG_DV0] = _3DSTATE_DST_BUF_VARS_CMD;
+
+ switch (screen->fbFormat) {
+ case DV_PF_565:
+ i830->state.Buffer[I830_DESTREG_DV1] = (DSTORG_HORT_BIAS(0x8) | /* .5 */
+ DSTORG_VERT_BIAS(0x8) | /* .5 */
+ screen->fbFormat |
+ DEPTH_IS_Z |
+ DEPTH_FRMT_16_FIXED);
+ break;
+ case DV_PF_8888:
+ i830->state.Buffer[I830_DESTREG_DV1] = (DSTORG_HORT_BIAS(0x8) | /* .5 */
+ DSTORG_VERT_BIAS(0x8) | /* .5 */
+ screen->fbFormat |
+ DEPTH_IS_Z |
+ DEPTH_FRMT_24_FIXED_8_OTHER);
+ break;
+ }
+
+ i830->state.Buffer[I830_DESTREG_SENABLE] = (_3DSTATE_SCISSOR_ENABLE_CMD |
+ DISABLE_SCISSOR_RECT);
+ i830->state.Buffer[I830_DESTREG_SR0] = _3DSTATE_SCISSOR_RECT_0_CMD;
+ i830->state.Buffer[I830_DESTREG_SR1] = 0;
+ i830->state.Buffer[I830_DESTREG_SR2] = 0;
+}
+
+
+void
+i830InitStateFuncs(struct dd_function_table *functions)
+{
+ functions->AlphaFunc = i830AlphaFunc;
+ functions->BlendColor = i830BlendColor;
+ functions->BlendEquationSeparate = i830BlendEquationSeparate;
+ functions->BlendFuncSeparate = i830BlendFuncSeparate;
+ functions->ColorMask = i830ColorMask;
+ functions->CullFace = i830CullFaceFrontFace;
+ functions->DepthFunc = i830DepthFunc;
+ functions->DepthMask = i830DepthMask;
+ functions->Enable = i830Enable;
+ functions->Fogfv = i830Fogfv;
+ functions->FrontFace = i830CullFaceFrontFace;
+ functions->LightModelfv = i830LightModelfv;
+ functions->LineWidth = i830LineWidth;
+ functions->LogicOpcode = i830LogicOp;
+ functions->PointSize = i830PointSize;
+ functions->PolygonStipple = i830PolygonStipple;
+ functions->Scissor = i830Scissor;
+ functions->ShadeModel = i830ShadeModel;
+ functions->StencilFuncSeparate = i830StencilFuncSeparate;
+ functions->StencilMaskSeparate = i830StencilMaskSeparate;
+ functions->StencilOpSeparate = i830StencilOpSeparate;
+}
+
+void
+i830InitState(struct i830_context *i830)
+{
+ GLcontext *ctx = &i830->intel.ctx;
+
+ i830_init_packets(i830);
+
+ intelInitState(ctx);
+
+ memcpy(&i830->initial, &i830->state, sizeof(i830->state));
+
+ i830->current = &i830->state;
+ i830->state.emitted = 0;
+ i830->state.active = (I830_UPLOAD_INVARIENT |
+ I830_UPLOAD_TEXBLEND(0) |
+ I830_UPLOAD_STIPPLE |
+ I830_UPLOAD_CTX | I830_UPLOAD_BUFFERS);
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "mtypes.h"
+#include "imports.h"
+#include "simple_list.h"
+#include "enums.h"
+#include "image.h"
+#include "texstore.h"
+#include "texformat.h"
+#include "texmem.h"
+#include "swrast/swrast.h"
+
+#include "mm.h"
+
+#include "intel_ioctl.h"
+
+#include "i830_context.h"
+#include "i830_reg.h"
+
+
+
+static void
+i830TexEnv(GLcontext * ctx, GLenum target,
+ GLenum pname, const GLfloat * param)
+{
+
+ switch (pname) {
+ case GL_TEXTURE_ENV_COLOR:
+ case GL_TEXTURE_ENV_MODE:
+ case GL_COMBINE_RGB:
+ case GL_COMBINE_ALPHA:
+ case GL_SOURCE0_RGB:
+ case GL_SOURCE1_RGB:
+ case GL_SOURCE2_RGB:
+ case GL_SOURCE0_ALPHA:
+ case GL_SOURCE1_ALPHA:
+ case GL_SOURCE2_ALPHA:
+ case GL_OPERAND0_RGB:
+ case GL_OPERAND1_RGB:
+ case GL_OPERAND2_RGB:
+ case GL_OPERAND0_ALPHA:
+ case GL_OPERAND1_ALPHA:
+ case GL_OPERAND2_ALPHA:
+ case GL_RGB_SCALE:
+ case GL_ALPHA_SCALE:
+ break;
+
+ case GL_TEXTURE_LOD_BIAS:{
+ struct i830_context *i830 = i830_context(ctx);
+ GLuint unit = ctx->Texture.CurrentUnit;
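+         /* The hardware LOD bias is a signed value in 1/16 LOD units;
+          * clamp to the [-4.0, +3.9375] range the register field can hold.
+          */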
+ int b = (int) ((*param) * 16.0);
+ if (b > 63)
+ b = 63;
+ if (b < -64)
+ b = -64;
+ I830_STATECHANGE(i830, I830_UPLOAD_TEX(unit));
+ i830->lodbias_tm0s3[unit] =
+ ((b << TM0S3_LOD_BIAS_SHIFT) & TM0S3_LOD_BIAS_MASK);
+ break;
+ }
+
+ default:
+ break;
+ }
+}
+
+
+
+
+void
+i830InitTextureFuncs(struct dd_function_table *functions)
+{
+ functions->TexEnv = i830TexEnv;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "macros.h"
+#include "mtypes.h"
+#include "simple_list.h"
+#include "enums.h"
+#include "texformat.h"
+#include "texstore.h"
+
+#include "mm.h"
+
+#include "intel_screen.h"
+#include "intel_ioctl.h"
+#include "intel_tex.h"
+
+#include "i830_context.h"
+#include "i830_reg.h"
+
+
+/* ================================================================
+ * Texture combine functions
+ */
+static GLuint
+pass_through(GLuint * state, GLuint blendUnit)
+{
+ state[0] = (_3DSTATE_MAP_BLEND_OP_CMD(blendUnit) |
+ TEXPIPE_COLOR |
+ ENABLE_TEXOUTPUT_WRT_SEL |
+ TEXOP_OUTPUT_CURRENT |
+ DISABLE_TEX_CNTRL_STAGE |
+ TEXOP_SCALE_1X | TEXOP_MODIFY_PARMS | TEXBLENDOP_ARG1);
+ state[1] = (_3DSTATE_MAP_BLEND_OP_CMD(blendUnit) |
+ TEXPIPE_ALPHA |
+ ENABLE_TEXOUTPUT_WRT_SEL |
+ TEXOP_OUTPUT_CURRENT |
+ TEXOP_SCALE_1X | TEXOP_MODIFY_PARMS | TEXBLENDOP_ARG1);
+ state[2] = (_3DSTATE_MAP_BLEND_ARG_CMD(blendUnit) |
+ TEXPIPE_COLOR |
+ TEXBLEND_ARG1 |
+ TEXBLENDARG_MODIFY_PARMS | TEXBLENDARG_CURRENT);
+ state[3] = (_3DSTATE_MAP_BLEND_ARG_CMD(blendUnit) |
+ TEXPIPE_ALPHA |
+ TEXBLEND_ARG1 |
+ TEXBLENDARG_MODIFY_PARMS | TEXBLENDARG_CURRENT);
+
+ return 4;
+}
+
+static GLuint
+emit_factor(GLuint blendUnit, GLuint * state, GLuint count,
+ const GLfloat * factor)
+{
+ GLubyte r, g, b, a;
+ GLuint col;
+
+ if (0)
+ fprintf(stderr, "emit constant %d: %.2f %.2f %.2f %.2f\n",
+ blendUnit, factor[0], factor[1], factor[2], factor[3]);
+
+ UNCLAMPED_FLOAT_TO_UBYTE(r, factor[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(g, factor[1]);
+ UNCLAMPED_FLOAT_TO_UBYTE(b, factor[2]);
+ UNCLAMPED_FLOAT_TO_UBYTE(a, factor[3]);
+
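+   /* Pack the constant color as ARGB8888 for the COLOR_FACTOR register. */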
+ col = ((a << 24) | (r << 16) | (g << 8) | b);
+
+ state[count++] = _3DSTATE_COLOR_FACTOR_N_CMD(blendUnit);
+ state[count++] = col;
+
+ return count;
+}
+
+
+static INLINE GLuint
+GetTexelOp(GLint unit)
+{
+ switch (unit) {
+ case 0:
+ return TEXBLENDARG_TEXEL0;
+ case 1:
+ return TEXBLENDARG_TEXEL1;
+ case 2:
+ return TEXBLENDARG_TEXEL2;
+ case 3:
+ return TEXBLENDARG_TEXEL3;
+ default:
+ return TEXBLENDARG_TEXEL0;
+ }
+}
+
+
+/**
+ * Calculate the hardware instructions to set up the current texture
+ * environment settings.  Since \c gl_texture_unit::_CurrentCombine is used,
+ * both "classic" texture environments and GL_ARB_texture_env_combine type
+ * texture environments are treated identically.
+ *
+ * \todo
+ * This function should return \c GLboolean. When \c GL_FALSE is returned,
+ * it means that an environment is selected that the hardware cannot do. This
+ * is the way the Radeon and R200 drivers work.
+ *
+ * \todo
+ * Looking at i830_3d_regs.h, it seems the i830 can do part of
+ * GL_ATI_texture_env_combine3. It can handle using \c GL_ONE and
+ * \c GL_ZERO as combine inputs (which the code already supports). It can
+ * also handle the \c GL_MODULATE_ADD_ATI mode. Is it worth investigating
+ * partial support for the extension?
+ */
+GLuint
+i830SetTexEnvCombine(struct i830_context * i830,
+ const struct gl_tex_env_combine_state * combine,
+ GLint blendUnit,
+ GLuint texel_op, GLuint * state, const GLfloat * factor)
+{
+ const GLuint numColorArgs = combine->_NumArgsRGB;
+ const GLuint numAlphaArgs = combine->_NumArgsA;
+
+ GLuint blendop;
+ GLuint ablendop;
+ GLuint args_RGB[3];
+ GLuint args_A[3];
+ GLuint rgb_shift;
+ GLuint alpha_shift;
+ GLboolean need_factor = 0;
+ int i;
+ unsigned used;
+ static const GLuint tex_blend_rgb[3] = {
+ TEXPIPE_COLOR | TEXBLEND_ARG1 | TEXBLENDARG_MODIFY_PARMS,
+ TEXPIPE_COLOR | TEXBLEND_ARG2 | TEXBLENDARG_MODIFY_PARMS,
+ TEXPIPE_COLOR | TEXBLEND_ARG0 | TEXBLENDARG_MODIFY_PARMS,
+ };
+ static const GLuint tex_blend_a[3] = {
+ TEXPIPE_ALPHA | TEXBLEND_ARG1 | TEXBLENDARG_MODIFY_PARMS,
+ TEXPIPE_ALPHA | TEXBLEND_ARG2 | TEXBLENDARG_MODIFY_PARMS,
+ TEXPIPE_ALPHA | TEXBLEND_ARG0 | TEXBLENDARG_MODIFY_PARMS,
+ };
+
+ if (INTEL_DEBUG & DEBUG_TEXTURE)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+
+
+ /* The EXT version of the DOT3 extension does not support the
+ * scale factor, but the ARB version (and the version in OpenGL
+ * 1.3) does.
+ */
+ switch (combine->ModeRGB) {
+ case GL_DOT3_RGB_EXT:
+ alpha_shift = combine->ScaleShiftA;
+ rgb_shift = 0;
+ break;
+
+ case GL_DOT3_RGBA_EXT:
+ alpha_shift = 0;
+ rgb_shift = 0;
+ break;
+
+ default:
+ rgb_shift = combine->ScaleShiftRGB;
+ alpha_shift = combine->ScaleShiftA;
+ break;
+ }
+
+
+ switch (combine->ModeRGB) {
+ case GL_REPLACE:
+ blendop = TEXBLENDOP_ARG1;
+ break;
+ case GL_MODULATE:
+ blendop = TEXBLENDOP_MODULATE;
+ break;
+ case GL_ADD:
+ blendop = TEXBLENDOP_ADD;
+ break;
+ case GL_ADD_SIGNED:
+ blendop = TEXBLENDOP_ADDSIGNED;
+ break;
+ case GL_INTERPOLATE:
+ blendop = TEXBLENDOP_BLEND;
+ break;
+ case GL_SUBTRACT:
+ blendop = TEXBLENDOP_SUBTRACT;
+ break;
+ case GL_DOT3_RGB_EXT:
+ case GL_DOT3_RGB:
+ blendop = TEXBLENDOP_DOT3;
+ break;
+ case GL_DOT3_RGBA_EXT:
+ case GL_DOT3_RGBA:
+ blendop = TEXBLENDOP_DOT3;
+ break;
+ default:
+ return pass_through(state, blendUnit);
+ }
+
+ blendop |= (rgb_shift << TEXOP_SCALE_SHIFT);
+
+
+ /* Handle RGB args */
+ for (i = 0; i < 3; i++) {
+ switch (combine->SourceRGB[i]) {
+ case GL_TEXTURE:
+ args_RGB[i] = texel_op;
+ break;
+ case GL_TEXTURE0:
+ case GL_TEXTURE1:
+ case GL_TEXTURE2:
+ case GL_TEXTURE3:
+ args_RGB[i] = GetTexelOp(combine->SourceRGB[i] - GL_TEXTURE0);
+ break;
+ case GL_CONSTANT:
+ args_RGB[i] = TEXBLENDARG_FACTOR_N;
+ need_factor = 1;
+ break;
+ case GL_PRIMARY_COLOR:
+ args_RGB[i] = TEXBLENDARG_DIFFUSE;
+ break;
+ case GL_PREVIOUS:
+ args_RGB[i] = TEXBLENDARG_CURRENT;
+ break;
+ default:
+ return pass_through(state, blendUnit);
+ }
+
+ switch (combine->OperandRGB[i]) {
+ case GL_SRC_COLOR:
+ args_RGB[i] |= 0;
+ break;
+ case GL_ONE_MINUS_SRC_COLOR:
+ args_RGB[i] |= TEXBLENDARG_INV_ARG;
+ break;
+ case GL_SRC_ALPHA:
+ args_RGB[i] |= TEXBLENDARG_REPLICATE_ALPHA;
+ break;
+ case GL_ONE_MINUS_SRC_ALPHA:
+ args_RGB[i] |= (TEXBLENDARG_REPLICATE_ALPHA | TEXBLENDARG_INV_ARG);
+ break;
+ default:
+ return pass_through(state, blendUnit);
+ }
+ }
+
+
+ /* Need to knobble the alpha calculations of TEXBLENDOP_DOT4 to
+    * match the spec.  Can't use DOT3 as it won't propagate values
+ * into alpha as required:
+ *
+ * Note - the global factor is set up with alpha == .5, so
+ * the alpha part of the DOT4 calculation should be zero.
+ */
+ if (combine->ModeRGB == GL_DOT3_RGBA_EXT ||
+ combine->ModeRGB == GL_DOT3_RGBA) {
+ ablendop = TEXBLENDOP_DOT4;
+ args_A[0] = TEXBLENDARG_FACTOR; /* the global factor */
+ args_A[1] = TEXBLENDARG_FACTOR;
+ args_A[2] = TEXBLENDARG_FACTOR;
+ }
+ else {
+ switch (combine->ModeA) {
+ case GL_REPLACE:
+ ablendop = TEXBLENDOP_ARG1;
+ break;
+ case GL_MODULATE:
+ ablendop = TEXBLENDOP_MODULATE;
+ break;
+ case GL_ADD:
+ ablendop = TEXBLENDOP_ADD;
+ break;
+ case GL_ADD_SIGNED:
+ ablendop = TEXBLENDOP_ADDSIGNED;
+ break;
+ case GL_INTERPOLATE:
+ ablendop = TEXBLENDOP_BLEND;
+ break;
+ case GL_SUBTRACT:
+ ablendop = TEXBLENDOP_SUBTRACT;
+ break;
+ default:
+ return pass_through(state, blendUnit);
+ }
+
+
+ ablendop |= (alpha_shift << TEXOP_SCALE_SHIFT);
+
+ /* Handle A args */
+ for (i = 0; i < 3; i++) {
+ switch (combine->SourceA[i]) {
+ case GL_TEXTURE:
+ args_A[i] = texel_op;
+ break;
+ case GL_TEXTURE0:
+ case GL_TEXTURE1:
+ case GL_TEXTURE2:
+ case GL_TEXTURE3:
+ args_A[i] = GetTexelOp(combine->SourceA[i] - GL_TEXTURE0);
+ break;
+ case GL_CONSTANT:
+ args_A[i] = TEXBLENDARG_FACTOR_N;
+ need_factor = 1;
+ break;
+ case GL_PRIMARY_COLOR:
+ args_A[i] = TEXBLENDARG_DIFFUSE;
+ break;
+ case GL_PREVIOUS:
+ args_A[i] = TEXBLENDARG_CURRENT;
+ break;
+ default:
+ return pass_through(state, blendUnit);
+ }
+
+ switch (combine->OperandA[i]) {
+ case GL_SRC_ALPHA:
+ args_A[i] |= 0;
+ break;
+ case GL_ONE_MINUS_SRC_ALPHA:
+ args_A[i] |= TEXBLENDARG_INV_ARG;
+ break;
+ default:
+ return pass_through(state, blendUnit);
+ }
+ }
+ }
+
+
+
+ /* Native Arg1 == Arg0 in GL_EXT_texture_env_combine spec */
+ /* Native Arg2 == Arg1 in GL_EXT_texture_env_combine spec */
+ /* Native Arg0 == Arg2 in GL_EXT_texture_env_combine spec */
+
+ /* When we render we need to figure out which is the last really enabled
+ * tex unit, and put last stage on it
+ */
+
+
+ /* Build color & alpha pipelines */
+
+ used = 0;
+ state[used++] = (_3DSTATE_MAP_BLEND_OP_CMD(blendUnit) |
+ TEXPIPE_COLOR |
+ ENABLE_TEXOUTPUT_WRT_SEL |
+ TEXOP_OUTPUT_CURRENT |
+ DISABLE_TEX_CNTRL_STAGE | TEXOP_MODIFY_PARMS | blendop);
+ state[used++] = (_3DSTATE_MAP_BLEND_OP_CMD(blendUnit) |
+ TEXPIPE_ALPHA |
+ ENABLE_TEXOUTPUT_WRT_SEL |
+ TEXOP_OUTPUT_CURRENT | TEXOP_MODIFY_PARMS | ablendop);
+
+ for (i = 0; i < numColorArgs; i++) {
+ state[used++] = (_3DSTATE_MAP_BLEND_ARG_CMD(blendUnit) |
+ tex_blend_rgb[i] | args_RGB[i]);
+ }
+
+ for (i = 0; i < numAlphaArgs; i++) {
+ state[used++] = (_3DSTATE_MAP_BLEND_ARG_CMD(blendUnit) |
+ tex_blend_a[i] | args_A[i]);
+ }
+
+
+ if (need_factor)
+ return emit_factor(blendUnit, state, used, factor);
+ else
+ return used;
+}
+
+
+static void
+emit_texblend(struct i830_context *i830, GLuint unit, GLuint blendUnit,
+ GLboolean last_stage)
+{
+ struct gl_texture_unit *texUnit = &i830->intel.ctx.Texture.Unit[unit];
+ GLuint tmp[I830_TEXBLEND_SIZE], tmp_sz;
+
+
+ if (0)
+ fprintf(stderr, "%s unit %d\n", __FUNCTION__, unit);
+
+ /* Update i830->state.TexBlend
+ */
+ tmp_sz = i830SetTexEnvCombine(i830, texUnit->_CurrentCombine, blendUnit,
+ GetTexelOp(unit), tmp, texUnit->EnvColor);
+
+ if (last_stage)
+ tmp[0] |= TEXOP_LAST_STAGE;
+
+ if (tmp_sz != i830->state.TexBlendWordsUsed[blendUnit] ||
+ memcmp(tmp, i830->state.TexBlend[blendUnit],
+ tmp_sz * sizeof(GLuint))) {
+
+ I830_STATECHANGE(i830, I830_UPLOAD_TEXBLEND(blendUnit));
+ memcpy(i830->state.TexBlend[blendUnit], tmp, tmp_sz * sizeof(GLuint));
+ i830->state.TexBlendWordsUsed[blendUnit] = tmp_sz;
+ }
+
+ I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND(blendUnit), GL_TRUE);
+}
+
+static void
+emit_passthrough(struct i830_context *i830)
+{
+ GLuint tmp[I830_TEXBLEND_SIZE], tmp_sz;
+ GLuint unit = 0;
+
+ tmp_sz = pass_through(tmp, unit);
+ tmp[0] |= TEXOP_LAST_STAGE;
+
+ if (tmp_sz != i830->state.TexBlendWordsUsed[unit] ||
+ memcmp(tmp, i830->state.TexBlend[unit], tmp_sz * sizeof(GLuint))) {
+
+ I830_STATECHANGE(i830, I830_UPLOAD_TEXBLEND(unit));
+ memcpy(i830->state.TexBlend[unit], tmp, tmp_sz * sizeof(GLuint));
+ i830->state.TexBlendWordsUsed[unit] = tmp_sz;
+ }
+
+ I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND(unit), GL_TRUE);
+}
+
+void
+i830EmitTextureBlend(struct i830_context *i830)
+{
+ GLcontext *ctx = &i830->intel.ctx;
+ GLuint unit, last_stage = 0, blendunit = 0;
+
+ I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND_ALL, GL_FALSE);
+
+ if (ctx->Texture._EnabledUnits) {
+ for (unit = 0; unit < ctx->Const.MaxTextureUnits; unit++)
+ if (ctx->Texture.Unit[unit]._ReallyEnabled)
+ last_stage = unit;
+
+ for (unit = 0; unit < ctx->Const.MaxTextureUnits; unit++)
+ if (ctx->Texture.Unit[unit]._ReallyEnabled)
+ emit_texblend(i830, unit, blendunit++, last_stage == unit);
+ }
+ else {
+ emit_passthrough(i830);
+ }
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "macros.h"
+#include "mtypes.h"
+#include "simple_list.h"
+#include "enums.h"
+#include "texformat.h"
+#include "texstore.h"
+
+#include "mm.h"
+
+#include "intel_screen.h"
+#include "intel_ioctl.h"
+#include "intel_tex.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+
+#include "i830_context.h"
+#include "i830_reg.h"
+
+
+
+static GLuint
+translate_texture_format(GLuint mesa_format)
+{
+ switch (mesa_format) {
+ case MESA_FORMAT_L8:
+ return MAPSURF_8BIT | MT_8BIT_L8;
+ case MESA_FORMAT_I8:
+ return MAPSURF_8BIT | MT_8BIT_I8;
+ case MESA_FORMAT_A8:
+ return MAPSURF_8BIT | MT_8BIT_I8; /* Kludge! */
+ case MESA_FORMAT_AL88:
+ return MAPSURF_16BIT | MT_16BIT_AY88;
+ case MESA_FORMAT_RGB565:
+ return MAPSURF_16BIT | MT_16BIT_RGB565;
+ case MESA_FORMAT_ARGB1555:
+ return MAPSURF_16BIT | MT_16BIT_ARGB1555;
+ case MESA_FORMAT_ARGB4444:
+ return MAPSURF_16BIT | MT_16BIT_ARGB4444;
+ case MESA_FORMAT_ARGB8888:
+ return MAPSURF_32BIT | MT_32BIT_ARGB8888;
+ case MESA_FORMAT_YCBCR_REV:
+ return (MAPSURF_422 | MT_422_YCRCB_NORMAL);
+ case MESA_FORMAT_YCBCR:
+ return (MAPSURF_422 | MT_422_YCRCB_SWAPY);
+ case MESA_FORMAT_RGB_FXT1:
+ case MESA_FORMAT_RGBA_FXT1:
+ return (MAPSURF_COMPRESSED | MT_COMPRESS_FXT1);
+ case MESA_FORMAT_RGBA_DXT1:
+ case MESA_FORMAT_RGB_DXT1:
+ return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT1);
+ case MESA_FORMAT_RGBA_DXT3:
+ return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT2_3);
+ case MESA_FORMAT_RGBA_DXT5:
+ return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT4_5);
+ default:
+ fprintf(stderr, "%s: bad image format %x\n", __FUNCTION__, mesa_format);
+ abort();
+ return 0;
+ }
+}
+
+
+
+
+/* The i915 (and related graphics cores) do not support GL_CLAMP. The
+ * Intel drivers for "other operating systems" implement GL_CLAMP as
+ * GL_CLAMP_TO_EDGE, so the same is done here.
+ */
+static GLuint
+translate_wrap_mode(GLenum wrap)
+{
+ switch (wrap) {
+ case GL_REPEAT:
+ return TEXCOORDMODE_WRAP;
+ case GL_CLAMP:
+ case GL_CLAMP_TO_EDGE:
+ return TEXCOORDMODE_CLAMP; /* not really correct */
+ case GL_CLAMP_TO_BORDER:
+ return TEXCOORDMODE_CLAMP_BORDER;
+ case GL_MIRRORED_REPEAT:
+ return TEXCOORDMODE_MIRROR;
+ default:
+ return TEXCOORDMODE_WRAP;
+ }
+}
+
+
+/* Recalculate all state from scratch. Perhaps not the most
+ * efficient, but this has gotten complex enough that we need
+ * something which is understandable and reliable.
+ */
+static GLboolean
+i830_update_tex_unit(struct intel_context *intel, GLuint unit, GLuint ss3)
+{
+ GLcontext *ctx = &intel->ctx;
+ struct i830_context *i830 = i830_context(ctx);
+ struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
+ struct intel_texture_object *intelObj = intel_texture_object(tObj);
+ struct gl_texture_image *firstImage;
+ GLuint *state = i830->state.Tex[unit];
+
+   /* Zero the whole per-unit state array ('state' is a pointer here, so
+    * sizeof(state) would only clear the first few bytes).
+    */
+   memset(state, 0, sizeof(i830->state.Tex[unit]));
+
+ if (!intel_finalize_mipmap_tree(intel, unit))
+ return GL_FALSE;
+
+ /* Get first image here, since intelObj->firstLevel will get set in
+ * the intel_finalize_mipmap_tree() call above.
+ */
+ firstImage = tObj->Image[0][intelObj->firstLevel];
+
+ i830->state.tex_buffer[unit] = intelObj->mt->region->buffer;
+   i830->state.tex_offset[unit] =
+      intel_miptree_image_offset(intelObj->mt, 0, intelObj->firstLevel);
+
+
+ state[I830_TEXREG_TM0LI] = (_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
+ (LOAD_TEXTURE_MAP0 << unit) | 4);
+
+/* state[I830_TEXREG_TM0S0] = (TM0S0_USE_FENCE | */
+/* t->intel.TextureOffset); */
+
+
+ state[I830_TEXREG_TM0S1] =
+ (((firstImage->Height - 1) << TM0S1_HEIGHT_SHIFT) |
+ ((firstImage->Width - 1) << TM0S1_WIDTH_SHIFT) |
+ translate_texture_format(firstImage->TexFormat->MesaFormat));
+
+ state[I830_TEXREG_TM0S2] =
+ (((((intelObj->mt->pitch * intelObj->mt->cpp) / 4) -
+ 1) << TM0S2_PITCH_SHIFT) | TM0S2_CUBE_FACE_ENA_MASK);
+
+ {
+ if (tObj->Target == GL_TEXTURE_CUBE_MAP)
+ state[I830_TEXREG_CUBE] = (_3DSTATE_MAP_CUBE | MAP_UNIT(unit) |
+ CUBE_NEGX_ENABLE |
+ CUBE_POSX_ENABLE |
+ CUBE_NEGY_ENABLE |
+ CUBE_POSY_ENABLE |
+ CUBE_NEGZ_ENABLE | CUBE_POSZ_ENABLE);
+ else
+ state[I830_TEXREG_CUBE] = (_3DSTATE_MAP_CUBE | MAP_UNIT(unit));
+ }
+
+
+
+
+ {
+ GLuint minFilt, mipFilt, magFilt;
+
+ switch (tObj->MinFilter) {
+ case GL_NEAREST:
+ minFilt = FILTER_NEAREST;
+ mipFilt = MIPFILTER_NONE;
+ break;
+ case GL_LINEAR:
+ minFilt = FILTER_LINEAR;
+ mipFilt = MIPFILTER_NONE;
+ break;
+ case GL_NEAREST_MIPMAP_NEAREST:
+ minFilt = FILTER_NEAREST;
+ mipFilt = MIPFILTER_NEAREST;
+ break;
+ case GL_LINEAR_MIPMAP_NEAREST:
+ minFilt = FILTER_LINEAR;
+ mipFilt = MIPFILTER_NEAREST;
+ break;
+ case GL_NEAREST_MIPMAP_LINEAR:
+ minFilt = FILTER_NEAREST;
+ mipFilt = MIPFILTER_LINEAR;
+ break;
+ case GL_LINEAR_MIPMAP_LINEAR:
+ minFilt = FILTER_LINEAR;
+ mipFilt = MIPFILTER_LINEAR;
+ break;
+ default:
+ return GL_FALSE;
+ }
+
+ if (tObj->MaxAnisotropy > 1.0) {
+ minFilt = FILTER_ANISOTROPIC;
+ magFilt = FILTER_ANISOTROPIC;
+ }
+ else {
+ switch (tObj->MagFilter) {
+ case GL_NEAREST:
+ magFilt = FILTER_NEAREST;
+ break;
+ case GL_LINEAR:
+ magFilt = FILTER_LINEAR;
+ break;
+ default:
+ return GL_FALSE;
+ }
+ }
+
+ state[I830_TEXREG_TM0S3] = i830->lodbias_tm0s3[unit];
+
+#if 0
+ /* YUV conversion:
+ */
+ if (firstImage->TexFormat->MesaFormat == MESA_FORMAT_YCBCR ||
+ firstImage->TexFormat->MesaFormat == MESA_FORMAT_YCBCR_REV)
+ state[I830_TEXREG_TM0S3] |= SS2_COLORSPACE_CONVERSION;
+#endif
+
+ state[I830_TEXREG_TM0S3] |= ((intelObj->lastLevel -
+ intelObj->firstLevel) *
+ 4) << TM0S3_MIN_MIP_SHIFT;
+
+ state[I830_TEXREG_TM0S3] |= ((minFilt << TM0S3_MIN_FILTER_SHIFT) |
+ (mipFilt << TM0S3_MIP_FILTER_SHIFT) |
+ (magFilt << TM0S3_MAG_FILTER_SHIFT));
+ }
+
+ {
+ GLenum ws = tObj->WrapS;
+ GLenum wt = tObj->WrapT;
+
+
+ /* 3D textures not available on i830
+ */
+ if (tObj->Target == GL_TEXTURE_3D)
+ return GL_FALSE;
+
+      state[I830_TEXREG_MCS] = (_3DSTATE_MAP_COORD_SET_CMD |
+                                MAP_UNIT(unit) |
+                                ENABLE_TEXCOORD_PARAMS |
+                                ss3 |
+                                ENABLE_ADDR_V_CNTL |
+                                TEXCOORD_ADDR_V_MODE(translate_wrap_mode(wt)) |
+                                ENABLE_ADDR_U_CNTL |
+                                TEXCOORD_ADDR_U_MODE(translate_wrap_mode(ws)));
+ }
+
+
+ state[I830_TEXREG_TM0S4] = INTEL_PACKCOLOR8888(tObj->_BorderChan[0],
+ tObj->_BorderChan[1],
+ tObj->_BorderChan[2],
+ tObj->_BorderChan[3]);
+
+
+ I830_ACTIVESTATE(i830, I830_UPLOAD_TEX(unit), GL_TRUE);
+   /* A memcmp optimization was already disabled here; it definitely cannot
+    * work, since the region may have changed and that would not be detected:
+ */
+ I830_STATECHANGE(i830, I830_UPLOAD_TEX(unit));
+ return GL_TRUE;
+}
+
+
+
+
+void
+i830UpdateTextureState(struct intel_context *intel)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+ GLboolean ok = GL_TRUE;
+ GLuint i;
+
+ for (i = 0; i < I830_TEX_UNITS && ok; i++) {
+ switch (intel->ctx.Texture.Unit[i]._ReallyEnabled) {
+ case TEXTURE_1D_BIT:
+ case TEXTURE_2D_BIT:
+ case TEXTURE_CUBE_BIT:
+ ok = i830_update_tex_unit(intel, i, TEXCOORDS_ARE_NORMAL);
+ break;
+ case TEXTURE_RECT_BIT:
+ ok = i830_update_tex_unit(intel, i, TEXCOORDS_ARE_IN_TEXELUNITS);
+ break;
+ case 0:
+ if (i830->state.active & I830_UPLOAD_TEX(i))
+ I830_ACTIVESTATE(i830, I830_UPLOAD_TEX(i), GL_FALSE);
+ break;
+ case TEXTURE_3D_BIT:
+ default:
+ ok = GL_FALSE;
+ break;
+ }
+ }
+
+ FALLBACK(intel, I830_FALLBACK_TEXTURE, !ok);
+
+ if (ok)
+ i830EmitTextureBlend(i830);
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "i830_context.h"
+#include "i830_reg.h"
+#include "intel_batchbuffer.h"
+#include "intel_regions.h"
+#include "tnl/t_context.h"
+#include "tnl/t_vertex.h"
+
+#define FILE_DEBUG_FLAG DEBUG_STATE
+
+static GLboolean i830_check_vertex_size(struct intel_context *intel,
+ GLuint expected);
+
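+/* Pack a 2..4 component texcoord size into the two-bit per-set format
+ * field of the VFT1 vertex format register.
+ */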
+#define SZ_TO_HW(sz) ((sz-2)&0x3)
+#define EMIT_SZ(sz) (EMIT_1F + (sz) - 1)
+#define EMIT_ATTR( ATTR, STYLE, V0 ) \
+do { \
+ intel->vertex_attrs[intel->vertex_attr_count].attrib = (ATTR); \
+ intel->vertex_attrs[intel->vertex_attr_count].format = (STYLE); \
+ intel->vertex_attr_count++; \
+ v0 |= V0; \
+} while (0)
+
+#define EMIT_PAD( N ) \
+do { \
+ intel->vertex_attrs[intel->vertex_attr_count].attrib = 0; \
+ intel->vertex_attrs[intel->vertex_attr_count].format = EMIT_PAD; \
+ intel->vertex_attrs[intel->vertex_attr_count].offset = (N); \
+ intel->vertex_attr_count++; \
+} while (0)
+
+
+#define VRTX_TEX_SET_FMT(n, x) ((x)<<((n)*2))
+#define TEXBIND_SET(n, x) ((x)<<((n)*4))
+
+static void
+i830_render_start(struct intel_context *intel)
+{
+ GLcontext *ctx = &intel->ctx;
+ struct i830_context *i830 = i830_context(ctx);
+ TNLcontext *tnl = TNL_CONTEXT(ctx);
+ struct vertex_buffer *VB = &tnl->vb;
+ DECLARE_RENDERINPUTS(index_bitset);
+ GLuint v0 = _3DSTATE_VFT0_CMD;
+ GLuint v2 = _3DSTATE_VFT1_CMD;
+ GLuint mcsb1 = 0;
+
+ RENDERINPUTS_COPY(index_bitset, tnl->render_inputs_bitset);
+
+   /* Important: build hardware vertices from the post-transform NDC
+    * positions (the viewport transform is applied by t_vertex.c):
+    */
+ VB->AttribPtr[VERT_ATTRIB_POS] = VB->NdcPtr;
+ intel->vertex_attr_count = 0;
+
+ /* EMIT_ATTR's must be in order as they tell t_vertex.c how to
+ * build up a hardware vertex.
+ */
+ if (RENDERINPUTS_TEST_RANGE(index_bitset, _TNL_FIRST_TEX, _TNL_LAST_TEX)) {
+ EMIT_ATTR(_TNL_ATTRIB_POS, EMIT_4F_VIEWPORT, VFT0_XYZW);
+ intel->coloroffset = 4;
+ }
+ else {
+ EMIT_ATTR(_TNL_ATTRIB_POS, EMIT_3F_VIEWPORT, VFT0_XYZ);
+ intel->coloroffset = 3;
+ }
+
+ if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_POINTSIZE)) {
+ EMIT_ATTR(_TNL_ATTRIB_POINTSIZE, EMIT_1F, VFT0_POINT_WIDTH);
+ }
+
+ EMIT_ATTR(_TNL_ATTRIB_COLOR0, EMIT_4UB_4F_BGRA, VFT0_DIFFUSE);
+
+ intel->specoffset = 0;
+ if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_COLOR1) ||
+ RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_FOG)) {
+ if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_COLOR1)) {
+ intel->specoffset = intel->coloroffset + 1;
+ EMIT_ATTR(_TNL_ATTRIB_COLOR1, EMIT_3UB_3F_BGR, VFT0_SPEC);
+ }
+ else
+ EMIT_PAD(3);
+
+ if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_FOG))
+ EMIT_ATTR(_TNL_ATTRIB_FOG, EMIT_1UB_1F, VFT0_SPEC);
+ else
+ EMIT_PAD(1);
+ }
+
+ if (RENDERINPUTS_TEST_RANGE(index_bitset, _TNL_FIRST_TEX, _TNL_LAST_TEX)) {
+ int i, count = 0;
+
+ for (i = 0; i < I830_TEX_UNITS; i++) {
+ if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_TEX(i))) {
+ GLuint sz = VB->TexCoordPtr[i]->size;
+ GLuint emit;
+ GLuint mcs = (i830->state.Tex[i][I830_TEXREG_MCS] &
+ ~TEXCOORDTYPE_MASK);
+
+ switch (sz) {
+ case 1:
+ case 2:
+ emit = EMIT_2F;
+ sz = 2;
+ mcs |= TEXCOORDTYPE_CARTESIAN;
+ break;
+ case 3:
+ emit = EMIT_3F;
+ sz = 3;
+ mcs |= TEXCOORDTYPE_VECTOR;
+ break;
+ case 4:
+ emit = EMIT_3F_XYW;
+ sz = 3;
+ mcs |= TEXCOORDTYPE_HOMOGENEOUS;
+ break;
+ default:
+ continue;
+ };
+
+
+ EMIT_ATTR(_TNL_ATTRIB_TEX0 + i, emit, 0);
+ v2 |= VRTX_TEX_SET_FMT(count, SZ_TO_HW(sz));
+ mcsb1 |= (count + 8) << (i * 4);
+
+ if (mcs != i830->state.Tex[i][I830_TEXREG_MCS]) {
+ I830_STATECHANGE(i830, I830_UPLOAD_TEX(i));
+ i830->state.Tex[i][I830_TEXREG_MCS] = mcs;
+ }
+
+ count++;
+ }
+ }
+
+ v0 |= VFT0_TEX_COUNT(count);
+ }
+
+ /* Only need to change the vertex emit code if there has been a
+ * statechange to a new hardware vertex format:
+ */
+ if (v0 != i830->state.Ctx[I830_CTXREG_VF] ||
+ v2 != i830->state.Ctx[I830_CTXREG_VF2] ||
+ mcsb1 != i830->state.Ctx[I830_CTXREG_MCSB1] ||
+ !RENDERINPUTS_EQUAL(index_bitset, i830->last_index_bitset)) {
+ int k;
+
+ I830_STATECHANGE(i830, I830_UPLOAD_CTX);
+
+ /* Must do this *after* statechange, so as not to affect
+ * buffered vertices reliant on the old state:
+ */
+ intel->vertex_size =
+ _tnl_install_attrs(ctx,
+ intel->vertex_attrs,
+ intel->vertex_attr_count,
+ intel->ViewportMatrix.m, 0);
+
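+      /* _tnl_install_attrs() returns the vertex size in bytes; the
+       * hardware vertex size is counted in dwords.
+       */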
+ intel->vertex_size >>= 2;
+
+ i830->state.Ctx[I830_CTXREG_VF] = v0;
+ i830->state.Ctx[I830_CTXREG_VF2] = v2;
+ i830->state.Ctx[I830_CTXREG_MCSB1] = mcsb1;
+ RENDERINPUTS_COPY(i830->last_index_bitset, index_bitset);
+
+ k = i830_check_vertex_size(intel, intel->vertex_size);
+ assert(k);
+ }
+}
+
+static void
+i830_reduced_primitive_state(struct intel_context *intel, GLenum rprim)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+ GLuint st1 = i830->state.Stipple[I830_STPREG_ST1];
+
+ st1 &= ~ST1_ENABLE;
+
+ switch (rprim) {
+ case GL_TRIANGLES:
+ if (intel->ctx.Polygon.StippleFlag && intel->hw_stipple)
+ st1 |= ST1_ENABLE;
+ break;
+ case GL_LINES:
+ case GL_POINTS:
+ default:
+ break;
+ }
+
+ i830->intel.reduced_primitive = rprim;
+
+ if (st1 != i830->state.Stipple[I830_STPREG_ST1]) {
+ INTEL_FIREVERTICES(intel);
+
+ I830_STATECHANGE(i830, I830_UPLOAD_STIPPLE);
+ i830->state.Stipple[I830_STPREG_ST1] = st1;
+ }
+}
+
+/* Pull apart the vertex format registers and figure out how large a
+ * vertex is supposed to be.
+ */
+static GLboolean
+i830_check_vertex_size(struct intel_context *intel, GLuint expected)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+ int vft0 = i830->current->Ctx[I830_CTXREG_VF];
+ int vft1 = i830->current->Ctx[I830_CTXREG_VF2];
+ int nrtex = (vft0 & VFT0_TEX_COUNT_MASK) >> VFT0_TEX_COUNT_SHIFT;
+ int i, sz = 0;
+
+ switch (vft0 & VFT0_XYZW_MASK) {
+ case VFT0_XY:
+ sz = 2;
+ break;
+ case VFT0_XYZ:
+ sz = 3;
+ break;
+ case VFT0_XYW:
+ sz = 3;
+ break;
+ case VFT0_XYZW:
+ sz = 4;
+ break;
+ default:
+ fprintf(stderr, "no xyzw specified\n");
+ return 0;
+ }
+
+ if (vft0 & VFT0_SPEC)
+ sz++;
+ if (vft0 & VFT0_DIFFUSE)
+ sz++;
+ if (vft0 & VFT0_DEPTH_OFFSET)
+ sz++;
+ if (vft0 & VFT0_POINT_WIDTH)
+ sz++;
+
+ for (i = 0; i < nrtex; i++) {
+ switch (vft1 & VFT1_TEX0_MASK) {
+ case TEXCOORDFMT_2D:
+ sz += 2;
+ break;
+ case TEXCOORDFMT_3D:
+ sz += 3;
+ break;
+ case TEXCOORDFMT_4D:
+ sz += 4;
+ break;
+ case TEXCOORDFMT_1D:
+ sz += 1;
+ break;
+ }
+ vft1 >>= VFT1_TEX1_SHIFT;
+ }
+
+ if (sz != expected)
+ fprintf(stderr, "vertex size mismatch %d/%d\n", sz, expected);
+
+ return sz == expected;
+}
+
+static void
+i830_emit_invarient_state(struct intel_context *intel)
+{
+ BATCH_LOCALS;
+
+ BEGIN_BATCH(40, 0);
+
+ OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
+ OUT_BATCH(0);
+
+ OUT_BATCH(_3DSTATE_DFLT_SPEC_CMD);
+ OUT_BATCH(0);
+
+ OUT_BATCH(_3DSTATE_DFLT_Z_CMD);
+ OUT_BATCH(0);
+
+ OUT_BATCH(_3DSTATE_FOG_MODE_CMD);
+ OUT_BATCH(FOGFUNC_ENABLE |
+ FOG_LINEAR_CONST | FOGSRC_INDEX_Z | ENABLE_FOG_DENSITY);
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+
+
+ OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
+ MAP_UNIT(0) |
+ DISABLE_TEX_STREAM_BUMP |
+ ENABLE_TEX_STREAM_COORD_SET |
+ TEX_STREAM_COORD_SET(0) |
+ ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(0));
+ OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
+ MAP_UNIT(1) |
+ DISABLE_TEX_STREAM_BUMP |
+ ENABLE_TEX_STREAM_COORD_SET |
+ TEX_STREAM_COORD_SET(1) |
+ ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(1));
+ OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
+ MAP_UNIT(2) |
+ DISABLE_TEX_STREAM_BUMP |
+ ENABLE_TEX_STREAM_COORD_SET |
+ TEX_STREAM_COORD_SET(2) |
+ ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(2));
+ OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
+ MAP_UNIT(3) |
+ DISABLE_TEX_STREAM_BUMP |
+ ENABLE_TEX_STREAM_COORD_SET |
+ TEX_STREAM_COORD_SET(3) |
+ ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(3));
+
+ OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
+ OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(0));
+ OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
+ OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(1));
+ OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
+ OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(2));
+ OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
+ OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(3));
+
+ OUT_BATCH(_3DSTATE_RASTER_RULES_CMD |
+ ENABLE_POINT_RASTER_RULE |
+ OGL_POINT_RASTER_RULE |
+ ENABLE_LINE_STRIP_PROVOKE_VRTX |
+ ENABLE_TRI_FAN_PROVOKE_VRTX |
+ ENABLE_TRI_STRIP_PROVOKE_VRTX |
+ LINE_STRIP_PROVOKE_VRTX(1) |
+ TRI_FAN_PROVOKE_VRTX(2) | TRI_STRIP_PROVOKE_VRTX(2));
+
+ OUT_BATCH(_3DSTATE_VERTEX_TRANSFORM);
+ OUT_BATCH(DISABLE_VIEWPORT_TRANSFORM | DISABLE_PERSPECTIVE_DIVIDE);
+
+ OUT_BATCH(_3DSTATE_W_STATE_CMD);
+ OUT_BATCH(MAGIC_W_STATE_DWORD1);
+ OUT_BATCH(0x3f800000 /* 1.0 in IEEE float */ );
+
+
+ OUT_BATCH(_3DSTATE_COLOR_FACTOR_CMD);
+ OUT_BATCH(0x80808080); /* .5 required in alpha for GL_DOT3_RGBA_EXT */
+
+ ADVANCE_BATCH();
+}
+
+
+#define emit( intel, state, size ) \
+do { \
+ int k; \
+ BEGIN_BATCH(size / sizeof(GLuint), 0); \
+ for (k = 0 ; k < size / sizeof(GLuint) ; k++) { \
+ if (0) _mesa_printf(" 0x%08x\n", state[k]); \
+ OUT_BATCH(state[k]); \
+ } \
+ ADVANCE_BATCH(); \
+} while (0)
+
+static GLuint
+get_state_size(struct i830_hw_state *state)
+{
+ GLuint dirty = state->active & ~state->emitted;
+ GLuint sz = 0;
+ GLuint i;
+
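+   /* The invariant state size must match the BEGIN_BATCH(40, 0) in
+    * i830_emit_invarient_state().
+    */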
+ if (dirty & I830_UPLOAD_INVARIENT)
+ sz += 40 * sizeof(int);
+
+ if (dirty & I830_UPLOAD_CTX)
+ sz += sizeof(state->Ctx);
+
+ if (dirty & I830_UPLOAD_BUFFERS)
+ sz += sizeof(state->Buffer);
+
+ if (dirty & I830_UPLOAD_STIPPLE)
+ sz += sizeof(state->Stipple);
+
+ for (i = 0; i < I830_TEX_UNITS; i++) {
+ if ((dirty & I830_UPLOAD_TEX(i)))
+ sz += sizeof(state->Tex[i]);
+
+ if (dirty & I830_UPLOAD_TEXBLEND(i))
+ sz += state->TexBlendWordsUsed[i] * 4;
+ }
+
+ return sz;
+}
+
+
+/* Emit the dirty state into the batchbuffer.
+ */
+static void
+i830_emit_state(struct intel_context *intel)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+ struct i830_hw_state *state = i830->current;
+ int i;
+ GLuint dirty;
+ BATCH_LOCALS;
+
+ /* We don't hold the lock at this point, so want to make sure that
+ * there won't be a buffer wrap.
+ *
+ * It might be better to talk about explicit places where
+ * scheduling is allowed, rather than assume that it is whenever a
+ * batchbuffer fills up.
+ */
+ intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
+
+ /* Do this here as we may have flushed the batchbuffer above,
+ * causing more state to be dirty!
+ */
+ dirty = state->active & ~state->emitted;
+
+ if (dirty & I830_UPLOAD_INVARIENT) {
+ DBG("I830_UPLOAD_INVARIENT:\n");
+ i830_emit_invarient_state(intel);
+ }
+
+ if (dirty & I830_UPLOAD_CTX) {
+ DBG("I830_UPLOAD_CTX:\n");
+ emit(i830, state->Ctx, sizeof(state->Ctx));
+
+ }
+
+ if (dirty & I830_UPLOAD_BUFFERS) {
+ DBG("I830_UPLOAD_BUFFERS:\n");
+ BEGIN_BATCH(I830_DEST_SETUP_SIZE + 2, 0);
+ OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR0]);
+ OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR1]);
+ OUT_RELOC(state->draw_region->buffer,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, 0);
+
+ if (state->depth_region) {
+ OUT_BATCH(state->Buffer[I830_DESTREG_DBUFADDR0]);
+ OUT_BATCH(state->Buffer[I830_DESTREG_DBUFADDR1]);
+ OUT_RELOC(state->depth_region->buffer,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, 0);
+ }
+
+ OUT_BATCH(state->Buffer[I830_DESTREG_DV0]);
+ OUT_BATCH(state->Buffer[I830_DESTREG_DV1]);
+ OUT_BATCH(state->Buffer[I830_DESTREG_SENABLE]);
+ OUT_BATCH(state->Buffer[I830_DESTREG_SR0]);
+ OUT_BATCH(state->Buffer[I830_DESTREG_SR1]);
+ OUT_BATCH(state->Buffer[I830_DESTREG_SR2]);
+ ADVANCE_BATCH();
+ }
+
+ if (dirty & I830_UPLOAD_STIPPLE) {
+ DBG("I830_UPLOAD_STIPPLE:\n");
+ emit(i830, state->Stipple, sizeof(state->Stipple));
+ }
+
+ for (i = 0; i < I830_TEX_UNITS; i++) {
+ if ((dirty & I830_UPLOAD_TEX(i))) {
+ DBG("I830_UPLOAD_TEX(%d):\n", i);
+
+ BEGIN_BATCH(I830_TEX_SETUP_SIZE + 1, 0);
+ OUT_BATCH(state->Tex[i][I830_TEXREG_TM0LI]);
+
+ if (state->tex_buffer[i]) {
+ OUT_RELOC(state->tex_buffer[i],
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_READ,
+ state->tex_offset[i] | TM0S0_USE_FENCE);
+ }
+ else {
+ assert(i == 0);
+ assert(state == &i830->meta);
+ OUT_BATCH(0);
+ }
+
+ OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S1]);
+ OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S2]);
+ OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S3]);
+ OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S4]);
+ OUT_BATCH(state->Tex[i][I830_TEXREG_MCS]);
+ OUT_BATCH(state->Tex[i][I830_TEXREG_CUBE]);
+ }
+
+ if (dirty & I830_UPLOAD_TEXBLEND(i)) {
+ DBG("I830_UPLOAD_TEXBLEND(%d): %d words\n", i,
+ state->TexBlendWordsUsed[i]);
+ emit(i830, state->TexBlend[i], state->TexBlendWordsUsed[i] * 4);
+ }
+ }
+
+ state->emitted |= dirty;
+}
+
+static void
+i830_destroy_context(struct intel_context *intel)
+{
+ _tnl_free_vertices(&intel->ctx);
+}
+
+static void
+i830_set_draw_region(struct intel_context *intel,
+ struct intel_region *draw_region,
+ struct intel_region *depth_region)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+
+ intel_region_release(&i830->state.draw_region);
+ intel_region_release(&i830->state.depth_region);
+ intel_region_reference(&i830->state.draw_region, draw_region);
+ intel_region_reference(&i830->state.depth_region, depth_region);
+
+ /* XXX FBO: Need code from i915_set_draw_region() */
+
+   I830_STATECHANGE(i830, I830_UPLOAD_BUFFERS);
+ i830->state.Buffer[I830_DESTREG_CBUFADDR1] =
+ (BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(draw_region->pitch) |
+ BUF_3D_USE_FENCE);
+ i830->state.Buffer[I830_DESTREG_DBUFADDR1] =
+ (BUF_3D_ID_DEPTH | BUF_3D_PITCH(depth_region->pitch) |
+ BUF_3D_USE_FENCE);
+}
+
+#if 0
+static void
+i830_update_color_z_regions(intelContextPtr intel,
+ const intelRegion * colorRegion,
+ const intelRegion * depthRegion)
+{
+ i830ContextPtr i830 = I830_CONTEXT(intel);
+
+ i830->state.Buffer[I830_DESTREG_CBUFADDR1] =
+ (BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(colorRegion->pitch) |
+ BUF_3D_USE_FENCE);
+ i830->state.Buffer[I830_DESTREG_CBUFADDR2] = colorRegion->offset;
+
+ i830->state.Buffer[I830_DESTREG_DBUFADDR1] =
+ (BUF_3D_ID_DEPTH | BUF_3D_PITCH(depthRegion->pitch) | BUF_3D_USE_FENCE);
+ i830->state.Buffer[I830_DESTREG_DBUFADDR2] = depthRegion->offset;
+}
+#endif
+
+
+/* This isn't really handled at the moment.
+ */
+static void
+i830_lost_hardware(struct intel_context *intel)
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+ i830->state.emitted = 0;
+}
+
+
+
+static GLuint
+i830_flush_cmd(void)
+{
+ return MI_FLUSH | FLUSH_MAP_CACHE;
+}
+
+
+static void
+i830_assert_not_dirty( struct intel_context *intel )
+{
+ struct i830_context *i830 = i830_context(&intel->ctx);
+ struct i830_hw_state *state = i830->current;
+ GLuint dirty = state->active & ~state->emitted;
+ assert(!dirty);
+}
+
+
+void
+i830InitVtbl(struct i830_context *i830)
+{
+ i830->intel.vtbl.check_vertex_size = i830_check_vertex_size;
+ i830->intel.vtbl.destroy = i830_destroy_context;
+ i830->intel.vtbl.emit_state = i830_emit_state;
+ i830->intel.vtbl.lost_hardware = i830_lost_hardware;
+ i830->intel.vtbl.reduced_primitive_state = i830_reduced_primitive_state;
+ i830->intel.vtbl.set_draw_region = i830_set_draw_region;
+ i830->intel.vtbl.update_texture_state = i830UpdateTextureState;
+ i830->intel.vtbl.flush_cmd = i830_flush_cmd;
+ i830->intel.vtbl.render_start = i830_render_start;
+ i830->intel.vtbl.assert_not_dirty = i830_assert_not_dirty;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "i915_context.h"
+#include "imports.h"
+#include "intel_tex.h"
+#include "intel_tris.h"
+#include "tnl/t_context.h"
+#include "tnl/t_pipeline.h"
+#include "tnl/t_vertex.h"
+
+#include "swrast/swrast.h"
+#include "swrast_setup/swrast_setup.h"
+#include "tnl/tnl.h"
+#include "array_cache/acache.h"
+
+#include "utils.h"
+#include "i915_reg.h"
+
+#include "intel_regions.h"
+#include "intel_batchbuffer.h"
+
+/***************************************
+ * Mesa's Driver Functions
+ ***************************************/
+
+static const struct dri_extension i915_extensions[] = {
+ {"GL_ARB_depth_texture", NULL},
+ {"GL_ARB_fragment_program", NULL},
+ {"GL_ARB_shadow", NULL},
+ {"GL_ARB_texture_env_crossbar", NULL},
+ {"GL_ARB_texture_non_power_of_two", NULL},
+ {"GL_EXT_shadow_funcs", NULL},
+ /* ARB extn won't work if not enabled */
+ {"GL_SGIX_depth_texture", NULL},
+ {NULL, NULL}
+};
+
+/* Override intel default.
+ */
+static void
+i915InvalidateState(GLcontext * ctx, GLuint new_state)
+{
+ _swrast_InvalidateState(ctx, new_state);
+ _swsetup_InvalidateState(ctx, new_state);
+ _ac_InvalidateState(ctx, new_state);
+ _tnl_InvalidateState(ctx, new_state);
+ _tnl_invalidate_vertex_state(ctx, new_state);
+ intel_context(ctx)->NewGLState |= new_state;
+
+ /* Todo: gather state values under which tracked parameters become
+ * invalidated, add callbacks for things like
+ * ProgramLocalParameters, etc.
+ */
+ {
+ struct i915_fragment_program *p =
+ (struct i915_fragment_program *) ctx->FragmentProgram._Current;
+ if (p && p->nr_params)
+ p->params_uptodate = 0;
+ }
+
+ if (new_state & (_NEW_FOG | _NEW_HINT | _NEW_PROGRAM))
+ i915_update_fog(ctx);
+}
+
+
+static void
+i915InitDriverFunctions(struct dd_function_table *functions)
+{
+ intelInitDriverFunctions(functions);
+ i915InitStateFunctions(functions);
+ i915InitTextureFuncs(functions);
+ i915InitFragProgFuncs(functions);
+ functions->UpdateState = i915InvalidateState;
+}
+
+
+
+GLboolean
+i915CreateContext(const __GLcontextModes * mesaVis,
+ __DRIcontextPrivate * driContextPriv,
+ void *sharedContextPrivate)
+{
+ struct dd_function_table functions;
+ struct i915_context *i915 =
+ (struct i915_context *) CALLOC_STRUCT(i915_context);
+ struct intel_context *intel = &i915->intel;
+ GLcontext *ctx = &intel->ctx;
+
+ if (!i915)
+ return GL_FALSE;
+
+ if (0)
+ _mesa_printf("\ntexmem-0-3 branch\n\n");
+
+ i915InitVtbl(i915);
+ i915InitMetaFuncs(i915);
+
+ i915InitDriverFunctions(&functions);
+
+ if (!intelInitContext(intel, mesaVis, driContextPriv,
+ sharedContextPrivate, &functions)) {
+ FREE(i915);
+ return GL_FALSE;
+ }
+
+ ctx->Const.MaxTextureUnits = I915_TEX_UNITS;
+ ctx->Const.MaxTextureImageUnits = I915_TEX_UNITS;
+ ctx->Const.MaxTextureCoordUnits = I915_TEX_UNITS;
+
+
+ /* Advertise the full hardware capabilities. The new memory
+ * manager should cope much better with overload situations:
+ */
+ ctx->Const.MaxTextureLevels = 12;
+ ctx->Const.Max3DTextureLevels = 9;
+ ctx->Const.MaxCubeTextureLevels = 12;
+ ctx->Const.MaxTextureRectSize = (1 << 11);
+ ctx->Const.MaxTextureUnits = I915_TEX_UNITS;
+
+ /* GL_ARB_fragment_program limits - don't think Mesa actually
+ * validates programs against these, and in any case one ARB
+ * instruction can translate to more than one HW instruction, so
+ * we'll still have to check and fallback each time.
+ */
+ ctx->Const.FragmentProgram.MaxNativeTemps = I915_MAX_TEMPORARY;
+ ctx->Const.FragmentProgram.MaxNativeAttribs = 11; /* 8 tex, 2 color, fog */
+ ctx->Const.FragmentProgram.MaxNativeParameters = I915_MAX_CONSTANT;
+ ctx->Const.FragmentProgram.MaxNativeAluInstructions = I915_MAX_ALU_INSN;
+ ctx->Const.FragmentProgram.MaxNativeTexInstructions = I915_MAX_TEX_INSN;
+ ctx->Const.FragmentProgram.MaxNativeInstructions = (I915_MAX_ALU_INSN +
+ I915_MAX_TEX_INSN);
+ ctx->Const.FragmentProgram.MaxNativeTexIndirections =
+ I915_MAX_TEX_INDIRECT;
+ ctx->Const.FragmentProgram.MaxNativeAddressRegs = 0; /* I don't think we have one */
+
+ ctx->_MaintainTexEnvProgram = 1;
+ ctx->_UseTexEnvProgram = 1;
+
+ driInitExtensions(ctx, i915_extensions, GL_FALSE);
+
+
+ _tnl_init_vertices(ctx, ctx->Const.MaxArrayLockSize + 12,
+ 36 * sizeof(GLfloat));
+
+ intel->verts = TNL_CONTEXT(ctx)->clipspace.vertex_buf;
+
+ i915InitState(i915);
+
+ return GL_TRUE;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef I915CONTEXT_INC
+#define I915CONTEXT_INC
+
+#include "intel_context.h"
+
+#define I915_FALLBACK_TEXTURE 0x1000
+#define I915_FALLBACK_COLORMASK 0x2000
+#define I915_FALLBACK_STENCIL 0x4000
+#define I915_FALLBACK_STIPPLE 0x8000
+#define I915_FALLBACK_PROGRAM 0x10000
+#define I915_FALLBACK_LOGICOP 0x20000
+#define I915_FALLBACK_POLYGON_SMOOTH 0x40000
+#define I915_FALLBACK_POINT_SMOOTH 0x80000
+
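+/* Dirty bits for the state groups held in struct i915_hw_state below.
+ * I915_UPLOAD_TEX(i) allocates one bit per texture unit starting at
+ * bit 16.
+ */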
+#define I915_UPLOAD_CTX 0x1
+#define I915_UPLOAD_BUFFERS 0x2
+#define I915_UPLOAD_STIPPLE 0x4
+#define I915_UPLOAD_PROGRAM 0x8
+#define I915_UPLOAD_CONSTANTS 0x10
+#define I915_UPLOAD_FOG 0x20
+#define I915_UPLOAD_INVARIENT 0x40
+#define I915_UPLOAD_DEFAULTS 0x80
+#define I915_UPLOAD_TEX(i) (0x00010000<<(i))
+#define I915_UPLOAD_TEX_ALL (0x00ff0000)
+#define I915_UPLOAD_TEX_0_SHIFT 16
+
+
+/* State structure offsets - these will probably disappear.
+ */
+#define I915_DESTREG_CBUFADDR0 0
+#define I915_DESTREG_CBUFADDR1 1
+#define I915_DESTREG_DBUFADDR0 3
+#define I915_DESTREG_DBUFADDR1 4
+#define I915_DESTREG_DV0 6
+#define I915_DESTREG_DV1 7
+#define I915_DESTREG_SENABLE 8
+#define I915_DESTREG_SR0 9
+#define I915_DESTREG_SR1 10
+#define I915_DESTREG_SR2 11
+#define I915_DEST_SETUP_SIZE 12
+
+#define I915_CTXREG_STATE4 0
+#define I915_CTXREG_LI 1
+#define I915_CTXREG_LIS2 2
+#define I915_CTXREG_LIS4 3
+#define I915_CTXREG_LIS5 4
+#define I915_CTXREG_LIS6 5
+#define I915_CTXREG_IAB 6
+#define I915_CTXREG_BLENDCOLOR0 7
+#define I915_CTXREG_BLENDCOLOR1 8
+#define I915_CTX_SETUP_SIZE 9
+
+#define I915_FOGREG_COLOR 0
+#define I915_FOGREG_MODE0 1
+#define I915_FOGREG_MODE1 2
+#define I915_FOGREG_MODE2 3
+#define I915_FOGREG_MODE3 4
+#define I915_FOG_SETUP_SIZE 5
+
+#define I915_STPREG_ST0 0
+#define I915_STPREG_ST1 1
+#define I915_STP_SETUP_SIZE 2
+
+#define I915_TEXREG_MS3 1
+#define I915_TEXREG_MS4 2
+#define I915_TEXREG_SS2 3
+#define I915_TEXREG_SS3 4
+#define I915_TEXREG_SS4 5
+#define I915_TEX_SETUP_SIZE 6
+
+#define I915_DEFREG_C0 0
+#define I915_DEFREG_C1 1
+#define I915_DEFREG_S0 2
+#define I915_DEFREG_S1 3
+#define I915_DEFREG_Z0 4
+#define I915_DEFREG_Z1 5
+#define I915_DEF_SETUP_SIZE 6
+
+
+#define I915_MAX_CONSTANT 32
+#define I915_CONSTANT_SIZE (2+(4*I915_MAX_CONSTANT))
+
+
+#define I915_PROGRAM_SIZE 192
+
+
+/* Hardware version of a parsed fragment program. "Derived" from the
+ * mesa fragment_program struct.
+ */
+struct i915_fragment_program
+{
+ struct gl_fragment_program FragProg;
+
+ GLboolean translated;
+ GLboolean params_uptodate;
+ GLboolean on_hardware;
+ GLboolean error; /* If program is malformed for any reason. */
+
+ GLuint nr_tex_indirect;
+ GLuint nr_tex_insn;
+ GLuint nr_alu_insn;
+ GLuint nr_decl_insn;
+
+
+
+
+ /* TODO: split between the stored representation of a program and
+ * the state used to build that representation.
+ */
+ GLcontext *ctx;
+
+ GLuint declarations[I915_PROGRAM_SIZE];
+ GLuint program[I915_PROGRAM_SIZE];
+
+ GLfloat constant[I915_MAX_CONSTANT][4];
+ GLuint constant_flags[I915_MAX_CONSTANT];
+ GLuint nr_constants;
+
+ GLuint *csr; /* Cursor, points into program.
+ */
+
+ GLuint *decl; /* Cursor, points into declarations.
+ */
+
+ GLuint decl_s; /* flags for which s regs need to be decl'd */
+ GLuint decl_t; /* flags for which t regs need to be decl'd */
+
+ GLuint temp_flag; /* Tracks temporary regs which are in
+ * use.
+ */
+
+ GLuint utemp_flag; /* Tracks TYPE_U temporary regs which are in
+ * use.
+ */
+
+
+
+ /* Helpers for i915_fragprog.c:
+ */
+ GLuint wpos_tex;
+ GLboolean depth_written;
+
+ struct
+ {
+ GLuint reg; /* Hardware constant idx */
+ const GLfloat *values; /* Pointer to tracked values */
+ } param[I915_MAX_CONSTANT];
+ GLuint nr_params;
+
+
+ /* Helpers for i915_texprog.c:
+ */
+ GLuint src_texture; /* Reg containing sampled texture color,
+ * else UREG_BAD.
+ */
+
+ GLuint src_previous; /* Reg containing color from previous
+ * stage. May need to be decl'd.
+ */
+
+ GLuint last_tex_stage; /* Number of last enabled texture unit */
+
+ struct vertex_buffer *VB;
+};
+
+
+
+
+
+
+
+#define I915_TEX_UNITS 8
+
+
+struct i915_hw_state
+{
+ GLuint Ctx[I915_CTX_SETUP_SIZE];
+ GLuint Buffer[I915_DEST_SETUP_SIZE];
+ GLuint Stipple[I915_STP_SETUP_SIZE];
+ GLuint Fog[I915_FOG_SETUP_SIZE];
+ GLuint Defaults[I915_DEF_SETUP_SIZE];
+ GLuint Tex[I915_TEX_UNITS][I915_TEX_SETUP_SIZE];
+ GLuint Constant[I915_CONSTANT_SIZE];
+ GLuint ConstantSize;
+ GLuint Program[I915_PROGRAM_SIZE];
+ GLuint ProgramSize;
+
+ /* Region pointers for relocation:
+ */
+ struct intel_region *draw_region;
+ struct intel_region *depth_region;
+/* struct intel_region *tex_region[I915_TEX_UNITS]; */
+
+ /* Regions aren't actually that appropriate here as the memory may
+ * be from a PBO or FBO. Just use the buffer id. Will have to do
+ * this for draw and depth for FBOs...
+ */
+ struct _DriBufferObject *tex_buffer[I915_TEX_UNITS];
+ GLuint tex_offset[I915_TEX_UNITS];
+
+
+ GLuint active; /* I915_UPLOAD_* */
+ GLuint emitted; /* I915_UPLOAD_* */
+};
+
+#define I915_FOG_PIXEL 2
+#define I915_FOG_VERTEX 1
+#define I915_FOG_NONE 0
+
+struct i915_context
+{
+ struct intel_context intel;
+
+ GLuint last_ReallyEnabled;
+ GLuint vertex_fog;
+ GLuint lodbias_ss2[MAX_TEXTURE_UNITS];
+
+
+ struct i915_fragment_program tex_program;
+ struct i915_fragment_program *current_program;
+
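+ /* "state" tracks the normal GL context and "meta" holds the private
+  * state used by the metaops; "current" points at whichever block is
+  * being emitted (switched via SET_STATE in i915_metaops.c).
+  */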
+ struct i915_hw_state meta, initial, state, *current;
+};
+
+
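+/* Both helpers flush any buffered vertices before touching state:
+ * I915_STATECHANGE marks a state group as needing re-emission, while
+ * I915_ACTIVESTATE switches whether that group is uploaded at all.
+ */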
+#define I915_STATECHANGE(i915, flag) \
+do { \
+ INTEL_FIREVERTICES( &(i915)->intel ); \
+ (i915)->state.emitted &= ~(flag); \
+} while (0)
+
+#define I915_ACTIVESTATE(i915, flag, mode) \
+do { \
+ INTEL_FIREVERTICES( &(i915)->intel ); \
+ if (mode) \
+ (i915)->state.active |= (flag); \
+ else \
+ (i915)->state.active &= ~(flag); \
+} while (0)
+
+
+/*======================================================================
+ * i915_vtbl.c
+ */
+extern void i915InitVtbl(struct i915_context *i915);
+
+extern void
+i915_state_draw_region(struct intel_context *intel,
+ struct i915_hw_state *state,
+ struct intel_region *color_region,
+ struct intel_region *depth_region);
+
+
+
+#define SZ_TO_HW(sz) ((sz-2)&0x3)
+#define EMIT_SZ(sz) (EMIT_1F + (sz) - 1)
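+/* Vertex-layout helpers used when building the hardware vertex format
+ * (see i915ValidateFragmentProgram): EMIT_ATTR appends one attribute to
+ * intel->vertex_attrs, ORs its bits into the local s4 word and advances
+ * the running byte offset; EMIT_PAD inserts N bytes of padding.
+ */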
+#define EMIT_ATTR( ATTR, STYLE, S4, SZ ) \
+do { \
+ intel->vertex_attrs[intel->vertex_attr_count].attrib = (ATTR); \
+ intel->vertex_attrs[intel->vertex_attr_count].format = (STYLE); \
+ s4 |= S4; \
+ intel->vertex_attr_count++; \
+ offset += (SZ); \
+} while (0)
+
+#define EMIT_PAD( N ) \
+do { \
+ intel->vertex_attrs[intel->vertex_attr_count].attrib = 0; \
+ intel->vertex_attrs[intel->vertex_attr_count].format = EMIT_PAD; \
+ intel->vertex_attrs[intel->vertex_attr_count].offset = (N); \
+ intel->vertex_attr_count++; \
+ offset += (N); \
+} while (0)
+
+
+
+/*======================================================================
+ * i915_context.c
+ */
+extern GLboolean i915CreateContext(const __GLcontextModes * mesaVis,
+ __DRIcontextPrivate * driContextPriv,
+ void *sharedContextPrivate);
+
+
+/*======================================================================
+ * i915_texprog.c
+ */
+extern void i915ValidateTextureProgram(struct i915_context *i915);
+
+
+/*======================================================================
+ * i915_debug.c
+ */
+extern void i915_disassemble_program(const GLuint * program, GLuint sz);
+extern void i915_print_ureg(const char *msg, GLuint ureg);
+
+
+/*======================================================================
+ * i915_state.c
+ */
+extern void i915InitStateFunctions(struct dd_function_table *functions);
+extern void i915InitState(struct i915_context *i915);
+extern void i915_update_fog(GLcontext * ctx);
+
+
+/*======================================================================
+ * i915_tex.c
+ */
+extern void i915UpdateTextureState(struct intel_context *intel);
+extern void i915InitTextureFuncs(struct dd_function_table *functions);
+
+/*======================================================================
+ * i915_metaops.c
+ */
+void i915InitMetaFuncs(struct i915_context *i915);
+
+
+/*======================================================================
+ * i915_fragprog.c
+ */
+extern void i915ValidateFragmentProgram(struct i915_context *i915);
+extern void i915InitFragProgFuncs(struct dd_function_table *functions);
+
+/*======================================================================
+ * Inline conversion functions. These are better-typed than the
+ * macros used previously:
+ */
+static INLINE struct i915_context *
+i915_context(GLcontext * ctx)
+{
+ return (struct i915_context *) ctx;
+}
+
+
+
+#define I915_CONTEXT(ctx) i915_context(ctx)
+
+
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "i915_reg.h"
+#include "i915_context.h"
+#include <stdio.h>
+
+
+static const char *opcodes[0x20] = {
+ "NOP",
+ "ADD",
+ "MOV",
+ "MUL",
+ "MAD",
+ "DP2ADD",
+ "DP3",
+ "DP4",
+ "FRC",
+ "RCP",
+ "RSQ",
+ "EXP",
+ "LOG",
+ "CMP",
+ "MIN",
+ "MAX",
+ "FLR",
+ "MOD",
+ "TRC",
+ "SGE",
+ "SLT",
+ "TEXLD",
+ "TEXLDP",
+ "TEXLDB",
+ "TEXKILL",
+ "DCL",
+ "0x1a",
+ "0x1b",
+ "0x1c",
+ "0x1d",
+ "0x1e",
+ "0x1f",
+};
+
+
+static const int args[0x20] = {
+ 0, /* 0 nop */
+ 2, /* 1 add */
+ 1, /* 2 mov */
+ 2, /* 3 mul */
+ 3, /* 4 mad */
+ 3, /* 5 dp2add */
+ 2, /* 6 dp3 */
+ 2, /* 7 dp4 */
+ 1, /* 8 frc */
+ 1, /* 9 rcp */
+ 1, /* a rsq */
+ 1, /* b exp */
+ 1, /* c log */
+ 3, /* d cmp */
+ 2, /* e min */
+ 2, /* f max */
+ 1, /* 10 flr */
+ 1, /* 11 mod */
+ 1, /* 12 trc */
+ 2, /* 13 sge */
+ 2, /* 14 slt */
+ 1,
+ 1,
+ 1,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
+
+static const char *regname[0x8] = {
+ "R",
+ "T",
+ "CONST",
+ "S",
+ "OC",
+ "OD",
+ "U",
+ "UNKNOWN",
+};
+
+static void
+print_reg_type_nr(GLuint type, GLuint nr)
+{
+ switch (type) {
+ case REG_TYPE_T:
+ switch (nr) {
+ case T_DIFFUSE:
+ fprintf(stderr, "T_DIFFUSE");
+ return;
+ case T_SPECULAR:
+ fprintf(stderr, "T_SPECULAR");
+ return;
+ case T_FOG_W:
+ fprintf(stderr, "T_FOG_W");
+ return;
+ default:
+ fprintf(stderr, "T_TEX%d", nr);
+ return;
+ }
+ case REG_TYPE_OC:
+ if (nr == 0) {
+ fprintf(stderr, "oC");
+ return;
+ }
+ break;
+ case REG_TYPE_OD:
+ if (nr == 0) {
+ fprintf(stderr, "oD");
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+
+ fprintf(stderr, "%s[%d]", regname[type], nr);
+}
+
+#define REG_SWIZZLE_MASK 0x7777
+#define REG_NEGATE_MASK 0x8888
+
+#define REG_SWIZZLE_XYZW ((SRC_X << A2_SRC2_CHANNEL_X_SHIFT) | \
+ (SRC_Y << A2_SRC2_CHANNEL_Y_SHIFT) | \
+ (SRC_Z << A2_SRC2_CHANNEL_Z_SHIFT) | \
+ (SRC_W << A2_SRC2_CHANNEL_W_SHIFT))
+
+
+static void
+print_reg_neg_swizzle(GLuint reg)
+{
+ int i;
+
+ if ((reg & REG_SWIZZLE_MASK) == REG_SWIZZLE_XYZW &&
+ (reg & REG_NEGATE_MASK) == 0)
+ return;
+
+ fprintf(stderr, ".");
+
+ for (i = 3; i >= 0; i--) {
+ if (reg & (1 << ((i * 4) + 3)))
+ fprintf(stderr, "-");
+
+ switch ((reg >> (i * 4)) & 0x7) {
+ case 0:
+ fprintf(stderr, "x");
+ break;
+ case 1:
+ fprintf(stderr, "y");
+ break;
+ case 2:
+ fprintf(stderr, "z");
+ break;
+ case 3:
+ fprintf(stderr, "w");
+ break;
+ case 4:
+ fprintf(stderr, "0");
+ break;
+ case 5:
+ fprintf(stderr, "1");
+ break;
+ default:
+ fprintf(stderr, "?");
+ break;
+ }
+ }
+}
+
+
+static void
+print_src_reg(GLuint dword)
+{
+ GLuint nr = (dword >> A2_SRC2_NR_SHIFT) & REG_NR_MASK;
+ GLuint type = (dword >> A2_SRC2_TYPE_SHIFT) & REG_TYPE_MASK;
+ print_reg_type_nr(type, nr);
+ print_reg_neg_swizzle(dword);
+}
+
+void
+i915_print_ureg(const char *msg, GLuint ureg)
+{
+ fprintf(stderr, "%s: ", msg);
+ print_src_reg(ureg >> 8);
+ fprintf(stderr, "\n");
+}
+
+static void
+print_dest_reg(GLuint dword)
+{
+ GLuint nr = (dword >> A0_DEST_NR_SHIFT) & REG_NR_MASK;
+ GLuint type = (dword >> A0_DEST_TYPE_SHIFT) & REG_TYPE_MASK;
+ print_reg_type_nr(type, nr);
+ if ((dword & A0_DEST_CHANNEL_ALL) == A0_DEST_CHANNEL_ALL)
+ return;
+ fprintf(stderr, ".");
+ if (dword & A0_DEST_CHANNEL_X)
+ fprintf(stderr, "x");
+ if (dword & A0_DEST_CHANNEL_Y)
+ fprintf(stderr, "y");
+ if (dword & A0_DEST_CHANNEL_Z)
+ fprintf(stderr, "z");
+ if (dword & A0_DEST_CHANNEL_W)
+ fprintf(stderr, "w");
+}
+
+
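+/* The first two source operands straddle instruction dwords, so stitch
+ * them back together before decoding.
+ */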
+#define GET_SRC0_REG(r0, r1) ((r0<<14)|(r1>>A1_SRC0_CHANNEL_W_SHIFT))
+#define GET_SRC1_REG(r0, r1) ((r0<<8)|(r1>>A2_SRC1_CHANNEL_W_SHIFT))
+#define GET_SRC2_REG(r) (r)
+
+
+static void
+print_arith_op(GLuint opcode, const GLuint * program)
+{
+ if (opcode != A0_NOP) {
+ print_dest_reg(program[0]);
+ if (program[0] & A0_DEST_SATURATE)
+ fprintf(stderr, " = SATURATE ");
+ else
+ fprintf(stderr, " = ");
+ }
+
+ fprintf(stderr, "%s ", opcodes[opcode]);
+
+ print_src_reg(GET_SRC0_REG(program[0], program[1]));
+ if (args[opcode] == 1) {
+ fprintf(stderr, "\n");
+ return;
+ }
+
+ fprintf(stderr, ", ");
+ print_src_reg(GET_SRC1_REG(program[1], program[2]));
+ if (args[opcode] == 2) {
+ fprintf(stderr, "\n");
+ return;
+ }
+
+ fprintf(stderr, ", ");
+ print_src_reg(GET_SRC2_REG(program[2]));
+ fprintf(stderr, "\n");
+ return;
+}
+
+
+static void
+print_tex_op(GLuint opcode, const GLuint * program)
+{
+ print_dest_reg(program[0] | A0_DEST_CHANNEL_ALL);
+ fprintf(stderr, " = ");
+
+ fprintf(stderr, "%s ", opcodes[opcode]);
+
+ fprintf(stderr, "S[%d],", program[0] & T0_SAMPLER_NR_MASK);
+
+ print_reg_type_nr((program[1] >> T1_ADDRESS_REG_TYPE_SHIFT) &
+ REG_TYPE_MASK,
+ (program[1] >> T1_ADDRESS_REG_NR_SHIFT) & REG_NR_MASK);
+ fprintf(stderr, "\n");
+}
+
+static void
+print_dcl_op(GLuint opcode, const GLuint * program)
+{
+ fprintf(stderr, "%s ", opcodes[opcode]);
+ print_dest_reg(program[0] | A0_DEST_CHANNEL_ALL);
+ fprintf(stderr, "\n");
+}
+
+
+void
+i915_disassemble_program(const GLuint * program, GLuint sz)
+{
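+ /* program[0] is the shader program header: its low bits give the body
+  * length in dwords, and every following instruction occupies three
+  * dwords.
+  */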
+ GLuint size = program[0] & 0x1ff;
+ GLint i;
+
+ fprintf(stderr, "BEGIN\n");
+
+ if (size + 2 != sz) {
+ fprintf(stderr, "%s: program size mismatch %d/%d\n", __FUNCTION__,
+ size + 2, sz);
+ exit(1);
+ }
+
+ program++;
+ for (i = 1; i < sz; i += 3, program += 3) {
+ GLuint opcode = program[0] & (0x1f << 24);
+
+ if ((GLint) opcode >= A0_NOP && opcode <= A0_SLT)
+ print_arith_op(opcode >> 24, program);
+ else if (opcode >= T0_TEXLD && opcode <= T0_TEXKILL)
+ print_tex_op(opcode >> 24, program);
+ else if (opcode == D0_DCL)
+ print_dcl_op(opcode >> 24, program);
+ else
+ fprintf(stderr, "Unknown opcode 0x%x\n", opcode);
+ }
+
+ fprintf(stderr, "END\n\n");
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "macros.h"
+#include "enums.h"
+
+#include "tnl/tnl.h"
+#include "tnl/t_context.h"
+#include "intel_batchbuffer.h"
+
+#include "i915_reg.h"
+#include "i915_context.h"
+#include "i915_program.h"
+
+#include "program_instruction.h"
+#include "program.h"
+
+
+
+/* 1, -1/3!, 1/5!, -1/7! */
+static const GLfloat sin_constants[4] = { 1.0,
+ -1.0 / (3 * 2 * 1),
+ 1.0 / (5 * 4 * 3 * 2 * 1),
+ -1.0 / (7 * 6 * 5 * 4 * 3 * 2 * 1)
+};
+
+/* 1, -1/2!, 1/4!, -1/6! */
+static const GLfloat cos_constants[4] = { 1.0,
+ -1.0 / (2 * 1),
+ 1.0 / (4 * 3 * 2 * 1),
+ -1.0 / (6 * 5 * 4 * 3 * 2 * 1)
+};
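+/* The SIN and COS cases below reduce the angle into a single period
+ * (scale by 1/(2*pi), drop the integer part with MOD, scale back by
+ * 2*pi), build the needed powers of x with a few MULs, and finish with a
+ * single DP4 against one of the constant vectors above.
+ */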
+
+/**
+ * Retrieve a ureg for the given source register. Will emit
+ * constants, apply swizzling and negation as needed.
+ */
+static GLuint
+src_vector(struct i915_fragment_program *p,
+ const struct prog_src_register *source,
+ const struct gl_fragment_program *program)
+{
+ GLuint src;
+
+ switch (source->File) {
+
+ /* Registers:
+ */
+ case PROGRAM_TEMPORARY:
+ if (source->Index >= I915_MAX_TEMPORARY) {
+ i915_program_error(p, "Exceeded max temporary reg");
+ return 0;
+ }
+ src = UREG(REG_TYPE_R, source->Index);
+ break;
+ case PROGRAM_INPUT:
+ switch (source->Index) {
+ case FRAG_ATTRIB_WPOS:
+ src = i915_emit_decl(p, REG_TYPE_T, p->wpos_tex, D0_CHANNEL_ALL);
+ break;
+ case FRAG_ATTRIB_COL0:
+ src = i915_emit_decl(p, REG_TYPE_T, T_DIFFUSE, D0_CHANNEL_ALL);
+ break;
+ case FRAG_ATTRIB_COL1:
+ src = i915_emit_decl(p, REG_TYPE_T, T_SPECULAR, D0_CHANNEL_XYZ);
+ src = swizzle(src, X, Y, Z, ONE);
+ break;
+ case FRAG_ATTRIB_FOGC:
+ src = i915_emit_decl(p, REG_TYPE_T, T_FOG_W, D0_CHANNEL_W);
+ src = swizzle(src, W, W, W, W);
+ break;
+ case FRAG_ATTRIB_TEX0:
+ case FRAG_ATTRIB_TEX1:
+ case FRAG_ATTRIB_TEX2:
+ case FRAG_ATTRIB_TEX3:
+ case FRAG_ATTRIB_TEX4:
+ case FRAG_ATTRIB_TEX5:
+ case FRAG_ATTRIB_TEX6:
+ case FRAG_ATTRIB_TEX7:
+ src = i915_emit_decl(p, REG_TYPE_T,
+ T_TEX0 + (source->Index - FRAG_ATTRIB_TEX0),
+ D0_CHANNEL_ALL);
+ break;
+
+ default:
+ i915_program_error(p, "Bad source->Index");
+ return 0;
+ }
+ break;
+
+ /* Various parameters and env values. All emitted to
+ * hardware as program constants.
+ */
+ case PROGRAM_LOCAL_PARAM:
+ src = i915_emit_param4fv(p, program->Base.LocalParams[source->Index]);
+ break;
+
+ case PROGRAM_ENV_PARAM:
+ src =
+ i915_emit_param4fv(p,
+ p->ctx->FragmentProgram.Parameters[source->
+ Index]);
+ break;
+
+ case PROGRAM_STATE_VAR:
+ case PROGRAM_NAMED_PARAM:
+ src =
+ i915_emit_param4fv(p,
+ program->Base.Parameters->ParameterValues[source->
+ Index]);
+ break;
+
+ default:
+ i915_program_error(p, "Bad source->File");
+ return 0;
+ }
+
+ src = swizzle(src,
+ GET_SWZ(source->Swizzle, 0),
+ GET_SWZ(source->Swizzle, 1),
+ GET_SWZ(source->Swizzle, 2), GET_SWZ(source->Swizzle, 3));
+
+ if (source->NegateBase)
+ src = negate(src,
+ GET_BIT(source->NegateBase, 0),
+ GET_BIT(source->NegateBase, 1),
+ GET_BIT(source->NegateBase, 2),
+ GET_BIT(source->NegateBase, 3));
+
+ return src;
+}
+
+
+static GLuint
+get_result_vector(struct i915_fragment_program *p,
+ const struct prog_instruction *inst)
+{
+ switch (inst->DstReg.File) {
+ case PROGRAM_OUTPUT:
+ switch (inst->DstReg.Index) {
+ case FRAG_RESULT_COLR:
+ return UREG(REG_TYPE_OC, 0);
+ case FRAG_RESULT_DEPR:
+ p->depth_written = 1;
+ return UREG(REG_TYPE_OD, 0);
+ default:
+ i915_program_error(p, "Bad inst->DstReg.Index");
+ return 0;
+ }
+ case PROGRAM_TEMPORARY:
+ return UREG(REG_TYPE_R, inst->DstReg.Index);
+ default:
+ i915_program_error(p, "Bad inst->DstReg.File");
+ return 0;
+ }
+}
+
+static GLuint
+get_result_flags(const struct prog_instruction *inst)
+{
+ GLuint flags = 0;
+
+ if (inst->SaturateMode == SATURATE_ZERO_ONE)
+ flags |= A0_DEST_SATURATE;
+ if (inst->DstReg.WriteMask & WRITEMASK_X)
+ flags |= A0_DEST_CHANNEL_X;
+ if (inst->DstReg.WriteMask & WRITEMASK_Y)
+ flags |= A0_DEST_CHANNEL_Y;
+ if (inst->DstReg.WriteMask & WRITEMASK_Z)
+ flags |= A0_DEST_CHANNEL_Z;
+ if (inst->DstReg.WriteMask & WRITEMASK_W)
+ flags |= A0_DEST_CHANNEL_W;
+
+ return flags;
+}
+
+static GLuint
+translate_tex_src_target(struct i915_fragment_program *p, GLubyte bit)
+{
+ switch (bit) {
+ case TEXTURE_1D_INDEX:
+ return D0_SAMPLE_TYPE_2D;
+ case TEXTURE_2D_INDEX:
+ return D0_SAMPLE_TYPE_2D;
+ case TEXTURE_RECT_INDEX:
+ return D0_SAMPLE_TYPE_2D;
+ case TEXTURE_3D_INDEX:
+ return D0_SAMPLE_TYPE_VOLUME;
+ case TEXTURE_CUBE_INDEX:
+ return D0_SAMPLE_TYPE_CUBE;
+ default:
+ i915_program_error(p, "TexSrcBit");
+ return 0;
+ }
+}
+
+#define EMIT_TEX( OP ) \
+do { \
+ GLuint dim = translate_tex_src_target( p, inst->TexSrcTarget ); \
+ GLuint sampler = i915_emit_decl(p, REG_TYPE_S, \
+ inst->TexSrcUnit, dim); \
+ GLuint coord = src_vector( p, &inst->SrcReg[0], program); \
+ /* Texel lookup */ \
+ \
+ i915_emit_texld( p, \
+ get_result_vector( p, inst ), \
+ get_result_flags( inst ), \
+ sampler, \
+ coord, \
+ OP); \
+} while (0)
+
+#define EMIT_ARITH( OP, N ) \
+do { \
+ i915_emit_arith( p, \
+ OP, \
+ get_result_vector( p, inst ), \
+ get_result_flags( inst ), 0, \
+ (N<1)?0:src_vector( p, &inst->SrcReg[0], program), \
+ (N<2)?0:src_vector( p, &inst->SrcReg[1], program), \
+ (N<3)?0:src_vector( p, &inst->SrcReg[2], program)); \
+} while (0)
+
+#define EMIT_1ARG_ARITH( OP ) EMIT_ARITH( OP, 1 )
+#define EMIT_2ARG_ARITH( OP ) EMIT_ARITH( OP, 2 )
+#define EMIT_3ARG_ARITH( OP ) EMIT_ARITH( OP, 3 )
+
+
+/* Possible concerns:
+ *
+ * SIN, COS -- could use another taylor step?
+ * LIT -- results seem a little different to sw mesa
+ * LOG -- different to mesa on negative numbers, but this is conformant.
+ *
+ * Parse failures -- Mesa doesn't currently give a good indication
+ * internally whether a particular program string parsed or not. This
+ * can lead to confusion -- hopefully we cope with it ok now.
+ *
+ */
+static void
+upload_program(struct i915_fragment_program *p)
+{
+ const struct gl_fragment_program *program =
+ p->ctx->FragmentProgram._Current;
+ const struct prog_instruction *inst = program->Base.Instructions;
+
+/* _mesa_debug_fp_inst(program->Base.NumInstructions, inst); */
+
+ /* Is this a parse-failed program? Ensure a valid program is
+ * loaded, as the flagging of an error isn't sufficient to stop
+ * this being uploaded to hardware.
+ */
+ if (inst[0].Opcode == OPCODE_END) {
+ GLuint tmp = i915_get_utemp(p);
+ i915_emit_arith(p,
+ A0_MOV,
+ UREG(REG_TYPE_OC, 0),
+ A0_DEST_CHANNEL_ALL, 0,
+ swizzle(tmp, ONE, ZERO, ONE, ONE), 0, 0);
+ return;
+ }
+
+ while (1) {
+ GLuint src0, src1, src2, flags;
+ GLuint tmp = 0;
+
+ switch (inst->Opcode) {
+ case OPCODE_ABS:
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+ i915_emit_arith(p,
+ A0_MAX,
+ get_result_vector(p, inst),
+ get_result_flags(inst), 0,
+ src0, negate(src0, 1, 1, 1, 1), 0);
+ break;
+
+ case OPCODE_ADD:
+ EMIT_2ARG_ARITH(A0_ADD);
+ break;
+
+ case OPCODE_CMP:
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+ src1 = src_vector(p, &inst->SrcReg[1], program);
+ src2 = src_vector(p, &inst->SrcReg[2], program);
+ i915_emit_arith(p, A0_CMP, get_result_vector(p, inst), get_result_flags(inst), 0, src0, src2, src1); /* NOTE: order of src2, src1 */
+ break;
+
+ case OPCODE_COS:
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+ tmp = i915_get_utemp(p);
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_X, 0,
+ src0, i915_emit_const1f(p, 1.0 / (M_PI * 2)), 0);
+
+ i915_emit_arith(p, A0_MOD, tmp, A0_DEST_CHANNEL_X, 0, tmp, 0, 0);
+
+ /* By choosing different taylor constants, could get rid of this mul:
+ */
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_X, 0,
+ tmp, i915_emit_const1f(p, (M_PI * 2)), 0);
+
+ /*
+ * t0.xy = MUL x.xx11, x.x1111 ; x^2, x, 1, 1
+ * t0 = MUL t0.xyxy t0.xx11 ; x^4, x^3, x^2, 1
+ * t0 = MUL t0.xxz1 t0.z111 ; x^6 x^4 x^2 1
+ * result = DP4 t0, cos_constants
+ */
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_XY, 0,
+ swizzle(tmp, X, X, ONE, ONE),
+ swizzle(tmp, X, ONE, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_XYZ, 0,
+ swizzle(tmp, X, Y, X, ONE),
+ swizzle(tmp, X, X, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_XYZ, 0,
+ swizzle(tmp, X, X, Z, ONE),
+ swizzle(tmp, Z, ONE, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_DP4,
+ get_result_vector(p, inst),
+ get_result_flags(inst), 0,
+ swizzle(tmp, ONE, Z, Y, X),
+ i915_emit_const4fv(p, cos_constants), 0);
+
+ break;
+
+ case OPCODE_DP3:
+ EMIT_2ARG_ARITH(A0_DP3);
+ break;
+
+ case OPCODE_DP4:
+ EMIT_2ARG_ARITH(A0_DP4);
+ break;
+
+ case OPCODE_DPH:
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+ src1 = src_vector(p, &inst->SrcReg[1], program);
+
+ i915_emit_arith(p,
+ A0_DP4,
+ get_result_vector(p, inst),
+ get_result_flags(inst), 0,
+ swizzle(src0, X, Y, Z, ONE), src1, 0);
+ break;
+
+ case OPCODE_DST:
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+ src1 = src_vector(p, &inst->SrcReg[1], program);
+
+ /* result[0] = 1 * 1;
+ * result[1] = a[1] * b[1];
+ * result[2] = a[2] * 1;
+ * result[3] = 1 * b[3];
+ */
+ i915_emit_arith(p,
+ A0_MUL,
+ get_result_vector(p, inst),
+ get_result_flags(inst), 0,
+ swizzle(src0, ONE, Y, Z, ONE),
+ swizzle(src1, ONE, Y, ONE, W), 0);
+ break;
+
+ case OPCODE_EX2:
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+
+ i915_emit_arith(p,
+ A0_EXP,
+ get_result_vector(p, inst),
+ get_result_flags(inst), 0,
+ swizzle(src0, X, X, X, X), 0, 0);
+ break;
+
+ case OPCODE_FLR:
+ EMIT_1ARG_ARITH(A0_FLR);
+ break;
+
+ case OPCODE_FRC:
+ EMIT_1ARG_ARITH(A0_FRC);
+ break;
+
+ case OPCODE_KIL:
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+ tmp = i915_get_utemp(p);
+
+ i915_emit_texld(p, tmp, A0_DEST_CHANNEL_ALL, /* use a dummy dest reg */
+ 0, src0, T0_TEXKILL);
+ break;
+
+ case OPCODE_LG2:
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+
+ i915_emit_arith(p,
+ A0_LOG,
+ get_result_vector(p, inst),
+ get_result_flags(inst), 0,
+ swizzle(src0, X, X, X, X), 0, 0);
+ break;
+
+ case OPCODE_LIT:
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+ tmp = i915_get_utemp(p);
+
+ /* tmp = max( a.xyzw, a.00zw )
+ * XXX: Clamp tmp.w to -128..128
+ * tmp.y = log(tmp.y)
+ * tmp.y = tmp.w * tmp.y
+ * tmp.y = exp(tmp.y)
+ * result = cmp (a.11-x1, a.1x01, a.1xy1 )
+ */
+ i915_emit_arith(p, A0_MAX, tmp, A0_DEST_CHANNEL_ALL, 0,
+ src0, swizzle(src0, ZERO, ZERO, Z, W), 0);
+
+ i915_emit_arith(p, A0_LOG, tmp, A0_DEST_CHANNEL_Y, 0,
+ swizzle(tmp, Y, Y, Y, Y), 0, 0);
+
+ i915_emit_arith(p, A0_MUL, tmp, A0_DEST_CHANNEL_Y, 0,
+ swizzle(tmp, ZERO, Y, ZERO, ZERO),
+ swizzle(tmp, ZERO, W, ZERO, ZERO), 0);
+
+ i915_emit_arith(p, A0_EXP, tmp, A0_DEST_CHANNEL_Y, 0,
+ swizzle(tmp, Y, Y, Y, Y), 0, 0);
+
+ i915_emit_arith(p, A0_CMP,
+ get_result_vector(p, inst),
+ get_result_flags(inst), 0,
+ negate(swizzle(tmp, ONE, ONE, X, ONE), 0, 0, 1, 0),
+ swizzle(tmp, ONE, X, ZERO, ONE),
+ swizzle(tmp, ONE, X, Y, ONE));
+
+ break;
+
+ case OPCODE_LRP:
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+ src1 = src_vector(p, &inst->SrcReg[1], program);
+ src2 = src_vector(p, &inst->SrcReg[2], program);
+ flags = get_result_flags(inst);
+ tmp = i915_get_utemp(p);
+
+ /* b*a + c*(1-a)
+ *
+ * b*a + c - ca
+ *
+ * tmp = b*a + c,
+ * result = (-c)*a + tmp
+ */
+ i915_emit_arith(p, A0_MAD, tmp,
+ flags & A0_DEST_CHANNEL_ALL, 0, src1, src0, src2);
+
+ i915_emit_arith(p, A0_MAD,
+ get_result_vector(p, inst),
+ flags, 0, negate(src2, 1, 1, 1, 1), src0, tmp);
+ break;
+
+ case OPCODE_MAD:
+ EMIT_3ARG_ARITH(A0_MAD);
+ break;
+
+ case OPCODE_MAX:
+ EMIT_2ARG_ARITH(A0_MAX);
+ break;
+
+ case OPCODE_MIN:
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+ src1 = src_vector(p, &inst->SrcReg[1], program);
+ tmp = i915_get_utemp(p);
+ flags = get_result_flags(inst);
+
+ i915_emit_arith(p,
+ A0_MAX,
+ tmp, flags & A0_DEST_CHANNEL_ALL, 0,
+ negate(src0, 1, 1, 1, 1),
+ negate(src1, 1, 1, 1, 1), 0);
+
+ i915_emit_arith(p,
+ A0_MOV,
+ get_result_vector(p, inst),
+ flags, 0, negate(tmp, 1, 1, 1, 1), 0, 0);
+ break;
+
+ case OPCODE_MOV:
+ EMIT_1ARG_ARITH(A0_MOV);
+ break;
+
+ case OPCODE_MUL:
+ EMIT_2ARG_ARITH(A0_MUL);
+ break;
+
+ case OPCODE_POW:
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+ src1 = src_vector(p, &inst->SrcReg[1], program);
+ tmp = i915_get_utemp(p);
+ flags = get_result_flags(inst);
+
+ /* XXX: masking on intermediate values, here and elsewhere.
+ */
+ i915_emit_arith(p,
+ A0_LOG,
+ tmp, A0_DEST_CHANNEL_X, 0,
+ swizzle(src0, X, X, X, X), 0, 0);
+
+ i915_emit_arith(p, A0_MUL, tmp, A0_DEST_CHANNEL_X, 0, tmp, src1, 0);
+
+
+ i915_emit_arith(p,
+ A0_EXP,
+ get_result_vector(p, inst),
+ flags, 0, swizzle(tmp, X, X, X, X), 0, 0);
+
+ break;
+
+ case OPCODE_RCP:
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+
+ i915_emit_arith(p,
+ A0_RCP,
+ get_result_vector(p, inst),
+ get_result_flags(inst), 0,
+ swizzle(src0, X, X, X, X), 0, 0);
+ break;
+
+ case OPCODE_RSQ:
+
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+
+ i915_emit_arith(p,
+ A0_RSQ,
+ get_result_vector(p, inst),
+ get_result_flags(inst), 0,
+ swizzle(src0, X, X, X, X), 0, 0);
+ break;
+
+ case OPCODE_SCS:
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+ tmp = i915_get_utemp(p);
+
+ /*
+ * t0.xy = MUL x.xx11, x.x1111 ; x^2, x, 1, 1
+ * t0 = MUL t0.xyxy t0.xx11 ; x^4, x^3, x^2, x
+ * t1 = MUL t0.xyyw t0.yz11 ; x^7 x^5 x^3 x
+ * scs.x = DP4 t1, sin_constants
+ * t1 = MUL t0.xxz1 t0.z111 ; x^6 x^4 x^2 1
+ * scs.y = DP4 t1, cos_constants
+ */
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_XY, 0,
+ swizzle(src0, X, X, ONE, ONE),
+ swizzle(src0, X, ONE, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_ALL, 0,
+ swizzle(tmp, X, Y, X, Y),
+ swizzle(tmp, X, X, ONE, ONE), 0);
+
+ if (inst->DstReg.WriteMask & WRITEMASK_Y) {
+ GLuint tmp1;
+
+ if (inst->DstReg.WriteMask & WRITEMASK_X)
+ tmp1 = i915_get_utemp(p);
+ else
+ tmp1 = tmp;
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp1, A0_DEST_CHANNEL_ALL, 0,
+ swizzle(tmp, X, Y, Y, W),
+ swizzle(tmp, X, Z, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_DP4,
+ get_result_vector(p, inst),
+ A0_DEST_CHANNEL_Y, 0,
+ swizzle(tmp1, W, Z, Y, X),
+ i915_emit_const4fv(p, sin_constants), 0);
+ }
+
+ if (inst->DstReg.WriteMask & WRITEMASK_X) {
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_XYZ, 0,
+ swizzle(tmp, X, X, Z, ONE),
+ swizzle(tmp, Z, ONE, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_DP4,
+ get_result_vector(p, inst),
+ A0_DEST_CHANNEL_X, 0,
+ swizzle(tmp, ONE, Z, Y, X),
+ i915_emit_const4fv(p, cos_constants), 0);
+ }
+ break;
+
+ case OPCODE_SGE:
+ EMIT_2ARG_ARITH(A0_SGE);
+ break;
+
+ case OPCODE_SIN:
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+ tmp = i915_get_utemp(p);
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_X, 0,
+ src0, i915_emit_const1f(p, 1.0 / (M_PI * 2)), 0);
+
+ i915_emit_arith(p, A0_MOD, tmp, A0_DEST_CHANNEL_X, 0, tmp, 0, 0);
+
+ /* By choosing different taylor constants, could get rid of this mul:
+ */
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_X, 0,
+ tmp, i915_emit_const1f(p, (M_PI * 2)), 0);
+
+ /*
+ * t0.xy = MUL x.xx11, x.x1111 ; x^2, x, 1, 1
+ * t0 = MUL t0.xyxy t0.xx11 ; x^4, x^3, x^2, x
+ * t1 = MUL t0.xyyw t0.yz11 ; x^7 x^5 x^3 x
+ * result = DP4 t1.wzyx, sin_constants
+ */
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_XY, 0,
+ swizzle(tmp, X, X, ONE, ONE),
+ swizzle(tmp, X, ONE, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_ALL, 0,
+ swizzle(tmp, X, Y, X, Y),
+ swizzle(tmp, X, X, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_ALL, 0,
+ swizzle(tmp, X, Y, Y, W),
+ swizzle(tmp, X, Z, ONE, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_DP4,
+ get_result_vector(p, inst),
+ get_result_flags(inst), 0,
+ swizzle(tmp, W, Z, Y, X),
+ i915_emit_const4fv(p, sin_constants), 0);
+ break;
+
+ case OPCODE_SLT:
+ EMIT_2ARG_ARITH(A0_SLT);
+ break;
+
+ case OPCODE_SUB:
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+ src1 = src_vector(p, &inst->SrcReg[1], program);
+
+ i915_emit_arith(p,
+ A0_ADD,
+ get_result_vector(p, inst),
+ get_result_flags(inst), 0,
+ src0, negate(src1, 1, 1, 1, 1), 0);
+ break;
+
+ case OPCODE_SWZ:
+ EMIT_1ARG_ARITH(A0_MOV); /* extended swizzle handled natively */
+ break;
+
+ case OPCODE_TEX:
+ EMIT_TEX(T0_TEXLD);
+ break;
+
+ case OPCODE_TXB:
+ EMIT_TEX(T0_TEXLDB);
+ break;
+
+ case OPCODE_TXP:
+ EMIT_TEX(T0_TEXLDP);
+ break;
+
+ case OPCODE_XPD:
+ /* Cross product:
+ * result.x = src0.y * src1.z - src0.z * src1.y;
+ * result.y = src0.z * src1.x - src0.x * src1.z;
+ * result.z = src0.x * src1.y - src0.y * src1.x;
+ * result.w = undef;
+ */
+ src0 = src_vector(p, &inst->SrcReg[0], program);
+ src1 = src_vector(p, &inst->SrcReg[1], program);
+ tmp = i915_get_utemp(p);
+
+ i915_emit_arith(p,
+ A0_MUL,
+ tmp, A0_DEST_CHANNEL_ALL, 0,
+ swizzle(src0, Z, X, Y, ONE),
+ swizzle(src1, Y, Z, X, ONE), 0);
+
+ i915_emit_arith(p,
+ A0_MAD,
+ get_result_vector(p, inst),
+ get_result_flags(inst), 0,
+ swizzle(src0, Y, Z, X, ONE),
+ swizzle(src1, Z, X, Y, ONE),
+ negate(tmp, 1, 1, 1, 0));
+ break;
+
+ case OPCODE_END:
+ return;
+
+ default:
+ i915_program_error(p, "bad opcode");
+ return;
+ }
+
+ inst++;
+ i915_release_utemps(p);
+ }
+}
+
+/* Rather than trying to intercept and jiggle depth writes during
+ * emit, just move the value into its correct position at the end of
+ * the program:
+ */
+static void
+fixup_depth_write(struct i915_fragment_program *p)
+{
+ if (p->depth_written) {
+ GLuint depth = UREG(REG_TYPE_OD, 0);
+
+ i915_emit_arith(p,
+ A0_MOV,
+ depth, A0_DEST_CHANNEL_W, 0,
+ swizzle(depth, X, Y, Z, Z), 0, 0);
+ }
+}
+
+
+#define FRAG_BIT_TEX(n) (FRAG_BIT_TEX0 << (n))
+
+
+static void
+check_wpos(struct i915_fragment_program *p)
+{
+ GLuint inputs = p->FragProg.Base.InputsRead;
+ GLint i;
+
+ p->wpos_tex = -1;
+
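+ /* The hardware has no dedicated WPOS input, so pick the first texture
+  * coordinate unit the program doesn't read and have the vertex setup
+  * replicate the window position there (see i915ValidateFragmentProgram).
+  */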
+ for (i = 0; i < p->ctx->Const.MaxTextureCoordUnits; i++) {
+ if (inputs & FRAG_BIT_TEX(i))
+ continue;
+ else if (inputs & FRAG_BIT_WPOS) {
+ p->wpos_tex = i;
+ inputs &= ~FRAG_BIT_WPOS;
+ }
+ }
+
+ if (inputs & FRAG_BIT_WPOS) {
+ i915_program_error(p, "No free texcoord for wpos value");
+ }
+}
+
+
+static void
+translate_program(struct i915_fragment_program *p)
+{
+ struct i915_context *i915 = I915_CONTEXT(p->ctx);
+
+ i915_init_program(i915, p);
+ check_wpos(p);
+ upload_program(p);
+ fixup_depth_write(p);
+ i915_fini_program(p);
+
+ p->translated = 1;
+}
+
+
+static void
+track_params(struct i915_fragment_program *p)
+{
+ GLint i;
+
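+ /* Refresh any state-tracked parameters, then copy their current values
+  * into the constant slots recorded when the program was translated.
+  */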
+ if (p->nr_params)
+ _mesa_load_state_parameters(p->ctx, p->FragProg.Base.Parameters);
+
+ for (i = 0; i < p->nr_params; i++) {
+ GLint reg = p->param[i].reg;
+ COPY_4V(p->constant[reg], p->param[i].values);
+ }
+
+ p->params_uptodate = 1;
+ p->on_hardware = 0; /* overkill */
+}
+
+
+static void
+i915BindProgram(GLcontext * ctx, GLenum target, struct gl_program *prog)
+{
+ if (target == GL_FRAGMENT_PROGRAM_ARB) {
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ struct i915_fragment_program *p = (struct i915_fragment_program *) prog;
+
+ if (i915->current_program == p)
+ return;
+
+ if (i915->current_program) {
+ i915->current_program->on_hardware = 0;
+ i915->current_program->params_uptodate = 0;
+ }
+
+ i915->current_program = p;
+
+ assert(p->on_hardware == 0);
+ assert(p->params_uptodate == 0);
+
+ /* Hack: make sure fog is correctly enabled according to this
+ * fragment program's fog options.
+ */
+ ctx->Driver.Enable(ctx, GL_FRAGMENT_PROGRAM_ARB,
+ ctx->FragmentProgram.Enabled);
+ }
+}
+
+static struct gl_program *
+i915NewProgram(GLcontext * ctx, GLenum target, GLuint id)
+{
+ switch (target) {
+ case GL_VERTEX_PROGRAM_ARB:
+ return _mesa_init_vertex_program(ctx, CALLOC_STRUCT(gl_vertex_program),
+ target, id);
+
+ case GL_FRAGMENT_PROGRAM_ARB:{
+ struct i915_fragment_program *prog =
+ CALLOC_STRUCT(i915_fragment_program);
+ if (prog) {
+ i915_init_program(I915_CONTEXT(ctx), prog);
+
+ return _mesa_init_fragment_program(ctx, &prog->FragProg,
+ target, id);
+ }
+ else
+ return NULL;
+ }
+
+ default:
+ /* Just fallback:
+ */
+ return _mesa_new_program(ctx, target, id);
+ }
+}
+
+static void
+i915DeleteProgram(GLcontext * ctx, struct gl_program *prog)
+{
+ if (prog->Target == GL_FRAGMENT_PROGRAM_ARB) {
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ struct i915_fragment_program *p = (struct i915_fragment_program *) prog;
+
+ if (i915->current_program == p)
+ i915->current_program = 0;
+ }
+
+ _mesa_delete_program(ctx, prog);
+}
+
+
+static GLboolean
+i915IsProgramNative(GLcontext * ctx, GLenum target, struct gl_program *prog)
+{
+ if (target == GL_FRAGMENT_PROGRAM_ARB) {
+ struct i915_fragment_program *p = (struct i915_fragment_program *) prog;
+
+ if (!p->translated)
+ translate_program(p);
+
+ return !p->error;
+ }
+ else
+ return GL_TRUE;
+}
+
+static void
+i915ProgramStringNotify(GLcontext * ctx,
+ GLenum target, struct gl_program *prog)
+{
+ if (target == GL_FRAGMENT_PROGRAM_ARB) {
+ struct i915_fragment_program *p = (struct i915_fragment_program *) prog;
+ p->translated = 0;
+
+ /* Hack: make sure fog is correctly enabled according to this
+ * fragment program's fog options.
+ */
+ ctx->Driver.Enable(ctx, GL_FRAGMENT_PROGRAM_ARB,
+ ctx->FragmentProgram.Enabled);
+ }
+
+ _tnl_program_string(ctx, target, prog);
+}
+
+
+void
+i915ValidateFragmentProgram(struct i915_context *i915)
+{
+ GLcontext *ctx = &i915->intel.ctx;
+ struct intel_context *intel = intel_context(ctx);
+ TNLcontext *tnl = TNL_CONTEXT(ctx);
+ struct vertex_buffer *VB = &tnl->vb;
+
+ struct i915_fragment_program *p =
+ (struct i915_fragment_program *) ctx->FragmentProgram._Current;
+
+ const GLuint inputsRead = p->FragProg.Base.InputsRead;
+ GLuint s4 = i915->state.Ctx[I915_CTXREG_LIS4] & ~S4_VFMT_MASK;
+ GLuint s2 = S2_TEXCOORD_NONE;
+ int i, offset = 0;
+
+ if (i915->current_program != p) {
+ if (i915->current_program) {
+ i915->current_program->on_hardware = 0;
+ i915->current_program->params_uptodate = 0;
+ }
+
+ i915->current_program = p;
+ }
+
+
+ /* Important:
+ */
+ VB->AttribPtr[VERT_ATTRIB_POS] = VB->NdcPtr;
+
+ if (!p->translated)
+ translate_program(p);
+
+ intel->vertex_attr_count = 0;
+ intel->wpos_offset = 0;
+ intel->wpos_size = 0;
+ intel->coloroffset = 0;
+ intel->specoffset = 0;
+
+ if (inputsRead & FRAG_BITS_TEX_ANY) {
+ EMIT_ATTR(_TNL_ATTRIB_POS, EMIT_4F_VIEWPORT, S4_VFMT_XYZW, 16);
+ }
+ else {
+ EMIT_ATTR(_TNL_ATTRIB_POS, EMIT_3F_VIEWPORT, S4_VFMT_XYZ, 12);
+ }
+
+ if (inputsRead & FRAG_BIT_COL0) {
+ intel->coloroffset = offset / 4;
+ EMIT_ATTR(_TNL_ATTRIB_COLOR0, EMIT_4UB_4F_BGRA, S4_VFMT_COLOR, 4);
+ }
+
+ if ((inputsRead & (FRAG_BIT_COL1 | FRAG_BIT_FOGC)) ||
+ i915->vertex_fog != I915_FOG_NONE) {
+
+ if (inputsRead & FRAG_BIT_COL1) {
+ intel->specoffset = offset / 4;
+ EMIT_ATTR(_TNL_ATTRIB_COLOR1, EMIT_3UB_3F_BGR, S4_VFMT_SPEC_FOG, 3);
+ }
+ else
+ EMIT_PAD(3);
+
+ if ((inputsRead & FRAG_BIT_FOGC) || i915->vertex_fog != I915_FOG_NONE)
+ EMIT_ATTR(_TNL_ATTRIB_FOG, EMIT_1UB_1F, S4_VFMT_SPEC_FOG, 1);
+ else
+ EMIT_PAD(1);
+ }
+
+#if 0
+ if ((inputsRead & FRAG_BIT_FOGC) || i915->vertex_fog != I915_FOG_NONE) {
+ EMIT_ATTR(_TNL_ATTRIB_FOG, EMIT_1F, S4_VFMT_FOG_PARAM, 4);
+ }
+#endif
+
+ for (i = 0; i < p->ctx->Const.MaxTextureCoordUnits; i++) {
+ if (inputsRead & FRAG_BIT_TEX(i)) {
+ int sz = VB->TexCoordPtr[i]->size;
+
+ s2 &= ~S2_TEXCOORD_FMT(i, S2_TEXCOORD_FMT0_MASK);
+ s2 |= S2_TEXCOORD_FMT(i, SZ_TO_HW(sz));
+
+ EMIT_ATTR(_TNL_ATTRIB_TEX0 + i, EMIT_SZ(sz), 0, sz * 4);
+ }
+ else if (i == p->wpos_tex) {
+
+ /* If WPOS is required, duplicate the XYZ position data in an
+ * unused texture coordinate:
+ */
+ s2 &= ~S2_TEXCOORD_FMT(i, S2_TEXCOORD_FMT0_MASK);
+ s2 |= S2_TEXCOORD_FMT(i, SZ_TO_HW(3));
+
+ intel->wpos_offset = offset;
+ intel->wpos_size = 3 * sizeof(GLuint);
+
+ EMIT_PAD(intel->wpos_size);
+ }
+ }
+
+ if (s2 != i915->state.Ctx[I915_CTXREG_LIS2] ||
+ s4 != i915->state.Ctx[I915_CTXREG_LIS4]) {
+ int k;
+
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+
+ /* Must do this *after* statechange, so as not to affect
+ * buffered vertices reliant on the old state:
+ */
+ intel->vertex_size = _tnl_install_attrs(&intel->ctx,
+ intel->vertex_attrs,
+ intel->vertex_attr_count,
+ intel->ViewportMatrix.m, 0);
+
+ intel->vertex_size >>= 2;
+
+ i915->state.Ctx[I915_CTXREG_LIS2] = s2;
+ i915->state.Ctx[I915_CTXREG_LIS4] = s4;
+
+ k = intel->vtbl.check_vertex_size(intel, intel->vertex_size);
+ assert(k);
+ }
+
+ if (!p->params_uptodate)
+ track_params(p);
+
+ if (!p->on_hardware)
+ i915_upload_program(i915, p);
+}
+
+void
+i915InitFragProgFuncs(struct dd_function_table *functions)
+{
+ functions->BindProgram = i915BindProgram;
+ functions->NewProgram = i915NewProgram;
+ functions->DeleteProgram = i915DeleteProgram;
+ functions->IsProgramNative = i915IsProgramNative;
+ functions->ProgramStringNotify = i915ProgramStringNotify;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "enums.h"
+#include "mtypes.h"
+#include "macros.h"
+#include "utils.h"
+
+#include "intel_screen.h"
+#include "intel_batchbuffer.h"
+#include "intel_ioctl.h"
+#include "intel_regions.h"
+#include "intel_rotate.h"
+
+#include "i915_context.h"
+#include "i915_reg.h"
+
+/* We touch almost everything:
+ */
+#define ACTIVE (I915_UPLOAD_INVARIENT | \
+ I915_UPLOAD_CTX | \
+ I915_UPLOAD_BUFFERS | \
+ I915_UPLOAD_STIPPLE | \
+ I915_UPLOAD_PROGRAM | \
+ I915_UPLOAD_FOG | \
+ I915_UPLOAD_TEX(0))
+
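+/* Switch which hardware state block gets emitted next (normal GL state
+ * vs. the metaops state), forcing the ACTIVE groups to be re-emitted for
+ * both the outgoing and the incoming block.
+ */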
+#define SET_STATE( i915, STATE ) \
+do { \
+ i915->current->emitted &= ~ACTIVE; \
+ i915->current = &i915->STATE; \
+ i915->current->emitted &= ~ACTIVE; \
+} while (0)
+
+
+static void
+meta_no_stencil_write(struct intel_context *intel)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+
+ /* ctx->Driver.Enable( ctx, GL_STENCIL_TEST, GL_FALSE )
+ */
+ i915->meta.Ctx[I915_CTXREG_LIS5] &= ~(S5_STENCIL_TEST_ENABLE |
+ S5_STENCIL_WRITE_ENABLE);
+
+ i915->meta.emitted &= ~I915_UPLOAD_CTX;
+}
+
+static void
+meta_no_depth_write(struct intel_context *intel)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+
+ /* ctx->Driver.Enable( ctx, GL_DEPTH_TEST, GL_FALSE )
+ */
+ i915->meta.Ctx[I915_CTXREG_LIS6] &= ~(S6_DEPTH_TEST_ENABLE |
+ S6_DEPTH_WRITE_ENABLE);
+
+ i915->meta.emitted &= ~I915_UPLOAD_CTX;
+}
+
+static void
+meta_depth_replace(struct intel_context *intel)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+
+ /* ctx->Driver.Enable( ctx, GL_DEPTH_TEST, GL_TRUE )
+ * ctx->Driver.DepthMask( ctx, GL_TRUE )
+ */
+ i915->meta.Ctx[I915_CTXREG_LIS6] |= (S6_DEPTH_TEST_ENABLE |
+ S6_DEPTH_WRITE_ENABLE);
+
+ /* ctx->Driver.DepthFunc( ctx, GL_REPLACE )
+ */
+ i915->meta.Ctx[I915_CTXREG_LIS6] &= ~S6_DEPTH_TEST_FUNC_MASK;
+ i915->meta.Ctx[I915_CTXREG_LIS6] |=
+ COMPAREFUNC_ALWAYS << S6_DEPTH_TEST_FUNC_SHIFT;
+
+ i915->meta.emitted &= ~I915_UPLOAD_CTX;
+}
+
+
+/* Set stencil unit to replace always with the reference value.
+ */
+static void
+meta_stencil_replace(struct intel_context *intel,
+ GLuint s_mask, GLuint s_clear)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+ GLuint op = STENCILOP_REPLACE;
+ GLuint func = COMPAREFUNC_ALWAYS;
+
+ /* ctx->Driver.Enable( ctx, GL_STENCIL_TEST, GL_TRUE )
+ */
+ i915->meta.Ctx[I915_CTXREG_LIS5] |= (S5_STENCIL_TEST_ENABLE |
+ S5_STENCIL_WRITE_ENABLE);
+
+ /* ctx->Driver.StencilMask( ctx, s_mask )
+ */
+ i915->meta.Ctx[I915_CTXREG_STATE4] &= ~MODE4_ENABLE_STENCIL_WRITE_MASK;
+
+ i915->meta.Ctx[I915_CTXREG_STATE4] |= (ENABLE_STENCIL_WRITE_MASK |
+ STENCIL_WRITE_MASK(s_mask));
+
+ /* ctx->Driver.StencilOp( ctx, GL_REPLACE, GL_REPLACE, GL_REPLACE )
+ */
+ i915->meta.Ctx[I915_CTXREG_LIS5] &= ~(S5_STENCIL_FAIL_MASK |
+ S5_STENCIL_PASS_Z_FAIL_MASK |
+ S5_STENCIL_PASS_Z_PASS_MASK);
+
+ i915->meta.Ctx[I915_CTXREG_LIS5] |= ((op << S5_STENCIL_FAIL_SHIFT) |
+ (op << S5_STENCIL_PASS_Z_FAIL_SHIFT) |
+ (op << S5_STENCIL_PASS_Z_PASS_SHIFT));
+
+
+ /* ctx->Driver.StencilFunc( ctx, GL_ALWAYS, s_ref, ~0 )
+ */
+ i915->meta.Ctx[I915_CTXREG_STATE4] &= ~MODE4_ENABLE_STENCIL_TEST_MASK;
+ i915->meta.Ctx[I915_CTXREG_STATE4] |= (ENABLE_STENCIL_TEST_MASK |
+ STENCIL_TEST_MASK(0xff));
+
+ i915->meta.Ctx[I915_CTXREG_LIS5] &= ~(S5_STENCIL_REF_MASK |
+ S5_STENCIL_TEST_FUNC_MASK);
+
+ i915->meta.Ctx[I915_CTXREG_LIS5] |= ((s_clear << S5_STENCIL_REF_SHIFT) |
+ (func << S5_STENCIL_TEST_FUNC_SHIFT));
+
+
+ i915->meta.emitted &= ~I915_UPLOAD_CTX;
+}
+
+
+static void
+meta_color_mask(struct intel_context *intel, GLboolean state)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+ const GLuint mask = (S5_WRITEDISABLE_RED |
+ S5_WRITEDISABLE_GREEN |
+ S5_WRITEDISABLE_BLUE | S5_WRITEDISABLE_ALPHA);
+
+ /* Copy colormask state from "regular" hw context.
+ */
+ if (state) {
+ i915->meta.Ctx[I915_CTXREG_LIS5] &= ~mask;
+ i915->meta.Ctx[I915_CTXREG_LIS5] |=
+ (i915->state.Ctx[I915_CTXREG_LIS5] & mask);
+ }
+ else
+ i915->meta.Ctx[I915_CTXREG_LIS5] |= mask;
+
+ i915->meta.emitted &= ~I915_UPLOAD_CTX;
+}
+
+
+
+static void
+meta_import_pixel_state(struct intel_context *intel)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+ memcpy(i915->meta.Fog, i915->state.Fog, I915_FOG_SETUP_SIZE * 4);
+
+ i915->meta.Ctx[I915_CTXREG_LIS5] = i915->state.Ctx[I915_CTXREG_LIS5];
+ i915->meta.Ctx[I915_CTXREG_LIS6] = i915->state.Ctx[I915_CTXREG_LIS6];
+ i915->meta.Ctx[I915_CTXREG_STATE4] = i915->state.Ctx[I915_CTXREG_STATE4];
+ i915->meta.Ctx[I915_CTXREG_BLENDCOLOR1] =
+ i915->state.Ctx[I915_CTXREG_BLENDCOLOR1];
+ i915->meta.Ctx[I915_CTXREG_IAB] = i915->state.Ctx[I915_CTXREG_IAB];
+
+ i915->meta.Buffer[I915_DESTREG_SENABLE] =
+ i915->state.Buffer[I915_DESTREG_SENABLE];
+ i915->meta.Buffer[I915_DESTREG_SR1] = i915->state.Buffer[I915_DESTREG_SR1];
+ i915->meta.Buffer[I915_DESTREG_SR2] = i915->state.Buffer[I915_DESTREG_SR2];
+
+ i915->meta.emitted &= ~I915_UPLOAD_FOG;
+ i915->meta.emitted &= ~I915_UPLOAD_BUFFERS;
+ i915->meta.emitted &= ~I915_UPLOAD_CTX;
+}
+
+
+
+
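+/* Shorthand used to hand-assemble the small metaops fragment programs
+ * below: REG() packs a register type and number, and the remaining macros
+ * shift register numbers, samplers and swizzles into their instruction
+ * dword fields.
+ */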
+#define REG( type, nr ) (((type)<<5)|(nr))
+
+#define REG_R(x) REG(REG_TYPE_R, x)
+#define REG_T(x) REG(REG_TYPE_T, x)
+#define REG_CONST(x) REG(REG_TYPE_CONST, x)
+#define REG_S(x) REG(REG_TYPE_S, x)
+#define REG_OC REG(REG_TYPE_OC, 0)
+#define REG_OD REG(REG_TYPE_OD, 0)
+#define REG_U(x) REG(REG_TYPE_U, x)
+
+#define REG_T_DIFFUSE REG(REG_TYPE_T, T_DIFFUSE)
+#define REG_T_SPECULAR REG(REG_TYPE_T, T_SPECULAR)
+#define REG_T_FOG_W REG(REG_TYPE_T, T_FOG_W)
+#define REG_T_TEX(x) REG(REG_TYPE_T, x)
+
+
+#define A0_DEST_REG( reg ) ( (reg) << A0_DEST_NR_SHIFT )
+#define A0_SRC0_REG( reg ) ( (reg) << A0_SRC0_NR_SHIFT )
+#define A1_SRC1_REG( reg ) ( (reg) << A1_SRC1_NR_SHIFT )
+#define A1_SRC2_REG( reg ) ( (reg) << A1_SRC2_NR_SHIFT )
+#define A2_SRC2_REG( reg ) ( (reg) << A2_SRC2_NR_SHIFT )
+#define D0_DECL_REG( reg ) ( (reg) << D0_NR_SHIFT )
+#define T0_DEST_REG( reg ) ( (reg) << T0_DEST_NR_SHIFT )
+
+#define T0_SAMPLER( unit ) ((unit)<<T0_SAMPLER_NR_SHIFT)
+
+#define T1_ADDRESS_REG( type, nr ) (((type)<<T1_ADDRESS_REG_TYPE_SHIFT)| \
+ ((nr)<<T1_ADDRESS_REG_NR_SHIFT))
+
+
+#define A1_SRC0_XYZW ((SRC_X << A1_SRC0_CHANNEL_X_SHIFT) | \
+ (SRC_Y << A1_SRC0_CHANNEL_Y_SHIFT) | \
+ (SRC_Z << A1_SRC0_CHANNEL_Z_SHIFT) | \
+ (SRC_W << A1_SRC0_CHANNEL_W_SHIFT))
+
+#define A1_SRC1_XY ((SRC_X << A1_SRC1_CHANNEL_X_SHIFT) | \
+ (SRC_Y << A1_SRC1_CHANNEL_Y_SHIFT))
+
+#define A2_SRC1_ZW ((SRC_Z << A2_SRC1_CHANNEL_Z_SHIFT) | \
+ (SRC_W << A2_SRC1_CHANNEL_W_SHIFT))
+
+#define A2_SRC2_XYZW ((SRC_X << A2_SRC2_CHANNEL_X_SHIFT) | \
+ (SRC_Y << A2_SRC2_CHANNEL_Y_SHIFT) | \
+ (SRC_Z << A2_SRC2_CHANNEL_Z_SHIFT) | \
+ (SRC_W << A2_SRC2_CHANNEL_W_SHIFT))
+
+
+
+
+
+static void
+meta_no_texture(struct intel_context *intel)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+
+ static const GLuint prog[] = {
+ _3DSTATE_PIXEL_SHADER_PROGRAM,
+
+ /* Declare incoming diffuse color:
+ */
+ (D0_DCL | D0_DECL_REG(REG_T_DIFFUSE) | D0_CHANNEL_ALL),
+ D1_MBZ,
+ D2_MBZ,
+
+ /* output-color = mov(t_diffuse)
+ */
+ (A0_MOV |
+ A0_DEST_REG(REG_OC) |
+ A0_DEST_CHANNEL_ALL | A0_SRC0_REG(REG_T_DIFFUSE)),
+ (A1_SRC0_XYZW),
+ 0,
+ };
+
+
+ memcpy(i915->meta.Program, prog, sizeof(prog));
+ i915->meta.ProgramSize = sizeof(prog) / sizeof(*prog);
+ i915->meta.Program[0] |= i915->meta.ProgramSize - 2;
+ i915->meta.emitted &= ~I915_UPLOAD_PROGRAM;
+}
+
+static void
+meta_texture_blend_replace(struct intel_context *intel)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+
+ static const GLuint prog[] = {
+ _3DSTATE_PIXEL_SHADER_PROGRAM,
+
+ /* Declare the sampler:
+ */
+ (D0_DCL | D0_DECL_REG(REG_S(0)) | D0_SAMPLE_TYPE_2D | D0_CHANNEL_NONE),
+ D1_MBZ,
+ D2_MBZ,
+
+ /* Declare the interpolated texture coordinate:
+ */
+ (D0_DCL | D0_DECL_REG(REG_T_TEX(0)) | D0_CHANNEL_ALL),
+ D1_MBZ,
+ D2_MBZ,
+
+ /* output-color = texld(sample0, texcoord0)
+ */
+ (T0_TEXLD | T0_DEST_REG(REG_OC) | T0_SAMPLER(0)),
+ T1_ADDRESS_REG(REG_TYPE_T, 0),
+ T2_MBZ
+ };
+
+ memcpy(i915->meta.Program, prog, sizeof(prog));
+ i915->meta.ProgramSize = sizeof(prog) / sizeof(*prog);
+ i915->meta.Program[0] |= i915->meta.ProgramSize - 2;
+ i915->meta.emitted &= ~I915_UPLOAD_PROGRAM;
+}
+
+
+
+
+
+/* Set up an arbitrary piece of memory as a rectangular texture
+ * (including the front or back buffer).
+ */
+static GLboolean
+meta_tex_rect_source(struct intel_context *intel,
+ struct _DriBufferObject *buffer,
+ GLuint offset,
+ GLuint pitch, GLuint height, GLenum format, GLenum type)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+ GLuint unit = 0;
+ GLint numLevels = 1;
+ GLuint *state = i915->meta.Tex[0];
+ GLuint textureFormat;
+ GLuint cpp;
+
+ /* A full implementation of this would do the upload through
+ * glTexImage2D, and get all the conversion operations at that
+ * point. We are more restricted here, but at least still have access
+ * to the fragment program swizzle.
+ */
+ switch (format) {
+ case GL_BGRA:
+ switch (type) {
+ case GL_UNSIGNED_INT_8_8_8_8_REV:
+ case GL_UNSIGNED_BYTE:
+ textureFormat = (MAPSURF_32BIT | MT_32BIT_ARGB8888);
+ cpp = 4;
+ break;
+ default:
+ return GL_FALSE;
+ }
+ break;
+ case GL_RGBA:
+ switch (type) {
+ case GL_UNSIGNED_INT_8_8_8_8_REV:
+ case GL_UNSIGNED_BYTE:
+ textureFormat = (MAPSURF_32BIT | MT_32BIT_ABGR8888);
+ cpp = 4;
+ break;
+ default:
+ return GL_FALSE;
+ }
+ break;
+ case GL_BGR:
+ switch (type) {
+ case GL_UNSIGNED_SHORT_5_6_5_REV:
+ textureFormat = (MAPSURF_16BIT | MT_16BIT_RGB565);
+ cpp = 2;
+ break;
+ default:
+ return GL_FALSE;
+ }
+ break;
+ case GL_RGB:
+ switch (type) {
+ case GL_UNSIGNED_SHORT_5_6_5:
+ textureFormat = (MAPSURF_16BIT | MT_16BIT_RGB565);
+ cpp = 2;
+ break;
+ default:
+ return GL_FALSE;
+ }
+ break;
+
+ default:
+ return GL_FALSE;
+ }
+
+
+ if ((pitch * cpp) & 3) {
+ _mesa_printf("%s: texture pitch is not dword aligned\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+/* intel_region_release(&i915->meta.tex_region[0]); */
+/* intel_region_reference(&i915->meta.tex_region[0], region); */
+ i915->meta.tex_buffer[0] = buffer;
+ i915->meta.tex_offset[0] = offset;
+
+ state[I915_TEXREG_MS3] = (((height - 1) << MS3_HEIGHT_SHIFT) |
+ ((pitch - 1) << MS3_WIDTH_SHIFT) |
+ textureFormat | MS3_USE_FENCE_REGS);
+
+ state[I915_TEXREG_MS4] = (((((pitch * cpp) / 4) - 1) << MS4_PITCH_SHIFT) |
+ MS4_CUBE_FACE_ENA_MASK |
+ ((((numLevels - 1) * 4)) << MS4_MAX_LOD_SHIFT));
+
+ state[I915_TEXREG_SS2] = ((FILTER_NEAREST << SS2_MIN_FILTER_SHIFT) |
+ (MIPFILTER_NONE << SS2_MIP_FILTER_SHIFT) |
+ (FILTER_NEAREST << SS2_MAG_FILTER_SHIFT));
+
+ state[I915_TEXREG_SS3] = ((TEXCOORDMODE_WRAP << SS3_TCX_ADDR_MODE_SHIFT) |
+ (TEXCOORDMODE_WRAP << SS3_TCY_ADDR_MODE_SHIFT) |
+ (TEXCOORDMODE_WRAP << SS3_TCZ_ADDR_MODE_SHIFT) |
+ (unit << SS3_TEXTUREMAP_INDEX_SHIFT));
+
+ state[I915_TEXREG_SS4] = 0;
+
+ i915->meta.emitted &= ~I915_UPLOAD_TEX(0);
+ return GL_TRUE;
+}
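
A minimal usage sketch of the helper above (illustrative, not part of the patch; the "region" variable and its buffer/pitch/height fields are assumptions modelled on struct intel_region): bind an ARGB8888 color region as the rectangle-texture source, falling back when the format/type pair is not handled by the switch above.

   if (!meta_tex_rect_source(intel, region->buffer, 0,
                             region->pitch, region->height,
                             GL_BGRA, GL_UNSIGNED_BYTE)) {
      /* unsupported format/type -- take a software fallback path */
   }
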
+
+
+/**
+ * Set the color and depth drawing region for meta ops.
+ */
+static void
+meta_draw_region(struct intel_context *intel,
+ struct intel_region *color_region,
+ struct intel_region *depth_region)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+ i915_state_draw_region(intel, &i915->meta, color_region, depth_region);
+}
+
+
+static void
+set_vertex_format(struct intel_context *intel)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+
+ i915->meta.Ctx[I915_CTXREG_LIS2] =
+ (S2_TEXCOORD_FMT(0, TEXCOORDFMT_2D) |
+ S2_TEXCOORD_FMT(1, TEXCOORDFMT_NOT_PRESENT) |
+ S2_TEXCOORD_FMT(2, TEXCOORDFMT_NOT_PRESENT) |
+ S2_TEXCOORD_FMT(3, TEXCOORDFMT_NOT_PRESENT) |
+ S2_TEXCOORD_FMT(4, TEXCOORDFMT_NOT_PRESENT) |
+ S2_TEXCOORD_FMT(5, TEXCOORDFMT_NOT_PRESENT) |
+ S2_TEXCOORD_FMT(6, TEXCOORDFMT_NOT_PRESENT) |
+ S2_TEXCOORD_FMT(7, TEXCOORDFMT_NOT_PRESENT));
+
+ i915->meta.Ctx[I915_CTXREG_LIS4] &= ~S4_VFMT_MASK;
+
+ i915->meta.Ctx[I915_CTXREG_LIS4] |= (S4_VFMT_COLOR | S4_VFMT_XYZ);
+
+ i915->meta.emitted &= ~I915_UPLOAD_CTX;
+}
+
+
+
+/* Operations where the 3D engine is decoupled temporarily from the
+ * current GL state and used for purposes other than simply rendering
+ * incoming triangles.
+ */
+static void
+install_meta_state(struct intel_context *intel)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+ memcpy(&i915->meta, &i915->initial, sizeof(i915->meta));
+ i915->meta.active = ACTIVE;
+ i915->meta.emitted = 0;
+
+ SET_STATE(i915, meta);
+ set_vertex_format(intel);
+ meta_no_texture(intel);
+}
+
+static void
+leave_meta_state(struct intel_context *intel)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+ intel_region_release(&i915->meta.draw_region);
+ intel_region_release(&i915->meta.depth_region);
+/* intel_region_release(&i915->meta.tex_region[0]); */
+ SET_STATE(i915, state);
+}
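
A sketch of how a caller would typically bracket a meta operation through the vtbl hooks installed below (the exact call sequence and the color_region/depth_region variables are assumptions; only the install/leave pairing and the hook names come from this code):

   intel->vtbl.install_meta_state(intel);
   intel->vtbl.meta_no_texture(intel);
   intel->vtbl.meta_color_mask(intel, GL_TRUE);       /* honour glColorMask */
   intel->vtbl.meta_draw_region(intel, color_region, depth_region);
   /* ... emit RECTLIST vertices for the clear/blit here ... */
   intel->vtbl.leave_meta_state(intel);
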
+
+
+
+void
+i915InitMetaFuncs(struct i915_context *i915)
+{
+ i915->intel.vtbl.install_meta_state = install_meta_state;
+ i915->intel.vtbl.leave_meta_state = leave_meta_state;
+ i915->intel.vtbl.meta_no_depth_write = meta_no_depth_write;
+ i915->intel.vtbl.meta_no_stencil_write = meta_no_stencil_write;
+ i915->intel.vtbl.meta_stencil_replace = meta_stencil_replace;
+ i915->intel.vtbl.meta_depth_replace = meta_depth_replace;
+ i915->intel.vtbl.meta_color_mask = meta_color_mask;
+ i915->intel.vtbl.meta_no_texture = meta_no_texture;
+ i915->intel.vtbl.meta_texture_blend_replace = meta_texture_blend_replace;
+ i915->intel.vtbl.meta_tex_rect_source = meta_tex_rect_source;
+ i915->intel.vtbl.meta_draw_region = meta_draw_region;
+ i915->intel.vtbl.meta_import_pixel_state = meta_import_pixel_state;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <strings.h>
+
+#include "glheader.h"
+#include "macros.h"
+#include "enums.h"
+
+#include "tnl/t_context.h"
+#include "intel_batchbuffer.h"
+
+#include "i915_reg.h"
+#include "i915_context.h"
+#include "i915_program.h"
+
+
+#define A0_DEST( reg ) (((reg)&UREG_TYPE_NR_MASK)>>UREG_A0_DEST_SHIFT_LEFT)
+#define D0_DEST( reg ) (((reg)&UREG_TYPE_NR_MASK)>>UREG_A0_DEST_SHIFT_LEFT)
+#define T0_DEST( reg ) (((reg)&UREG_TYPE_NR_MASK)>>UREG_A0_DEST_SHIFT_LEFT)
+#define A0_SRC0( reg ) (((reg)&UREG_MASK)>>UREG_A0_SRC0_SHIFT_LEFT)
+#define A1_SRC0( reg ) (((reg)&UREG_MASK)<<UREG_A1_SRC0_SHIFT_RIGHT)
+#define A1_SRC1( reg ) (((reg)&UREG_MASK)>>UREG_A1_SRC1_SHIFT_LEFT)
+#define A2_SRC1( reg ) (((reg)&UREG_MASK)<<UREG_A2_SRC1_SHIFT_RIGHT)
+#define A2_SRC2( reg ) (((reg)&UREG_MASK)>>UREG_A2_SRC2_SHIFT_LEFT)
+
+/* These are special, and don't have swizzle/negate bits.
+ */
+#define T0_SAMPLER( reg ) (GET_UREG_NR(reg)<<T0_SAMPLER_NR_SHIFT)
+#define T1_ADDRESS_REG( reg ) ((GET_UREG_NR(reg)<<T1_ADDRESS_REG_NR_SHIFT) | \
+ (GET_UREG_TYPE(reg)<<T1_ADDRESS_REG_TYPE_SHIFT))
+
+
+/* Macros for translating UREGs into the various register fields used
+ * by the I915 programmable unit.
+ */
+#define UREG_A0_DEST_SHIFT_LEFT (UREG_TYPE_SHIFT - A0_DEST_TYPE_SHIFT)
+#define UREG_A0_SRC0_SHIFT_LEFT (UREG_TYPE_SHIFT - A0_SRC0_TYPE_SHIFT)
+#define UREG_A1_SRC0_SHIFT_RIGHT (A1_SRC0_CHANNEL_W_SHIFT - UREG_CHANNEL_W_SHIFT)
+#define UREG_A1_SRC1_SHIFT_LEFT (UREG_TYPE_SHIFT - A1_SRC1_TYPE_SHIFT)
+#define UREG_A2_SRC1_SHIFT_RIGHT (A2_SRC1_CHANNEL_W_SHIFT - UREG_CHANNEL_W_SHIFT)
+#define UREG_A2_SRC2_SHIFT_LEFT (UREG_TYPE_SHIFT - A2_SRC2_TYPE_SHIFT)
+
+#define UREG_MASK 0xffffff00
+#define UREG_TYPE_NR_MASK ((REG_TYPE_MASK << UREG_TYPE_SHIFT) | \
+ (REG_NR_MASK << UREG_NR_SHIFT))
+
+
+#define I915_CONSTFLAG_PARAM 0x1f
+
+GLuint
+i915_get_temp(struct i915_fragment_program *p)
+{
+ int bit = ffs(~p->temp_flag);
+ if (!bit) {
+ fprintf(stderr, "%s: out of temporaries\n", __FILE__);
+ exit(1);
+ }
+
+ p->temp_flag |= 1 << (bit - 1);
+ return UREG(REG_TYPE_R, (bit - 1));
+}
+
+
+GLuint
+i915_get_utemp(struct i915_fragment_program * p)
+{
+ int bit = ffs(~p->utemp_flag);
+ if (!bit) {
+ fprintf(stderr, "%s: out of temporaries\n", __FILE__);
+ exit(1);
+ }
+
+ p->utemp_flag |= 1 << (bit - 1);
+ return UREG(REG_TYPE_U, (bit - 1));
+}
+
+void
+i915_release_utemps(struct i915_fragment_program *p)
+{
+ p->utemp_flag = ~0x7;
+}
+
+
+GLuint
+i915_emit_decl(struct i915_fragment_program *p,
+ GLuint type, GLuint nr, GLuint d0_flags)
+{
+ GLuint reg = UREG(type, nr);
+
+ if (type == REG_TYPE_T) {
+ if (p->decl_t & (1 << nr))
+ return reg;
+
+ p->decl_t |= (1 << nr);
+ }
+ else if (type == REG_TYPE_S) {
+ if (p->decl_s & (1 << nr))
+ return reg;
+
+ p->decl_s |= (1 << nr);
+ }
+ else
+ return reg;
+
+ *(p->decl++) = (D0_DCL | D0_DEST(reg) | d0_flags);
+ *(p->decl++) = D1_MBZ;
+ *(p->decl++) = D2_MBZ;
+
+ p->nr_decl_insn++;
+ return reg;
+}
+
+GLuint
+i915_emit_arith(struct i915_fragment_program * p,
+ GLuint op,
+ GLuint dest,
+ GLuint mask,
+ GLuint saturate, GLuint src0, GLuint src1, GLuint src2)
+{
+ GLuint c[3];
+ GLuint nr_const = 0;
+
+ assert(GET_UREG_TYPE(dest) != REG_TYPE_CONST);
+ dest = UREG(GET_UREG_TYPE(dest), GET_UREG_NR(dest));
+ assert(dest);
+
+ if (GET_UREG_TYPE(src0) == REG_TYPE_CONST)
+ c[nr_const++] = 0;
+ if (GET_UREG_TYPE(src1) == REG_TYPE_CONST)
+ c[nr_const++] = 1;
+ if (GET_UREG_TYPE(src2) == REG_TYPE_CONST)
+ c[nr_const++] = 2;
+
+ /* Recursively call this function to MOV additional const values
+ * into temporary registers. Use utemp registers for this -
+ * currently shouldn't be possible to run out, but keep an eye on
+ * this.
+ */
+ if (nr_const > 1) {
+ GLuint s[3], first, i, old_utemp_flag;
+
+ s[0] = src0;
+ s[1] = src1;
+ s[2] = src2;
+ old_utemp_flag = p->utemp_flag;
+
+ first = GET_UREG_NR(s[c[0]]);
+ for (i = 1; i < nr_const; i++) {
+ if (GET_UREG_NR(s[c[i]]) != first) {
+ GLuint tmp = i915_get_utemp(p);
+
+ i915_emit_arith(p, A0_MOV, tmp, A0_DEST_CHANNEL_ALL, 0,
+ s[c[i]], 0, 0);
+ s[c[i]] = tmp;
+ }
+ }
+
+ src0 = s[0];
+ src1 = s[1];
+ src2 = s[2];
+ p->utemp_flag = old_utemp_flag; /* restore */
+ }
+
+ *(p->csr++) = (op | A0_DEST(dest) | mask | saturate | A0_SRC0(src0));
+ *(p->csr++) = (A1_SRC0(src0) | A1_SRC1(src1));
+ *(p->csr++) = (A2_SRC1(src1) | A2_SRC2(src2));
+
+ p->nr_alu_insn++;
+ return dest;
+}
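
A small sketch of the single-constant restriction handled above (illustrative only; assumes a freshly initialized struct i915_fragment_program *p). A MAD whose sources name two different constant registers causes one of them to be copied into a utemp by an extra MOV before the MAD itself is emitted:

   GLuint c0  = i915_emit_const1f(p, 0.5);                  /* lands in CONST 0 */
   GLuint c1  = i915_emit_const4f(p, 0.0, 0.25, 0.5, 1.0);  /* lands in CONST 1 */
   GLuint dst = i915_emit_arith(p, A0_MAD,
                                i915_get_temp(p), A0_DEST_CHANNEL_ALL, 0,
                                c0, UREG(REG_TYPE_T, T_DIFFUSE), c1);
   /* emitted: MOV utemp, CONST1; MAD dst, CONST0, t_diffuse, utemp */
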
+
+GLuint i915_emit_texld( struct i915_fragment_program *p,
+ GLuint dest,
+ GLuint destmask,
+ GLuint sampler,
+ GLuint coord,
+ GLuint op )
+{
+ if (coord != UREG(GET_UREG_TYPE(coord), GET_UREG_NR(coord))) {
+ /* No real way to work around this in the general case - need to
+ * allocate and declare a new temporary register (a utemp won't
+ * do). Fall back for now.
+ */
+ i915_program_error(p, "Can't (yet) swizzle TEX arguments");
+ return 0;
+ }
+
+ /* Don't worry about saturate as we only support
+ */
+ if (destmask != A0_DEST_CHANNEL_ALL) {
+ GLuint tmp = i915_get_utemp(p);
+ i915_emit_texld( p, tmp, A0_DEST_CHANNEL_ALL, sampler, coord, op );
+ i915_emit_arith( p, A0_MOV, dest, destmask, 0, tmp, 0, 0 );
+ return dest;
+ }
+ else {
+ assert(GET_UREG_TYPE(dest) != REG_TYPE_CONST);
+ assert(dest == UREG(GET_UREG_TYPE(dest), GET_UREG_NR(dest)));
+
+ if (GET_UREG_TYPE(coord) != REG_TYPE_T) {
+ p->nr_tex_indirect++;
+ }
+
+ *(p->csr++) = (op |
+ T0_DEST( dest ) |
+ T0_SAMPLER( sampler ));
+
+ *(p->csr++) = T1_ADDRESS_REG( coord );
+ *(p->csr++) = T2_MBZ;
+
+ p->nr_tex_insn++;
+ return dest;
+ }
+}
+
+
+GLuint
+i915_emit_const1f(struct i915_fragment_program * p, GLfloat c0)
+{
+ GLint reg, idx;
+
+ if (c0 == 0.0)
+ return swizzle(UREG(REG_TYPE_R, 0), ZERO, ZERO, ZERO, ZERO);
+ if (c0 == 1.0)
+ return swizzle(UREG(REG_TYPE_R, 0), ONE, ONE, ONE, ONE);
+
+ for (reg = 0; reg < I915_MAX_CONSTANT; reg++) {
+ if (p->constant_flags[reg] == I915_CONSTFLAG_PARAM)
+ continue;
+ for (idx = 0; idx < 4; idx++) {
+ if (!(p->constant_flags[reg] & (1 << idx)) ||
+ p->constant[reg][idx] == c0) {
+ p->constant[reg][idx] = c0;
+ p->constant_flags[reg] |= 1 << idx;
+ if (reg + 1 > p->nr_constants)
+ p->nr_constants = reg + 1;
+ return swizzle(UREG(REG_TYPE_CONST, reg), idx, ZERO, ZERO, ONE);
+ }
+ }
+ }
+
+ fprintf(stderr, "%s: out of constants\n", __FUNCTION__);
+ p->error = 1;
+ return 0;
+}
+
+GLuint
+i915_emit_const2f(struct i915_fragment_program * p, GLfloat c0, GLfloat c1)
+{
+ GLint reg, idx;
+
+ if (c0 == 0.0)
+ return swizzle(i915_emit_const1f(p, c1), ZERO, X, Z, W);
+ if (c0 == 1.0)
+ return swizzle(i915_emit_const1f(p, c1), ONE, X, Z, W);
+
+ if (c1 == 0.0)
+ return swizzle(i915_emit_const1f(p, c0), X, ZERO, Z, W);
+ if (c1 == 1.0)
+ return swizzle(i915_emit_const1f(p, c0), X, ONE, Z, W);
+
+ for (reg = 0; reg < I915_MAX_CONSTANT; reg++) {
+ if (p->constant_flags[reg] == 0xf ||
+ p->constant_flags[reg] == I915_CONSTFLAG_PARAM)
+ continue;
+ for (idx = 0; idx < 3; idx++) {
+ if (!(p->constant_flags[reg] & (3 << idx))) {
+ p->constant[reg][idx] = c0;
+ p->constant[reg][idx + 1] = c1;
+ p->constant_flags[reg] |= 3 << idx;
+ if (reg + 1 > p->nr_constants)
+ p->nr_constants = reg + 1;
+ return swizzle(UREG(REG_TYPE_CONST, reg), idx, idx + 1, ZERO,
+ ONE);
+ }
+ }
+ }
+
+ fprintf(stderr, "%s: out of constants\n", __FUNCTION__);
+ p->error = 1;
+ return 0;
+}
+
+
+
+GLuint
+i915_emit_const4f(struct i915_fragment_program * p,
+ GLfloat c0, GLfloat c1, GLfloat c2, GLfloat c3)
+{
+ GLint reg;
+
+ for (reg = 0; reg < I915_MAX_CONSTANT; reg++) {
+ if (p->constant_flags[reg] == 0xf &&
+ p->constant[reg][0] == c0 &&
+ p->constant[reg][1] == c1 &&
+ p->constant[reg][2] == c2 && p->constant[reg][3] == c3) {
+ return UREG(REG_TYPE_CONST, reg);
+ }
+ else if (p->constant_flags[reg] == 0) {
+ p->constant[reg][0] = c0;
+ p->constant[reg][1] = c1;
+ p->constant[reg][2] = c2;
+ p->constant[reg][3] = c3;
+ p->constant_flags[reg] = 0xf;
+ if (reg + 1 > p->nr_constants)
+ p->nr_constants = reg + 1;
+ return UREG(REG_TYPE_CONST, reg);
+ }
+ }
+
+ fprintf(stderr, "%s: out of constants\n", __FUNCTION__);
+ p->error = 1;
+ return 0;
+}
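
A sketch of the channel packing performed by the scalar/vector emitters above (illustrative; assumes a fresh program). Identical scalar constants are folded into the same channel of one constant register, so both calls below return swizzles of the same CONST register:

   GLuint a = i915_emit_const1f(p, 2.0);   /* allocates CONST 0, channel x */
   GLuint b = i915_emit_const1f(p, 2.0);   /* finds 2.0 already present, reuses it */
   assert(GET_UREG_NR(a) == GET_UREG_NR(b));
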
+
+
+GLuint
+i915_emit_const4fv(struct i915_fragment_program * p, const GLfloat * c)
+{
+ return i915_emit_const4f(p, c[0], c[1], c[2], c[3]);
+}
+
+
+GLuint
+i915_emit_param4fv(struct i915_fragment_program * p, const GLfloat * values)
+{
+ GLint reg, i;
+
+ for (i = 0; i < p->nr_params; i++) {
+ if (p->param[i].values == values)
+ return UREG(REG_TYPE_CONST, p->param[i].reg);
+ }
+
+
+ for (reg = 0; reg < I915_MAX_CONSTANT; reg++) {
+ if (p->constant_flags[reg] == 0) {
+ p->constant_flags[reg] = I915_CONSTFLAG_PARAM;
+ i = p->nr_params++;
+
+ p->param[i].values = values;
+ p->param[i].reg = reg;
+ p->params_uptodate = 0;
+
+ if (reg + 1 > p->nr_constants)
+ p->nr_constants = reg + 1;
+ return UREG(REG_TYPE_CONST, reg);
+ }
+ }
+
+ fprintf(stderr, "%s: out of constants\n", __FUNCTION__);
+ p->error = 1;
+ return 0;
+}
+
+
+
+
+void
+i915_program_error(struct i915_fragment_program *p, const char *msg)
+{
+ /* XXX we shouldn't print directly to the console here; record a GL
+ * error or call _mesa_problem() instead.
+ */
+ fprintf(stderr, "%s\n", msg);
+ p->error = 1;
+}
+
+void
+i915_init_program(struct i915_context *i915, struct i915_fragment_program *p)
+{
+ GLcontext *ctx = &i915->intel.ctx;
+ TNLcontext *tnl = TNL_CONTEXT(ctx);
+
+ p->translated = 0;
+ p->params_uptodate = 0;
+ p->on_hardware = 0;
+ p->error = 0;
+
+ p->nr_tex_indirect = 1; /* correct? */
+ p->nr_tex_insn = 0;
+ p->nr_alu_insn = 0;
+ p->nr_decl_insn = 0;
+
+ p->ctx = ctx;
+ memset(p->constant_flags, 0, sizeof(p->constant_flags));
+
+ p->nr_constants = 0;
+ p->csr = p->program;
+ p->decl = p->declarations;
+ p->decl_s = 0;
+ p->decl_t = 0;
+ p->temp_flag = 0xffff000;
+ p->utemp_flag = ~0x7;
+ p->wpos_tex = -1;
+ p->depth_written = 0;
+ p->nr_params = 0;
+
+ p->src_texture = UREG_BAD;
+ p->src_previous = UREG(REG_TYPE_T, T_DIFFUSE);
+ p->last_tex_stage = 0;
+ p->VB = &tnl->vb;
+
+ *(p->decl++) = _3DSTATE_PIXEL_SHADER_PROGRAM;
+}
+
+
+void
+i915_fini_program(struct i915_fragment_program *p)
+{
+ GLuint program_size = p->csr - p->program;
+ GLuint decl_size = p->decl - p->declarations;
+
+ if (p->nr_tex_indirect > I915_MAX_TEX_INDIRECT)
+ i915_program_error(p, "Exceeded max nr indirect texture lookups");
+
+ if (p->nr_tex_insn > I915_MAX_TEX_INSN)
+ i915_program_error(p, "Exceeded max TEX instructions");
+
+ if (p->nr_alu_insn > I915_MAX_ALU_INSN)
+ i915_program_error(p, "Exceeded max ALU instructions");
+
+ if (p->nr_decl_insn > I915_MAX_DECL_INSN)
+ i915_program_error(p, "Exceeded max DECL instructions");
+
+ if (p->error) {
+ p->FragProg.Base.NumNativeInstructions = 0;
+ p->FragProg.NumNativeAluInstructions = 0;
+ p->FragProg.NumNativeTexInstructions = 0;
+ p->FragProg.NumNativeTexIndirections = 0;
+ }
+ else {
+ p->FragProg.Base.NumNativeInstructions = (p->nr_alu_insn +
+ p->nr_tex_insn +
+ p->nr_decl_insn);
+ p->FragProg.NumNativeAluInstructions = p->nr_alu_insn;
+ p->FragProg.NumNativeTexInstructions = p->nr_tex_insn;
+ p->FragProg.NumNativeTexIndirections = p->nr_tex_indirect;
+ }
+
+ p->declarations[0] |= program_size + decl_size - 2;
+}
+
+void
+i915_upload_program(struct i915_context *i915,
+ struct i915_fragment_program *p)
+{
+ GLuint program_size = p->csr - p->program;
+ GLuint decl_size = p->decl - p->declarations;
+
+ FALLBACK(&i915->intel, I915_FALLBACK_PROGRAM, p->error);
+
+ /* Could just go straight to the batchbuffer from here:
+ */
+ if (i915->state.ProgramSize != (program_size + decl_size) ||
+ memcmp(i915->state.Program + decl_size, p->program,
+ program_size * sizeof(int)) != 0) {
+ I915_STATECHANGE(i915, I915_UPLOAD_PROGRAM);
+ memcpy(i915->state.Program, p->declarations, decl_size * sizeof(int));
+ memcpy(i915->state.Program + decl_size, p->program,
+ program_size * sizeof(int));
+ i915->state.ProgramSize = decl_size + program_size;
+ }
+
+ /* Always seemed to get a failure if I used memcmp() to
+ * short-circuit this state upload. Needs further investigation?
+ */
+ if (p->nr_constants) {
+ GLuint nr = p->nr_constants;
+
+ I915_ACTIVESTATE(i915, I915_UPLOAD_CONSTANTS, 1);
+ I915_STATECHANGE(i915, I915_UPLOAD_CONSTANTS);
+
+ i915->state.Constant[0] = _3DSTATE_PIXEL_SHADER_CONSTANTS | ((nr) * 4);
+ i915->state.Constant[1] = (1 << (nr - 1)) | ((1 << (nr - 1)) - 1);
+
+ memcpy(&i915->state.Constant[2], p->constant, 4 * sizeof(int) * (nr));
+ i915->state.ConstantSize = 2 + (nr) * 4;
+
+ if (0) {
+ GLuint i;
+ for (i = 0; i < nr; i++) {
+ fprintf(stderr, "const[%d]: %f %f %f %f\n", i,
+ p->constant[i][0],
+ p->constant[i][1], p->constant[i][2], p->constant[i][3]);
+ }
+ }
+ }
+ else {
+ I915_ACTIVESTATE(i915, I915_UPLOAD_CONSTANTS, 0);
+ }
+
+ p->on_hardware = 1;
+}
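
A worked example of the constants packet header built above, for nr = 3 (the values follow directly from the expressions in the code):

   /* Constant[0] = _3DSTATE_PIXEL_SHADER_CONSTANTS | 12     3 regs * 4 dwords
    * Constant[1] = (1 << 2) | ((1 << 2) - 1) = 0x7          load mask for c0..c2
    *
    * i.e. Constant[1] is simply (1 << nr) - 1: one bit per PS constant
    * register whose x,y,z,w values follow in the packet.
    */
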
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef I915_PROGRAM_H
+#define I915_PROGRAM_H
+
+#include "i915_context.h"
+#include "i915_reg.h"
+
+
+
+/* Having zero and one in here makes the definition of swizzle a lot
+ * easier.
+ */
+#define UREG_TYPE_SHIFT 29
+#define UREG_NR_SHIFT 24
+#define UREG_CHANNEL_X_NEGATE_SHIFT 23
+#define UREG_CHANNEL_X_SHIFT 20
+#define UREG_CHANNEL_Y_NEGATE_SHIFT 19
+#define UREG_CHANNEL_Y_SHIFT 16
+#define UREG_CHANNEL_Z_NEGATE_SHIFT 15
+#define UREG_CHANNEL_Z_SHIFT 12
+#define UREG_CHANNEL_W_NEGATE_SHIFT 11
+#define UREG_CHANNEL_W_SHIFT 8
+#define UREG_CHANNEL_ZERO_NEGATE_MBZ 5
+#define UREG_CHANNEL_ZERO_SHIFT 4
+#define UREG_CHANNEL_ONE_NEGATE_MBZ 1
+#define UREG_CHANNEL_ONE_SHIFT 0
+
+#define UREG_BAD 0xffffffff /* not a valid ureg */
+
+#define X SRC_X
+#define Y SRC_Y
+#define Z SRC_Z
+#define W SRC_W
+#define ZERO SRC_ZERO
+#define ONE SRC_ONE
+
+/* Construct a ureg:
+ */
+#define UREG( type, nr ) (((type)<< UREG_TYPE_SHIFT) | \
+ ((nr) << UREG_NR_SHIFT) | \
+ (X << UREG_CHANNEL_X_SHIFT) | \
+ (Y << UREG_CHANNEL_Y_SHIFT) | \
+ (Z << UREG_CHANNEL_Z_SHIFT) | \
+ (W << UREG_CHANNEL_W_SHIFT) | \
+ (ZERO << UREG_CHANNEL_ZERO_SHIFT) | \
+ (ONE << UREG_CHANNEL_ONE_SHIFT))
+
+#define GET_CHANNEL_SRC( reg, channel ) ((reg<<(channel*4)) & (0xf<<20))
+#define CHANNEL_SRC( src, channel ) (src>>(channel*4))
+
+#define GET_UREG_TYPE(reg) (((reg)>>UREG_TYPE_SHIFT)&REG_TYPE_MASK)
+#define GET_UREG_NR(reg) (((reg)>>UREG_NR_SHIFT)&REG_NR_MASK)
+
+
+
+#define UREG_XYZW_CHANNEL_MASK 0x00ffff00
+
+/* One neat thing about the UREG representation: swizzling is just a
+ * rearrangement of the per-channel select bitfields.
+ */
+static INLINE int
+swizzle(int reg, int x, int y, int z, int w)
+{
+ return ((reg & ~UREG_XYZW_CHANNEL_MASK) |
+ CHANNEL_SRC(GET_CHANNEL_SRC(reg, x), 0) |
+ CHANNEL_SRC(GET_CHANNEL_SRC(reg, y), 1) |
+ CHANNEL_SRC(GET_CHANNEL_SRC(reg, z), 2) |
+ CHANNEL_SRC(GET_CHANNEL_SRC(reg, w), 3));
+}
+
+/* Another neat thing about the UREG representation: negation is just
+ * an XOR of the per-channel negate bits.
+ */
+static INLINE int
+negate(int reg, int x, int y, int z, int w)
+{
+ return reg ^ (((x & 1) << UREG_CHANNEL_X_NEGATE_SHIFT) |
+ ((y & 1) << UREG_CHANNEL_Y_NEGATE_SHIFT) |
+ ((z & 1) << UREG_CHANNEL_Z_NEGATE_SHIFT) |
+ ((w & 1) << UREG_CHANNEL_W_NEGATE_SHIFT));
+}
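
A short sketch composing the two helpers above (illustrative only): build a source argument that reads the interpolated diffuse color with its channels reversed and the first result channel negated.

   GLuint diffuse = UREG(REG_TYPE_T, T_DIFFUSE);
   /* src.x = -diffuse.w, src.y = diffuse.z, src.z = diffuse.y, src.w = diffuse.x */
   GLuint src = negate(swizzle(diffuse, W, Z, Y, X), 1, 0, 0, 0);
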
+
+
+extern GLuint i915_get_temp(struct i915_fragment_program *p);
+extern GLuint i915_get_utemp(struct i915_fragment_program *p);
+extern void i915_release_utemps(struct i915_fragment_program *p);
+
+
+extern GLuint i915_emit_texld(struct i915_fragment_program *p,
+ GLuint dest,
+ GLuint destmask,
+ GLuint sampler, GLuint coord, GLuint op);
+
+extern GLuint i915_emit_arith(struct i915_fragment_program *p,
+ GLuint op,
+ GLuint dest,
+ GLuint mask,
+ GLuint saturate,
+ GLuint src0, GLuint src1, GLuint src2);
+
+extern GLuint i915_emit_decl(struct i915_fragment_program *p,
+ GLuint type, GLuint nr, GLuint d0_flags);
+
+
+extern GLuint i915_emit_const1f(struct i915_fragment_program *p, GLfloat c0);
+
+extern GLuint i915_emit_const2f(struct i915_fragment_program *p,
+ GLfloat c0, GLfloat c1);
+
+extern GLuint i915_emit_const4fv(struct i915_fragment_program *p,
+ const GLfloat * c);
+
+extern GLuint i915_emit_const4f(struct i915_fragment_program *p,
+ GLfloat c0, GLfloat c1,
+ GLfloat c2, GLfloat c3);
+
+
+extern GLuint i915_emit_param4fv(struct i915_fragment_program *p,
+ const GLfloat * values);
+
+extern void i915_program_error(struct i915_fragment_program *p,
+ const char *msg);
+
+extern void i915_init_program(struct i915_context *i915,
+ struct i915_fragment_program *p);
+
+extern void i915_upload_program(struct i915_context *i915,
+ struct i915_fragment_program *p);
+
+extern void i915_fini_program(struct i915_fragment_program *p);
+
+
+
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef _I915_REG_H_
+#define _I915_REG_H_
+
+
+#include "intel_reg.h"
+
+#define I915_SET_FIELD( var, mask, value ) (var &= ~(mask), var |= value)
+
+#define CMD_3D (0x3<<29)
+
+#define PRIM3D_INLINE (CMD_3D | (0x1f<<24))
+#define PRIM3D_TRILIST (0x0<<18)
+#define PRIM3D_TRISTRIP (0x1<<18)
+#define PRIM3D_TRISTRIP_RVRSE (0x2<<18)
+#define PRIM3D_TRIFAN (0x3<<18)
+#define PRIM3D_POLY (0x4<<18)
+#define PRIM3D_LINELIST (0x5<<18)
+#define PRIM3D_LINESTRIP (0x6<<18)
+#define PRIM3D_RECTLIST (0x7<<18)
+#define PRIM3D_POINTLIST (0x8<<18)
+#define PRIM3D_DIB (0x9<<18)
+#define PRIM3D_CLEAR_RECT (0xa<<18)
+#define PRIM3D_ZONE_INIT (0xd<<18)
+#define PRIM3D_MASK (0x1f<<18)
+
+/* p137 */
+#define _3DSTATE_AA_CMD (CMD_3D | (0x06<<24))
+#define AA_LINE_ECAAR_WIDTH_ENABLE (1<<16)
+#define AA_LINE_ECAAR_WIDTH_0_5 0
+#define AA_LINE_ECAAR_WIDTH_1_0 (1<<14)
+#define AA_LINE_ECAAR_WIDTH_2_0 (2<<14)
+#define AA_LINE_ECAAR_WIDTH_4_0 (3<<14)
+#define AA_LINE_REGION_WIDTH_ENABLE (1<<8)
+#define AA_LINE_REGION_WIDTH_0_5 0
+#define AA_LINE_REGION_WIDTH_1_0 (1<<6)
+#define AA_LINE_REGION_WIDTH_2_0 (2<<6)
+#define AA_LINE_REGION_WIDTH_4_0 (3<<6)
+
+/* 3DSTATE_BACKFACE_STENCIL_OPS, p138*/
+#define _3DSTATE_BACKFACE_STENCIL_OPS (CMD_3D | (0x8<<24))
+#define BFO_ENABLE_STENCIL_REF (1<<23)
+#define BFO_STENCIL_REF_SHIFT 15
+#define BFO_STENCIL_REF_MASK (0xff<<15)
+#define BFO_ENABLE_STENCIL_FUNCS (1<<14)
+#define BFO_STENCIL_TEST_SHIFT 11
+#define BFO_STENCIL_TEST_MASK (0x7<<11)
+#define BFO_STENCIL_FAIL_SHIFT 8
+#define BFO_STENCIL_FAIL_MASK (0x7<<8)
+#define BFO_STENCIL_PASS_Z_FAIL_SHIFT 5
+#define BFO_STENCIL_PASS_Z_FAIL_MASK (0x7<<5)
+#define BFO_STENCIL_PASS_Z_PASS_SHIFT 2
+#define BFO_STENCIL_PASS_Z_PASS_MASK (0x7<<2)
+#define BFO_ENABLE_STENCIL_TWO_SIDE (1<<1)
+#define BFO_STENCIL_TWO_SIDE (1<<0)
+
+
+/* 3DSTATE_BACKFACE_STENCIL_MASKS, p140 */
+#define _3DSTATE_BACKFACE_STENCIL_MASKS (CMD_3D | (0x9<<24))
+#define BFM_ENABLE_STENCIL_TEST_MASK (1<<17)
+#define BFM_ENABLE_STENCIL_WRITE_MASK (1<<16)
+#define BFM_STENCIL_TEST_MASK_SHIFT 8
+#define BFM_STENCIL_TEST_MASK_MASK (0xff<<8)
+#define BFM_STENCIL_WRITE_MASK_SHIFT 0
+#define BFM_STENCIL_WRITE_MASK_MASK (0xff<<0)
+
+
+
+/* 3DSTATE_BIN_CONTROL p141 */
+
+/* p143 */
+#define _3DSTATE_BUF_INFO_CMD (CMD_3D | (0x1d<<24) | (0x8e<<16) | 1)
+/* Dword 1 */
+#define BUF_3D_ID_COLOR_BACK (0x3<<24)
+#define BUF_3D_ID_DEPTH (0x7<<24)
+#define BUF_3D_USE_FENCE (1<<23)
+#define BUF_3D_TILED_SURFACE (1<<22)
+#define BUF_3D_TILE_WALK_X 0
+#define BUF_3D_TILE_WALK_Y (1<<21)
+#define BUF_3D_PITCH(x) (((x)/4)<<2)
+/* Dword 2 */
+#define BUF_3D_ADDR(x) ((x) & ~0x3)
+
+
+/* 3DSTATE_CHROMA_KEY */
+
+/* 3DSTATE_CLEAR_PARAMETERS, p150 */
+
+/* 3DSTATE_CONSTANT_BLEND_COLOR, p153 */
+#define _3DSTATE_CONST_BLEND_COLOR_CMD (CMD_3D | (0x1d<<24) | (0x88<<16))
+
+
+
+/* 3DSTATE_COORD_SET_BINDINGS, p154 */
+#define _3DSTATE_COORD_SET_BINDINGS (CMD_3D | (0x16<<24))
+#define CSB_TCB(iunit, eunit) ((eunit)<<(iunit*3))
+
+/* p156 */
+#define _3DSTATE_DFLT_DIFFUSE_CMD (CMD_3D | (0x1d<<24) | (0x99<<16))
+
+/* p157 */
+#define _3DSTATE_DFLT_SPEC_CMD (CMD_3D | (0x1d<<24) | (0x9a<<16))
+
+/* p158 */
+#define _3DSTATE_DFLT_Z_CMD (CMD_3D | (0x1d<<24) | (0x98<<16))
+
+
+/* 3DSTATE_DEPTH_OFFSET_SCALE, p159 */
+#define _3DSTATE_DEPTH_OFFSET_SCALE (CMD_3D | (0x1d<<24) | (0x97<<16))
+/* scale in dword 1 */
+
+
+/* 3DSTATE_DEPTH_SUBRECT_DISABLE, p160 */
+#define _3DSTATE_DEPTH_SUBRECT_DISABLE (CMD_3D | (0x1c<<24) | (0x11<<19) | 0x2)
+
+/* p161 */
+#define _3DSTATE_DST_BUF_VARS_CMD (CMD_3D | (0x1d<<24) | (0x85<<16))
+/* Dword 1 */
+#define TEX_DEFAULT_COLOR_OGL (0<<30)
+#define TEX_DEFAULT_COLOR_D3D (1<<30)
+#define ZR_EARLY_DEPTH (1<<29)
+#define LOD_PRECLAMP_OGL (1<<28)
+#define LOD_PRECLAMP_D3D (0<<28)
+#define DITHER_FULL_ALWAYS (0<<26)
+#define DITHER_FULL_ON_FB_BLEND (1<<26)
+#define DITHER_CLAMPED_ALWAYS (2<<26)
+#define LINEAR_GAMMA_BLEND_32BPP (1<<25)
+#define DEBUG_DISABLE_ENH_DITHER (1<<24)
+#define DSTORG_HORT_BIAS(x) ((x)<<20)
+#define DSTORG_VERT_BIAS(x) ((x)<<16)
+#define COLOR_4_2_2_CHNL_WRT_ALL 0
+#define COLOR_4_2_2_CHNL_WRT_Y (1<<12)
+#define COLOR_4_2_2_CHNL_WRT_CR (2<<12)
+#define COLOR_4_2_2_CHNL_WRT_CB (3<<12)
+#define COLOR_4_2_2_CHNL_WRT_CRCB (4<<12)
+#define COLR_BUF_8BIT 0
+#define COLR_BUF_RGB555 (1<<8)
+#define COLR_BUF_RGB565 (2<<8)
+#define COLR_BUF_ARGB8888 (3<<8)
+#define DEPTH_FRMT_16_FIXED 0
+#define DEPTH_FRMT_16_FLOAT (1<<2)
+#define DEPTH_FRMT_24_FIXED_8_OTHER (2<<2)
+#define VERT_LINE_STRIDE_1 (1<<1)
+#define VERT_LINE_STRIDE_0 (0<<1)
+#define VERT_LINE_STRIDE_OFS_1 1
+#define VERT_LINE_STRIDE_OFS_0 0
+
+/* p166 */
+#define _3DSTATE_DRAW_RECT_CMD (CMD_3D|(0x1d<<24)|(0x80<<16)|3)
+/* Dword 1 */
+#define DRAW_RECT_DIS_DEPTH_OFS (1<<30)
+#define DRAW_DITHER_OFS_X(x) ((x)<<26)
+#define DRAW_DITHER_OFS_Y(x) ((x)<<24)
+/* Dword 2 */
+#define DRAW_YMIN(x) ((x)<<16)
+#define DRAW_XMIN(x) (x)
+/* Dword 3 */
+#define DRAW_YMAX(x) ((x)<<16)
+#define DRAW_XMAX(x) (x)
+/* Dword 4 */
+#define DRAW_YORG(x) ((x)<<16)
+#define DRAW_XORG(x) (x)
+
+
+/* 3DSTATE_FILTER_COEFFICIENTS_4X4, p170 */
+
+/* 3DSTATE_FILTER_COEFFICIENTS_6X5, p172 */
+
+
+/* _3DSTATE_FOG_COLOR, p173 */
+#define _3DSTATE_FOG_COLOR_CMD (CMD_3D|(0x15<<24))
+#define FOG_COLOR_RED(x) ((x)<<16)
+#define FOG_COLOR_GREEN(x) ((x)<<8)
+#define FOG_COLOR_BLUE(x) (x)
+
+/* _3DSTATE_FOG_MODE, p174 */
+#define _3DSTATE_FOG_MODE_CMD (CMD_3D|(0x1d<<24)|(0x89<<16)|2)
+/* Dword 1 */
+#define FMC1_FOGFUNC_MODIFY_ENABLE (1<<31)
+#define FMC1_FOGFUNC_VERTEX (0<<28)
+#define FMC1_FOGFUNC_PIXEL_EXP (1<<28)
+#define FMC1_FOGFUNC_PIXEL_EXP2 (2<<28)
+#define FMC1_FOGFUNC_PIXEL_LINEAR (3<<28)
+#define FMC1_FOGFUNC_MASK (3<<28)
+#define FMC1_FOGINDEX_MODIFY_ENABLE (1<<27)
+#define FMC1_FOGINDEX_Z (0<<25)
+#define FMC1_FOGINDEX_W (1<<25)
+#define FMC1_C1_C2_MODIFY_ENABLE (1<<24)
+#define FMC1_DENSITY_MODIFY_ENABLE (1<<23)
+#define FMC1_C1_ONE (1<<13)
+#define FMC1_C1_MASK (0xffff<<4)
+/* Dword 2 */
+#define FMC2_C2_ONE (1<<16)
+/* Dword 3 */
+#define FMC3_D_ONE (1<<16)
+
+
+
+/* _3DSTATE_INDEPENDENT_ALPHA_BLEND, p177 */
+#define _3DSTATE_INDEPENDENT_ALPHA_BLEND_CMD (CMD_3D|(0x0b<<24))
+#define IAB_MODIFY_ENABLE (1<<23)
+#define IAB_ENABLE (1<<22)
+#define IAB_MODIFY_FUNC (1<<21)
+#define IAB_FUNC_SHIFT 16
+#define IAB_MODIFY_SRC_FACTOR (1<<11)
+#define IAB_SRC_FACTOR_SHIFT 6
+#define IAB_SRC_FACTOR_MASK (BLENDFACT_MASK<<6)
+#define IAB_MODIFY_DST_FACTOR (1<<5)
+#define IAB_DST_FACTOR_SHIFT 0
+#define IAB_DST_FACTOR_MASK (BLENDFACT_MASK<<0)
+
+
+#define BLENDFUNC_ADD 0x0
+#define BLENDFUNC_SUBTRACT 0x1
+#define BLENDFUNC_REVERSE_SUBTRACT 0x2
+#define BLENDFUNC_MIN 0x3
+#define BLENDFUNC_MAX 0x4
+#define BLENDFUNC_MASK 0x7
+
+/* 3DSTATE_LOAD_INDIRECT, p180 */
+
+#define _3DSTATE_LOAD_INDIRECT (CMD_3D|(0x1d<<24)|(0x7<<16))
+#define LI0_STATE_STATIC_INDIRECT (0x01<<8)
+#define LI0_STATE_DYNAMIC_INDIRECT (0x02<<8)
+#define LI0_STATE_SAMPLER (0x04<<8)
+#define LI0_STATE_MAP (0x08<<8)
+#define LI0_STATE_PROGRAM (0x10<<8)
+#define LI0_STATE_CONSTANTS (0x20<<8)
+
+#define SIS0_BUFFER_ADDRESS(x) ((x)&~0x3)
+#define SIS0_FORCE_LOAD (1<<1)
+#define SIS0_BUFFER_VALID (1<<0)
+#define SIS1_BUFFER_LENGTH(x) ((x)&0xff)
+
+#define DIS0_BUFFER_ADDRESS(x) ((x)&~0x3)
+#define DIS0_BUFFER_RESET (1<<1)
+#define DIS0_BUFFER_VALID (1<<0)
+
+#define SSB0_BUFFER_ADDRESS(x) ((x)&~0x3)
+#define SSB0_FORCE_LOAD (1<<1)
+#define SSB0_BUFFER_VALID (1<<0)
+#define SSB1_BUFFER_LENGTH(x) ((x)&0xff)
+
+#define MSB0_BUFFER_ADDRESS(x) ((x)&~0x3)
+#define MSB0_FORCE_LOAD (1<<1)
+#define MSB0_BUFFER_VALID (1<<0)
+#define MSB1_BUFFER_LENGTH(x) ((x)&0xff)
+
+#define PSP0_BUFFER_ADDRESS(x) ((x)&~0x3)
+#define PSP0_FORCE_LOAD (1<<1)
+#define PSP0_BUFFER_VALID (1<<0)
+#define PSP1_BUFFER_LENGTH(x) ((x)&0xff)
+
+#define PSC0_BUFFER_ADDRESS(x) ((x)&~0x3)
+#define PSC0_FORCE_LOAD (1<<1)
+#define PSC0_BUFFER_VALID (1<<0)
+#define PSC1_BUFFER_LENGTH(x) ((x)&0xff)
+
+
+
+
+
+/* _3DSTATE_RASTERIZATION_RULES */
+#define _3DSTATE_RASTER_RULES_CMD (CMD_3D|(0x07<<24))
+#define ENABLE_POINT_RASTER_RULE (1<<15)
+#define OGL_POINT_RASTER_RULE (1<<13)
+#define ENABLE_TEXKILL_3D_4D (1<<10)
+#define TEXKILL_3D (0<<9)
+#define TEXKILL_4D (1<<9)
+#define ENABLE_LINE_STRIP_PROVOKE_VRTX (1<<8)
+#define ENABLE_TRI_FAN_PROVOKE_VRTX (1<<5)
+#define LINE_STRIP_PROVOKE_VRTX(x) ((x)<<6)
+#define TRI_FAN_PROVOKE_VRTX(x) ((x)<<3)
+
+/* _3DSTATE_SCISSOR_ENABLE, p256 */
+#define _3DSTATE_SCISSOR_ENABLE_CMD (CMD_3D|(0x1c<<24)|(0x10<<19))
+#define ENABLE_SCISSOR_RECT ((1<<1) | 1)
+#define DISABLE_SCISSOR_RECT (1<<1)
+
+/* _3DSTATE_SCISSOR_RECTANGLE_0, p257 */
+#define _3DSTATE_SCISSOR_RECT_0_CMD (CMD_3D|(0x1d<<24)|(0x81<<16)|1)
+/* Dword 1 */
+#define SCISSOR_RECT_0_YMIN(x) ((x)<<16)
+#define SCISSOR_RECT_0_XMIN(x) (x)
+/* Dword 2 */
+#define SCISSOR_RECT_0_YMAX(x) ((x)<<16)
+#define SCISSOR_RECT_0_XMAX(x) (x)
+
+/* p189 */
+#define _3DSTATE_LOAD_STATE_IMMEDIATE_1 ((0x3<<29)|(0x1d<<24)|(0x04<<16))
+#define I1_LOAD_S(n) (1<<(4+n))
+
+#define S0_VB_OFFSET_MASK 0xffffffc
+#define S0_AUTO_CACHE_INV_DISABLE (1<<0)
+
+#define S1_VERTEX_WIDTH_SHIFT 24
+#define S1_VERTEX_WIDTH_MASK (0x3f<<24)
+#define S1_VERTEX_PITCH_SHIFT 16
+#define S1_VERTEX_PITCH_MASK (0x3f<<16)
+
+#define TEXCOORDFMT_2D 0x0
+#define TEXCOORDFMT_3D 0x1
+#define TEXCOORDFMT_4D 0x2
+#define TEXCOORDFMT_1D 0x3
+#define TEXCOORDFMT_2D_16 0x4
+#define TEXCOORDFMT_4D_16 0x5
+#define TEXCOORDFMT_NOT_PRESENT 0xf
+#define S2_TEXCOORD_FMT0_MASK 0xf
+#define S2_TEXCOORD_FMT1_SHIFT 4
+#define S2_TEXCOORD_FMT(unit, type) ((type)<<(unit*4))
+#define S2_TEXCOORD_NONE (~0)
+
+/* S3 not interesting */
+
+#define S4_POINT_WIDTH_SHIFT 23
+#define S4_POINT_WIDTH_MASK (0x1ff<<23)
+#define S4_LINE_WIDTH_SHIFT 19
+#define S4_LINE_WIDTH_ONE (0x2<<19)
+#define S4_LINE_WIDTH_MASK (0xf<<19)
+#define S4_FLATSHADE_ALPHA (1<<18)
+#define S4_FLATSHADE_FOG (1<<17)
+#define S4_FLATSHADE_SPECULAR (1<<16)
+#define S4_FLATSHADE_COLOR (1<<15)
+#define S4_CULLMODE_BOTH (0<<13)
+#define S4_CULLMODE_NONE (1<<13)
+#define S4_CULLMODE_CW (2<<13)
+#define S4_CULLMODE_CCW (3<<13)
+#define S4_CULLMODE_MASK (3<<13)
+#define S4_VFMT_POINT_WIDTH (1<<12)
+#define S4_VFMT_SPEC_FOG (1<<11)
+#define S4_VFMT_COLOR (1<<10)
+#define S4_VFMT_DEPTH_OFFSET (1<<9)
+#define S4_VFMT_XYZ (1<<6)
+#define S4_VFMT_XYZW (2<<6)
+#define S4_VFMT_XY (3<<6)
+#define S4_VFMT_XYW (4<<6)
+#define S4_VFMT_XYZW_MASK (7<<6)
+#define S4_FORCE_DEFAULT_DIFFUSE (1<<5)
+#define S4_FORCE_DEFAULT_SPECULAR (1<<4)
+#define S4_LOCAL_DEPTH_OFFSET_ENABLE (1<<3)
+#define S4_VFMT_FOG_PARAM (1<<2)
+#define S4_SPRITE_POINT_ENABLE (1<<1)
+#define S4_LINE_ANTIALIAS_ENABLE (1<<0)
+
+#define S4_VFMT_MASK (S4_VFMT_POINT_WIDTH | \
+ S4_VFMT_SPEC_FOG | \
+ S4_VFMT_COLOR | \
+ S4_VFMT_DEPTH_OFFSET | \
+ S4_VFMT_XYZW_MASK | \
+ S4_VFMT_FOG_PARAM)
+
+
+#define S5_WRITEDISABLE_ALPHA (1<<31)
+#define S5_WRITEDISABLE_RED (1<<30)
+#define S5_WRITEDISABLE_GREEN (1<<29)
+#define S5_WRITEDISABLE_BLUE (1<<28)
+#define S5_WRITEDISABLE_MASK (0xf<<28)
+#define S5_FORCE_DEFAULT_POINT_SIZE (1<<27)
+#define S5_LAST_PIXEL_ENABLE (1<<26)
+#define S5_GLOBAL_DEPTH_OFFSET_ENABLE (1<<25)
+#define S5_FOG_ENABLE (1<<24)
+#define S5_STENCIL_REF_SHIFT 16
+#define S5_STENCIL_REF_MASK (0xff<<16)
+#define S5_STENCIL_TEST_FUNC_SHIFT 13
+#define S5_STENCIL_TEST_FUNC_MASK (0x7<<13)
+#define S5_STENCIL_FAIL_SHIFT 10
+#define S5_STENCIL_FAIL_MASK (0x7<<10)
+#define S5_STENCIL_PASS_Z_FAIL_SHIFT 7
+#define S5_STENCIL_PASS_Z_FAIL_MASK (0x7<<7)
+#define S5_STENCIL_PASS_Z_PASS_SHIFT 4
+#define S5_STENCIL_PASS_Z_PASS_MASK (0x7<<4)
+#define S5_STENCIL_WRITE_ENABLE (1<<3)
+#define S5_STENCIL_TEST_ENABLE (1<<2)
+#define S5_COLOR_DITHER_ENABLE (1<<1)
+#define S5_LOGICOP_ENABLE (1<<0)
+
+
+#define S6_ALPHA_TEST_ENABLE (1<<31)
+#define S6_ALPHA_TEST_FUNC_SHIFT 28
+#define S6_ALPHA_TEST_FUNC_MASK (0x7<<28)
+#define S6_ALPHA_REF_SHIFT 20
+#define S6_ALPHA_REF_MASK (0xff<<20)
+#define S6_DEPTH_TEST_ENABLE (1<<19)
+#define S6_DEPTH_TEST_FUNC_SHIFT 16
+#define S6_DEPTH_TEST_FUNC_MASK (0x7<<16)
+#define S6_CBUF_BLEND_ENABLE (1<<15)
+#define S6_CBUF_BLEND_FUNC_SHIFT 12
+#define S6_CBUF_BLEND_FUNC_MASK (0x7<<12)
+#define S6_CBUF_SRC_BLEND_FACT_SHIFT 8
+#define S6_CBUF_SRC_BLEND_FACT_MASK (0xf<<8)
+#define S6_CBUF_DST_BLEND_FACT_SHIFT 4
+#define S6_CBUF_DST_BLEND_FACT_MASK (0xf<<4)
+#define S6_DEPTH_WRITE_ENABLE (1<<3)
+#define S6_COLOR_WRITE_ENABLE (1<<2)
+#define S6_TRISTRIP_PV_SHIFT 0
+#define S6_TRISTRIP_PV_MASK (0x3<<0)
+
+#define S7_DEPTH_OFFSET_CONST_MASK ~0
+
+/* 3DSTATE_MAP_DEINTERLACER_PARAMETERS */
+
+/* 3DSTATE_MAP_PALETTE_LOAD_32, p206 */
+#define _3DSTATE_MAP_PALETTE_LOAD_32 (CMD_3D|(0x1d<<24)|(0x8f<<16))
+/* subsequent dwords up to length (max 16) are ARGB8888 color values */
+
+/* _3DSTATE_MODES_4, p218 */
+#define _3DSTATE_MODES_4_CMD (CMD_3D|(0x0d<<24))
+#define ENABLE_LOGIC_OP_FUNC (1<<23)
+#define LOGIC_OP_FUNC(x) ((x)<<18)
+#define LOGICOP_MASK (0xf<<18)
+#define MODE4_ENABLE_STENCIL_TEST_MASK ((1<<17)|(0xff00))
+#define ENABLE_STENCIL_TEST_MASK (1<<17)
+#define STENCIL_TEST_MASK(x) (((x)&0xff)<<8)
+#define MODE4_ENABLE_STENCIL_WRITE_MASK ((1<<16)|(0x00ff))
+#define ENABLE_STENCIL_WRITE_MASK (1<<16)
+#define STENCIL_WRITE_MASK(x) ((x)&0xff)
+
+/* _3DSTATE_MODES_5, p220 */
+#define _3DSTATE_MODES_5_CMD (CMD_3D|(0x0c<<24))
+#define PIPELINE_FLUSH_RENDER_CACHE (1<<18)
+#define PIPELINE_FLUSH_TEXTURE_CACHE (1<<16)
+
+
+/* p221 */
+#define _3DSTATE_PIXEL_SHADER_CONSTANTS (CMD_3D|(0x1d<<24)|(0x6<<16))
+#define PS1_REG(n) (1<<(n))
+#define PS2_CONST_X(n) (n)
+#define PS3_CONST_Y(n) (n)
+#define PS4_CONST_Z(n) (n)
+#define PS5_CONST_W(n) (n)
+
+/* p222 */
+
+
+#define I915_MAX_TEX_INDIRECT 4
+#define I915_MAX_TEX_INSN 32
+#define I915_MAX_ALU_INSN 64
+#define I915_MAX_DECL_INSN 27
+#define I915_MAX_TEMPORARY 16
+
+
+/* Each instruction is 3 dwords long, though most don't require all
+ * this space. Maximum of 123 instructions. Smaller maxes per insn
+ * type.
+ */
+#define _3DSTATE_PIXEL_SHADER_PROGRAM (CMD_3D|(0x1d<<24)|(0x5<<16))
+
+#define REG_TYPE_R 0 /* temporary regs, no need to
+ * dcl, must be written before
+ * read -- Preserved between
+ * phases.
+ */
+#define REG_TYPE_T 1 /* Interpolated values, must be
+ * dcl'ed before use.
+ *
+ * 0..7: texture coord,
+ * 8: diffuse color,
+ * 9: specular color,
+ * 10: fog parameter in w.
+ */
+#define REG_TYPE_CONST 2 /* Restriction: only one const
+ * can be referenced per
+ * instruction, though it may be
+ * selected for multiple inputs.
+ * Constants not initialized
+ * default to zero.
+ */
+#define REG_TYPE_S 3 /* sampler */
+#define REG_TYPE_OC 4 /* output color (rgba) */
+#define REG_TYPE_OD 5 /* output depth (w), xyz are
+ * temporaries. If not written,
+ * interpolated depth is used?
+ */
+#define REG_TYPE_U 6 /* unpreserved temporaries */
+#define REG_TYPE_MASK 0x7
+#define REG_NR_MASK 0xf
+
+
+/* REG_TYPE_T:
+ */
+#define T_TEX0 0
+#define T_TEX1 1
+#define T_TEX2 2
+#define T_TEX3 3
+#define T_TEX4 4
+#define T_TEX5 5
+#define T_TEX6 6
+#define T_TEX7 7
+#define T_DIFFUSE 8
+#define T_SPECULAR 9
+#define T_FOG_W 10 /* interpolated fog is in W coord */
+
+/* Arithmetic instructions */
+
+/* .replicate_swizzle == selection and replication of a particular
+ * scalar channel, i.e., .xxxx, .yyyy, .zzzz or .wwww
+ */
+#define A0_NOP (0x0<<24) /* no operation */
+#define A0_ADD (0x1<<24) /* dst = src0 + src1 */
+#define A0_MOV (0x2<<24) /* dst = src0 */
+#define A0_MUL (0x3<<24) /* dst = src0 * src1 */
+#define A0_MAD (0x4<<24) /* dst = src0 * src1 + src2 */
+#define A0_DP2ADD (0x5<<24) /* dst.xyzw = src0.xy dot src1.xy + src2.replicate_swizzle */
+#define A0_DP3 (0x6<<24) /* dst.xyzw = src0.xyz dot src1.xyz */
+#define A0_DP4 (0x7<<24) /* dst.xyzw = src0.xyzw dot src1.xyzw */
+#define A0_FRC (0x8<<24) /* dst = src0 - floor(src0) */
+#define A0_RCP (0x9<<24) /* dst.xyzw = 1/(src0.replicate_swizzle) */
+#define A0_RSQ (0xa<<24) /* dst.xyzw = 1/(sqrt(abs(src0.replicate_swizzle))) */
+#define A0_EXP (0xb<<24) /* dst.xyzw = exp2(src0.replicate_swizzle) */
+#define A0_LOG (0xc<<24) /* dst.xyzw = log2(abs(src0.replicate_swizzle)) */
+#define A0_CMP (0xd<<24) /* dst = (src0 >= 0.0) ? src1 : src2 */
+#define A0_MIN (0xe<<24) /* dst = (src0 < src1) ? src0 : src1 */
+#define A0_MAX (0xf<<24) /* dst = (src0 >= src1) ? src0 : src1 */
+#define A0_FLR (0x10<<24) /* dst = floor(src0) */
+#define A0_MOD (0x11<<24) /* dst = src0 fmod 1.0 */
+#define A0_TRC (0x12<<24) /* dst = int(src0) */
+#define A0_SGE (0x13<<24) /* dst = src0 >= src1 ? 1.0 : 0.0 */
+#define A0_SLT (0x14<<24) /* dst = src0 < src1 ? 1.0 : 0.0 */
+#define A0_DEST_SATURATE (1<<22)
+#define A0_DEST_TYPE_SHIFT 19
+/* Allow: R, OC, OD, U */
+#define A0_DEST_NR_SHIFT 14
+/* Allow R: 0..15, OC,OD: 0..0, U: 0..2 */
+#define A0_DEST_CHANNEL_X (1<<10)
+#define A0_DEST_CHANNEL_Y (2<<10)
+#define A0_DEST_CHANNEL_Z (4<<10)
+#define A0_DEST_CHANNEL_W (8<<10)
+#define A0_DEST_CHANNEL_ALL (0xf<<10)
+#define A0_DEST_CHANNEL_SHIFT 10
+#define A0_SRC0_TYPE_SHIFT 7
+#define A0_SRC0_NR_SHIFT 2
+
+#define A0_DEST_CHANNEL_XY (A0_DEST_CHANNEL_X|A0_DEST_CHANNEL_Y)
+#define A0_DEST_CHANNEL_XYZ (A0_DEST_CHANNEL_XY|A0_DEST_CHANNEL_Z)
+
+
+#define SRC_X 0
+#define SRC_Y 1
+#define SRC_Z 2
+#define SRC_W 3
+#define SRC_ZERO 4
+#define SRC_ONE 5
+
+#define A1_SRC0_CHANNEL_X_NEGATE (1<<31)
+#define A1_SRC0_CHANNEL_X_SHIFT 28
+#define A1_SRC0_CHANNEL_Y_NEGATE (1<<27)
+#define A1_SRC0_CHANNEL_Y_SHIFT 24
+#define A1_SRC0_CHANNEL_Z_NEGATE (1<<23)
+#define A1_SRC0_CHANNEL_Z_SHIFT 20
+#define A1_SRC0_CHANNEL_W_NEGATE (1<<19)
+#define A1_SRC0_CHANNEL_W_SHIFT 16
+#define A1_SRC1_TYPE_SHIFT 13
+#define A1_SRC1_NR_SHIFT 8
+#define A1_SRC1_CHANNEL_X_NEGATE (1<<7)
+#define A1_SRC1_CHANNEL_X_SHIFT 4
+#define A1_SRC1_CHANNEL_Y_NEGATE (1<<3)
+#define A1_SRC1_CHANNEL_Y_SHIFT 0
+
+#define A2_SRC1_CHANNEL_Z_NEGATE (1<<31)
+#define A2_SRC1_CHANNEL_Z_SHIFT 28
+#define A2_SRC1_CHANNEL_W_NEGATE (1<<27)
+#define A2_SRC1_CHANNEL_W_SHIFT 24
+#define A2_SRC2_TYPE_SHIFT 21
+#define A2_SRC2_NR_SHIFT 16
+#define A2_SRC2_CHANNEL_X_NEGATE (1<<15)
+#define A2_SRC2_CHANNEL_X_SHIFT 12
+#define A2_SRC2_CHANNEL_Y_NEGATE (1<<11)
+#define A2_SRC2_CHANNEL_Y_SHIFT 8
+#define A2_SRC2_CHANNEL_Z_NEGATE (1<<7)
+#define A2_SRC2_CHANNEL_Z_SHIFT 4
+#define A2_SRC2_CHANNEL_W_NEGATE (1<<3)
+#define A2_SRC2_CHANNEL_W_SHIFT 0
+
+
+
+/* Texture instructions */
+#define T0_TEXLD (0x15<<24) /* Sample texture using predeclared
+ * sampler and address, and output
+ * filtered texel data to destination
+ * register */
+#define T0_TEXLDP (0x16<<24) /* Same as texld but performs a
+ * perspective divide of the texture
+ * coordinate .xyz values by .w before
+ * sampling. */
+#define T0_TEXLDB (0x17<<24) /* Same as texld but biases the
+ * computed LOD by w. Only S4.6 two's
+ * comp is used. This implies that a
+ * float to fixed conversion is
+ * done. */
+#define T0_TEXKILL (0x18<<24) /* Does not perform a sampling
+ * operation. Simply kills the pixel
+ * if any channel of the address
+ * register is < 0.0. */
+#define T0_DEST_TYPE_SHIFT 19
+/* Allow: R, OC, OD, U */
+/* Note: U (unpreserved) regs do not retain their values between
+ * phases (cannot be used for feedback)
+ *
+ * Note: OC and OD registers can only be used as the destination of a
+ * texture instruction once per phase (this is an implementation
+ * restriction).
+ */
+#define T0_DEST_NR_SHIFT 14
+/* Allow R: 0..15, OC,OD: 0..0, U: 0..2 */
+#define T0_SAMPLER_NR_SHIFT 0 /* This field ignored for TEXKILL */
+#define T0_SAMPLER_NR_MASK (0xf<<0)
+
+#define T1_ADDRESS_REG_TYPE_SHIFT 24 /* Reg to use as texture coord */
+/* Allow R, T, OC, OD -- R, OC, OD are 'dependent' reads, new program phase */
+#define T1_ADDRESS_REG_NR_SHIFT 17
+#define T2_MBZ 0
+
+/* Declaration instructions */
+#define D0_DCL (0x19<<24) /* Declare a t (interpolated attrib)
+ * register or an s (sampler)
+ * register. */
+#define D0_SAMPLE_TYPE_SHIFT 22
+#define D0_SAMPLE_TYPE_2D (0x0<<22)
+#define D0_SAMPLE_TYPE_CUBE (0x1<<22)
+#define D0_SAMPLE_TYPE_VOLUME (0x2<<22)
+#define D0_SAMPLE_TYPE_MASK (0x3<<22)
+
+#define D0_TYPE_SHIFT 19
+/* Allow: T, S */
+#define D0_NR_SHIFT 14
+/* Allow T: 0..10, S: 0..15 */
+#define D0_CHANNEL_X (1<<10)
+#define D0_CHANNEL_Y (2<<10)
+#define D0_CHANNEL_Z (4<<10)
+#define D0_CHANNEL_W (8<<10)
+#define D0_CHANNEL_ALL (0xf<<10)
+#define D0_CHANNEL_NONE (0<<10)
+
+#define D0_CHANNEL_XY (D0_CHANNEL_X|D0_CHANNEL_Y)
+#define D0_CHANNEL_XYZ (D0_CHANNEL_XY|D0_CHANNEL_Z)
+
+/* I915 Errata: Do not allow (xz), (xw), (xzw) combinations for diffuse
+ * or specular declarations.
+ *
+ * For T dcls, only allow: (x), (xy), (xyz), (w), (xyzw)
+ *
+ * Must be zero for S (sampler) dcls
+ */
+#define D1_MBZ 0
+#define D2_MBZ 0
+
+
+
+/* p207 */
+#define _3DSTATE_MAP_STATE (CMD_3D|(0x1d<<24)|(0x0<<16))
+
+#define MS1_MAPMASK_SHIFT 0
+#define MS1_MAPMASK_MASK (0x8fff<<0)
+
+#define MS2_UNTRUSTED_SURFACE (1<<31)
+#define MS2_ADDRESS_MASK 0xfffffffc
+#define MS2_VERTICAL_LINE_STRIDE (1<<1)
+#define MS2_VERTICAL_OFFSET (1<<1)
+
+#define MS3_HEIGHT_SHIFT 21
+#define MS3_WIDTH_SHIFT 10
+#define MS3_PALETTE_SELECT (1<<9)
+#define MS3_MAPSURF_FORMAT_SHIFT 7
+#define MS3_MAPSURF_FORMAT_MASK (0x7<<7)
+#define MAPSURF_8BIT (1<<7)
+#define MAPSURF_16BIT (2<<7)
+#define MAPSURF_32BIT (3<<7)
+#define MAPSURF_422 (5<<7)
+#define MAPSURF_COMPRESSED (6<<7)
+#define MAPSURF_4BIT_INDEXED (7<<7)
+#define MS3_MT_FORMAT_MASK (0x7 << 3)
+#define MS3_MT_FORMAT_SHIFT 3
+#define MT_4BIT_IDX_ARGB8888 (7<<3) /* SURFACE_4BIT_INDEXED */
+#define MT_8BIT_I8 (0<<3) /* SURFACE_8BIT */
+#define MT_8BIT_L8 (1<<3)
+#define MT_8BIT_A8 (4<<3)
+#define MT_8BIT_MONO8 (5<<3)
+#define MT_16BIT_RGB565 (0<<3) /* SURFACE_16BIT */
+#define MT_16BIT_ARGB1555 (1<<3)
+#define MT_16BIT_ARGB4444 (2<<3)
+#define MT_16BIT_AY88 (3<<3)
+#define MT_16BIT_88DVDU (5<<3)
+#define MT_16BIT_BUMP_655LDVDU (6<<3)
+#define MT_16BIT_I16 (7<<3)
+#define MT_16BIT_L16 (8<<3)
+#define MT_16BIT_A16 (9<<3)
+#define MT_32BIT_ARGB8888 (0<<3) /* SURFACE_32BIT */
+#define MT_32BIT_ABGR8888 (1<<3)
+#define MT_32BIT_XRGB8888 (2<<3)
+#define MT_32BIT_XBGR8888 (3<<3)
+#define MT_32BIT_QWVU8888 (4<<3)
+#define MT_32BIT_AXVU8888 (5<<3)
+#define MT_32BIT_LXVU8888 (6<<3)
+#define MT_32BIT_XLVU8888 (7<<3)
+#define MT_32BIT_ARGB2101010 (8<<3)
+#define MT_32BIT_ABGR2101010 (9<<3)
+#define MT_32BIT_AWVU2101010 (0xA<<3)
+#define MT_32BIT_GR1616 (0xB<<3)
+#define MT_32BIT_VU1616 (0xC<<3)
+#define MT_32BIT_xI824 (0xD<<3)
+#define MT_32BIT_xA824 (0xE<<3)
+#define MT_32BIT_xL824 (0xF<<3)
+#define MT_422_YCRCB_SWAPY (0<<3) /* SURFACE_422 */
+#define MT_422_YCRCB_NORMAL (1<<3)
+#define MT_422_YCRCB_SWAPUV (2<<3)
+#define MT_422_YCRCB_SWAPUVY (3<<3)
+#define MT_COMPRESS_DXT1 (0<<3) /* SURFACE_COMPRESSED */
+#define MT_COMPRESS_DXT2_3 (1<<3)
+#define MT_COMPRESS_DXT4_5 (2<<3)
+#define MT_COMPRESS_FXT1 (3<<3)
+#define MT_COMPRESS_DXT1_RGB (4<<3)
+#define MS3_USE_FENCE_REGS (1<<2)
+#define MS3_TILED_SURFACE (1<<1)
+#define MS3_TILE_WALK (1<<0)
+
+#define MS4_PITCH_SHIFT 21
+#define MS4_CUBE_FACE_ENA_NEGX (1<<20)
+#define MS4_CUBE_FACE_ENA_POSX (1<<19)
+#define MS4_CUBE_FACE_ENA_NEGY (1<<18)
+#define MS4_CUBE_FACE_ENA_POSY (1<<17)
+#define MS4_CUBE_FACE_ENA_NEGZ (1<<16)
+#define MS4_CUBE_FACE_ENA_POSZ (1<<15)
+#define MS4_CUBE_FACE_ENA_MASK (0x3f<<15)
+#define MS4_MAX_LOD_SHIFT 9
+#define MS4_MAX_LOD_MASK (0x3f<<9)
+#define MS4_MIP_LAYOUT_LEGACY (0<<8)
+#define MS4_MIP_LAYOUT_BELOW_LPT (0<<8)
+#define MS4_MIP_LAYOUT_RIGHT_LPT (1<<8)
+#define MS4_VOLUME_DEPTH_SHIFT 0
+#define MS4_VOLUME_DEPTH_MASK (0xff<<0)
+
+/* p244 */
+#define _3DSTATE_SAMPLER_STATE (CMD_3D|(0x1d<<24)|(0x1<<16))
+
+#define SS1_MAPMASK_SHIFT 0
+#define SS1_MAPMASK_MASK (0x8fff<<0)
+
+#define SS2_REVERSE_GAMMA_ENABLE (1<<31)
+#define SS2_PACKED_TO_PLANAR_ENABLE (1<<30)
+#define SS2_COLORSPACE_CONVERSION (1<<29)
+#define SS2_CHROMAKEY_SHIFT 27
+#define SS2_BASE_MIP_LEVEL_SHIFT 22
+#define SS2_BASE_MIP_LEVEL_MASK (0x1f<<22)
+#define SS2_MIP_FILTER_SHIFT 20
+#define SS2_MIP_FILTER_MASK (0x3<<20)
+#define MIPFILTER_NONE 0
+#define MIPFILTER_NEAREST 1
+#define MIPFILTER_LINEAR 3
+#define SS2_MAG_FILTER_SHIFT 17
+#define SS2_MAG_FILTER_MASK (0x7<<17)
+#define FILTER_NEAREST 0
+#define FILTER_LINEAR 1
+#define FILTER_ANISOTROPIC 2
+#define FILTER_4X4_1 3
+#define FILTER_4X4_2 4
+#define FILTER_4X4_FLAT 5
+#define FILTER_6X5_MONO 6 /* XXX - check */
+#define SS2_MIN_FILTER_SHIFT 14
+#define SS2_MIN_FILTER_MASK (0x7<<14)
+#define SS2_LOD_BIAS_SHIFT 5
+#define SS2_LOD_BIAS_ONE (0x10<<5)
+#define SS2_LOD_BIAS_MASK (0x1ff<<5)
+/* Shadow requires:
+ * MT_X8{I,L,A}24 or MT_{I,L,A}16 texture format
+ * FILTER_4X4_x MIN and MAG filters
+ */
+#define SS2_SHADOW_ENABLE (1<<4)
+#define SS2_MAX_ANISO_MASK (1<<3)
+#define SS2_MAX_ANISO_2 (0<<3)
+#define SS2_MAX_ANISO_4 (1<<3)
+#define SS2_SHADOW_FUNC_SHIFT 0
+#define SS2_SHADOW_FUNC_MASK (0x7<<0)
+/* SS2_SHADOW_FUNC values: see COMPAREFUNC_* */
+
+#define SS3_MIN_LOD_SHIFT 24
+#define SS3_MIN_LOD_ONE (0x10<<24)
+#define SS3_MIN_LOD_MASK (0xff<<24)
+#define SS3_KILL_PIXEL_ENABLE (1<<17)
+#define SS3_TCX_ADDR_MODE_SHIFT 12
+#define SS3_TCX_ADDR_MODE_MASK (0x7<<12)
+#define TEXCOORDMODE_WRAP 0
+#define TEXCOORDMODE_MIRROR 1
+#define TEXCOORDMODE_CLAMP_EDGE 2
+#define TEXCOORDMODE_CUBE 3
+#define TEXCOORDMODE_CLAMP_BORDER 4
+#define TEXCOORDMODE_MIRROR_ONCE 5
+#define SS3_TCY_ADDR_MODE_SHIFT 9
+#define SS3_TCY_ADDR_MODE_MASK (0x7<<9)
+#define SS3_TCZ_ADDR_MODE_SHIFT 6
+#define SS3_TCZ_ADDR_MODE_MASK (0x7<<6)
+#define SS3_NORMALIZED_COORDS (1<<5)
+#define SS3_TEXTUREMAP_INDEX_SHIFT 1
+#define SS3_TEXTUREMAP_INDEX_MASK (0xf<<1)
+#define SS3_DEINTERLACER_ENABLE (1<<0)
+
+#define SS4_BORDER_COLOR_MASK (~0)
+
+/* 3DSTATE_SPAN_STIPPLE, p258
+ */
+#define _3DSTATE_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define ST1_ENABLE (1<<16)
+#define ST1_MASK (0xffff)
+
+#define _3DSTATE_DEFAULT_Z ((0x3<<29)|(0x1d<<24)|(0x98<<16))
+#define _3DSTATE_DEFAULT_DIFFUSE ((0x3<<29)|(0x1d<<24)|(0x99<<16))
+#define _3DSTATE_DEFAULT_SPECULAR ((0x3<<29)|(0x1d<<24)|(0x9a<<16))
+
+
+#define MI_FLUSH ((0<<29)|(4<<23))
+#define FLUSH_MAP_CACHE (1<<0)
+#define INHIBIT_FLUSH_RENDER_CACHE (1<<2)
+
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "glheader.h"
+#include "context.h"
+#include "macros.h"
+#include "enums.h"
+#include "dd.h"
+#include "tnl/tnl.h"
+#include "tnl/t_context.h"
+
+#include "texmem.h"
+
+#include "intel_fbo.h"
+#include "intel_screen.h"
+#include "intel_batchbuffer.h"
+
+#include "i915_context.h"
+#include "i915_reg.h"
+
+#define FILE_DEBUG_FLAG DEBUG_STATE
+
+static void
+i915StencilFuncSeparate(GLcontext * ctx, GLenum face, GLenum func, GLint ref,
+ GLuint mask)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ int test = intel_translate_compare_func(func);
+
+ mask = mask & 0xff;
+
+ DBG("%s : func: %s, ref : 0x%x, mask: 0x%x\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(func), ref, mask);
+
+
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ i915->state.Ctx[I915_CTXREG_STATE4] &= ~MODE4_ENABLE_STENCIL_TEST_MASK;
+ i915->state.Ctx[I915_CTXREG_STATE4] |= (ENABLE_STENCIL_TEST_MASK |
+ STENCIL_TEST_MASK(mask));
+
+ i915->state.Ctx[I915_CTXREG_LIS5] &= ~(S5_STENCIL_REF_MASK |
+ S5_STENCIL_TEST_FUNC_MASK);
+
+ i915->state.Ctx[I915_CTXREG_LIS5] |= ((ref << S5_STENCIL_REF_SHIFT) |
+ (test <<
+ S5_STENCIL_TEST_FUNC_SHIFT));
+}
+
+static void
+i915StencilMaskSeparate(GLcontext * ctx, GLenum face, GLuint mask)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+
+ DBG("%s : mask 0x%x\n", __FUNCTION__, mask);
+
+ mask = mask & 0xff;
+
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ i915->state.Ctx[I915_CTXREG_STATE4] &= ~MODE4_ENABLE_STENCIL_WRITE_MASK;
+ i915->state.Ctx[I915_CTXREG_STATE4] |= (ENABLE_STENCIL_WRITE_MASK |
+ STENCIL_WRITE_MASK(mask));
+}
+
+
+static void
+i915StencilOpSeparate(GLcontext * ctx, GLenum face, GLenum fail, GLenum zfail,
+ GLenum zpass)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ int fop = intel_translate_stencil_op(fail);
+ int dfop = intel_translate_stencil_op(zfail);
+ int dpop = intel_translate_stencil_op(zpass);
+
+
+ DBG("%s: fail : %s, zfail: %s, zpass : %s\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(fail),
+ _mesa_lookup_enum_by_nr(zfail), _mesa_lookup_enum_by_nr(zpass));
+
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+
+ i915->state.Ctx[I915_CTXREG_LIS5] &= ~(S5_STENCIL_FAIL_MASK |
+ S5_STENCIL_PASS_Z_FAIL_MASK |
+ S5_STENCIL_PASS_Z_PASS_MASK);
+
+ i915->state.Ctx[I915_CTXREG_LIS5] |= ((fop << S5_STENCIL_FAIL_SHIFT) |
+ (dfop <<
+ S5_STENCIL_PASS_Z_FAIL_SHIFT) |
+ (dpop <<
+ S5_STENCIL_PASS_Z_PASS_SHIFT));
+}
+
+static void
+i915AlphaFunc(GLcontext * ctx, GLenum func, GLfloat ref)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ int test = intel_translate_compare_func(func);
+ GLubyte refByte;
+
+ UNCLAMPED_FLOAT_TO_UBYTE(refByte, ref);
+
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ i915->state.Ctx[I915_CTXREG_LIS6] &= ~(S6_ALPHA_TEST_FUNC_MASK |
+ S6_ALPHA_REF_MASK);
+ i915->state.Ctx[I915_CTXREG_LIS6] |= ((test << S6_ALPHA_TEST_FUNC_SHIFT) |
+ (((GLuint) refByte) <<
+ S6_ALPHA_REF_SHIFT));
+}
+
+/* This function makes sure that the proper enables are
+ * set for LogicOp, Independent Alpha Blend, and Blending.
+ * It needs to be called from numerous places where we
+ * could change the LogicOp or Independent Alpha Blend without subsequent
+ * calls to glEnable.
+ */
+static void
+i915EvalLogicOpBlendState(GLcontext * ctx)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+
+ if (ctx->Color._LogicOpEnabled) {
+ i915->state.Ctx[I915_CTXREG_LIS5] |= S5_LOGICOP_ENABLE;
+ i915->state.Ctx[I915_CTXREG_LIS6] &= ~S6_CBUF_BLEND_ENABLE;
+ }
+ else {
+ i915->state.Ctx[I915_CTXREG_LIS5] &= ~S5_LOGICOP_ENABLE;
+
+ if (ctx->Color.BlendEnabled) {
+ i915->state.Ctx[I915_CTXREG_LIS6] |= S6_CBUF_BLEND_ENABLE;
+ }
+ else {
+ i915->state.Ctx[I915_CTXREG_LIS6] &= ~S6_CBUF_BLEND_ENABLE;
+ }
+ }
+}
+
+static void
+i915BlendColor(GLcontext * ctx, const GLfloat color[4])
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ GLubyte r, g, b, a;
+
+ DBG("%s\n", __FUNCTION__);
+
+ UNCLAMPED_FLOAT_TO_UBYTE(r, color[RCOMP]);
+ UNCLAMPED_FLOAT_TO_UBYTE(g, color[GCOMP]);
+ UNCLAMPED_FLOAT_TO_UBYTE(b, color[BCOMP]);
+ UNCLAMPED_FLOAT_TO_UBYTE(a, color[ACOMP]);
+
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ i915->state.Ctx[I915_CTXREG_BLENDCOLOR1] =
+ (a << 24) | (r << 16) | (g << 8) | b;
+}
+
+
+#define DST_BLND_FACT(f) ((f)<<S6_CBUF_DST_BLEND_FACT_SHIFT)
+#define SRC_BLND_FACT(f) ((f)<<S6_CBUF_SRC_BLEND_FACT_SHIFT)
+#define DST_ABLND_FACT(f) ((f)<<IAB_DST_FACTOR_SHIFT)
+#define SRC_ABLND_FACT(f) ((f)<<IAB_SRC_FACTOR_SHIFT)
+
+
+
+static GLuint
+translate_blend_equation(GLenum mode)
+{
+ switch (mode) {
+ case GL_FUNC_ADD:
+ return BLENDFUNC_ADD;
+ case GL_MIN:
+ return BLENDFUNC_MIN;
+ case GL_MAX:
+ return BLENDFUNC_MAX;
+ case GL_FUNC_SUBTRACT:
+ return BLENDFUNC_SUBTRACT;
+ case GL_FUNC_REVERSE_SUBTRACT:
+ return BLENDFUNC_REVERSE_SUBTRACT;
+ default:
+ return 0;
+ }
+}
+
+static void
+i915UpdateBlendState(GLcontext * ctx)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ GLuint iab = (i915->state.Ctx[I915_CTXREG_IAB] &
+ ~(IAB_SRC_FACTOR_MASK |
+ IAB_DST_FACTOR_MASK |
+ (BLENDFUNC_MASK << IAB_FUNC_SHIFT) | IAB_ENABLE));
+
+ GLuint lis6 = (i915->state.Ctx[I915_CTXREG_LIS6] &
+ ~(S6_CBUF_SRC_BLEND_FACT_MASK |
+ S6_CBUF_DST_BLEND_FACT_MASK | S6_CBUF_BLEND_FUNC_MASK));
+
+ GLuint eqRGB = ctx->Color.BlendEquationRGB;
+ GLuint eqA = ctx->Color.BlendEquationA;
+ GLuint srcRGB = ctx->Color.BlendSrcRGB;
+ GLuint dstRGB = ctx->Color.BlendDstRGB;
+ GLuint srcA = ctx->Color.BlendSrcA;
+ GLuint dstA = ctx->Color.BlendDstA;
+
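+ /* The GL spec defines GL_MIN/GL_MAX blending as ignoring the blend
+ * factors, so force them to GL_ONE to keep the hardware setup consistent.
+ */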
+ if (eqRGB == GL_MIN || eqRGB == GL_MAX) {
+ srcRGB = dstRGB = GL_ONE;
+ }
+
+ if (eqA == GL_MIN || eqA == GL_MAX) {
+ srcA = dstA = GL_ONE;
+ }
+
+ lis6 |= SRC_BLND_FACT(intel_translate_blend_factor(srcRGB));
+ lis6 |= DST_BLND_FACT(intel_translate_blend_factor(dstRGB));
+ lis6 |= translate_blend_equation(eqRGB) << S6_CBUF_BLEND_FUNC_SHIFT;
+
+ iab |= SRC_ABLND_FACT(intel_translate_blend_factor(srcA));
+ iab |= DST_ABLND_FACT(intel_translate_blend_factor(dstA));
+ iab |= translate_blend_equation(eqA) << IAB_FUNC_SHIFT;
+
+ if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB)
+ iab |= IAB_ENABLE;
+
+ if (iab != i915->state.Ctx[I915_CTXREG_IAB] ||
+ lis6 != i915->state.Ctx[I915_CTXREG_LIS6]) {
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ i915->state.Ctx[I915_CTXREG_IAB] = iab;
+ i915->state.Ctx[I915_CTXREG_LIS6] = lis6;
+ }
+
+ /* This will catch a logicop blend equation */
+ i915EvalLogicOpBlendState(ctx);
+}
+
+
+static void
+i915BlendFuncSeparate(GLcontext * ctx, GLenum srcRGB,
+ GLenum dstRGB, GLenum srcA, GLenum dstA)
+{
+ i915UpdateBlendState(ctx);
+}
+
+
+static void
+i915BlendEquationSeparate(GLcontext * ctx, GLenum eqRGB, GLenum eqA)
+{
+ i915UpdateBlendState(ctx);
+}
+
+
+static void
+i915DepthFunc(GLcontext * ctx, GLenum func)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ int test = intel_translate_compare_func(func);
+
+ DBG("%s\n", __FUNCTION__);
+
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ i915->state.Ctx[I915_CTXREG_LIS6] &= ~S6_DEPTH_TEST_FUNC_MASK;
+ i915->state.Ctx[I915_CTXREG_LIS6] |= test << S6_DEPTH_TEST_FUNC_SHIFT;
+}
+
+static void
+i915DepthMask(GLcontext * ctx, GLboolean flag)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+
+ DBG("%s flag (%d)\n", __FUNCTION__, flag);
+
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+
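+ /* GL never writes the depth buffer while the depth test is disabled,
+ * so gate the write enable on ctx->Depth.Test as well as on the mask.
+ */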
+ if (flag && ctx->Depth.Test)
+ i915->state.Ctx[I915_CTXREG_LIS6] |= S6_DEPTH_WRITE_ENABLE;
+ else
+ i915->state.Ctx[I915_CTXREG_LIS6] &= ~S6_DEPTH_WRITE_ENABLE;
+}
+
+/* =============================================================
+ * Polygon stipple
+ *
+ * The i915 natively supports a 4x4 stipple; GL wants 32x32.
+ * Fortunately stipple is usually a repeating pattern.
+ */
+static void
+i915PolygonStipple(GLcontext * ctx, const GLubyte * mask)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ const GLubyte *m = mask;
+ GLubyte p[4];
+ int i, j, k;
+ int active = (ctx->Polygon.StippleFlag &&
+ i915->intel.reduced_primitive == GL_TRIANGLES);
+ GLuint newMask;
+
+ if (active) {
+ I915_STATECHANGE(i915, I915_UPLOAD_STIPPLE);
+ i915->state.Stipple[I915_STPREG_ST1] &= ~ST1_ENABLE;
+ }
+
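+ /* Sample a 4x4 tile from the first four rows of the 32x32 GL stipple
+ * (low nibble of bytes 0, 4, 8 and 12), replicated into both nibbles
+ * of each byte for the comparison loop below.
+ */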
+ p[0] = mask[12] & 0xf;
+ p[0] |= p[0] << 4;
+ p[1] = mask[8] & 0xf;
+ p[1] |= p[1] << 4;
+ p[2] = mask[4] & 0xf;
+ p[2] |= p[2] << 4;
+ p[3] = mask[0] & 0xf;
+ p[3] |= p[3] << 4;
+
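+ /* Verify that the whole 32x32 pattern is just this 4x4 tile repeated;
+ * if it isn't, the hardware stipple can't represent it, so leave
+ * hw_stipple off.
+ */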
+ for (k = 0; k < 8; k++)
+ for (j = 3; j >= 0; j--)
+ for (i = 0; i < 4; i++, m++)
+ if (*m != p[j]) {
+ i915->intel.hw_stipple = 0;
+ return;
+ }
+
+ newMask = (((p[0] & 0xf) << 0) |
+ ((p[1] & 0xf) << 4) |
+ ((p[2] & 0xf) << 8) | ((p[3] & 0xf) << 12));
+
+
+ if (newMask == 0xffff || newMask == 0x0) {
+ /* this is needed to make the conform test suite pass */
+ i915->intel.hw_stipple = 0;
+ return;
+ }
+
+ i915->state.Stipple[I915_STPREG_ST1] &= ~0xffff;
+ i915->state.Stipple[I915_STPREG_ST1] |= newMask;
+ i915->intel.hw_stipple = 1;
+
+ if (active)
+ i915->state.Stipple[I915_STPREG_ST1] |= ST1_ENABLE;
+}
+
+
+/* =============================================================
+ * Hardware clipping
+ */
+static void
+i915Scissor(GLcontext * ctx, GLint x, GLint y, GLsizei w, GLsizei h)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ int x1, y1, x2, y2;
+
+ if (!ctx->DrawBuffer)
+ return;
+
+ DBG("%s %d,%d %dx%d\n", __FUNCTION__, x, y, w, h);
+
+ if (ctx->DrawBuffer->Name == 0) {
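+ /* Window system framebuffer: GL's origin is the lower left but the
+ * hardware renders with the origin at the upper left, so flip Y.
+ */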
+ x1 = x;
+ y1 = ctx->DrawBuffer->Height - (y + h);
+ x2 = x + w - 1;
+ y2 = y1 + h - 1;
+ DBG("%s %d..%d,%d..%d (inverted)\n", __FUNCTION__, x1, x2, y1, y2);
+ }
+ else {
+ /* FBO - not inverted
+ */
+ x1 = x;
+ y1 = y;
+ x2 = x + w - 1;
+ y2 = y + h - 1;
+ DBG("%s %d..%d,%d..%d (not inverted)\n", __FUNCTION__, x1, x2, y1, y2);
+ }
+
+ x1 = CLAMP(x1, 0, ctx->DrawBuffer->Width - 1);
+ y1 = CLAMP(y1, 0, ctx->DrawBuffer->Height - 1);
+ x2 = CLAMP(x2, 0, ctx->DrawBuffer->Width - 1);
+ y2 = CLAMP(y2, 0, ctx->DrawBuffer->Height - 1);
+
+ DBG("%s %d..%d,%d..%d (clamped)\n", __FUNCTION__, x1, x2, y1, y2);
+
+ I915_STATECHANGE(i915, I915_UPLOAD_BUFFERS);
+ i915->state.Buffer[I915_DESTREG_SR1] = (y1 << 16) | (x1 & 0xffff);
+ i915->state.Buffer[I915_DESTREG_SR2] = (y2 << 16) | (x2 & 0xffff);
+}
+
+static void
+i915LogicOp(GLcontext * ctx, GLenum opcode)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ int tmp = intel_translate_logic_op(opcode);
+
+ DBG("%s\n", __FUNCTION__);
+
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ i915->state.Ctx[I915_CTXREG_STATE4] &= ~LOGICOP_MASK;
+ i915->state.Ctx[I915_CTXREG_STATE4] |= LOGIC_OP_FUNC(tmp);
+}
+
+
+
+static void
+i915CullFaceFrontFace(GLcontext * ctx, GLenum unused)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ GLuint mode;
+
+ DBG("%s %d\n", __FUNCTION__,
+ ctx->DrawBuffer ? ctx->DrawBuffer->Name : 0);
+
+ if (!ctx->Polygon.CullFlag) {
+ mode = S4_CULLMODE_NONE;
+ }
+ else if (ctx->Polygon.CullFaceMode != GL_FRONT_AND_BACK) {
+ mode = S4_CULLMODE_CW;
+
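+ /* Start from clockwise culling and flip the winding once for each
+ * condition that inverts it: rendering to a user FBO (no Y flip),
+ * culling GL_FRONT instead of GL_BACK, or a non-CCW front face.
+ */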
+ if (ctx->DrawBuffer && ctx->DrawBuffer->Name != 0)
+ mode ^= (S4_CULLMODE_CW ^ S4_CULLMODE_CCW);
+ if (ctx->Polygon.CullFaceMode == GL_FRONT)
+ mode ^= (S4_CULLMODE_CW ^ S4_CULLMODE_CCW);
+ if (ctx->Polygon.FrontFace != GL_CCW)
+ mode ^= (S4_CULLMODE_CW ^ S4_CULLMODE_CCW);
+ }
+ else {
+ mode = S4_CULLMODE_BOTH;
+ }
+
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ i915->state.Ctx[I915_CTXREG_LIS4] &= ~S4_CULLMODE_MASK;
+ i915->state.Ctx[I915_CTXREG_LIS4] |= mode;
+}
+
+static void
+i915LineWidth(GLcontext * ctx, GLfloat widthf)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ int lis4 = i915->state.Ctx[I915_CTXREG_LIS4] & ~S4_LINE_WIDTH_MASK;
+ int width;
+
+ DBG("%s\n", __FUNCTION__);
+
+ width = (int) (widthf * 2);
+ CLAMP_SELF(width, 1, 0xf);
+ lis4 |= width << S4_LINE_WIDTH_SHIFT;
+
+ if (lis4 != i915->state.Ctx[I915_CTXREG_LIS4]) {
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ i915->state.Ctx[I915_CTXREG_LIS4] = lis4;
+ }
+}
+
+static void
+i915PointSize(GLcontext * ctx, GLfloat size)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ int lis4 = i915->state.Ctx[I915_CTXREG_LIS4] & ~S4_POINT_WIDTH_MASK;
+ GLint point_size = (int) size;
+
+ DBG("%s\n", __FUNCTION__);
+
+ CLAMP_SELF(point_size, 1, 255);
+ lis4 |= point_size << S4_POINT_WIDTH_SHIFT;
+
+ if (lis4 != i915->state.Ctx[I915_CTXREG_LIS4]) {
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ i915->state.Ctx[I915_CTXREG_LIS4] = lis4;
+ }
+}
+
+
+/* =============================================================
+ * Color masks
+ */
+
+static void
+i915ColorMask(GLcontext * ctx,
+ GLboolean r, GLboolean g, GLboolean b, GLboolean a)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ GLuint tmp = i915->state.Ctx[I915_CTXREG_LIS5] & ~S5_WRITEDISABLE_MASK;
+
+ DBG("%s r(%d) g(%d) b(%d) a(%d)\n", __FUNCTION__, r, g, b,
+ a);
+
+ if (!r)
+ tmp |= S5_WRITEDISABLE_RED;
+ if (!g)
+ tmp |= S5_WRITEDISABLE_GREEN;
+ if (!b)
+ tmp |= S5_WRITEDISABLE_BLUE;
+ if (!a)
+ tmp |= S5_WRITEDISABLE_ALPHA;
+
+ if (tmp != i915->state.Ctx[I915_CTXREG_LIS5]) {
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ i915->state.Ctx[I915_CTXREG_LIS5] = tmp;
+ }
+}
+
+static void
+update_specular(GLcontext * ctx)
+{
+ /* A hack to trigger the rebuild of the fragment program.
+ */
+ intel_context(ctx)->NewGLState |= _NEW_TEXTURE;
+ I915_CONTEXT(ctx)->tex_program.translated = 0;
+}
+
+static void
+i915LightModelfv(GLcontext * ctx, GLenum pname, const GLfloat * param)
+{
+ DBG("%s\n", __FUNCTION__);
+
+ if (pname == GL_LIGHT_MODEL_COLOR_CONTROL) {
+ update_specular(ctx);
+ }
+}
+
+static void
+i915ShadeModel(GLcontext * ctx, GLenum mode)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+
+ if (mode == GL_SMOOTH) {
+ i915->state.Ctx[I915_CTXREG_LIS4] &= ~(S4_FLATSHADE_ALPHA |
+ S4_FLATSHADE_COLOR |
+ S4_FLATSHADE_SPECULAR);
+ }
+ else {
+ i915->state.Ctx[I915_CTXREG_LIS4] |= (S4_FLATSHADE_ALPHA |
+ S4_FLATSHADE_COLOR |
+ S4_FLATSHADE_SPECULAR);
+ }
+}
+
+/* =============================================================
+ * Fog
+ */
+void
+i915_update_fog(GLcontext * ctx)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+ GLenum mode;
+ GLboolean enabled;
+ GLboolean try_pixel_fog;
+
+ if (ctx->FragmentProgram._Active) {
+ /* Pull in static fog state from program */
+
+ mode = ctx->FragmentProgram._Current->FogOption;
+ enabled = (mode != GL_NONE);
+ try_pixel_fog = 0;
+ }
+ else {
+ enabled = ctx->Fog.Enabled;
+ mode = ctx->Fog.Mode;
+
+ try_pixel_fog = (ctx->Fog.FogCoordinateSource == GL_FRAGMENT_DEPTH_EXT && ctx->Hint.Fog == GL_NICEST && 0); /* XXX - DISABLE -- Need ortho fallback */
+ }
+
+ if (!enabled) {
+ i915->vertex_fog = I915_FOG_NONE;
+ }
+ else if (try_pixel_fog) {
+
+ I915_STATECHANGE(i915, I915_UPLOAD_FOG);
+ i915->state.Fog[I915_FOGREG_MODE1] &= ~FMC1_FOGFUNC_MASK;
+ i915->vertex_fog = I915_FOG_PIXEL;
+
+ switch (mode) {
+ case GL_LINEAR:
+ if (ctx->Fog.End <= ctx->Fog.Start) {
+ /* XXX - this won't work with fragment programs. Need to
+ * either fall back or append fog instructions to the end of the
+ * program in the case of linear fog.
+ */
+ i915->state.Fog[I915_FOGREG_MODE1] |= FMC1_FOGFUNC_VERTEX;
+ i915->vertex_fog = I915_FOG_VERTEX;
+ }
+ else {
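+ /* GL linear fog is f = (end - z) / (end - start) = c1 - c2 * z,
+ * so c1 and c2 are exactly the constants the hardware needs.
+ */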
+ GLfloat c1 = ctx->Fog.End / (ctx->Fog.End - ctx->Fog.Start);
+ GLfloat c2 = 1.0 / (ctx->Fog.End - ctx->Fog.Start);
+
+ i915->state.Fog[I915_FOGREG_MODE1] &= ~FMC1_C1_MASK;
+ i915->state.Fog[I915_FOGREG_MODE1] |= FMC1_FOGFUNC_PIXEL_LINEAR;
+ i915->state.Fog[I915_FOGREG_MODE1] |=
+ ((GLuint) (c1 * FMC1_C1_ONE)) & FMC1_C1_MASK;
+
+ if (i915->state.Fog[I915_FOGREG_MODE1] & FMC1_FOGINDEX_Z) {
+ i915->state.Fog[I915_FOGREG_MODE2] =
+ (GLuint) (c2 * FMC2_C2_ONE);
+ }
+ else {
+ union
+ {
+ float f;
+ int i;
+ } fi;
+ fi.f = c2;
+ i915->state.Fog[I915_FOGREG_MODE2] = fi.i;
+ }
+ }
+ break;
+ case GL_EXP:
+ i915->state.Fog[I915_FOGREG_MODE1] |= FMC1_FOGFUNC_PIXEL_EXP;
+ break;
+ case GL_EXP2:
+ i915->state.Fog[I915_FOGREG_MODE1] |= FMC1_FOGFUNC_PIXEL_EXP2;
+ break;
+ default:
+ break;
+ }
+ }
+ else { /* if (i915->vertex_fog != I915_FOG_VERTEX) */
+
+ I915_STATECHANGE(i915, I915_UPLOAD_FOG);
+ i915->state.Fog[I915_FOGREG_MODE1] &= ~FMC1_FOGFUNC_MASK;
+ i915->state.Fog[I915_FOGREG_MODE1] |= FMC1_FOGFUNC_VERTEX;
+ i915->vertex_fog = I915_FOG_VERTEX;
+ }
+
+ {
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ I915_ACTIVESTATE(i915, I915_UPLOAD_FOG, enabled);
+ if (enabled)
+ i915->state.Ctx[I915_CTXREG_LIS5] |= S5_FOG_ENABLE;
+ else
+ i915->state.Ctx[I915_CTXREG_LIS5] &= ~S5_FOG_ENABLE;
+ }
+
+ if (enabled) {
+ _tnl_allow_vertex_fog(ctx, (i915->vertex_fog == I915_FOG_VERTEX));
+ _tnl_allow_pixel_fog(ctx, (i915->vertex_fog != I915_FOG_VERTEX));
+ }
+}
+
+static void
+i915Fogfv(GLcontext * ctx, GLenum pname, const GLfloat * param)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+
+ switch (pname) {
+ case GL_FOG_COORDINATE_SOURCE_EXT:
+ case GL_FOG_MODE:
+ case GL_FOG_START:
+ case GL_FOG_END:
+ break;
+
+ case GL_FOG_DENSITY:
+ I915_STATECHANGE(i915, I915_UPLOAD_FOG);
+
+ if (i915->state.Fog[I915_FOGREG_MODE1] & FMC1_FOGINDEX_Z) {
+ i915->state.Fog[I915_FOGREG_MODE3] = (GLuint) (ctx->Fog.Density *
+ FMC3_D_ONE);
+ }
+ else {
+ union
+ {
+ float f;
+ int i;
+ } fi;
+ fi.f = ctx->Fog.Density;
+ i915->state.Fog[I915_FOGREG_MODE3] = fi.i;
+ }
+ break;
+
+ case GL_FOG_COLOR:
+ I915_STATECHANGE(i915, I915_UPLOAD_FOG);
+ i915->state.Fog[I915_FOGREG_COLOR] =
+ (_3DSTATE_FOG_COLOR_CMD |
+ ((GLubyte) (ctx->Fog.Color[0] * 255.0F) << 16) |
+ ((GLubyte) (ctx->Fog.Color[1] * 255.0F) << 8) |
+ ((GLubyte) (ctx->Fog.Color[2] * 255.0F) << 0));
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void
+i915Hint(GLcontext * ctx, GLenum target, GLenum state)
+{
+ switch (target) {
+ case GL_FOG_HINT:
+ break;
+ default:
+ break;
+ }
+}
+
+/* =============================================================
+ */
+
+static void
+i915Enable(GLcontext * ctx, GLenum cap, GLboolean state)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+
+ switch (cap) {
+ case GL_TEXTURE_2D:
+ break;
+
+ case GL_LIGHTING:
+ case GL_COLOR_SUM:
+ update_specular(ctx);
+ break;
+
+ case GL_ALPHA_TEST:
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ if (state)
+ i915->state.Ctx[I915_CTXREG_LIS6] |= S6_ALPHA_TEST_ENABLE;
+ else
+ i915->state.Ctx[I915_CTXREG_LIS6] &= ~S6_ALPHA_TEST_ENABLE;
+ break;
+
+ case GL_BLEND:
+ i915EvalLogicOpBlendState(ctx);
+ break;
+
+ case GL_COLOR_LOGIC_OP:
+ i915EvalLogicOpBlendState(ctx);
+
+ /* Logicop doesn't seem to work at 16bpp:
+ */
+ if (i915->intel.intelScreen->cpp == 2) /* XXX FBO fix */
+ FALLBACK(&i915->intel, I915_FALLBACK_LOGICOP, state);
+ break;
+
+ case GL_FRAGMENT_PROGRAM_ARB:
+ break;
+
+ case GL_DITHER:
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ if (state)
+ i915->state.Ctx[I915_CTXREG_LIS5] |= S5_COLOR_DITHER_ENABLE;
+ else
+ i915->state.Ctx[I915_CTXREG_LIS5] &= ~S5_COLOR_DITHER_ENABLE;
+ break;
+
+ case GL_DEPTH_TEST:
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ if (state)
+ i915->state.Ctx[I915_CTXREG_LIS6] |= S6_DEPTH_TEST_ENABLE;
+ else
+ i915->state.Ctx[I915_CTXREG_LIS6] &= ~S6_DEPTH_TEST_ENABLE;
+
+ i915DepthMask(ctx, ctx->Depth.Mask);
+ break;
+
+ case GL_SCISSOR_TEST:
+ I915_STATECHANGE(i915, I915_UPLOAD_BUFFERS);
+ if (state)
+ i915->state.Buffer[I915_DESTREG_SENABLE] =
+ (_3DSTATE_SCISSOR_ENABLE_CMD | ENABLE_SCISSOR_RECT);
+ else
+ i915->state.Buffer[I915_DESTREG_SENABLE] =
+ (_3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT);
+ break;
+
+ case GL_LINE_SMOOTH:
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ if (state)
+ i915->state.Ctx[I915_CTXREG_LIS4] |= S4_LINE_ANTIALIAS_ENABLE;
+ else
+ i915->state.Ctx[I915_CTXREG_LIS4] &= ~S4_LINE_ANTIALIAS_ENABLE;
+ break;
+
+ case GL_FOG:
+ break;
+
+ case GL_CULL_FACE:
+ i915CullFaceFrontFace(ctx, 0);
+ break;
+
+ case GL_STENCIL_TEST:
+ {
+ GLboolean hw_stencil = GL_FALSE;
+ if (ctx->DrawBuffer) {
+ struct intel_renderbuffer *irbStencil
+ = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_STENCIL);
+ hw_stencil = (irbStencil && irbStencil->region);
+ }
+ if (hw_stencil) {
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ if (state)
+ i915->state.Ctx[I915_CTXREG_LIS5] |= (S5_STENCIL_TEST_ENABLE |
+ S5_STENCIL_WRITE_ENABLE);
+ else
+ i915->state.Ctx[I915_CTXREG_LIS5] &= ~(S5_STENCIL_TEST_ENABLE |
+ S5_STENCIL_WRITE_ENABLE);
+ }
+ else {
+ FALLBACK(&i915->intel, I915_FALLBACK_STENCIL, state);
+ }
+ }
+ break;
+
+ case GL_POLYGON_STIPPLE:
+ /* The stipple command worked on my 855GM box, but not my 845G.
+ * I'll do more testing later to find out exactly which hardware
+ * supports it. Disabled for now.
+ */
+ if (i915->intel.hw_stipple &&
+ i915->intel.reduced_primitive == GL_TRIANGLES) {
+ I915_STATECHANGE(i915, I915_UPLOAD_STIPPLE);
+ if (state)
+ i915->state.Stipple[I915_STPREG_ST1] |= ST1_ENABLE;
+ else
+ i915->state.Stipple[I915_STPREG_ST1] &= ~ST1_ENABLE;
+ }
+ break;
+
+ case GL_POLYGON_SMOOTH:
+ break;
+
+ case GL_POINT_SMOOTH:
+ break;
+
+ default:
+ ;
+ }
+}
+
+
+static void
+i915_init_packets(struct i915_context *i915)
+{
+ intelScreenPrivate *screen = i915->intel.intelScreen;
+
+ /* Zero all state */
+ memset(&i915->state, 0, sizeof(i915->state));
+
+
+ {
+ I915_STATECHANGE(i915, I915_UPLOAD_CTX);
+ /* Probably don't want to upload all this stuff every time one
+ * piece changes.
+ */
+ i915->state.Ctx[I915_CTXREG_LI] = (_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
+ I1_LOAD_S(2) |
+ I1_LOAD_S(4) |
+ I1_LOAD_S(5) | I1_LOAD_S(6) | (4));
+ i915->state.Ctx[I915_CTXREG_LIS2] = 0;
+ i915->state.Ctx[I915_CTXREG_LIS4] = 0;
+ i915->state.Ctx[I915_CTXREG_LIS5] = 0;
+
+ if (screen->cpp == 2) /* XXX FBO fix */
+ i915->state.Ctx[I915_CTXREG_LIS5] |= S5_COLOR_DITHER_ENABLE;
+
+
+ i915->state.Ctx[I915_CTXREG_LIS6] = (S6_COLOR_WRITE_ENABLE |
+ (2 << S6_TRISTRIP_PV_SHIFT));
+
+ i915->state.Ctx[I915_CTXREG_STATE4] = (_3DSTATE_MODES_4_CMD |
+ ENABLE_LOGIC_OP_FUNC |
+ LOGIC_OP_FUNC(LOGICOP_COPY) |
+ ENABLE_STENCIL_TEST_MASK |
+ STENCIL_TEST_MASK(0xff) |
+ ENABLE_STENCIL_WRITE_MASK |
+ STENCIL_WRITE_MASK(0xff));
+
+ i915->state.Ctx[I915_CTXREG_IAB] =
+ (_3DSTATE_INDEPENDENT_ALPHA_BLEND_CMD | IAB_MODIFY_ENABLE |
+ IAB_MODIFY_FUNC | IAB_MODIFY_SRC_FACTOR | IAB_MODIFY_DST_FACTOR);
+
+ i915->state.Ctx[I915_CTXREG_BLENDCOLOR0] =
+ _3DSTATE_CONST_BLEND_COLOR_CMD;
+ i915->state.Ctx[I915_CTXREG_BLENDCOLOR1] = 0;
+
+ }
+
+ {
+ I915_STATECHANGE(i915, I915_UPLOAD_STIPPLE);
+ i915->state.Stipple[I915_STPREG_ST0] = _3DSTATE_STIPPLE;
+ }
+
+
+ {
+ I915_STATECHANGE(i915, I915_UPLOAD_FOG);
+ i915->state.Fog[I915_FOGREG_MODE0] = _3DSTATE_FOG_MODE_CMD;
+ i915->state.Fog[I915_FOGREG_MODE1] = (FMC1_FOGFUNC_MODIFY_ENABLE |
+ FMC1_FOGFUNC_VERTEX |
+ FMC1_FOGINDEX_MODIFY_ENABLE |
+ FMC1_FOGINDEX_W |
+ FMC1_C1_C2_MODIFY_ENABLE |
+ FMC1_DENSITY_MODIFY_ENABLE);
+ i915->state.Fog[I915_FOGREG_COLOR] = _3DSTATE_FOG_COLOR_CMD;
+ }
+
+
+ {
+ I915_STATECHANGE(i915, I915_UPLOAD_BUFFERS);
+ /* color buffer offset/stride */
+ i915->state.Buffer[I915_DESTREG_CBUFADDR0] = _3DSTATE_BUF_INFO_CMD;
+ /* XXX FBO: remove this? Also gets set in i915_set_draw_region() */
+ i915->state.Buffer[I915_DESTREG_CBUFADDR1] = (BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(screen->front.pitch) | /* pitch in bytes */
+ BUF_3D_USE_FENCE);
+
+ i915->state.Buffer[I915_DESTREG_DBUFADDR0] = _3DSTATE_BUF_INFO_CMD;
+ /* XXX FBO: remove this? Also gets set in i915_set_draw_region() */
+ i915->state.Buffer[I915_DESTREG_DBUFADDR1] = (BUF_3D_ID_DEPTH | BUF_3D_PITCH(screen->depth.pitch) | /* pitch in bytes */
+ BUF_3D_USE_FENCE);
+
+ i915->state.Buffer[I915_DESTREG_DV0] = _3DSTATE_DST_BUF_VARS_CMD;
+
+ /* XXX FBO: remove this? Also gets set in i915_set_draw_region() */
+#if 0 /* seems we don't need this */
+ switch (screen->fbFormat) {
+ case DV_PF_565:
+ i915->state.Buffer[I915_DESTREG_DV1] = (DSTORG_HORT_BIAS(0x8) | /* .5 */
+ DSTORG_VERT_BIAS(0x8) | /* .5 */
+ LOD_PRECLAMP_OGL |
+ TEX_DEFAULT_COLOR_OGL |
+ DITHER_FULL_ALWAYS |
+ screen->fbFormat |
+ DEPTH_FRMT_16_FIXED);
+ break;
+ case DV_PF_8888:
+ i915->state.Buffer[I915_DESTREG_DV1] = (DSTORG_HORT_BIAS(0x8) | /* .5 */
+ DSTORG_VERT_BIAS(0x8) | /* .5 */
+ LOD_PRECLAMP_OGL |
+ TEX_DEFAULT_COLOR_OGL |
+ screen->fbFormat |
+ DEPTH_FRMT_24_FIXED_8_OTHER);
+ break;
+ }
+#endif
+
+
+ /* scissor */
+ i915->state.Buffer[I915_DESTREG_SENABLE] =
+ (_3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT);
+ i915->state.Buffer[I915_DESTREG_SR0] = _3DSTATE_SCISSOR_RECT_0_CMD;
+ i915->state.Buffer[I915_DESTREG_SR1] = 0;
+ i915->state.Buffer[I915_DESTREG_SR2] = 0;
+ }
+
+
+#if 0
+ {
+ I915_STATECHANGE(i915, I915_UPLOAD_DEFAULTS);
+ i915->state.Default[I915_DEFREG_C0] = _3DSTATE_DEFAULT_DIFFUSE;
+ i915->state.Default[I915_DEFREG_C1] = 0;
+ i915->state.Default[I915_DEFREG_S0] = _3DSTATE_DEFAULT_SPECULAR;
+ i915->state.Default[I915_DEFREG_S1] = 0;
+ i915->state.Default[I915_DEFREG_Z0] = _3DSTATE_DEFAULT_Z;
+ i915->state.Default[I915_DEFREG_Z1] = 0;
+ }
+#endif
+
+
+ /* These will be emitted at the head of every buffer, unless
+ * we get hardware contexts working.
+ */
+ i915->state.active = (I915_UPLOAD_PROGRAM |
+ I915_UPLOAD_STIPPLE |
+ I915_UPLOAD_CTX |
+ I915_UPLOAD_BUFFERS | I915_UPLOAD_INVARIENT);
+}
+
+void
+i915InitStateFunctions(struct dd_function_table *functions)
+{
+ functions->AlphaFunc = i915AlphaFunc;
+ functions->BlendColor = i915BlendColor;
+ functions->BlendEquationSeparate = i915BlendEquationSeparate;
+ functions->BlendFuncSeparate = i915BlendFuncSeparate;
+ functions->ColorMask = i915ColorMask;
+ functions->CullFace = i915CullFaceFrontFace;
+ functions->DepthFunc = i915DepthFunc;
+ functions->DepthMask = i915DepthMask;
+ functions->Enable = i915Enable;
+ functions->Fogfv = i915Fogfv;
+ functions->FrontFace = i915CullFaceFrontFace;
+ functions->Hint = i915Hint;
+ functions->LightModelfv = i915LightModelfv;
+ functions->LineWidth = i915LineWidth;
+ functions->LogicOpcode = i915LogicOp;
+ functions->PointSize = i915PointSize;
+ functions->PolygonStipple = i915PolygonStipple;
+ functions->Scissor = i915Scissor;
+ functions->ShadeModel = i915ShadeModel;
+ functions->StencilFuncSeparate = i915StencilFuncSeparate;
+ functions->StencilMaskSeparate = i915StencilMaskSeparate;
+ functions->StencilOpSeparate = i915StencilOpSeparate;
+}
+
+
+void
+i915InitState(struct i915_context *i915)
+{
+ GLcontext *ctx = &i915->intel.ctx;
+
+ i915_init_packets(i915);
+
+ intelInitState(ctx);
+
+ memcpy(&i915->initial, &i915->state, sizeof(i915->state));
+ i915->current = &i915->state;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "mtypes.h"
+#include "imports.h"
+#include "simple_list.h"
+#include "enums.h"
+#include "image.h"
+#include "texstore.h"
+#include "texformat.h"
+#include "texmem.h"
+#include "swrast/swrast.h"
+
+#include "mm.h"
+
+#include "intel_ioctl.h"
+
+#include "i915_context.h"
+#include "i915_reg.h"
+
+
+
+static void
+i915TexEnv(GLcontext * ctx, GLenum target,
+ GLenum pname, const GLfloat * param)
+{
+ struct i915_context *i915 = I915_CONTEXT(ctx);
+
+ switch (pname) {
+ case GL_TEXTURE_ENV_COLOR: /* Should be a tracked param */
+ case GL_TEXTURE_ENV_MODE:
+ case GL_COMBINE_RGB:
+ case GL_COMBINE_ALPHA:
+ case GL_SOURCE0_RGB:
+ case GL_SOURCE1_RGB:
+ case GL_SOURCE2_RGB:
+ case GL_SOURCE0_ALPHA:
+ case GL_SOURCE1_ALPHA:
+ case GL_SOURCE2_ALPHA:
+ case GL_OPERAND0_RGB:
+ case GL_OPERAND1_RGB:
+ case GL_OPERAND2_RGB:
+ case GL_OPERAND0_ALPHA:
+ case GL_OPERAND1_ALPHA:
+ case GL_OPERAND2_ALPHA:
+ case GL_RGB_SCALE:
+ case GL_ALPHA_SCALE:
+ i915->tex_program.translated = 0;
+ break;
+
+ case GL_TEXTURE_LOD_BIAS:{
+ GLuint unit = ctx->Texture.CurrentUnit;
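+ /* The LOD bias field appears to be signed fixed point with four
+ * fractional bits, hence the *16 scale and the [-256, 255] clamp
+ * below (inferred from the shift/mask, not from hardware docs).
+ */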
+ GLint b = (int) ((*param) * 16.0);
+ if (b > 255)
+ b = 255;
+ if (b < -256)
+ b = -256;
+ I915_STATECHANGE(i915, I915_UPLOAD_TEX(unit));
+ i915->lodbias_ss2[unit] =
+ ((b << SS2_LOD_BIAS_SHIFT) & SS2_LOD_BIAS_MASK);
+ break;
+ }
+
+ default:
+ break;
+ }
+}
+
+
+static void
+i915BindTexture(GLcontext * ctx, GLenum target,
+ struct gl_texture_object *texobj)
+{
+ /* Need this if image format changes between bound textures.
+ * Could try to short-circuit by checking for differences in
+ * state between incoming and outgoing textures:
+ */
+ I915_CONTEXT(ctx)->tex_program.translated = 0;
+}
+
+
+
+void
+i915InitTextureFuncs(struct dd_function_table *functions)
+{
+ functions->BindTexture = i915BindTexture;
+ functions->TexEnv = i915TexEnv;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Code to lay out images in a mipmap tree for i915 and i945
+ * respectively.
+ */
+
+#include "intel_mipmap_tree.h"
+#include "macros.h"
+#include "intel_context.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+static GLint initial_offsets[6][2] = { {0, 0},
+{0, 2},
+{1, 0},
+{1, 2},
+{1, 1},
+{1, 3}
+};
+
+
+static GLint step_offsets[6][2] = { {0, 2},
+{0, 2},
+{-1, 2},
+{-1, 2},
+{-1, 1},
+{-1, 1}
+};
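+/* The offsets are in units of the base cube dimension: the classic i915
+ * cube layout packs the six faces into a region two faces wide and four
+ * faces tall, and each face's mipmap chain then walks from its initial
+ * offset by the per-face step (layout inferred from these tables and the
+ * code below).
+ */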
+
+static GLuint
+minify(GLuint d)
+{
+ return MAX2(1, d >> 1);
+}
+
+GLboolean
+i915_miptree_layout(struct intel_mipmap_tree * mt)
+{
+ GLint level;
+
+ switch (mt->target) {
+ case GL_TEXTURE_CUBE_MAP:{
+ const GLuint dim = mt->width0;
+ GLuint face;
+
+ /* double pitch for cube layouts */
+ mt->pitch = ((dim * mt->cpp * 2 + 3) & ~3) / mt->cpp;
+ mt->total_height = dim * 4;
+
+ for (level = mt->first_level; level <= mt->last_level; level++)
+ intel_miptree_set_level_info(mt, level, 6,
+ 0, 0,
+ mt->pitch, mt->total_height, 1);
+
+ for (face = 0; face < 6; face++) {
+ GLuint x = initial_offsets[face][0] * dim;
+ GLuint y = initial_offsets[face][1] * dim;
+ GLuint d = dim;
+
+ for (level = mt->first_level; level <= mt->last_level; level++) {
+ intel_miptree_set_image_offset(mt, level, face, x, y);
+
+ if (d == 0)
+ _mesa_printf("cube mipmap %d/%d (%d..%d) is 0x0\n",
+ face, level, mt->first_level, mt->last_level);
+
+ d >>= 1;
+ x += step_offsets[face][0] * d;
+ y += step_offsets[face][1] * d;
+ }
+ }
+ break;
+ }
+ case GL_TEXTURE_3D:{
+ GLuint width = mt->width0;
+ GLuint height = mt->height0;
+ GLuint depth = mt->depth0;
+ GLuint stack_height = 0;
+
+ /* Calculate the size of a single slice.
+ */
+ mt->pitch = ((mt->width0 * mt->cpp + 3) & ~3) / mt->cpp;
+
+ /* XXX: hardware expects/requires 9 levels at minimum.
+ */
+ for (level = mt->first_level; level <= MAX2(8, mt->last_level);
+ level++) {
+ intel_miptree_set_level_info(mt, level, 1, 0, mt->total_height,
+ width, height, depth);
+
+
+ stack_height += MAX2(2, height);
+
+ width = minify(width);
+ height = minify(height);
+ depth = minify(depth);
+ }
+
+ /* Fixup depth image_offsets:
+ */
+ depth = mt->depth0;
+ for (level = mt->first_level; level <= mt->last_level; level++) {
+ GLuint i;
+ for (i = 0; i < depth; i++)
+ intel_miptree_set_image_offset(mt, level, i,
+ 0, i * stack_height);
+
+ depth = minify(depth);
+ }
+
+
+ /* Multiply slice size by texture depth for total size. It's
+ * remarkable how wasteful of memory the i915 texture layouts
+ * are. They are largely fixed in the i945.
+ */
+ mt->total_height = stack_height * mt->depth0;
+ break;
+ }
+
+ default:{
+ GLuint width = mt->width0;
+ GLuint height = mt->height0;
+ GLuint img_height;
+
+ mt->pitch = ((mt->width0 * mt->cpp + 3) & ~3) / mt->cpp;
+ mt->total_height = 0;
+
+ for (level = mt->first_level; level <= mt->last_level; level++) {
+ intel_miptree_set_level_info(mt, level, 1,
+ 0, mt->total_height,
+ width, height, 1);
+
+ if (mt->compressed)
+ img_height = MAX2(1, height / 4);
+ else
+ img_height = MAX2(2, height);
+
+ mt->total_height += img_height;
+ mt->total_height += 1;
+ mt->total_height &= ~1;
+
+ width = minify(width);
+ height = minify(height);
+ }
+ break;
+ }
+ }
+ DBG("%s: %dx%dx%d - sz 0x%x\n", __FUNCTION__,
+ mt->pitch,
+ mt->total_height, mt->cpp, mt->pitch * mt->total_height * mt->cpp);
+
+ return GL_TRUE;
+}
+
+
+GLboolean
+i945_miptree_layout(struct intel_mipmap_tree * mt)
+{
+ GLint level;
+
+ switch (mt->target) {
+ case GL_TEXTURE_CUBE_MAP:{
+ const GLuint dim = mt->width0;
+ GLuint face;
+
+ /* Depending on the size of the largest images, pitch can be
+ * determined either by the old-style packing of cubemap faces,
+ * or the final row of 4x4, 2x2 and 1x1 faces below this.
+ */
+ if (dim > 32)
+ mt->pitch = ((dim * mt->cpp * 2 + 3) & ~3) / mt->cpp;
+ else
+ mt->pitch = 14 * 8;
+
+ mt->total_height = dim * 4 + 4;
+
+ /* Set all the levels to effectively occupy the whole rectangular region.
+ */
+ for (level = mt->first_level; level <= mt->last_level; level++)
+ intel_miptree_set_level_info(mt, level, 6,
+ 0, 0,
+ mt->pitch, mt->total_height, 1);
+
+
+
+ for (face = 0; face < 6; face++) {
+ GLuint x = initial_offsets[face][0] * dim;
+ GLuint y = initial_offsets[face][1] * dim;
+ GLuint d = dim;
+
+ if (dim == 4 && face >= 4) {
+ y = mt->total_height - 4;
+ x = (face - 4) * 8;
+ }
+ else if (dim < 4) {
+ y = mt->total_height - 4;
+ x = face * 8;
+ }
+
+ for (level = mt->first_level; level <= mt->last_level; level++) {
+ intel_miptree_set_image_offset(mt, level, face, x, y);
+
+ d >>= 1;
+
+ switch (d) {
+ case 4:
+ switch (face) {
+ case FACE_POS_X:
+ case FACE_NEG_X:
+ x += step_offsets[face][0] * d;
+ y += step_offsets[face][1] * d;
+ break;
+ case FACE_POS_Y:
+ case FACE_NEG_Y:
+ y += 12;
+ x -= 8;
+ break;
+ case FACE_POS_Z:
+ case FACE_NEG_Z:
+ y = mt->total_height - 4;
+ x = (face - 4) * 8;
+ break;
+ }
+
+ case 2:
+ y = mt->total_height - 4;
+ x = 16 + face * 8;
+ break;
+
+ case 1:
+ x += 48;
+ break;
+
+ default:
+ x += step_offsets[face][0] * d;
+ y += step_offsets[face][1] * d;
+ break;
+ }
+ }
+ }
+ break;
+ }
+ case GL_TEXTURE_3D:{
+ GLuint width = mt->width0;
+ GLuint height = mt->height0;
+ GLuint depth = mt->depth0;
+ GLuint pack_x_pitch, pack_x_nr;
+ GLuint pack_y_pitch;
+ GLuint level;
+
+ mt->pitch = ((mt->width0 * mt->cpp + 3) & ~3) / mt->cpp;
+ mt->total_height = 0;
+
+ pack_y_pitch = MAX2(mt->height0, 2);
+ pack_x_pitch = mt->pitch;
+ pack_x_nr = 1;
+
+ for (level = mt->first_level; level <= mt->last_level; level++) {
+ GLuint nr_images = mt->target == GL_TEXTURE_3D ? depth : 6;
+ GLint x = 0;
+ GLint y = 0;
+ GLint q, j;
+
+ intel_miptree_set_level_info(mt, level, nr_images,
+ 0, mt->total_height,
+ width, height, depth);
+
+ for (q = 0; q < nr_images;) {
+ for (j = 0; j < pack_x_nr && q < nr_images; j++, q++) {
+ intel_miptree_set_image_offset(mt, level, q, x, y);
+ x += pack_x_pitch;
+ }
+
+ x = 0;
+ y += pack_y_pitch;
+ }
+
+
+ mt->total_height += y;
+
+ if (pack_x_pitch > 4) {
+ pack_x_pitch >>= 1;
+ pack_x_nr <<= 1;
+ assert(pack_x_pitch * pack_x_nr <= mt->pitch);
+ }
+
+ if (pack_y_pitch > 2) {
+ pack_y_pitch >>= 1;
+ }
+
+ width = minify(width);
+ height = minify(height);
+ depth = minify(depth);
+ }
+ break;
+ }
+
+ case GL_TEXTURE_1D:
+ case GL_TEXTURE_2D:
+ case GL_TEXTURE_RECTANGLE_ARB:{
+ GLuint x = 0;
+ GLuint y = 0;
+ GLuint width = mt->width0;
+ GLuint height = mt->height0;
+ GLint align_h = 2;
+
+ mt->pitch = ((mt->width0 * mt->cpp + 3) & ~3) / mt->cpp;
+ mt->total_height = 0;
+
+ for (level = mt->first_level; level <= mt->last_level; level++) {
+ GLuint img_height;
+
+ intel_miptree_set_level_info(mt, level, 1,
+ x, y,
+ width,
+ mt->compressed ? height/4 : height, 1);
+
+
+ if (mt->compressed)
+ img_height = MAX2(1, height / 4);
+ else
+ img_height = MAX2(align_h, height);
+
+ /* LPT change: step right after second mipmap.
+ */
+ if (level == mt->first_level + 1) {
+ x += mt->pitch / 2;
+ x = (x + 3) & ~3;
+ }
+ else {
+ y += img_height;
+ y += align_h - 1;
+ y &= ~(align_h - 1);
+ }
+
+ /* Because the images are packed better, the final offset
+ * might not be the maximal one:
+ */
+ mt->total_height = MAX2(mt->total_height, y);
+
+ width = minify(width);
+ height = minify(height);
+ }
+ break;
+ }
+ default:
+ _mesa_problem(NULL, "Unexpected tex target in i945_miptree_layout()");
+ }
+
+ DBG("%s: %dx%dx%d - sz 0x%x\n", __FUNCTION__,
+ mt->pitch,
+ mt->total_height, mt->cpp, mt->pitch * mt->total_height * mt->cpp);
+
+ return GL_TRUE;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "mtypes.h"
+#include "enums.h"
+#include "texformat.h"
+#include "dri_bufmgr.h"
+
+#include "intel_mipmap_tree.h"
+#include "intel_tex.h"
+
+#include "i915_context.h"
+#include "i915_reg.h"
+
+
+static GLuint
+translate_texture_format(GLuint mesa_format)
+{
+ switch (mesa_format) {
+ case MESA_FORMAT_L8:
+ return MAPSURF_8BIT | MT_8BIT_L8;
+ case MESA_FORMAT_I8:
+ return MAPSURF_8BIT | MT_8BIT_I8;
+ case MESA_FORMAT_A8:
+ return MAPSURF_8BIT | MT_8BIT_A8;
+ case MESA_FORMAT_AL88:
+ return MAPSURF_16BIT | MT_16BIT_AY88;
+ case MESA_FORMAT_RGB565:
+ return MAPSURF_16BIT | MT_16BIT_RGB565;
+ case MESA_FORMAT_ARGB1555:
+ return MAPSURF_16BIT | MT_16BIT_ARGB1555;
+ case MESA_FORMAT_ARGB4444:
+ return MAPSURF_16BIT | MT_16BIT_ARGB4444;
+ case MESA_FORMAT_ARGB8888:
+ return MAPSURF_32BIT | MT_32BIT_ARGB8888;
+ case MESA_FORMAT_YCBCR_REV:
+ return (MAPSURF_422 | MT_422_YCRCB_NORMAL);
+ case MESA_FORMAT_YCBCR:
+ return (MAPSURF_422 | MT_422_YCRCB_SWAPY);
+ case MESA_FORMAT_RGB_FXT1:
+ case MESA_FORMAT_RGBA_FXT1:
+ return (MAPSURF_COMPRESSED | MT_COMPRESS_FXT1);
+ case MESA_FORMAT_Z16:
+ return (MAPSURF_16BIT | MT_16BIT_L16);
+ case MESA_FORMAT_RGBA_DXT1:
+ case MESA_FORMAT_RGB_DXT1:
+ return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT1);
+ case MESA_FORMAT_RGBA_DXT3:
+ return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT2_3);
+ case MESA_FORMAT_RGBA_DXT5:
+ return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT4_5);
+ case MESA_FORMAT_Z24_S8:
+ return (MAPSURF_32BIT | MT_32BIT_xL824);
+ default:
+ fprintf(stderr, "%s: bad image format %x\n", __FUNCTION__, mesa_format);
+ abort();
+ return 0;
+ }
+}
+
+
+
+
+/* The i915 (and related graphics cores) do not support GL_CLAMP. The
+ * Intel drivers for "other operating systems" implement GL_CLAMP as
+ * GL_CLAMP_TO_EDGE, so the same is done here.
+ */
+static GLuint
+translate_wrap_mode(GLenum wrap)
+{
+ switch (wrap) {
+ case GL_REPEAT:
+ return TEXCOORDMODE_WRAP;
+ case GL_CLAMP:
+ return TEXCOORDMODE_CLAMP_EDGE; /* not quite correct */
+ case GL_CLAMP_TO_EDGE:
+ return TEXCOORDMODE_CLAMP_EDGE;
+ case GL_CLAMP_TO_BORDER:
+ return TEXCOORDMODE_CLAMP_BORDER;
+ case GL_MIRRORED_REPEAT:
+ return TEXCOORDMODE_MIRROR;
+ default:
+ return TEXCOORDMODE_WRAP;
+ }
+}
+
+
+
+/* Recalculate all state from scratch. Perhaps not the most
+ * efficient, but this has gotten complex enough that we need
+ * something which is understandable and reliable.
+ */
+static GLboolean
+i915_update_tex_unit(struct intel_context *intel, GLuint unit, GLuint ss3)
+{
+ GLcontext *ctx = &intel->ctx;
+ struct i915_context *i915 = i915_context(ctx);
+ struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
+ struct intel_texture_object *intelObj = intel_texture_object(tObj);
+ struct gl_texture_image *firstImage;
+ GLuint *state = i915->state.Tex[unit];
+
+ memset(state, 0, sizeof(i915->state.Tex[unit])); /* clear the whole per-unit texture state */
+
+ /* We need to refcount these. */
+
+ if (i915->state.tex_buffer[unit] != NULL) {
+ driBOUnReference(i915->state.tex_buffer[unit]);
+ i915->state.tex_buffer[unit] = NULL;
+ }
+
+ if (!intel_finalize_mipmap_tree(intel, unit))
+ return GL_FALSE;
+
+ /* Get first image here, since intelObj->firstLevel will get set in
+ * the intel_finalize_mipmap_tree() call above.
+ */
+ firstImage = tObj->Image[0][intelObj->firstLevel];
+
+ i915->state.tex_buffer[unit] = driBOReference(intelObj->mt->region->buffer);
+ i915->state.tex_offset[unit] = intel_miptree_image_offset(intelObj->mt, 0,
+ intelObj->
+ firstLevel);
+
+ state[I915_TEXREG_MS3] =
+ (((firstImage->Height - 1) << MS3_HEIGHT_SHIFT) |
+ ((firstImage->Width - 1) << MS3_WIDTH_SHIFT) |
+ translate_texture_format(firstImage->TexFormat->MesaFormat) |
+ MS3_USE_FENCE_REGS);
+
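+ /* The MAX_LOD field seems to be expressed in quarter-LOD steps, hence
+ * the *4 on the mipmap level count (an assumption, not taken from
+ * hardware documentation).
+ */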
+ state[I915_TEXREG_MS4] =
+ (((((intelObj->mt->pitch * intelObj->mt->cpp) / 4) -
+ 1) << MS4_PITCH_SHIFT) | MS4_CUBE_FACE_ENA_MASK |
+ ((((intelObj->lastLevel -
+ intelObj->firstLevel) *
+ 4)) << MS4_MAX_LOD_SHIFT) | ((firstImage->Depth -
+ 1) << MS4_VOLUME_DEPTH_SHIFT));
+
+
+ {
+ GLuint minFilt, mipFilt, magFilt;
+
+ switch (tObj->MinFilter) {
+ case GL_NEAREST:
+ minFilt = FILTER_NEAREST;
+ mipFilt = MIPFILTER_NONE;
+ break;
+ case GL_LINEAR:
+ minFilt = FILTER_LINEAR;
+ mipFilt = MIPFILTER_NONE;
+ break;
+ case GL_NEAREST_MIPMAP_NEAREST:
+ minFilt = FILTER_NEAREST;
+ mipFilt = MIPFILTER_NEAREST;
+ break;
+ case GL_LINEAR_MIPMAP_NEAREST:
+ minFilt = FILTER_LINEAR;
+ mipFilt = MIPFILTER_NEAREST;
+ break;
+ case GL_NEAREST_MIPMAP_LINEAR:
+ minFilt = FILTER_NEAREST;
+ mipFilt = MIPFILTER_LINEAR;
+ break;
+ case GL_LINEAR_MIPMAP_LINEAR:
+ minFilt = FILTER_LINEAR;
+ mipFilt = MIPFILTER_LINEAR;
+ break;
+ default:
+ return GL_FALSE;
+ }
+
+ if (tObj->MaxAnisotropy > 1.0) {
+ minFilt = FILTER_ANISOTROPIC;
+ magFilt = FILTER_ANISOTROPIC;
+ }
+ else {
+ switch (tObj->MagFilter) {
+ case GL_NEAREST:
+ magFilt = FILTER_NEAREST;
+ break;
+ case GL_LINEAR:
+ magFilt = FILTER_LINEAR;
+ break;
+ default:
+ return GL_FALSE;
+ }
+ }
+
+ state[I915_TEXREG_SS2] = i915->lodbias_ss2[unit];
+
+ /* YUV conversion:
+ */
+ if (firstImage->TexFormat->MesaFormat == MESA_FORMAT_YCBCR ||
+ firstImage->TexFormat->MesaFormat == MESA_FORMAT_YCBCR_REV)
+ state[I915_TEXREG_SS2] |= SS2_COLORSPACE_CONVERSION;
+
+ /* Shadow:
+ */
+ if (tObj->CompareMode == GL_COMPARE_R_TO_TEXTURE_ARB &&
+ tObj->Target != GL_TEXTURE_3D) {
+
+ state[I915_TEXREG_SS2] |=
+ (SS2_SHADOW_ENABLE |
+ intel_translate_compare_func(tObj->CompareFunc));
+
+ minFilt = FILTER_4X4_FLAT;
+ magFilt = FILTER_4X4_FLAT;
+ }
+
+ state[I915_TEXREG_SS2] |= ((minFilt << SS2_MIN_FILTER_SHIFT) |
+ (mipFilt << SS2_MIP_FILTER_SHIFT) |
+ (magFilt << SS2_MAG_FILTER_SHIFT));
+ }
+
+ {
+ GLenum ws = tObj->WrapS;
+ GLenum wt = tObj->WrapT;
+ GLenum wr = tObj->WrapR;
+
+
+ /* 3D textures don't seem to respect the border color.
+ * Fallback if there's ever a danger that they might refer to
+ * it.
+ *
+ * Effectively this means fallback on 3D clamp or
+ * clamp_to_border.
+ */
+ if (tObj->Target == GL_TEXTURE_3D &&
+ (tObj->MinFilter != GL_NEAREST ||
+ tObj->MagFilter != GL_NEAREST) &&
+ (ws == GL_CLAMP ||
+ wt == GL_CLAMP ||
+ wr == GL_CLAMP ||
+ ws == GL_CLAMP_TO_BORDER ||
+ wt == GL_CLAMP_TO_BORDER || wr == GL_CLAMP_TO_BORDER))
+ return GL_FALSE;
+
+
+ state[I915_TEXREG_SS3] = ss3; /* SS3_NORMALIZED_COORDS */
+
+ state[I915_TEXREG_SS3] |=
+ ((translate_wrap_mode(ws) << SS3_TCX_ADDR_MODE_SHIFT) |
+ (translate_wrap_mode(wt) << SS3_TCY_ADDR_MODE_SHIFT) |
+ (translate_wrap_mode(wr) << SS3_TCZ_ADDR_MODE_SHIFT));
+
+ state[I915_TEXREG_SS3] |= (unit << SS3_TEXTUREMAP_INDEX_SHIFT);
+ }
+
+
+ state[I915_TEXREG_SS4] = INTEL_PACKCOLOR8888(tObj->_BorderChan[0],
+ tObj->_BorderChan[1],
+ tObj->_BorderChan[2],
+ tObj->_BorderChan[3]);
+
+
+ I915_ACTIVESTATE(i915, I915_UPLOAD_TEX(unit), GL_TRUE);
+ /* memcmp was already disabled, but definitely won't work as the
+ * region might now change and that wouldn't be detected:
+ */
+ I915_STATECHANGE(i915, I915_UPLOAD_TEX(unit));
+
+
+#if 0
+ DBG(TEXTURE, "state[I915_TEXREG_SS2] = 0x%x\n", state[I915_TEXREG_SS2]);
+ DBG(TEXTURE, "state[I915_TEXREG_SS3] = 0x%x\n", state[I915_TEXREG_SS3]);
+ DBG(TEXTURE, "state[I915_TEXREG_SS4] = 0x%x\n", state[I915_TEXREG_SS4]);
+ DBG(TEXTURE, "state[I915_TEXREG_MS2] = 0x%x\n", state[I915_TEXREG_MS2]);
+ DBG(TEXTURE, "state[I915_TEXREG_MS3] = 0x%x\n", state[I915_TEXREG_MS3]);
+ DBG(TEXTURE, "state[I915_TEXREG_MS4] = 0x%x\n", state[I915_TEXREG_MS4]);
+#endif
+
+ return GL_TRUE;
+}
+
+
+
+
+void
+i915UpdateTextureState(struct intel_context *intel)
+{
+ GLboolean ok = GL_TRUE;
+ GLuint i;
+
+ for (i = 0; i < I915_TEX_UNITS && ok; i++) {
+ switch (intel->ctx.Texture.Unit[i]._ReallyEnabled) {
+ case TEXTURE_1D_BIT:
+ case TEXTURE_2D_BIT:
+ case TEXTURE_CUBE_BIT:
+ case TEXTURE_3D_BIT:
+ ok = i915_update_tex_unit(intel, i, SS3_NORMALIZED_COORDS);
+ break;
+ case TEXTURE_RECT_BIT:
+ ok = i915_update_tex_unit(intel, i, 0);
+ break;
+ case 0:{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+ if (i915->state.active & I915_UPLOAD_TEX(i))
+ I915_ACTIVESTATE(i915, I915_UPLOAD_TEX(i), GL_FALSE);
+
+ if (i915->state.tex_buffer[i] != NULL) {
+ driBOUnReference(i915->state.tex_buffer[i]);
+ i915->state.tex_buffer[i] = NULL;
+ }
+
+ break;
+ }
+ default:
+ ok = GL_FALSE;
+ break;
+ }
+ }
+
+ FALLBACK(intel, I915_FALLBACK_TEXTURE, !ok);
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+
+#include "glheader.h"
+#include "mtypes.h"
+#include "imports.h"
+#include "macros.h"
+#include "colormac.h"
+
+#include "tnl/t_context.h"
+#include "tnl/t_vertex.h"
+
+#include "intel_batchbuffer.h"
+#include "intel_tex.h"
+#include "intel_regions.h"
+
+#include "i915_reg.h"
+#include "i915_context.h"
+
+static void
+i915_render_start(struct intel_context *intel)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+
+ i915ValidateFragmentProgram(i915);
+}
+
+
+static void
+i915_reduced_primitive_state(struct intel_context *intel, GLenum rprim)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+ GLuint st1 = i915->state.Stipple[I915_STPREG_ST1];
+
+ st1 &= ~ST1_ENABLE;
+
+ switch (rprim) {
+ case GL_TRIANGLES:
+ if (intel->ctx.Polygon.StippleFlag && intel->hw_stipple)
+ st1 |= ST1_ENABLE;
+ break;
+ case GL_LINES:
+ case GL_POINTS:
+ default:
+ break;
+ }
+
+ i915->intel.reduced_primitive = rprim;
+
+ if (st1 != i915->state.Stipple[I915_STPREG_ST1]) {
+ INTEL_FIREVERTICES(intel);
+
+ I915_STATECHANGE(i915, I915_UPLOAD_STIPPLE);
+ i915->state.Stipple[I915_STPREG_ST1] = st1;
+ }
+}
+
+
+/* Pull apart the vertex format registers and figure out how large a
+ * vertex is supposed to be.
+ */
+static GLboolean
+i915_check_vertex_size(struct intel_context *intel, GLuint expected)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+ int lis2 = i915->current->Ctx[I915_CTXREG_LIS2];
+ int lis4 = i915->current->Ctx[I915_CTXREG_LIS4];
+ int i, sz = 0;
+
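+ /* Walk the S4 vertex format bits and then the eight S2 texcoord format
+ * fields, accumulating the dword size of a vertex, and compare it with
+ * what the caller expects.
+ */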
+ switch (lis4 & S4_VFMT_XYZW_MASK) {
+ case S4_VFMT_XY:
+ sz = 2;
+ break;
+ case S4_VFMT_XYZ:
+ sz = 3;
+ break;
+ case S4_VFMT_XYW:
+ sz = 3;
+ break;
+ case S4_VFMT_XYZW:
+ sz = 4;
+ break;
+ default:
+ fprintf(stderr, "no xyzw specified\n");
+ return 0;
+ }
+
+ if (lis4 & S4_VFMT_SPEC_FOG)
+ sz++;
+ if (lis4 & S4_VFMT_COLOR)
+ sz++;
+ if (lis4 & S4_VFMT_DEPTH_OFFSET)
+ sz++;
+ if (lis4 & S4_VFMT_POINT_WIDTH)
+ sz++;
+ if (lis4 & S4_VFMT_FOG_PARAM)
+ sz++;
+
+ for (i = 0; i < 8; i++) {
+ switch (lis2 & S2_TEXCOORD_FMT0_MASK) {
+ case TEXCOORDFMT_2D:
+ sz += 2;
+ break;
+ case TEXCOORDFMT_3D:
+ sz += 3;
+ break;
+ case TEXCOORDFMT_4D:
+ sz += 4;
+ break;
+ case TEXCOORDFMT_1D:
+ sz += 1;
+ break;
+ case TEXCOORDFMT_2D_16:
+ sz += 1;
+ break;
+ case TEXCOORDFMT_4D_16:
+ sz += 2;
+ break;
+ case TEXCOORDFMT_NOT_PRESENT:
+ break;
+ default:
+ fprintf(stderr, "bad texcoord fmt %d\n", i);
+ return GL_FALSE;
+ }
+ lis2 >>= S2_TEXCOORD_FMT1_SHIFT;
+ }
+
+ if (sz != expected)
+ fprintf(stderr, "vertex size mismatch %d/%d\n", sz, expected);
+
+ return sz == expected;
+}
+
+
+static void
+i915_emit_invarient_state(struct intel_context *intel)
+{
+ BATCH_LOCALS;
+
+ BEGIN_BATCH(200, 0);
+
+ OUT_BATCH(_3DSTATE_AA_CMD |
+ AA_LINE_ECAAR_WIDTH_ENABLE |
+ AA_LINE_ECAAR_WIDTH_1_0 |
+ AA_LINE_REGION_WIDTH_ENABLE | AA_LINE_REGION_WIDTH_1_0);
+
+ OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
+ OUT_BATCH(0);
+
+ OUT_BATCH(_3DSTATE_DFLT_SPEC_CMD);
+ OUT_BATCH(0);
+
+ OUT_BATCH(_3DSTATE_DFLT_Z_CMD);
+ OUT_BATCH(0);
+
+ /* Don't support texture crossbar yet */
+ OUT_BATCH(_3DSTATE_COORD_SET_BINDINGS |
+ CSB_TCB(0, 0) |
+ CSB_TCB(1, 1) |
+ CSB_TCB(2, 2) |
+ CSB_TCB(3, 3) |
+ CSB_TCB(4, 4) | CSB_TCB(5, 5) | CSB_TCB(6, 6) | CSB_TCB(7, 7));
+
+ OUT_BATCH(_3DSTATE_RASTER_RULES_CMD |
+ ENABLE_POINT_RASTER_RULE |
+ OGL_POINT_RASTER_RULE |
+ ENABLE_LINE_STRIP_PROVOKE_VRTX |
+ ENABLE_TRI_FAN_PROVOKE_VRTX |
+ LINE_STRIP_PROVOKE_VRTX(1) |
+ TRI_FAN_PROVOKE_VRTX(2) | ENABLE_TEXKILL_3D_4D | TEXKILL_4D);
+
+ /* Need to initialize this to zero.
+ */
+ OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 | I1_LOAD_S(3) | (1));
+ OUT_BATCH(0);
+
+ /* XXX: Use this */
+ OUT_BATCH(_3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT);
+
+ OUT_BATCH(_3DSTATE_SCISSOR_RECT_0_CMD);
+ OUT_BATCH(0);
+ OUT_BATCH(0);
+
+ OUT_BATCH(_3DSTATE_DEPTH_SUBRECT_DISABLE);
+
+ OUT_BATCH(_3DSTATE_LOAD_INDIRECT | 0); /* disable indirect state */
+ OUT_BATCH(0);
+
+
+ /* Don't support twosided stencil yet */
+ OUT_BATCH(_3DSTATE_BACKFACE_STENCIL_OPS | BFO_ENABLE_STENCIL_TWO_SIDE | 0);
+
+ ADVANCE_BATCH();
+}
+
+
+#define emit(intel, state, size ) \
+ intel_batchbuffer_data(intel->batch, state, size, 0 )
+
+static GLuint
+get_dirty(struct i915_hw_state *state)
+{
+ GLuint dirty;
+
+ /* Work around the multitex hang - if one texture unit's state is
+ * modified, emit all texture units.
+ */
+ dirty = state->active & ~state->emitted;
+ if (dirty & I915_UPLOAD_TEX_ALL)
+ state->emitted &= ~I915_UPLOAD_TEX_ALL;
+ dirty = state->active & ~state->emitted;
+ return dirty;
+}
+
+
+static GLuint
+get_state_size(struct i915_hw_state *state)
+{
+ GLuint dirty = get_dirty(state);
+ GLuint i;
+ GLuint sz = 0;
+
+ if (dirty & I915_UPLOAD_CTX)
+ sz += sizeof(state->Ctx);
+
+ if (dirty & I915_UPLOAD_BUFFERS)
+ sz += sizeof(state->Buffer);
+
+ if (dirty & I915_UPLOAD_STIPPLE)
+ sz += sizeof(state->Stipple);
+
+ if (dirty & I915_UPLOAD_FOG)
+ sz += sizeof(state->Fog);
+
+ if (dirty & I915_UPLOAD_TEX_ALL) {
+ int nr = 0;
+ for (i = 0; i < I915_TEX_UNITS; i++)
+ if (dirty & I915_UPLOAD_TEX(i))
+ nr++;
+
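+ /* Texture state is emitted as two packets in i915_emit_state() (map
+ * state and sampler state), each a 2-dword header plus 3 dwords per
+ * dirty unit.
+ */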
+ sz += (2 + nr * 3) * sizeof(GLuint) * 2;
+ }
+
+ if (dirty & I915_UPLOAD_CONSTANTS)
+ sz += state->ConstantSize * sizeof(GLuint);
+
+ if (dirty & I915_UPLOAD_PROGRAM)
+ sz += state->ProgramSize * sizeof(GLuint);
+
+ return sz;
+}
+
+
+/* Push the state into the sarea and/or texture memory.
+ */
+static void
+i915_emit_state(struct intel_context *intel)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+ struct i915_hw_state *state = i915->current;
+ int i;
+ GLuint dirty;
+ BATCH_LOCALS;
+
+ /* We don't hold the lock at this point, so we want to make sure that
+ * there won't be a buffer wrap.
+ *
+ * It might be better to talk about explicit places where
+ * scheduling is allowed, rather than assume that it is whenever a
+ * batchbuffer fills up.
+ */
+ intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
+
+ /* Do this here as we may have flushed the batchbuffer above,
+ * causing more state to be dirty!
+ */
+ dirty = get_dirty(state);
+
+ if (INTEL_DEBUG & DEBUG_STATE)
+ fprintf(stderr, "%s dirty: %x\n", __FUNCTION__, dirty);
+
+ if (dirty & I915_UPLOAD_INVARIENT) {
+ if (INTEL_DEBUG & DEBUG_STATE)
+ fprintf(stderr, "I915_UPLOAD_INVARIENT:\n");
+ i915_emit_invarient_state(intel);
+ }
+
+ if (dirty & I915_UPLOAD_CTX) {
+ if (INTEL_DEBUG & DEBUG_STATE)
+ fprintf(stderr, "I915_UPLOAD_CTX:\n");
+ emit(intel, state->Ctx, sizeof(state->Ctx));
+ }
+
+ if (dirty & I915_UPLOAD_BUFFERS) {
+ if (INTEL_DEBUG & DEBUG_STATE)
+ fprintf(stderr, "I915_UPLOAD_BUFFERS:\n");
+ BEGIN_BATCH(I915_DEST_SETUP_SIZE + 2, 0);
+ OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR0]);
+ OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR1]);
+ OUT_RELOC(state->draw_region->buffer,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE,
+ state->draw_region->draw_offset);
+
+ if (state->depth_region) {
+ OUT_BATCH(state->Buffer[I915_DESTREG_DBUFADDR0]);
+ OUT_BATCH(state->Buffer[I915_DESTREG_DBUFADDR1]);
+ OUT_RELOC(state->depth_region->buffer,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE,
+ state->depth_region->draw_offset);
+ }
+
+ OUT_BATCH(state->Buffer[I915_DESTREG_DV0]);
+ OUT_BATCH(state->Buffer[I915_DESTREG_DV1]);
+ OUT_BATCH(state->Buffer[I915_DESTREG_SENABLE]);
+ OUT_BATCH(state->Buffer[I915_DESTREG_SR0]);
+ OUT_BATCH(state->Buffer[I915_DESTREG_SR1]);
+ OUT_BATCH(state->Buffer[I915_DESTREG_SR2]);
+ ADVANCE_BATCH();
+ }
+
+ if (dirty & I915_UPLOAD_STIPPLE) {
+ if (INTEL_DEBUG & DEBUG_STATE)
+ fprintf(stderr, "I915_UPLOAD_STIPPLE:\n");
+ emit(intel, state->Stipple, sizeof(state->Stipple));
+ }
+
+ if (dirty & I915_UPLOAD_FOG) {
+ if (INTEL_DEBUG & DEBUG_STATE)
+ fprintf(stderr, "I915_UPLOAD_FOG:\n");
+ emit(intel, state->Fog, sizeof(state->Fog));
+ }
+
+ /* Combine all the dirty texture state into a single command to
+ * avoid lockups on I915 hardware.
+ */
+ if (dirty & I915_UPLOAD_TEX_ALL) {
+ int nr = 0;
+
+ for (i = 0; i < I915_TEX_UNITS; i++)
+ if (dirty & I915_UPLOAD_TEX(i))
+ nr++;
+
+ BEGIN_BATCH(2 + nr * 3, 0);
+ OUT_BATCH(_3DSTATE_MAP_STATE | (3 * nr));
+ OUT_BATCH((dirty & I915_UPLOAD_TEX_ALL) >> I915_UPLOAD_TEX_0_SHIFT);
+ for (i = 0; i < I915_TEX_UNITS; i++)
+ if (dirty & I915_UPLOAD_TEX(i)) {
+
+ if (state->tex_buffer[i]) {
+ OUT_RELOC(state->tex_buffer[i],
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_READ,
+ state->tex_offset[i]);
+ }
+ else {
+ assert(i == 0);
+ assert(state == &i915->meta);
+ OUT_BATCH(0);
+ }
+
+ OUT_BATCH(state->Tex[i][I915_TEXREG_MS3]);
+ OUT_BATCH(state->Tex[i][I915_TEXREG_MS4]);
+ }
+ ADVANCE_BATCH();
+
+ BEGIN_BATCH(2 + nr * 3, 0);
+ OUT_BATCH(_3DSTATE_SAMPLER_STATE | (3 * nr));
+ OUT_BATCH((dirty & I915_UPLOAD_TEX_ALL) >> I915_UPLOAD_TEX_0_SHIFT);
+ for (i = 0; i < I915_TEX_UNITS; i++)
+ if (dirty & I915_UPLOAD_TEX(i)) {
+ OUT_BATCH(state->Tex[i][I915_TEXREG_SS2]);
+ OUT_BATCH(state->Tex[i][I915_TEXREG_SS3]);
+ OUT_BATCH(state->Tex[i][I915_TEXREG_SS4]);
+ }
+ ADVANCE_BATCH();
+ }
+
+ if (dirty & I915_UPLOAD_CONSTANTS) {
+ if (INTEL_DEBUG & DEBUG_STATE)
+ fprintf(stderr, "I915_UPLOAD_CONSTANTS:\n");
+ emit(intel, state->Constant, state->ConstantSize * sizeof(GLuint));
+ }
+
+ if (dirty & I915_UPLOAD_PROGRAM) {
+ if (INTEL_DEBUG & DEBUG_STATE)
+ fprintf(stderr, "I915_UPLOAD_PROGRAM:\n");
+
+ assert((state->Program[0] & 0x1ff) + 2 == state->ProgramSize);
+
+ emit(intel, state->Program, state->ProgramSize * sizeof(GLuint));
+ if (INTEL_DEBUG & DEBUG_STATE)
+ i915_disassemble_program(state->Program, state->ProgramSize);
+ }
+
+ state->emitted |= dirty;
+}
+
+static void
+i915_destroy_context(struct intel_context *intel)
+{
+ _tnl_free_vertices(&intel->ctx);
+}
+
+
+/**
+ * Set the drawing regions for the color and depth/stencil buffers.
+ * This involves setting the pitch, cpp and buffer ID/location.
+ * Also set the pixel format for color and Z rendering.
+ * Used for setting both regular and meta state.
+ */
+void
+i915_state_draw_region(struct intel_context *intel,
+ struct i915_hw_state *state,
+ struct intel_region *color_region,
+ struct intel_region *depth_region)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+ GLuint value;
+
+ ASSERT(state == &i915->state || state == &i915->meta);
+
+ if (state->draw_region != color_region) {
+ intel_region_release(&state->draw_region);
+ intel_region_reference(&state->draw_region, color_region);
+ }
+ if (state->depth_region != depth_region) {
+ intel_region_release(&state->depth_region);
+ intel_region_reference(&state->depth_region, depth_region);
+ }
+
+ /*
+ * Set stride/cpp values
+ */
+ if (color_region) {
+ state->Buffer[I915_DESTREG_CBUFADDR0] = _3DSTATE_BUF_INFO_CMD;
+ state->Buffer[I915_DESTREG_CBUFADDR1] =
+ (BUF_3D_ID_COLOR_BACK |
+ BUF_3D_PITCH(color_region->pitch * color_region->cpp) |
+ BUF_3D_USE_FENCE);
+ }
+
+ if (depth_region) {
+ state->Buffer[I915_DESTREG_DBUFADDR0] = _3DSTATE_BUF_INFO_CMD;
+ state->Buffer[I915_DESTREG_DBUFADDR1] =
+ (BUF_3D_ID_DEPTH |
+ BUF_3D_PITCH(depth_region->pitch * depth_region->cpp) |
+ BUF_3D_USE_FENCE);
+ }
+
+ /*
+ * Compute/set I915_DESTREG_DV1 value
+ */
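+ /* The pixel formats below are derived from cpp: 32-bit color uses
+ * ARGB8888, 16-bit color uses RGB565 with dithering enabled; a 32-bit
+ * depth region is programmed as 24-bit depth plus 8 "other" bits,
+ * otherwise 16-bit depth is used.
+ */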
+ value = (DSTORG_HORT_BIAS(0x8) | /* .5 */
+ DSTORG_VERT_BIAS(0x8) | /* .5 */
+ LOD_PRECLAMP_OGL | TEX_DEFAULT_COLOR_OGL);
+ if (color_region && color_region->cpp == 4) {
+ value |= DV_PF_8888;
+ }
+ else {
+ value |= (DITHER_FULL_ALWAYS | DV_PF_565);
+ }
+ if (depth_region && depth_region->cpp == 4) {
+ value |= DEPTH_FRMT_24_FIXED_8_OTHER;
+ }
+ else {
+ value |= DEPTH_FRMT_16_FIXED;
+ }
+ state->Buffer[I915_DESTREG_DV1] = value;
+
+ I915_STATECHANGE(i915, I915_UPLOAD_BUFFERS);
+}
+
+
+static void
+i915_set_draw_region(struct intel_context *intel,
+ struct intel_region *color_region,
+ struct intel_region *depth_region)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+ i915_state_draw_region(intel, &i915->state, color_region, depth_region);
+}
+
+
+
+static void
+i915_lost_hardware(struct intel_context *intel)
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+ i915->state.emitted = 0;
+}
+
+static GLuint
+i915_flush_cmd(void)
+{
+ return MI_FLUSH | FLUSH_MAP_CACHE;
+}
+
+static void
+i915_assert_not_dirty( struct intel_context *intel )
+{
+ struct i915_context *i915 = i915_context(&intel->ctx);
+ struct i915_hw_state *state = i915->current;
+ GLuint dirty = get_dirty(state);
+ assert(!dirty);
+}
+
+
+void
+i915InitVtbl(struct i915_context *i915)
+{
+ i915->intel.vtbl.check_vertex_size = i915_check_vertex_size;
+ i915->intel.vtbl.destroy = i915_destroy_context;
+ i915->intel.vtbl.emit_state = i915_emit_state;
+ i915->intel.vtbl.lost_hardware = i915_lost_hardware;
+ i915->intel.vtbl.reduced_primitive_state = i915_reduced_primitive_state;
+ i915->intel.vtbl.render_start = i915_render_start;
+ i915->intel.vtbl.set_draw_region = i915_set_draw_region;
+ i915->intel.vtbl.update_texture_state = i915UpdateTextureState;
+ i915->intel.vtbl.flush_cmd = i915_flush_cmd;
+ i915->intel.vtbl.assert_not_dirty = i915_assert_not_dirty;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "intel_batchbuffer.h"
+#include "intel_ioctl.h"
+
+/* Relocations in kernel space:
+ * - pass dma buffer separately
+ * - memory manager knows how to patch
+ * - pass list of dependent buffers
+ * - pass relocation list
+ *
+ * Either:
+ * - get back an offset for buffer to fire
+ * - memory manager knows how to fire buffer
+ *
+ * Really want the buffer to be AGP and pinned.
+ *
+ */
+
+/* Cliprect fence: The highest fence protecting a dma buffer
+ * containing explicit cliprect information. Like the old drawable
+ * lock but irq-driven. X server must wait for this fence to expire
+ * before changing cliprects [and then doing sw rendering?]. For
+ * other dma buffers, the scheduler will grab current cliprect info
+ * and mix into buffer. X server must hold the lock while changing
+ * cliprects??? Make per-drawable. Need cliprects in shared memory
+ * -- beats storing them with every cmd buffer in the queue.
+ *
+ * ==> X server must wait for this fence to expire before touching the
+ * framebuffer with new cliprects.
+ *
+ * ==> Cliprect-dependent buffers associated with a
+ * cliprect-timestamp. All of the buffers associated with a timestamp
+ * must go to hardware before any buffer with a newer timestamp.
+ *
+ * ==> Dma should be queued per-drawable for correct X/GL
+ * synchronization. Or can fences be used for this?
+ *
+ * Applies to: Blit operations, metaops, X server operations -- X
+ * server automatically waits on its own dma to complete before
+ * modifying cliprects ???
+ */
+
+static void
+intel_dump_batchbuffer(GLuint offset, GLuint * ptr, GLuint count)
+{
+ int i;
+ fprintf(stderr, "\n\n\nSTART BATCH (%d dwords):\n", count / 4);
+ for (i = 0; i < count / 4; i += 4)
+ fprintf(stderr, "0x%x:\t0x%08x 0x%08x 0x%08x 0x%08x\n",
+ offset + i * 4, ptr[i], ptr[i + 1], ptr[i + 2], ptr[i + 3]);
+ fprintf(stderr, "END BATCH\n\n\n");
+}
+
+void
+intel_batchbuffer_reset(struct intel_batchbuffer *batch)
+{
+
+ int i;
+
+ /*
+ * Get a new, free batchbuffer.
+ */
+
+ batch->size = batch->intel->intelScreen->maxBatchSize;
+ driBOData(batch->buffer, batch->size, NULL, 0);
+
+ driBOResetList(&batch->list);
+
+ /*
+ * Unreference buffers previously on the relocation list.
+ */
+
+ for (i = 0; i < batch->nr_relocs; i++) {
+ struct buffer_reloc *r = &batch->reloc[i];
+ driBOUnReference(r->buf);
+ }
+
+ batch->list_count = 0;
+ batch->nr_relocs = 0;
+ batch->flags = 0;
+
+ /*
+ * We don't refcount the batchbuffer itself since we can't destroy it
+ * while it's on the list.
+ */
+
+
+ driBOAddListItem(&batch->list, batch->buffer,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE);
+
+
+ batch->map = driBOMap(batch->buffer, DRM_BO_FLAG_WRITE, 0);
+ batch->ptr = batch->map;
+}
+
+/*======================================================================
+ * Public functions
+ */
+struct intel_batchbuffer *
+intel_batchbuffer_alloc(struct intel_context *intel)
+{
+ struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);
+
+ batch->intel = intel;
+
+ driGenBuffers(intel->intelScreen->batchPool, "batchbuffer", 1,
+ &batch->buffer, 4096,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE, 0);
+ batch->last_fence = NULL;
+ driBOCreateList(20, &batch->list);
+ intel_batchbuffer_reset(batch);
+ return batch;
+}
+
+void
+intel_batchbuffer_free(struct intel_batchbuffer *batch)
+{
+ if (batch->last_fence) {
+ driFenceFinish(batch->last_fence,
+ DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW, GL_FALSE);
+ driFenceUnReference(batch->last_fence);
+ batch->last_fence = NULL;
+ }
+ if (batch->map) {
+ driBOUnmap(batch->buffer);
+ batch->map = NULL;
+ }
+ driBOUnReference(batch->buffer);
+ batch->buffer = NULL;
+ free(batch);
+}
+
+/* TODO: Push this whole function into bufmgr.
+ */
+static void
+do_flush_locked(struct intel_batchbuffer *batch,
+ GLuint used,
+ GLboolean ignore_cliprects, GLboolean allow_unlock)
+{
+ GLuint *ptr;
+ GLuint i;
+ struct intel_context *intel = batch->intel;
+ unsigned fenceFlags;
+ struct _DriFenceObject *fo;
+
+ driBOValidateList(batch->intel->driFd, &batch->list);
+
+ /* Apply the relocations. This nasty map indicates to me that the
+ * whole task should be done internally by the memory manager, and
+ * that dma buffers probably need to be pinned within agp space.
+ */
+ ptr = (GLuint *) driBOMap(batch->buffer, DRM_BO_FLAG_WRITE,
+ DRM_BO_HINT_ALLOW_UNFENCED_MAP);
+
+
+ for (i = 0; i < batch->nr_relocs; i++) {
+ struct buffer_reloc *r = &batch->reloc[i];
+
+ ptr[r->offset / 4] = driBOOffset(r->buf) + r->delta;
+ }
+
+ if (INTEL_DEBUG & DEBUG_BATCH)
+ intel_dump_batchbuffer(0, ptr, used);
+
+ driBOUnmap(batch->buffer);
+ batch->map = NULL;
+
+ /* Throw away non-effective packets. Won't work once we have
+ * hardware contexts which would preserve state changes beyond a
+ * single buffer.
+ */
+
+ if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
+ intel_batch_ioctl(batch->intel,
+ driBOOffset(batch->buffer),
+ used, ignore_cliprects, allow_unlock);
+ }
+
+
+ /*
+ * Kernel fencing. The flags tell the kernel that we've
+ * programmed an MI_FLUSH.
+ */
+
+ fenceFlags = DRM_I915_FENCE_FLAG_FLUSHED;
+ fo = driFenceBuffers(batch->intel->driFd,
+ "Batch fence", fenceFlags);
+
+ /*
+ * User space fencing.
+ */
+
+ driBOFence(batch->buffer, fo);
+
+ if (driFenceType(fo) == DRM_FENCE_TYPE_EXE) {
+
+ /*
+ * Oops. We only validated a batch buffer. This means we
+ * didn't do any proper rendering. Discard this fence object.
+ */
+
+ driFenceUnReference(fo);
+ } else {
+ driFenceUnReference(batch->last_fence);
+ batch->last_fence = fo;
+ for (i = 0; i < batch->nr_relocs; i++) {
+ struct buffer_reloc *r = &batch->reloc[i];
+ driBOFence(r->buf, fo);
+ }
+ }
+
+ if (intel->numClipRects == 0 && !ignore_cliprects) {
+ if (allow_unlock) {
+ UNLOCK_HARDWARE(intel);
+ sched_yield();
+ LOCK_HARDWARE(intel);
+ }
+ intel->vtbl.lost_hardware(intel);
+ }
+}
+
+
+struct _DriFenceObject *
+intel_batchbuffer_flush(struct intel_batchbuffer *batch)
+{
+ struct intel_context *intel = batch->intel;
+ GLuint used = batch->ptr - batch->map;
+
+ if (used == 0)
+ return batch->last_fence;
+
+ /* Add the MI_BATCH_BUFFER_END. We currently always add an MI_FLUSH as
+ * well, which is a performance drain that we would like to avoid.
+ */
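+ /* Pad with an extra zero dword when needed so that the final batch
+ * length is a multiple of 8 bytes (an even number of dwords); this is
+ * presumed to be a hardware/ring alignment requirement.
+ */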
+ if (used & 4) {
+ ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
+ ((int *) batch->ptr)[1] = 0;
+ ((int *) batch->ptr)[2] = MI_BATCH_BUFFER_END;
+ used += 12;
+ }
+ else {
+ ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
+ ((int *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
+ used += 8;
+ }
+
+ driBOUnmap(batch->buffer);
+ batch->ptr = NULL;
+ batch->map = NULL;
+
+ /* TODO: Just pass the relocation list and dma buffer up to the
+ * kernel.
+ */
+ if (!intel->locked) {
+ assert(!(batch->flags & INTEL_BATCH_NO_CLIPRECTS));
+
+ LOCK_HARDWARE(intel);
+ do_flush_locked(batch, used, GL_FALSE, GL_TRUE);
+ UNLOCK_HARDWARE(intel);
+ }
+ else {
+ GLboolean ignore_cliprects = !(batch->flags & INTEL_BATCH_CLIPRECTS);
+ do_flush_locked(batch, used, ignore_cliprects, GL_FALSE);
+ }
+
+ /* Reset the buffer:
+ */
+ intel_batchbuffer_reset(batch);
+ return batch->last_fence;
+}
+
+void
+intel_batchbuffer_finish(struct intel_batchbuffer *batch)
+{
+ struct _DriFenceObject *fence = intel_batchbuffer_flush(batch);
+ driFenceReference(fence);
+ driFenceFinish(fence, DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW, GL_FALSE);
+ driFenceUnReference(fence);
+}
+
+
+/* This is the only way buffers get added to the validate list.
+ */
+GLboolean
+intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
+ struct _DriBufferObject *buffer,
+ GLuint flags, GLuint mask, GLuint delta)
+{
+ assert(batch->nr_relocs < MAX_RELOCS);
+
+ driBOAddListItem(&batch->list, buffer, flags, mask);
+
+ {
+ struct buffer_reloc *r = &batch->reloc[batch->nr_relocs++];
+ driBOReference(buffer);
+ r->buf = buffer;
+ r->offset = batch->ptr - batch->map;
+ r->delta = delta;
+ }
+
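+ /* Leave a placeholder dword in the batch; do_flush_locked() patches in
+ * driBOOffset(buffer) + delta once the buffer has been validated.
+ */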
+ batch->ptr += 4;
+ return GL_TRUE;
+}
+
+
+
+void
+intel_batchbuffer_data(struct intel_batchbuffer *batch,
+ const void *data, GLuint bytes, GLuint flags)
+{
+ assert((bytes & 3) == 0);
+ intel_batchbuffer_require_space(batch, bytes, flags);
+ __memcpy(batch->ptr, data, bytes);
+ batch->ptr += bytes;
+}
--- /dev/null
+#ifndef INTEL_BATCHBUFFER_H
+#define INTEL_BATCHBUFFER_H
+
+#include "mtypes.h"
+#include "dri_bufmgr.h"
+
+struct intel_context;
+
+#define BATCH_SZ 16384
+#define BATCH_RESERVED 16
+
+#define MAX_RELOCS 100
+
+#define INTEL_BATCH_NO_CLIPRECTS 0x1
+#define INTEL_BATCH_CLIPRECTS 0x2
+
+struct buffer_reloc
+{
+ struct _DriBufferObject *buf;
+ GLuint offset;
+ GLuint delta; /* not needed? */
+};
+
+struct intel_batchbuffer
+{
+ struct bufmgr *bm;
+ struct intel_context *intel;
+
+ struct _DriBufferObject *buffer;
+ struct _DriFenceObject *last_fence;
+ GLuint flags;
+
+ drmBOList list;
+ GLuint list_count;
+ GLubyte *map;
+ GLubyte *ptr;
+
+ struct buffer_reloc reloc[MAX_RELOCS];
+ GLuint nr_relocs;
+ GLuint size;
+};
+
+struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
+ *intel);
+
+void intel_batchbuffer_free(struct intel_batchbuffer *batch);
+
+
+void intel_batchbuffer_finish(struct intel_batchbuffer *batch);
+
+struct _DriFenceObject *intel_batchbuffer_flush(struct intel_batchbuffer
+ *batch);
+
+void intel_batchbuffer_reset(struct intel_batchbuffer *batch);
+
+
+/* Unlike bmBufferData, this currently requires the buffer be mapped.
+ * Consider it a convenience function wrapping multiple
+ * intel_batchbuffer_emit_dword() calls.
+ */
+void intel_batchbuffer_data(struct intel_batchbuffer *batch,
+ const void *data, GLuint bytes, GLuint flags);
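+
+/* For example (illustrative only), emitting two inline dwords:
+ *
+ *    GLuint dw[2] = { 0, 0 };
+ *    intel_batchbuffer_data(batch, dw, sizeof(dw), 0);
+ *
+ * is equivalent to two intel_batchbuffer_emit_dword() calls.
+ */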
+
+void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
+ GLuint bytes);
+
+GLboolean intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
+ struct _DriBufferObject *buffer,
+ GLuint flags,
+ GLuint mask, GLuint offset);
+
+/* Inline functions - might actually be better off with these
+ * non-inlined. Certainly better off switching all command packets to
+ * be passed as structs rather than dwords, but that's a little bit of
+ * work...
+ */
+static INLINE GLuint
+intel_batchbuffer_space(struct intel_batchbuffer *batch)
+{
+ return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map);
+}
+
+
+static INLINE void
+intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
+{
+ assert(batch->map);
+ assert(intel_batchbuffer_space(batch) >= 4);
+ *(GLuint *) (batch->ptr) = dword;
+ batch->ptr += 4;
+}
+
+static INLINE void
+intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
+ GLuint sz, GLuint flags)
+{
+ assert(sz < batch->size - 8);
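+
+ /* Flush the current batch if the request will not fit in the remaining
+ * space, or if the pending contents were built with a different
+ * cliprect mode (INTEL_BATCH_CLIPRECTS vs. INTEL_BATCH_NO_CLIPRECTS).
+ */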
+ if (intel_batchbuffer_space(batch) < sz ||
+ (batch->flags != 0 && flags != 0 && batch->flags != flags))
+ intel_batchbuffer_flush(batch);
+
+ batch->flags |= flags;
+}
+
+/* Here are the crusty old macros, to be removed:
+ */
+#define BATCH_LOCALS
+
+#define BEGIN_BATCH(n, flags) do { \
+ assert(!intel->prim.flush); \
+ intel_batchbuffer_require_space(intel->batch, (n)*4, flags); \
+} while (0)
+
+#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)
+
+#define OUT_RELOC(buf,flags,mask,delta) do { \
+ assert((delta) >= 0); \
+ intel_batchbuffer_emit_reloc(intel->batch, buf, flags, mask, delta); \
+} while (0)
+
+#define ADVANCE_BATCH() do { } while(0)
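+
+/* A typical emit sequence using these macros looks like the following
+ * sketch (mirroring the color-fill blit in intel_blit.c; "intel" is the
+ * usual context pointer assumed to be in scope):
+ *
+ *    BATCH_LOCALS;
+ *    BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
+ *    OUT_BATCH(CMD);
+ *    OUT_BATCH(BR13);
+ *    OUT_BATCH((y << 16) | x);
+ *    OUT_BATCH(((y + h) << 16) | (x + w));
+ *    OUT_RELOC(dst_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ *              DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, dst_offset);
+ *    OUT_BATCH(color);
+ *    ADVANCE_BATCH();
+ */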
+
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <xf86drm.h>
+#include <stdlib.h>
+#include <errno.h>
+#include "imports.h"
+#include "glthread.h"
+#include "dri_bufpool.h"
+#include "dri_bufmgr.h"
+#include "intel_screen.h"
+
+typedef struct
+{
+ drmMMListHead head;
+ struct _BPool *parent;
+ struct _DriFenceObject *fence;
+ unsigned long start;
+ int unfenced;
+ int mapped;
+} BBuf;
+
+typedef struct _BPool
+{
+ _glthread_Mutex mutex;
+ unsigned long bufSize;
+ unsigned poolSize;
+ unsigned numFree;
+ unsigned numTot;
+ unsigned numDelayed;
+ unsigned checkDelayed;
+ drmMMListHead free;
+ drmMMListHead delayed;
+ drmMMListHead head;
+ drmBO kernelBO;
+ void *virtual;
+ BBuf *bufs;
+} BPool;
+
+
+static BPool *
+createBPool(int fd, unsigned long bufSize, unsigned numBufs, unsigned flags,
+ unsigned checkDelayed)
+{
+ BPool *p = (BPool *) malloc(sizeof(*p));
+ BBuf *buf;
+ int i;
+
+ if (!p)
+ return NULL;
+
+ p->bufs = (BBuf *) malloc(numBufs * sizeof(*p->bufs));
+ if (!p->bufs) {
+ free(p);
+ return NULL;
+ }
+
+ DRMINITLISTHEAD(&p->free);
+ DRMINITLISTHEAD(&p->head);
+ DRMINITLISTHEAD(&p->delayed);
+
+ p->numTot = numBufs;
+ p->numFree = numBufs;
+ p->bufSize = bufSize;
+ p->numDelayed = 0;
+ p->checkDelayed = checkDelayed;
+
+ _glthread_INIT_MUTEX(p->mutex);
+
+ if (drmBOCreate(fd, 0, numBufs * bufSize, 0, NULL, drm_bo_type_dc,
+ flags, 0, &p->kernelBO)) {
+ free(p->bufs);
+ free(p);
+ return NULL;
+ }
+ if (drmBOMap(fd, &p->kernelBO, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0,
+ &p->virtual)) {
+ drmBODestroy(fd, &p->kernelBO);
+ free(p->bufs);
+ free(p);
+ return NULL;
+ }
+
+ /*
+ * We unmap the buffer so that we can validate it later. Note that this is
+ * just a synchronizing operation. The buffer will have a virtual mapping
+ * until it is destroyed.
+ */
+
+ drmBOUnmap(fd, &p->kernelBO);
+
+ buf = p->bufs;
+ for (i = 0; i < numBufs; ++i) {
+ buf->parent = p;
+ buf->fence = NULL;
+ buf->start = i * bufSize;
+ buf->mapped = 0;
+ buf->unfenced = 0;
+ DRMLISTADDTAIL(&buf->head, &p->free);
+ buf++;
+ }
+
+ return p;
+}
+
+
+static void
+pool_checkFree(BPool * p, int wait)
+{
+ drmMMListHead *list, *prev;
+ BBuf *buf;
+ int signaled = 0;
+ int i;
+
+ list = p->delayed.next;
+
+ if (p->numDelayed > 3) {
+ for (i = 0; i < p->numDelayed; i += 3) {
+ list = list->next;
+ }
+ }
+
+ prev = list->prev;
+ for (; list != &p->delayed; list = prev, prev = list->prev) {
+
+ buf = DRMLISTENTRY(BBuf, list, head);
+
+ if (!signaled) {
+ if (wait) {
+ driFenceFinish(buf->fence, DRM_FENCE_TYPE_EXE, 1);
+ signaled = 1;
+ }
+ else {
+ signaled = driFenceSignaled(buf->fence, DRM_FENCE_TYPE_EXE);
+ }
+ }
+
+ if (!signaled)
+ break;
+
+ driFenceUnReference(buf->fence);
+ buf->fence = NULL;
+ DRMLISTDEL(list);
+ p->numDelayed--;
+ DRMLISTADD(list, &p->free);
+ p->numFree++;
+ }
+}
+
+static void *
+pool_create(struct _DriBufferPool *pool,
+ unsigned long size, unsigned flags, unsigned hint,
+ unsigned alignment)
+{
+ BPool *p = (BPool *) pool->data;
+
+ drmMMListHead *item;
+
+ if (alignment && (alignment != 4096))
+ return NULL;
+
+ _glthread_LOCK_MUTEX(p->mutex);
+
+ if (p->numFree == 0)
+ pool_checkFree(p, GL_TRUE);
+
+ if (p->numFree == 0) {
+ fprintf(stderr, "Out of fixed size buffer objects\n");
+ BM_CKFATAL(-ENOMEM);
+ }
+
+ item = p->free.next;
+
+ if (item == &p->free) {
+ fprintf(stderr, "Fixed size buffer pool corruption\n");
+ }
+
+ DRMLISTDEL(item);
+ --p->numFree;
+
+ _glthread_UNLOCK_MUTEX(p->mutex);
+ return (void *) DRMLISTENTRY(BBuf, item, head);
+}
+
+
+static int
+pool_destroy(struct _DriBufferPool *pool, void *private)
+{
+ BBuf *buf = (BBuf *) private;
+ BPool *p = buf->parent;
+
+ _glthread_LOCK_MUTEX(p->mutex);
+
+ if (buf->fence) {
+ DRMLISTADDTAIL(&buf->head, &p->delayed);
+ p->numDelayed++;
+ }
+ else {
+ buf->unfenced = 0;
+ DRMLISTADD(&buf->head, &p->free);
+ p->numFree++;
+ }
+
+ if ((p->numDelayed % p->checkDelayed) == 0)
+ pool_checkFree(p, 0);
+
+ _glthread_UNLOCK_MUTEX(p->mutex);
+ return 0;
+}
+
+
+static int
+pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
+ int hint, void **virtual)
+{
+
+ BBuf *buf = (BBuf *) private;
+ BPool *p = buf->parent;
+
+ _glthread_LOCK_MUTEX(p->mutex);
+
+ /*
+ * Currently Mesa doesn't have any condition variables to resolve this
+ * cleanly in a multithreading environment.
+ * We bail out instead.
+ */
+
+ if (buf->mapped) {
+ fprintf(stderr, "Trying to map already mapped buffer object\n");
+ BM_CKFATAL(-EINVAL);
+ }
+
+#if 0
+ if (buf->unfenced && !(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
+ fprintf(stderr, "Trying to map an unfenced buffer object 0x%08x"
+ " 0x%08x %d\n", hint, flags, buf->start);
+ BM_CKFATAL(-EINVAL);
+ }
+
+#endif
+
+ if (buf->fence) {
+ _glthread_UNLOCK_MUTEX(p->mutex);
+ return -EBUSY;
+ }
+
+ buf->mapped = GL_TRUE;
+ *virtual = (unsigned char *) p->virtual + buf->start;
+ _glthread_UNLOCK_MUTEX(p->mutex);
+ return 0;
+}
+
+static int
+pool_waitIdle(struct _DriBufferPool *pool, void *private, int lazy)
+{
+ BBuf *buf = (BBuf *) private;
+ driFenceFinish(buf->fence, 0, lazy);
+ return 0;
+}
+
+static int
+pool_unmap(struct _DriBufferPool *pool, void *private)
+{
+ BBuf *buf = (BBuf *) private;
+
+ buf->mapped = 0;
+ return 0;
+}
+
+static unsigned long
+pool_offset(struct _DriBufferPool *pool, void *private)
+{
+ BBuf *buf = (BBuf *) private;
+ BPool *p = buf->parent;
+
+ return p->kernelBO.offset + buf->start;
+}
+
+static unsigned
+pool_flags(struct _DriBufferPool *pool, void *private)
+{
+ BPool *p = (BPool *) pool->data;
+
+ return p->kernelBO.flags;
+}
+
+static unsigned long
+pool_size(struct _DriBufferPool *pool, void *private)
+{
+ BPool *p = (BPool *) pool->data;
+
+ return p->bufSize;
+}
+
+
+static int
+pool_fence(struct _DriBufferPool *pool, void *private,
+ struct _DriFenceObject *fence)
+{
+ BBuf *buf = (BBuf *) private;
+ BPool *p = buf->parent;
+
+ _glthread_LOCK_MUTEX(p->mutex);
+ if (buf->fence) {
+ driFenceUnReference(buf->fence);
+ }
+ buf->fence = fence;
+ buf->unfenced = 0;
+ driFenceReference(buf->fence);
+ _glthread_UNLOCK_MUTEX(p->mutex);
+
+ return 0;
+}
+
+static drmBO *
+pool_kernel(struct _DriBufferPool *pool, void *private)
+{
+ BBuf *buf = (BBuf *) private;
+ BPool *p = buf->parent;
+
+ return &p->kernelBO;
+}
+
+static int
+pool_validate(struct _DriBufferPool *pool, void *private)
+{
+ BBuf *buf = (BBuf *) private;
+ BPool *p = buf->parent;
+ _glthread_LOCK_MUTEX(p->mutex);
+ buf->unfenced = GL_TRUE;
+ _glthread_UNLOCK_MUTEX(p->mutex);
+ return 0;
+}
+
+static void
+pool_takedown(struct _DriBufferPool *pool)
+{
+ BPool *p = (BPool *) pool->data;
+
+ /*
+ * Wait on outstanding fences.
+ */
+
+ _glthread_LOCK_MUTEX(p->mutex);
+ while ((p->numFree < p->numTot) && p->numDelayed) {
+ _glthread_UNLOCK_MUTEX(p->mutex);
+ sched_yield();
+ pool_checkFree(p, GL_TRUE);
+ _glthread_LOCK_MUTEX(p->mutex);
+ }
+
+ drmBODestroy(pool->fd, &p->kernelBO);
+ free(p->bufs);
+ _glthread_UNLOCK_MUTEX(p->mutex);
+ free(p);
+ free(pool);
+}
+
+
+struct _DriBufferPool *
+driBatchPoolInit(int fd, unsigned flags,
+ unsigned long bufSize,
+ unsigned numBufs, unsigned checkDelayed)
+{
+ struct _DriBufferPool *pool;
+
+ pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
+ if (!pool)
+ return NULL;
+
+ pool->data = createBPool(fd, bufSize, numBufs, flags, checkDelayed);
+ if (!pool->data) {
+ free(pool);
+ return NULL;
+ }
+
+ pool->fd = fd;
+ pool->map = &pool_map;
+ pool->unmap = &pool_unmap;
+ pool->destroy = &pool_destroy;
+ pool->offset = &pool_offset;
+ pool->flags = &pool_flags;
+ pool->size = &pool_size;
+ pool->create = &pool_create;
+ pool->fence = &pool_fence;
+ pool->kernel = &pool_kernel;
+ pool->validate = &pool_validate;
+ pool->waitIdle = &pool_waitIdle;
+ pool->setstatic = NULL;
+ pool->takeDown = &pool_takedown;
+ return pool;
+}
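+
+/* Example usage (sketch only; the real call site lives in the screen setup
+ * code, which is not part of this file, and its exact parameters are
+ * assumptions here):
+ *
+ *    intelScreen->batchPool = driBatchPoolInit(fd,
+ *                                              DRM_BO_FLAG_EXE |
+ *                                              DRM_BO_FLAG_MEM_TT,
+ *                                              intelScreen->maxBatchSize,
+ *                                              5, 40);
+ */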
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include <stdio.h>
+#include <errno.h>
+
+#include "mtypes.h"
+#include "context.h"
+#include "enums.h"
+
+#include "intel_batchbuffer.h"
+#include "intel_blit.h"
+#include "intel_buffers.h"
+#include "intel_context.h"
+#include "intel_fbo.h"
+#include "intel_reg.h"
+#include "intel_regions.h"
+#include "vblank.h"
+
+#define FILE_DEBUG_FLAG DEBUG_BLIT
+
+/**
+ * Copy the back color buffer to the front color buffer.
+ * Used for SwapBuffers().
+ */
+void
+intelCopyBuffer(const __DRIdrawablePrivate * dPriv,
+ const drm_clip_rect_t * rect)
+{
+
+ struct intel_context *intel;
+ const intelScreenPrivate *intelScreen;
+ GLboolean missed_target;
+ int64_t ust;
+
+ DBG("%s\n", __FUNCTION__);
+
+ assert(dPriv);
+
+ intel = intelScreenContext(dPriv->driScreenPriv->private);
+ if (!intel)
+ return;
+
+ intelScreen = intel->intelScreen;
+
+ if (!rect && !intel->swap_scheduled && intelScreen->drmMinor >= 6 &&
+ !(intel->vblank_flags & VBLANK_FLAG_NO_IRQ) &&
+ intelScreen->current_rotation == 0) {
+ unsigned int interval = driGetVBlankInterval(dPriv, intel->vblank_flags);
+ unsigned int target;
+ drm_i915_vblank_swap_t swap;
+
+ swap.drawable = dPriv->hHWDrawable;
+ swap.seqtype = DRM_VBLANK_ABSOLUTE;
+ target = swap.sequence = intel->vbl_seq + interval;
+
+ if (intel->vblank_flags & VBLANK_FLAG_SYNC) {
+ swap.seqtype |= DRM_VBLANK_NEXTONMISS;
+ } else if (interval == 0) {
+ goto noschedule;
+ }
+
+ if ( intel->vblank_flags & VBLANK_FLAG_SECONDARY ) {
+ swap.seqtype |= DRM_VBLANK_SECONDARY;
+ }
+
+ intel_batchbuffer_flush(intel->batch);
+
+ if (!drmCommandWriteRead(intel->driFd, DRM_I915_VBLANK_SWAP, &swap,
+ sizeof(swap))) {
+ intel->swap_scheduled = 1;
+ intel->vbl_seq = swap.sequence;
+ swap.sequence -= target;
+ missed_target = swap.sequence > 0 && swap.sequence <= (1 << 23);
+ }
+ } else {
+ intel->swap_scheduled = 0;
+ }
+noschedule:
+
+ if (intel->last_swap_fence) {
+ driFenceFinish(intel->last_swap_fence, DRM_FENCE_TYPE_EXE, GL_TRUE);
+ driFenceUnReference(intel->last_swap_fence);
+ intel->last_swap_fence = NULL;
+ }
+ intel->last_swap_fence = intel->first_swap_fence;
+ intel->first_swap_fence = NULL;
+
+ if (!intel->swap_scheduled) {
+ if (!rect) {
+ driWaitForVBlank(dPriv, &intel->vbl_seq, intel->vblank_flags,
+ &missed_target);
+ }
+
+
+ /* The LOCK_HARDWARE is required for the cliprects. Buffer offsets
+ * should work regardless.
+ */
+ LOCK_HARDWARE(intel);
+
+ if (intel->driDrawable && intel->driDrawable->numClipRects) {
+ const intelScreenPrivate *intelScreen = intel->intelScreen;
+ struct gl_framebuffer *fb
+ = (struct gl_framebuffer *) dPriv->driverPrivate;
+ const struct intel_region *frontRegion
+ = intel_get_rb_region(fb, BUFFER_FRONT_LEFT);
+ const struct intel_region *backRegion
+ = intel_get_rb_region(fb, BUFFER_BACK_LEFT);
+ const int nbox = dPriv->numClipRects;
+ const drm_clip_rect_t *pbox = dPriv->pClipRects;
+ const int pitch = frontRegion->pitch;
+ const int cpp = frontRegion->cpp;
+ int BR13, CMD;
+ int i;
+
+ ASSERT(fb);
+ ASSERT(fb->Name == 0); /* Not a user-created FBO */
+ ASSERT(frontRegion);
+ ASSERT(backRegion);
+ ASSERT(frontRegion->pitch == backRegion->pitch);
+ ASSERT(frontRegion->cpp == backRegion->cpp);
+
+ if (cpp == 2) {
+ BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
+ CMD = XY_SRC_COPY_BLT_CMD;
+ }
+ else {
+ BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25);
+ CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
+ XY_SRC_COPY_BLT_WRITE_RGB);
+ }
+
+ for (i = 0; i < nbox; i++, pbox++) {
+ drm_clip_rect_t box;
+
+ if (pbox->x1 > pbox->x2 ||
+ pbox->y1 > pbox->y2 ||
+ pbox->x2 > intelScreen->width || pbox->y2 > intelScreen->height)
+ continue;
+
+ box = *pbox;
+
+ if (rect) {
+ if (rect->x1 > box.x1)
+ box.x1 = rect->x1;
+ if (rect->y1 > box.y1)
+ box.y1 = rect->y1;
+ if (rect->x2 < box.x2)
+ box.x2 = rect->x2;
+ if (rect->y2 < box.y2)
+ box.y2 = rect->y2;
+
+ if (box.x1 > box.x2 || box.y1 > box.y2)
+ continue;
+ }
+
+ BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((box.y1 << 16) | box.x1);
+ OUT_BATCH((box.y2 << 16) | box.x2);
+
+ if (intel->sarea->pf_current_page == 0)
+ OUT_RELOC(frontRegion->buffer,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, 0);
+ else
+ OUT_RELOC(backRegion->buffer,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, 0);
+ OUT_BATCH((box.y1 << 16) | box.x1);
+ OUT_BATCH(BR13 & 0xffff);
+
+ if (intel->sarea->pf_current_page == 0)
+ OUT_RELOC(backRegion->buffer,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_READ, 0);
+ else
+ OUT_RELOC(frontRegion->buffer,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_READ, 0);
+
+ ADVANCE_BATCH();
+ }
+
+ if (intel->first_swap_fence)
+ driFenceUnReference(intel->first_swap_fence);
+ intel->first_swap_fence = intel_batchbuffer_flush(intel->batch);
+ driFenceReference(intel->first_swap_fence);
+ }
+
+ UNLOCK_HARDWARE(intel);
+ }
+
+ if (!rect) {
+ intel->swap_count++;
+ (*dri_interface->getUST) (&ust);
+ if (missed_target) {
+ intel->swap_missed_count++;
+ intel->swap_missed_ust = ust - intel->swap_ust;
+ }
+
+ intel->swap_ust = ust;
+ }
+}
+
+
+
+
+void
+intelEmitFillBlit(struct intel_context *intel,
+ GLuint cpp,
+ GLshort dst_pitch,
+ struct _DriBufferObject *dst_buffer,
+ GLuint dst_offset,
+ GLshort x, GLshort y, GLshort w, GLshort h, GLuint color)
+{
+ GLuint BR13, CMD;
+ BATCH_LOCALS;
+
+ dst_pitch *= cpp;
+
+ switch (cpp) {
+ case 1:
+ case 2:
+ case 3:
+ BR13 = dst_pitch | (0xF0 << 16) | (1 << 24);
+ CMD = XY_COLOR_BLT_CMD;
+ break;
+ case 4:
+ BR13 = dst_pitch | (0xF0 << 16) | (1 << 24) | (1 << 25);
+ CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
+ XY_COLOR_BLT_WRITE_RGB);
+ break;
+ default:
+ return;
+ }
+
+ DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
+ __FUNCTION__, dst_buffer, dst_pitch, dst_offset, x, y, w, h);
+
+
+ BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((y << 16) | x);
+ OUT_BATCH(((y + h) << 16) | (x + w));
+ OUT_RELOC(dst_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, dst_offset);
+ OUT_BATCH(color);
+ ADVANCE_BATCH();
+}
+
+
+/* Copy BitBlt
+ */
+void
+intelEmitCopyBlit(struct intel_context *intel,
+ GLuint cpp,
+ GLshort src_pitch,
+ struct _DriBufferObject *src_buffer,
+ GLuint src_offset,
+ GLshort dst_pitch,
+ struct _DriBufferObject *dst_buffer,
+ GLuint dst_offset,
+ GLshort src_x, GLshort src_y,
+ GLshort dst_x, GLshort dst_y, GLshort w, GLshort h)
+{
+ GLuint CMD, BR13;
+ int dst_y2 = dst_y + h;
+ int dst_x2 = dst_x + w;
+ BATCH_LOCALS;
+
+
+ DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
+ __FUNCTION__,
+ src_buffer, src_pitch, src_offset, src_x, src_y,
+ dst_buffer, dst_pitch, dst_offset, dst_x, dst_y, w, h);
+
+ src_pitch *= cpp;
+ dst_pitch *= cpp;
+
+ switch (cpp) {
+ case 1:
+ case 2:
+ case 3:
+ BR13 = (((GLint) dst_pitch) & 0xffff) | (0xCC << 16) | (1 << 24);
+ CMD = XY_SRC_COPY_BLT_CMD;
+ break;
+ case 4:
+ BR13 =
+ (((GLint) dst_pitch) & 0xffff) | (0xCC << 16) | (1 << 24) | (1 <<
+ 25);
+ CMD =
+ (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
+ XY_SRC_COPY_BLT_WRITE_RGB);
+ break;
+ default:
+ return;
+ }
+
+ if (dst_y2 < dst_y || dst_x2 < dst_x) {
+ return;
+ }
+
+ /* Initial y values don't seem to work with negative pitches. If
+ * we adjust the offsets manually (below), it seems to work fine.
+ *
+ * On the other hand, if we always adjust, the hardware doesn't
+ * know which blit directions to use, so overlapping copypixels get
+ * the wrong result.
+ */
+ if (dst_pitch > 0 && src_pitch > 0) {
+ BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((dst_y << 16) | dst_x);
+ OUT_BATCH((dst_y2 << 16) | dst_x2);
+ OUT_RELOC(dst_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, dst_offset);
+ OUT_BATCH((src_y << 16) | src_x);
+ OUT_BATCH(((GLint) src_pitch & 0xffff));
+ OUT_RELOC(src_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_READ, src_offset);
+ ADVANCE_BATCH();
+ }
+ else {
+ BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((0 << 16) | dst_x);
+ OUT_BATCH((h << 16) | dst_x2);
+ OUT_RELOC(dst_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE,
+ dst_offset + dst_y * dst_pitch);
+ OUT_BATCH((0 << 16) | src_x);
+ OUT_BATCH(((GLint) src_pitch & 0xffff));
+ OUT_RELOC(src_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_READ,
+ src_offset + src_y * src_pitch);
+ ADVANCE_BATCH();
+ }
+}
+
+
+/**
+ * Use blitting to clear the renderbuffers named by 'mask'.
+ * Note: we can't use the ctx->DrawBuffer->_ColorDrawBufferMask field
+ * since that might include software renderbuffers or renderbuffers
+ * which we're clearing with triangles.
+ * \param mask bitmask of BUFFER_BIT_* values indicating buffers to clear
+ */
+void
+intelClearWithBlit(GLcontext * ctx, GLbitfield mask)
+{
+ struct intel_context *intel = intel_context(ctx);
+ GLuint clear_depth;
+ GLbitfield skipBuffers = 0;
+ BATCH_LOCALS;
+
+ DBG("%s %x\n", __FUNCTION__, mask);
+
+ /*
+ * Compute values for clearing the buffers.
+ */
+ clear_depth = 0;
+ if (mask & BUFFER_BIT_DEPTH) {
+ clear_depth = (GLuint) (ctx->DrawBuffer->_DepthMax * ctx->Depth.Clear);
+ }
+ if (mask & BUFFER_BIT_STENCIL) {
+ clear_depth |= (ctx->Stencil.Clear & 0xff) << 24;
+ }
+
+ /* If clearing both depth and stencil, skip BUFFER_BIT_STENCIL in
+ * the loop below.
+ */
+ if ((mask & BUFFER_BIT_DEPTH) && (mask & BUFFER_BIT_STENCIL)) {
+ skipBuffers = BUFFER_BIT_STENCIL;
+ }
+
+ /* XXX Move this flush/lock into the following conditional? */
+ intelFlush(&intel->ctx);
+ LOCK_HARDWARE(intel);
+
+ if (intel->numClipRects) {
+ GLint cx, cy, cw, ch;
+ drm_clip_rect_t clear;
+ int i;
+
+ /* Get clear bounds after locking */
+ cx = ctx->DrawBuffer->_Xmin;
+ cy = ctx->DrawBuffer->_Ymin;
+ cw = ctx->DrawBuffer->_Xmax - ctx->DrawBuffer->_Xmin;
+ ch = ctx->DrawBuffer->_Ymax - ctx->DrawBuffer->_Ymin;
+
+ if (intel->ctx.DrawBuffer->Name == 0) {
+ /* clearing a window */
+
+ /* flip top to bottom */
+ clear.x1 = cx + intel->drawX;
+ clear.y1 = intel->driDrawable->y + intel->driDrawable->h - cy - ch;
+ clear.x2 = clear.x1 + cw;
+ clear.y2 = clear.y1 + ch;
+
+ /* adjust for page flipping */
+ if (intel->sarea->pf_current_page == 1) {
+ const GLuint tmp = mask;
+ mask &= ~(BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_BACK_LEFT);
+ if (tmp & BUFFER_BIT_FRONT_LEFT)
+ mask |= BUFFER_BIT_BACK_LEFT;
+ if (tmp & BUFFER_BIT_BACK_LEFT)
+ mask |= BUFFER_BIT_FRONT_LEFT;
+ }
+ }
+ else {
+ /* clearing FBO */
+ assert(intel->numClipRects == 1);
+ assert(intel->pClipRects == &intel->fboRect);
+ clear.x1 = cx;
+ clear.y1 = cy;
+ clear.x2 = clear.x1 + cw;
+ clear.y2 = clear.y1 + ch;
+ /* no change to mask */
+ }
+
+ for (i = 0; i < intel->numClipRects; i++) {
+ const drm_clip_rect_t *box = &intel->pClipRects[i];
+ drm_clip_rect_t b;
+ GLuint buf;
+ GLuint clearMask = mask; /* use copy, since we modify it below */
+ GLboolean all = (cw == ctx->DrawBuffer->Width &&
+ ch == ctx->DrawBuffer->Height);
+
+ if (!all) {
+ intel_intersect_cliprects(&b, &clear, box);
+ }
+ else {
+ b = *box;
+ }
+
+ if (0)
+ _mesa_printf("clear %d,%d..%d,%d, mask %x\n",
+ b.x1, b.y1, b.x2, b.y2, mask);
+
+ /* Loop over all renderbuffers */
+ for (buf = 0; buf < BUFFER_COUNT && clearMask; buf++) {
+ const GLbitfield bufBit = 1 << buf;
+ if ((clearMask & bufBit) && !(bufBit & skipBuffers)) {
+ /* OK, clear this renderbuffer */
+ const struct intel_renderbuffer *irb
+ = intel_renderbuffer(ctx->DrawBuffer->
+ Attachment[buf].Renderbuffer);
+ struct _DriBufferObject *write_buffer =
+ intel_region_buffer(intel->intelScreen, irb->region,
+ all ? INTEL_WRITE_FULL :
+ INTEL_WRITE_PART);
+
+ GLuint clearVal;
+ GLint pitch, cpp;
+ GLuint BR13, CMD;
+
+ ASSERT(irb);
+ ASSERT(irb->region);
+
+ pitch = irb->region->pitch;
+ cpp = irb->region->cpp;
+
+ DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
+ __FUNCTION__,
+ irb->region->buffer, (pitch * cpp),
+ irb->region->draw_offset,
+ b.x1, b.y1, b.x2 - b.x1, b.y2 - b.y1);
+
+
+ /* Setup the blit command */
+ if (cpp == 4) {
+ BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25);
+ if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL) {
+ CMD = XY_COLOR_BLT_CMD;
+ if (clearMask & BUFFER_BIT_DEPTH)
+ CMD |= XY_COLOR_BLT_WRITE_RGB;
+ if (clearMask & BUFFER_BIT_STENCIL)
+ CMD |= XY_COLOR_BLT_WRITE_ALPHA;
+ }
+ else {
+ /* clearing RGBA */
+ CMD = (XY_COLOR_BLT_CMD |
+ XY_COLOR_BLT_WRITE_ALPHA |
+ XY_COLOR_BLT_WRITE_RGB);
+ }
+ }
+ else {
+ ASSERT(cpp == 2 || cpp == 0);
+ BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
+ CMD = XY_COLOR_BLT_CMD;
+ }
+
+ if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL) {
+ clearVal = clear_depth;
+ }
+ else {
+ clearVal = (cpp == 4)
+ ? intel->ClearColor8888 : intel->ClearColor565;
+ }
+ /*
+ _mesa_debug(ctx, "hardware blit clear buf %d rb id %d\n",
+ buf, irb->Base.Name);
+ */
+ BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((b.y1 << 16) | b.x1);
+ OUT_BATCH((b.y2 << 16) | b.x2);
+ OUT_RELOC(write_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE,
+ irb->region->draw_offset);
+ OUT_BATCH(clearVal);
+ ADVANCE_BATCH();
+ clearMask &= ~bufBit; /* turn off bit, for faster loop exit */
+ }
+ }
+ }
+ intel_batchbuffer_flush(intel->batch);
+ }
+
+ UNLOCK_HARDWARE(intel);
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_BLIT_H
+#define INTEL_BLIT_H
+
+#include "intel_context.h"
+#include "intel_ioctl.h"
+#include "dri_bufmgr.h"
+
+extern void intelCopyBuffer(const __DRIdrawablePrivate * dpriv,
+ const drm_clip_rect_t * rect);
+
+extern void intelClearWithBlit(GLcontext * ctx, GLbitfield mask);
+
+extern void intelEmitCopyBlit(struct intel_context *intel,
+ GLuint cpp,
+ GLshort src_pitch,
+ struct _DriBufferObject *src_buffer,
+ GLuint src_offset,
+ GLshort dst_pitch,
+ struct _DriBufferObject *dst_buffer,
+ GLuint dst_offset,
+ GLshort srcx, GLshort srcy,
+ GLshort dstx, GLshort dsty,
+ GLshort w, GLshort h);
+
+extern void intelEmitFillBlit(struct intel_context *intel,
+ GLuint cpp,
+ GLshort dst_pitch,
+ struct _DriBufferObject *dst_buffer,
+ GLuint dst_offset,
+ GLshort x, GLshort y,
+ GLshort w, GLshort h, GLuint color);
+
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "imports.h"
+#include "mtypes.h"
+#include "bufferobj.h"
+
+#include "intel_context.h"
+#include "intel_buffer_objects.h"
+#include "intel_regions.h"
+#include "dri_bufmgr.h"
+
+/**
+ * There is some duplication between mesa's bufferobjects and our
+ * bufmgr buffers. Both have an integer handle and a hashtable to
+ * look up an opaque structure. It would be nice if the handles and
+ * internal structure were somehow shared.
+ */
+static struct gl_buffer_object *
+intel_bufferobj_alloc(GLcontext * ctx, GLuint name, GLenum target)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
+
+ _mesa_initialize_buffer_object(&obj->Base, name, target);
+
+ driGenBuffers(intel->intelScreen->regionPool,
+ "bufferobj", 1, &obj->buffer, 64, 0, 0);
+
+ return &obj->Base;
+}
+
+
+/* Break the COW tie to the region. The region gets to keep the data.
+ */
+void
+intel_bufferobj_release_region(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj)
+{
+ assert(intel_obj->region->buffer == intel_obj->buffer);
+ intel_obj->region->pbo = NULL;
+ intel_obj->region = NULL;
+ driBOUnReference(intel_obj->buffer);
+ intel_obj->buffer = NULL;
+
+ /* This leads to a large number of buffer deletion/creation events.
+ * Currently the drm doesn't like that:
+ */
+ driGenBuffers(intel->intelScreen->regionPool,
+ "buffer object", 1, &intel_obj->buffer, 64, 0, 0);
+ driBOData(intel_obj->buffer, intel_obj->Base.Size, NULL, 0);
+}
+
+/* Break the COW tie to the region. Both the pbo and the region end
+ * up with a copy of the data.
+ */
+void
+intel_bufferobj_cow(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj)
+{
+ assert(intel_obj->region);
+ intel_region_cow(intel->intelScreen, intel_obj->region);
+}
+
+
+/**
+ * Deallocate/free a vertex/pixel buffer object.
+ * Called via glDeleteBuffersARB().
+ */
+static void
+intel_bufferobj_free(GLcontext * ctx, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+
+ if (intel_obj->region) {
+ intel_bufferobj_release_region(intel, intel_obj);
+ }
+ else if (intel_obj->buffer) {
+ driDeleteBuffers(1, &intel_obj->buffer);
+ }
+
+ _mesa_free(intel_obj);
+}
+
+
+
+/**
+ * Allocate space for and store data in a buffer object. Any data that was
+ * previously stored in the buffer object is lost. If data is NULL,
+ * memory will be allocated, but no copy will occur.
+ * Called via glBufferDataARB().
+ */
+static void
+intel_bufferobj_data(GLcontext * ctx,
+ GLenum target,
+ GLsizeiptrARB size,
+ const GLvoid * data,
+ GLenum usage, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ intel_obj->Base.Size = size;
+ intel_obj->Base.Usage = usage;
+
+ if (intel_obj->region)
+ intel_bufferobj_release_region(intel, intel_obj);
+
+ driBOData(intel_obj->buffer, size, data, 0);
+}
+
+
+/**
+ * Replace data in a subrange of buffer object. If the data range
+ * specified by size + offset extends beyond the end of the buffer or
+ * if data is NULL, no copy is performed.
+ * Called via glBufferSubDataARB().
+ */
+static void
+intel_bufferobj_subdata(GLcontext * ctx,
+ GLenum target,
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ const GLvoid * data, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+
+ if (intel_obj->region)
+ intel_bufferobj_cow(intel, intel_obj);
+
+ driBOSubData(intel_obj->buffer, offset, size, data);
+}
+
+
+/**
+ * Called via glGetBufferSubDataARB().
+ */
+static void
+intel_bufferobj_get_subdata(GLcontext * ctx,
+ GLenum target,
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ GLvoid * data, struct gl_buffer_object *obj)
+{
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+ driBOGetSubData(intel_obj->buffer, offset, size, data);
+}
+
+
+
+/**
+ * Called via glMapBufferARB().
+ */
+static void *
+intel_bufferobj_map(GLcontext * ctx,
+ GLenum target,
+ GLenum access, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ /* XXX: Translate access to flags arg below:
+ */
+ assert(intel_obj);
+
+ if (intel_obj->region)
+ intel_bufferobj_cow(intel, intel_obj);
+
+ obj->Pointer = driBOMap(intel_obj->buffer,
+ DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);
+ return obj->Pointer;
+}
+
+
+/**
+ * Called via glUnmapBufferARB().
+ */
+static GLboolean
+intel_bufferobj_unmap(GLcontext * ctx,
+ GLenum target, struct gl_buffer_object *obj)
+{
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+ assert(obj->Pointer);
+ driBOUnmap(intel_obj->buffer);
+ obj->Pointer = NULL;
+ return GL_TRUE;
+}
+
+struct _DriBufferObject *
+intel_bufferobj_buffer(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj, GLuint flag)
+{
+ if (intel_obj->region) {
+ if (flag == INTEL_WRITE_PART)
+ intel_bufferobj_cow(intel, intel_obj);
+ else if (flag == INTEL_WRITE_FULL)
+ intel_bufferobj_release_region(intel, intel_obj);
+ }
+
+ return intel_obj->buffer;
+}
+
+void
+intel_bufferobj_init(struct intel_context *intel)
+{
+ GLcontext *ctx = &intel->ctx;
+
+ ctx->Driver.NewBufferObject = intel_bufferobj_alloc;
+ ctx->Driver.DeleteBuffer = intel_bufferobj_free;
+ ctx->Driver.BufferData = intel_bufferobj_data;
+ ctx->Driver.BufferSubData = intel_bufferobj_subdata;
+ ctx->Driver.GetBufferSubData = intel_bufferobj_get_subdata;
+ ctx->Driver.MapBuffer = intel_bufferobj_map;
+ ctx->Driver.UnmapBuffer = intel_bufferobj_unmap;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2005 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_BUFFEROBJ_H
+#define INTEL_BUFFEROBJ_H
+
+#include "mtypes.h"
+
+struct intel_context;
+struct intel_region;
+struct gl_buffer_object;
+
+
+/**
+ * Intel vertex/pixel buffer object, derived from Mesa's gl_buffer_object.
+ */
+struct intel_buffer_object
+{
+ struct gl_buffer_object Base;
+ struct _DriBufferObject *buffer; /* the low-level buffer manager's buffer handle */
+
+ struct intel_region *region; /* Is there a zero-copy texture
+ associated with this (pixel)
+ buffer object? */
+};
+
+
+/* Get the bm buffer associated with a GL bufferobject:
+ */
+struct _DriBufferObject *intel_bufferobj_buffer(struct intel_context *intel,
+ struct intel_buffer_object
+ *obj, GLuint flag);
+
+/* Hook the bufferobject implementation into mesa:
+ */
+void intel_bufferobj_init(struct intel_context *intel);
+
+
+
+/* Are the obj->Name tests necessary? Unfortunately yes, mesa
+ * allocates a couple of gl_buffer_object structs statically, and
+ * the Name == 0 test is the only way to identify them and avoid
+ * casting them erroneously to our structs.
+ */
+static INLINE struct intel_buffer_object *
+intel_buffer_object(struct gl_buffer_object *obj)
+{
+ if (obj->Name)
+ return (struct intel_buffer_object *) obj;
+ else
+ return NULL;
+}
+
+/* Helpers for zerocopy image uploads. See also intel_regions.h:
+ */
+void intel_bufferobj_cow(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj);
+void intel_bufferobj_release_region(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj);
+
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_blit.h"
+#include "intel_buffers.h"
+#include "intel_depthstencil.h"
+#include "intel_fbo.h"
+#include "intel_tris.h"
+#include "intel_regions.h"
+#include "intel_batchbuffer.h"
+#include "context.h"
+#include "utils.h"
+#include "framebuffer.h"
+#include "swrast/swrast.h"
+#include "vblank.h"
+
+
+/**
+ * XXX move this into a new dri/common/cliprects.c file.
+ */
+GLboolean
+intel_intersect_cliprects(drm_clip_rect_t * dst,
+ const drm_clip_rect_t * a,
+ const drm_clip_rect_t * b)
+{
+ GLint bx = b->x1;
+ GLint by = b->y1;
+ GLint bw = b->x2 - bx;
+ GLint bh = b->y2 - by;
+
+ if (bx < a->x1)
+ bw -= a->x1 - bx, bx = a->x1;
+ if (by < a->y1)
+ bh -= a->y1 - by, by = a->y1;
+ if (bx + bw > a->x2)
+ bw = a->x2 - bx;
+ if (by + bh > a->y2)
+ bh = a->y2 - by;
+ if (bw <= 0)
+ return GL_FALSE;
+ if (bh <= 0)
+ return GL_FALSE;
+
+ dst->x1 = bx;
+ dst->y1 = by;
+ dst->x2 = bx + bw;
+ dst->y2 = by + bh;
+
+ return GL_TRUE;
+}
+
+/**
+ * Return pointer to current color drawing region, or NULL.
+ */
+struct intel_region *
+intel_drawbuf_region(struct intel_context *intel)
+{
+ struct intel_renderbuffer *irbColor =
+ intel_renderbuffer(intel->ctx.DrawBuffer->_ColorDrawBuffers[0][0]);
+ if (irbColor)
+ return irbColor->region;
+ else
+ return NULL;
+}
+
+/**
+ * Return pointer to current color reading region, or NULL.
+ */
+struct intel_region *
+intel_readbuf_region(struct intel_context *intel)
+{
+ struct intel_renderbuffer *irb
+ = intel_renderbuffer(intel->ctx.ReadBuffer->_ColorReadBuffer);
+ if (irb)
+ return irb->region;
+ else
+ return NULL;
+}
+
+
+
+static void
+intelBufferSize(GLframebuffer * buffer, GLuint * width, GLuint * height)
+{
+ GET_CURRENT_CONTEXT(ctx);
+ struct intel_context *intel = intel_context(ctx);
+ /* Need to lock to make sure the driDrawable is up to date. This
+ * information is used to resize Mesa's software buffers, so it has
+ * to be correct.
+ */
+ /* XXX This isn't 100% correct, the given buffer might not be
+ * bound to the current context!
+ */
+ LOCK_HARDWARE(intel);
+ if (intel->driDrawable) {
+ *width = intel->driDrawable->w;
+ *height = intel->driDrawable->h;
+ }
+ else {
+ *width = 0;
+ *height = 0;
+ }
+ UNLOCK_HARDWARE(intel);
+}
+
+
+
+/**
+ * Update the following fields for rendering to a user-created FBO:
+ * intel->numClipRects
+ * intel->pClipRects
+ * intel->drawX
+ * intel->drawY
+ */
+static void
+intelSetRenderbufferClipRects(struct intel_context *intel)
+{
+ assert(intel->ctx.DrawBuffer->Width > 0);
+ assert(intel->ctx.DrawBuffer->Height > 0);
+ intel->fboRect.x1 = 0;
+ intel->fboRect.y1 = 0;
+ intel->fboRect.x2 = intel->ctx.DrawBuffer->Width;
+ intel->fboRect.y2 = intel->ctx.DrawBuffer->Height;
+ intel->numClipRects = 1;
+ intel->pClipRects = &intel->fboRect;
+ intel->drawX = 0;
+ intel->drawY = 0;
+}
+
+
+/**
+ * As above, but for rendering to front buffer of a window.
+ * \sa intelSetRenderbufferClipRects
+ */
+static void
+intelSetFrontClipRects(struct intel_context *intel)
+{
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+
+ if (!dPriv)
+ return;
+
+ intel->numClipRects = dPriv->numClipRects;
+ intel->pClipRects = dPriv->pClipRects;
+ intel->drawX = dPriv->x;
+ intel->drawY = dPriv->y;
+}
+
+
+/**
+ * As above, but for rendering to back buffer of a window.
+ */
+static void
+intelSetBackClipRects(struct intel_context *intel)
+{
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+
+ if (!dPriv)
+ return;
+
+ if (intel->sarea->pf_enabled == 0 && dPriv->numBackClipRects == 0) {
+ /* use the front clip rects */
+ intel->numClipRects = dPriv->numClipRects;
+ intel->pClipRects = dPriv->pClipRects;
+ intel->drawX = dPriv->x;
+ intel->drawY = dPriv->y;
+ }
+ else {
+ /* use the back clip rects */
+ intel->numClipRects = dPriv->numBackClipRects;
+ intel->pClipRects = dPriv->pBackClipRects;
+ intel->drawX = dPriv->backX;
+ intel->drawY = dPriv->backY;
+ }
+}
+
+
+/**
+ * This will be called whenever the currently bound window is moved/resized.
+ * XXX: actually, it seems to NOT be called when the window is only moved (BP).
+ */
+void
+intelWindowMoved(struct intel_context *intel)
+{
+ GLcontext *ctx = &intel->ctx;
+
+ if (!intel->ctx.DrawBuffer) {
+ /* when would this happen? -BP */
+ intelSetFrontClipRects(intel);
+ }
+ else if (intel->ctx.DrawBuffer->Name != 0) {
+ /* drawing to user-created FBO - do nothing */
+ /* Cliprects would be set from intelDrawBuffer() */
+ }
+ else {
+ /* drawing to a window */
+ switch (intel->ctx.DrawBuffer->_ColorDrawBufferMask[0]) {
+ case BUFFER_BIT_FRONT_LEFT:
+ intelSetFrontClipRects(intel);
+ break;
+ case BUFFER_BIT_BACK_LEFT:
+ intelSetBackClipRects(intel);
+ break;
+ default:
+ /* glDrawBuffer(GL_NONE or GL_FRONT_AND_BACK): software fallback */
+ intelSetFrontClipRects(intel);
+ }
+ }
+
+ /* this updates Mesa's notion of the window size */
+ if (ctx->WinSysDrawBuffer) {
+ _mesa_resize_framebuffer(ctx, ctx->WinSysDrawBuffer,
+ intel->driDrawable->w, intel->driDrawable->h);
+ }
+
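+ /* With a new enough DDX, choose which display pipe to synchronize with:
+ * wait for vblank on the pipe that covers the larger portion of this
+ * drawable (preferring pipe B on a tie).
+ */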
+ if (intel->intelScreen->driScrnPriv->ddxMinor >= 7 && intel->driDrawable) {
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+ drmI830Sarea *sarea = intel->sarea;
+ drm_clip_rect_t drw_rect = { .x1 = dPriv->x, .x2 = dPriv->x + dPriv->w,
+ .y1 = dPriv->y, .y2 = dPriv->y + dPriv->h };
+ drm_clip_rect_t pipeA_rect = { .x1 = sarea->pipeA_x, .y1 = sarea->pipeA_y,
+ .x2 = sarea->pipeA_x + sarea->pipeA_w,
+ .y2 = sarea->pipeA_y + sarea->pipeA_h };
+ drm_clip_rect_t pipeB_rect = { .x1 = sarea->pipeB_x, .y1 = sarea->pipeB_y,
+ .x2 = sarea->pipeB_x + sarea->pipeB_w,
+ .y2 = sarea->pipeB_y + sarea->pipeB_h };
+ GLint areaA = driIntersectArea( drw_rect, pipeA_rect );
+ GLint areaB = driIntersectArea( drw_rect, pipeB_rect );
+ GLuint flags = intel->vblank_flags;
+
+ if (areaB > areaA || (areaA == areaB && areaB > 0)) {
+ flags = intel->vblank_flags | VBLANK_FLAG_SECONDARY;
+ } else {
+ flags = intel->vblank_flags & ~VBLANK_FLAG_SECONDARY;
+ }
+
+ if (flags != intel->vblank_flags) {
+ intel->vblank_flags = flags;
+ driGetCurrentVBlank(dPriv, intel->vblank_flags, &intel->vbl_seq);
+ }
+ } else {
+ intel->vblank_flags &= ~VBLANK_FLAG_SECONDARY;
+ }
+
+ /* Update hardware scissor */
+ ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
+ ctx->Scissor.Width, ctx->Scissor.Height);
+}
+
+
+
+/* A true meta version of this would be very simple and additionally
+ * machine independent. Maybe we'll get there one day.
+ */
+static void
+intelClearWithTris(struct intel_context *intel, GLbitfield mask)
+{
+ GLcontext *ctx = &intel->ctx;
+ drm_clip_rect_t clear;
+
+ if (INTEL_DEBUG & DEBUG_BLIT)
+ _mesa_printf("%s 0x%x\n", __FUNCTION__, mask);
+
+ LOCK_HARDWARE(intel);
+
+ /* XXX FBO: was: intel->driDrawable->numClipRects */
+ if (intel->numClipRects) {
+ GLint cx, cy, cw, ch;
+ GLuint buf;
+
+ intel->vtbl.install_meta_state(intel);
+
+ /* Get clear bounds after locking */
+ cx = ctx->DrawBuffer->_Xmin;
+ cy = ctx->DrawBuffer->_Ymin;
+ ch = ctx->DrawBuffer->_Ymax - ctx->DrawBuffer->_Ymin;
+ cw = ctx->DrawBuffer->_Xmax - ctx->DrawBuffer->_Xmin;
+
+ /* note: regardless of 'all', cx, cy, cw, ch are now correct */
+ clear.x1 = cx;
+ clear.y1 = cy;
+ clear.x2 = cx + cw;
+ clear.y2 = cy + ch;
+
+ /* Back and stencil cliprects are the same. Try and do both
+ * buffers at once:
+ */
+ if (mask &
+ (BUFFER_BIT_BACK_LEFT | BUFFER_BIT_STENCIL | BUFFER_BIT_DEPTH)) {
+ struct intel_region *backRegion =
+ intel_get_rb_region(ctx->DrawBuffer, BUFFER_BACK_LEFT);
+ struct intel_region *depthRegion =
+ intel_get_rb_region(ctx->DrawBuffer, BUFFER_DEPTH);
+ const GLuint clearColor = (backRegion && backRegion->cpp == 4)
+ ? intel->ClearColor8888 : intel->ClearColor565;
+
+ intel->vtbl.meta_draw_region(intel, backRegion, depthRegion);
+
+ if (mask & BUFFER_BIT_BACK_LEFT)
+ intel->vtbl.meta_color_mask(intel, GL_TRUE);
+ else
+ intel->vtbl.meta_color_mask(intel, GL_FALSE);
+
+ if (mask & BUFFER_BIT_STENCIL)
+ intel->vtbl.meta_stencil_replace(intel,
+ intel->ctx.Stencil.WriteMask[0],
+ intel->ctx.Stencil.Clear);
+ else
+ intel->vtbl.meta_no_stencil_write(intel);
+
+ if (mask & BUFFER_BIT_DEPTH)
+ intel->vtbl.meta_depth_replace(intel);
+ else
+ intel->vtbl.meta_no_depth_write(intel);
+
+ /* XXX: Using INTEL_BATCH_NO_CLIPRECTS here is dangerous as the
+ * drawing origin may not be correctly emitted.
+ */
+ intel_meta_draw_quad(intel, clear.x1, clear.x2, clear.y1, clear.y2,
+ intel->ctx.Depth.Clear, clearColor,
+ 0, 0, 0, 0); /* texcoords */
+
+ mask &=
+ ~(BUFFER_BIT_BACK_LEFT | BUFFER_BIT_STENCIL | BUFFER_BIT_DEPTH);
+ }
+
+ /* clear the remaining (color) renderbuffers */
+ for (buf = 0; buf < BUFFER_COUNT && mask; buf++) {
+ const GLuint bufBit = 1 << buf;
+ if (mask & bufBit) {
+ struct intel_renderbuffer *irbColor =
+ intel_renderbuffer(ctx->DrawBuffer->
+ Attachment[buf].Renderbuffer);
+ GLuint color;
+
+ ASSERT(irbColor);
+
+ color = (irbColor->region->cpp == 4)
+ ? intel->ClearColor8888 : intel->ClearColor565;
+
+ intel->vtbl.meta_no_depth_write(intel);
+ intel->vtbl.meta_no_stencil_write(intel);
+ intel->vtbl.meta_color_mask(intel, GL_TRUE);
+ intel->vtbl.meta_draw_region(intel, irbColor->region, NULL);
+
+ /* XXX: Using INTEL_BATCH_NO_CLIPRECTS here is dangerous as the
+ * drawing origin may not be correctly emitted.
+ */
+ intel_meta_draw_quad(intel, clear.x1, clear.x2, clear.y1, clear.y2,
+ 0, /* depth clear val */
+ color, 0, 0, 0, 0); /* texcoords */
+
+ mask &= ~bufBit;
+ }
+ }
+
+ intel->vtbl.leave_meta_state(intel);
+ intel_batchbuffer_flush(intel->batch);
+ }
+ UNLOCK_HARDWARE(intel);
+}
+
+
+
+
+/**
+ * Copy the window contents named by dPriv to the rotated (or reflected)
+ * color buffer.
+ * srcBuf is BUFFER_BIT_FRONT_LEFT or BUFFER_BIT_BACK_LEFT to indicate the source.
+ */
+void
+intelRotateWindow(struct intel_context *intel,
+ __DRIdrawablePrivate * dPriv, GLuint srcBuf)
+{
+ intelScreenPrivate *screen = intel->intelScreen;
+ drm_clip_rect_t fullRect;
+ struct intel_region *src;
+ const drm_clip_rect_t *clipRects;
+ int numClipRects;
+ int i;
+ GLenum format, type;
+
+ int xOrig, yOrig;
+ int origNumClipRects;
+ drm_clip_rect_t *origRects;
+
+ /*
+ * set up hardware state
+ */
+ intelFlush(&intel->ctx);
+
+ LOCK_HARDWARE(intel);
+
+ if (!intel->numClipRects) {
+ UNLOCK_HARDWARE(intel);
+ return;
+ }
+
+ intel->vtbl.install_meta_state(intel);
+
+ intel->vtbl.meta_no_depth_write(intel);
+ intel->vtbl.meta_no_stencil_write(intel);
+ intel->vtbl.meta_color_mask(intel, GL_FALSE);
+
+
+ /* save current drawing origin and cliprects (restored at end) */
+ xOrig = intel->drawX;
+ yOrig = intel->drawY;
+ origNumClipRects = intel->numClipRects;
+ origRects = intel->pClipRects;
+
+ /*
+ * set drawing origin, cliprects for full-screen access to rotated screen
+ */
+ fullRect.x1 = 0;
+ fullRect.y1 = 0;
+ fullRect.x2 = screen->rotatedWidth;
+ fullRect.y2 = screen->rotatedHeight;
+ intel->drawX = 0;
+ intel->drawY = 0;
+ intel->numClipRects = 1;
+ intel->pClipRects = &fullRect;
+
+ intel->vtbl.meta_draw_region(intel, screen->rotated_region, NULL); /* ? */
+
+ if (srcBuf == BUFFER_BIT_FRONT_LEFT) {
+ src = intel->intelScreen->front_region;
+ clipRects = dPriv->pClipRects;
+ numClipRects = dPriv->numClipRects;
+ }
+ else {
+ src = intel->intelScreen->back_region;
+ clipRects = dPriv->pBackClipRects;
+ numClipRects = dPriv->numBackClipRects;
+ }
+
+ if (src->cpp == 4) {
+ format = GL_BGRA;
+ type = GL_UNSIGNED_BYTE;
+ }
+ else {
+ format = GL_BGR;
+ type = GL_UNSIGNED_SHORT_5_6_5_REV;
+ }
+
+ /* set the whole screen up as a texture to avoid alignment issues */
+ intel->vtbl.meta_tex_rect_source(intel,
+ src->buffer,
+ screen->width,
+ screen->height, src->pitch, format, type);
+
+ intel->vtbl.meta_texture_blend_replace(intel);
+
+ /*
+ * loop over the source window's cliprects
+ */
+ for (i = 0; i < numClipRects; i++) {
+ int srcX0 = clipRects[i].x1;
+ int srcY0 = clipRects[i].y1;
+ int srcX1 = clipRects[i].x2;
+ int srcY1 = clipRects[i].y2;
+ GLfloat verts[4][2], tex[4][2];
+ int j;
+
+ /* build vertices for four corners of clip rect */
+ verts[0][0] = srcX0;
+ verts[0][1] = srcY0;
+ verts[1][0] = srcX1;
+ verts[1][1] = srcY0;
+ verts[2][0] = srcX1;
+ verts[2][1] = srcY1;
+ verts[3][0] = srcX0;
+ verts[3][1] = srcY1;
+
+ /* .. and texcoords */
+ tex[0][0] = srcX0;
+ tex[0][1] = srcY0;
+ tex[1][0] = srcX1;
+ tex[1][1] = srcY0;
+ tex[2][0] = srcX1;
+ tex[2][1] = srcY1;
+ tex[3][0] = srcX0;
+ tex[3][1] = srcY1;
+
+ /* transform coords to rotated screen coords */
+
+ for (j = 0; j < 4; j++) {
+ matrix23TransformCoordf(&screen->rotMatrix,
+ &verts[j][0], &verts[j][1]);
+ }
+
+ /* draw polygon to map source image to dest region */
+ intel_meta_draw_poly(intel, 4, verts, 0, 0, tex);
+
+ } /* cliprect loop */
+
+ intel->vtbl.leave_meta_state(intel);
+ intel_batchbuffer_flush(intel->batch);
+
+ /* restore original drawing origin and cliprects */
+ intel->drawX = xOrig;
+ intel->drawY = yOrig;
+ intel->numClipRects = origNumClipRects;
+ intel->pClipRects = origRects;
+
+ UNLOCK_HARDWARE(intel);
+}
+
+
+/**
+ * Called by ctx->Driver.Clear.
+ */
+static void
+intelClear(GLcontext *ctx, GLbitfield mask)
+{
+ struct intel_context *intel = intel_context(ctx);
+ const GLuint colorMask = *((GLuint *) & ctx->Color.ColorMask);
+ GLbitfield tri_mask = 0;
+ GLbitfield blit_mask = 0;
+ GLbitfield swrast_mask = 0;
+ GLuint i;
+
+ if (0)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+
+ /* HW color buffers (front, back, aux, generic FBO, etc) */
+ if (colorMask == ~0) {
+ /* clear all R,G,B,A */
+ /* XXX FBO: need to check if colorbuffers are software RBOs! */
+ blit_mask |= (mask & BUFFER_BITS_COLOR);
+ }
+ else {
+ /* glColorMask in effect */
+ tri_mask |= (mask & BUFFER_BITS_COLOR);
+ }
+
+ /* HW stencil */
+ if (mask & BUFFER_BIT_STENCIL) {
+ const struct intel_region *stencilRegion
+ = intel_get_rb_region(ctx->DrawBuffer, BUFFER_STENCIL);
+ if (stencilRegion) {
+ /* have hw stencil */
+ if ((ctx->Stencil.WriteMask[0] & 0xff) != 0xff) {
+ /* not clearing all stencil bits, so use triangle clearing */
+ tri_mask |= BUFFER_BIT_STENCIL;
+ }
+ else {
+ /* clearing all stencil bits, use blitting */
+ blit_mask |= BUFFER_BIT_STENCIL;
+ }
+ }
+ }
+
+ /* HW depth */
+ if (mask & BUFFER_BIT_DEPTH) {
+ /* clear depth with whatever method is used for stencil (see above) */
+ if (tri_mask & BUFFER_BIT_STENCIL)
+ tri_mask |= BUFFER_BIT_DEPTH;
+ else
+ blit_mask |= BUFFER_BIT_DEPTH;
+ }
+
+ /* SW fallback clearing */
+ swrast_mask = mask & ~tri_mask & ~blit_mask;
+
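+ /* Any renderbuffer not created by this driver (ClassID is zero) can't be
+ * cleared by the hardware paths; move it to the swrast mask.
+ */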
+ for (i = 0; i < BUFFER_COUNT; i++) {
+ GLuint bufBit = 1 << i;
+ if ((blit_mask | tri_mask) & bufBit) {
+ if (!ctx->DrawBuffer->Attachment[i].Renderbuffer->ClassID) {
+ blit_mask &= ~bufBit;
+ tri_mask &= ~bufBit;
+ swrast_mask |= bufBit;
+ }
+ }
+ }
+
+
+ intelFlush(ctx); /* XXX intelClearWithBlit also does this */
+
+ if (blit_mask)
+ intelClearWithBlit(ctx, blit_mask);
+
+ if (tri_mask)
+ intelClearWithTris(intel, tri_mask);
+
+ if (swrast_mask)
+ _swrast_Clear(ctx, swrast_mask);
+}
+
+
+
+/* Flip the front & back buffers
+ */
+static void
+intelPageFlip(const __DRIdrawablePrivate * dPriv)
+{
+#if 0
+ struct intel_context *intel;
+ int tmp, ret;
+
+ if (INTEL_DEBUG & DEBUG_IOCTL)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+
+ assert(dPriv);
+ assert(dPriv->driContextPriv);
+ assert(dPriv->driContextPriv->driverPrivate);
+
+ intel = (struct intel_context *) dPriv->driContextPriv->driverPrivate;
+
+ intelFlush(&intel->ctx);
+ LOCK_HARDWARE(intel);
+
+ if (dPriv->pClipRects) {
+ *(drm_clip_rect_t *) intel->sarea->boxes = dPriv->pClipRects[0];
+ intel->sarea->nbox = 1;
+ }
+
+ ret = drmCommandNone(intel->driFd, DRM_I830_FLIP);
+ if (ret) {
+ fprintf(stderr, "%s: %d\n", __FUNCTION__, ret);
+ UNLOCK_HARDWARE(intel);
+ exit(1);
+ }
+
+ tmp = intel->sarea->last_enqueue;
+ intelRefillBatchLocked(intel);
+ UNLOCK_HARDWARE(intel);
+
+
+ intelSetDrawBuffer(&intel->ctx, intel->ctx.Color.DriverDrawBuffer);
+#endif
+}
+
+#if 0
+void
+intelSwapBuffers(__DRIdrawablePrivate * dPriv)
+{
+ if (dPriv->driverPrivate) {
+ const struct gl_framebuffer *fb
+ = (struct gl_framebuffer *) dPriv->driverPrivate;
+ if (fb->Visual.doubleBufferMode) {
+ GET_CURRENT_CONTEXT(ctx);
+ if (ctx && ctx->DrawBuffer == fb) {
+ _mesa_notifySwapBuffers(ctx); /* flush pending rendering */
+ }
+ if (0 /*intel->doPageFlip */ ) { /* doPageFlip is never set !!! */
+ intelPageFlip(dPriv);
+ }
+ else {
+ intelCopyBuffer(dPriv);
+ }
+ }
+ }
+ else {
+ _mesa_problem(NULL,
+ "dPriv has no gl_framebuffer pointer in intelSwapBuffers");
+ }
+}
+#else
+/* Trunk version:
+ */
+void
+intelSwapBuffers(__DRIdrawablePrivate * dPriv)
+{
+ if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
+ GET_CURRENT_CONTEXT(ctx);
+ struct intel_context *intel;
+
+ if (ctx == NULL)
+ return;
+
+ intel = intel_context(ctx);
+
+ if (ctx->Visual.doubleBufferMode) {
+ intelScreenPrivate *screen = intel->intelScreen;
+ _mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
+ if (0 /*intel->doPageFlip */ ) { /* doPageFlip is never set !!! */
+ intelPageFlip(dPriv);
+ }
+ else {
+ intelCopyBuffer(dPriv, NULL);
+ }
+ if (screen->current_rotation != 0) {
+ intelRotateWindow(intel, dPriv, BUFFER_BIT_FRONT_LEFT);
+ }
+ }
+ }
+ else {
+ /* XXX this shouldn't be an error but we can't handle it for now */
+ fprintf(stderr, "%s: drawable has no context!\n", __FUNCTION__);
+ }
+}
+#endif
+
+void
+intelCopySubBuffer(__DRIdrawablePrivate * dPriv, int x, int y, int w, int h)
+{
+ if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
+ struct intel_context *intel =
+ (struct intel_context *) dPriv->driContextPriv->driverPrivate;
+ GLcontext *ctx = &intel->ctx;
+
+ if (ctx->Visual.doubleBufferMode) {
+ drm_clip_rect_t rect;
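+ /* convert from GL's bottom-left window origin to the screen's
+ * top-left origin */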
+ rect.x1 = x + dPriv->x;
+ rect.y1 = (dPriv->h - y - h) + dPriv->y;
+ rect.x2 = rect.x1 + w;
+ rect.y2 = rect.y1 + h;
+ _mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
+ intelCopyBuffer(dPriv, &rect);
+ }
+ }
+ else {
+ /* XXX this shouldn't be an error but we can't handle it for now */
+ fprintf(stderr, "%s: drawable has no context!\n", __FUNCTION__);
+ }
+}
+
+
+/**
+ * Update the hardware state for drawing into a window or framebuffer object.
+ *
+ * Called by glDrawBuffer, glBindFramebufferEXT, MakeCurrent, and other
+ * places within the driver.
+ *
+ * Basically, this needs to be called any time the current framebuffer
+ * changes, the renderbuffers change, or we need to draw into different
+ * color buffers.
+ */
+void
+intel_draw_buffer(GLcontext * ctx, struct gl_framebuffer *fb)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_region *colorRegion, *depthRegion = NULL;
+ struct intel_renderbuffer *irbDepth = NULL, *irbStencil = NULL;
+ int front = 0; /* drawing to front color buffer? */
+
+ if (!fb) {
+ /* this can happen during the initial context initialization */
+ return;
+ }
+
+ /* Do this here, not in core Mesa, since this function is called from
+ * many places within the driver.
+ */
+ if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
+ /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
+ _mesa_update_framebuffer(ctx);
+ /* this updates the DrawBuffer's Width/Height if it's a FBO */
+ _mesa_update_draw_buffer_bounds(ctx);
+ }
+
+ if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
+ /* this may occur when we're called by glBindFrameBuffer() during
+ * the process of someone setting up renderbuffers, etc.
+ */
+ /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
+ return;
+ }
+
+ if (fb->Name)
+ intel_validate_paired_depth_stencil(ctx, fb);
+
+ /*
+ * How many color buffers are we drawing into?
+ */
+ if (fb->_NumColorDrawBuffers[0] != 1
+#if 0
+ /* XXX FBO temporary - always use software rendering */
+ || 1
+#endif
+ ) {
+ /* writing to zero or multiple color buffers */
+ /*_mesa_debug(ctx, "Software rendering\n");*/
+ FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_TRUE);
+ front = 1; /* might not have back color buffer */
+ }
+ else {
+ /* draw to exactly one color buffer */
+ /*_mesa_debug(ctx, "Hardware rendering\n");*/
+ FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_FALSE);
+ if (fb->_ColorDrawBufferMask[0] == BUFFER_BIT_FRONT_LEFT) {
+ front = 1;
+ }
+ }
+
+ /*
+ * Get the intel_renderbuffer for the colorbuffer we're drawing into.
+ * And set up cliprects.
+ */
+ if (fb->Name == 0) {
+ /* drawing to window system buffer */
+ if (intel->sarea->pf_current_page == 1) {
+ /* page flipped back/front */
+ front ^= 1;
+ }
+ if (front) {
+ intelSetFrontClipRects(intel);
+ colorRegion = intel_get_rb_region(fb, BUFFER_FRONT_LEFT);
+ }
+ else {
+ intelSetBackClipRects(intel);
+ colorRegion = intel_get_rb_region(fb, BUFFER_BACK_LEFT);
+ }
+ }
+ else {
+ /* drawing to user-created FBO */
+ struct intel_renderbuffer *irb;
+ intelSetRenderbufferClipRects(intel);
+ irb = intel_renderbuffer(fb->_ColorDrawBuffers[0][0]);
+ colorRegion = (irb && irb->region) ? irb->region : NULL;
+ }
+
+ /* Update culling direction which changes depending on the
+ * orientation of the buffer:
+ */
+ if (ctx->Driver.FrontFace)
+ ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
+ else
+ ctx->NewState |= _NEW_POLYGON;
+
+ if (!colorRegion) {
+ FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_TRUE);
+ }
+ else {
+ FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_FALSE);
+ }
+
+ /***
+ *** Get depth buffer region and check if we need a software fallback.
+ *** Note that the depth buffer is usually a DEPTH_STENCIL buffer.
+ ***/
+ if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
+ irbDepth = intel_renderbuffer(fb->_DepthBuffer->Wrapped);
+ if (irbDepth->region) {
+ FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_FALSE);
+ depthRegion = irbDepth->region;
+ }
+ else {
+ FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_TRUE);
+ depthRegion = NULL;
+ }
+ }
+ else {
+ /* not using depth buffer */
+ FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_FALSE);
+ depthRegion = NULL;
+ }
+
+ /***
+ *** Stencil buffer
+ *** This can only be hardware accelerated if we're using a
+ *** combined DEPTH_STENCIL buffer (for now anyway).
+ ***/
+ if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
+ irbStencil = intel_renderbuffer(fb->_StencilBuffer->Wrapped);
+ if (irbStencil && irbStencil->region) {
+ ASSERT(irbStencil->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
+ FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_FALSE);
+ /* need to re-compute stencil hw state */
+ ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
+ if (!depthRegion)
+ depthRegion = irbStencil->region;
+ }
+ else {
+ FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_TRUE);
+ }
+ }
+ else {
+ /* XXX FBO: instead of FALSE, pass ctx->Stencil.Enabled ??? */
+ FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_FALSE);
+ /* need to re-compute stencil hw state */
+ ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
+ }
+
+
+ /**
+ ** Release old regions, reference new regions
+ **/
+#if 0 /* XXX FBO: this seems to be redundant with i915_state_draw_region() */
+ if (intel->draw_region != colorRegion) {
+ intel_region_release(&intel->draw_region);
+ intel_region_reference(&intel->draw_region, colorRegion);
+ }
+ if (intel->intelScreen->depth_region != depthRegion) {
+ intel_region_release(&intel->intelScreen->depth_region);
+ intel_region_reference(&intel->intelScreen->depth_region, depthRegion);
+ }
+#endif
+
+ intel->vtbl.set_draw_region(intel, colorRegion, depthRegion);
+
+ /* update viewport since it depends on window size */
+ ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
+ ctx->Viewport.Width, ctx->Viewport.Height);
+
+ /* Update hardware scissor */
+ ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
+ ctx->Scissor.Width, ctx->Scissor.Height);
+}
+
+
+static void
+intelDrawBuffer(GLcontext * ctx, GLenum mode)
+{
+ intel_draw_buffer(ctx, ctx->DrawBuffer);
+}
+
+
+static void
+intelReadBuffer(GLcontext * ctx, GLenum mode)
+{
+ if (ctx->ReadBuffer == ctx->DrawBuffer) {
+ /* This will update FBO completeness status.
+ * A framebuffer will be incomplete if the GL_READ_BUFFER setting
+ * refers to a missing renderbuffer. Calling glReadBuffer can set
+ * that straight and can make the drawing buffer complete.
+ */
+ intel_draw_buffer(ctx, ctx->DrawBuffer);
+ }
+ /* Generally, functions which read pixels (glReadPixels, glCopyPixels, etc)
+ * reference ctx->ReadBuffer and do appropriate state checks.
+ */
+}
+
+
+void
+intelInitBufferFuncs(struct dd_function_table *functions)
+{
+ functions->Clear = intelClear;
+ functions->GetBufferSize = intelBufferSize;
+ functions->ResizeBuffers = _mesa_resize_framebuffer;
+ functions->DrawBuffer = intelDrawBuffer;
+ functions->ReadBuffer = intelReadBuffer;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_BUFFERS_H
+#define INTEL_BUFFERS_H
+
+
+struct intel_context;
+
+
+extern GLboolean
+intel_intersect_cliprects(drm_clip_rect_t * dest,
+ const drm_clip_rect_t * a,
+ const drm_clip_rect_t * b);
+
+extern struct intel_region *intel_readbuf_region(struct intel_context *intel);
+
+extern struct intel_region *intel_drawbuf_region(struct intel_context *intel);
+
+extern void intelSwapBuffers(__DRIdrawablePrivate * dPriv);
+
+extern void intelWindowMoved(struct intel_context *intel);
+
+extern void intel_draw_buffer(GLcontext * ctx, struct gl_framebuffer *fb);
+
+extern void intelInitBufferFuncs(struct dd_function_table *functions);
+
+extern void
+intelRotateWindow(struct intel_context *intel,
+ __DRIdrawablePrivate * dPriv, GLuint srcBuf);
+
+#endif /* INTEL_BUFFERS_H */
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "glheader.h"
+#include "context.h"
+#include "matrix.h"
+#include "simple_list.h"
+#include "extensions.h"
+#include "framebuffer.h"
+#include "imports.h"
+
+#include "swrast/swrast.h"
+#include "swrast_setup/swrast_setup.h"
+#include "tnl/tnl.h"
+#include "array_cache/acache.h"
+
+#include "tnl/t_pipeline.h"
+#include "tnl/t_vertex.h"
+
+#include "drivers/common/driverfuncs.h"
+
+#include "intel_screen.h"
+
+#include "i830_dri.h"
+
+#include "intel_buffers.h"
+#include "intel_tex.h"
+#include "intel_span.h"
+#include "intel_tris.h"
+#include "intel_ioctl.h"
+#include "intel_batchbuffer.h"
+#include "intel_blit.h"
+#include "intel_pixel.h"
+#include "intel_regions.h"
+#include "intel_buffer_objects.h"
+#include "intel_fbo.h"
+
+#include "vblank.h"
+#include "utils.h"
+#include "xmlpool.h" /* for symbolic values of enum-type options */
+#ifndef INTEL_DEBUG
+int INTEL_DEBUG = (0);
+#endif
+
+#define need_GL_ARB_multisample
+#define need_GL_ARB_point_parameters
+#define need_GL_ARB_texture_compression
+#define need_GL_ARB_vertex_buffer_object
+#define need_GL_ARB_vertex_program
+#define need_GL_ARB_window_pos
+#define need_GL_EXT_blend_color
+#define need_GL_EXT_blend_equation_separate
+#define need_GL_EXT_blend_func_separate
+#define need_GL_EXT_blend_minmax
+#define need_GL_EXT_cull_vertex
+#define need_GL_EXT_fog_coord
+#define need_GL_EXT_framebuffer_object
+#define need_GL_EXT_multi_draw_arrays
+#define need_GL_EXT_secondary_color
+#define need_GL_NV_vertex_program
+#include "extension_helper.h"
+
+
+#define DRIVER_DATE "20060929"
+
+_glthread_Mutex lockMutex;
+static GLboolean lockMutexInit = GL_FALSE;
+
+
+static const GLubyte *
+intelGetString(GLcontext * ctx, GLenum name)
+{
+ const char *chipset;
+ static char buffer[128];
+
+ switch (name) {
+ case GL_VENDOR:
+ return (GLubyte *) "Tungsten Graphics, Inc";
+ break;
+
+ case GL_RENDERER:
+ switch (intel_context(ctx)->intelScreen->deviceID) {
+ case PCI_CHIP_845_G:
+ chipset = "Intel(R) 845G";
+ break;
+ case PCI_CHIP_I830_M:
+ chipset = "Intel(R) 830M";
+ break;
+ case PCI_CHIP_I855_GM:
+ chipset = "Intel(R) 852GM/855GM";
+ break;
+ case PCI_CHIP_I865_G:
+ chipset = "Intel(R) 865G";
+ break;
+ case PCI_CHIP_I915_G:
+ chipset = "Intel(R) 915G";
+ break;
+ case PCI_CHIP_I915_GM:
+ chipset = "Intel(R) 915GM";
+ break;
+ case PCI_CHIP_I945_G:
+ chipset = "Intel(R) 945G";
+ break;
+ case PCI_CHIP_I945_GM:
+ chipset = "Intel(R) 945GM";
+ break;
+ default:
+ chipset = "Unknown Intel Chipset";
+ break;
+ }
+
+ (void) driGetRendererString(buffer, chipset, DRIVER_DATE, 0);
+ return (GLubyte *) buffer;
+
+ default:
+ return NULL;
+ }
+}
+
+
+/**
+ * Extension strings exported by the intel driver.
+ *
+ * \note
+ * It appears that ARB_texture_env_crossbar has "disappeared" compared to the
+ * old i830-specific driver.
+ */
+const struct dri_extension card_extensions[] = {
+ {"GL_ARB_multisample", GL_ARB_multisample_functions},
+ {"GL_ARB_multitexture", NULL},
+ {"GL_ARB_point_parameters", GL_ARB_point_parameters_functions},
+ {"GL_ARB_texture_border_clamp", NULL},
+ {"GL_ARB_texture_compression", GL_ARB_texture_compression_functions},
+ {"GL_ARB_texture_cube_map", NULL},
+ {"GL_ARB_texture_env_add", NULL},
+ {"GL_ARB_texture_env_combine", NULL},
+ {"GL_ARB_texture_env_dot3", NULL},
+ {"GL_ARB_texture_mirrored_repeat", NULL},
+ {"GL_ARB_texture_rectangle", NULL},
+ {"GL_ARB_vertex_buffer_object", GL_ARB_vertex_buffer_object_functions},
+ {"GL_ARB_pixel_buffer_object", NULL},
+ {"GL_ARB_vertex_program", GL_ARB_vertex_program_functions},
+ {"GL_ARB_window_pos", GL_ARB_window_pos_functions},
+ {"GL_EXT_blend_color", GL_EXT_blend_color_functions},
+ {"GL_EXT_blend_equation_separate",
+ GL_EXT_blend_equation_separate_functions},
+ {"GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions},
+ {"GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions},
+ {"GL_EXT_blend_subtract", NULL},
+ {"GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions},
+ {"GL_EXT_fog_coord", GL_EXT_fog_coord_functions},
+ {"GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions},
+ {"GL_EXT_multi_draw_arrays", GL_EXT_multi_draw_arrays_functions},
+#if 1 /* XXX FBO temporary? */
+ {"GL_EXT_packed_depth_stencil", NULL},
+#endif
+ {"GL_EXT_secondary_color", GL_EXT_secondary_color_functions},
+ {"GL_EXT_stencil_wrap", NULL},
+ {"GL_EXT_texture_edge_clamp", NULL},
+ {"GL_EXT_texture_env_combine", NULL},
+ {"GL_EXT_texture_env_dot3", NULL},
+ {"GL_EXT_texture_filter_anisotropic", NULL},
+ {"GL_EXT_texture_lod_bias", NULL},
+ {"GL_3DFX_texture_compression_FXT1", NULL},
+ {"GL_APPLE_client_storage", NULL},
+ {"GL_MESA_pack_invert", NULL},
+ {"GL_MESA_ycbcr_texture", NULL},
+ {"GL_NV_blend_square", NULL},
+ {"GL_NV_vertex_program", GL_NV_vertex_program_functions},
+ {"GL_NV_vertex_program1_1", NULL},
+/* { "GL_SGIS_generate_mipmap", NULL }, */
+ {NULL, NULL}
+};
+
+extern const struct tnl_pipeline_stage _intel_render_stage;
+
+static const struct tnl_pipeline_stage *intel_pipeline[] = {
+ &_tnl_vertex_transform_stage,
+ &_tnl_vertex_cull_stage,
+ &_tnl_normal_transform_stage,
+ &_tnl_lighting_stage,
+ &_tnl_fog_coordinate_stage,
+ &_tnl_texgen_stage,
+ &_tnl_texture_transform_stage,
+ &_tnl_point_attenuation_stage,
+ &_tnl_arb_vertex_program_stage,
+ &_tnl_vertex_program_stage,
+#if 1
+ &_intel_render_stage, /* ADD: unclipped rastersetup-to-dma */
+#endif
+ &_tnl_render_stage,
+ 0,
+};
+
+
+static const struct dri_debug_control debug_control[] = {
+ {"tex", DEBUG_TEXTURE},
+ {"state", DEBUG_STATE},
+ {"ioctl", DEBUG_IOCTL},
+ {"blit", DEBUG_BLIT},
+ {"mip", DEBUG_MIPTREE},
+ {"fall", DEBUG_FALLBACKS},
+ {"verb", DEBUG_VERBOSE},
+ {"bat", DEBUG_BATCH},
+ {"pix", DEBUG_PIXEL},
+ {"buf", DEBUG_BUFMGR},
+ {"reg", DEBUG_REGION},
+ {"fbo", DEBUG_FBO},
+ {"lock", DEBUG_LOCK},
+ {NULL, 0}
+};
+
+
+static void
+intelInvalidateState(GLcontext * ctx, GLuint new_state)
+{
+ _swrast_InvalidateState(ctx, new_state);
+ _swsetup_InvalidateState(ctx, new_state);
+ _ac_InvalidateState(ctx, new_state);
+ _tnl_InvalidateState(ctx, new_state);
+ _tnl_invalidate_vertex_state(ctx, new_state);
+ intel_context(ctx)->NewGLState |= new_state;
+}
+
+
+void
+intelFlush(GLcontext * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ if (intel->Fallback)
+ _swrast_flush(ctx);
+
+ INTEL_FIREVERTICES(intel);
+
+ if (intel->batch->map != intel->batch->ptr)
+ intel_batchbuffer_flush(intel->batch);
+
+ /* XXX: Need to do an MI_FLUSH here.
+ */
+}
+
+
+/**
+ * Check if we need to rotate/warp the front color buffer to the
+ * rotated screen. We generally need to do this when we get a glFlush
+ * or glFinish after drawing to the front color buffer.
+ */
+static void
+intelCheckFrontRotate(GLcontext * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ if (intel->ctx.DrawBuffer->_ColorDrawBufferMask[0] ==
+ BUFFER_BIT_FRONT_LEFT) {
+ intelScreenPrivate *screen = intel->intelScreen;
+ if (screen->current_rotation != 0) {
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+ intelRotateWindow(intel, dPriv, BUFFER_BIT_FRONT_LEFT);
+ }
+ }
+}
+
+
+/**
+ * Called via glFlush.
+ */
+static void
+intelglFlush(GLcontext * ctx)
+{
+ intelFlush(ctx);
+ intelCheckFrontRotate(ctx);
+}
+
+void
+intelFinish(GLcontext * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ intelFlush(ctx);
+ if (intel->batch->last_fence) {
+ driFenceFinish(intel->batch->last_fence,
+ 0, GL_FALSE);
+ driFenceUnReference(intel->batch->last_fence);
+ intel->batch->last_fence = NULL;
+ }
+ intelCheckFrontRotate(ctx);
+}
+
+
+void
+intelInitDriverFunctions(struct dd_function_table *functions)
+{
+ _mesa_init_driver_functions(functions);
+
+ functions->Flush = intelglFlush;
+ functions->Finish = intelFinish;
+ functions->GetString = intelGetString;
+ functions->UpdateState = intelInvalidateState;
+ functions->CopyColorTable = _swrast_CopyColorTable;
+ functions->CopyColorSubTable = _swrast_CopyColorSubTable;
+ functions->CopyConvolutionFilter1D = _swrast_CopyConvolutionFilter1D;
+ functions->CopyConvolutionFilter2D = _swrast_CopyConvolutionFilter2D;
+
+ intelInitTextureFuncs(functions);
+ intelInitPixelFuncs(functions);
+ intelInitStateFuncs(functions);
+ intelInitBufferFuncs(functions);
+}
+
+
+GLboolean
+intelInitContext(struct intel_context *intel,
+ const __GLcontextModes * mesaVis,
+ __DRIcontextPrivate * driContextPriv,
+ void *sharedContextPrivate,
+ struct dd_function_table *functions)
+{
+ GLcontext *ctx = &intel->ctx;
+ GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
+ __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
+ intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
+ drmI830Sarea *saPriv = (drmI830Sarea *)
+ (((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);
+ int fthrottle_mode;
+
+ if (!_mesa_initialize_context(&intel->ctx,
+ mesaVis, shareCtx,
+ functions, (void *) intel))
+ return GL_FALSE;
+
+ driContextPriv->driverPrivate = intel;
+ intel->intelScreen = intelScreen;
+ intel->driScreen = sPriv;
+ intel->sarea = saPriv;
+
+ if (!lockMutexInit) {
+ lockMutexInit = GL_TRUE;
+ _glthread_INIT_MUTEX(lockMutex);
+ }
+
+ driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
+ intel->driScreen->myNum, "i915");
+
+ ctx->Const.MaxTextureMaxAnisotropy = 2.0;
+
+ /* This doesn't yet catch all non-conformant rendering, but it's a
+ * start.
+ */
+ if (getenv("INTEL_STRICT_CONFORMANCE")) {
+ intel->strict_conformance = 1;
+ }
+
+ ctx->Const.MinLineWidth = 1.0;
+ ctx->Const.MinLineWidthAA = 1.0;
+ ctx->Const.MaxLineWidth = 3.0;
+ ctx->Const.MaxLineWidthAA = 3.0;
+ ctx->Const.LineWidthGranularity = 1.0;
+
+ ctx->Const.MinPointSize = 1.0;
+ ctx->Const.MinPointSizeAA = 1.0;
+ ctx->Const.MaxPointSize = 255.0;
+ ctx->Const.MaxPointSizeAA = 3.0;
+ ctx->Const.PointSizeGranularity = 1.0;
+
+ ctx->Const.MaxColorAttachments = 4; /* XXX FBO: review this */
+
+ /* Initialize the software rasterizer and helper modules. */
+ _swrast_CreateContext(ctx);
+ _ac_CreateContext(ctx);
+ _tnl_CreateContext(ctx);
+ _swsetup_CreateContext(ctx);
+
+ /* Install the customized pipeline: */
+ _tnl_destroy_pipeline(ctx);
+ _tnl_install_pipeline(ctx, intel_pipeline);
+
+ /* Configure swrast to match hardware characteristics: */
+ _swrast_allow_pixel_fog(ctx, GL_FALSE);
+ _swrast_allow_vertex_fog(ctx, GL_TRUE);
+
+ /* Dri stuff */
+ intel->hHWContext = driContextPriv->hHWContext;
+ intel->driFd = sPriv->fd;
+ intel->driHwLock = (drmLock *) & sPriv->pSAREA->lock;
+
+ intel->hw_stipple = 1;
+
+ /* XXX FBO: this doesn't seem to be used anywhere */
+ switch (mesaVis->depthBits) {
+ case 0: /* what to do in this case? */
+ case 16:
+ intel->polygon_offset_scale = 1.0 / 0xffff;
+ break;
+ case 24:
+ intel->polygon_offset_scale = 2.0 / 0xffffff; /* req'd to pass glean */
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ /* Initialize swrast, tnl driver tables: */
+ intelInitSpanFuncs(ctx);
+ intelInitTriFuncs(ctx);
+
+
+ intel->RenderIndex = ~0;
+
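+ /* Frame throttling: wait for hardware IRQs or usleep-poll, depending
+ * on the driconf fthrottle_mode setting.
+ */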
+ fthrottle_mode = driQueryOptioni(&intel->optionCache, "fthrottle_mode");
+ intel->iw.irq_seq = -1;
+ intel->irqsEmitted = 0;
+
+ intel->do_irqs = (intel->intelScreen->irq_active &&
+ fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS);
+
+ intel->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);
+
+ intel->vblank_flags = (intel->intelScreen->irq_active != 0)
+ ? driGetDefaultVBlankFlags(&intel->optionCache) : VBLANK_FLAG_NO_IRQ;
+
+ (*dri_interface->getUST) (&intel->swap_ust);
+ _math_matrix_ctr(&intel->ViewportMatrix);
+
+ /* Disable imaging extension until convolution is working in
+ * teximage paths:
+ */
+ driInitExtensions(ctx, card_extensions,
+/* GL_TRUE, */
+ GL_FALSE);
+
+
+ intel->batch = intel_batchbuffer_alloc(intel);
+ intel->last_swap_fence = NULL;
+ intel->first_swap_fence = NULL;
+
+ intel_bufferobj_init(intel);
+ intel_fbo_init(intel);
+
+ if (intel->ctx.Mesa_DXTn) {
+ _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
+ _mesa_enable_extension(ctx, "GL_S3_s3tc");
+ }
+ else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
+ _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
+ }
+
+ intel->prim.primitive = ~0;
+
+
+#if DO_DEBUG
+ INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
+#endif
+
+ if (getenv("INTEL_NO_RAST")) {
+ fprintf(stderr, "disabling 3D rasterization\n");
+ FALLBACK(intel, INTEL_FALLBACK_USER, 1);
+ }
+
+ return GL_TRUE;
+}
+
+void
+intelDestroyContext(__DRIcontextPrivate * driContextPriv)
+{
+ struct intel_context *intel =
+ (struct intel_context *) driContextPriv->driverPrivate;
+
+ assert(intel); /* should never be null */
+ if (intel) {
+ GLboolean release_texture_heaps;
+
+ INTEL_FIREVERTICES(intel);
+
+ intel->vtbl.destroy(intel);
+
+ release_texture_heaps = (intel->ctx.Shared->RefCount == 1);
+ _swsetup_DestroyContext(&intel->ctx);
+ _tnl_DestroyContext(&intel->ctx);
+ _ac_DestroyContext(&intel->ctx);
+
+ _swrast_DestroyContext(&intel->ctx);
+ intel->Fallback = 0; /* don't call _swrast_Flush later */
+
+ intel_batchbuffer_free(intel->batch);
+
+ if (intel->last_swap_fence) {
+ driFenceFinish(intel->last_swap_fence, DRM_FENCE_TYPE_EXE, GL_TRUE);
+ driFenceUnReference(intel->last_swap_fence);
+ intel->last_swap_fence = NULL;
+ }
+ if (intel->first_swap_fence) {
+ driFenceFinish(intel->first_swap_fence, DRM_FENCE_TYPE_EXE, GL_TRUE);
+ driFenceUnReference(intel->first_swap_fence);
+ intel->first_swap_fence = NULL;
+ }
+
+
+ if (release_texture_heaps) {
+ /* This share group is about to go away, free our private
+ * texture object data.
+ */
+ if (INTEL_DEBUG & DEBUG_TEXTURE)
+ fprintf(stderr, "do something to free texture heaps\n");
+ }
+
+ /* free the Mesa context */
+ _mesa_free_context_data(&intel->ctx);
+ }
+}
+
+GLboolean
+intelUnbindContext(__DRIcontextPrivate * driContextPriv)
+{
+ return GL_TRUE;
+}
+
+GLboolean
+intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
+ __DRIdrawablePrivate * driDrawPriv,
+ __DRIdrawablePrivate * driReadPriv)
+{
+
+ if (driContextPriv) {
+ struct intel_context *intel =
+ (struct intel_context *) driContextPriv->driverPrivate;
+ GLframebuffer *drawFb = (GLframebuffer *) driDrawPriv->driverPrivate;
+ GLframebuffer *readFb = (GLframebuffer *) driReadPriv->driverPrivate;
+
+
+ /* XXX FBO temporary fix-ups! */
+ /* if the renderbuffers don't have regions, init them from the context */
+ {
+ struct intel_renderbuffer *irbFront
+ = intel_get_renderbuffer(drawFb, BUFFER_FRONT_LEFT);
+ struct intel_renderbuffer *irbBack
+ = intel_get_renderbuffer(drawFb, BUFFER_BACK_LEFT);
+ struct intel_renderbuffer *irbDepth
+ = intel_get_renderbuffer(drawFb, BUFFER_DEPTH);
+ struct intel_renderbuffer *irbStencil
+ = intel_get_renderbuffer(drawFb, BUFFER_STENCIL);
+
+ if (irbFront && !irbFront->region) {
+ intel_region_reference(&irbFront->region, intel->intelScreen->front_region);
+ }
+ if (irbBack && !irbBack->region) {
+ intel_region_reference(&irbBack->region, intel->intelScreen->back_region);
+ }
+ if (irbDepth && !irbDepth->region) {
+ intel_region_reference(&irbDepth->region, intel->intelScreen->depth_region);
+ }
+ if (irbStencil && !irbStencil->region) {
+ intel_region_reference(&irbStencil->region, intel->intelScreen->depth_region);
+ }
+ }
+
+ _mesa_make_current(&intel->ctx, drawFb, readFb);
+
+ /* The drawbuffer won't always be updated by _mesa_make_current:
+ */
+ if (intel->ctx.DrawBuffer == drawFb) {
+
+ if (intel->driDrawable != driDrawPriv) {
+ driDrawableInitVBlank(driDrawPriv, intel->vblank_flags, &intel->vbl_seq);
+ intel->driDrawable = driDrawPriv;
+ intelWindowMoved(intel);
+ }
+
+ intel_draw_buffer(&intel->ctx, drawFb);
+ }
+ }
+ else {
+ _mesa_make_current(NULL, NULL, NULL);
+ }
+
+ return GL_TRUE;
+}
+
+static void
+intelContendedLock(struct intel_context *intel, GLuint flags)
+{
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+ __DRIscreenPrivate *sPriv = intel->driScreen;
+ intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
+ drmI830Sarea *sarea = intel->sarea;
+
+ drmGetLock(intel->driFd, intel->hHWContext, flags);
+
+ if (INTEL_DEBUG & DEBUG_LOCK)
+ _mesa_printf("%s - got contended lock\n", __progname);
+
+ /* If the window moved, may need to set a new cliprect now.
+ *
+ * NOTE: This releases and regains the hw lock, so all state
+ * checking must be done *after* this call:
+ */
+ if (dPriv)
+ DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);
+
+ if (sarea->width != intelScreen->width ||
+ sarea->height != intelScreen->height ||
+ sarea->rotation != intelScreen->current_rotation) {
+
+ intelUpdateScreenRotation(sPriv, sarea);
+
+ /*
+ * This will drop the outstanding batchbuffer on the floor
+ * FIXME: This should be done for all contexts?
+ */
+
+ intel_batchbuffer_reset(intel->batch);
+
+ /* lose all primitives */
+ intel->prim.primitive = ~0;
+ intel->prim.start_ptr = 0;
+ intel->prim.flush = 0;
+
+ /* re-emit all state */
+ intel->vtbl.lost_hardware(intel);
+
+ /* force window update */
+ intel->lastStamp = 0;
+ }
+
+
+ /* Drawable changed?
+ */
+ if (dPriv && intel->lastStamp != dPriv->lastStamp) {
+ intelWindowMoved(intel);
+ intel->lastStamp = dPriv->lastStamp;
+ }
+}
+
+
+extern _glthread_Mutex lockMutex;
+
+
+/* Lock the hardware and validate our state.
+ */
+void LOCK_HARDWARE( struct intel_context *intel )
+{
+ char __ret=0;
+
+ _glthread_LOCK_MUTEX(lockMutex);
+ assert(!intel->locked);
+
+ if (intel->swap_scheduled) {
+ drmVBlank vbl;
+ vbl.request.type = DRM_VBLANK_ABSOLUTE;
+ if ( intel->vblank_flags & VBLANK_FLAG_SECONDARY ) {
+ vbl.request.type |= DRM_VBLANK_SECONDARY;
+ }
+ vbl.request.sequence = intel->vbl_seq;
+ drmWaitVBlank(intel->driFd, &vbl);
+ intel->swap_scheduled = 0;
+ }
+
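+ /* Fast path: try to take the heavyweight lock with a compare-and-swap.
+ * If another client holds it, fall back to the contended path, which
+ * also revalidates drawable and screen state.
+ */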
+ DRM_CAS(intel->driHwLock, intel->hHWContext,
+ (DRM_LOCK_HELD|intel->hHWContext), __ret);
+
+ if (__ret)
+ intelContendedLock( intel, 0 );
+
+ if (INTEL_DEBUG & DEBUG_LOCK)
+ _mesa_printf("%s - locked\n", __progname);
+
+ intel->locked = 1;
+}
+
+
+/* Unlock the hardware and release the driver's lock mutex.
+ */
+void UNLOCK_HARDWARE( struct intel_context *intel )
+{
+ intel->locked = 0;
+
+ DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);
+
+ _glthread_UNLOCK_MUTEX(lockMutex);
+
+ if (INTEL_DEBUG & DEBUG_LOCK)
+ _mesa_printf("%s - unlocked\n", __progname);
+}
+
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTELCONTEXT_INC
+#define INTELCONTEXT_INC
+
+
+
+#include "mtypes.h"
+#include "drm.h"
+#include "mm.h"
+#include "texmem.h"
+
+#include "intel_screen.h"
+#include "i915_drm.h"
+#include "i830_common.h"
+#include "tnl/t_vertex.h"
+
+#define TAG(x) intel##x
+#include "tnl_dd/t_dd_vertex.h"
+#undef TAG
+
+#define DV_PF_555 (1<<8)
+#define DV_PF_565 (2<<8)
+#define DV_PF_8888 (3<<8)
+
+struct intel_region;
+struct intel_context;
+struct _DriBufferObject;
+
+typedef void (*intel_tri_func) (struct intel_context *, intelVertex *,
+ intelVertex *, intelVertex *);
+typedef void (*intel_line_func) (struct intel_context *, intelVertex *,
+ intelVertex *);
+typedef void (*intel_point_func) (struct intel_context *, intelVertex *);
+
+#define INTEL_FALLBACK_DRAW_BUFFER 0x1
+#define INTEL_FALLBACK_READ_BUFFER 0x2
+#define INTEL_FALLBACK_DEPTH_BUFFER 0x4
+#define INTEL_FALLBACK_STENCIL_BUFFER 0x8
+#define INTEL_FALLBACK_USER 0x10
+#define INTEL_FALLBACK_RENDERMODE 0x20
+
+extern void intelFallback(struct intel_context *intel, GLuint bit,
+ GLboolean mode);
+#define FALLBACK( intel, bit, mode ) intelFallback( intel, bit, mode )
+
+
+#define INTEL_WRITE_PART 0x1
+#define INTEL_WRITE_FULL 0x2
+#define INTEL_READ 0x4
+
+struct intel_texture_object
+{
+ struct gl_texture_object base; /* The "parent" object */
+
+ /* The mipmap tree must include at least these levels once
+ * validated:
+ */
+ GLuint firstLevel;
+ GLuint lastLevel;
+
+ /* Offset for firstLevel image:
+ */
+ GLuint textureOffset;
+
+ /* On validation any active images held in main memory or in other
+ * regions will be copied to this region and the old storage freed.
+ */
+ struct intel_mipmap_tree *mt;
+};
+
+
+
+struct intel_texture_image
+{
+ struct gl_texture_image base;
+
+ /* These aren't stored in gl_texture_image
+ */
+ GLuint level;
+ GLuint face;
+
+ /* If intelImage->mt != NULL, image data is stored here.
+ * Else if intelImage->base.Data != NULL, image is stored there.
+ * Else there is no image data.
+ */
+ struct intel_mipmap_tree *mt;
+};
+
+
+#define INTEL_MAX_FIXUP 64
+
+struct intel_context
+{
+ GLcontext ctx; /* the parent class */
+
+ struct
+ {
+ void (*destroy) (struct intel_context * intel);
+ void (*emit_state) (struct intel_context * intel);
+ void (*lost_hardware) (struct intel_context * intel);
+ void (*update_texture_state) (struct intel_context * intel);
+
+ void (*render_start) (struct intel_context * intel);
+ void (*set_draw_region) (struct intel_context * intel,
+ struct intel_region * draw_region,
+ struct intel_region * depth_region);
+
+ GLuint(*flush_cmd) (void);
+
+ void (*reduced_primitive_state) (struct intel_context * intel,
+ GLenum rprim);
+
+ GLboolean(*check_vertex_size) (struct intel_context * intel,
+ GLuint expected);
+
+
+ /* Metaops:
+ */
+ void (*install_meta_state) (struct intel_context * intel);
+ void (*leave_meta_state) (struct intel_context * intel);
+
+ void (*meta_draw_region) (struct intel_context * intel,
+ struct intel_region * draw_region,
+ struct intel_region * depth_region);
+
+ void (*meta_color_mask) (struct intel_context * intel, GLboolean);
+
+ void (*meta_stencil_replace) (struct intel_context * intel,
+ GLuint mask, GLuint clear);
+
+ void (*meta_depth_replace) (struct intel_context * intel);
+
+ void (*meta_texture_blend_replace) (struct intel_context * intel);
+
+ void (*meta_no_stencil_write) (struct intel_context * intel);
+ void (*meta_no_depth_write) (struct intel_context * intel);
+ void (*meta_no_texture) (struct intel_context * intel);
+
+ void (*meta_import_pixel_state) (struct intel_context * intel);
+
+ GLboolean(*meta_tex_rect_source) (struct intel_context * intel,
+ struct _DriBufferObject * buffer,
+ GLuint offset,
+ GLuint pitch,
+ GLuint height,
+ GLenum format, GLenum type);
+ void (*rotate_window) (struct intel_context * intel,
+ __DRIdrawablePrivate * dPriv, GLuint srcBuf);
+
+ void (*assert_not_dirty) (struct intel_context *intel);
+
+ } vtbl;
+
+ GLint refcount;
+ GLuint Fallback;
+ GLuint NewGLState;
+
+ struct _DriFenceObject *last_swap_fence;
+ struct _DriFenceObject *first_swap_fence;
+
+ struct intel_batchbuffer *batch;
+
+ struct
+ {
+ GLuint id;
+ GLuint primitive;
+ GLubyte *start_ptr;
+ void (*flush) (struct intel_context *);
+ } prim;
+
+ GLboolean locked;
+ char *prevLockFile;
+ int prevLockLine;
+
+ GLuint ClearColor565;
+ GLuint ClearColor8888;
+
+ /* Offsets of fields within the current vertex:
+ */
+ GLuint coloroffset;
+ GLuint specoffset;
+ GLuint wpos_offset;
+ GLuint wpos_size;
+
+ struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
+ GLuint vertex_attr_count;
+
+ GLfloat polygon_offset_scale; /* dependent on depth_scale, bpp */
+
+ GLboolean hw_stipple;
+ GLboolean strict_conformance;
+
+ /* AGP memory buffer manager:
+ */
+ struct bufmgr *bm;
+
+
+ /* State for intelvb.c and inteltris.c.
+ */
+ GLuint RenderIndex;
+ GLmatrix ViewportMatrix;
+ GLenum render_primitive;
+ GLenum reduced_primitive;
+ GLuint vertex_size;
+ GLubyte *verts; /* points to tnl->clipspace.vertex_buf */
+
+#if 0
+ struct intel_region *front_region; /* XXX FBO: obsolete */
+ struct intel_region *rotated_region; /* XXX FBO: obsolete */
+ struct intel_region *back_region; /* XXX FBO: obsolete */
+ struct intel_region *draw_region; /* XXX FBO: rename to color_region */
+ struct intel_region *depth_region; /**< currently bound depth/Z region */
+#endif
+
+ /* Fallback rasterization functions
+ */
+ intel_point_func draw_point;
+ intel_line_func draw_line;
+ intel_tri_func draw_tri;
+
+ /* These refer to the current drawing buffer:
+ */
+ int drawX, drawY; /**< origin of drawing area within region */
+ GLuint numClipRects; /**< cliprects for drawing */
+ drm_clip_rect_t *pClipRects;
+ drm_clip_rect_t fboRect; /**< cliprect for FBO rendering */
+
+ int perf_boxes;
+
+ GLuint do_usleeps;
+ int do_irqs;
+ GLuint irqsEmitted;
+ drm_i915_irq_wait_t iw;
+
+ drm_context_t hHWContext;
+ drmLock *driHwLock;
+ int driFd;
+
+ __DRIdrawablePrivate *driDrawable;
+ __DRIscreenPrivate *driScreen;
+ intelScreenPrivate *intelScreen;
+ drmI830Sarea *sarea;
+
+ GLuint lastStamp;
+
+ /**
+ * Configuration cache
+ */
+ driOptionCache optionCache;
+
+ /* Vertical blank (VBI) synchronization state:
+ */
+ GLuint vbl_seq;
+ GLuint vblank_flags;
+
+ int64_t swap_ust;
+ int64_t swap_missed_ust;
+
+ GLuint swap_count;
+ GLuint swap_missed_count;
+
+ GLuint swap_scheduled;
+};
+
+/* These are functions now:
+ */
+void LOCK_HARDWARE( struct intel_context *intel );
+void UNLOCK_HARDWARE( struct intel_context *intel );
+
+extern char *__progname;
+
+
+#define SUBPIXEL_X 0.125
+#define SUBPIXEL_Y 0.125
+
+#define INTEL_FIREVERTICES(intel) \
+do { \
+ if ((intel)->prim.flush) \
+ (intel)->prim.flush(intel); \
+} while (0)
+
+/* ================================================================
+ * Color packing:
+ */
+
+#define INTEL_PACKCOLOR4444(r,g,b,a) \
+ ((((a) & 0xf0) << 8) | (((r) & 0xf0) << 4) | ((g) & 0xf0) | ((b) >> 4))
+
+#define INTEL_PACKCOLOR1555(r,g,b,a) \
+ ((((r) & 0xf8) << 7) | (((g) & 0xf8) << 2) | (((b) & 0xf8) >> 3) | \
+ ((a) ? 0x8000 : 0))
+
+#define INTEL_PACKCOLOR565(r,g,b) \
+ ((((r) & 0xf8) << 8) | (((g) & 0xfc) << 3) | (((b) & 0xf8) >> 3))
+
+#define INTEL_PACKCOLOR8888(r,g,b,a) \
+ (((a) << 24) | ((r) << 16) | ((g) << 8) | (b))
+
+
+
+/* ================================================================
+ * From linux kernel i386 header files, copes with odd sizes better
+ * than COPY_DWORDS would:
+ * XXX Put this in src/mesa/main/imports.h ???
+ */
+#if defined(i386) || defined(__i386__)
+static INLINE void *
+__memcpy(void *to, const void *from, size_t n)
+{
+ int d0, d1, d2;
+ __asm__ __volatile__("rep ; movsl\n\t"
+ "testb $2,%b4\n\t"
+ "je 1f\n\t"
+ "movsw\n"
+ "1:\ttestb $1,%b4\n\t"
+ "je 2f\n\t"
+ "movsb\n" "2:":"=&c"(d0), "=&D"(d1), "=&S"(d2)
+ :"0"(n / 4), "q"(n), "1"((long) to), "2"((long) from)
+ :"memory");
+ return (to);
+}
+#else
+#define __memcpy(a,b,c) memcpy(a,b,c)
+#endif
+
+
+
+/* ================================================================
+ * Debugging:
+ */
+#define DO_DEBUG 1
+#if DO_DEBUG
+extern int INTEL_DEBUG;
+#else
+#define INTEL_DEBUG 0
+#endif
+
+#define DEBUG_TEXTURE 0x1
+#define DEBUG_STATE 0x2
+#define DEBUG_IOCTL 0x4
+#define DEBUG_BLIT 0x8
+#define DEBUG_MIPTREE 0x10
+#define DEBUG_FALLBACKS 0x20
+#define DEBUG_VERBOSE 0x40
+#define DEBUG_BATCH 0x80
+#define DEBUG_PIXEL 0x100
+#define DEBUG_BUFMGR 0x200
+#define DEBUG_REGION 0x400
+#define DEBUG_FBO 0x800
+#define DEBUG_LOCK 0x1000
+
+#define DBG(...) do { if (INTEL_DEBUG & FILE_DEBUG_FLAG) _mesa_printf(__VA_ARGS__); } while(0)
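+/* DBG() expects the including .c file to #define FILE_DEBUG_FLAG to one of
+ * the DEBUG_* bits above.
+ */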
+
+
+#define PCI_CHIP_845_G 0x2562
+#define PCI_CHIP_I830_M 0x3577
+#define PCI_CHIP_I855_GM 0x3582
+#define PCI_CHIP_I865_G 0x2572
+#define PCI_CHIP_I915_G 0x2582
+#define PCI_CHIP_I915_GM 0x2592
+#define PCI_CHIP_I945_G 0x2772
+#define PCI_CHIP_I945_GM 0x27A2
+
+
+/* ================================================================
+ * intel_context.c:
+ */
+
+extern GLboolean intelInitContext(struct intel_context *intel,
+ const __GLcontextModes * mesaVis,
+ __DRIcontextPrivate * driContextPriv,
+ void *sharedContextPrivate,
+ struct dd_function_table *functions);
+
+extern void intelGetLock(struct intel_context *intel, GLuint flags);
+
+extern void intelInitState(GLcontext * ctx);
+extern void intelFinish(GLcontext * ctx);
+extern void intelFlush(GLcontext * ctx);
+
+extern void intelInitDriverFunctions(struct dd_function_table *functions);
+
+
+/* ================================================================
+ * intel_state.c:
+ */
+extern void intelInitStateFuncs(struct dd_function_table *functions);
+
+#define COMPAREFUNC_ALWAYS 0
+#define COMPAREFUNC_NEVER 0x1
+#define COMPAREFUNC_LESS 0x2
+#define COMPAREFUNC_EQUAL 0x3
+#define COMPAREFUNC_LEQUAL 0x4
+#define COMPAREFUNC_GREATER 0x5
+#define COMPAREFUNC_NOTEQUAL 0x6
+#define COMPAREFUNC_GEQUAL 0x7
+
+#define STENCILOP_KEEP 0
+#define STENCILOP_ZERO 0x1
+#define STENCILOP_REPLACE 0x2
+#define STENCILOP_INCRSAT 0x3
+#define STENCILOP_DECRSAT 0x4
+#define STENCILOP_INCR 0x5
+#define STENCILOP_DECR 0x6
+#define STENCILOP_INVERT 0x7
+
+#define LOGICOP_CLEAR 0
+#define LOGICOP_NOR 0x1
+#define LOGICOP_AND_INV 0x2
+#define LOGICOP_COPY_INV 0x3
+#define LOGICOP_AND_RVRSE 0x4
+#define LOGICOP_INV 0x5
+#define LOGICOP_XOR 0x6
+#define LOGICOP_NAND 0x7
+#define LOGICOP_AND 0x8
+#define LOGICOP_EQUIV 0x9
+#define LOGICOP_NOOP 0xa
+#define LOGICOP_OR_INV 0xb
+#define LOGICOP_COPY 0xc
+#define LOGICOP_OR_RVRSE 0xd
+#define LOGICOP_OR 0xe
+#define LOGICOP_SET 0xf
+
+#define BLENDFACT_ZERO 0x01
+#define BLENDFACT_ONE 0x02
+#define BLENDFACT_SRC_COLR 0x03
+#define BLENDFACT_INV_SRC_COLR 0x04
+#define BLENDFACT_SRC_ALPHA 0x05
+#define BLENDFACT_INV_SRC_ALPHA 0x06
+#define BLENDFACT_DST_ALPHA 0x07
+#define BLENDFACT_INV_DST_ALPHA 0x08
+#define BLENDFACT_DST_COLR 0x09
+#define BLENDFACT_INV_DST_COLR 0x0a
+#define BLENDFACT_SRC_ALPHA_SATURATE 0x0b
+#define BLENDFACT_CONST_COLOR 0x0c
+#define BLENDFACT_INV_CONST_COLOR 0x0d
+#define BLENDFACT_CONST_ALPHA 0x0e
+#define BLENDFACT_INV_CONST_ALPHA 0x0f
+#define BLENDFACT_MASK 0x0f
+
+#define MI_BATCH_BUFFER_END (0xA<<23)
+
+
+extern int intel_translate_compare_func(GLenum func);
+extern int intel_translate_stencil_op(GLenum op);
+extern int intel_translate_blend_factor(GLenum factor);
+extern int intel_translate_logic_op(GLenum opcode);
+
+
+/*======================================================================
+ * Inline conversion functions.
+ * These are better-typed than the macros used previously:
+ */
+static INLINE struct intel_context *
+intel_context(GLcontext * ctx)
+{
+ return (struct intel_context *) ctx;
+}
+
+static INLINE struct intel_texture_object *
+intel_texture_object(struct gl_texture_object *obj)
+{
+ return (struct intel_texture_object *) obj;
+}
+
+static INLINE struct intel_texture_image *
+intel_texture_image(struct gl_texture_image *img)
+{
+ return (struct intel_texture_image *) img;
+}
+
+extern struct intel_renderbuffer *intel_renderbuffer(struct gl_renderbuffer
+ *rb);
+
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "imports.h"
+#include "context.h"
+#include "depthstencil.h"
+#include "fbobject.h"
+#include "framebuffer.h"
+#include "hash.h"
+#include "mtypes.h"
+#include "renderbuffer.h"
+
+#include "intel_context.h"
+#include "intel_fbo.h"
+#include "intel_depthstencil.h"
+#include "intel_regions.h"
+
+
+/**
+ * The GL_EXT_framebuffer_object allows the user to create their own
+ * framebuffer objects consisting of color renderbuffers (0 or more),
+ * depth renderbuffers (0 or 1) and stencil renderbuffers (0 or 1).
+ *
+ * The spec considers depth and stencil renderbuffers to be totally independent
+ * buffers. In reality, most graphics hardware today uses a combined
+ * depth+stencil buffer (one 32-bit pixel = 24 bits of Z + 8 bits of stencil).
+ *
+ * This causes difficulty because the user may create some number of depth
+ * renderbuffers and some number of stencil renderbuffers and bind them
+ * together in framebuffers in any combination.
+ *
+ * This code manages all that.
+ *
+ * 1. Depth renderbuffers are always allocated in hardware as 32bpp
+ * GL_DEPTH24_STENCIL8 buffers.
+ *
+ * 2. Stencil renderbuffers are initially allocated in software as 8bpp
+ * GL_STENCIL_INDEX8 buffers.
+ *
+ * 3. Depth and Stencil renderbuffers use the PairedStencil and PairedDepth
+ * fields (respectively) to indicate if the buffer's currently paired
+ * with another stencil or depth buffer (respectively).
+ *
+ * 4. When a depth and stencil buffer are initially both attached to the
+ * current framebuffer, we merge the stencil buffer values into the
+ * depth buffer (really a depth+stencil buffer).  The hardware then uses
+ * the combined buffer.
+ *
+ * 5. Whenever a depth or stencil buffer is reallocated (with
+ * glRenderbufferStorage) we undo the pairing and copy the stencil values
+ * from the combined depth/stencil buffer back to the stencil-only buffer.
+ *
+ * 6. We also undo the pairing when we find a change in buffer bindings.
+ *
+ * 7. If a framebuffer is only using a depth renderbuffer (no stencil), we
+ * just use the combined depth/stencil buffer and ignore the stencil values.
+ *
+ * 8. If a framebuffer is only using a stencil renderbuffer (no depth) we have
+ * to promote the 8bpp software stencil buffer to a 32bpp hardware
+ * depth+stencil buffer.
+ *
+ */
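+
+/* As a purely illustrative sketch of the packed format (the real
+ * merge/split is done by _mesa_insert_stencil() and _mesa_extract_stencil()
+ * below), each GL_UNSIGNED_INT_24_8_EXT pixel holds depth in the upper
+ * 24 bits and stencil in the low 8 bits:
+ *
+ *    packed   = (depth24 << 8) | stencil8;
+ *    depth24  = packed >> 8;
+ *    stencil8 = packed & 0xff;
+ */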
+
+
+
+static void
+map_regions(GLcontext * ctx,
+ struct intel_renderbuffer *depthRb,
+ struct intel_renderbuffer *stencilRb)
+{
+ struct intel_context *intel = intel_context(ctx);
+ if (depthRb && depthRb->region) {
+ intel_region_map(intel->intelScreen, depthRb->region);
+ depthRb->pfMap = depthRb->region->map;
+ depthRb->pfPitch = depthRb->region->pitch;
+ }
+ if (stencilRb && stencilRb->region) {
+ intel_region_map(intel->intelScreen, stencilRb->region);
+ stencilRb->pfMap = stencilRb->region->map;
+ stencilRb->pfPitch = stencilRb->region->pitch;
+ }
+}
+
+static void
+unmap_regions(GLcontext * ctx,
+ struct intel_renderbuffer *depthRb,
+ struct intel_renderbuffer *stencilRb)
+{
+ struct intel_context *intel = intel_context(ctx);
+ if (depthRb && depthRb->region) {
+ intel_region_unmap(intel->intelScreen, depthRb->region);
+ depthRb->pfMap = NULL;
+ depthRb->pfPitch = 0;
+ }
+ if (stencilRb && stencilRb->region) {
+ intel_region_unmap(intel->intelScreen, stencilRb->region);
+ stencilRb->pfMap = NULL;
+ stencilRb->pfPitch = 0;
+ }
+}
+
+
+
+/**
+ * Undo the pairing/interleaving between depth and stencil buffers.
+ * irb should be a depth/stencil or stencil renderbuffer.
+ */
+void
+intel_unpair_depth_stencil(GLcontext * ctx, struct intel_renderbuffer *irb)
+{
+ if (irb->PairedStencil) {
+ /* irb is a depth/stencil buffer */
+ struct gl_renderbuffer *stencilRb;
+ struct intel_renderbuffer *stencilIrb;
+
+ ASSERT(irb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
+
+ stencilRb = _mesa_lookup_renderbuffer(ctx, irb->PairedStencil);
+ stencilIrb = intel_renderbuffer(stencilRb);
+ if (stencilIrb) {
+ /* need to extract stencil values from the depth buffer */
+ ASSERT(stencilIrb->PairedDepth == irb->Base.Name);
+ map_regions(ctx, irb, stencilIrb);
+ _mesa_extract_stencil(ctx, &irb->Base, &stencilIrb->Base);
+ unmap_regions(ctx, irb, stencilIrb);
+ stencilIrb->PairedDepth = 0;
+ }
+ irb->PairedStencil = 0;
+ }
+ else if (irb->PairedDepth) {
+ /* irb is a stencil buffer */
+ struct gl_renderbuffer *depthRb;
+ struct intel_renderbuffer *depthIrb;
+
+ ASSERT(irb->Base._ActualFormat == GL_STENCIL_INDEX8_EXT ||
+ irb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
+
+ depthRb = _mesa_lookup_renderbuffer(ctx, irb->PairedDepth);
+ depthIrb = intel_renderbuffer(depthRb);
+ if (depthIrb) {
+ /* need to extract stencil values from the depth buffer */
+ ASSERT(depthIrb->PairedStencil == irb->Base.Name);
+ map_regions(ctx, depthIrb, irb);
+ _mesa_extract_stencil(ctx, &depthIrb->Base, &irb->Base);
+ unmap_regions(ctx, depthIrb, irb);
+ depthIrb->PairedStencil = 0;
+ }
+ irb->PairedDepth = 0;
+ }
+ else {
+      _mesa_problem(ctx, "Problem in intel_unpair_depth_stencil");
+ }
+
+ ASSERT(irb->PairedStencil == 0);
+ ASSERT(irb->PairedDepth == 0);
+}
+
+
+/**
+ * Examine the depth and stencil renderbuffers which are attached to the
+ * framebuffer. If both depth and stencil are attached, make sure that the
+ * renderbuffers are 'paired' (combined). If only depth or only stencil is
+ * attached, undo any previous pairing.
+ *
+ * Must be called if NewState & _NEW_BUFFER (when renderbuffer attachments
+ * change, for example).
+ */
+void
+intel_validate_paired_depth_stencil(GLcontext * ctx,
+ struct gl_framebuffer *fb)
+{
+ struct intel_renderbuffer *depthRb, *stencilRb;
+
+ depthRb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
+ stencilRb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
+
+ if (depthRb && stencilRb) {
+ if (depthRb == stencilRb) {
+ /* Using a user-created combined depth/stencil buffer.
+ * Nothing to do.
+ */
+ ASSERT(depthRb->Base._BaseFormat == GL_DEPTH_STENCIL_EXT);
+ ASSERT(depthRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
+ }
+ else {
+ /* Separate depth/stencil buffers, need to interleave now */
+ ASSERT(depthRb->Base._BaseFormat == GL_DEPTH_COMPONENT);
+ ASSERT(stencilRb->Base._BaseFormat == GL_STENCIL_INDEX);
+ /* may need to interleave depth/stencil now */
+ if (depthRb->PairedStencil == stencilRb->Base.Name) {
+ /* OK, the depth and stencil buffers are already interleaved */
+ ASSERT(stencilRb->PairedDepth == depthRb->Base.Name);
+ }
+ else {
+ /* need to setup new pairing/interleaving */
+ if (depthRb->PairedStencil) {
+ intel_unpair_depth_stencil(ctx, depthRb);
+ }
+ if (stencilRb->PairedDepth) {
+ intel_unpair_depth_stencil(ctx, stencilRb);
+ }
+
+ ASSERT(depthRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
+ ASSERT(stencilRb->Base._ActualFormat == GL_STENCIL_INDEX8_EXT ||
+ stencilRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
+
+ /* establish new pairing: interleave stencil into depth buffer */
+ map_regions(ctx, depthRb, stencilRb);
+ _mesa_insert_stencil(ctx, &depthRb->Base, &stencilRb->Base);
+ unmap_regions(ctx, depthRb, stencilRb);
+ depthRb->PairedStencil = stencilRb->Base.Name;
+ stencilRb->PairedDepth = depthRb->Base.Name;
+ }
+
+ }
+ }
+ else if (depthRb) {
+ /* Depth buffer but no stencil buffer.
+ * We'll use a GL_DEPTH24_STENCIL8 buffer and ignore the stencil bits.
+ */
+ /* can't assert this until storage is allocated:
+ ASSERT(depthRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
+ */
+      /* undo any previous pairing */
+ if (depthRb->PairedStencil) {
+ intel_unpair_depth_stencil(ctx, depthRb);
+ }
+ }
+ else if (stencilRb) {
+ /* Stencil buffer but no depth buffer.
+ * Since h/w doesn't typically support just 8bpp stencil w/out Z,
+ * we'll use a GL_DEPTH24_STENCIL8 buffer and ignore the depth bits.
+ */
+ /* undo any previous pairing */
+ if (stencilRb->PairedDepth) {
+ intel_unpair_depth_stencil(ctx, stencilRb);
+ }
+ if (stencilRb->Base._ActualFormat == GL_STENCIL_INDEX8_EXT) {
+ /* promote buffer to GL_DEPTH24_STENCIL8 for hw rendering */
+ _mesa_promote_stencil(ctx, &stencilRb->Base);
+ ASSERT(stencilRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
+ }
+ }
+
+ /* Finally, update the fb->_DepthBuffer and fb->_StencilBuffer fields */
+ _mesa_update_depth_buffer(ctx, fb, BUFFER_DEPTH);
+ if (depthRb && depthRb->PairedStencil)
+ _mesa_update_stencil_buffer(ctx, fb, BUFFER_DEPTH);
+ else
+ _mesa_update_stencil_buffer(ctx, fb, BUFFER_STENCIL);
+
+
+ /* The hardware should use fb->Attachment[BUFFER_DEPTH].Renderbuffer
+ * first, if present, then fb->Attachment[BUFFER_STENCIL].Renderbuffer
+ * if present.
+ */
+}
--- /dev/null
+
+#ifndef INTEL_DEPTH_STENCIL_H
+#define INTEL_DEPTH_STENCIL_H
+
+
+extern void
+intel_unpair_depth_stencil(GLcontext * ctx, struct intel_renderbuffer *irb);
+
+extern void
+intel_validate_paired_depth_stencil(GLcontext * ctx,
+ struct gl_framebuffer *fb);
+
+
+#endif /* INTEL_DEPTH_STENCIL_H */
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "imports.h"
+#include "mtypes.h"
+#include "fbobject.h"
+#include "framebuffer.h"
+#include "renderbuffer.h"
+#include "context.h"
+#include "texformat.h"
+#include "texrender.h"
+
+#include "intel_context.h"
+#include "intel_buffers.h"
+#include "intel_depthstencil.h"
+#include "intel_fbo.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+#include "intel_span.h"
+
+
+#define FILE_DEBUG_FLAG DEBUG_FBO
+
+#define INTEL_RB_CLASS 0x12345678
+
+
+/* XXX FBO: move this to intel_context.h (inlined) */
+/**
+ * Return a gl_renderbuffer pointer cast to an intel_renderbuffer.
+ * NULL will be returned if the rb isn't really an intel_renderbuffer.
+ * This is determined by checking the ClassID.
+ */
+struct intel_renderbuffer *
+intel_renderbuffer(struct gl_renderbuffer *rb)
+{
+ struct intel_renderbuffer *irb = (struct intel_renderbuffer *) rb;
+ if (irb && irb->Base.ClassID == INTEL_RB_CLASS) {
+ /*_mesa_warning(NULL, "Returning non-intel Rb\n");*/
+ return irb;
+ }
+ else
+ return NULL;
+}
+
+
+struct intel_renderbuffer *
+intel_get_renderbuffer(struct gl_framebuffer *fb, GLuint attIndex)
+{
+ return intel_renderbuffer(fb->Attachment[attIndex].Renderbuffer);
+}
+
+
+struct intel_region *
+intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
+{
+ struct intel_renderbuffer *irb
+ = intel_renderbuffer(fb->Attachment[attIndex].Renderbuffer);
+ if (irb)
+ return irb->region;
+ else
+ return NULL;
+}
+
+
+
+/**
+ * Create a new framebuffer object.
+ */
+static struct gl_framebuffer *
+intel_new_framebuffer(GLcontext * ctx, GLuint name)
+{
+ /* there's no intel_framebuffer at this time, just use Mesa's class */
+ return _mesa_new_framebuffer(ctx, name);
+}
+
+
+static void
+intel_delete_renderbuffer(struct gl_renderbuffer *rb)
+{
+ GET_CURRENT_CONTEXT(ctx);
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+
+ ASSERT(irb);
+
+ if (irb->PairedStencil || irb->PairedDepth) {
+ intel_unpair_depth_stencil(ctx, irb);
+ }
+
+ if (intel && irb->region) {
+ intel_region_release(&irb->region);
+ }
+
+ _mesa_free(irb);
+}
+
+
+
+/**
+ * Return a pointer to a specific pixel in a renderbuffer.
+ */
+static void *
+intel_get_pointer(GLcontext * ctx, struct gl_renderbuffer *rb,
+ GLint x, GLint y)
+{
+ /* By returning NULL we force all software rendering to go through
+ * the span routines.
+ */
+ return NULL;
+}
+
+
+
+/**
+ * Called via glRenderbufferStorageEXT() to set the format and allocate
+ * storage for a user-created renderbuffer.
+ */
+static GLboolean
+intel_alloc_renderbuffer_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat,
+ GLuint width, GLuint height)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+ GLboolean softwareBuffer = GL_FALSE;
+ int cpp;
+
+ ASSERT(rb->Name != 0);
+
+ switch (internalFormat) {
+ case GL_R3_G3_B2:
+ case GL_RGB4:
+ case GL_RGB5:
+ rb->_ActualFormat = GL_RGB5;
+ rb->DataType = GL_UNSIGNED_BYTE;
+ rb->RedBits = 5;
+ rb->GreenBits = 6;
+ rb->BlueBits = 5;
+ cpp = 2;
+ break;
+ case GL_RGB:
+ case GL_RGB8:
+ case GL_RGB10:
+ case GL_RGB12:
+ case GL_RGB16:
+ case GL_RGBA:
+ case GL_RGBA2:
+ case GL_RGBA4:
+ case GL_RGB5_A1:
+ case GL_RGBA8:
+ case GL_RGB10_A2:
+ case GL_RGBA12:
+ case GL_RGBA16:
+ rb->_ActualFormat = GL_RGBA8;
+ rb->DataType = GL_UNSIGNED_BYTE;
+ rb->RedBits = 8;
+ rb->GreenBits = 8;
+ rb->BlueBits = 8;
+ rb->AlphaBits = 8;
+ cpp = 4;
+ break;
+ case GL_STENCIL_INDEX:
+ case GL_STENCIL_INDEX1_EXT:
+ case GL_STENCIL_INDEX4_EXT:
+ case GL_STENCIL_INDEX8_EXT:
+ case GL_STENCIL_INDEX16_EXT:
+ /* alloc a depth+stencil buffer */
+ rb->_ActualFormat = GL_DEPTH24_STENCIL8_EXT;
+ rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
+ rb->StencilBits = 8;
+ cpp = 4;
+ break;
+ case GL_DEPTH_COMPONENT16:
+ rb->_ActualFormat = GL_DEPTH_COMPONENT16;
+ rb->DataType = GL_UNSIGNED_SHORT;
+ rb->DepthBits = 16;
+ cpp = 2;
+ break;
+ case GL_DEPTH_COMPONENT:
+ case GL_DEPTH_COMPONENT24:
+ case GL_DEPTH_COMPONENT32:
+ rb->_ActualFormat = GL_DEPTH24_STENCIL8_EXT;
+ rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
+ rb->DepthBits = 24;
+ cpp = 4;
+ break;
+ case GL_DEPTH_STENCIL_EXT:
+ case GL_DEPTH24_STENCIL8_EXT:
+ rb->_ActualFormat = GL_DEPTH24_STENCIL8_EXT;
+ rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
+ rb->DepthBits = 24;
+ rb->StencilBits = 8;
+ cpp = 4;
+ break;
+ default:
+ _mesa_problem(ctx,
+ "Unexpected format in intel_alloc_renderbuffer_storage");
+ return GL_FALSE;
+ }
+
+ intelFlush(ctx);
+
+ /* free old region */
+ if (irb->region) {
+ intel_region_release(&irb->region);
+ }
+
+ /* allocate new memory region/renderbuffer */
+ if (softwareBuffer) {
+ return _mesa_soft_renderbuffer_storage(ctx, rb, internalFormat,
+ width, height);
+ }
+ else {
+ /* Choose a pitch to match hardware requirements:
+ */
+      GLuint pitch = ((cpp * width + 63) & ~63) / cpp;
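+      /* e.g. cpp=4, width=100: 400 bytes rounds up to the next multiple
+       * of 64 (448 bytes), i.e. a pitch of 112 pixels.
+       */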
+
+ /* alloc hardware renderbuffer */
+ DBG("Allocating %d x %d Intel RBO (pitch %d)\n", width,
+ height, pitch);
+
+ irb->region = intel_region_alloc(intel->intelScreen, cpp, pitch, height);
+ if (!irb->region)
+ return GL_FALSE; /* out of memory? */
+
+ ASSERT(irb->region->buffer);
+
+ rb->Width = width;
+ rb->Height = height;
+
+ /* This sets the Get/PutRow/Value functions */
+ intel_set_span_functions(&irb->Base);
+
+ return GL_TRUE;
+ }
+}
+
+
+
+/**
+ * Called for each hardware renderbuffer when a _window_ is resized.
+ * Just update fields.
+ * Not used for user-created renderbuffers!
+ */
+static GLboolean
+intel_alloc_window_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat, GLuint width, GLuint height)
+{
+ ASSERT(rb->Name == 0);
+ rb->Width = width;
+ rb->Height = height;
+ rb->_ActualFormat = internalFormat;
+ return GL_TRUE;
+}
+
+
+static GLboolean
+intel_nop_alloc_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat, GLuint width, GLuint height)
+{
+   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
+ return GL_FALSE;
+}
+
+
+
+/**
+ * Create a new intel_renderbuffer which corresponds to an on-screen window,
+ * not a user-created renderbuffer.
+ * \param width the screen width
+ * \param height the screen height
+ */
+struct intel_renderbuffer *
+intel_create_renderbuffer(GLenum intFormat, GLsizei width, GLsizei height,
+ int offset, int pitch, int cpp, void *map)
+{
+ GET_CURRENT_CONTEXT(ctx);
+
+ struct intel_renderbuffer *irb;
+ const GLuint name = 0;
+
+ irb = CALLOC_STRUCT(intel_renderbuffer);
+ if (!irb) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
+ return NULL;
+ }
+
+ _mesa_init_renderbuffer(&irb->Base, name);
+ irb->Base.ClassID = INTEL_RB_CLASS;
+
+ switch (intFormat) {
+ case GL_RGB5:
+ irb->Base._ActualFormat = GL_RGB5;
+ irb->Base._BaseFormat = GL_RGBA;
+ irb->Base.RedBits = 5;
+ irb->Base.GreenBits = 6;
+ irb->Base.BlueBits = 5;
+ irb->Base.DataType = GL_UNSIGNED_BYTE;
+ cpp = 2;
+ break;
+ case GL_RGBA8:
+ irb->Base._ActualFormat = GL_RGBA8;
+ irb->Base._BaseFormat = GL_RGBA;
+ irb->Base.RedBits = 8;
+ irb->Base.GreenBits = 8;
+ irb->Base.BlueBits = 8;
+ irb->Base.AlphaBits = 8;
+ irb->Base.DataType = GL_UNSIGNED_BYTE;
+ cpp = 4;
+ break;
+ case GL_STENCIL_INDEX8_EXT:
+ irb->Base._ActualFormat = GL_STENCIL_INDEX8_EXT;
+ irb->Base._BaseFormat = GL_STENCIL_INDEX;
+ irb->Base.StencilBits = 8;
+ irb->Base.DataType = GL_UNSIGNED_BYTE;
+ cpp = 1;
+ break;
+ case GL_DEPTH_COMPONENT16:
+ irb->Base._ActualFormat = GL_DEPTH_COMPONENT16;
+ irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
+ irb->Base.DepthBits = 16;
+ irb->Base.DataType = GL_UNSIGNED_SHORT;
+ cpp = 2;
+ break;
+ case GL_DEPTH_COMPONENT24:
+ irb->Base._ActualFormat = GL_DEPTH24_STENCIL8_EXT;
+ irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
+ irb->Base.DepthBits = 24;
+ irb->Base.DataType = GL_UNSIGNED_INT;
+ cpp = 4;
+ break;
+ case GL_DEPTH24_STENCIL8_EXT:
+ irb->Base._ActualFormat = GL_DEPTH24_STENCIL8_EXT;
+ irb->Base._BaseFormat = GL_DEPTH_STENCIL_EXT;
+ irb->Base.DepthBits = 24;
+ irb->Base.StencilBits = 8;
+ irb->Base.DataType = GL_UNSIGNED_INT_24_8_EXT;
+ cpp = 4;
+ break;
+ default:
+ _mesa_problem(NULL,
+ "Unexpected intFormat in intel_create_renderbuffer");
+ return NULL;
+ }
+
+ irb->Base.InternalFormat = intFormat;
+
+ /* intel-specific methods */
+ irb->Base.Delete = intel_delete_renderbuffer;
+ irb->Base.AllocStorage = intel_alloc_window_storage;
+ irb->Base.GetPointer = intel_get_pointer;
+ /* This sets the Get/PutRow/Value functions */
+ intel_set_span_functions(&irb->Base);
+
+ irb->pfMap = map;
+ irb->pfPitch = pitch / cpp; /* in pixels */
+
+#if 00
+ irb->region = intel_region_create_static(intel,
+ DRM_MM_TT,
+ offset, map, cpp, width, height);
+#endif
+
+ return irb;
+}
+
+
+/**
+ * Create a new renderbuffer object.
+ * Typically called via glBindRenderbufferEXT().
+ */
+static struct gl_renderbuffer *
+intel_new_renderbuffer(GLcontext * ctx, GLuint name)
+{
+ /*struct intel_context *intel = intel_context(ctx); */
+ struct intel_renderbuffer *irb;
+
+ irb = CALLOC_STRUCT(intel_renderbuffer);
+ if (!irb) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
+ return NULL;
+ }
+
+ _mesa_init_renderbuffer(&irb->Base, name);
+ irb->Base.ClassID = INTEL_RB_CLASS;
+
+ /* intel-specific methods */
+ irb->Base.Delete = intel_delete_renderbuffer;
+ irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
+ irb->Base.GetPointer = intel_get_pointer;
+ /* span routines set in alloc_storage function */
+
+ return &irb->Base;
+}
+
+
+/**
+ * Called via glBindFramebufferEXT().
+ */
+static void
+intel_bind_framebuffer(GLcontext * ctx, GLenum target,
+ struct gl_framebuffer *fb)
+{
+ if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
+ intel_draw_buffer(ctx, fb);
+ /* Integer depth range depends on depth buffer bits */
+ ctx->Driver.DepthRange(ctx, ctx->Viewport.Near, ctx->Viewport.Far);
+ }
+ else {
+ /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
+ }
+}
+
+
+/**
+ * Called via glFramebufferRenderbufferEXT().
+ */
+static void
+intel_framebuffer_renderbuffer(GLcontext * ctx,
+ struct gl_framebuffer *fb,
+ GLenum attachment, struct gl_renderbuffer *rb)
+{
+ DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
+
+ intelFlush(ctx);
+
+ _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
+ intel_draw_buffer(ctx, fb);
+}
+
+
+/**
+ * When glFramebufferTexture[123]D is called this function sets up the
+ * gl_renderbuffer wrapper around the texture image.
+ * This will have the region info needed for hardware rendering.
+ */
+static struct intel_renderbuffer *
+intel_wrap_texture(GLcontext * ctx, struct gl_texture_image *texImage)
+{
+ const GLuint name = ~0; /* not significant, but distinct for debugging */
+ struct intel_renderbuffer *irb;
+
+ /* make an intel_renderbuffer to wrap the texture image */
+ irb = CALLOC_STRUCT(intel_renderbuffer);
+ if (!irb) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture");
+ return NULL;
+ }
+
+ _mesa_init_renderbuffer(&irb->Base, name);
+ irb->Base.ClassID = INTEL_RB_CLASS;
+
+ if (texImage->TexFormat == &_mesa_texformat_argb8888) {
+ irb->Base._ActualFormat = GL_RGBA8;
+ irb->Base._BaseFormat = GL_RGBA;
+ DBG("Render to RGBA8 texture OK\n");
+ }
+ else if (texImage->TexFormat == &_mesa_texformat_rgb565) {
+ irb->Base._ActualFormat = GL_RGB5;
+ irb->Base._BaseFormat = GL_RGB;
+ DBG("Render to RGB5 texture OK\n");
+ }
+ else if (texImage->TexFormat == &_mesa_texformat_z16) {
+ irb->Base._ActualFormat = GL_DEPTH_COMPONENT16;
+ irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
+ DBG("Render to DEPTH16 texture OK\n");
+ }
+ else {
+ DBG("Render to texture BAD FORMAT %d\n",
+ texImage->TexFormat->MesaFormat);
+ _mesa_free(irb);
+ return NULL;
+ }
+
+ irb->Base.InternalFormat = irb->Base._ActualFormat;
+ irb->Base.Width = texImage->Width;
+ irb->Base.Height = texImage->Height;
+ irb->Base.DataType = GL_UNSIGNED_BYTE; /* FBO XXX fix */
+ irb->Base.RedBits = texImage->TexFormat->RedBits;
+ irb->Base.GreenBits = texImage->TexFormat->GreenBits;
+ irb->Base.BlueBits = texImage->TexFormat->BlueBits;
+ irb->Base.AlphaBits = texImage->TexFormat->AlphaBits;
+ irb->Base.DepthBits = texImage->TexFormat->DepthBits;
+
+ irb->Base.Delete = intel_delete_renderbuffer;
+ irb->Base.AllocStorage = intel_nop_alloc_storage;
+ intel_set_span_functions(&irb->Base);
+
+ irb->RenderToTexture = GL_TRUE;
+
+ return irb;
+}
+
+
+/**
+ * Called by glFramebufferTexture[123]DEXT() (and other places) to
+ * prepare for rendering into texture memory. This might be called
+ * many times to choose different texture levels, cube faces, etc
+ * before intel_finish_render_texture() is ever called.
+ */
+static void
+intel_render_texture(GLcontext * ctx,
+ struct gl_framebuffer *fb,
+ struct gl_renderbuffer_attachment *att)
+{
+ struct gl_texture_image *newImage
+ = att->Texture->Image[att->CubeMapFace][att->TextureLevel];
+ struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
+ struct intel_texture_image *intel_image;
+ GLuint imageOffset;
+
+ (void) fb;
+
+ ASSERT(newImage);
+
+ if (!irb) {
+ irb = intel_wrap_texture(ctx, newImage);
+ if (irb) {
+ /* bind the wrapper to the attachment point */
+ att->Renderbuffer = &irb->Base;
+ }
+ else {
+ /* fallback to software rendering */
+ _mesa_render_texture(ctx, fb, att);
+ return;
+ }
+ }
+
+ DBG("Begin render texture tid %x tex=%u w=%d h=%d refcount=%d\n",
+ _glthread_GetID(),
+ att->Texture->Name, newImage->Width, newImage->Height,
+ irb->Base.RefCount);
+
+   /* point the renderbuffer's region to the texture image region */
+ intel_image = intel_texture_image(newImage);
+ if (irb->region != intel_image->mt->region) {
+ if (irb->region)
+ intel_region_release(&irb->region);
+ intel_region_reference(&irb->region, intel_image->mt->region);
+ }
+
+ /* compute offset of the particular 2D image within the texture region */
+ imageOffset = intel_miptree_image_offset(intel_image->mt,
+ att->CubeMapFace,
+ att->TextureLevel);
+
+ if (att->Texture->Target == GL_TEXTURE_3D) {
+ const GLuint *offsets = intel_miptree_depth_offsets(intel_image->mt,
+ att->TextureLevel);
+ imageOffset += offsets[att->Zoffset];
+ }
+
+ /* store that offset in the region */
+ intel_image->mt->region->draw_offset = imageOffset;
+
+ /* update drawing region, etc */
+ intel_draw_buffer(ctx, fb);
+}
+
+
+/**
+ * Called by Mesa when rendering to a texture is done.
+ */
+static void
+intel_finish_render_texture(GLcontext * ctx,
+ struct gl_renderbuffer_attachment *att)
+{
+ struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
+
+ DBG("End render texture (tid %x) tex %u\n", _glthread_GetID(), att->Texture->Name);
+
+ if (irb) {
+ /* just release the region */
+ intel_region_release(&irb->region);
+ }
+ else if (att->Renderbuffer) {
+ /* software fallback */
+ _mesa_finish_render_texture(ctx, att);
+ /* XXX FBO: Need to unmap the buffer (or in intelSpanRenderStart???) */
+ }
+}
+
+
+/**
+ * Do one-time context initializations related to GL_EXT_framebuffer_object.
+ * Hook in device driver functions.
+ */
+void
+intel_fbo_init(struct intel_context *intel)
+{
+ intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
+ intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
+ intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
+ intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
+ intel->ctx.Driver.RenderTexture = intel_render_texture;
+ intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_FBO_H
+#define INTEL_FBO_H
+
+
+struct intel_context;
+struct intel_region;
+
+
+/**
+ * Intel renderbuffer, derived from gl_renderbuffer.
+ * Note: The PairedDepth and PairedStencil fields use renderbuffer IDs,
+ * not pointers because in some circumstances a deleted renderbuffer could
+ * result in a dangling pointer here.
+ */
+struct intel_renderbuffer
+{
+ struct gl_renderbuffer Base;
+ struct intel_region *region;
+   void *pfMap;                 /* possibly page-flipped map pointer */
+   GLuint pfPitch;              /* possibly page-flipped pitch */
+ GLboolean RenderToTexture; /* RTT? */
+
+ GLuint PairedDepth; /**< only used if this is a depth renderbuffer */
+ GLuint PairedStencil; /**< only used if this is a stencil renderbuffer */
+};
+
+
+extern struct intel_renderbuffer *intel_create_renderbuffer(GLenum intFormat,
+ GLsizei width,
+ GLsizei height,
+ int offset,
+ int pitch,
+ int cpp,
+ void *map);
+
+
+extern void intel_fbo_init(struct intel_context *intel);
+
+
+/* XXX make inline or macro */
+extern struct intel_renderbuffer *intel_get_renderbuffer(struct gl_framebuffer
+ *fb,
+ GLuint attIndex);
+
+
+/* XXX make inline or macro */
+extern struct intel_region *intel_get_rb_region(struct gl_framebuffer *fb,
+ GLuint attIndex);
+
+
+
+
+#endif /* INTEL_FBO_H */
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sched.h>
+
+#include "mtypes.h"
+#include "context.h"
+#include "swrast/swrast.h"
+
+#include "intel_context.h"
+#include "intel_ioctl.h"
+#include "intel_batchbuffer.h"
+#include "intel_blit.h"
+#include "intel_regions.h"
+#include "drm.h"
+
+#define FILE_DEBUG_FLAG DEBUG_IOCTL
+
+int
+intelEmitIrqLocked(struct intel_context *intel)
+{
+ drmI830IrqEmit ie;
+ int ret, seq;
+
+ assert(((*(int *) intel->driHwLock) & ~DRM_LOCK_CONT) ==
+ (DRM_LOCK_HELD | intel->hHWContext));
+
+ ie.irq_seq = &seq;
+
+ ret = drmCommandWriteRead(intel->driFd, DRM_I830_IRQ_EMIT,
+ &ie, sizeof(ie));
+ if (ret) {
+ fprintf(stderr, "%s: drmI830IrqEmit: %d\n", __FUNCTION__, ret);
+ exit(1);
+ }
+
+ DBG("%s --> %d\n", __FUNCTION__, seq);
+
+ return seq;
+}
+
+void
+intelWaitIrq(struct intel_context *intel, int seq)
+{
+ int ret;
+
+ DBG("%s %d\n", __FUNCTION__, seq);
+
+ intel->iw.irq_seq = seq;
+
+ do {
+ ret =
+ drmCommandWrite(intel->driFd, DRM_I830_IRQ_WAIT, &intel->iw,
+ sizeof(intel->iw));
+ } while (ret == -EAGAIN || ret == -EINTR);
+
+ if (ret) {
+ fprintf(stderr, "%s: drmI830IrqWait: %d\n", __FUNCTION__, ret);
+ exit(1);
+ }
+}
+
+
+void
+intel_batch_ioctl(struct intel_context *intel,
+ GLuint start_offset,
+ GLuint used,
+ GLboolean ignore_cliprects, GLboolean allow_unlock)
+{
+ drmI830BatchBuffer batch;
+
+ assert(intel->locked);
+ assert(used);
+
+ DBG("%s used %d offset %x..%x ignore_cliprects %d\n",
+ __FUNCTION__,
+ used, start_offset, start_offset + used, ignore_cliprects);
+
+ /* Throw away non-effective packets. Won't work once we have
+    * hardware contexts which would preserve state changes beyond a
+ * single buffer.
+ */
+
+
+
+ batch.start = start_offset;
+ batch.used = used;
+ batch.cliprects = intel->pClipRects;
+ batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
+ batch.DR1 = 0;
+ batch.DR4 = ((((GLuint) intel->drawX) & 0xffff) |
+ (((GLuint) intel->drawY) << 16));
+
+ DBG("%s: 0x%x..0x%x DR4: %x cliprects: %d\n",
+ __FUNCTION__,
+ batch.start,
+ batch.start + batch.used * 4, batch.DR4, batch.num_cliprects);
+
+ if (drmCommandWrite(intel->driFd, DRM_I830_BATCHBUFFER, &batch,
+ sizeof(batch))) {
+ fprintf(stderr, "DRM_I830_BATCHBUFFER: %d\n", -errno);
+ UNLOCK_HARDWARE(intel);
+ exit(1);
+ }
+
+ /* FIXME: use hardware contexts to avoid 'losing' hardware after
+ * each buffer flush.
+ */
+ intel->vtbl.lost_hardware(intel);
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_IOCTL_H
+#define INTEL_IOCTL_H
+
+#include "intel_context.h"
+
+void intelWaitIrq(struct intel_context *intel, int seq);
+int intelEmitIrqLocked(struct intel_context *intel);
+
+void intel_batch_ioctl(struct intel_context *intel,
+ GLuint start_offset,
+ GLuint used,
+ GLboolean ignore_cliprects, GLboolean allow_unlock);
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+#include "enums.h"
+
+#define FILE_DEBUG_FLAG DEBUG_MIPTREE
+
+static GLenum
+target_to_target(GLenum target)
+{
+ switch (target) {
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
+ return GL_TEXTURE_CUBE_MAP_ARB;
+ default:
+ return target;
+ }
+}
+
+struct intel_mipmap_tree *
+intel_miptree_create(struct intel_context *intel,
+ GLenum target,
+ GLenum internal_format,
+ GLuint first_level,
+ GLuint last_level,
+ GLuint width0,
+ GLuint height0,
+ GLuint depth0, GLuint cpp, GLboolean compressed)
+{
+ GLboolean ok;
+   struct intel_mipmap_tree *mt = calloc(1, sizeof(*mt));
+
+ DBG("%s target %s format %s level %d..%d\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(target),
+ _mesa_lookup_enum_by_nr(internal_format), first_level, last_level);
+
+ mt->target = target_to_target(target);
+ mt->internal_format = internal_format;
+ mt->first_level = first_level;
+ mt->last_level = last_level;
+ mt->width0 = width0;
+ mt->height0 = height0;
+ mt->depth0 = depth0;
+ mt->cpp = compressed ? 2 : cpp;
+ mt->compressed = compressed;
+ mt->refcount = 1;
+
+ switch (intel->intelScreen->deviceID) {
+ case PCI_CHIP_I945_G:
+ case PCI_CHIP_I945_GM:
+ ok = i945_miptree_layout(mt);
+ break;
+ case PCI_CHIP_I915_G:
+ case PCI_CHIP_I915_GM:
+ case PCI_CHIP_I830_M:
+ case PCI_CHIP_I855_GM:
+ case PCI_CHIP_I865_G:
+ default:
+ /* All the i830 chips and the i915 use this layout:
+ */
+ ok = i915_miptree_layout(mt);
+ break;
+ }
+
+ if (ok)
+ mt->region = intel_region_alloc(intel->intelScreen,
+ mt->cpp, mt->pitch, mt->total_height);
+
+ if (!mt->region) {
+ free(mt);
+ return NULL;
+ }
+
+ return mt;
+}
+
+
+void
+intel_miptree_reference(struct intel_mipmap_tree **dst,
+ struct intel_mipmap_tree *src)
+{
+ src->refcount++;
+ *dst = src;
+ DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
+}
+
+void
+intel_miptree_release(struct intel_context *intel,
+ struct intel_mipmap_tree **mt)
+{
+ if (!*mt)
+ return;
+
+ DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
+ if (--(*mt)->refcount <= 0) {
+ GLuint i;
+
+ DBG("%s deleting %p\n", __FUNCTION__, *mt);
+
+ intel_region_release(&((*mt)->region));
+
+ for (i = 0; i < MAX_TEXTURE_LEVELS; i++)
+ if ((*mt)->level[i].image_offset)
+ free((*mt)->level[i].image_offset);
+
+ free(*mt);
+ }
+ *mt = NULL;
+}
+
+
+
+
+/* Can the image be pulled into a unified mipmap tree?  This mirrors
+ * the completeness test in a lot of ways.
+ *
+ * Not sure whether I want to pass gl_texture_image here.
+ */
+GLboolean
+intel_miptree_match_image(struct intel_mipmap_tree *mt,
+ struct gl_texture_image *image,
+ GLuint face, GLuint level)
+{
+ /* Images with borders are never pulled into mipmap trees.
+ */
+ if (image->Border)
+ return GL_FALSE;
+
+ if (image->InternalFormat != mt->internal_format ||
+ image->IsCompressed != mt->compressed)
+ return GL_FALSE;
+
+ /* Test image dimensions against the base level image adjusted for
+ * minification. This will also catch images not present in the
+ * tree, changed targets, etc.
+ */
+ if (image->Width != mt->level[level].width ||
+ image->Height != mt->level[level].height ||
+ image->Depth != mt->level[level].depth)
+ return GL_FALSE;
+
+ return GL_TRUE;
+}
+
+
+void
+intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
+ GLuint level,
+ GLuint nr_images,
+ GLuint x, GLuint y, GLuint w, GLuint h, GLuint d)
+{
+
+ mt->level[level].width = w;
+ mt->level[level].height = h;
+ mt->level[level].depth = d;
+ mt->level[level].level_offset = (x + y * mt->pitch) * mt->cpp;
+ mt->level[level].nr_images = nr_images;
+
+ DBG("%s level %d size: %d,%d,%d offset %d,%d (0x%x)\n", __FUNCTION__,
+ level, w, h, d, x, y, mt->level[level].level_offset);
+
+ /* Not sure when this would happen, but anyway:
+ */
+ if (mt->level[level].image_offset) {
+ free(mt->level[level].image_offset);
+ mt->level[level].image_offset = NULL;
+ }
+
+ assert(nr_images);
+
+ mt->level[level].image_offset = malloc(nr_images * sizeof(GLuint));
+ mt->level[level].image_offset[0] = 0;
+}
+
+
+
+void
+intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
+ GLuint level, GLuint img, GLuint x, GLuint y)
+{
+ if (img == 0 && level == 0)
+ assert(x == 0 && y == 0);
+
+ assert(img < mt->level[level].nr_images);
+
+ mt->level[level].image_offset[img] = (x + y * mt->pitch);
+
+ DBG("%s level %d img %d pos %d,%d image_offset %x\n",
+ __FUNCTION__, level, img, x, y, mt->level[level].image_offset[img]);
+}
+
+
+/* Although we use the image_offset[] array to store relative offsets
+ * to cube faces, Mesa doesn't know anything about this and expects
+ * each cube face to be treated as a separate image.
+ *
+ * These functions present that view to mesa:
+ */
+const GLuint *
+intel_miptree_depth_offsets(struct intel_mipmap_tree *mt, GLuint level)
+{
+ static const GLuint zero = 0;
+
+ if (mt->target != GL_TEXTURE_3D || mt->level[level].nr_images == 1)
+ return &zero;
+ else
+ return mt->level[level].image_offset;
+}
+
+
+GLuint
+intel_miptree_image_offset(struct intel_mipmap_tree * mt,
+ GLuint face, GLuint level)
+{
+ if (mt->target == GL_TEXTURE_CUBE_MAP_ARB)
+ return (mt->level[level].level_offset +
+ mt->level[level].image_offset[face] * mt->cpp);
+ else
+ return mt->level[level].level_offset;
+}
+
+
+
+/**
+ * Map a teximage in a mipmap tree.
+ * \param row_stride returns row stride in bytes
+ * \param image_offsets if non-NULL, returns the offset of each 2D image
+ *        (depth slice or cube face) within the mipmap level.
+ * \return address of mapping
+ */
+GLubyte *
+intel_miptree_image_map(struct intel_context * intel,
+ struct intel_mipmap_tree * mt,
+ GLuint face,
+ GLuint level,
+ GLuint * row_stride, GLuint * image_offsets)
+{
+ DBG("%s \n", __FUNCTION__);
+
+ if (row_stride)
+ *row_stride = mt->pitch * mt->cpp;
+
+ if (image_offsets)
+ memcpy(image_offsets, mt->level[level].image_offset,
+ mt->level[level].depth * sizeof(GLuint));
+
+ return (intel_region_map(intel->intelScreen, mt->region) +
+ intel_miptree_image_offset(mt, face, level));
+}
+
+void
+intel_miptree_image_unmap(struct intel_context *intel,
+ struct intel_mipmap_tree *mt)
+{
+ DBG("%s\n", __FUNCTION__);
+ intel_region_unmap(intel->intelScreen, mt->region);
+}
+
+
+
+/* Upload data for a particular image.
+ */
+void
+intel_miptree_image_data(struct intel_context *intel,
+ struct intel_mipmap_tree *dst,
+ GLuint face,
+ GLuint level,
+ void *src,
+ GLuint src_row_pitch, GLuint src_image_pitch)
+{
+ GLuint depth = dst->level[level].depth;
+ GLuint dst_offset = intel_miptree_image_offset(dst, face, level);
+ const GLuint *dst_depth_offset = intel_miptree_depth_offsets(dst, level);
+ GLuint i;
+
+ DBG("%s\n", __FUNCTION__);
+ for (i = 0; i < depth; i++) {
+      intel_region_data(intel->intelScreen, dst->region,
+                        dst_offset + dst_depth_offset[i],
+                        0, 0,
+                        src, src_row_pitch,
+                        0, 0, /* source x, y */
+                        dst->level[level].width, dst->level[level].height);
+
+ src += src_image_pitch;
+ }
+}
+
+/* Copy mipmap image between trees
+ */
+void
+intel_miptree_image_copy(struct intel_context *intel,
+ struct intel_mipmap_tree *dst,
+ GLuint face, GLuint level,
+ struct intel_mipmap_tree *src)
+{
+ GLuint width = src->level[level].width;
+ GLuint height = src->level[level].height;
+ GLuint depth = src->level[level].depth;
+ GLuint dst_offset = intel_miptree_image_offset(dst, face, level);
+ GLuint src_offset = intel_miptree_image_offset(src, face, level);
+ const GLuint *dst_depth_offset = intel_miptree_depth_offsets(dst, level);
+ const GLuint *src_depth_offset = intel_miptree_depth_offsets(src, level);
+ GLuint i;
+
+ for (i = 0; i < depth; i++) {
+ intel_region_copy(intel->intelScreen,
+ dst->region, dst_offset + dst_depth_offset[i],
+ 0,
+ 0,
+ src->region, src_offset + src_depth_offset[i],
+ 0, 0, width, height);
+ }
+
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_MIPMAP_TREE_H
+#define INTEL_MIPMAP_TREE_H
+
+#include "intel_regions.h"
+
+/* A layer on top of the intel_regions code which adds:
+ *
+ * - Code to size and layout a region to hold a set of mipmaps.
+ * - Query to determine if a new image fits in an existing tree.
+ * - More refcounting
+ * - maybe able to remove refcounting from intel_region?
+ * - ?
+ *
+ * The fixed mipmap layout of intel hardware where one offset
+ * specifies the position of all images in a mipmap hierarchy
+ * complicates the implementation of GL texture image commands,
+ * compared to hardware where each image is specified with an
+ * independent offset.
+ *
+ * In an ideal world, each texture object would be associated with a
+ * single bufmgr buffer or 2d intel_region, and all the images within
+ * the texture object would slot into the tree as they arrive. The
+ * reality can be a little messier, as images can arrive from the user
+ * with sizes that don't fit in the existing tree, or in an order
+ * where the tree layout cannot be guessed immediately.
+ *
+ * This structure encodes an idealized mipmap tree. The GL image
+ * commands build these where possible, otherwise store the images in
+ * temporary system buffers.
+ */
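+
+/* A minimal usage sketch (illustrative only; intel, texImage, numLevels,
+ * width, height, face, level, rowPitch and imagePitch are placeholders for
+ * whatever the real texture-image code passes):
+ *
+ *    struct intel_mipmap_tree *mt =
+ *       intel_miptree_create(intel, GL_TEXTURE_2D, GL_RGBA,
+ *                            0, numLevels - 1,
+ *                            width, height, 1, 4, GL_FALSE);
+ *    if (intel_miptree_match_image(mt, texImage, face, level))
+ *       intel_miptree_image_data(intel, mt, face, level,
+ *                                texImage->Data, rowPitch, imagePitch);
+ *    ...
+ *    intel_miptree_release(intel, &mt);
+ */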
+
+
+/**
+ * Describes the location of each texture image within a texture region.
+ */
+struct intel_mipmap_level
+{
+ GLuint level_offset;
+ GLuint width;
+ GLuint height;
+ GLuint depth;
+ GLuint nr_images;
+
+ /* Explicitly store the offset of each image for each cube face or
+ * depth value. Pretty much have to accept that hardware formats
+ * are going to be so diverse that there is no unified way to
+ * compute the offsets of depth/cube images within a mipmap level,
+ * so have to store them as a lookup table:
+ */
+ GLuint *image_offset;
+};
+
+struct intel_mipmap_tree
+{
+ /* Effectively the key:
+ */
+ GLenum target;
+ GLenum internal_format;
+
+ GLuint first_level;
+ GLuint last_level;
+
+ GLuint width0, height0, depth0; /**< Level zero image dimensions */
+ GLuint cpp;
+ GLboolean compressed;
+
+ /* Derived from the above:
+ */
+ GLuint pitch;
+ GLuint depth_pitch; /* per-image on i945? */
+ GLuint total_height;
+
+ /* Includes image offset tables:
+ */
+ struct intel_mipmap_level level[MAX_TEXTURE_LEVELS];
+
+ /* The data is held here:
+ */
+ struct intel_region *region;
+
+ /* These are also refcounted:
+ */
+ GLuint refcount;
+};
+
+
+
+struct intel_mipmap_tree *intel_miptree_create(struct intel_context *intel,
+ GLenum target,
+ GLenum internal_format,
+ GLuint first_level,
+ GLuint last_level,
+ GLuint width0,
+ GLuint height0,
+ GLuint depth0,
+ GLuint cpp,
+ GLboolean compressed);
+
+void intel_miptree_reference(struct intel_mipmap_tree **dst,
+ struct intel_mipmap_tree *src);
+
+void intel_miptree_release(struct intel_context *intel,
+ struct intel_mipmap_tree **mt);
+
+/* Check if an image fits an existing mipmap tree layout
+ */
+GLboolean intel_miptree_match_image(struct intel_mipmap_tree *mt,
+ struct gl_texture_image *image,
+ GLuint face, GLuint level);
+
+/* Return a pointer to an image within a tree. Return image stride as
+ * well.
+ */
+GLubyte *intel_miptree_image_map(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ GLuint face,
+ GLuint level,
+ GLuint * row_stride, GLuint * image_stride);
+
+void intel_miptree_image_unmap(struct intel_context *intel,
+ struct intel_mipmap_tree *mt);
+
+
+/* Return the linear offset of an image relative to the start of the
+ * tree:
+ */
+GLuint intel_miptree_image_offset(struct intel_mipmap_tree *mt,
+ GLuint face, GLuint level);
+
+/* Return pointers to each 2d slice within an image. Indexed by depth
+ * value.
+ */
+const GLuint *intel_miptree_depth_offsets(struct intel_mipmap_tree *mt,
+ GLuint level);
+
+
+void intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
+ GLuint level,
+ GLuint nr_images,
+ GLuint x, GLuint y,
+ GLuint w, GLuint h, GLuint d);
+
+void intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
+ GLuint level,
+ GLuint img, GLuint x, GLuint y);
+
+
+/* Upload an image into a tree
+ */
+void intel_miptree_image_data(struct intel_context *intel,
+ struct intel_mipmap_tree *dst,
+ GLuint face,
+ GLuint level,
+ void *src,
+ GLuint src_row_pitch, GLuint src_image_pitch);
+
+/* Copy an image between two trees
+ */
+void intel_miptree_image_copy(struct intel_context *intel,
+ struct intel_mipmap_tree *dst,
+ GLuint face, GLuint level,
+ struct intel_mipmap_tree *src);
+
+/* i915_mipmap_tree.c:
+ */
+GLboolean i915_miptree_layout(struct intel_mipmap_tree *mt);
+GLboolean i945_miptree_layout(struct intel_mipmap_tree *mt);
+
+
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "enums.h"
+#include "state.h"
+#include "swrast/swrast.h"
+
+#include "intel_context.h"
+#include "intel_pixel.h"
+#include "intel_regions.h"
+
+
+/**
+ * Check if any fragment operations are in effect which might affect
+ * glDraw/CopyPixels.
+ */
+GLboolean
+intel_check_blit_fragment_ops(GLcontext * ctx)
+{
+ if (ctx->NewState)
+ _mesa_update_state(ctx);
+
+ /* XXX Note: Scissor could be done with the blitter:
+ */
+ return !(ctx->_ImageTransferState ||
+ ctx->Color.AlphaEnabled ||
+ ctx->Depth.Test ||
+ ctx->Fog.Enabled ||
+ ctx->Scissor.Enabled ||
+ ctx->Stencil.Enabled ||
+ !ctx->Color.ColorMask[0] ||
+ !ctx->Color.ColorMask[1] ||
+ !ctx->Color.ColorMask[2] ||
+ !ctx->Color.ColorMask[3] ||
+ ctx->Color.ColorLogicOpEnabled ||
+ ctx->Texture._EnabledUnits || ctx->FragmentProgram._Enabled);
+}
+
+
+GLboolean
+intel_check_meta_tex_fragment_ops(GLcontext * ctx)
+{
+ if (ctx->NewState)
+ _mesa_update_state(ctx);
+
+ /* Some of _ImageTransferState (scale, bias) could be done with
+ * fragment programs on i915.
+ */
+ return !(ctx->_ImageTransferState || ctx->Fog.Enabled || /* not done yet */
+ ctx->Texture._EnabledUnits || ctx->FragmentProgram._Enabled);
+}
+
+/* The intel_region struct doesn't really do enough to capture the
+ * format of the pixels in the region. For now this code assumes that
+ * the region is a display surface and hence is either ARGB8888 or
+ * RGB565.
+ * XXX FBO: If we'd pass in the intel_renderbuffer instead of region, we'd
+ * know the buffer's pixel format.
+ *
+ * \param format as given to glDraw/ReadPixels
+ * \param type as given to glDraw/ReadPixels
+ */
+GLboolean
+intel_check_blit_format(struct intel_region * region,
+ GLenum format, GLenum type)
+{
+ if (region->cpp == 4 &&
+ (type == GL_UNSIGNED_INT_8_8_8_8_REV ||
+ type == GL_UNSIGNED_BYTE) && format == GL_BGRA) {
+ return GL_TRUE;
+ }
+
+ if (region->cpp == 2 &&
+ type == GL_UNSIGNED_SHORT_5_6_5_REV && format == GL_BGR) {
+ return GL_TRUE;
+ }
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ fprintf(stderr, "%s: bad format for blit (cpp %d, type %s format %s)\n",
+ __FUNCTION__, region->cpp,
+ _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
+
+ return GL_FALSE;
+}
+
+
+void
+intelInitPixelFuncs(struct dd_function_table *functions)
+{
+ functions->Accum = _swrast_Accum;
+ functions->Bitmap = _swrast_Bitmap;
+ functions->CopyPixels = intelCopyPixels;
+ functions->ReadPixels = intelReadPixels;
+ functions->DrawPixels = intelDrawPixels;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_PIXEL_H
+#define INTEL_PIXEL_H
+
+#include "mtypes.h"
+
+void intelInitPixelFuncs(struct dd_function_table *functions);
+
+GLboolean intel_check_blit_fragment_ops(GLcontext * ctx);
+
+GLboolean intel_check_meta_tex_fragment_ops(GLcontext * ctx);
+
+GLboolean intel_check_blit_format(struct intel_region *region,
+ GLenum format, GLenum type);
+
+
+void intelReadPixels(GLcontext * ctx,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *pack,
+ GLvoid * pixels);
+
+void intelDrawPixels(GLcontext * ctx,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ GLenum format,
+ GLenum type,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLvoid * pixels);
+
+void intelCopyPixels(GLcontext * ctx,
+ GLint srcx, GLint srcy,
+ GLsizei width, GLsizei height,
+ GLint destx, GLint desty, GLenum type);
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "enums.h"
+#include "image.h"
+#include "colormac.h"
+#include "mtypes.h"
+#include "macros.h"
+#include "bufferobj.h"
+#include "swrast/swrast.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_ioctl.h"
+#include "intel_batchbuffer.h"
+#include "intel_blit.h"
+#include "intel_regions.h"
+#include "intel_buffer_objects.h"
+
+
+
+#define FILE_DEBUG_FLAG DEBUG_PIXEL
+
+
+/* Unlike the other intel_pixel_* functions, the expectation here is
+ * that the incoming data is not in a PBO. With the XY_TEXT blit
+ * method, there's no benefit to having it in a PBO, but we could
+ * implement a path based on XY_MONO_SRC_COPY_BLIT which might benefit
+ * PBO bitmaps. I think they are probably pretty rare though - I
+ * wonder if Xgl uses them?
+ */
+static const GLubyte *map_pbo( GLcontext *ctx,
+ GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte *bitmap )
+{
+ GLubyte *buf;
+
+ if (!_mesa_validate_pbo_access(2, unpack, width, height, 1,
+ GL_COLOR_INDEX, GL_BITMAP,
+ (GLvoid *) bitmap)) {
+ _mesa_error(ctx, GL_INVALID_OPERATION,"glBitmap(invalid PBO access)");
+ return NULL;
+ }
+
+ buf = (GLubyte *) ctx->Driver.MapBuffer(ctx, GL_PIXEL_UNPACK_BUFFER_EXT,
+ GL_READ_ONLY_ARB,
+ unpack->BufferObj);
+ if (!buf) {
+ _mesa_error(ctx, GL_INVALID_OPERATION, "glBitmap(PBO is mapped)");
+ return NULL;
+ }
+
+ return ADD_POINTERS(buf, bitmap);
+}
+
+static GLboolean test_bit( const GLubyte *src,
+ GLuint bit )
+{
+ return (src[bit/8] & (1<<(bit % 8))) ? 1 : 0;
+}
+
+static void set_bit( GLubyte *dest,
+ GLuint bit )
+{
+ dest[bit/8] |= 1 << (bit % 8);
+}
+
+static int align(int x, int align)
+{
+ return (x + align - 1) & ~(align - 1);
+}
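A minimal standalone sketch (illustration only, not part of the patch) of how the three helpers above behave, assuming they work exactly as defined: test_bit()/set_bit() address bits LSB-first within each byte, the bit ^ 7 trick used by get_bitmap_rect() below flips a destination bit index into the MSB-first layout the blitter consumes, and align() rounds x up to the next multiple of a power-of-two alignment.

    /* Editorial sketch, not driver code. */
    #include <assert.h>

    static int test_bit(const unsigned char *src, unsigned bit)
    { return (src[bit / 8] & (1 << (bit % 8))) ? 1 : 0; }

    static void set_bit(unsigned char *dest, unsigned bit)
    { dest[bit / 8] |= 1 << (bit % 8); }

    static int align(int x, int a) { return (x + a - 1) & ~(a - 1); }

    int main(void)
    {
       unsigned char b = 0;

       set_bit(&b, 0);              /* LSB-first addressing: sets the 0x01 bit */
       assert(b == 0x01 && test_bit(&b, 0));

       b = 0;
       set_bit(&b, 0 ^ 7);          /* bit ^ 7: bit 0 lands in the 0x80 (MSB) slot */
       assert(b == 0x80);

       assert(align(13, 8) == 16);  /* rounds up to the next multiple of 8 */
       assert(align(16, 8) == 16);  /* already-aligned values are unchanged */
       return 0;
    }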
+
+/* Extract a rectangle's worth of data from the bitmap. Called
+ * per-cliprect.
+ */
+static GLuint get_bitmap_rect(GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte *bitmap,
+ GLuint x, GLuint y,
+ GLuint w, GLuint h,
+ GLubyte *dest,
+ GLuint row_align,
+ GLboolean invert)
+{
+ GLuint src_offset = (x + unpack->SkipPixels) & 0x7;
+ GLuint mask = unpack->LsbFirst ? 0 : 7;
+ GLuint bit = 0;
+ GLint row, col;
+ GLint first, last;
+ GLint incr;
+ GLuint count = 0;
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s %d,%d %dx%d bitmap %dx%d skip %d src_offset %d mask %d\n",
+ __FUNCTION__, x,y,w,h,width,height,unpack->SkipPixels, src_offset, mask);
+
+ if (invert) {
+ first = h-1;
+ last = 0;
+ incr = -1;
+ }
+ else {
+ first = 0;
+ last = h-1;
+ incr = 1;
+ }
+
+ /* Require that dest be pre-zero'd.
+ */
+ for (row = first; row != (last+incr); row += incr) {
+ const GLubyte *rowsrc = _mesa_image_address2d(unpack, bitmap,
+ width, height,
+ GL_COLOR_INDEX, GL_BITMAP,
+ y + row, x);
+
+ for (col = 0; col < w; col++, bit++) {
+ if (test_bit(rowsrc, (col + src_offset) ^ mask)) {
+ set_bit(dest, bit ^ 7);
+ count++;
+ }
+ }
+
+ if (row_align)
+ bit = (bit + row_align - 1) & ~(row_align - 1);
+ }
+
+ return count;
+}
+
+
+
+
+/*
+ * Render a bitmap.
+ */
+static GLboolean
+do_blit_bitmap( GLcontext *ctx,
+ GLint dstx, GLint dsty,
+ GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte *bitmap )
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_region *dst = intel_drawbuf_region(intel);
+
+ union {
+ GLuint ui;
+ GLubyte ub[4];
+ } color;
+
+
+ if (unpack->BufferObj->Name) {
+ bitmap = map_pbo(ctx, width, height, unpack, bitmap);
+ if (bitmap == NULL)
+ return GL_TRUE; /* even though this is an error, we're done */
+ }
+
+ UNCLAMPED_FLOAT_TO_CHAN(color.ub[0], ctx->Current.RasterColor[2]);
+ UNCLAMPED_FLOAT_TO_CHAN(color.ub[1], ctx->Current.RasterColor[1]);
+ UNCLAMPED_FLOAT_TO_CHAN(color.ub[2], ctx->Current.RasterColor[0]);
+ UNCLAMPED_FLOAT_TO_CHAN(color.ub[3], ctx->Current.RasterColor[3]);
+
+ /* Does zoom apply to bitmaps?
+ */
+ if (!intel_check_blit_fragment_ops(ctx) ||
+ ctx->Pixel.ZoomX != 1.0F ||
+ ctx->Pixel.ZoomY != 1.0F)
+ return GL_FALSE;
+
+ LOCK_HARDWARE(intel);
+
+ if (intel->driDrawable->numClipRects) {
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+ drm_clip_rect_t *box = dPriv->pClipRects;
+ drm_clip_rect_t dest_rect;
+ GLint nbox = dPriv->numClipRects;
+ GLint srcx = 0, srcy = 0;
+ GLint orig_screen_x1, orig_screen_y2;
+ GLuint i;
+
+
+ orig_screen_x1 = dPriv->x + dstx;
+ orig_screen_y2 = dPriv->y + (dPriv->h - dsty);
+
+ /* Do scissoring in GL coordinates:
+ */
+ if (ctx->Scissor.Enabled)
+ {
+ GLint x = ctx->Scissor.X;
+ GLint y = ctx->Scissor.Y;
+ GLuint w = ctx->Scissor.Width;
+ GLuint h = ctx->Scissor.Height;
+
+ if (!_mesa_clip_to_region(x, y, x+w-1, y+h-1, &dstx, &dsty, &width, &height))
+ goto out;
+ }
+
+ /* Convert from GL to hardware coordinates:
+ */
+ dsty = dPriv->y + (dPriv->h - dsty - height);
+ dstx = dPriv->x + dstx;
+
+ dest_rect.x1 = dstx;
+ dest_rect.y1 = dsty;
+ dest_rect.x2 = dstx + width;
+ dest_rect.y2 = dsty + height;
+
+ for (i = 0; i < nbox; i++) {
+ drm_clip_rect_t rect;
+ int box_w, box_h;
+ GLint px, py;
+ GLuint stipple[32];
+
+ if (!intel_intersect_cliprects(&rect, &dest_rect, &box[i]))
+ continue;
+
+ /* Now go back to GL coordinates to figure out what subset of
+ * the bitmap we are uploading for this cliprect:
+ */
+ box_w = rect.x2 - rect.x1;
+ box_h = rect.y2 - rect.y1;
+ srcx = rect.x1 - orig_screen_x1;
+ srcy = orig_screen_y2 - rect.y2;
+
+
+#define DY 32
+#define DX 32
+
+ /* Then, finally, chop it all into chunks that can be
+ * digested by hardware:
+ */
+ for (py = 0; py < box_h; py += DY) {
+ for (px = 0; px < box_w; px += DX) {
+ int h = MIN2(DY, box_h - py);
+ int w = MIN2(DX, box_w - px);
+ GLuint sz = align(align(w,8) * h, 64)/8;
+
+ assert(sz <= sizeof(stipple));
+ memset(stipple, 0, sz);
+
+ /* May need to adjust this when padding has been introduced in
+ * sz above:
+ */
+ if (get_bitmap_rect(width, height, unpack,
+ bitmap,
+ srcx + px, srcy + py, w, h,
+ (GLubyte *)stipple,
+ 8,
+ GL_TRUE) == 0)
+ continue;
+
+ /*
+ */
+ intelEmitImmediateColorExpandBlit( intel,
+ dst->cpp,
+ (GLubyte *)stipple,
+ sz,
+ color.ui,
+ dst->pitch,
+ dst->buffer,
+ 0,
+ dst->tiled,
+ rect.x1 + px,
+ rect.y2 - (py + h),
+ w, h);
+ }
+ }
+ }
+ intel->need_flush = GL_TRUE;
+ out:
+ intel_batchbuffer_flush(intel->batch);
+ }
+ UNLOCK_HARDWARE(intel);
+
+
+ if (unpack->BufferObj->Name) {
+ /* done with PBO so unmap it now */
+ ctx->Driver.UnmapBuffer(ctx, GL_PIXEL_UNPACK_BUFFER_EXT,
+ unpack->BufferObj);
+ }
+
+ return GL_TRUE;
+}
+
+
+
+
+
+/* There are a large number of possible ways to implement bitmap on
+ * this hardware, most of them have some sort of drawback. Here are a
+ * few that spring to mind:
+ *
+ * Blit:
+ * - XY_MONO_SRC_BLT_CMD
+ * - use XY_SETUP_CLIP_BLT for cliprect clipping.
+ * - XY_TEXT_BLT
+ * - XY_TEXT_IMMEDIATE_BLT
+ * - blit per cliprect, subject to maximum immediate data size.
+ * - XY_COLOR_BLT
+ * - per pixel or run of pixels
+ * - XY_PIXEL_BLT
+ * - good for sparse bitmaps
+ *
+ * 3D engine:
+ * - Point per pixel
+ * - Translate bitmap to an alpha texture and render as a quad
+ * - Chop bitmap up into 32x32 squares and render w/polygon stipple.
+ */
+void
+intelBitmap(GLcontext * ctx,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte * pixels)
+{
+ if (do_blit_bitmap(ctx, x, y, width, height,
+ unpack, pixels))
+ return;
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s: fallback to swrast\n", __FUNCTION__);
+
+ _swrast_Bitmap(ctx, x, y, width, height, unpack, pixels);
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "enums.h"
+#include "image.h"
+#include "state.h"
+#include "mtypes.h"
+#include "macros.h"
+#include "swrast/swrast.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_ioctl.h"
+#include "intel_batchbuffer.h"
+#include "intel_buffers.h"
+#include "intel_blit.h"
+#include "intel_regions.h"
+#include "intel_tris.h"
+#include "intel_pixel.h"
+
+#define FILE_DEBUG_FLAG DEBUG_PIXEL
+
+static struct intel_region *
+copypix_src_region(struct intel_context *intel, GLenum type)
+{
+ switch (type) {
+ case GL_COLOR:
+ return intel_readbuf_region(intel);
+ case GL_DEPTH:
+ /* Don't think this is really possible except at 16bpp, when we have no stencil.
+ */
+ if (intel->intelScreen->depth_region &&
+ intel->intelScreen->depth_region->cpp == 2)
+ return intel->intelScreen->depth_region;
+ case GL_STENCIL:
+ /* Don't think this is really possible.
+ */
+ break;
+ case GL_DEPTH_STENCIL_EXT:
+ /* Does it matter whether it is stencil/depth or depth/stencil?
+ */
+ return intel->intelScreen->depth_region;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * Check if any fragment operations are in effect which might affect
+ * glCopyPixels. Differs from intel_check_blit_fragment_ops in that
+ * we allow Scissor.
+ */
+static GLboolean
+intel_check_copypixel_blit_fragment_ops(GLcontext * ctx)
+{
+ if (ctx->NewState)
+ _mesa_update_state(ctx);
+
+ /* Could do logicop with the blitter:
+ */
+ return !(ctx->_ImageTransferState ||
+ ctx->Color.AlphaEnabled ||
+ ctx->Depth.Test ||
+ ctx->Fog.Enabled ||
+ ctx->Stencil.Enabled ||
+ !ctx->Color.ColorMask[0] ||
+ !ctx->Color.ColorMask[1] ||
+ !ctx->Color.ColorMask[2] ||
+ !ctx->Color.ColorMask[3] ||
+ ctx->Color.ColorLogicOpEnabled ||
+ ctx->Texture._EnabledUnits ||
+ ctx->FragmentProgram._Enabled);
+}
+
+/* Doesn't work for overlapping regions. Could do a double copy or
+ * just fallback.
+ */
+static GLboolean
+do_texture_copypixels(GLcontext * ctx,
+ GLint srcx, GLint srcy,
+ GLsizei width, GLsizei height,
+ GLint dstx, GLint dsty, GLenum type)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_region *dst = intel_drawbuf_region(intel);
+ struct intel_region *src = copypix_src_region(intel, type);
+ GLenum src_format;
+ GLenum src_type;
+
+ DBG("%s %d,%d %dx%d --> %d,%d\n", __FUNCTION__,
+ srcx, srcy, width, height, dstx, dsty);
+
+ if (!src || !dst || type != GL_COLOR)
+ return GL_FALSE;
+
+ /* Can't handle overlapping regions. Don't have sufficient control
+ * over rasterization to pull it off in-place. Punt on these for
+ * now.
+ *
+ * XXX: do a copy to a temporary.
+ */
+ if (src->buffer == dst->buffer) {
+ drm_clip_rect_t srcbox;
+ drm_clip_rect_t dstbox;
+ drm_clip_rect_t tmp;
+
+ srcbox.x1 = srcx;
+ srcbox.y1 = srcy;
+ srcbox.x2 = srcx + width;
+ srcbox.y2 = srcy + height;
+
+ dstbox.x1 = dstx;
+ dstbox.y1 = dsty;
+ dstbox.x2 = dstx + width * ctx->Pixel.ZoomX;
+ dstbox.y2 = dsty + height * ctx->Pixel.ZoomY;
+
+ DBG("src %d,%d %d,%d\n", srcbox.x1, srcbox.y1, srcbox.x2, srcbox.y2);
+ DBG("dst %d,%d %d,%d (%dx%d) (%f,%f)\n", dstbox.x1, dstbox.y1, dstbox.x2, dstbox.y2,
+ width, height, ctx->Pixel.ZoomX, ctx->Pixel.ZoomY);
+
+ if (intel_intersect_cliprects(&tmp, &srcbox, &dstbox)) {
+ DBG("%s: regions overlap\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+ }
+
+ intelFlush(&intel->ctx);
+
+ intel->vtbl.install_meta_state(intel);
+
+ /* Is this true? Also will need to turn depth testing on according
+ * to state:
+ */
+ intel->vtbl.meta_no_stencil_write(intel);
+ intel->vtbl.meta_no_depth_write(intel);
+
+ /* Set the 3d engine to draw into the destination region:
+ */
+ intel->vtbl.meta_draw_region(intel, dst, intel->intelScreen->depth_region);
+
+ intel->vtbl.meta_import_pixel_state(intel);
+
+ if (src->cpp == 2) {
+ src_format = GL_RGB;
+ src_type = GL_UNSIGNED_SHORT_5_6_5;
+ }
+ else {
+ src_format = GL_BGRA;
+ src_type = GL_UNSIGNED_BYTE;
+ }
+
+ /* Set the frontbuffer up as a large rectangular texture.
+ */
+ if (!intel->vtbl.meta_tex_rect_source(intel, src->buffer, 0,
+ src->pitch,
+ src->height, src_format, src_type)) {
+ intel->vtbl.leave_meta_state(intel);
+ return GL_FALSE;
+ }
+
+
+ intel->vtbl.meta_texture_blend_replace(intel);
+
+ LOCK_HARDWARE(intel);
+
+ if (intel->driDrawable->numClipRects) {
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+
+
+ srcy = dPriv->h - srcy - height; /* convert from gl to hardware coords */
+
+ srcx += dPriv->x;
+ srcy += dPriv->y;
+
+ /* Clip against the source region. This is the only source
+ * clipping we do. XXX: Just set the texcoord wrap mode to clamp
+ * or similar.
+ *
+ */
+ if (0) {
+ GLint orig_x = srcx;
+ GLint orig_y = srcy;
+
+ if (!_mesa_clip_to_region(0, 0, src->pitch, src->height,
+ &srcx, &srcy, &width, &height))
+ goto out;
+
+ dstx += srcx - orig_x;
+ dsty += (srcy - orig_y) * ctx->Pixel.ZoomY;
+ }
+
+ /* Just use the regular cliprect mechanism... Does this need to
+ * even hold the lock???
+ */
+ intel_meta_draw_quad(intel,
+ dstx,
+ dstx + width * ctx->Pixel.ZoomX,
+ dPriv->h - (dsty + height * ctx->Pixel.ZoomY),
+ dPriv->h - (dsty), 0, /* XXX: what z value? */
+ 0x00ff00ff,
+ srcx, srcx + width, srcy, srcy + height);
+
+ out:
+ intel->vtbl.leave_meta_state(intel);
+ intel_batchbuffer_flush(intel->batch);
+ }
+ UNLOCK_HARDWARE(intel);
+
+ DBG("%s: success\n", __FUNCTION__);
+ return GL_TRUE;
+}
+
+
+
+
+
+/**
+ * CopyPixels with the blitter. Don't support zooming, pixel transfer, etc.
+ */
+static GLboolean
+do_blit_copypixels(GLcontext * ctx,
+ GLint srcx, GLint srcy,
+ GLsizei width, GLsizei height,
+ GLint dstx, GLint dsty, GLenum type)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_region *dst = intel_drawbuf_region(intel);
+ struct intel_region *src = copypix_src_region(intel, type);
+
+ /* Copypixels can be more than a straight copy. Ensure all the
+ * extra operations are disabled:
+ */
+ if (!intel_check_copypixel_blit_fragment_ops(ctx) ||
+ ctx->Pixel.ZoomX != 1.0F || ctx->Pixel.ZoomY != 1.0F)
+ return GL_FALSE;
+
+ if (!src || !dst)
+ return GL_FALSE;
+
+
+
+ intelFlush(&intel->ctx);
+
+ LOCK_HARDWARE(intel);
+
+ if (intel->driDrawable->numClipRects) {
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+ drm_clip_rect_t *box = dPriv->pClipRects;
+ drm_clip_rect_t dest_rect;
+ GLint nbox = dPriv->numClipRects;
+ GLint delta_x = 0;
+ GLint delta_y = 0;
+ GLuint i;
+
+ /* Do scissoring in GL coordinates:
+ */
+ if (ctx->Scissor.Enabled)
+ {
+ GLint x = ctx->Scissor.X;
+ GLint y = ctx->Scissor.Y;
+ GLuint w = ctx->Scissor.Width;
+ GLuint h = ctx->Scissor.Height;
+ GLint dx = dstx - srcx;
+ GLint dy = dsty - srcy;
+
+ if (!_mesa_clip_to_region(x, y, x+w-1, y+h-1, &dstx, &dsty, &width, &height))
+ goto out;
+
+ srcx = dstx - dx;
+ srcy = dsty - dy;
+ }
+
+ /* Convert from GL to hardware coordinates:
+ */
+ dsty = dPriv->h - dsty - height;
+ srcy = dPriv->h - srcy - height;
+ dstx += dPriv->x;
+ dsty += dPriv->y;
+ srcx += dPriv->x;
+ srcy += dPriv->y;
+
+ /* Clip against the source region. This is the only source
+ * clipping we do. Dst is clipped with cliprects below.
+ */
+ {
+ delta_x = srcx - dstx;
+ delta_y = srcy - dsty;
+
+ if (!_mesa_clip_to_region(0, 0, src->pitch, src->height,
+ &srcx, &srcy, &width, &height))
+ goto out;
+
+ dstx = srcx - delta_x;
+ dsty = srcy - delta_y;
+ }
+
+ dest_rect.x1 = dstx;
+ dest_rect.y1 = dsty;
+ dest_rect.x2 = dstx + width;
+ dest_rect.y2 = dsty + height;
+
+ /* Could do slightly more clipping: Eg, take the intersection of
+ * the existing set of cliprects and those cliprects translated
+ * by delta_x, delta_y:
+ *
+ * This code will not overwrite other windows, but will
+ * introduce garbage when copying from obscured window regions.
+ */
+ for (i = 0; i < nbox; i++) {
+ drm_clip_rect_t rect;
+
+ if (!intel_intersect_cliprects(&rect, &dest_rect, &box[i]))
+ continue;
+
+
+ intelEmitCopyBlit(intel, dst->cpp,
+ src->pitch, src->buffer, 0,
+ dst->pitch, dst->buffer, 0,
+ rect.x1 + delta_x, rect.y1 + delta_y, /* srcx, srcy */
+ rect.x1, rect.y1, /* dstx, dsty */
+ rect.x2 - rect.x1, rect.y2 - rect.y1);
+ }
+
+ out:
+ intel_batchbuffer_flush(intel->batch);
+ }
+ UNLOCK_HARDWARE(intel);
+
+ DBG("%s: success\n", __FUNCTION__);
+ return GL_TRUE;
+}
+
+
+void
+intelCopyPixels(GLcontext * ctx,
+ GLint srcx, GLint srcy,
+ GLsizei width, GLsizei height,
+ GLint destx, GLint desty, GLenum type)
+{
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+
+ if (do_blit_copypixels(ctx, srcx, srcy, width, height, destx, desty, type))
+ return;
+
+ if (do_texture_copypixels(ctx, srcx, srcy, width, height, destx, desty, type))
+ return;
+
+ DBG("fallback to _swrast_CopyPixels\n");
+
+ _swrast_CopyPixels(ctx, srcx, srcy, width, height, destx, desty, type);
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "enums.h"
+#include "image.h"
+#include "mtypes.h"
+#include "macros.h"
+#include "bufferobj.h"
+#include "swrast/swrast.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_ioctl.h"
+#include "intel_batchbuffer.h"
+#include "intel_blit.h"
+#include "intel_buffers.h"
+#include "intel_regions.h"
+#include "intel_pixel.h"
+#include "intel_buffer_objects.h"
+#include "intel_tris.h"
+
+
+
+static GLboolean
+do_texture_drawpixels(GLcontext * ctx,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLvoid * pixels)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_region *dst = intel_drawbuf_region(intel);
+ struct intel_buffer_object *src = intel_buffer_object(unpack->BufferObj);
+ GLuint rowLength = unpack->RowLength ? unpack->RowLength : width;
+ GLuint src_offset;
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+
+ intelFlush(&intel->ctx);
+ intel->vtbl.render_start(intel);
+ intel->vtbl.emit_state(intel);
+
+ if (!dst)
+ return GL_FALSE;
+
+ if (src) {
+ if (!_mesa_validate_pbo_access(2, unpack, width, height, 1,
+ format, type, pixels)) {
+ _mesa_error(ctx, GL_INVALID_OPERATION, "glDrawPixels");
+ return GL_TRUE;
+ }
+ }
+ else {
+ /* PBO only for now:
+ */
+/* _mesa_printf("%s - not PBO\n", __FUNCTION__); */
+ return GL_FALSE;
+ }
+
+ /* There are a couple of things we can't do yet, one of which is
+ * setting the correct state for pixel operations when GL texturing is
+ * enabled. That's a pretty rare state and probably not worth the
+ * effort. A completely device-independent version of this may do
+ * more.
+ *
+ * Similarly, we make no attempt to merge metaops processing with
+ * an enabled fragment program, though it would certainly be
+ * possible.
+ */
+ if (!intel_check_meta_tex_fragment_ops(ctx)) {
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s - bad GL fragment state for metaops texture\n",
+ __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ intel->vtbl.install_meta_state(intel);
+
+
+ /* Is this true? Also will need to turn depth testing on according
+ * to state:
+ */
+ intel->vtbl.meta_no_stencil_write(intel);
+ intel->vtbl.meta_no_depth_write(intel);
+
+ /* Set the 3d engine to draw into the destination region:
+ */
+ intel->vtbl.meta_draw_region(intel, dst, intel->intelScreen->depth_region);
+
+ intel->vtbl.meta_import_pixel_state(intel);
+
+ src_offset = (GLuint) _mesa_image_address(2, unpack, pixels, width, height,
+ format, type, 0, 0, 0);
+
+
+ /* Set the pbo up as a rectangular texture, if possible.
+ *
+ * TODO: This is almost always possible if the i915 fragment
+ * program is adjusted to correctly swizzle the sampled colors.
+ * The major exception is any 24bit texture, like RGB888, for which
+ * there is no hardware support.
+ */
+ if (!intel->vtbl.meta_tex_rect_source(intel, src->buffer, src_offset,
+ rowLength, height, format, type)) {
+ intel->vtbl.leave_meta_state(intel);
+ return GL_FALSE;
+ }
+
+ intel->vtbl.meta_texture_blend_replace(intel);
+
+
+ LOCK_HARDWARE(intel);
+
+ if (intel->driDrawable->numClipRects) {
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+ GLint srcx, srcy;
+ GLint dstx, dsty;
+
+ dstx = x;
+ dsty = dPriv->h - (y + height);
+
+ srcx = 0; /* skiprows/pixels already done */
+ srcy = 0;
+
+ if (0) {
+ const GLint orig_x = dstx;
+ const GLint orig_y = dsty;
+
+ if (!_mesa_clip_to_region(0, 0, dst->pitch, dst->height,
+ &dstx, &dsty, &width, &height))
+ goto out;
+
+ srcx += dstx - orig_x;
+ srcy += dsty - orig_y;
+ }
+
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("draw %d,%d %dx%d\n", dstx, dsty, width, height);
+
+ /* Must use the regular cliprect mechanism in order to get the
+ * drawing origin set correctly. Otherwise scissor state is in the
+ * wrong coordinate space. Does this even need to hold the
+ * lock???
+ */
+ intel_meta_draw_quad(intel,
+ dstx, dstx + width * ctx->Pixel.ZoomX,
+ dPriv->h - (y + height * ctx->Pixel.ZoomY),
+ dPriv->h - (y),
+ -ctx->Current.RasterPos[2] * .5,
+ 0x00ff00ff,
+ srcx, srcx + width, srcy + height, srcy);
+ out:
+ intel->vtbl.leave_meta_state(intel);
+ intel_batchbuffer_flush(intel->batch);
+ }
+ UNLOCK_HARDWARE(intel);
+ return GL_TRUE;
+}
+
+
+
+
+
+/* Pros:
+ * - no waiting for idle before updating framebuffer.
+ *
+ * Cons:
+ * - if upload is by memcpy, this may actually be slower than fallback path.
+ * - uploads the whole image even if destination is clipped
+ *
+ * Need to benchmark.
+ *
+ * Given the questions about performance, implement for pbo's only.
+ * This path is definitely a win if the pbo is already in agp. If it
+ * turns out otherwise, we can add the code necessary to upload client
+ * data to agp space before performing the blit. (Though it may turn
+ * out to be better/simpler just to use the texture engine).
+ */
+static GLboolean
+do_blit_drawpixels(GLcontext * ctx,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLvoid * pixels)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_region *dest = intel_drawbuf_region(intel);
+ struct intel_buffer_object *src = intel_buffer_object(unpack->BufferObj);
+ GLuint src_offset;
+ GLuint rowLength;
+ struct _DriFenceObject *fence = NULL;
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s\n", __FUNCTION__);
+
+
+ if (!dest) {
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s - no dest\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ if (src) {
+ /* This validation should be done by core mesa:
+ */
+ if (!_mesa_validate_pbo_access(2, unpack, width, height, 1,
+ format, type, pixels)) {
+ _mesa_error(ctx, GL_INVALID_OPERATION, "glDrawPixels");
+ return GL_TRUE;
+ }
+ }
+ else {
+ /* PBO only for now:
+ */
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s - not PBO\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ if (!intel_check_blit_format(dest, format, type)) {
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s - bad format for blit\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ if (!intel_check_meta_tex_fragment_ops(ctx)) {
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s - bad GL fragment state for meta tex\n",
+ __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ if (ctx->Pixel.ZoomX != 1.0F) {
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s - bad PixelZoomX for blit\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+
+ if (unpack->RowLength > 0)
+ rowLength = unpack->RowLength;
+ else
+ rowLength = width;
+
+ if (ctx->Pixel.ZoomY == -1.0F) {
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s - bad PixelZoomY for blit\n", __FUNCTION__);
+ return GL_FALSE; /* later */
+ y -= height;
+ }
+ else if (ctx->Pixel.ZoomY == 1.0F) {
+ rowLength = -rowLength;
+ }
+ else {
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s - bad PixelZoomY for blit\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ src_offset = (GLuint) _mesa_image_address(2, unpack, pixels, width, height,
+ format, type, 0, 0, 0);
+
+ intelFlush(&intel->ctx);
+ LOCK_HARDWARE(intel);
+
+ if (intel->driDrawable->numClipRects) {
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+ int nbox = dPriv->numClipRects;
+ drm_clip_rect_t *box = dPriv->pClipRects;
+ drm_clip_rect_t rect;
+ drm_clip_rect_t dest_rect;
+ struct _DriBufferObject *src_buffer =
+ intel_bufferobj_buffer(intel, src, INTEL_READ);
+ int i;
+
+ dest_rect.x1 = dPriv->x + x;
+ dest_rect.y1 = dPriv->y + dPriv->h - (y + height);
+ dest_rect.x2 = dest_rect.x1 + width;
+ dest_rect.y2 = dest_rect.y1 + height;
+
+ for (i = 0; i < nbox; i++) {
+ if (!intel_intersect_cliprects(&rect, &dest_rect, &box[i]))
+ continue;
+
+ intelEmitCopyBlit(intel,
+ dest->cpp,
+ rowLength,
+ src_buffer, src_offset,
+ dest->pitch,
+ dest->buffer, 0,
+ rect.x1 - dest_rect.x1,
+ rect.y2 - dest_rect.y2,
+ rect.x1,
+ rect.y1, rect.x2 - rect.x1, rect.y2 - rect.y1);
+ }
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceReference(fence);
+ }
+ UNLOCK_HARDWARE(intel);
+
+ if (intel->driDrawable->numClipRects)
+ driFenceFinish(fence, DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW, GL_FALSE);
+
+ driFenceUnReference(fence);
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s - DONE\n", __FUNCTION__);
+
+ return GL_TRUE;
+}
+
+
+
+void
+intelDrawPixels(GLcontext * ctx,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ GLenum format,
+ GLenum type,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLvoid * pixels)
+{
+ if (do_blit_drawpixels(ctx, x, y, width, height, format, type,
+ unpack, pixels))
+ return;
+
+ if (do_texture_drawpixels(ctx, x, y, width, height, format, type,
+ unpack, pixels))
+ return;
+
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s: fallback to swrast\n", __FUNCTION__);
+
+ _swrast_DrawPixels(ctx, x, y, width, height, format, type, unpack, pixels);
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "enums.h"
+#include "mtypes.h"
+#include "macros.h"
+#include "image.h"
+#include "bufferobj.h"
+#include "swrast/swrast.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_ioctl.h"
+#include "intel_batchbuffer.h"
+#include "intel_blit.h"
+#include "intel_buffers.h"
+#include "intel_regions.h"
+#include "intel_pixel.h"
+#include "intel_buffer_objects.h"
+
+/* For many applications, the new ability to pull the source buffers
+ * back out of the GTT and then do the packing/conversion operations
+ * in software will be as much of an improvement as trying to get the
+ * blitter and/or texture engine to do the work.
+ *
+ * This step is gated on private backbuffers.
+ *
+ * Obviously the frontbuffer can't be pulled back, so that is either
+ * an argument for blit/texture readpixels, or for blitting to a
+ * temporary and then pulling that back.
+ *
+ * When the destination is a pbo, however, it's not clear if it is
+ * ever going to be pulled to main memory (though the access param
+ * will be a good hint). So it sounds like we do want to be able to
+ * choose between blit/texture implementation on the gpu and pullback
+ * and cpu-based copying.
+ *
+ * Unless you can magically turn client memory into a PBO for the
+ * duration of this call, there will be a cpu-based copying step in
+ * any case.
+ */
+
+
+static GLboolean
+do_texture_readpixels(GLcontext * ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *pack,
+ struct intel_region *dest_region)
+{
+#if 0
+ struct intel_context *intel = intel_context(ctx);
+ intelScreenPrivate *screen = intel->intelScreen;
+ GLint pitch = pack->RowLength ? pack->RowLength : width;
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+ int textureFormat;
+ GLenum glTextureFormat;
+ int destFormat, depthFormat, destPitch;
+ drm_clip_rect_t tmp;
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+
+
+ if (ctx->_ImageTransferState ||
+ pack->SwapBytes || pack->LsbFirst || !pack->Invert) {
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ fprintf(stderr, "%s: check_color failed\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ intel->vtbl.meta_texrect_source(intel, intel_readbuf_region(intel));
+
+ if (!intel->vtbl.meta_render_dest(intel, dest_region, type, format)) {
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ fprintf(stderr, "%s: couldn't set dest %s/%s\n",
+ __FUNCTION__,
+ _mesa_lookup_enum_by_nr(type),
+ _mesa_lookup_enum_by_nr(format));
+ return GL_FALSE;
+ }
+
+ LOCK_HARDWARE(intel);
+
+ if (intel->driDrawable->numClipRects) {
+ intel->vtbl.install_meta_state(intel);
+ intel->vtbl.meta_no_depth_write(intel);
+ intel->vtbl.meta_no_stencil_write(intel);
+
+ if (!driClipRectToFramebuffer(ctx->ReadBuffer, &x, &y, &width, &height)) {
+ UNLOCK_HARDWARE(intel);
+ SET_STATE(i830, state);
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ fprintf(stderr, "%s: cliprect failed\n", __FUNCTION__);
+ return GL_TRUE;
+ }
+
+ y = dPriv->h - y - height;
+ x += dPriv->x;
+ y += dPriv->y;
+
+
+ /* Set the frontbuffer up as a large rectangular texture.
+ */
+ intel->vtbl.meta_tex_rect_source(intel, src_region, textureFormat);
+
+
+ intel->vtbl.meta_texture_blend_replace(i830, glTextureFormat);
+
+
+ /* Set the 3d engine to draw into the destination region:
+ */
+
+ intel->vtbl.meta_draw_region(intel, dest_region);
+ intel->vtbl.meta_draw_format(intel, destFormat, depthFormat); /* ?? */
+
+
+ /* Draw a single quad, no cliprects:
+ */
+ intel->vtbl.meta_disable_cliprects(intel);
+
+ intel->vtbl.draw_quad(intel,
+ 0, width, 0, height,
+ 0x00ff00ff, x, x + width, y, y + height);
+
+ intel->vtbl.leave_meta_state(intel);
+ }
+ UNLOCK_HARDWARE(intel);
+
+ intel_region_wait_fence(ctx, dest_region); /* required by GL */
+ return GL_TRUE;
+#endif
+
+ return GL_FALSE;
+}
+
+
+
+
+static GLboolean
+do_blit_readpixels(GLcontext * ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_region *src = intel_readbuf_region(intel);
+ struct intel_buffer_object *dst = intel_buffer_object(pack->BufferObj);
+ GLuint dst_offset;
+ GLuint rowLength;
+ struct _DriFenceObject *fence = NULL;
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s\n", __FUNCTION__);
+
+ if (!src)
+ return GL_FALSE;
+
+ if (dst) {
+ /* XXX This validation should be done by core mesa:
+ */
+ if (!_mesa_validate_pbo_access(2, pack, width, height, 1,
+ format, type, pixels)) {
+ _mesa_error(ctx, GL_INVALID_OPERATION, "glReadPixels");
+ return GL_TRUE;
+ }
+ }
+ else {
+ /* PBO only for now:
+ */
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s - not PBO\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+
+ if (ctx->_ImageTransferState ||
+ !intel_check_blit_format(src, format, type)) {
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s - bad format for blit\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ if (pack->Alignment != 1 || pack->SwapBytes || pack->LsbFirst) {
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s: bad packing params\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ if (pack->RowLength > 0)
+ rowLength = pack->RowLength;
+ else
+ rowLength = width;
+
+ if (pack->Invert) {
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s: MESA_PACK_INVERT not done yet\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+ else {
+ rowLength = -rowLength;
+ }
+
+ /* XXX 64-bit cast? */
+ dst_offset = (GLuint) _mesa_image_address(2, pack, pixels, width, height,
+ format, type, 0, 0, 0);
+
+
+ /* Although the blits go on the command buffer, need to do this and
+ * fire with lock held to guarantee cliprects are correct.
+ */
+ intelFlush(&intel->ctx);
+ LOCK_HARDWARE(intel);
+
+ if (intel->driDrawable->numClipRects) {
+ GLboolean all = (width * height * src->cpp == dst->Base.Size &&
+ x == 0 && dst_offset == 0);
+
+ struct _DriBufferObject *dst_buffer =
+ intel_bufferobj_buffer(intel, dst, all ? INTEL_WRITE_FULL :
+ INTEL_WRITE_PART);
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+ int nbox = dPriv->numClipRects;
+ drm_clip_rect_t *box = dPriv->pClipRects;
+ drm_clip_rect_t rect;
+ drm_clip_rect_t src_rect;
+ int i;
+
+ src_rect.x1 = dPriv->x + x;
+ src_rect.y1 = dPriv->y + dPriv->h - (y + height);
+ src_rect.x2 = src_rect.x1 + width;
+ src_rect.y2 = src_rect.y1 + height;
+
+
+
+ for (i = 0; i < nbox; i++) {
+ if (!intel_intersect_cliprects(&rect, &src_rect, &box[i]))
+ continue;
+
+ intelEmitCopyBlit(intel,
+ src->cpp,
+ src->pitch, src->buffer, 0,
+ rowLength,
+ dst_buffer, dst_offset,
+ rect.x1,
+ rect.y1,
+ rect.x1 - src_rect.x1,
+ rect.y2 - src_rect.y2,
+ rect.x2 - rect.x1, rect.y2 - rect.y1);
+ }
+
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceReference(fence);
+
+ }
+ UNLOCK_HARDWARE(intel);
+
+ if (intel->driDrawable->numClipRects)
+ driFenceFinish(fence, DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW,
+ GL_FALSE);
+
+ driFenceUnReference(fence);
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s - DONE\n", __FUNCTION__);
+
+ return GL_TRUE;
+}
+
+void
+intelReadPixels(GLcontext * ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
+{
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+
+ intelFlush(ctx);
+
+ if (do_blit_readpixels
+ (ctx, x, y, width, height, format, type, pack, pixels))
+ return;
+
+ if (do_texture_readpixels
+ (ctx, x, y, width, height, format, type, pack, pixels))
+ return;
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ _mesa_printf("%s: fallback to swrast\n", __FUNCTION__);
+
+ _swrast_ReadPixels(ctx, x, y, width, height, format, type, pack, pixels);
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef _INTEL_REG_H_
+#define _INTEL_REG_H_
+
+
+
+#define CMD_3D (0x3<<29)
+
+
+#define _3DPRIMITIVE ((0x3<<29)|(0x1f<<24))
+#define PRIM_INDIRECT (1<<23)
+#define PRIM_INLINE (0<<23)
+#define PRIM_INDIRECT_SEQUENTIAL (0<<17)
+#define PRIM_INDIRECT_ELTS (1<<17)
+
+#define PRIM3D_TRILIST (0x0<<18)
+#define PRIM3D_TRISTRIP (0x1<<18)
+#define PRIM3D_TRISTRIP_RVRSE (0x2<<18)
+#define PRIM3D_TRIFAN (0x3<<18)
+#define PRIM3D_POLY (0x4<<18)
+#define PRIM3D_LINELIST (0x5<<18)
+#define PRIM3D_LINESTRIP (0x6<<18)
+#define PRIM3D_RECTLIST (0x7<<18)
+#define PRIM3D_POINTLIST (0x8<<18)
+#define PRIM3D_DIB (0x9<<18)
+#define PRIM3D_MASK (0x1f<<18)
+
+#define I915PACKCOLOR4444(r,g,b,a) \
+ ((((a) & 0xf0) << 8) | (((r) & 0xf0) << 4) | ((g) & 0xf0) | ((b) >> 4))
+
+#define I915PACKCOLOR1555(r,g,b,a) \
+ ((((r) & 0xf8) << 7) | (((g) & 0xf8) << 2) | (((b) & 0xf8) >> 3) | \
+ ((a) ? 0x8000 : 0))
+
+#define I915PACKCOLOR565(r,g,b) \
+ ((((r) & 0xf8) << 8) | (((g) & 0xfc) << 3) | (((b) & 0xf8) >> 3))
+
+#define I915PACKCOLOR8888(r,g,b,a) \
+ ((a<<24) | (r<<16) | (g<<8) | b)
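As a quick standalone check of the packing these macros perform (hypothetical test program, not part of this header): I915PACKCOLOR565 keeps the top 5/6/5 bits of each 8-bit channel, so opaque white packs to 0xffff, pure red to 0xf800, pure green to 0x07e0 and pure blue to 0x001f.

    #include <assert.h>

    /* Same definition as in the header above. */
    #define I915PACKCOLOR565(r,g,b) \
       ((((r) & 0xf8) << 8) | (((g) & 0xfc) << 3) | (((b) & 0xf8) >> 3))

    int main(void)
    {
       assert(I915PACKCOLOR565(255, 255, 255) == 0xffff); /* white */
       assert(I915PACKCOLOR565(255,   0,   0) == 0xf800); /* red, bits 15:11 */
       assert(I915PACKCOLOR565(  0, 255,   0) == 0x07e0); /* green, bits 10:5 */
       assert(I915PACKCOLOR565(  0,   0, 255) == 0x001f); /* blue, bits 4:0 */
       return 0;
    }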
+
+
+
+
+#define BR00_BITBLT_CLIENT 0x40000000
+#define BR00_OP_COLOR_BLT 0x10000000
+#define BR00_OP_SRC_COPY_BLT 0x10C00000
+#define BR13_SOLID_PATTERN 0x80000000
+
+#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4)
+#define XY_COLOR_BLT_WRITE_ALPHA (1<<21)
+#define XY_COLOR_BLT_WRITE_RGB (1<<20)
+
+#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Provide additional functionality on top of bufmgr buffers:
+ * - 2d semantics and blit operations
+ * - refcounting of buffers for multiple images in a buffer.
+ * - refcounting of buffer mappings.
+ * - some logic for moving the buffers to the best memory pools for
+ * given operations.
+ *
+ * Most of this is to make it easier to implement the fixed-layout
+ * mipmap tree required by intel hardware in the face of GL's
+ * programming interface where each image can be specified in random
+ * order and it isn't clear what layout the tree should have until the
+ * last moment.
+ */
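A hedged sketch of the refcounting lifecycle described above (hypothetical fragment rather than a runnable program: screen stands in for a valid intelScreenPrivate pointer supplied by the driver; the calls are the ones introduced in this file):

    struct intel_region *color = intel_region_alloc(screen, 4, 1024, 768);
    struct intel_region *alias = NULL;

    intel_region_reference(&alias, color);           /* refcount 1 -> 2 */

    GLubyte *ptr = intel_region_map(screen, color);  /* map_refcount 0 -> 1 */
    /* ... CPU access through ptr ... */
    intel_region_unmap(screen, color);               /* map_refcount 1 -> 0 */

    intel_region_release(&alias);                    /* refcount 2 -> 1, alias = NULL */
    intel_region_release(&color);                    /* refcount 1 -> 0, buffer freed */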
+
+#include "intel_context.h"
+#include "intel_regions.h"
+#include "intel_blit.h"
+#include "intel_buffer_objects.h"
+#include "dri_bufmgr.h"
+#include "intel_batchbuffer.h"
+
+#define FILE_DEBUG_FLAG DEBUG_REGION
+
+void
+intel_region_idle(intelScreenPrivate *intelScreen, struct intel_region *region)
+{
+ DBG("%s\n", __FUNCTION__);
+ if (region && region->buffer)
+ driBOWaitIdle(region->buffer, GL_FALSE);
+}
+
+/* XXX: Thread safety?
+ */
+GLubyte *
+intel_region_map(intelScreenPrivate *intelScreen, struct intel_region *region)
+{
+ DBG("%s\n", __FUNCTION__);
+ if (!region->map_refcount++) {
+ if (region->pbo)
+ intel_region_cow(intelScreen, region);
+
+ region->map = driBOMap(region->buffer,
+ DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);
+ }
+
+ return region->map;
+}
+
+void
+intel_region_unmap(intelScreenPrivate *intelScreen, struct intel_region *region)
+{
+ DBG("%s\n", __FUNCTION__);
+ if (!--region->map_refcount) {
+ driBOUnmap(region->buffer);
+ region->map = NULL;
+ }
+}
+
+#undef TEST_CACHED_TEXTURES
+
+struct intel_region *
+intel_region_alloc(intelScreenPrivate *intelScreen,
+ GLuint cpp, GLuint pitch, GLuint height)
+{
+ struct intel_region *region = calloc(sizeof(*region), 1);
+
+ DBG("%s\n", __FUNCTION__);
+
+ region->cpp = cpp;
+ region->pitch = pitch;
+ region->height = height; /* needed? */
+ region->refcount = 1;
+
+ driGenBuffers(intelScreen->regionPool,
+ "region", 1, ®ion->buffer, 64,
+#ifdef TEST_CACHED_TEXTURES
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_BIND_CACHED |
+ DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
+#else
+ 0,
+#endif
+ 0);
+ driBOData(region->buffer, pitch * cpp * height, NULL, 0);
+ return region;
+}
+
+void
+intel_region_reference(struct intel_region **dst, struct intel_region *src)
+{
+ assert(*dst == NULL);
+ if (src) {
+ src->refcount++;
+ *dst = src;
+ }
+}
+
+void
+intel_region_release(struct intel_region **region)
+{
+ if (!*region)
+ return;
+
+ DBG("%s %d\n", __FUNCTION__, (*region)->refcount - 1);
+
+ ASSERT((*region)->refcount > 0);
+ (*region)->refcount--;
+
+ if ((*region)->refcount == 0) {
+ assert((*region)->map_refcount == 0);
+
+ if ((*region)->pbo)
+ (*region)->pbo->region = NULL;
+ (*region)->pbo = NULL;
+ driBOUnReference((*region)->buffer);
+ free(*region);
+ }
+ *region = NULL;
+}
+
+
+struct intel_region *
+intel_region_create_static(intelScreenPrivate *intelScreen,
+ GLuint mem_type,
+ GLuint offset,
+ void *virtual,
+ GLuint cpp, GLuint pitch, GLuint height)
+{
+ struct intel_region *region = calloc(sizeof(*region), 1);
+ DBG("%s\n", __FUNCTION__);
+
+ region->cpp = cpp;
+ region->pitch = pitch;
+ region->height = height; /* needed? */
+ region->refcount = 1;
+
+ /*
+ * We use a "shared" buffer type to indicate buffers created and
+ * shared by others.
+ */
+
+ driGenBuffers(intelScreen->staticPool, "static region", 1,
+ &region->buffer, 64,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_NO_MOVE |
+ DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);
+ driBOSetStatic(region->buffer, offset, pitch * cpp * height, virtual, 0);
+
+ return region;
+}
+
+
+
+void
+intel_region_update_static(intelScreenPrivate *intelScreen,
+ struct intel_region *region,
+ GLuint mem_type,
+ GLuint offset,
+ void *virtual,
+ GLuint cpp, GLuint pitch, GLuint height)
+{
+ DBG("%s\n", __FUNCTION__);
+
+ region->cpp = cpp;
+ region->pitch = pitch;
+ region->height = height; /* needed? */
+
+ /*
+ * We use a "shared" buffer type to indicate buffers created and
+ * shared by others.
+ */
+
+ driDeleteBuffers(1, &region->buffer);
+ driGenBuffers(intelScreen->staticPool, "static region", 1,
+ &region->buffer, 64,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_NO_MOVE |
+ DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);
+ driBOSetStatic(region->buffer, offset, pitch * cpp * height, virtual, 0);
+
+}
+
+
+
+/*
+ * XXX Move this into core Mesa?
+ */
+static void
+_mesa_copy_rect(GLubyte * dst,
+ GLuint cpp,
+ GLuint dst_pitch,
+ GLuint dst_x,
+ GLuint dst_y,
+ GLuint width,
+ GLuint height,
+ GLubyte * src, GLuint src_pitch, GLuint src_x, GLuint src_y)
+{
+ GLuint i;
+
+ dst_pitch *= cpp;
+ src_pitch *= cpp;
+ dst += dst_x * cpp;
+ src += src_x * cpp;
+ dst += dst_y * dst_pitch;
+ src += src_y * src_pitch;
+ width *= cpp;
+
+ if (width == dst_pitch && width == src_pitch)
+ memcpy(dst, src, height * width);
+ else {
+ for (i = 0; i < height; i++) {
+ memcpy(dst, src, width);
+ dst += dst_pitch;
+ src += src_pitch;
+ }
+ }
+}
+
+
+/* Upload data to a rectangular sub-region. Lots of choices how to do this:
+ *
+ * - memcpy by span to current destination
+ * - upload data as new buffer and blit
+ *
+ * Currently always memcpy.
+ */
+void
+intel_region_data(intelScreenPrivate *intelScreen,
+ struct intel_region *dst,
+ GLuint dst_offset,
+ GLuint dstx, GLuint dsty,
+ void *src, GLuint src_pitch,
+ GLuint srcx, GLuint srcy, GLuint width, GLuint height)
+{
+ struct intel_context *intel = intelScreenContext(intelScreen);
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (intel == NULL)
+ return;
+
+ if (dst->pbo) {
+ if (dstx == 0 &&
+ dsty == 0 && width == dst->pitch && height == dst->height)
+ intel_region_release_pbo(intelScreen, dst);
+ else
+ intel_region_cow(intelScreen, dst);
+ }
+
+
+ LOCK_HARDWARE(intel);
+
+ _mesa_copy_rect(intel_region_map(intelScreen, dst) + dst_offset,
+ dst->cpp,
+ dst->pitch,
+ dstx, dsty, width, height, src, src_pitch, srcx, srcy);
+
+ intel_region_unmap(intelScreen, dst);
+
+ UNLOCK_HARDWARE(intel);
+
+}
+
+/* Copy rectangular sub-regions. Need better logic about when to
+ * push buffers into AGP - will currently do so whenever possible.
+ */
+void
+intel_region_copy(intelScreenPrivate *intelScreen,
+ struct intel_region *dst,
+ GLuint dst_offset,
+ GLuint dstx, GLuint dsty,
+ struct intel_region *src,
+ GLuint src_offset,
+ GLuint srcx, GLuint srcy, GLuint width, GLuint height)
+{
+ struct intel_context *intel = intelScreenContext(intelScreen);
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (intel == NULL)
+ return;
+
+ if (dst->pbo) {
+ if (dstx == 0 &&
+ dsty == 0 && width == dst->pitch && height == dst->height)
+ intel_region_release_pbo(intelScreen, dst);
+ else
+ intel_region_cow(intelScreen, dst);
+ }
+
+ assert(src->cpp == dst->cpp);
+
+ intelEmitCopyBlit(intel,
+ dst->cpp,
+ src->pitch, src->buffer, src_offset,
+ dst->pitch, dst->buffer, dst_offset,
+ srcx, srcy, dstx, dsty, width, height);
+}
+
+/* Fill a rectangular sub-region. Need better logic about when to
+ * push buffers into AGP - will currently do so whenever possible.
+ */
+void
+intel_region_fill(intelScreenPrivate *intelScreen,
+ struct intel_region *dst,
+ GLuint dst_offset,
+ GLuint dstx, GLuint dsty,
+ GLuint width, GLuint height, GLuint color)
+{
+ struct intel_context *intel = intelScreenContext(intelScreen);
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (intel == NULL)
+ return;
+
+ if (dst->pbo) {
+ if (dstx == 0 &&
+ dsty == 0 && width == dst->pitch && height == dst->height)
+ intel_region_release_pbo(intelScreen, dst);
+ else
+ intel_region_cow(intelScreen, dst);
+ }
+
+ intelEmitFillBlit(intel,
+ dst->cpp,
+ dst->pitch, dst->buffer, dst_offset,
+ dstx, dsty, width, height, color);
+}
+
+/* Attach to a pbo, discarding our data. Effectively zero-copy upload
+ * the pbo's data.
+ */
+void
+intel_region_attach_pbo(intelScreenPrivate *intelScreen,
+ struct intel_region *region,
+ struct intel_buffer_object *pbo)
+{
+ if (region->pbo == pbo)
+ return;
+
+ /* If there is already a pbo attached, break the cow tie now.
+ * Don't call intel_region_release_pbo() as that would
+ * unnecessarily allocate a new buffer we would have to immediately
+ * discard.
+ */
+ if (region->pbo) {
+ region->pbo->region = NULL;
+ region->pbo = NULL;
+ }
+
+ if (region->buffer) {
+ driDeleteBuffers(1, &region->buffer);
+ region->buffer = NULL;
+ }
+
+ region->pbo = pbo;
+ region->pbo->region = region;
+ region->buffer = driBOReference(pbo->buffer);
+}
+
+
+/* Break the COW tie to the pbo. The pbo gets to keep the data.
+ */
+void
+intel_region_release_pbo(intelScreenPrivate *intelScreen,
+ struct intel_region *region)
+{
+ assert(region->buffer == region->pbo->buffer);
+ region->pbo->region = NULL;
+ region->pbo = NULL;
+ driBOUnReference(region->buffer);
+ region->buffer = NULL;
+
+ driGenBuffers(intelScreen->regionPool,
+ "region", 1, ®ion->buffer, 64, 0, 0);
+ driBOData(region->buffer,
+ region->cpp * region->pitch * region->height, NULL, 0);
+}
+
+/* Break the COW tie to the pbo. Both the pbo and the region end up
+ * with a copy of the data.
+ */
+void
+intel_region_cow(intelScreenPrivate *intelScreen, struct intel_region *region)
+{
+ struct intel_context *intel = intelScreenContext(intelScreen);
+ struct intel_buffer_object *pbo = region->pbo;
+
+ if (intel == NULL)
+ return;
+
+ intel_region_release_pbo(intelScreen, region);
+
+ assert(region->cpp * region->pitch * region->height == pbo->Base.Size);
+
+ DBG("%s (%d bytes)\n", __FUNCTION__, pbo->Base.Size);
+
+ /* Now blit from the texture buffer to the new buffer:
+ */
+
+ intel_batchbuffer_flush(intel->batch);
+
+ if (!intel->locked) {
+ LOCK_HARDWARE(intel);
+ intelEmitCopyBlit(intel,
+ region->cpp,
+ region->pitch,
+ region->buffer, 0,
+ region->pitch,
+ pbo->buffer, 0,
+ 0, 0, 0, 0, region->pitch, region->height);
+
+ intel_batchbuffer_flush(intel->batch);
+ UNLOCK_HARDWARE(intel);
+ }
+ else {
+ intelEmitCopyBlit(intel,
+ region->cpp,
+ region->pitch,
+ region->buffer, 0,
+ region->pitch,
+ pbo->buffer, 0,
+ 0, 0, 0, 0, region->pitch, region->height);
+
+ intel_batchbuffer_flush(intel->batch);
+ }
+}
+
+struct _DriBufferObject *
+intel_region_buffer(intelScreenPrivate *intelScreen,
+ struct intel_region *region, GLuint flag)
+{
+ if (region->pbo) {
+ if (flag == INTEL_WRITE_PART)
+ intel_region_cow(intelScreen, region);
+ else if (flag == INTEL_WRITE_FULL)
+ intel_region_release_pbo(intelScreen, region);
+ }
+
+ return region->buffer;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_REGIONS_H
+#define INTEL_REGIONS_H
+
+#include "mtypes.h"
+#include "intel_screen.h"
+
+struct intel_context;
+struct intel_buffer_object;
+
+/**
+ * A layer on top of the bufmgr buffers that adds a few useful things:
+ *
+ * - Refcounting for local buffer references.
+ * - Refcounting for buffer maps
+ * - Buffer dimensions - pitch and height.
+ * - Blitter commands for copying 2D regions between buffers. (really???)
+ */
+struct intel_region
+{
+ struct _DriBufferObject *buffer; /**< buffer manager's buffer ID */
+ GLuint refcount; /**< Reference count for region */
+ GLuint cpp; /**< bytes per pixel */
+ GLuint pitch; /**< in pixels */
+ GLuint height; /**< in pixels */
+ GLubyte *map; /**< only non-NULL when region is actually mapped */
+ GLuint map_refcount; /**< Reference count for mapping */
+
+ GLuint draw_offset; /**< Offset of drawing address within the region */
+
+ struct intel_buffer_object *pbo; /* zero-copy uploads */
+};
+
+
+/* Allocate a refcounted region. Pointers to regions should only be
+ * copied by calling intel_region_reference().
+ */
+struct intel_region *intel_region_alloc(intelScreenPrivate *intelScreen,
+ GLuint cpp,
+ GLuint pitch, GLuint height);
+
+void intel_region_reference(struct intel_region **dst,
+ struct intel_region *src);
+
+void intel_region_release(struct intel_region **ib);
+
+extern struct intel_region
+*intel_region_create_static(intelScreenPrivate *intelScreen,
+ GLuint mem_type,
+ GLuint offset,
+ void *virtual,
+ GLuint cpp,
+ GLuint pitch, GLuint height);
+extern void
+intel_region_update_static(intelScreenPrivate *intelScreen,
+ struct intel_region *region,
+ GLuint mem_type,
+ GLuint offset,
+ void *virtual,
+ GLuint cpp, GLuint pitch, GLuint height);
+
+
+void intel_region_idle(intelScreenPrivate *intelScreen,
+ struct intel_region *ib);
+
+/* Map/unmap regions. This is refcounted also:
+ */
+GLubyte *intel_region_map(intelScreenPrivate *intelScreen,
+ struct intel_region *ib);
+
+void intel_region_unmap(intelScreenPrivate *intelScreen, struct intel_region *ib);
+
+
+/* Upload data to a rectangular sub-region
+ */
+void intel_region_data(intelScreenPrivate *intelScreen,
+ struct intel_region *dest,
+ GLuint dest_offset,
+ GLuint destx, GLuint desty,
+ void *src, GLuint src_stride,
+ GLuint srcx, GLuint srcy, GLuint width, GLuint height);
+
+/* Copy rectangular sub-regions
+ */
+void intel_region_copy(intelScreenPrivate *intelScreen,
+ struct intel_region *dest,
+ GLuint dest_offset,
+ GLuint destx, GLuint desty,
+ struct intel_region *src,
+ GLuint src_offset,
+ GLuint srcx, GLuint srcy, GLuint width, GLuint height);
+
+/* Fill a rectangular sub-region
+ */
+void intel_region_fill(intelScreenPrivate *intelScreen,
+ struct intel_region *dest,
+ GLuint dest_offset,
+ GLuint destx, GLuint desty,
+ GLuint width, GLuint height, GLuint color);
+
+/* Helpers for zero-copy uploads, particularly texture image uploads:
+ */
+void intel_region_attach_pbo(intelScreenPrivate *intelScreen,
+ struct intel_region *region,
+ struct intel_buffer_object *pbo);
+void intel_region_release_pbo(intelScreenPrivate *intelScreen,
+ struct intel_region *region);
+void intel_region_cow(intelScreenPrivate *intelScreen,
+ struct intel_region *region);
+
+struct _DriBufferObject *intel_region_buffer(intelScreenPrivate *intelScreen,
+ struct intel_region *region,
+ GLuint flag);
+
+#endif
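A short usage sketch of the API declared above, assuming the caller already holds an intelScreenPrivate pointer; the sizes and the stride unit (taken here as bytes) are illustrative assumptions:

    /* Allocate a 256x256, 4-bytes-per-pixel region, upload pixels into
     * it, then drop the local reference.  intel_region_release() NULLs
     * the pointer and frees the region once its refcount reaches zero.
     */
    static void
    region_example(intelScreenPrivate *intelScreen, GLubyte *pixels)
    {
       struct intel_region *region =
          intel_region_alloc(intelScreen, 4 /* cpp */, 256 /* pitch */, 256);

       intel_region_data(intelScreen, region,
                         0,                 /* dest_offset */
                         0, 0,              /* destx, desty */
                         pixels, 256 * 4,   /* src, src_stride (assumed bytes) */
                         0, 0, 256, 256);   /* srcx, srcy, width, height */

       intel_region_release(&region);
    }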
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/*
+ * Render unclipped vertex buffers by emitting vertices directly to
+ * dma buffers. Use strip/fan hardware acceleration where possible.
+ *
+ */
+#include "glheader.h"
+#include "context.h"
+#include "macros.h"
+#include "imports.h"
+#include "mtypes.h"
+#include "enums.h"
+
+#include "tnl/t_context.h"
+#include "tnl/t_vertex.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_tris.h"
+#include "intel_batchbuffer.h"
+#include "intel_reg.h"
+
+/*
+ * Render unclipped vertex buffers by emitting vertices directly to
+ * dma buffers. Use strip/fan hardware primitives where possible.
+ * Try to simulate missing primitives with indexed vertices.
+ */
+#define HAVE_POINTS 0 /* Has it, but can't be used because the subpixel
+ * offset has to be adjusted for points on the INTEL/I845G
+ */
+#define HAVE_LINES 1
+#define HAVE_LINE_STRIPS 1
+#define HAVE_TRIANGLES 1
+#define HAVE_TRI_STRIPS 1
+#define HAVE_TRI_STRIP_1 0 /* has it, template can't use it yet */
+#define HAVE_TRI_FANS 1
+#define HAVE_POLYGONS 1
+#define HAVE_QUADS 0
+#define HAVE_QUAD_STRIPS 0
+
+#define HAVE_ELTS 0
+
+static GLuint hw_prim[GL_POLYGON + 1] = {
+ 0,
+ PRIM3D_LINELIST,
+ PRIM3D_LINESTRIP,
+ PRIM3D_LINESTRIP,
+ PRIM3D_TRILIST,
+ PRIM3D_TRISTRIP,
+ PRIM3D_TRIFAN,
+ 0,
+ 0,
+ PRIM3D_POLY
+};
+
+static const GLenum reduced_prim[GL_POLYGON + 1] = {
+ GL_POINTS,
+ GL_LINES,
+ GL_LINES,
+ GL_LINES,
+ GL_TRIANGLES,
+ GL_TRIANGLES,
+ GL_TRIANGLES,
+ GL_TRIANGLES,
+ GL_TRIANGLES,
+ GL_TRIANGLES
+};
+
+static const int scale_prim[GL_POLYGON + 1] = {
+ 0, /* fallback case */
+ 1,
+ 2,
+ 2,
+ 1,
+ 3,
+ 3,
+ 0, /* fallback case */
+ 0, /* fallback case */
+ 3
+};
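As a rough worked example of these scale factors: a GL_TRIANGLE_STRIP of 100 vertices yields about 98 triangles, and re-emitting those as an independent triangle list costs roughly 3 x 100 vertices, hence the factor of 3; a line strip similarly roughly doubles when converted to independent lines. choose_render() below uses these estimates to price the software fallback path.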
+
+
+static void
+intelDmaPrimitive(struct intel_context *intel, GLenum prim)
+{
+ if (0)
+ fprintf(stderr, "%s %s\n", __FUNCTION__, _mesa_lookup_enum_by_nr(prim));
+ INTEL_FIREVERTICES(intel);
+ intel->vtbl.reduced_primitive_state(intel, reduced_prim[prim]);
+ intelStartInlinePrimitive(intel, hw_prim[prim], INTEL_BATCH_CLIPRECTS);
+}
+
+
+#define LOCAL_VARS struct intel_context *intel = intel_context(ctx)
+#define INIT( prim ) \
+do { \
+ intelDmaPrimitive( intel, prim ); \
+} while (0)
+
+#define FLUSH() INTEL_FIREVERTICES(intel)
+
+#define GET_SUBSEQUENT_VB_MAX_VERTS() \
+ ((intel->batch->size - 1500) / (intel->vertex_size*4))
+#define GET_CURRENT_VB_MAX_VERTS() GET_SUBSEQUENT_VB_MAX_VERTS()
+
+#define ALLOC_VERTS( nr ) \
+ intelExtendInlinePrimitive( intel, (nr) * intel->vertex_size )
+
+#define EMIT_VERTS( ctx, j, nr, buf ) \
+ _tnl_emit_vertices_to_buffer(ctx, j, (j)+(nr), buf )
+
+#define TAG(x) intel_##x
+#include "tnl_dd/t_dd_dmatmp.h"
+
+
+/**********************************************************************/
+/* Render pipeline stage */
+/**********************************************************************/
+
+/* Heuristic to choose between the two render paths:
+ */
+static GLboolean
+choose_render(struct intel_context *intel, struct vertex_buffer *VB)
+{
+ int vertsz = intel->vertex_size;
+ int cost_render = 0;
+ int cost_fallback = 0;
+ int nr_prims = 0;
+ int nr_rprims = 0;
+ int nr_rverts = 0;
+ int rprim = intel->reduced_primitive;
+ int i = 0;
+
+ for (i = 0; i < VB->PrimitiveCount; i++) {
+ GLuint prim = VB->Primitive[i].mode;
+ GLuint length = VB->Primitive[i].count;
+
+ if (!length)
+ continue;
+
+ nr_prims++;
+ nr_rverts += length * scale_prim[prim & PRIM_MODE_MASK];
+
+ if (reduced_prim[prim & PRIM_MODE_MASK] != rprim) {
+ nr_rprims++;
+ rprim = reduced_prim[prim & PRIM_MODE_MASK];
+ }
+ }
+
+ /* One point for each generated primitive:
+ */
+ cost_render = nr_prims;
+ cost_fallback = nr_rprims;
+
+ /* One point for every 1024 dwords (4k) of dma:
+ */
+ cost_render += (vertsz * i) / 1024;
+ cost_fallback += (vertsz * nr_rverts) / 1024;
+
+ if (0)
+ fprintf(stderr, "cost render: %d fallback: %d\n",
+ cost_render, cost_fallback);
+
+ if (cost_render > cost_fallback)
+ return GL_FALSE;
+
+ return GL_TRUE;
+}
+
+
+static GLboolean
+intel_run_render(GLcontext * ctx, struct tnl_pipeline_stage *stage)
+{
+ struct intel_context *intel = intel_context(ctx);
+ TNLcontext *tnl = TNL_CONTEXT(ctx);
+ struct vertex_buffer *VB = &tnl->vb;
+ GLuint i;
+
+ /* Don't handle clipping or indexed vertices.
+ */
+ if (intel->RenderIndex != 0 ||
+ !intel_validate_render(ctx, VB) || !choose_render(intel, VB)) {
+ return GL_TRUE;
+ }
+
+ tnl->clipspace.new_inputs |= VERT_BIT_POS;
+
+ tnl->Driver.Render.Start(ctx);
+
+ for (i = 0; i < VB->PrimitiveCount; i++) {
+ GLuint prim = VB->Primitive[i].mode;
+ GLuint start = VB->Primitive[i].start;
+ GLuint length = VB->Primitive[i].count;
+
+ if (!length)
+ continue;
+
+ intel_render_tab_verts[prim & PRIM_MODE_MASK] (ctx, start,
+ start + length, prim);
+ }
+
+ tnl->Driver.Render.Finish(ctx);
+
+ INTEL_FIREVERTICES(intel);
+
+ return GL_FALSE; /* finished the pipe */
+}
+
+const struct tnl_pipeline_stage _intel_render_stage = {
+ "intel render",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ intel_run_render /* run */
+};
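The stage only takes effect once it is installed in the driver's TNL pipeline. That list lives elsewhere in the driver, so the sketch below is only illustrative of the usual Mesa TNL arrangement, not the driver's actual pipeline:

    /* Illustrative pipeline: the hardware render stage runs after the
     * fixed-function TNL stages and returns GL_TRUE (pass through) when
     * it cannot handle the primitives, letting the software
     * _tnl_render_stage after it pick them up.
     */
    static const struct tnl_pipeline_stage *intel_pipeline[] = {
       &_tnl_vertex_transform_stage,
       &_tnl_lighting_stage,
       &_intel_render_stage,
       &_tnl_render_stage,          /* software fallback */
       NULL,
    };

    /* Installed at context creation with something like:
     *    _tnl_destroy_pipeline(ctx);
     *    _tnl_install_pipeline(ctx, intel_pipeline);
     */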
--- /dev/null
+
+/**
+ * Routines for simple 2D->2D transformations for rotated, flipped screens.
+ *
+ * XXX This code is not intel-specific.  Move it into a common utility
+ * module someday.
+ */
+
+#include "intel_rotate.h"
+
+#define MIN2(A, B) ( ((A) < (B)) ? (A) : (B) )
+
+#define ABS(A) ( ((A) < 0) ? -(A) : (A) )
+
+
+void
+matrix23Set(struct matrix23 *m,
+ int m00, int m01, int m02, int m10, int m11, int m12)
+{
+ m->m00 = m00;
+ m->m01 = m01;
+ m->m02 = m02;
+ m->m10 = m10;
+ m->m11 = m11;
+ m->m12 = m12;
+}
+
+
+/*
+ * Transform (x,y) coordinate by the given matrix.
+ */
+void
+matrix23TransformCoordf(const struct matrix23 *m, float *x, float *y)
+{
+ const float x0 = *x;
+ const float y0 = *y;
+
+ *x = m->m00 * x0 + m->m01 * y0 + m->m02;
+ *y = m->m10 * x0 + m->m11 * y0 + m->m12;
+}
+
+
+void
+matrix23TransformCoordi(const struct matrix23 *m, int *x, int *y)
+{
+ const int x0 = *x;
+ const int y0 = *y;
+
+ *x = m->m00 * x0 + m->m01 * y0 + m->m02;
+ *y = m->m10 * x0 + m->m11 * y0 + m->m12;
+}
+
+
+/*
+ * Transform a width and height by the given matrix.
+ * XXX this could be optimized quite a bit.
+ */
+void
+matrix23TransformDistance(const struct matrix23 *m, int *xDist, int *yDist)
+{
+ int x0 = 0, y0 = 0;
+ int x1 = *xDist, y1 = 0;
+ int x2 = 0, y2 = *yDist;
+ matrix23TransformCoordi(m, &x0, &y0);
+ matrix23TransformCoordi(m, &x1, &y1);
+ matrix23TransformCoordi(m, &x2, &y2);
+
+ *xDist = (x1 - x0) + (x2 - x0);
+ *yDist = (y1 - y0) + (y2 - y0);
+
+ if (*xDist < 0)
+ *xDist = -*xDist;
+ if (*yDist < 0)
+ *yDist = -*yDist;
+}
+
+
+/**
+ * Transform the rect defined by (x, y, w, h) by m.
+ */
+void
+matrix23TransformRect(const struct matrix23 *m, int *x, int *y, int *w,
+ int *h)
+{
+ int x0 = *x, y0 = *y;
+ int x1 = *x + *w, y1 = *y;
+ int x2 = *x + *w, y2 = *y + *h;
+ int x3 = *x, y3 = *y + *h;
+ matrix23TransformCoordi(m, &x0, &y0);
+ matrix23TransformCoordi(m, &x1, &y1);
+ matrix23TransformCoordi(m, &x2, &y2);
+ matrix23TransformCoordi(m, &x3, &y3);
+ *w = ABS(x1 - x0) + ABS(x2 - x1);
+ /**w = ABS(*w);*/
+ *h = ABS(y1 - y0) + ABS(y2 - y1);
+ /**h = ABS(*h);*/
+ *x = MIN2(x0, x1);
+ *x = MIN2(*x, x2);
+ *y = MIN2(y0, y1);
+ *y = MIN2(*y, y2);
+}
+
+
+/*
+ * Make rotation matrix for width X height screen.
+ */
+void
+matrix23Rotate(struct matrix23 *m, int width, int height, int angle)
+{
+ switch (angle) {
+ case 0:
+ matrix23Set(m, 1, 0, 0, 0, 1, 0);
+ break;
+ case 90:
+ matrix23Set(m, 0, 1, 0, -1, 0, width);
+ break;
+ case 180:
+ matrix23Set(m, -1, 0, width, 0, -1, height);
+ break;
+ case 270:
+ matrix23Set(m, 0, -1, height, 1, 0, 0);
+ break;
+ default:
+ /*abort() */ ;
+ }
+}
+
+
+/*
+ * Make flip/reflection matrix for width X height screen.
+ */
+void
+matrix23Flip(struct matrix23 *m, int width, int height, int xflip, int yflip)
+{
+ if (xflip) {
+ m->m00 = -1;
+ m->m01 = 0;
+ m->m02 = width - 1;
+ }
+ else {
+ m->m00 = 1;
+ m->m01 = 0;
+ m->m02 = 0;
+ }
+ if (yflip) {
+ m->m10 = 0;
+ m->m11 = -1;
+ m->m12 = height - 1;
+ }
+ else {
+ m->m10 = 0;
+ m->m11 = 1;
+ m->m12 = 0;
+ }
+}
+
+
+/*
+ * result = a * b
+ */
+void
+matrix23Multiply(struct matrix23 *result,
+ const struct matrix23 *a, const struct matrix23 *b)
+{
+ result->m00 = a->m00 * b->m00 + a->m01 * b->m10;
+ result->m01 = a->m00 * b->m01 + a->m01 * b->m11;
+ result->m02 = a->m00 * b->m02 + a->m01 * b->m12 + a->m02;
+
+ result->m10 = a->m10 * b->m00 + a->m11 * b->m10;
+ result->m11 = a->m10 * b->m01 + a->m11 * b->m11;
+ result->m12 = a->m10 * b->m02 + a->m11 * b->m12 + a->m12;
+}
+
+
+#if 000
+
+#include <stdio.h>
+
+int
+main(int argc, char *argv[])
+{
+ int width = 500, height = 400;
+ int rot;
+ int fx = 0, fy = 0; /* flip x and/or y ? */
+ int coords[4][2];
+
+ /* four corner coords to test with */
+ coords[0][0] = 0;
+ coords[0][1] = 0;
+ coords[1][0] = width - 1;
+ coords[1][1] = 0;
+ coords[2][0] = width - 1;
+ coords[2][1] = height - 1;
+ coords[3][0] = 0;
+ coords[3][1] = height - 1;
+
+
+ for (rot = 0; rot < 360; rot += 90) {
+ struct matrix23 rotate, flip, m;
+ int i;
+
+ printf("Rot %d, xFlip %d, yFlip %d:\n", rot, fx, fy);
+
+ /* make transformation matrix 'm' */
+ matrix23Rotate(&rotate, width, height, rot);
+ matrix23Flip(&flip, width, height, fx, fy);
+ matrix23Multiply(&m, &rotate, &flip);
+
+ /* xform four coords */
+ for (i = 0; i < 4; i++) {
+ int x = coords[i][0];
+ int y = coords[i][1];
+ matrix23TransformCoordi(&m, &x, &y);
+ printf(" %d, %d -> %d %d\n", coords[i][0], coords[i][1], x, y);
+ }
+
+ /* xform width, height */
+ {
+ int x = width;
+ int y = height;
+ matrix23TransformDistance(&m, &x, &y);
+ printf(" %d x %d -> %d x %d\n", width, height, x, y);
+ }
+
+ /* xform rect */
+ {
+ int x = 50, y = 10, w = 200, h = 100;
+ matrix23TransformRect(&m, &x, &y, &w, &h);
+ printf(" %d,%d %d x %d -> %d, %d %d x %d\n", 50, 10, 200, 100,
+ x, y, w, h);
+ }
+
+ }
+
+ return 0;
+}
+#endif
--- /dev/null
+#ifndef INTEL_ROTATE_H
+#define INTEL_ROTATE_H 1
+
+struct matrix23
+{
+ int m00, m01, m02;
+ int m10, m11, m12;
+};
+
+
+
+extern void
+matrix23Set(struct matrix23 *m,
+ int m00, int m01, int m02, int m10, int m11, int m12);
+
+extern void matrix23TransformCoordi(const struct matrix23 *m, int *x, int *y);
+
+extern void
+matrix23TransformCoordf(const struct matrix23 *m, float *x, float *y);
+
+extern void
+matrix23TransformDistance(const struct matrix23 *m, int *xDist, int *yDist);
+
+extern void
+matrix23TransformRect(const struct matrix23 *m,
+ int *x, int *y, int *w, int *h);
+
+extern void
+matrix23Rotate(struct matrix23 *m, int width, int height, int angle);
+
+extern void
+matrix23Flip(struct matrix23 *m, int width, int height, int xflip, int yflip);
+
+extern void
+matrix23Multiply(struct matrix23 *result,
+ const struct matrix23 *a, const struct matrix23 *b);
+
+
+#endif /* INTEL_ROTATE_H */
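A small worked example of the interface, assuming a 500x400 screen rotated by 90 degrees; the results follow directly from matrix23Rotate()'s 90-degree case, which sets (m00..m12) = (0, 1, 0, -1, 0, width):

    struct matrix23 m;
    int x = 10, y = 20;

    matrix23Rotate(&m, 500, 400, 90);
    matrix23TransformCoordi(&m, &x, &y);   /* x -> 20, y -> 500 - 10 = 490 */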
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "context.h"
+#include "framebuffer.h"
+#include "matrix.h"
+#include "renderbuffer.h"
+#include "simple_list.h"
+#include "utils.h"
+#include "vblank.h"
+#include "xmlpool.h"
+
+
+#include "intel_screen.h"
+
+#include "intel_buffers.h"
+#include "intel_tex.h"
+#include "intel_span.h"
+#include "intel_tris.h"
+#include "intel_ioctl.h"
+#include "intel_fbo.h"
+
+#include "i830_dri.h"
+#include "dri_bufpool.h"
+#include "intel_regions.h"
+#include "intel_batchbuffer.h"
+
+PUBLIC const char __driConfigOptions[] =
+ DRI_CONF_BEGIN DRI_CONF_SECTION_PERFORMANCE
+ DRI_CONF_FTHROTTLE_MODE(DRI_CONF_FTHROTTLE_IRQS)
+ DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_DEF_INTERVAL_0)
+ DRI_CONF_SECTION_END DRI_CONF_SECTION_QUALITY
+ DRI_CONF_FORCE_S3TC_ENABLE(false)
+ DRI_CONF_ALLOW_LARGE_TEXTURES(1)
+ DRI_CONF_SECTION_END DRI_CONF_END;
+ const GLuint __driNConfigOptions = 4;
+
+#ifdef USE_NEW_INTERFACE
+ static PFNGLXCREATECONTEXTMODES create_context_modes = NULL;
+#endif /*USE_NEW_INTERFACE */
+
+ extern const struct dri_extension card_extensions[];
+
+/**
+ * Map all the memory regions described by the screen.
+ * \return GL_TRUE if success, GL_FALSE if error.
+ */
+GLboolean
+intelMapScreenRegions(__DRIscreenPrivate * sPriv)
+{
+ intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
+
+ if (intelScreen->front.handle) {
+ if (drmMap(sPriv->fd,
+ intelScreen->front.handle,
+ intelScreen->front.size,
+ (drmAddress *) & intelScreen->front.map) != 0) {
+ _mesa_problem(NULL, "drmMap(frontbuffer) failed!");
+ return GL_FALSE;
+ }
+ }
+ else {
+ _mesa_warning(NULL, "no front buffer handle in intelMapScreenRegions!");
+ }
+
+ if (0)
+ _mesa_printf("Back 0x%08x ", intelScreen->back.handle);
+ if (drmMap(sPriv->fd,
+ intelScreen->back.handle,
+ intelScreen->back.size,
+ (drmAddress *) & intelScreen->back.map) != 0) {
+ intelUnmapScreenRegions(intelScreen);
+ return GL_FALSE;
+ }
+
+ if (0)
+ _mesa_printf("Depth 0x%08x ", intelScreen->depth.handle);
+ if (drmMap(sPriv->fd,
+ intelScreen->depth.handle,
+ intelScreen->depth.size,
+ (drmAddress *) & intelScreen->depth.map) != 0) {
+ intelUnmapScreenRegions(intelScreen);
+ return GL_FALSE;
+ }
+
+#if 0
+ _mesa_printf("TEX 0x%08x ", intelScreen->tex.handle);
+ if (drmMap(sPriv->fd,
+ intelScreen->tex.handle,
+ intelScreen->tex.size,
+ (drmAddress *) & intelScreen->tex.map) != 0) {
+ intelUnmapScreenRegions(intelScreen);
+ return GL_FALSE;
+ }
+#endif
+ if (0)
+ printf("Mappings: front: %p back: %p depth: %p tex: %p\n",
+ intelScreen->front.map,
+ intelScreen->back.map,
+ intelScreen->depth.map, intelScreen->tex.map);
+ return GL_TRUE;
+}
+
+
+static struct intel_region *
+intel_recreate_static(intelScreenPrivate *intelScreen,
+ struct intel_region *region,
+ GLuint mem_type,
+ GLuint offset,
+ void *virtual,
+ GLuint cpp, GLuint pitch, GLuint height)
+{
+ if (region) {
+ intel_region_update_static(intelScreen, region, mem_type, offset,
+ virtual, cpp, pitch, height);
+ } else {
+ region = intel_region_create_static(intelScreen, mem_type, offset,
+ virtual, cpp, pitch, height);
+ }
+ return region;
+}
+
+
+/* Create intel_region structs to describe the static front, back and
+ * depth buffers created by the X server.
+ *
+ * Although FBOs mean we no longer use these as render targets in
+ * all circumstances, they won't go away until the back and depth
+ * buffers become private, and the front and rotated buffers will
+ * remain even then.
+ *
+ * Note that these don't allocate video memory, just describe
+ * allocations already made by the X server.
+ */
+static void
+intel_recreate_static_regions(intelScreenPrivate *intelScreen)
+{
+ intelScreen->front_region =
+ intel_recreate_static(intelScreen,
+ intelScreen->front_region,
+ DRM_BO_FLAG_MEM_TT,
+ intelScreen->front.offset,
+ intelScreen->front.map,
+ intelScreen->cpp,
+ intelScreen->front.pitch / intelScreen->cpp,
+ intelScreen->height);
+
+ intelScreen->rotated_region =
+ intel_recreate_static(intelScreen,
+ intelScreen->rotated_region,
+ DRM_BO_FLAG_MEM_TT,
+ intelScreen->rotated.offset,
+ intelScreen->rotated.map,
+ intelScreen->cpp,
+ intelScreen->rotated.pitch /
+ intelScreen->cpp, intelScreen->height);
+
+
+ intelScreen->back_region =
+ intel_recreate_static(intelScreen,
+ intelScreen->back_region,
+ DRM_BO_FLAG_MEM_TT,
+ intelScreen->back.offset,
+ intelScreen->back.map,
+ intelScreen->cpp,
+ intelScreen->back.pitch / intelScreen->cpp,
+ intelScreen->height);
+
+ /* Still assuming front.cpp == depth.cpp
+ */
+ intelScreen->depth_region =
+ intel_recreate_static(intelScreen,
+ intelScreen->depth_region,
+ DRM_BO_FLAG_MEM_TT,
+ intelScreen->depth.offset,
+ intelScreen->depth.map,
+ intelScreen->cpp,
+ intelScreen->depth.pitch / intelScreen->cpp,
+ intelScreen->height);
+}
+
+/**
+ * Use the information in the sarea to update the screen parameters
+ * related to screen rotation. Needs to be called locked.
+ */
+void
+intelUpdateScreenRotation(__DRIscreenPrivate * sPriv, drmI830Sarea * sarea)
+{
+ intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
+
+ intelUnmapScreenRegions(intelScreen);
+ intelUpdateScreenFromSAREA(intelScreen, sarea);
+ if (!intelMapScreenRegions(sPriv)) {
+ fprintf(stderr, "ERROR Remapping screen regions!!!\n");
+ }
+ intel_recreate_static_regions(intelScreen);
+}
+
+
+void
+intelUnmapScreenRegions(intelScreenPrivate * intelScreen)
+{
+#define REALLY_UNMAP 1
+ if (intelScreen->front.map) {
+#if REALLY_UNMAP
+ if (drmUnmap(intelScreen->front.map, intelScreen->front.size) != 0)
+ printf("drmUnmap front failed!\n");
+#endif
+ intelScreen->front.map = NULL;
+ }
+ if (intelScreen->back.map) {
+#if REALLY_UNMAP
+ if (drmUnmap(intelScreen->back.map, intelScreen->back.size) != 0)
+ printf("drmUnmap back failed!\n");
+#endif
+ intelScreen->back.map = NULL;
+ }
+ if (intelScreen->depth.map) {
+#if REALLY_UNMAP
+ drmUnmap(intelScreen->depth.map, intelScreen->depth.size);
+ intelScreen->depth.map = NULL;
+#endif
+ }
+ if (intelScreen->tex.map) {
+#if REALLY_UNMAP
+ drmUnmap(intelScreen->tex.map, intelScreen->tex.size);
+ intelScreen->tex.map = NULL;
+#endif
+ }
+}
+
+
+static void
+intelPrintDRIInfo(intelScreenPrivate * intelScreen,
+ __DRIscreenPrivate * sPriv, I830DRIPtr gDRIPriv)
+{
+ fprintf(stderr, "*** Front size: 0x%x offset: 0x%x pitch: %d\n",
+ intelScreen->front.size, intelScreen->front.offset,
+ intelScreen->front.pitch);
+ fprintf(stderr, "*** Back size: 0x%x offset: 0x%x pitch: %d\n",
+ intelScreen->back.size, intelScreen->back.offset,
+ intelScreen->back.pitch);
+ fprintf(stderr, "*** Depth size: 0x%x offset: 0x%x pitch: %d\n",
+ intelScreen->depth.size, intelScreen->depth.offset,
+ intelScreen->depth.pitch);
+ fprintf(stderr, "*** Rotated size: 0x%x offset: 0x%x pitch: %d\n",
+ intelScreen->rotated.size, intelScreen->rotated.offset,
+ intelScreen->rotated.pitch);
+ fprintf(stderr, "*** Texture size: 0x%x offset: 0x%x\n",
+ intelScreen->tex.size, intelScreen->tex.offset);
+ fprintf(stderr, "*** Memory : 0x%x\n", gDRIPriv->mem);
+}
+
+
+static void
+intelPrintSAREA(const drmI830Sarea * sarea)
+{
+ fprintf(stderr, "SAREA: sarea width %d height %d\n", sarea->width,
+ sarea->height);
+ fprintf(stderr, "SAREA: pitch: %d\n", sarea->pitch);
+ fprintf(stderr,
+ "SAREA: front offset: 0x%08x size: 0x%x handle: 0x%x\n",
+ sarea->front_offset, sarea->front_size,
+ (unsigned) sarea->front_handle);
+ fprintf(stderr,
+ "SAREA: back offset: 0x%08x size: 0x%x handle: 0x%x\n",
+ sarea->back_offset, sarea->back_size,
+ (unsigned) sarea->back_handle);
+ fprintf(stderr, "SAREA: depth offset: 0x%08x size: 0x%x handle: 0x%x\n",
+ sarea->depth_offset, sarea->depth_size,
+ (unsigned) sarea->depth_handle);
+ fprintf(stderr, "SAREA: tex offset: 0x%08x size: 0x%x handle: 0x%x\n",
+ sarea->tex_offset, sarea->tex_size, (unsigned) sarea->tex_handle);
+ fprintf(stderr, "SAREA: rotation: %d\n", sarea->rotation);
+ fprintf(stderr,
+ "SAREA: rotated offset: 0x%08x size: 0x%x\n",
+ sarea->rotated_offset, sarea->rotated_size);
+ fprintf(stderr, "SAREA: rotated pitch: %d\n", sarea->rotated_pitch);
+}
+
+
+/**
+ * A number of the screen parameters are obtained/computed from
+ * information in the SAREA. This function updates those parameters.
+ */
+void
+intelUpdateScreenFromSAREA(intelScreenPrivate * intelScreen,
+ drmI830Sarea * sarea)
+{
+ intelScreen->width = sarea->width;
+ intelScreen->height = sarea->height;
+
+ intelScreen->front.offset = sarea->front_offset;
+ intelScreen->front.pitch = sarea->pitch * intelScreen->cpp;
+ intelScreen->front.handle = sarea->front_handle;
+ intelScreen->front.size = sarea->front_size;
+
+ intelScreen->back.offset = sarea->back_offset;
+ intelScreen->back.pitch = sarea->pitch * intelScreen->cpp;
+ intelScreen->back.handle = sarea->back_handle;
+ intelScreen->back.size = sarea->back_size;
+
+ intelScreen->depth.offset = sarea->depth_offset;
+ intelScreen->depth.pitch = sarea->pitch * intelScreen->cpp;
+ intelScreen->depth.handle = sarea->depth_handle;
+ intelScreen->depth.size = sarea->depth_size;
+
+ intelScreen->tex.offset = sarea->tex_offset;
+ intelScreen->logTextureGranularity = sarea->log_tex_granularity;
+ intelScreen->tex.handle = sarea->tex_handle;
+ intelScreen->tex.size = sarea->tex_size;
+
+ intelScreen->rotated.offset = sarea->rotated_offset;
+ intelScreen->rotated.pitch = sarea->rotated_pitch * intelScreen->cpp;
+ intelScreen->rotated.size = sarea->rotated_size;
+ intelScreen->current_rotation = sarea->rotation;
+ matrix23Rotate(&intelScreen->rotMatrix,
+ sarea->width, sarea->height, sarea->rotation);
+ intelScreen->rotatedWidth = sarea->virtualX;
+ intelScreen->rotatedHeight = sarea->virtualY;
+
+ if (0)
+ intelPrintSAREA(sarea);
+}
+
+
+static GLboolean
+intelInitDriver(__DRIscreenPrivate * sPriv)
+{
+ intelScreenPrivate *intelScreen;
+ I830DRIPtr gDRIPriv = (I830DRIPtr) sPriv->pDevPriv;
+ drmI830Sarea *sarea;
+ unsigned batchPoolSize = 1024*1024;
+
+ PFNGLXSCRENABLEEXTENSIONPROC glx_enable_extension =
+ (PFNGLXSCRENABLEEXTENSIONPROC) (*dri_interface->
+ getProcAddress("glxEnableExtension"));
+ void *const psc = sPriv->psc->screenConfigs;
+
+ if (sPriv->devPrivSize != sizeof(I830DRIRec)) {
+ fprintf(stderr,
+ "\nERROR! sizeof(I830DRIRec) does not match passed size from device driver\n");
+ return GL_FALSE;
+ }
+
+ /* Allocate the private area */
+ intelScreen = (intelScreenPrivate *) CALLOC(sizeof(intelScreenPrivate));
+ if (!intelScreen) {
+ fprintf(stderr, "\nERROR! Allocating private area failed\n");
+ return GL_FALSE;
+ }
+ /* parse information in __driConfigOptions */
+ driParseOptionInfo(&intelScreen->optionCache,
+ __driConfigOptions, __driNConfigOptions);
+
+ intelScreen->driScrnPriv = sPriv;
+ sPriv->private = (void *) intelScreen;
+ intelScreen->sarea_priv_offset = gDRIPriv->sarea_priv_offset;
+ sarea = (drmI830Sarea *)
+ (((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);
+
+ intelScreen->maxBatchSize = BATCH_SZ;
+ intelScreen->deviceID = gDRIPriv->deviceID;
+ if (intelScreen->deviceID == PCI_CHIP_I865_G)
+ intelScreen->maxBatchSize = 4096;
+ batchPoolSize /= intelScreen->maxBatchSize;
+
+ intelScreen->mem = gDRIPriv->mem;
+ intelScreen->cpp = gDRIPriv->cpp;
+
+ switch (gDRIPriv->bitsPerPixel) {
+ case 16:
+ intelScreen->fbFormat = DV_PF_565;
+ break;
+ case 32:
+ intelScreen->fbFormat = DV_PF_8888;
+ break;
+ default:
+ exit(1);
+ break;
+ }
+
+ intelUpdateScreenFromSAREA(intelScreen, sarea);
+
+ if (!intelMapScreenRegions(sPriv)) {
+ fprintf(stderr, "\nERROR! mapping regions\n");
+ _mesa_free(intelScreen);
+ sPriv->private = NULL;
+ return GL_FALSE;
+ }
+
+#if 0
+
+ /*
+ * FIXME: Remove this code and its references.
+ */
+
+ intelScreen->tex.offset = gDRIPriv->textureOffset;
+ intelScreen->logTextureGranularity = gDRIPriv->logTextureGranularity;
+ intelScreen->tex.handle = gDRIPriv->textures;
+ intelScreen->tex.size = gDRIPriv->textureSize;
+
+#else
+ intelScreen->tex.offset = 0;
+ intelScreen->logTextureGranularity = 0;
+ intelScreen->tex.handle = 0;
+ intelScreen->tex.size = 0;
+#endif
+
+ intelScreen->sarea_priv_offset = gDRIPriv->sarea_priv_offset;
+
+ if (0)
+ intelPrintDRIInfo(intelScreen, sPriv, gDRIPriv);
+
+ intelScreen->drmMinor = sPriv->drmMinor;
+
+ /* Determine whether IRQs are active */
+ {
+ int ret;
+ drmI830GetParam gp;
+
+ gp.param = I830_PARAM_IRQ_ACTIVE;
+ gp.value = &intelScreen->irq_active;
+
+ ret = drmCommandWriteRead(sPriv->fd, DRM_I830_GETPARAM,
+ &gp, sizeof(gp));
+ if (ret) {
+ fprintf(stderr, "drmI830GetParam: %d\n", ret);
+ return GL_FALSE;
+ }
+ }
+
+ /* Determine if batchbuffers are allowed */
+ {
+ int ret;
+ drmI830GetParam gp;
+
+ gp.param = I830_PARAM_ALLOW_BATCHBUFFER;
+ gp.value = &intelScreen->allow_batchbuffer;
+
+ ret = drmCommandWriteRead(sPriv->fd, DRM_I830_GETPARAM,
+ &gp, sizeof(gp));
+ if (ret) {
+ fprintf(stderr, "drmI830GetParam: (%d) %d\n", gp.param, ret);
+ return GL_FALSE;
+ }
+ }
+
+ if (glx_enable_extension != NULL) {
+ (*glx_enable_extension) (psc, "GLX_SGI_swap_control");
+ (*glx_enable_extension) (psc, "GLX_SGI_video_sync");
+ (*glx_enable_extension) (psc, "GLX_MESA_swap_control");
+ (*glx_enable_extension) (psc, "GLX_MESA_swap_frame_usage");
+ (*glx_enable_extension) (psc, "GLX_SGI_make_current_read");
+ }
+
+ intelScreen->regionPool = driDRMPoolInit(sPriv->fd);
+
+ if (!intelScreen->regionPool)
+ return GL_FALSE;
+
+ intelScreen->staticPool = driDRMStaticPoolInit(sPriv->fd);
+
+ if (!intelScreen->staticPool)
+ return GL_FALSE;
+
+ intelScreen->texPool = intelScreen->regionPool;
+
+ intelScreen->batchPool = driBatchPoolInit(sPriv->fd,
+ DRM_BO_FLAG_EXE |
+ DRM_BO_FLAG_MEM_TT |
+ DRM_BO_FLAG_MEM_LOCAL,
+ intelScreen->maxBatchSize,
+ batchPoolSize, 5);
+ if (!intelScreen->batchPool) {
+ fprintf(stderr, "Failed to initialize batch pool - possibly an incorrect agpgart module is installed\n");
+ return GL_FALSE;
+ }
+
+ intel_recreate_static_regions(intelScreen);
+
+ return GL_TRUE;
+}
+
+
+static void
+intelDestroyScreen(__DRIscreenPrivate * sPriv)
+{
+ intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
+
+ intelUnmapScreenRegions(intelScreen);
+
+ driPoolTakeDown(intelScreen->regionPool);
+ driPoolTakeDown(intelScreen->staticPool);
+ driPoolTakeDown(intelScreen->batchPool);
+ FREE(intelScreen);
+ sPriv->private = NULL;
+}
+
+
+/**
+ * This is called when we need to set up GL rendering to a new X window.
+ */
+static GLboolean
+intelCreateBuffer(__DRIscreenPrivate * driScrnPriv,
+ __DRIdrawablePrivate * driDrawPriv,
+ const __GLcontextModes * mesaVis, GLboolean isPixmap)
+{
+ intelScreenPrivate *screen = (intelScreenPrivate *) driScrnPriv->private;
+
+ if (isPixmap) {
+ return GL_FALSE; /* not implemented */
+ }
+ else {
+ GLboolean swStencil = (mesaVis->stencilBits > 0 &&
+ mesaVis->depthBits != 24);
+ GLenum rgbFormat = (mesaVis->redBits == 5 ? GL_RGB5 : GL_RGBA8);
+
+ struct gl_framebuffer *fb = _mesa_create_framebuffer(mesaVis);
+
+ /* setup the hardware-based renderbuffers */
+ {
+ struct intel_renderbuffer *frontRb
+ = intel_create_renderbuffer(rgbFormat,
+ screen->width, screen->height,
+ screen->front.offset,
+ screen->front.pitch,
+ screen->cpp,
+ screen->front.map);
+ intel_set_span_functions(&frontRb->Base);
+ _mesa_add_renderbuffer(fb, BUFFER_FRONT_LEFT, &frontRb->Base);
+ }
+
+ if (mesaVis->doubleBufferMode) {
+ struct intel_renderbuffer *backRb
+ = intel_create_renderbuffer(rgbFormat,
+ screen->width, screen->height,
+ screen->back.offset,
+ screen->back.pitch,
+ screen->cpp,
+ screen->back.map);
+ intel_set_span_functions(&backRb->Base);
+ _mesa_add_renderbuffer(fb, BUFFER_BACK_LEFT, &backRb->Base);
+ }
+
+ if (mesaVis->depthBits == 24 && mesaVis->stencilBits == 8) {
+ /* combined depth/stencil buffer */
+ struct intel_renderbuffer *depthStencilRb
+ = intel_create_renderbuffer(GL_DEPTH24_STENCIL8_EXT,
+ screen->width, screen->height,
+ screen->depth.offset,
+ screen->depth.pitch,
+ screen->cpp, /* 4! */
+ screen->depth.map);
+ intel_set_span_functions(&depthStencilRb->Base);
+ /* note: bind RB to two attachment points */
+ _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &depthStencilRb->Base);
+ _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &depthStencilRb->Base);
+ }
+ else if (mesaVis->depthBits == 16) {
+ /* just 16-bit depth buffer, no hw stencil */
+ struct intel_renderbuffer *depthRb
+ = intel_create_renderbuffer(GL_DEPTH_COMPONENT16,
+ screen->width, screen->height,
+ screen->depth.offset,
+ screen->depth.pitch,
+ screen->cpp, /* 2! */
+ screen->depth.map);
+ intel_set_span_functions(&depthRb->Base);
+ _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &depthRb->Base);
+ }
+
+ /* now add any/all software-based renderbuffers we may need */
+ _mesa_add_soft_renderbuffers(fb, GL_FALSE, /* never sw color */
+ GL_FALSE, /* never sw depth */
+ swStencil, mesaVis->accumRedBits > 0, GL_FALSE, /* never sw alpha */
+ GL_FALSE /* never sw aux */ );
+ driDrawPriv->driverPrivate = (void *) fb;
+
+ return (driDrawPriv->driverPrivate != NULL);
+ }
+}
+
+static void
+intelDestroyBuffer(__DRIdrawablePrivate * driDrawPriv)
+{
+ _mesa_destroy_framebuffer((GLframebuffer *) (driDrawPriv->driverPrivate));
+}
+
+
+/**
+ * Get information about previous buffer swaps.
+ */
+static int
+intelGetSwapInfo(__DRIdrawablePrivate * dPriv, __DRIswapInfo * sInfo)
+{
+ struct intel_context *intel;
+
+ if ((dPriv == NULL) || (dPriv->driContextPriv == NULL)
+ || (dPriv->driContextPriv->driverPrivate == NULL)
+ || (sInfo == NULL)) {
+ return -1;
+ }
+
+ intel = dPriv->driContextPriv->driverPrivate;
+ sInfo->swap_count = intel->swap_count;
+ sInfo->swap_ust = intel->swap_ust;
+ sInfo->swap_missed_count = intel->swap_missed_count;
+
+ sInfo->swap_missed_usage = (sInfo->swap_missed_count != 0)
+ ? driCalculateSwapUsage(dPriv, 0, intel->swap_missed_ust)
+ : 0.0;
+
+ return 0;
+}
+
+
+/* There are probably better ways to do this, such as an
+ * init-designated function to register chipids and createcontext
+ * functions.
+ */
+extern GLboolean i830CreateContext(const __GLcontextModes * mesaVis,
+ __DRIcontextPrivate * driContextPriv,
+ void *sharedContextPrivate);
+
+extern GLboolean i915CreateContext(const __GLcontextModes * mesaVis,
+ __DRIcontextPrivate * driContextPriv,
+ void *sharedContextPrivate);
+
+
+
+
+static GLboolean
+intelCreateContext(const __GLcontextModes * mesaVis,
+ __DRIcontextPrivate * driContextPriv,
+ void *sharedContextPrivate)
+{
+ __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
+ intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
+
+ switch (intelScreen->deviceID) {
+ /* Don't deal with i830 until the texture work is complete:
+ */
+ case PCI_CHIP_845_G:
+ case PCI_CHIP_I830_M:
+ case PCI_CHIP_I855_GM:
+ case PCI_CHIP_I865_G:
+ return i830CreateContext(mesaVis, driContextPriv, sharedContextPrivate);
+
+ case PCI_CHIP_I915_G:
+ case PCI_CHIP_I915_GM:
+ case PCI_CHIP_I945_G:
+ case PCI_CHIP_I945_GM:
+ return i915CreateContext(mesaVis, driContextPriv, sharedContextPrivate);
+
+ default:
+ fprintf(stderr, "Unrecognized deviceID %x\n", intelScreen->deviceID);
+ return GL_FALSE;
+ }
+}
+
+
+static const struct __DriverAPIRec intelAPI = {
+ .InitDriver = intelInitDriver,
+ .DestroyScreen = intelDestroyScreen,
+ .CreateContext = intelCreateContext,
+ .DestroyContext = intelDestroyContext,
+ .CreateBuffer = intelCreateBuffer,
+ .DestroyBuffer = intelDestroyBuffer,
+ .SwapBuffers = intelSwapBuffers,
+ .MakeCurrent = intelMakeCurrent,
+ .UnbindContext = intelUnbindContext,
+ .GetSwapInfo = intelGetSwapInfo,
+ .GetMSC = driGetMSC32,
+ .WaitForMSC = driWaitForMSC32,
+ .WaitForSBC = NULL,
+ .SwapBuffersMSC = NULL,
+ .CopySubBuffer = intelCopySubBuffer
+};
+
+
+static __GLcontextModes *
+intelFillInModes(unsigned pixel_bits, unsigned depth_bits,
+ unsigned stencil_bits, GLboolean have_back_buffer)
+{
+ __GLcontextModes *modes;
+ __GLcontextModes *m;
+ unsigned num_modes;
+ unsigned depth_buffer_factor;
+ unsigned back_buffer_factor;
+ GLenum fb_format;
+ GLenum fb_type;
+
+ /* GLX_SWAP_COPY_OML is only supported because the Intel driver doesn't
+ * support pageflipping at all.
+ */
+ static const GLenum back_buffer_modes[] = {
+ GLX_NONE, GLX_SWAP_UNDEFINED_OML, GLX_SWAP_COPY_OML
+ };
+
+ u_int8_t depth_bits_array[3];
+ u_int8_t stencil_bits_array[3];
+
+
+ depth_bits_array[0] = 0;
+ depth_bits_array[1] = depth_bits;
+ depth_bits_array[2] = depth_bits;
+
+ /* Just like with the accumulation buffer, always provide some modes
+ * with a stencil buffer. It will be a sw fallback, but some apps won't
+ * care about that.
+ */
+ stencil_bits_array[0] = 0;
+ stencil_bits_array[1] = 0;
+ stencil_bits_array[2] = (stencil_bits == 0) ? 8 : stencil_bits;
+
+ depth_buffer_factor = ((depth_bits != 0) || (stencil_bits != 0)) ? 3 : 1;
+ back_buffer_factor = (have_back_buffer) ? 3 : 1;
+
+ num_modes = depth_buffer_factor * back_buffer_factor * 4;
+
+ if (pixel_bits == 16) {
+ fb_format = GL_RGB;
+ fb_type = GL_UNSIGNED_SHORT_5_6_5;
+ }
+ else {
+ fb_format = GL_BGRA;
+ fb_type = GL_UNSIGNED_INT_8_8_8_8_REV;
+ }
+
+ modes =
+ (*dri_interface->createContextModes) (num_modes,
+ sizeof(__GLcontextModes));
+ m = modes;
+ if (!driFillInModes(&m, fb_format, fb_type,
+ depth_bits_array, stencil_bits_array,
+ depth_buffer_factor, back_buffer_modes,
+ back_buffer_factor, GLX_TRUE_COLOR)) {
+ fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
+ __LINE__);
+ return NULL;
+ }
+ if (!driFillInModes(&m, fb_format, fb_type,
+ depth_bits_array, stencil_bits_array,
+ depth_buffer_factor, back_buffer_modes,
+ back_buffer_factor, GLX_DIRECT_COLOR)) {
+ fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
+ __LINE__);
+ return NULL;
+ }
+
+ /* Mark the visual as slow if there are "fake" stencil bits.
+ */
+ for (m = modes; m != NULL; m = m->next) {
+ if ((m->stencilBits != 0) && (m->stencilBits != stencil_bits)) {
+ m->visualRating = GLX_SLOW_CONFIG;
+ }
+ }
+
+ return modes;
+}
+
+
+/**
+ * This is the bootstrap function for the driver. libGL supplies all of the
+ * requisite information about the system, and the driver initializes itself.
+ * This routine also fills in the linked list pointed to by \c driver_modes
+ * with the \c __GLcontextModes that the driver can support for windows or
+ * pbuffers.
+ *
+ * \return A pointer to a \c __DRIscreenPrivate on success, or \c NULL on
+ * failure.
+ */
+PUBLIC void *
+__driCreateNewScreen_20050727(__DRInativeDisplay * dpy, int scrn,
+ __DRIscreen * psc,
+ const __GLcontextModes * modes,
+ const __DRIversion * ddx_version,
+ const __DRIversion * dri_version,
+ const __DRIversion * drm_version,
+ const __DRIframebuffer * frame_buffer,
+ drmAddress pSAREA, int fd,
+ int internal_api_version,
+ const __DRIinterfaceMethods * interface,
+ __GLcontextModes ** driver_modes)
+{
+ __DRIscreenPrivate *psp;
+ static const __DRIversion ddx_expected = { 1, 5, 0 };
+ static const __DRIversion dri_expected = { 4, 0, 0 };
+ static const __DRIversion drm_expected = { 1, 7, 0 };
+
+ dri_interface = interface;
+
+ if (!driCheckDriDdxDrmVersions2("i915",
+ dri_version, &dri_expected,
+ ddx_version, &ddx_expected,
+ drm_version, &drm_expected)) {
+ return NULL;
+ }
+
+ psp = __driUtilCreateNewScreen(dpy, scrn, psc, NULL,
+ ddx_version, dri_version, drm_version,
+ frame_buffer, pSAREA, fd,
+ internal_api_version, &intelAPI);
+ if (psp != NULL) {
+ I830DRIPtr dri_priv = (I830DRIPtr) psp->pDevPriv;
+ *driver_modes = intelFillInModes(dri_priv->cpp * 8,
+ (dri_priv->cpp == 2) ? 16 : 24,
+ (dri_priv->cpp == 2) ? 0 : 8, 1);
+
+ /* Calling driInitExtensions here, with a NULL context pointer, does not actually
+ * enable the extensions. It just makes sure that all the dispatch offsets for all
+ * the extensions that *might* be enabled are known. This is needed because the
+ * dispatch offsets need to be known when _mesa_context_create is called, but we can't
+ * enable the extensions until we have a context pointer.
+ *
+ * Hello chicken. Hello egg. How are you two today?
+ */
+ driInitExtensions(NULL, card_extensions, GL_FALSE);
+ }
+
+ return (void *) psp;
+}
+
+struct intel_context *intelScreenContext(intelScreenPrivate *intelScreen)
+{
+ /*
+ * This should probably change to have the screen allocate a dummy
+ * context at screen creation. For now just use the current context.
+ */
+
+ GET_CURRENT_CONTEXT(ctx);
+ if (ctx == NULL) {
+ _mesa_problem(NULL, "No current context in intelScreenContext\n");
+ return NULL;
+ }
+ return intel_context(ctx);
+}
+
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _INTEL_INIT_H_
+#define _INTEL_INIT_H_
+
+#include <sys/time.h>
+#include "dri_util.h"
+#include "intel_rotate.h"
+#include "i830_common.h"
+#include "xmlconfig.h"
+#include "dri_bufpool.h"
+
+/* XXX: change name or eliminate to avoid conflict with "struct
+ * intel_region"!!!
+ */
+typedef struct
+{
+ drm_handle_t handle;
+ drmSize size; /* region size in bytes */
+ char *map; /* memory map */
+ int offset; /* from start of video mem, in bytes */
+ int pitch; /* row stride, in bytes */
+} intelRegion;
+
+typedef struct
+{
+ intelRegion front;
+ intelRegion back;
+ intelRegion rotated;
+ intelRegion depth;
+ intelRegion tex;
+
+ struct intel_region *front_region;
+ struct intel_region *back_region;
+ struct intel_region *depth_region;
+ struct intel_region *rotated_region;
+
+ int deviceID;
+ int width;
+ int height;
+ int mem; /* unused */
+
+ int cpp; /* for front and back buffers */
+/* int bitsPerPixel; */
+ int fbFormat; /* XXX FBO: this is obsolete - remove after i830 updates */
+
+ int logTextureGranularity;
+
+ __DRIscreenPrivate *driScrnPriv;
+ unsigned int sarea_priv_offset;
+
+ int drmMinor;
+
+ int irq_active;
+ int allow_batchbuffer;
+
+ struct matrix23 rotMatrix;
+
+ int current_rotation; /* 0, 90, 180 or 270 */
+ int rotatedWidth, rotatedHeight;
+
+ /**
+ * Configuration cache with default values for all contexts
+ */
+ driOptionCache optionCache;
+ struct _DriBufferPool *batchPool;
+ struct _DriBufferPool *texPool;
+ struct _DriBufferPool *regionPool;
+ struct _DriBufferPool *staticPool;
+ unsigned int maxBatchSize;
+} intelScreenPrivate;
+
+
+
+extern GLboolean intelMapScreenRegions(__DRIscreenPrivate * sPriv);
+
+extern void intelUnmapScreenRegions(intelScreenPrivate * intelScreen);
+
+extern void
+intelUpdateScreenFromSAREA(intelScreenPrivate * intelScreen,
+ drmI830Sarea * sarea);
+
+extern void intelDestroyContext(__DRIcontextPrivate * driContextPriv);
+
+extern GLboolean intelUnbindContext(__DRIcontextPrivate * driContextPriv);
+
+extern GLboolean
+intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
+ __DRIdrawablePrivate * driDrawPriv,
+ __DRIdrawablePrivate * driReadPriv);
+
+extern void intelSwapBuffers(__DRIdrawablePrivate * dPriv);
+
+extern void
+intelCopySubBuffer(__DRIdrawablePrivate * dPriv, int x, int y, int w, int h);
+
+extern struct _DriBufferPool *driBatchPoolInit(int fd, unsigned flags,
+ unsigned long bufSize,
+ unsigned numBufs,
+ unsigned checkDelayed);
+
+extern struct intel_context *intelScreenContext(intelScreenPrivate *intelScreen);
+
+extern void
+intelUpdateScreenRotation(__DRIscreenPrivate * sPriv, drmI830Sarea * sarea);
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "macros.h"
+#include "mtypes.h"
+#include "colormac.h"
+
+#include "intel_fbo.h"
+#include "intel_screen.h"
+#include "intel_span.h"
+#include "intel_regions.h"
+#include "intel_ioctl.h"
+#include "intel_tex.h"
+
+#include "swrast/swrast.h"
+
+/* Handy gdb breakpoint for debugging span writes:
+ *    break intelWriteRGBASpan_ARGB8888
+ */
+
+#undef DBG
+#define DBG 0
+
+#define LOCAL_VARS \
+ struct intel_context *intel = intel_context(ctx); \
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb); \
+ const GLint yScale = irb->RenderToTexture ? 1 : -1; \
+ const GLint yBias = irb->RenderToTexture ? 0 : irb->Base.Height - 1; \
+ GLubyte *buf = (GLubyte *) irb->pfMap \
+ + (intel->drawY * irb->pfPitch + intel->drawX) * irb->region->cpp;\
+ GLuint p; \
+ assert(irb->pfMap);\
+ (void) p;
+
+/* XXX FBO: this is identical to the macro in spantmp2.h except we get
+ * the cliprect info from the context, not the driDrawable.
+ * Move this into spantmp2.h someday.
+ */
+#define HW_CLIPLOOP() \
+ do { \
+ int _nc = intel->numClipRects; \
+ while ( _nc-- ) { \
+ int minx = intel->pClipRects[_nc].x1 - intel->drawX; \
+ int miny = intel->pClipRects[_nc].y1 - intel->drawY; \
+ int maxx = intel->pClipRects[_nc].x2 - intel->drawX; \
+ int maxy = intel->pClipRects[_nc].y2 - intel->drawY;
+
+
+
+
+#define Y_FLIP(_y) ((_y) * yScale + yBias)
+
+#define HW_LOCK()
+
+#define HW_UNLOCK()
+
+/* 16 bit, RGB565 color spanline and pixel functions
+ */
+#define SPANTMP_PIXEL_FMT GL_RGB
+#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_SHORT_5_6_5
+
+#define TAG(x) intel##x##_RGB565
+#define TAG2(x,y) intel##x##_RGB565##y
+#define GET_PTR(X,Y) (buf + ((Y) * irb->pfPitch + (X)) * 2)
+#include "spantmp2.h"
+
+/* 32 bit, ARGB8888 color spanline and pixel functions
+ */
+#define SPANTMP_PIXEL_FMT GL_BGRA
+#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_INT_8_8_8_8_REV
+
+#define TAG(x) intel##x##_ARGB8888
+#define TAG2(x,y) intel##x##_ARGB8888##y
+#define GET_PTR(X,Y) (buf + ((Y) * irb->pfPitch + (X)) * 4)
+#include "spantmp2.h"
+
+
+#define LOCAL_DEPTH_VARS \
+ struct intel_context *intel = intel_context(ctx); \
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb); \
+ const GLuint pitch = irb->pfPitch/***XXX region->pitch*/; /* in pixels */ \
+ const GLint yScale = irb->RenderToTexture ? 1 : -1; \
+ const GLint yBias = irb->RenderToTexture ? 0 : irb->Base.Height - 1; \
+ char *buf = (char *) irb->pfMap/*XXX use region->map*/ + \
+ (intel->drawY * pitch + intel->drawX) * irb->region->cpp;
+
+
+#define LOCAL_STENCIL_VARS LOCAL_DEPTH_VARS
+
+/**
+ ** 16-bit depthbuffer functions.
+ **/
+#define WRITE_DEPTH( _x, _y, d ) \
+ ((GLushort *)buf)[(_x) + (_y) * pitch] = d;
+
+#define READ_DEPTH( d, _x, _y ) \
+ d = ((GLushort *)buf)[(_x) + (_y) * pitch];
+
+
+#define TAG(x) intel##x##_z16
+#include "depthtmp.h"
+
+
+/**
+ ** 24/8-bit interleaved depth/stencil functions
+ ** Note: we're actually reading back combined depth+stencil values.
+ ** The wrappers in main/depthstencil.c are used to extract the depth
+ ** and stencil values.
+ **/
+/* Change ZZZS -> SZZZ */
+#define WRITE_DEPTH( _x, _y, d ) { \
+ GLuint tmp = ((d) >> 8) | ((d) << 24); \
+ ((GLuint *)buf)[(_x) + (_y) * pitch] = tmp; \
+}
+
+/* Change SZZZ -> ZZZS */
+#define READ_DEPTH( d, _x, _y ) { \
+ GLuint tmp = ((GLuint *)buf)[(_x) + (_y) * pitch]; \
+ d = (tmp << 8) | (tmp >> 24); \
+}
+
+#define TAG(x) intel##x##_z24_s8
+#include "depthtmp.h"
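A tiny round-trip illustration of the packing above, assuming a combined value with depth in the top 24 bits and stencil in the low byte (the ZZZS layout the wrappers expect):

    GLuint zs   = 0x12345678;               /* ZZZS: depth 0x123456, stencil 0x78 */
    GLuint mem  = (zs >> 8) | (zs << 24);   /* 0x78123456 -- SZZZ as stored   */
    GLuint back = (mem << 8) | (mem >> 24); /* 0x12345678 -- ZZZS recovered   */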
+
+
+/**
+ ** 8-bit stencil function (XXX FBO: This is obsolete)
+ **/
+#define WRITE_STENCIL( _x, _y, d ) { \
+ GLuint tmp = ((GLuint *)buf)[(_x) + (_y) * pitch]; \
+ tmp &= 0xffffff; \
+ tmp |= ((d) << 24); \
+ ((GLuint *) buf)[(_x) + (_y) * pitch] = tmp; \
+}
+
+#define READ_STENCIL( d, _x, _y ) \
+ d = ((GLuint *)buf)[(_x) + (_y) * pitch] >> 24;
+
+#define TAG(x) intel##x##_z24_s8
+#include "stenciltmp.h"
+
+
+
+/**
+ * Map or unmap all the renderbuffers which we may need during
+ * software rendering.
+ * XXX in the future, we could probably convey extra information to
+ * reduce the number of mappings needed. I.e. if doing a glReadPixels
+ * from the depth buffer, we really only need one mapping.
+ *
+ * XXX Rewrite this function someday.
+ * We can probably just loop over all the renderbuffer attachments,
+ * map/unmap all of them, and not worry about the _ColorDrawBuffers
+ * _ColorReadBuffer, _DepthBuffer or _StencilBuffer fields.
+ */
+static void
+intel_map_unmap_buffers(struct intel_context *intel, GLboolean map)
+{
+ GLcontext *ctx = &intel->ctx;
+ GLuint i, j;
+ struct intel_renderbuffer *irb;
+
+ /* color draw buffers */
+ for (i = 0; i < ctx->Const.MaxDrawBuffers; i++) {
+ for (j = 0; j < ctx->DrawBuffer->_NumColorDrawBuffers[i]; j++) {
+ struct gl_renderbuffer *rb =
+ ctx->DrawBuffer->_ColorDrawBuffers[i][j];
+ irb = intel_renderbuffer(rb);
+ if (irb) {
+ /* this is a user-created intel_renderbuffer */
+ if (irb->region) {
+ if (map)
+ intel_region_map(intel->intelScreen, irb->region);
+ else
+ intel_region_unmap(intel->intelScreen, irb->region);
+ }
+ irb->pfMap = irb->region->map;
+ irb->pfPitch = irb->region->pitch;
+ }
+ }
+ }
+
+ /* check for render to textures */
+ for (i = 0; i < BUFFER_COUNT; i++) {
+ struct gl_renderbuffer_attachment *att =
+ ctx->DrawBuffer->Attachment + i;
+ struct gl_texture_object *tex = att->Texture;
+ if (tex) {
+ /* render to texture */
+ ASSERT(att->Renderbuffer);
+ if (map) {
+ struct gl_texture_image *texImg;
+ texImg = tex->Image[att->CubeMapFace][att->TextureLevel];
+ intel_tex_map_images(intel, intel_texture_object(tex));
+ }
+ else {
+ intel_tex_unmap_images(intel, intel_texture_object(tex));
+ }
+ }
+ }
+
+ /* color read buffers */
+ irb = intel_renderbuffer(ctx->ReadBuffer->_ColorReadBuffer);
+ if (irb && irb->region) {
+ if (map)
+ intel_region_map(intel->intelScreen, irb->region);
+ else
+ intel_region_unmap(intel->intelScreen, irb->region);
+ irb->pfMap = irb->region->map;
+ irb->pfPitch = irb->region->pitch;
+ }
+
+ /* Account for front/back color page flipping.
+ * The span routines use the pfMap and pfPitch fields which will
+ * swap the front/back region map/pitch if we're page flipped.
+ * Do this after mapping, above, so the map field is valid.
+ */
+#if 0
+ if (map && ctx->DrawBuffer->Name == 0) {
+ struct intel_renderbuffer *irbFront
+ = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_FRONT_LEFT);
+ struct intel_renderbuffer *irbBack
+ = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_BACK_LEFT);
+ if (irbBack) {
+ /* double buffered */
+ if (intel->sarea->pf_current_page == 0) {
+ irbFront->pfMap = irbFront->region->map;
+ irbFront->pfPitch = irbFront->region->pitch;
+ irbBack->pfMap = irbBack->region->map;
+ irbBack->pfPitch = irbBack->region->pitch;
+ }
+ else {
+ irbFront->pfMap = irbBack->region->map;
+ irbFront->pfPitch = irbBack->region->pitch;
+ irbBack->pfMap = irbFront->region->map;
+ irbBack->pfPitch = irbFront->region->pitch;
+ }
+ }
+ }
+#endif
+
+ /* depth buffer (Note wrapper!) */
+ if (ctx->DrawBuffer->_DepthBuffer) {
+ irb = intel_renderbuffer(ctx->DrawBuffer->_DepthBuffer->Wrapped);
+ if (irb && irb->region && irb->Base.Name != 0) {
+ if (map) {
+ intel_region_map(intel->intelScreen, irb->region);
+ irb->pfMap = irb->region->map;
+ irb->pfPitch = irb->region->pitch;
+ }
+ else {
+ intel_region_unmap(intel->intelScreen, irb->region);
+ irb->pfMap = NULL;
+ irb->pfPitch = 0;
+ }
+ }
+ }
+
+ /* stencil buffer (Note wrapper!) */
+ if (ctx->DrawBuffer->_StencilBuffer) {
+ irb = intel_renderbuffer(ctx->DrawBuffer->_StencilBuffer->Wrapped);
+ if (irb && irb->region && irb->Base.Name != 0) {
+ if (map) {
+ intel_region_map(intel->intelScreen, irb->region);
+ irb->pfMap = irb->region->map;
+ irb->pfPitch = irb->region->pitch;
+ }
+ else {
+ intel_region_unmap(intel->intelScreen, irb->region);
+ irb->pfMap = NULL;
+ irb->pfPitch = 0;
+ }
+ }
+ }
+}
+
+
+
+/**
+ * Prepare for software rendering.  Map current read/draw framebuffers'
+ * renderbuffers and all currently bound texture objects.
+ *
+ * Old note: Moved locking out to get reasonable span performance.
+ */
+void
+intelSpanRenderStart(GLcontext * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ GLuint i;
+
+ intelFinish(&intel->ctx);
+ LOCK_HARDWARE(intel);
+
+#if 0
+ /* Just map the framebuffer and all textures. Bufmgr code will
+ * take care of waiting on the necessary fences:
+ */
+ intel_region_map(intel->intelScreen, intel->front_region);
+ intel_region_map(intel->intelScreen, intel->back_region);
+ intel_region_map(intel->intelScreen, intel->intelScreen->depth_region);
+#endif
+
+ for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
+ if (ctx->Texture.Unit[i]._ReallyEnabled) {
+ struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
+ intel_tex_map_images(intel, intel_texture_object(texObj));
+ }
+ }
+
+ intel_map_unmap_buffers(intel, GL_TRUE);
+}
+
+/**
+ * Called when done software rendering.  Unmap the buffers we mapped in
+ * the above function.
+ */
+void
+intelSpanRenderFinish(GLcontext * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ GLuint i;
+
+ _swrast_flush(ctx);
+
+ /* Now unmap the framebuffer:
+ */
+#if 0
+ intel_region_unmap(intel, intel->front_region);
+ intel_region_unmap(intel, intel->back_region);
+ intel_region_unmap(intel, intel->intelScreen->depth_region);
+#endif
+
+ for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
+ if (ctx->Texture.Unit[i]._ReallyEnabled) {
+ struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
+ intel_tex_unmap_images(intel, intel_texture_object(texObj));
+ }
+ }
+
+ intel_map_unmap_buffers(intel, GL_FALSE);
+
+ UNLOCK_HARDWARE(intel);
+}
+
+
+void
+intelInitSpanFuncs(GLcontext * ctx)
+{
+ struct swrast_device_driver *swdd = _swrast_GetDeviceDriverReference(ctx);
+ swdd->SpanRenderStart = intelSpanRenderStart;
+ swdd->SpanRenderFinish = intelSpanRenderFinish;
+}
+
+
+/**
+ * Plug in appropriate span read/write functions for the given renderbuffer.
+ * These are used for the software fallbacks.
+ */
+void
+intel_set_span_functions(struct gl_renderbuffer *rb)
+{
+ if (rb->_ActualFormat == GL_RGB5) {
+ /* 565 RGB */
+ intelInitPointers_RGB565(rb);
+ }
+ else if (rb->_ActualFormat == GL_RGBA8) {
+ /* 8888 RGBA */
+ intelInitPointers_ARGB8888(rb);
+ }
+ else if (rb->_ActualFormat == GL_DEPTH_COMPONENT16) {
+ intelInitDepthPointers_z16(rb);
+ }
+ else if (rb->_ActualFormat == GL_DEPTH_COMPONENT24 || /* XXX FBO remove */
+ rb->_ActualFormat == GL_DEPTH24_STENCIL8_EXT) {
+ intelInitDepthPointers_z24_s8(rb);
+ }
+ else if (rb->_ActualFormat == GL_STENCIL_INDEX8_EXT) { /* XXX FBO remove */
+ intelInitStencilPointers_z24_s8(rb);
+ }
+ else {
+ _mesa_problem(NULL,
+ "Unexpected _ActualFormat in intel_set_span_functions");
+ }
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _INTEL_SPAN_H
+#define _INTEL_SPAN_H
+
+extern void intelInitSpanFuncs(GLcontext * ctx);
+
+extern void intelSpanRenderFinish(GLcontext * ctx);
+extern void intelSpanRenderStart(GLcontext * ctx);
+
+extern void intel_set_span_functions(struct gl_renderbuffer *rb);
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "glheader.h"
+#include "context.h"
+#include "macros.h"
+#include "enums.h"
+#include "colormac.h"
+#include "dd.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_fbo.h"
+#include "intel_regions.h"
+#include "swrast/swrast.h"
+
+int
+intel_translate_compare_func(GLenum func)
+{
+ switch (func) {
+ case GL_NEVER:
+ return COMPAREFUNC_NEVER;
+ case GL_LESS:
+ return COMPAREFUNC_LESS;
+ case GL_LEQUAL:
+ return COMPAREFUNC_LEQUAL;
+ case GL_GREATER:
+ return COMPAREFUNC_GREATER;
+ case GL_GEQUAL:
+ return COMPAREFUNC_GEQUAL;
+ case GL_NOTEQUAL:
+ return COMPAREFUNC_NOTEQUAL;
+ case GL_EQUAL:
+ return COMPAREFUNC_EQUAL;
+ case GL_ALWAYS:
+ return COMPAREFUNC_ALWAYS;
+ }
+
+ fprintf(stderr, "Unknown value in %s: %x\n", __FUNCTION__, func);
+ return COMPAREFUNC_ALWAYS;
+}
+
+int
+intel_translate_stencil_op(GLenum op)
+{
+ switch (op) {
+ case GL_KEEP:
+ return STENCILOP_KEEP;
+ case GL_ZERO:
+ return STENCILOP_ZERO;
+ case GL_REPLACE:
+ return STENCILOP_REPLACE;
+ case GL_INCR:
+ return STENCILOP_INCRSAT;
+ case GL_DECR:
+ return STENCILOP_DECRSAT;
+ case GL_INCR_WRAP:
+ return STENCILOP_INCR;
+ case GL_DECR_WRAP:
+ return STENCILOP_DECR;
+ case GL_INVERT:
+ return STENCILOP_INVERT;
+ default:
+ return STENCILOP_ZERO;
+ }
+}
+
+int
+intel_translate_blend_factor(GLenum factor)
+{
+ switch (factor) {
+ case GL_ZERO:
+ return BLENDFACT_ZERO;
+ case GL_SRC_ALPHA:
+ return BLENDFACT_SRC_ALPHA;
+ case GL_ONE:
+ return BLENDFACT_ONE;
+ case GL_SRC_COLOR:
+ return BLENDFACT_SRC_COLR;
+ case GL_ONE_MINUS_SRC_COLOR:
+ return BLENDFACT_INV_SRC_COLR;
+ case GL_DST_COLOR:
+ return BLENDFACT_DST_COLR;
+ case GL_ONE_MINUS_DST_COLOR:
+ return BLENDFACT_INV_DST_COLR;
+ case GL_ONE_MINUS_SRC_ALPHA:
+ return BLENDFACT_INV_SRC_ALPHA;
+ case GL_DST_ALPHA:
+ return BLENDFACT_DST_ALPHA;
+ case GL_ONE_MINUS_DST_ALPHA:
+ return BLENDFACT_INV_DST_ALPHA;
+ case GL_SRC_ALPHA_SATURATE:
+ return BLENDFACT_SRC_ALPHA_SATURATE;
+ case GL_CONSTANT_COLOR:
+ return BLENDFACT_CONST_COLOR;
+ case GL_ONE_MINUS_CONSTANT_COLOR:
+ return BLENDFACT_INV_CONST_COLOR;
+ case GL_CONSTANT_ALPHA:
+ return BLENDFACT_CONST_ALPHA;
+ case GL_ONE_MINUS_CONSTANT_ALPHA:
+ return BLENDFACT_INV_CONST_ALPHA;
+ }
+
+ fprintf(stderr, "Unknown value in %s: %x\n", __FUNCTION__, factor);
+ return BLENDFACT_ZERO;
+}
+
+int
+intel_translate_logic_op(GLenum opcode)
+{
+ switch (opcode) {
+ case GL_CLEAR:
+ return LOGICOP_CLEAR;
+ case GL_AND:
+ return LOGICOP_AND;
+ case GL_AND_REVERSE:
+ return LOGICOP_AND_RVRSE;
+ case GL_COPY:
+ return LOGICOP_COPY;
+ case GL_COPY_INVERTED:
+ return LOGICOP_COPY_INV;
+ case GL_AND_INVERTED:
+ return LOGICOP_AND_INV;
+ case GL_NOOP:
+ return LOGICOP_NOOP;
+ case GL_XOR:
+ return LOGICOP_XOR;
+ case GL_OR:
+ return LOGICOP_OR;
+ case GL_OR_INVERTED:
+ return LOGICOP_OR_INV;
+ case GL_NOR:
+ return LOGICOP_NOR;
+ case GL_EQUIV:
+ return LOGICOP_EQUIV;
+ case GL_INVERT:
+ return LOGICOP_INV;
+ case GL_OR_REVERSE:
+ return LOGICOP_OR_RVRSE;
+ case GL_NAND:
+ return LOGICOP_NAND;
+ case GL_SET:
+ return LOGICOP_SET;
+ default:
+ return LOGICOP_SET;
+ }
+}
+
+
+static void
+intelClearColor(GLcontext * ctx, const GLfloat color[4])
+{
+ struct intel_context *intel = intel_context(ctx);
+ GLubyte clear[4];
+
+ CLAMPED_FLOAT_TO_UBYTE(clear[0], color[0]);
+ CLAMPED_FLOAT_TO_UBYTE(clear[1], color[1]);
+ CLAMPED_FLOAT_TO_UBYTE(clear[2], color[2]);
+ CLAMPED_FLOAT_TO_UBYTE(clear[3], color[3]);
+
+ /* compute both 32 and 16-bit clear values */
+ intel->ClearColor8888 = INTEL_PACKCOLOR8888(clear[0], clear[1],
+ clear[2], clear[3]);
+ intel->ClearColor565 = INTEL_PACKCOLOR565(clear[0], clear[1], clear[2]);
+}
+
+
+/**
+ * Update the viewport transformation matrix. Depends on:
+ * - viewport pos/size
+ * - depthrange
+ * - window pos/size or FBO size
+ */
+static void
+intelCalcViewport(GLcontext * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ const GLfloat *v = ctx->Viewport._WindowMap.m;
+ const GLfloat depthScale = 1.0F / ctx->DrawBuffer->_DepthMaxF;
+ GLfloat *m = intel->ViewportMatrix.m;
+ GLfloat yScale, yBias;
+
+ if (ctx->DrawBuffer->Name) {
+ /* User created FBO */
+ struct intel_renderbuffer *irb
+ = intel_renderbuffer(ctx->DrawBuffer->_ColorDrawBuffers[0][0]);
+ if (irb && !irb->RenderToTexture) {
+ /* y=0=top */
+ yScale = -1.0;
+ yBias = irb->Base.Height;
+ }
+ else {
+ /* y=0=bottom */
+ yScale = 1.0;
+ yBias = 0.0;
+ }
+ }
+ else {
+ /* window buffer, y=0=top */
+ yScale = -1.0;
+ yBias = (intel->driDrawable) ? intel->driDrawable->h : 0.0F;
+ }
+
+ m[MAT_SX] = v[MAT_SX];
+ m[MAT_TX] = v[MAT_TX] + SUBPIXEL_X;
+
+ m[MAT_SY] = v[MAT_SY] * yScale;
+ m[MAT_TY] = v[MAT_TY] * yScale + yBias + SUBPIXEL_Y;
+
+ m[MAT_SZ] = v[MAT_SZ] * depthScale;
+ m[MAT_TZ] = v[MAT_TZ] * depthScale;
+}
+
+static void
+intelViewport(GLcontext * ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height)
+{
+ intelCalcViewport(ctx);
+}
+
+static void
+intelDepthRange(GLcontext * ctx, GLclampd nearval, GLclampd farval)
+{
+ intelCalcViewport(ctx);
+}
+
+/* Fallback to swrast for select and feedback.
+ */
+static void
+intelRenderMode(GLcontext * ctx, GLenum mode)
+{
+ struct intel_context *intel = intel_context(ctx);
+ FALLBACK(intel, INTEL_FALLBACK_RENDERMODE, (mode != GL_RENDER));
+}
+
+
+void
+intelInitStateFuncs(struct dd_function_table *functions)
+{
+ functions->RenderMode = intelRenderMode;
+ functions->Viewport = intelViewport;
+ functions->DepthRange = intelDepthRange;
+ functions->ClearColor = intelClearColor;
+}
+
+
+
+
+void
+intelInitState(GLcontext * ctx)
+{
+ /* Mesa should do this for us:
+ */
+ ctx->Driver.AlphaFunc(ctx, ctx->Color.AlphaFunc, ctx->Color.AlphaRef);
+
+ ctx->Driver.BlendColor(ctx, ctx->Color.BlendColor);
+
+ ctx->Driver.BlendEquationSeparate(ctx,
+ ctx->Color.BlendEquationRGB,
+ ctx->Color.BlendEquationA);
+
+ ctx->Driver.BlendFuncSeparate(ctx,
+ ctx->Color.BlendSrcRGB,
+ ctx->Color.BlendDstRGB,
+ ctx->Color.BlendSrcA, ctx->Color.BlendDstA);
+
+ ctx->Driver.ColorMask(ctx,
+ ctx->Color.ColorMask[RCOMP],
+ ctx->Color.ColorMask[GCOMP],
+ ctx->Color.ColorMask[BCOMP],
+ ctx->Color.ColorMask[ACOMP]);
+
+ ctx->Driver.CullFace(ctx, ctx->Polygon.CullFaceMode);
+ ctx->Driver.DepthFunc(ctx, ctx->Depth.Func);
+ ctx->Driver.DepthMask(ctx, ctx->Depth.Mask);
+
+ ctx->Driver.Enable(ctx, GL_ALPHA_TEST, ctx->Color.AlphaEnabled);
+ ctx->Driver.Enable(ctx, GL_BLEND, ctx->Color.BlendEnabled);
+ ctx->Driver.Enable(ctx, GL_COLOR_LOGIC_OP, ctx->Color.ColorLogicOpEnabled);
+ ctx->Driver.Enable(ctx, GL_COLOR_SUM, ctx->Fog.ColorSumEnabled);
+ ctx->Driver.Enable(ctx, GL_CULL_FACE, ctx->Polygon.CullFlag);
+ ctx->Driver.Enable(ctx, GL_DEPTH_TEST, ctx->Depth.Test);
+ ctx->Driver.Enable(ctx, GL_DITHER, ctx->Color.DitherFlag);
+ ctx->Driver.Enable(ctx, GL_FOG, ctx->Fog.Enabled);
+ ctx->Driver.Enable(ctx, GL_LIGHTING, ctx->Light.Enabled);
+ ctx->Driver.Enable(ctx, GL_LINE_SMOOTH, ctx->Line.SmoothFlag);
+ ctx->Driver.Enable(ctx, GL_POLYGON_STIPPLE, ctx->Polygon.StippleFlag);
+ ctx->Driver.Enable(ctx, GL_SCISSOR_TEST, ctx->Scissor.Enabled);
+ ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
+ ctx->Driver.Enable(ctx, GL_TEXTURE_1D, GL_FALSE);
+ ctx->Driver.Enable(ctx, GL_TEXTURE_2D, GL_FALSE);
+ ctx->Driver.Enable(ctx, GL_TEXTURE_RECTANGLE_NV, GL_FALSE);
+ ctx->Driver.Enable(ctx, GL_TEXTURE_3D, GL_FALSE);
+ ctx->Driver.Enable(ctx, GL_TEXTURE_CUBE_MAP, GL_FALSE);
+
+ ctx->Driver.Fogfv(ctx, GL_FOG_COLOR, ctx->Fog.Color);
+ ctx->Driver.Fogfv(ctx, GL_FOG_MODE, 0);
+ ctx->Driver.Fogfv(ctx, GL_FOG_DENSITY, &ctx->Fog.Density);
+ ctx->Driver.Fogfv(ctx, GL_FOG_START, &ctx->Fog.Start);
+ ctx->Driver.Fogfv(ctx, GL_FOG_END, &ctx->Fog.End);
+
+ ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
+
+ {
+ GLfloat f = (GLfloat) ctx->Light.Model.ColorControl;
+ ctx->Driver.LightModelfv(ctx, GL_LIGHT_MODEL_COLOR_CONTROL, &f);
+ }
+
+ ctx->Driver.LineWidth(ctx, ctx->Line.Width);
+ ctx->Driver.LogicOpcode(ctx, ctx->Color.LogicOp);
+ ctx->Driver.PointSize(ctx, ctx->Point.Size);
+ ctx->Driver.PolygonStipple(ctx, (const GLubyte *) ctx->PolygonStipple);
+ ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
+ ctx->Scissor.Width, ctx->Scissor.Height);
+ ctx->Driver.ShadeModel(ctx, ctx->Light.ShadeModel);
+ ctx->Driver.StencilFuncSeparate(ctx, GL_FRONT,
+ ctx->Stencil.Function[0],
+ ctx->Stencil.Ref[0],
+ ctx->Stencil.ValueMask[0]);
+ ctx->Driver.StencilFuncSeparate(ctx, GL_BACK,
+ ctx->Stencil.Function[1],
+ ctx->Stencil.Ref[1],
+ ctx->Stencil.ValueMask[1]);
+ ctx->Driver.StencilMaskSeparate(ctx, GL_FRONT, ctx->Stencil.WriteMask[0]);
+ ctx->Driver.StencilMaskSeparate(ctx, GL_BACK, ctx->Stencil.WriteMask[1]);
+ ctx->Driver.StencilOpSeparate(ctx, GL_FRONT,
+ ctx->Stencil.FailFunc[0],
+ ctx->Stencil.ZFailFunc[0],
+ ctx->Stencil.ZPassFunc[0]);
+ ctx->Driver.StencilOpSeparate(ctx, GL_BACK,
+ ctx->Stencil.FailFunc[1],
+ ctx->Stencil.ZFailFunc[1],
+ ctx->Stencil.ZPassFunc[1]);
+
+
+ /* XXX this isn't really needed */
+ ctx->Driver.DrawBuffer(ctx, ctx->Color.DrawBuffer[0]);
+}
--- /dev/null
+#ifndef INTEL_STRUCTS_H
+#define INTEL_STRUCTS_H
+
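+/* First dword of a 2D blitter packet: length, tiling flag, write
+ * enables, opcode and client fields.
+ */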
+struct br0 {
+ GLuint length:8;
+ GLuint pad0:3;
+ GLuint dst_tiled:1;
+ GLuint pad1:8;
+ GLuint write_rgb:1;
+ GLuint write_alpha:1;
+ GLuint opcode:7;
+ GLuint client:3;
+};
+
+
+struct br13 {
+ GLint dest_pitch:16;
+ GLuint rop:8;
+ GLuint color_depth:2;
+ GLuint pad1:3;
+ GLuint mono_source_transparency:1;
+ GLuint clipping_enable:1;
+ GLuint pad0:1;
+};
+
+
+
+/* This is an attempt to move some of the 2D interaction in this
+ * driver to using structs for packets rather than a bunch of #defines
+ * and dwords.
+ */
+struct xy_color_blit {
+ struct br0 br0;
+ struct br13 br13;
+
+ struct {
+ GLuint dest_x1:16;
+ GLuint dest_y1:16;
+ } dw2;
+
+ struct {
+ GLuint dest_x2:16;
+ GLuint dest_y2:16;
+ } dw3;
+
+ GLuint dest_base_addr;
+ GLuint color;
+};
+
+struct xy_src_copy_blit {
+ struct br0 br0;
+ struct br13 br13;
+
+ struct {
+ GLuint dest_x1:16;
+ GLuint dest_y1:16;
+ } dw2;
+
+ struct {
+ GLuint dest_x2:16;
+ GLuint dest_y2:16;
+ } dw3;
+
+ GLuint dest_base_addr;
+
+ struct {
+ GLuint src_x1:16;
+ GLuint src_y1:16;
+ } dw5;
+
+ struct {
+ GLint src_pitch:16;
+ GLuint pad:16;
+ } dw6;
+
+ GLuint src_base_addr;
+};
+
+struct xy_setup_blit {
+ struct br0 br0;
+ struct br13 br13;
+
+ struct {
+ GLuint clip_x1:16;
+ GLuint clip_y1:16;
+ } dw2;
+
+ struct {
+ GLuint clip_x2:16;
+ GLuint clip_y2:16;
+ } dw3;
+
+ GLuint dest_base_addr;
+ GLuint background_color;
+ GLuint foreground_color;
+ GLuint pattern_base_addr;
+};
+
+
+struct xy_text_immediate_blit {
+ struct {
+ GLuint length:8;
+ GLuint pad2:3;
+ GLuint dst_tiled:1;
+ GLuint pad1:4;
+ GLuint byte_packed:1;
+ GLuint pad0:5;
+ GLuint opcode:7;
+ GLuint client:3;
+ } dw0;
+
+ struct {
+ GLuint dest_x1:16;
+ GLuint dest_y1:16;
+ } dw1;
+
+ struct {
+ GLuint dest_x2:16;
+ GLuint dest_y2:16;
+ } dw2;
+
+ /* Src bitmap data follows as inline dwords.
+ */
+};
+
+
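+/* Values for the client/opcode bit-fields in the packet headers above. */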
+#define CLIENT_2D 0x2
+#define OPCODE_XY_SETUP_BLT 0x1
+#define OPCODE_XY_COLOR_BLT 0x50
+#define OPCODE_XY_TEXT_IMMEDIATE_BLT 0x31
+
+#endif
--- /dev/null
+#include "texobj.h"
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_tex.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+static GLboolean
+intelIsTextureResident(GLcontext * ctx, struct gl_texture_object *texObj)
+{
+#if 0
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_object *intelObj = intel_texture_object(texObj);
+
+ return
+ intelObj->mt &&
+ intelObj->mt->region &&
+ intel_is_region_resident(intel, intelObj->mt->region);
+#endif
+ return 1;
+}
+
+
+
+static struct gl_texture_image *
+intelNewTextureImage(GLcontext * ctx)
+{
+ DBG("%s\n", __FUNCTION__);
+ (void) ctx;
+ return (struct gl_texture_image *) CALLOC_STRUCT(intel_texture_image);
+}
+
+
+static struct gl_texture_object *
+intelNewTextureObject(GLcontext * ctx, GLuint name, GLenum target)
+{
+ struct intel_texture_object *obj = CALLOC_STRUCT(intel_texture_object);
+
+ DBG("%s\n", __FUNCTION__);
+ _mesa_initialize_texture_object(&obj->base, name, target);
+
+ return &obj->base;
+}
+
+static void
+intelDeleteTextureObject(GLcontext *ctx,
+ struct gl_texture_object *texObj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_object *intelObj = intel_texture_object(texObj);
+
+ if (intelObj->mt)
+ intel_miptree_release(intel, &intelObj->mt);
+
+ _mesa_delete_texture_object(ctx, texObj);
+}
+
+
+static void
+intelFreeTextureImageData(GLcontext * ctx, struct gl_texture_image *texImage)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (intelImage->mt) {
+ intel_miptree_release(intel, &intelImage->mt);
+ }
+
+ if (texImage->Data) {
+ free(texImage->Data);
+ texImage->Data = NULL;
+ }
+}
+
+
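+/* Read the CPU timestamp counter (the ".byte 0x0f, 0x31" is rdtsc),
+ * serialized with cpuid.
+ */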
+#ifndef __x86_64__
+static unsigned
+fastrdtsc(void)
+{
+ unsigned eax;
+ __asm__ volatile ("\t"
+ "pushl %%ebx\n\t"
+ "cpuid\n\t" ".byte 0x0f, 0x31\n\t"
+ "popl %%ebx\n":"=a" (eax)
+ :"0"(0)
+ :"ecx", "edx", "cc");
+
+ return eax;
+}
+#else
+static unsigned
+fastrdtsc(void)
+{
+ unsigned eax;
+ __asm__ volatile ("\t" "cpuid\n\t" ".byte 0x0f, 0x31\n\t":"=a" (eax)
+ :"0"(0)
+ :"ecx", "edx", "ebx", "cc");
+
+ return eax;
+}
+#endif
+
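+/* Difference between two TSC samples, allowing for one 32-bit wrap. */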
+static unsigned
+time_diff(unsigned t, unsigned t2)
+{
+ return ((t < t2) ? t2 - t : 0xFFFFFFFFU - (t - t2 - 1));
+}
+
+
+/* The system memcpy (at least on ubuntu 5.10) has problems copying
+ * to agp (writecombined) memory from a source which isn't 64-byte
+ * aligned - there is a 4x performance falloff.
+ *
+ * The x86 __memcpy is immune to this but is slightly slower
+ * (10%-ish) than the system memcpy.
+ *
+ * The sse_memcpy seems to have a slight cliff at 64/32 bytes, but
+ * isn't much faster than x86_memcpy for agp copies.
+ *
+ * TODO: switch dynamically.
+ */
+static void *
+do_memcpy(void *dest, const void *src, size_t n)
+{
+ if ((((unsigned) src) & 63) || (((unsigned) dest) & 63)) {
+ return __memcpy(dest, src, n);
+ }
+ else
+ return memcpy(dest, src, n);
+}
+
+
+static void *
+timed_memcpy(void *dest, const void *src, size_t n)
+{
+ void *ret;
+ unsigned t1, t2;
+ double rate;
+
+ if ((((unsigned) src) & 63) || (((unsigned) dest) & 63))
+ _mesa_printf("Warning - non-aligned texture copy!\n");
+
+ t1 = fastrdtsc();
+ ret = do_memcpy(dest, src, n);
+ t2 = fastrdtsc();
+
+ rate = time_diff(t1, t2);
+ rate /= (double) n;
+ _mesa_printf("timed_memcpy: %u %u --> %f clocks/byte\n", t1, t2, rate);
+ return ret;
+}
+
+
+void
+intelInitTextureFuncs(struct dd_function_table *functions)
+{
+ functions->ChooseTextureFormat = intelChooseTextureFormat;
+ functions->TexImage1D = intelTexImage1D;
+ functions->TexImage2D = intelTexImage2D;
+ functions->TexImage3D = intelTexImage3D;
+ functions->TexSubImage1D = intelTexSubImage1D;
+ functions->TexSubImage2D = intelTexSubImage2D;
+ functions->TexSubImage3D = intelTexSubImage3D;
+ functions->CopyTexImage1D = intelCopyTexImage1D;
+ functions->CopyTexImage2D = intelCopyTexImage2D;
+ functions->CopyTexSubImage1D = intelCopyTexSubImage1D;
+ functions->CopyTexSubImage2D = intelCopyTexSubImage2D;
+ functions->GetTexImage = intelGetTexImage;
+ functions->NewTextureObject = intelNewTextureObject;
+ functions->NewTextureImage = intelNewTextureImage;
+ functions->DeleteTexture = intelDeleteTextureObject;
+ functions->FreeTexImageData = intelFreeTextureImageData;
+ functions->UpdateTexturePalette = 0;
+ functions->IsTextureResident = intelIsTextureResident;
+
+ if (INTEL_DEBUG & DEBUG_BUFMGR)
+ functions->TextureMemCpy = timed_memcpy;
+ else
+ functions->TextureMemCpy = do_memcpy;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTELTEX_INC
+#define INTELTEX_INC
+
+#include "mtypes.h"
+#include "intel_context.h"
+#include "texmem.h"
+
+
+void intelInitTextureFuncs(struct dd_function_table *functions);
+
+const struct gl_texture_format *intelChooseTextureFormat(GLcontext * ctx,
+ GLint internalFormat,
+ GLenum format,
+ GLenum type);
+
+
+void intelTexImage3D(GLcontext * ctx,
+ GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint depth,
+ GLint border,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+
+void intelTexSubImage3D(GLcontext * ctx,
+ GLenum target,
+ GLint level,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLsizei width, GLsizei height, GLsizei depth,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+
+void intelTexImage2D(GLcontext * ctx,
+ GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint border,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+
+void intelTexSubImage2D(GLcontext * ctx,
+ GLenum target,
+ GLint level,
+ GLint xoffset, GLint yoffset,
+ GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+
+void intelTexImage1D(GLcontext * ctx,
+ GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint border,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+
+void intelTexSubImage1D(GLcontext * ctx,
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLsizei width,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+
+void intelCopyTexImage1D(GLcontext * ctx, GLenum target, GLint level,
+ GLenum internalFormat,
+ GLint x, GLint y, GLsizei width, GLint border);
+
+void intelCopyTexImage2D(GLcontext * ctx, GLenum target, GLint level,
+ GLenum internalFormat,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLint border);
+
+void intelCopyTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
+ GLint xoffset, GLint x, GLint y, GLsizei width);
+
+void intelCopyTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
+ GLint xoffset, GLint yoffset,
+ GLint x, GLint y, GLsizei width, GLsizei height);
+
+void intelGetTexImage(GLcontext * ctx, GLenum target, GLint level,
+ GLenum format, GLenum type, GLvoid * pixels,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+
+GLuint intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit);
+
+void intel_tex_map_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj);
+
+void intel_tex_unmap_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj);
+
+#endif
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "mtypes.h"
+#include "enums.h"
+#include "image.h"
+#include "teximage.h"
+#include "swrast/swrast.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_batchbuffer.h"
+#include "intel_buffers.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+#include "intel_fbo.h"
+#include "intel_tex.h"
+#include "intel_blit.h"
+#include "intel_pixel.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+/**
+ * Get the intel_region which is the source for any glCopyTex[Sub]Image call.
+ *
+ * Do the best we can using the blitter. A future project is to use
+ * the texture engine and fragment programs for these copies.
+ */
+static const struct intel_region *
+get_teximage_source(struct intel_context *intel, GLenum internalFormat)
+{
+ struct intel_renderbuffer *irb;
+
+ DBG("%s %s\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(internalFormat));
+
+ switch (internalFormat) {
+ case GL_DEPTH_COMPONENT:
+ case GL_DEPTH_COMPONENT16_ARB:
+ irb = intel_get_renderbuffer(intel->ctx.ReadBuffer, BUFFER_DEPTH);
+ if (irb && irb->region && irb->region->cpp == 2)
+ return irb->region;
+ return NULL;
+ case GL_DEPTH24_STENCIL8_EXT:
+ case GL_DEPTH_STENCIL_EXT:
+ irb = intel_get_renderbuffer(intel->ctx.ReadBuffer, BUFFER_DEPTH);
+ if (irb && irb->region && irb->region->cpp == 4)
+ return irb->region;
+ return NULL;
+ case GL_RGBA:
+ case GL_RGBA8:
+ return intel_readbuf_region(intel);
+ case GL_RGB:
+ if (intel->intelScreen->cpp == 2)
+ return intel_readbuf_region(intel);
+ return NULL;
+ default:
+ return NULL;
+ }
+}
+
+
+static GLboolean
+do_copy_texsubimage(struct intel_context *intel,
+ struct intel_texture_image *intelImage,
+ GLenum internalFormat,
+ GLint dstx, GLint dsty,
+ GLint x, GLint y, GLsizei width, GLsizei height)
+{
+ GLcontext *ctx = &intel->ctx;
+ const struct intel_region *src =
+ get_teximage_source(intel, internalFormat);
+
+ if (!intelImage->mt || !src) {
+ DBG("%s fail %p %p\n", __FUNCTION__, intelImage->mt, src);
+ return GL_FALSE;
+ }
+
+ intelFlush(ctx);
+ LOCK_HARDWARE(intel);
+ {
+ GLuint image_offset = intel_miptree_image_offset(intelImage->mt,
+ intelImage->face,
+ intelImage->level);
+ const GLint orig_x = x;
+ const GLint orig_y = y;
+ const struct gl_framebuffer *fb = ctx->DrawBuffer;
+
+ if (_mesa_clip_to_region(fb->_Xmin, fb->_Ymin, fb->_Xmax, fb->_Ymax,
+ &x, &y, &width, &height)) {
+ /* Update dst for clipped src. Need to also clip the source rect.
+ */
+ dstx += x - orig_x;
+ dsty += y - orig_y;
+
+ if (ctx->ReadBuffer->Name == 0) {
+ /* reading from a window, adjust x, y */
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+ GLuint window_y;
+ /* window_y = position of window on screen if y=0=bottom */
+ window_y = intel->intelScreen->height - (dPriv->y + dPriv->h);
+ y = window_y + y;
+ x += dPriv->x;
+ }
+ else {
+ /* reading from a FBO */
+ /* invert Y */
+ y = ctx->ReadBuffer->Height - y - 1;
+ }
+
+
+ /* A bit of fiddling to get the blitter to work with -ve
+ * pitches. But we get a nice inverted blit this way, so it's
+ * worth it:
+ */
+ intelEmitCopyBlit(intel,
+ intelImage->mt->cpp,
+ -src->pitch,
+ src->buffer,
+ src->height * src->pitch * src->cpp,
+ intelImage->mt->pitch,
+ intelImage->mt->region->buffer,
+ image_offset,
+ x, y + height, dstx, dsty, width, height);
+
+ intel_batchbuffer_flush(intel->batch);
+ }
+ }
+
+
+ UNLOCK_HARDWARE(intel);
+
+#if 0
+ /* GL_SGIS_generate_mipmap -- this can be accelerated now.
+ * XXX Add a ctx->Driver.GenerateMipmaps() function?
+ */
+ if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
+ intel_generate_mipmap(ctx, target,
+ &ctx->Texture.Unit[ctx->Texture.CurrentUnit],
+ texObj);
+ }
+#endif
+
+ return GL_TRUE;
+}
+
+
+
+
+
+void
+intelCopyTexImage1D(GLcontext * ctx, GLenum target, GLint level,
+ GLenum internalFormat,
+ GLint x, GLint y, GLsizei width, GLint border)
+{
+ struct gl_texture_unit *texUnit =
+ &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
+ struct gl_texture_object *texObj =
+ _mesa_select_tex_object(ctx, texUnit, target);
+ struct gl_texture_image *texImage =
+ _mesa_select_tex_image(ctx, texObj, target, level);
+
+ if (border)
+ goto fail;
+
+ /* Setup or redefine the texture object, mipmap tree and texture
+ * image. Don't populate yet.
+ */
+ ctx->Driver.TexImage1D(ctx, target, level, internalFormat,
+ width, border,
+ GL_RGBA, CHAN_TYPE, NULL,
+ &ctx->DefaultPacking, texObj, texImage);
+
+ if (!do_copy_texsubimage(intel_context(ctx),
+ intel_texture_image(texImage),
+ internalFormat, 0, 0, x, y, width, 1))
+ goto fail;
+
+ return;
+
+ fail:
+ _swrast_copy_teximage1d(ctx, target, level, internalFormat, x, y,
+ width, border);
+}
+
+void
+intelCopyTexImage2D(GLcontext * ctx, GLenum target, GLint level,
+ GLenum internalFormat,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLint border)
+{
+ struct gl_texture_unit *texUnit =
+ &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
+ struct gl_texture_object *texObj =
+ _mesa_select_tex_object(ctx, texUnit, target);
+ struct gl_texture_image *texImage =
+ _mesa_select_tex_image(ctx, texObj, target, level);
+
+ if (border)
+ goto fail;
+
+ /* Setup or redefine the texture object, mipmap tree and texture
+ * image. Don't populate yet.
+ */
+ ctx->Driver.TexImage2D(ctx, target, level, internalFormat,
+ width, height, border,
+ GL_RGBA, CHAN_TYPE, NULL,
+ &ctx->DefaultPacking, texObj, texImage);
+
+
+ if (!do_copy_texsubimage(intel_context(ctx),
+ intel_texture_image(texImage),
+ internalFormat, 0, 0, x, y, width, height))
+ goto fail;
+
+ return;
+
+ fail:
+ _swrast_copy_teximage2d(ctx, target, level, internalFormat, x, y,
+ width, height, border);
+}
+
+
+void
+intelCopyTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
+ GLint xoffset, GLint x, GLint y, GLsizei width)
+{
+ struct gl_texture_unit *texUnit =
+ &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
+ struct gl_texture_object *texObj =
+ _mesa_select_tex_object(ctx, texUnit, target);
+ struct gl_texture_image *texImage =
+ _mesa_select_tex_image(ctx, texObj, target, level);
+ GLenum internalFormat = texImage->InternalFormat;
+
+ /* XXX need to check <border> as in above function? */
+
+ /* Need to check texture is compatible with source format.
+ */
+
+ if (!do_copy_texsubimage(intel_context(ctx),
+ intel_texture_image(texImage),
+ internalFormat, xoffset, 0, x, y, width, 1)) {
+ _swrast_copy_texsubimage1d(ctx, target, level, xoffset, x, y, width);
+ }
+}
+
+
+
+void
+intelCopyTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
+ GLint xoffset, GLint yoffset,
+ GLint x, GLint y, GLsizei width, GLsizei height)
+{
+ struct gl_texture_unit *texUnit =
+ &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
+ struct gl_texture_object *texObj =
+ _mesa_select_tex_object(ctx, texUnit, target);
+ struct gl_texture_image *texImage =
+ _mesa_select_tex_image(ctx, texObj, target, level);
+ GLenum internalFormat = texImage->InternalFormat;
+
+
+ /* Need to check texture is compatible with source format.
+ */
+
+ if (!do_copy_texsubimage(intel_context(ctx),
+ intel_texture_image(texImage),
+ internalFormat,
+ xoffset, yoffset, x, y, width, height)) {
+
+ DBG("%s - fallback to swrast\n", __FUNCTION__);
+
+ _swrast_copy_texsubimage2d(ctx, target, level,
+ xoffset, yoffset, x, y, width, height);
+ }
+}
--- /dev/null
+#include "intel_context.h"
+#include "intel_tex.h"
+#include "texformat.h"
+#include "enums.h"
+
+/* It works out that this function is fine for all the supported
+ * hardware. However, there is still a need to map the formats onto
+ * hardware descriptors.
+ */
+/* Note that the i915 can actually support many more formats than
+ * these if we take the step of simply swizzling the colors
+ * immediately after sampling...
+ */
+const struct gl_texture_format *
+intelChooseTextureFormat(GLcontext * ctx, GLint internalFormat,
+ GLenum format, GLenum type)
+{
+ struct intel_context *intel = intel_context(ctx);
+ const GLboolean do32bpt = (intel->intelScreen->cpp == 4);
+
+ switch (internalFormat) {
+ case 4:
+ case GL_RGBA:
+ case GL_COMPRESSED_RGBA:
+ if (format == GL_BGRA) {
+ if (type == GL_UNSIGNED_BYTE || type == GL_UNSIGNED_INT_8_8_8_8_REV) {
+ return &_mesa_texformat_argb8888;
+ }
+ else if (type == GL_UNSIGNED_SHORT_4_4_4_4_REV) {
+ return &_mesa_texformat_argb4444;
+ }
+ else if (type == GL_UNSIGNED_SHORT_1_5_5_5_REV) {
+ return &_mesa_texformat_argb1555;
+ }
+ }
+ return do32bpt ? &_mesa_texformat_argb8888 : &_mesa_texformat_argb4444;
+
+ case 3:
+ case GL_RGB:
+ case GL_COMPRESSED_RGB:
+ if (format == GL_RGB && type == GL_UNSIGNED_SHORT_5_6_5) {
+ return &_mesa_texformat_rgb565;
+ }
+ return do32bpt ? &_mesa_texformat_argb8888 : &_mesa_texformat_rgb565;
+
+ case GL_RGBA8:
+ case GL_RGB10_A2:
+ case GL_RGBA12:
+ case GL_RGBA16:
+ return do32bpt ? &_mesa_texformat_argb8888 : &_mesa_texformat_argb4444;
+
+ case GL_RGBA4:
+ case GL_RGBA2:
+ return &_mesa_texformat_argb4444;
+
+ case GL_RGB5_A1:
+ return &_mesa_texformat_argb1555;
+
+ case GL_RGB8:
+ case GL_RGB10:
+ case GL_RGB12:
+ case GL_RGB16:
+ return &_mesa_texformat_argb8888;
+
+ case GL_RGB5:
+ case GL_RGB4:
+ case GL_R3_G3_B2:
+ return &_mesa_texformat_rgb565;
+
+ case GL_ALPHA:
+ case GL_ALPHA4:
+ case GL_ALPHA8:
+ case GL_ALPHA12:
+ case GL_ALPHA16:
+ case GL_COMPRESSED_ALPHA:
+ return &_mesa_texformat_a8;
+
+ case 1:
+ case GL_LUMINANCE:
+ case GL_LUMINANCE4:
+ case GL_LUMINANCE8:
+ case GL_LUMINANCE12:
+ case GL_LUMINANCE16:
+ case GL_COMPRESSED_LUMINANCE:
+ return &_mesa_texformat_l8;
+
+ case 2:
+ case GL_LUMINANCE_ALPHA:
+ case GL_LUMINANCE4_ALPHA4:
+ case GL_LUMINANCE6_ALPHA2:
+ case GL_LUMINANCE8_ALPHA8:
+ case GL_LUMINANCE12_ALPHA4:
+ case GL_LUMINANCE12_ALPHA12:
+ case GL_LUMINANCE16_ALPHA16:
+ case GL_COMPRESSED_LUMINANCE_ALPHA:
+ return &_mesa_texformat_al88;
+
+ case GL_INTENSITY:
+ case GL_INTENSITY4:
+ case GL_INTENSITY8:
+ case GL_INTENSITY12:
+ case GL_INTENSITY16:
+ case GL_COMPRESSED_INTENSITY:
+ return &_mesa_texformat_i8;
+
+ case GL_YCBCR_MESA:
+ if (type == GL_UNSIGNED_SHORT_8_8_MESA || type == GL_UNSIGNED_BYTE)
+ return &_mesa_texformat_ycbcr;
+ else
+ return &_mesa_texformat_ycbcr_rev;
+
+ case GL_COMPRESSED_RGB_FXT1_3DFX:
+ return &_mesa_texformat_rgb_fxt1;
+ case GL_COMPRESSED_RGBA_FXT1_3DFX:
+ return &_mesa_texformat_rgba_fxt1;
+
+ case GL_RGB_S3TC:
+ case GL_RGB4_S3TC:
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ return &_mesa_texformat_rgb_dxt1;
+
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+ return &_mesa_texformat_rgba_dxt1;
+
+ case GL_RGBA_S3TC:
+ case GL_RGBA4_S3TC:
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+ return &_mesa_texformat_rgba_dxt3;
+
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
+ return &_mesa_texformat_rgba_dxt5;
+
+ case GL_DEPTH_COMPONENT:
+ case GL_DEPTH_COMPONENT16:
+ case GL_DEPTH_COMPONENT24:
+ case GL_DEPTH_COMPONENT32:
+ return &_mesa_texformat_z16;
+
+ default:
+ fprintf(stderr, "unexpected texture format %s in %s\n",
+ _mesa_lookup_enum_by_nr(internalFormat), __FUNCTION__);
+ return NULL;
+ }
+
+ return NULL; /* never get here */
+}
--- /dev/null
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "glheader.h"
+#include "macros.h"
+#include "mtypes.h"
+#include "enums.h"
+#include "colortab.h"
+#include "convolve.h"
+#include "context.h"
+#include "simple_list.h"
+#include "texcompress.h"
+#include "texformat.h"
+#include "texobj.h"
+#include "texstore.h"
+
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_buffer_objects.h"
+#include "intel_batchbuffer.h"
+#include "intel_tex.h"
+#include "intel_ioctl.h"
+#include "intel_blit.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+/* Functions to store texture images. Where possible, mipmap trees
+ * will be created or further instantiated with image data; otherwise
+ * images will be stored in malloc'd memory. A validation step is
+ * required to pull those images into a mipmap tree, or otherwise
+ * decide a fallback is required.
+ */
+
+
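+/* Smallest l such that 2^l >= n, i.e. ceil(log2(n)) for n >= 1. */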
+static int
+logbase2(int n)
+{
+ GLint i = 1;
+ GLint log2 = 0;
+
+ while (n > i) {
+ i *= 2;
+ log2++;
+ }
+
+ return log2;
+}
+
+
+/* Store the image in plain (malloc'd) memory if (Border != 0) or (any
+ * dimension == 1).
+ *
+ * Otherwise, if max_level >= level >= min_level, create tree with
+ * space for textures from min_level down to max_level.
+ *
+ * Otherwise, create tree with space for textures from (level
+ * 0)..(1x1). Consider pruning this tree at a validation if the
+ * saving is worth it.
+ */
+static void
+guess_and_alloc_mipmap_tree(struct intel_context *intel,
+ struct intel_texture_object *intelObj,
+ struct intel_texture_image *intelImage)
+{
+ GLuint firstLevel;
+ GLuint lastLevel;
+ GLuint width = intelImage->base.Width;
+ GLuint height = intelImage->base.Height;
+ GLuint depth = intelImage->base.Depth;
+ GLuint l2width, l2height, l2depth;
+ GLuint i;
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (intelImage->base.Border)
+ return;
+
+ if (intelImage->level > intelObj->base.BaseLevel &&
+ (intelImage->base.Width == 1 ||
+ (intelObj->base.Target != GL_TEXTURE_1D &&
+ intelImage->base.Height == 1) ||
+ (intelObj->base.Target == GL_TEXTURE_3D &&
+ intelImage->base.Depth == 1)))
+ return;
+
+ /* If this image disrespects BaseLevel, allocate from level zero.
+ * Usually BaseLevel == 0, so it's unlikely to happen.
+ */
+ if (intelImage->level < intelObj->base.BaseLevel)
+ firstLevel = 0;
+ else
+ firstLevel = intelObj->base.BaseLevel;
+
+
+ /* Figure out image dimensions at start level.
+ */
+ for (i = intelImage->level; i > firstLevel; i--) {
+ width <<= 1;
+ if (height != 1)
+ height <<= 1;
+ if (depth != 1)
+ depth <<= 1;
+ }
+
+ /* Guess a reasonable value for lastLevel. This is probably going
+ * to be wrong fairly often and might mean that we have to look at
+ * resizable buffers, or require that buffers implement lazy
+ * pagetable arrangements.
+ */
+ if ((intelObj->base.MinFilter == GL_NEAREST ||
+ intelObj->base.MinFilter == GL_LINEAR) &&
+ intelImage->level == firstLevel) {
+ lastLevel = firstLevel;
+ }
+ else {
+ l2width = logbase2(width);
+ l2height = logbase2(height);
+ l2depth = logbase2(depth);
+ lastLevel = firstLevel + MAX2(MAX2(l2width, l2height), l2depth);
+ }
+
+ assert(!intelObj->mt);
+ intelObj->mt = intel_miptree_create(intel,
+ intelObj->base.Target,
+ intelImage->base.InternalFormat,
+ firstLevel,
+ lastLevel,
+ width,
+ height,
+ depth,
+ intelImage->base.TexFormat->TexelBytes,
+ intelImage->base.IsCompressed);
+
+ DBG("%s - success\n", __FUNCTION__);
+}
+
+
+
+
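+/* Map a cube-map face target to a face index in 0..5; all other targets
+ * use face 0.
+ */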
+static GLuint
+target_to_face(GLenum target)
+{
+ switch (target) {
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
+ return ((GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X);
+ default:
+ return 0;
+ }
+}
+
+/* There are actually quite a few combinations this will work for,
+ * more than what I've listed here.
+ */
+static GLboolean
+check_pbo_format(GLint internalFormat,
+ GLenum format, GLenum type,
+ const struct gl_texture_format *mesa_format)
+{
+ switch (internalFormat) {
+ case 4:
+ case GL_RGBA:
+ return (format == GL_BGRA &&
+ (type == GL_UNSIGNED_BYTE ||
+ type == GL_UNSIGNED_INT_8_8_8_8_REV) &&
+ mesa_format == &_mesa_texformat_argb8888);
+ case 3:
+ case GL_RGB:
+ return (format == GL_RGB &&
+ type == GL_UNSIGNED_SHORT_5_6_5 &&
+ mesa_format == &_mesa_texformat_rgb565);
+ case GL_YCBCR_MESA:
+ return (type == GL_UNSIGNED_SHORT_8_8_MESA || type == GL_UNSIGNED_BYTE);
+ default:
+ return GL_FALSE;
+ }
+}
+
+
+/* XXX: Do this for TexSubImage also:
+ */
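+/* Blit path: copy straight from the bound pixel buffer object into the
+ * miptree's region with intelEmitCopyBlit, avoiding a CPU copy.
+ */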
+static GLboolean
+try_pbo_upload(struct intel_context *intel,
+ struct intel_texture_image *intelImage,
+ const struct gl_pixelstore_attrib *unpack,
+ GLint internalFormat,
+ GLint width, GLint height,
+ GLenum format, GLenum type, const void *pixels)
+{
+ struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
+ GLuint src_offset, src_stride;
+ GLuint dst_offset, dst_stride;
+
+ if (!pbo ||
+ intel->ctx._ImageTransferState ||
+ unpack->SkipPixels || unpack->SkipRows) {
+ _mesa_printf("%s: failure 1\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ src_offset = (GLuint) pixels;
+
+ if (unpack->RowLength > 0)
+ src_stride = unpack->RowLength;
+ else
+ src_stride = width;
+
+ dst_offset = intel_miptree_image_offset(intelImage->mt,
+ intelImage->face,
+ intelImage->level);
+
+ dst_stride = intelImage->mt->pitch;
+
+ intelFlush(&intel->ctx);
+ LOCK_HARDWARE(intel);
+ {
+ struct _DriBufferObject *src_buffer =
+ intel_bufferobj_buffer(intel, pbo, INTEL_READ);
+ struct _DriBufferObject *dst_buffer =
+ intel_region_buffer(intel->intelScreen, intelImage->mt->region,
+ INTEL_WRITE_FULL);
+
+
+ intelEmitCopyBlit(intel,
+ intelImage->mt->cpp,
+ src_stride, src_buffer, src_offset,
+ dst_stride, dst_buffer, dst_offset,
+ 0, 0, 0, 0, width, height);
+
+ intel_batchbuffer_flush(intel->batch);
+ }
+ UNLOCK_HARDWARE(intel);
+
+ return GL_TRUE;
+}
+
+
+
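+/* Zero-copy path: if the PBO's layout matches the miptree image exactly
+ * (same stride, zero offsets), attach the PBO as the region's backing
+ * store instead of copying.
+ */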
+static GLboolean
+try_pbo_zcopy(struct intel_context *intel,
+ struct intel_texture_image *intelImage,
+ const struct gl_pixelstore_attrib *unpack,
+ GLint internalFormat,
+ GLint width, GLint height,
+ GLenum format, GLenum type, const void *pixels)
+{
+ struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
+ GLuint src_offset, src_stride;
+ GLuint dst_offset, dst_stride;
+
+ if (!pbo ||
+ intel->ctx._ImageTransferState ||
+ unpack->SkipPixels || unpack->SkipRows) {
+ _mesa_printf("%s: failure 1\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ src_offset = (GLuint) pixels;
+
+ if (unpack->RowLength > 0)
+ src_stride = unpack->RowLength;
+ else
+ src_stride = width;
+
+ dst_offset = intel_miptree_image_offset(intelImage->mt,
+ intelImage->face,
+ intelImage->level);
+
+ dst_stride = intelImage->mt->pitch;
+
+ if (src_stride != dst_stride || dst_offset != 0 || src_offset != 0) {
+ _mesa_printf("%s: failure 2\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ intel_region_attach_pbo(intel->intelScreen, intelImage->mt->region, pbo);
+
+ return GL_TRUE;
+}
+
+
+
+
+
+
+static void
+intelTexImage(GLcontext * ctx,
+ GLint dims,
+ GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint depth,
+ GLint border,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *unpack,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_object *intelObj = intel_texture_object(texObj);
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+ GLint postConvWidth = width;
+ GLint postConvHeight = height;
+ GLint texelBytes, sizeInBytes;
+ GLuint dstRowStride;
+
+
+ DBG("%s target %s level %d %dx%dx%d border %d\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(target), level, width, height, depth, border);
+
+ intelFlush(ctx);
+
+ intelImage->face = target_to_face(target);
+ intelImage->level = level;
+
+ if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
+ _mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
+ &postConvHeight);
+ }
+
+ /* choose the texture format */
+ texImage->TexFormat = intelChooseTextureFormat(ctx, internalFormat,
+ format, type);
+
+ assert(texImage->TexFormat);
+
+ switch (dims) {
+ case 1:
+ texImage->FetchTexelc = texImage->TexFormat->FetchTexel1D;
+ texImage->FetchTexelf = texImage->TexFormat->FetchTexel1Df;
+ break;
+ case 2:
+ texImage->FetchTexelc = texImage->TexFormat->FetchTexel2D;
+ texImage->FetchTexelf = texImage->TexFormat->FetchTexel2Df;
+ break;
+ case 3:
+ texImage->FetchTexelc = texImage->TexFormat->FetchTexel3D;
+ texImage->FetchTexelf = texImage->TexFormat->FetchTexel3Df;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ texelBytes = texImage->TexFormat->TexelBytes;
+
+
+ /* Minimum pitch of 32 bytes */
+ if (postConvWidth * texelBytes < 32) {
+ postConvWidth = 32 / texelBytes;
+ texImage->RowStride = postConvWidth;
+ }
+
+ assert(texImage->RowStride == postConvWidth);
+
+ /* Release the reference to a potentially orphaned buffer.
+ * Release any old malloced memory.
+ */
+ if (intelImage->mt) {
+ intel_miptree_release(intel, &intelImage->mt);
+ assert(!texImage->Data);
+ }
+ else if (texImage->Data) {
+ _mesa_align_free(texImage->Data);
+ }
+
+ /* If this is the only texture image in the tree, could call
+ * bmBufferData with NULL data to free the old block and avoid
+ * waiting on any outstanding fences.
+ */
+ if (intelObj->mt &&
+ intelObj->mt->first_level == level &&
+ intelObj->mt->last_level == level &&
+ intelObj->mt->target != GL_TEXTURE_CUBE_MAP_ARB &&
+ !intel_miptree_match_image(intelObj->mt, &intelImage->base,
+ intelImage->face, intelImage->level)) {
+
+ DBG("release it\n");
+ intel_miptree_release(intel, &intelObj->mt);
+ assert(!intelObj->mt);
+ }
+
+ if (!intelObj->mt) {
+ guess_and_alloc_mipmap_tree(intel, intelObj, intelImage);
+ if (!intelObj->mt) {
+ DBG("guess_and_alloc_mipmap_tree: failed\n");
+ }
+ }
+
+
+ assert(!intelImage->mt);
+
+ if (intelObj->mt &&
+ intel_miptree_match_image(intelObj->mt, &intelImage->base,
+ intelImage->face, intelImage->level)) {
+
+ intel_miptree_reference(&intelImage->mt, intelObj->mt);
+ assert(intelImage->mt);
+ }
+
+ if (!intelImage->mt)
+ DBG("XXX: Image did not fit into tree - storing in local memory!\n");
+
+ /* PBO fastpaths:
+ */
+ if (dims <= 2 &&
+ intelImage->mt &&
+ intel_buffer_object(unpack->BufferObj) &&
+ check_pbo_format(internalFormat, format,
+ type, intelImage->base.TexFormat)) {
+
+ DBG("trying pbo upload\n");
+
+ /* Attempt to texture directly from PBO data (zero copy upload).
+ *
+ * Currently disable as it can lead to worse as well as better
+ * performance (in particular when intel_region_cow() is
+ * required).
+ */
+ if (intelObj->mt == intelImage->mt &&
+ intelObj->mt->first_level == level &&
+ intelObj->mt->last_level == level) {
+
+ if (try_pbo_zcopy(intel, intelImage, unpack,
+ internalFormat,
+ width, height, format, type, pixels)) {
+
+ DBG("pbo zcopy upload succeeded\n");
+ return;
+ }
+ }
+
+
+ /* Otherwise, attempt to use the blitter for PBO image uploads.
+ */
+ if (try_pbo_upload(intel, intelImage, unpack,
+ internalFormat,
+ width, height, format, type, pixels)) {
+ DBG("pbo upload succeeded\n");
+ return;
+ }
+
+ DBG("pbo upload failed\n");
+ }
+
+
+
+ /* intelCopyTexImage calls this function with pixels == NULL, with
+ * the expectation that the mipmap tree will be set up but nothing
+ * more will be done. This is where those calls return:
+ */
+ pixels = _mesa_validate_pbo_teximage(ctx, dims, width, height, 1,
+ format, type,
+ pixels, unpack, "glTexImage");
+ if (!pixels)
+ return;
+
+
+ if (intelImage->mt)
+ intel_region_idle(intel->intelScreen, intelImage->mt->region);
+
+ LOCK_HARDWARE(intel);
+
+ if (intelImage->mt) {
+ texImage->Data = intel_miptree_image_map(intel,
+ intelImage->mt,
+ intelImage->face,
+ intelImage->level,
+ &dstRowStride,
+ intelImage->base.ImageOffsets);
+ }
+ else {
+ /* Allocate regular memory and store the image there temporarily. */
+ if (texImage->IsCompressed) {
+ sizeInBytes = texImage->CompressedSize;
+ dstRowStride =
+ _mesa_compressed_row_stride(texImage->InternalFormat, width);
+ assert(dims != 3);
+ }
+ else {
+ dstRowStride = postConvWidth * texelBytes;
+ sizeInBytes = depth * dstRowStride * postConvHeight;
+ }
+
+ texImage->Data = malloc(sizeInBytes);
+ }
+
+ DBG("Upload image %dx%dx%d row_len %x "
+ "pitch %x\n",
+ width, height, depth, width * texelBytes, dstRowStride);
+
+ /* Copy data. Would like to know when it's ok for us to eg. use
+ * the blitter to copy. Or, use the hardware to do the format
+ * conversion and copy:
+ */
+ if (!texImage->TexFormat->StoreImage(ctx, dims,
+ texImage->_BaseFormat,
+ texImage->TexFormat,
+ texImage->Data, 0, 0, 0, /* dstX/Y/Zoffset */
+ dstRowStride,
+ texImage->ImageOffsets,
+ width, height, depth,
+ format, type, pixels, unpack)) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");
+ }
+
+ _mesa_unmap_teximage_pbo(ctx, unpack);
+
+ if (intelImage->mt) {
+ intel_miptree_image_unmap(intel, intelImage->mt);
+ texImage->Data = NULL;
+ }
+
+ UNLOCK_HARDWARE(intel);
+
+#if 0
+ /* GL_SGIS_generate_mipmap -- this can be accelerated now.
+ */
+ if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
+ intel_generate_mipmap(ctx, target,
+ &ctx->Texture.Unit[ctx->Texture.CurrentUnit],
+ texObj);
+ }
+#endif
+}
+
+void
+intelTexImage3D(GLcontext * ctx,
+ GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint depth,
+ GLint border,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *unpack,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intelTexImage(ctx, 3, target, level,
+ internalFormat, width, height, depth, border,
+ format, type, pixels, unpack, texObj, texImage);
+}
+
+
+void
+intelTexImage2D(GLcontext * ctx,
+ GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint border,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *unpack,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intelTexImage(ctx, 2, target, level,
+ internalFormat, width, height, 1, border,
+ format, type, pixels, unpack, texObj, texImage);
+}
+
+void
+intelTexImage1D(GLcontext * ctx,
+ GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint border,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *unpack,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intelTexImage(ctx, 1, target, level,
+ internalFormat, width, 1, 1, border,
+ format, type, pixels, unpack, texObj, texImage);
+}
+
+
+
+/**
+ * Need to map texture image into memory before copying image data,
+ * then unmap it.
+ */
+void
+intelGetTexImage(GLcontext * ctx, GLenum target, GLint level,
+ GLenum format, GLenum type, GLvoid * pixels,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+
+ /* Map */
+ if (intelImage->mt) {
+ /* Image is stored in hardware format in a buffer managed by the
+ * kernel. Need to explicitly map and unmap it.
+ */
+ intelImage->base.Data =
+ intel_miptree_image_map(intel,
+ intelImage->mt,
+ intelImage->face,
+ intelImage->level,
+ &intelImage->base.RowStride,
+ intelImage->base.ImageOffsets);
+ }
+ else {
+ /* Otherwise, the image should actually be stored in
+ * intelImage->base.Data. This is pretty confusing for
+ * everybody; I'd much prefer to separate the two functions of
+ * texImage->Data - storage for texture images in main memory
+ * and access (i.e. mappings) of images. In other words, we'd
+ * create a new texImage->Map field and leave Data simply for
+ * storage.
+ */
+ assert(intelImage->base.Data);
+ }
+
+ _mesa_get_teximage(ctx, target, level, format, type, pixels,
+ texObj, texImage);
+
+ /* Unmap */
+ if (intelImage->mt) {
+ intel_miptree_image_unmap(intel, intelImage->mt);
+ intelImage->base.Data = NULL;
+ }
+}
--- /dev/null
+
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "mtypes.h"
+#include "texobj.h"
+#include "texstore.h"
+#include "enums.h"
+
+#include "intel_context.h"
+#include "intel_tex.h"
+#include "intel_mipmap_tree.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+static void
+intelTexSubimage(GLcontext * ctx,
+ GLint dims,
+ GLenum target, GLint level,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLint width, GLint height, GLint depth,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+ GLuint dstImageStride;
+ GLuint dstRowStride;
+
+ DBG("%s target %s level %d offset %d,%d %dx%d\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(target),
+ level, xoffset, yoffset, width, height);
+
+ intelFlush(ctx);
+
+ pixels =
+ _mesa_validate_pbo_teximage(ctx, dims, width, height, depth, format,
+ type, pixels, packing, "glTexSubImage2D");
+ if (!pixels)
+ return;
+
+ if (intelImage->mt)
+ intel_region_idle(intel->intelScreen, intelImage->mt->region);
+
+ LOCK_HARDWARE(intel);
+
+ /* Map buffer if necessary. Need to lock to prevent other contexts
+ * from uploading the buffer under us.
+ */
+ if (intelImage->mt)
+ texImage->Data = intel_miptree_image_map(intel,
+ intelImage->mt,
+ intelImage->face,
+ intelImage->level,
+ &dstRowStride,
+ &dstImageStride);
+
+ assert(dstRowStride);
+
+ if (!texImage->TexFormat->StoreImage(ctx, dims, texImage->_BaseFormat,
+ texImage->TexFormat,
+ texImage->Data,
+ xoffset, yoffset, zoffset,
+ dstRowStride,
+ texImage->ImageOffsets,
+ width, height, depth,
+ format, type, pixels, packing)) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
+ }
+
+#if 0
+ /* GL_SGIS_generate_mipmap */
+ if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
+ _mesa_generate_mipmap(ctx, target,
+ &ctx->Texture.Unit[ctx->Texture.CurrentUnit],
+ texObj);
+ }
+#endif
+
+ _mesa_unmap_teximage_pbo(ctx, packing);
+
+ if (intelImage->mt) {
+ intel_miptree_image_unmap(intel, intelImage->mt);
+ texImage->Data = NULL;
+ }
+
+ UNLOCK_HARDWARE(intel);
+}
+
+
+
+
+
+void
+intelTexSubImage3D(GLcontext * ctx,
+ GLenum target,
+ GLint level,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLsizei width, GLsizei height, GLsizei depth,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+
+ intelTexSubimage(ctx, 3,
+ target, level,
+ xoffset, yoffset, zoffset,
+ width, height, depth,
+ format, type, pixels, packing, texObj, texImage);
+
+}
+
+
+
+void
+intelTexSubImage2D(GLcontext * ctx,
+ GLenum target,
+ GLint level,
+ GLint xoffset, GLint yoffset,
+ GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+
+ intelTexSubimage(ctx, 2,
+ target, level,
+ xoffset, yoffset, 0,
+ width, height, 1,
+ format, type, pixels, packing, texObj, texImage);
+
+}
+
+
+void
+intelTexSubImage1D(GLcontext * ctx,
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLsizei width,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intelTexSubimage(ctx, 1,
+ target, level,
+ xoffset, 0, 0,
+ width, 1, 1,
+ format, type, pixels, packing, texObj, texImage);
+
+}
--- /dev/null
+#include "mtypes.h"
+#include "macros.h"
+
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_tex.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+/**
+ * Compute which mipmap levels really need to be sent to the hardware.
+ * This depends on the base image size, GL_TEXTURE_MIN_LOD,
+ * GL_TEXTURE_MAX_LOD, GL_TEXTURE_BASE_LEVEL, and GL_TEXTURE_MAX_LEVEL.
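+ *
+ * Illustrative example: for a 256x256 base image (MaxLog2 == 8) with
+ * BaseLevel == 0, MaxLevel == 10, MinLod == 1.2 and MaxLod == 3.6, the
+ * mipmapped-filter path below yields firstLevel == 1 and lastLevel == 4.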
+ */
+static void
+intel_calculate_first_last_level(struct intel_texture_object *intelObj)
+{
+ struct gl_texture_object *tObj = &intelObj->base;
+ const struct gl_texture_image *const baseImage =
+ tObj->Image[0][tObj->BaseLevel];
+
+ /* These must be signed values. MinLod and MaxLod can be negative numbers,
+ * and having firstLevel and lastLevel as signed prevents the need for
+ * extra sign checks.
+ */
+ int firstLevel;
+ int lastLevel;
+
+ /* Yes, this looks overly complicated, but it's all needed.
+ */
+ switch (tObj->Target) {
+ case GL_TEXTURE_1D:
+ case GL_TEXTURE_2D:
+ case GL_TEXTURE_3D:
+ case GL_TEXTURE_CUBE_MAP:
+ if (tObj->MinFilter == GL_NEAREST || tObj->MinFilter == GL_LINEAR) {
+ /* GL_NEAREST and GL_LINEAR only care about GL_TEXTURE_BASE_LEVEL.
+ */
+ firstLevel = lastLevel = tObj->BaseLevel;
+ }
+ else {
+ firstLevel = tObj->BaseLevel + (GLint) (tObj->MinLod + 0.5);
+ firstLevel = MAX2(firstLevel, tObj->BaseLevel);
+ lastLevel = tObj->BaseLevel + (GLint) (tObj->MaxLod + 0.5);
+ lastLevel = MAX2(lastLevel, tObj->BaseLevel);
+ lastLevel = MIN2(lastLevel, tObj->BaseLevel + baseImage->MaxLog2);
+ lastLevel = MIN2(lastLevel, tObj->MaxLevel);
+ lastLevel = MAX2(firstLevel, lastLevel); /* need at least one level */
+ }
+ break;
+ case GL_TEXTURE_RECTANGLE_NV:
+ case GL_TEXTURE_4D_SGIS:
+ firstLevel = lastLevel = 0;
+ break;
+ default:
+ return;
+ }
+
+ /* save these values */
+ intelObj->firstLevel = firstLevel;
+ intelObj->lastLevel = lastLevel;
+}
+
+static void
+copy_image_data_to_tree(struct intel_context *intel,
+ struct intel_texture_object *intelObj,
+ struct intel_texture_image *intelImage)
+{
+ if (intelImage->mt) {
+ /* Copy potentially with the blitter:
+ */
+ intel_miptree_image_copy(intel,
+ intelObj->mt,
+ intelImage->face,
+ intelImage->level, intelImage->mt);
+
+ intel_miptree_release(intel, &intelImage->mt);
+ }
+ else {
+ assert(intelImage->base.Data != NULL);
+
+ /* More straightforward upload.
+ */
+ intel_miptree_image_data(intel,
+ intelObj->mt,
+ intelImage->face,
+ intelImage->level,
+ intelImage->base.Data,
+ intelImage->base.RowStride,
+ intelImage->base.RowStride *
+ intelImage->base.Height);
+ _mesa_align_free(intelImage->base.Data);
+ intelImage->base.Data = NULL;
+ }
+
+ intel_miptree_reference(&intelImage->mt, intelObj->mt);
+}
+
+
+/* Make sure the texture object's miptree covers all the currently active
+ * mipmap levels, (re)creating the tree if necessary and pulling in any
+ * images still held in main memory or in other trees.  Returns GL_FALSE
+ * if the texture cannot be handled in hardware (eg. bordered textures),
+ * GL_TRUE otherwise.
+ */
+GLuint
+intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
+{
+ struct gl_texture_object *tObj = intel->ctx.Texture.Unit[unit]._Current;
+ struct intel_texture_object *intelObj = intel_texture_object(tObj);
+
+ GLuint face, i;
+ GLuint nr_faces = 0;
+ struct intel_texture_image *firstImage;
+
+ /* We know/require this is true by now:
+ */
+ assert(intelObj->base.Complete);
+
+ /* What levels must the tree include at a minimum?
+ */
+ intel_calculate_first_last_level(intelObj);
+ firstImage =
+ intel_texture_image(intelObj->base.Image[0][intelObj->firstLevel]);
+
+ /* Fallback case:
+ */
+ if (firstImage->base.Border) {
+ if (intelObj->mt) {
+ intel_miptree_release(intel, &intelObj->mt);
+ }
+ return GL_FALSE;
+ }
+
+
+ /* If both firstImage and intelObj have a tree which can contain
+ * all active images, favour firstImage. Note that because of the
+ * completeness requirement, we know that the image dimensions
+ * will match.
+ */
+ if (firstImage->mt &&
+ firstImage->mt != intelObj->mt &&
+ firstImage->mt->first_level <= intelObj->firstLevel &&
+ firstImage->mt->last_level >= intelObj->lastLevel) {
+
+ if (intelObj->mt)
+ intel_miptree_release(intel, &intelObj->mt);
+
+ intel_miptree_reference(&intelObj->mt, firstImage->mt);
+ }
+
+ /* Check tree can hold all active levels. Check tree matches
+ * target, imageFormat, etc.
+ *
+ * XXX: For some layouts (eg i945?), the test might have to be
+ * first_level == firstLevel, as the tree isn't valid except at the
+ * original start level. Hope to get around this by
+ * programming minLod, maxLod, baseLevel into the hardware and
+ * leaving the tree alone.
+ */
+ if (intelObj->mt &&
+ ((intelObj->mt->first_level > intelObj->firstLevel) ||
+ (intelObj->mt->last_level < intelObj->lastLevel) ||
+ (intelObj->mt->internal_format != firstImage->base.InternalFormat))) {
+ intel_miptree_release(intel, &intelObj->mt);
+ }
+
+
+ /* May need to create a new tree:
+ */
+ if (!intelObj->mt) {
+ intelObj->mt = intel_miptree_create(intel,
+ intelObj->base.Target,
+ firstImage->base.InternalFormat,
+ intelObj->firstLevel,
+ intelObj->lastLevel,
+ firstImage->base.Width,
+ firstImage->base.Height,
+ firstImage->base.Depth,
+ firstImage->base.TexFormat->
+ TexelBytes,
+ firstImage->base.IsCompressed);
+ }
+
+ /* Pull in any images not in the object's tree:
+ */
+ nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
+ for (face = 0; face < nr_faces; face++) {
+ for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
+ struct intel_texture_image *intelImage =
+ intel_texture_image(intelObj->base.Image[face][i]);
+
+ /* Need to import images held in main memory or in other trees.
+ */
+ if (intelObj->mt != intelImage->mt) {
+ copy_image_data_to_tree(intel, intelObj, intelImage);
+ }
+ }
+ }
+
+ return GL_TRUE;
+}
+
+
+
+void
+intel_tex_map_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj)
+{
+ GLuint nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
+ GLuint face, i;
+
+ DBG("%s\n", __FUNCTION__);
+
+ for (face = 0; face < nr_faces; face++) {
+ for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
+ struct intel_texture_image *intelImage =
+ intel_texture_image(intelObj->base.Image[face][i]);
+
+ if (intelImage->mt) {
+ intelImage->base.Data =
+ intel_miptree_image_map(intel,
+ intelImage->mt,
+ intelImage->face,
+ intelImage->level,
+ &intelImage->base.RowStride,
+ intelImage->base.ImageOffsets);
+ /* convert stride to texels, not bytes */
+ intelImage->base.RowStride /= intelImage->mt->cpp;
+/* intelImage->base.ImageStride /= intelImage->mt->cpp; */
+ }
+ }
+ }
+}
+
+
+
+void
+intel_tex_unmap_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj)
+{
+ GLuint nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
+ GLuint face, i;
+
+ for (face = 0; face < nr_faces; face++) {
+ for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
+ struct intel_texture_image *intelImage =
+ intel_texture_image(intelObj->base.Image[face][i]);
+
+ if (intelImage->mt) {
+ intel_miptree_image_unmap(intel, intelImage->mt);
+ intelImage->base.Data = NULL;
+ }
+ }
+ }
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "context.h"
+#include "macros.h"
+#include "enums.h"
+#include "texobj.h"
+#include "state.h"
+#include "dd.h"
+
+#include "swrast/swrast.h"
+#include "swrast_setup/swrast_setup.h"
+#include "tnl/t_context.h"
+#include "tnl/t_pipeline.h"
+#include "tnl/t_vertex.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_tris.h"
+#include "intel_batchbuffer.h"
+#include "intel_reg.h"
+#include "intel_span.h"
+#include "intel_tex.h"
+
+static void intelRenderPrimitive(GLcontext * ctx, GLenum prim);
+static void intelRasterPrimitive(GLcontext * ctx, GLenum rprim,
+ GLuint hwprim);
+
+/* Close off the current inline primitive: patch the slot reserved at
+ * prim.start_ptr with the final _3DPRIMITIVE command and dword count,
+ * or rewind the batch pointer and discard the primitive if no vertices
+ * were added after the header.
+ */
+static void
+intel_flush_inline_primitive(struct intel_context *intel)
+{
+ GLuint used = intel->batch->ptr - intel->prim.start_ptr;
+
+ assert(intel->prim.primitive != ~0);
+
+/* _mesa_printf("/\n"); */
+
+ if (used < 8)
+ goto do_discard;
+
+ *(int *) intel->prim.start_ptr = (_3DPRIMITIVE |
+ intel->prim.primitive | (used / 4 - 2));
+
+ goto finished;
+
+ do_discard:
+ intel->batch->ptr -= used;
+
+ finished:
+ intel->prim.primitive = ~0;
+ intel->prim.start_ptr = 0;
+ intel->prim.flush = 0;
+}
+
+
+/* Begin an inline primitive: emit the current state, then reserve a slot
+ * in the batchbuffer for the primitive command, which is filled in later
+ * by intel_flush_inline_primitive().
+ */
+void
+intelStartInlinePrimitive(struct intel_context *intel,
+ GLuint prim, GLuint batch_flags)
+{
+ BATCH_LOCALS;
+
+ intel->vtbl.emit_state(intel);
+
+ /* Need to make sure at the very least that we don't wrap
+ * batchbuffers in BEGIN_BATCH below, otherwise the primitive will
+ * be emitted to a batchbuffer missing the required full-state
+ * preamble.
+ */
+ if (intel_batchbuffer_space(intel->batch) < 100) {
+ intel_batchbuffer_flush(intel->batch);
+ intel->vtbl.emit_state(intel);
+ }
+
+/* _mesa_printf("%s *", __progname); */
+
+ /* Emit a slot which will be filled with the inline primitive
+ * command later.
+ */
+ BEGIN_BATCH(2, batch_flags);
+ OUT_BATCH(0);
+
+ intel->prim.start_ptr = intel->batch->ptr;
+ intel->prim.primitive = prim;
+ intel->prim.flush = intel_flush_inline_primitive;
+
+ OUT_BATCH(0);
+ ADVANCE_BATCH();
+
+/* _mesa_printf(">"); */
+}
+
+
+void
+intelWrapInlinePrimitive(struct intel_context *intel)
+{
+ GLuint prim = intel->prim.primitive;
+ GLuint batchflags = intel->batch->flags;
+
+ intel_flush_inline_primitive(intel);
+ intel_batchbuffer_flush(intel->batch);
+ intelStartInlinePrimitive(intel, prim, batchflags); /* ??? */
+}
+
+GLuint *
+intelExtendInlinePrimitive(struct intel_context *intel, GLuint dwords)
+{
+ GLuint sz = dwords * sizeof(GLuint);
+ GLuint *ptr;
+
+ assert(intel->prim.flush == intel_flush_inline_primitive);
+
+ if (intel_batchbuffer_space(intel->batch) < sz)
+ intelWrapInlinePrimitive(intel);
+
+/* _mesa_printf("."); */
+
+ intel->vtbl.assert_not_dirty(intel);
+
+ ptr = (GLuint *) intel->batch->ptr;
+ intel->batch->ptr += sz;
+
+ return ptr;
+}
+
+
+
+/***********************************************************************
+ * Emit primitives as inline vertices *
+ ***********************************************************************/
+
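+/* Copy 'vertsize' dwords of vertex 'v' into the batch pointer 'vb',
+ * advancing 'vb' past the copied data; the i386 variant streams the
+ * copy with 'rep movsl'.
+ */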
+#ifdef __i386__
+#define COPY_DWORDS( j, vb, vertsize, v ) \
+do { \
+ int __tmp; \
+ __asm__ __volatile__( "rep ; movsl" \
+ : "=%c" (j), "=D" (vb), "=S" (__tmp) \
+ : "0" (vertsize), \
+ "D" ((long)vb), \
+ "S" ((long)v) ); \
+} while (0)
+#else
+#define COPY_DWORDS( j, vb, vertsize, v ) \
+do { \
+ for ( j = 0 ; j < vertsize ; j++ ) { \
+ vb[j] = ((GLuint *)v)[j]; \
+ } \
+ vb += vertsize; \
+} while (0)
+#endif
+
+static void
+intel_draw_quad(struct intel_context *intel,
+ intelVertexPtr v0,
+ intelVertexPtr v1, intelVertexPtr v2, intelVertexPtr v3)
+{
+ GLuint vertsize = intel->vertex_size;
+ GLuint *vb = intelExtendInlinePrimitive(intel, 6 * vertsize);
+ int j;
+
+ COPY_DWORDS(j, vb, vertsize, v0);
+ COPY_DWORDS(j, vb, vertsize, v1);
+
+ /* If smooth shading, draw like a trifan which gives better
+ * rasterization. Otherwise draw as two triangles with provoking
+ * vertex in third position as required for flat shading.
+ */
+ if (intel->ctx.Light.ShadeModel == GL_FLAT) {
+ COPY_DWORDS(j, vb, vertsize, v3);
+ COPY_DWORDS(j, vb, vertsize, v1);
+ }
+ else {
+ COPY_DWORDS(j, vb, vertsize, v2);
+ COPY_DWORDS(j, vb, vertsize, v0);
+ }
+
+ COPY_DWORDS(j, vb, vertsize, v2);
+ COPY_DWORDS(j, vb, vertsize, v3);
+}
+
+static void
+intel_draw_triangle(struct intel_context *intel,
+ intelVertexPtr v0, intelVertexPtr v1, intelVertexPtr v2)
+{
+ GLuint vertsize = intel->vertex_size;
+ GLuint *vb = intelExtendInlinePrimitive(intel, 3 * vertsize);
+ int j;
+
+ COPY_DWORDS(j, vb, vertsize, v0);
+ COPY_DWORDS(j, vb, vertsize, v1);
+ COPY_DWORDS(j, vb, vertsize, v2);
+}
+
+
+static void
+intel_draw_line(struct intel_context *intel,
+ intelVertexPtr v0, intelVertexPtr v1)
+{
+ GLuint vertsize = intel->vertex_size;
+ GLuint *vb = intelExtendInlinePrimitive(intel, 2 * vertsize);
+ int j;
+
+ COPY_DWORDS(j, vb, vertsize, v0);
+ COPY_DWORDS(j, vb, vertsize, v1);
+}
+
+
+static void
+intel_draw_point(struct intel_context *intel, intelVertexPtr v0)
+{
+ GLuint vertsize = intel->vertex_size;
+ GLuint *vb = intelExtendInlinePrimitive(intel, vertsize);
+ int j;
+
+ /* Adjust for sub pixel position -- still required for conform. */
+ *(float *) &vb[0] = v0->v.x - 0.125;
+ *(float *) &vb[1] = v0->v.y - 0.125;
+ for (j = 2; j < vertsize; j++)
+ vb[j] = v0->ui[j];
+}
+
+
+
+/***********************************************************************
+ * Fixup for ARB_point_parameters *
+ ***********************************************************************/
+
+/* Currently not working - VERT_ATTRIB_POINTSIZE isn't correctly
+ * represented in the fragment program InputsRead field.
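+ *
+ * The intent is to implement ARB_point_parameters in software: clamp the
+ * point size against Point.MinSize/MaxSize and Point.Threshold, and for
+ * points below the threshold attenuate alpha by the squared ratio of the
+ * requested size to the threshold, as the code below does.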
+ */
+static void
+intel_atten_point(struct intel_context *intel, intelVertexPtr v0)
+{
+ GLcontext *ctx = &intel->ctx;
+ GLfloat psz[4], col[4], restore_psz, restore_alpha;
+
+ _tnl_get_attr(ctx, v0, _TNL_ATTRIB_POINTSIZE, psz);
+ _tnl_get_attr(ctx, v0, _TNL_ATTRIB_COLOR0, col);
+
+ restore_psz = psz[0];
+ restore_alpha = col[3];
+
+ if (psz[0] >= ctx->Point.Threshold) {
+ psz[0] = MIN2(psz[0], ctx->Point.MaxSize);
+ }
+ else {
+ GLfloat dsize = psz[0] / ctx->Point.Threshold;
+ psz[0] = MAX2(ctx->Point.Threshold, ctx->Point.MinSize);
+ col[3] *= dsize * dsize;
+ }
+
+ if (psz[0] < 1.0)
+ psz[0] = 1.0;
+
+ if (restore_psz != psz[0] || restore_alpha != col[3]) {
+ _tnl_set_attr(ctx, v0, _TNL_ATTRIB_POINTSIZE, psz);
+ _tnl_set_attr(ctx, v0, _TNL_ATTRIB_COLOR0, col);
+
+ intel_draw_point(intel, v0);
+
+ psz[0] = restore_psz;
+ col[3] = restore_alpha;
+
+ _tnl_set_attr(ctx, v0, _TNL_ATTRIB_POINTSIZE, psz);
+ _tnl_set_attr(ctx, v0, _TNL_ATTRIB_COLOR0, col);
+ }
+ else
+ intel_draw_point(intel, v0);
+}
+
+
+
+
+
+/***********************************************************************
+ * Fixup for I915 WPOS texture coordinate *
+ ***********************************************************************/
+
+
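+/* The i915 fragment shader reads fragment.position (WPOS) from a texture
+ * coordinate slot.  These helpers copy the window-space x/y/z/w from the
+ * start of each vertex into that slot (wpos_offset/wpos_size) before the
+ * primitive is emitted.
+ */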
+
+static void
+intel_wpos_triangle(struct intel_context *intel,
+ intelVertexPtr v0, intelVertexPtr v1, intelVertexPtr v2)
+{
+ GLuint offset = intel->wpos_offset;
+ GLuint size = intel->wpos_size;
+
+ __memcpy(((char *) v0) + offset, v0, size);
+ __memcpy(((char *) v1) + offset, v1, size);
+ __memcpy(((char *) v2) + offset, v2, size);
+
+ intel_draw_triangle(intel, v0, v1, v2);
+}
+
+
+static void
+intel_wpos_line(struct intel_context *intel,
+ intelVertexPtr v0, intelVertexPtr v1)
+{
+ GLuint offset = intel->wpos_offset;
+ GLuint size = intel->wpos_size;
+
+ __memcpy(((char *) v0) + offset, v0, size);
+ __memcpy(((char *) v1) + offset, v1, size);
+
+ intel_draw_line(intel, v0, v1);
+}
+
+
+static void
+intel_wpos_point(struct intel_context *intel, intelVertexPtr v0)
+{
+ GLuint offset = intel->wpos_offset;
+ GLuint size = intel->wpos_size;
+
+ __memcpy(((char *) v0) + offset, v0, size);
+
+ intel_draw_point(intel, v0);
+}
+
+
+
+
+
+
+/***********************************************************************
+ * Macros for t_dd_tritmp.h to draw basic primitives *
+ ***********************************************************************/
+
+#define TRI( a, b, c ) \
+do { \
+ if (DO_FALLBACK) \
+ intel->draw_tri( intel, a, b, c ); \
+ else \
+ intel_draw_triangle( intel, a, b, c ); \
+} while (0)
+
+#define QUAD( a, b, c, d ) \
+do { \
+ if (DO_FALLBACK) { \
+ intel->draw_tri( intel, a, b, d ); \
+ intel->draw_tri( intel, b, c, d ); \
+ } else \
+ intel_draw_quad( intel, a, b, c, d ); \
+} while (0)
+
+#define LINE( v0, v1 ) \
+do { \
+ if (DO_FALLBACK) \
+ intel->draw_line( intel, v0, v1 ); \
+ else \
+ intel_draw_line( intel, v0, v1 ); \
+} while (0)
+
+#define POINT( v0 ) \
+do { \
+ if (DO_FALLBACK) \
+ intel->draw_point( intel, v0 ); \
+ else \
+ intel_draw_point( intel, v0 ); \
+} while (0)
+
+
+/***********************************************************************
+ * Build render functions from dd templates *
+ ***********************************************************************/
+
+#define INTEL_OFFSET_BIT 0x01
+#define INTEL_TWOSIDE_BIT 0x02
+#define INTEL_UNFILLED_BIT 0x04
+#define INTEL_FALLBACK_BIT 0x08
+#define INTEL_MAX_TRIFUNC 0x10
+
+
+static struct
+{
+ tnl_points_func points;
+ tnl_line_func line;
+ tnl_triangle_func triangle;
+ tnl_quad_func quad;
+} rast_tab[INTEL_MAX_TRIFUNC];
+
+
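+/* Each combination of the INTEL_*_BIT flags selects one rast_tab entry;
+ * t_dd_tritmp.h is included once per combination below to generate the
+ * corresponding point/line/triangle/quad functions.
+ */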
+#define DO_FALLBACK (IND & INTEL_FALLBACK_BIT)
+#define DO_OFFSET (IND & INTEL_OFFSET_BIT)
+#define DO_UNFILLED (IND & INTEL_UNFILLED_BIT)
+#define DO_TWOSIDE (IND & INTEL_TWOSIDE_BIT)
+#define DO_FLAT 0
+#define DO_TRI 1
+#define DO_QUAD 1
+#define DO_LINE 1
+#define DO_POINTS 1
+#define DO_FULL_QUAD 1
+
+#define HAVE_RGBA 1
+#define HAVE_SPEC 1
+#define HAVE_BACK_COLORS 0
+#define HAVE_HW_FLATSHADE 1
+#define VERTEX intelVertex
+#define TAB rast_tab
+
+/* Only used to pull back colors into vertices (ie, we know color is
+ * floating point).
+ */
+#define INTEL_COLOR( dst, src ) \
+do { \
+ UNCLAMPED_FLOAT_TO_UBYTE((dst)[0], (src)[2]); \
+ UNCLAMPED_FLOAT_TO_UBYTE((dst)[1], (src)[1]); \
+ UNCLAMPED_FLOAT_TO_UBYTE((dst)[2], (src)[0]); \
+ UNCLAMPED_FLOAT_TO_UBYTE((dst)[3], (src)[3]); \
+} while (0)
+
+#define INTEL_SPEC( dst, src ) \
+do { \
+ UNCLAMPED_FLOAT_TO_UBYTE((dst)[0], (src)[2]); \
+ UNCLAMPED_FLOAT_TO_UBYTE((dst)[1], (src)[1]); \
+ UNCLAMPED_FLOAT_TO_UBYTE((dst)[2], (src)[0]); \
+} while (0)
+
+
+#define DEPTH_SCALE intel->polygon_offset_scale
+#define UNFILLED_TRI unfilled_tri
+#define UNFILLED_QUAD unfilled_quad
+#define VERT_X(_v) _v->v.x
+#define VERT_Y(_v) _v->v.y
+#define VERT_Z(_v) _v->v.z
+#define AREA_IS_CCW( a ) (a > 0)
+#define GET_VERTEX(e) (intel->verts + (e * intel->vertex_size * sizeof(GLuint)))
+
+#define VERT_SET_RGBA( v, c ) if (coloroffset) INTEL_COLOR( v->ub4[coloroffset], c )
+#define VERT_COPY_RGBA( v0, v1 ) if (coloroffset) v0->ui[coloroffset] = v1->ui[coloroffset]
+#define VERT_SAVE_RGBA( idx ) if (coloroffset) color[idx] = v[idx]->ui[coloroffset]
+#define VERT_RESTORE_RGBA( idx ) if (coloroffset) v[idx]->ui[coloroffset] = color[idx]
+
+#define VERT_SET_SPEC( v, c ) if (specoffset) INTEL_SPEC( v->ub4[specoffset], c )
+#define VERT_COPY_SPEC( v0, v1 ) if (specoffset) COPY_3V(v0->ub4[specoffset], v1->ub4[specoffset])
+#define VERT_SAVE_SPEC( idx ) if (specoffset) spec[idx] = v[idx]->ui[specoffset]
+#define VERT_RESTORE_SPEC( idx ) if (specoffset) v[idx]->ui[specoffset] = spec[idx]
+
+#define LOCAL_VARS(n) \
+ struct intel_context *intel = intel_context(ctx); \
+ GLuint color[n], spec[n]; \
+ GLuint coloroffset = intel->coloroffset; \
+ GLboolean specoffset = intel->specoffset; \
+ (void) color; (void) spec; (void) coloroffset; (void) specoffset;
+
+
+/***********************************************************************
+ * Helpers for rendering unfilled primitives *
+ ***********************************************************************/
+
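+/* Reduce each GL primitive type (indexed by its GLenum value, GL_POINTS
+ * through GL_POLYGON) to the hardware primitive used to rasterize it.
+ */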
+static const GLuint hw_prim[GL_POLYGON + 1] = {
+ PRIM3D_POINTLIST,
+ PRIM3D_LINELIST,
+ PRIM3D_LINELIST,
+ PRIM3D_LINELIST,
+ PRIM3D_TRILIST,
+ PRIM3D_TRILIST,
+ PRIM3D_TRILIST,
+ PRIM3D_TRILIST,
+ PRIM3D_TRILIST,
+ PRIM3D_TRILIST
+};
+
+#define RASTERIZE(x) intelRasterPrimitive( ctx, x, hw_prim[x] )
+#define RENDER_PRIMITIVE intel->render_primitive
+#define TAG(x) x
+#define IND INTEL_FALLBACK_BIT
+#include "tnl_dd/t_dd_unfilled.h"
+#undef IND
+
+/***********************************************************************
+ * Generate GL render functions *
+ ***********************************************************************/
+
+#define IND (0)
+#define TAG(x) x
+#include "tnl_dd/t_dd_tritmp.h"
+
+#define IND (INTEL_OFFSET_BIT)
+#define TAG(x) x##_offset
+#include "tnl_dd/t_dd_tritmp.h"
+
+#define IND (INTEL_TWOSIDE_BIT)
+#define TAG(x) x##_twoside
+#include "tnl_dd/t_dd_tritmp.h"
+
+#define IND (INTEL_TWOSIDE_BIT|INTEL_OFFSET_BIT)
+#define TAG(x) x##_twoside_offset
+#include "tnl_dd/t_dd_tritmp.h"
+
+#define IND (INTEL_UNFILLED_BIT)
+#define TAG(x) x##_unfilled
+#include "tnl_dd/t_dd_tritmp.h"
+
+#define IND (INTEL_OFFSET_BIT|INTEL_UNFILLED_BIT)
+#define TAG(x) x##_offset_unfilled
+#include "tnl_dd/t_dd_tritmp.h"
+
+#define IND (INTEL_TWOSIDE_BIT|INTEL_UNFILLED_BIT)
+#define TAG(x) x##_twoside_unfilled
+#include "tnl_dd/t_dd_tritmp.h"
+
+#define IND (INTEL_TWOSIDE_BIT|INTEL_OFFSET_BIT|INTEL_UNFILLED_BIT)
+#define TAG(x) x##_twoside_offset_unfilled
+#include "tnl_dd/t_dd_tritmp.h"
+
+#define IND (INTEL_FALLBACK_BIT)
+#define TAG(x) x##_fallback
+#include "tnl_dd/t_dd_tritmp.h"
+
+#define IND (INTEL_OFFSET_BIT|INTEL_FALLBACK_BIT)
+#define TAG(x) x##_offset_fallback
+#include "tnl_dd/t_dd_tritmp.h"
+
+#define IND (INTEL_TWOSIDE_BIT|INTEL_FALLBACK_BIT)
+#define TAG(x) x##_twoside_fallback
+#include "tnl_dd/t_dd_tritmp.h"
+
+#define IND (INTEL_TWOSIDE_BIT|INTEL_OFFSET_BIT|INTEL_FALLBACK_BIT)
+#define TAG(x) x##_twoside_offset_fallback
+#include "tnl_dd/t_dd_tritmp.h"
+
+#define IND (INTEL_UNFILLED_BIT|INTEL_FALLBACK_BIT)
+#define TAG(x) x##_unfilled_fallback
+#include "tnl_dd/t_dd_tritmp.h"
+
+#define IND (INTEL_OFFSET_BIT|INTEL_UNFILLED_BIT|INTEL_FALLBACK_BIT)
+#define TAG(x) x##_offset_unfilled_fallback
+#include "tnl_dd/t_dd_tritmp.h"
+
+#define IND (INTEL_TWOSIDE_BIT|INTEL_UNFILLED_BIT|INTEL_FALLBACK_BIT)
+#define TAG(x) x##_twoside_unfilled_fallback
+#include "tnl_dd/t_dd_tritmp.h"
+
+#define IND (INTEL_TWOSIDE_BIT|INTEL_OFFSET_BIT|INTEL_UNFILLED_BIT| \
+ INTEL_FALLBACK_BIT)
+#define TAG(x) x##_twoside_offset_unfilled_fallback
+#include "tnl_dd/t_dd_tritmp.h"
+
+
+static void
+init_rast_tab(void)
+{
+ init();
+ init_offset();
+ init_twoside();
+ init_twoside_offset();
+ init_unfilled();
+ init_offset_unfilled();
+ init_twoside_unfilled();
+ init_twoside_offset_unfilled();
+ init_fallback();
+ init_offset_fallback();
+ init_twoside_fallback();
+ init_twoside_offset_fallback();
+ init_unfilled_fallback();
+ init_offset_unfilled_fallback();
+ init_twoside_unfilled_fallback();
+ init_twoside_offset_unfilled_fallback();
+}
+
+
+/***********************************************************************
+ * Rasterization fallback helpers *
+ ***********************************************************************/
+
+
+/* This code is hit only when a mix of accelerated and unaccelerated
+ * primitives is being drawn, and only for the unaccelerated
+ * primitives.
+ */
+static void
+intel_fallback_tri(struct intel_context *intel,
+ intelVertex * v0, intelVertex * v1, intelVertex * v2)
+{
+ GLcontext *ctx = &intel->ctx;
+ SWvertex v[3];
+
+ if (0)
+ fprintf(stderr, "\n%s\n", __FUNCTION__);
+
+ INTEL_FIREVERTICES(intel);
+
+ _swsetup_Translate(ctx, v0, &v[0]);
+ _swsetup_Translate(ctx, v1, &v[1]);
+ _swsetup_Translate(ctx, v2, &v[2]);
+ intelSpanRenderStart(ctx);
+ _swrast_Triangle(ctx, &v[0], &v[1], &v[2]);
+ intelSpanRenderFinish(ctx);
+}
+
+
+static void
+intel_fallback_line(struct intel_context *intel,
+ intelVertex * v0, intelVertex * v1)
+{
+ GLcontext *ctx = &intel->ctx;
+ SWvertex v[2];
+
+ if (0)
+ fprintf(stderr, "\n%s\n", __FUNCTION__);
+
+ INTEL_FIREVERTICES(intel);
+
+ _swsetup_Translate(ctx, v0, &v[0]);
+ _swsetup_Translate(ctx, v1, &v[1]);
+ intelSpanRenderStart(ctx);
+ _swrast_Line(ctx, &v[0], &v[1]);
+ intelSpanRenderFinish(ctx);
+}
+
+static void
+intel_fallback_point(struct intel_context *intel,
+ intelVertex * v0)
+{
+ GLcontext *ctx = &intel->ctx;
+ SWvertex v[1];
+
+ if (0)
+ fprintf(stderr, "\n%s\n", __FUNCTION__);
+
+ INTEL_FIREVERTICES(intel);
+
+ _swsetup_Translate(ctx, v0, &v[0]);
+ intelSpanRenderStart(ctx);
+ _swrast_Point(ctx, &v[0]);
+ intelSpanRenderFinish(ctx);
+}
+
+
+/**********************************************************************/
+/* Render unclipped begin/end objects */
+/**********************************************************************/
+
+#define IND 0
+#define V(x) (intelVertex *)(vertptr + ((x)*vertsize*sizeof(GLuint)))
+#define RENDER_POINTS( start, count ) \
+ for ( ; start < count ; start++) POINT( V(ELT(start)) );
+#define RENDER_LINE( v0, v1 ) LINE( V(v0), V(v1) )
+#define RENDER_TRI( v0, v1, v2 ) TRI( V(v0), V(v1), V(v2) )
+#define RENDER_QUAD( v0, v1, v2, v3 ) QUAD( V(v0), V(v1), V(v2), V(v3) )
+#define INIT(x) intelRenderPrimitive( ctx, x )
+#undef LOCAL_VARS
+#define LOCAL_VARS \
+ struct intel_context *intel = intel_context(ctx); \
+ GLubyte *vertptr = (GLubyte *)intel->verts; \
+ const GLuint vertsize = intel->vertex_size; \
+ const GLuint * const elt = TNL_CONTEXT(ctx)->vb.Elts; \
+ (void) elt;
+#define RESET_STIPPLE
+#define RESET_OCCLUSION
+#define PRESERVE_VB_DEFS
+#define ELT(x) x
+#define TAG(x) intel_##x##_verts
+#include "tnl/t_vb_rendertmp.h"
+#undef ELT
+#undef TAG
+#define TAG(x) intel_##x##_elts
+#define ELT(x) elt[x]
+#include "tnl/t_vb_rendertmp.h"
+
+/**********************************************************************/
+/* Render clipped primitives */
+/**********************************************************************/
+
+
+
+static void
+intelRenderClippedPoly(GLcontext * ctx, const GLuint * elts, GLuint n)
+{
+ struct intel_context *intel = intel_context(ctx);
+ TNLcontext *tnl = TNL_CONTEXT(ctx);
+ struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
+ GLuint prim = intel->render_primitive;
+
+ /* Render the new vertices as an unclipped polygon.
+ */
+ {
+ GLuint *tmp = VB->Elts;
+ VB->Elts = (GLuint *) elts;
+ tnl->Driver.Render.PrimTabElts[GL_POLYGON] (ctx, 0, n,
+ PRIM_BEGIN | PRIM_END);
+ VB->Elts = tmp;
+ }
+
+ /* Restore the render primitive
+ */
+ if (prim != GL_POLYGON)
+ tnl->Driver.Render.PrimitiveNotify(ctx, prim);
+}
+
+static void
+intelRenderClippedLine(GLcontext * ctx, GLuint ii, GLuint jj)
+{
+ TNLcontext *tnl = TNL_CONTEXT(ctx);
+
+ tnl->Driver.Render.Line(ctx, ii, jj);
+}
+
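+/* Emit a clipped polygon as (n - 2) triangles fanning out from the first
+ * element, appended directly to the current inline primitive.
+ */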
+static void
+intelFastRenderClippedPoly(GLcontext * ctx, const GLuint * elts, GLuint n)
+{
+ struct intel_context *intel = intel_context(ctx);
+ const GLuint vertsize = intel->vertex_size;
+ GLuint *vb = intelExtendInlinePrimitive(intel, (n - 2) * 3 * vertsize);
+ GLubyte *vertptr = (GLubyte *) intel->verts;
+ const GLuint *start = (const GLuint *) V(elts[0]);
+ int i, j;
+
+ for (i = 2; i < n; i++) {
+ COPY_DWORDS(j, vb, vertsize, V(elts[i - 1]));
+ COPY_DWORDS(j, vb, vertsize, V(elts[i]));
+ COPY_DWORDS(j, vb, vertsize, start);
+ }
+}
+
+/**********************************************************************/
+/* Choose render functions */
+/**********************************************************************/
+
+
+
+
+#define ANY_FALLBACK_FLAGS (DD_LINE_STIPPLE | DD_TRI_STIPPLE | DD_POINT_ATTEN | DD_POINT_SMOOTH | DD_TRI_SMOOTH)
+#define ANY_RASTER_FLAGS (DD_TRI_LIGHT_TWOSIDE | DD_TRI_OFFSET | DD_TRI_UNFILLED)
+
+void
+intelChooseRenderState(GLcontext * ctx)
+{
+ TNLcontext *tnl = TNL_CONTEXT(ctx);
+ struct intel_context *intel = intel_context(ctx);
+ GLuint flags = ctx->_TriangleCaps;
+ const struct gl_fragment_program *fprog = ctx->FragmentProgram._Current;
+ GLboolean have_wpos = (fprog && (fprog->Base.InputsRead & FRAG_BIT_WPOS));
+ GLuint index = 0;
+
+ if (INTEL_DEBUG & DEBUG_STATE)
+ fprintf(stderr, "\n%s\n", __FUNCTION__);
+
+ if ((flags & (ANY_FALLBACK_FLAGS | ANY_RASTER_FLAGS)) || have_wpos) {
+
+ if (flags & ANY_RASTER_FLAGS) {
+ if (flags & DD_TRI_LIGHT_TWOSIDE)
+ index |= INTEL_TWOSIDE_BIT;
+ if (flags & DD_TRI_OFFSET)
+ index |= INTEL_OFFSET_BIT;
+ if (flags & DD_TRI_UNFILLED)
+ index |= INTEL_UNFILLED_BIT;
+ }
+
+ if (have_wpos) {
+ intel->draw_point = intel_wpos_point;
+ intel->draw_line = intel_wpos_line;
+ intel->draw_tri = intel_wpos_triangle;
+
+ /* Make sure these get called:
+ */
+ index |= INTEL_FALLBACK_BIT;
+ }
+ else {
+ intel->draw_point = intel_draw_point;
+ intel->draw_line = intel_draw_line;
+ intel->draw_tri = intel_draw_triangle;
+ }
+
+ /* Hook in fallbacks for specific primitives.
+ */
+ if (flags & ANY_FALLBACK_FLAGS) {
+ if (flags & DD_LINE_STIPPLE)
+ intel->draw_line = intel_fallback_line;
+
+ if ((flags & DD_TRI_STIPPLE) && !intel->hw_stipple)
+ intel->draw_tri = intel_fallback_tri;
+
+ if (flags & DD_TRI_SMOOTH) {
+ if (intel->strict_conformance)
+ intel->draw_tri = intel_fallback_tri;
+ }
+
+ if (flags & DD_POINT_ATTEN) {
+ if (0)
+ intel->draw_point = intel_atten_point;
+ else
+ intel->draw_point = intel_fallback_point;
+ }
+
+ if (flags & DD_POINT_SMOOTH) {
+ if (intel->strict_conformance)
+ intel->draw_point = intel_fallback_point;
+ }
+
+ index |= INTEL_FALLBACK_BIT;
+ }
+ }
+
+ if (intel->RenderIndex != index) {
+ intel->RenderIndex = index;
+
+ tnl->Driver.Render.Points = rast_tab[index].points;
+ tnl->Driver.Render.Line = rast_tab[index].line;
+ tnl->Driver.Render.Triangle = rast_tab[index].triangle;
+ tnl->Driver.Render.Quad = rast_tab[index].quad;
+
+ if (index == 0) {
+ tnl->Driver.Render.PrimTabVerts = intel_render_tab_verts;
+ tnl->Driver.Render.PrimTabElts = intel_render_tab_elts;
+ tnl->Driver.Render.ClippedLine = line; /* from tritmp.h */
+ tnl->Driver.Render.ClippedPolygon = intelFastRenderClippedPoly;
+ }
+ else {
+ tnl->Driver.Render.PrimTabVerts = _tnl_render_tab_verts;
+ tnl->Driver.Render.PrimTabElts = _tnl_render_tab_elts;
+ tnl->Driver.Render.ClippedLine = intelRenderClippedLine;
+ tnl->Driver.Render.ClippedPolygon = intelRenderClippedPoly;
+ }
+ }
+}
+
+static const GLenum reduced_prim[GL_POLYGON + 1] = {
+ GL_POINTS,
+ GL_LINES,
+ GL_LINES,
+ GL_LINES,
+ GL_TRIANGLES,
+ GL_TRIANGLES,
+ GL_TRIANGLES,
+ GL_TRIANGLES,
+ GL_TRIANGLES,
+ GL_TRIANGLES
+};
+
+
+/**********************************************************************/
+/* High level hooks for t_vb_render.c */
+/**********************************************************************/
+
+
+
+
+static void
+intelRunPipeline(GLcontext * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ _mesa_lock_context_textures(ctx);
+
+ if (ctx->NewState)
+ _mesa_update_state_locked(ctx);
+
+ if (intel->NewGLState) {
+ if (intel->NewGLState & _NEW_TEXTURE) {
+ intel->vtbl.update_texture_state(intel);
+ }
+
+ if (!intel->Fallback) {
+ if (intel->NewGLState & _INTEL_NEW_RENDERSTATE)
+ intelChooseRenderState(ctx);
+ }
+
+ intel->NewGLState = 0;
+ }
+
+ _tnl_run_pipeline(ctx);
+
+ _mesa_unlock_context_textures(ctx);
+}
+
+static void
+intelRenderStart(GLcontext * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ intel->vtbl.render_start(intel_context(ctx));
+ intel->vtbl.emit_state(intel);
+}
+
+static void
+intelRenderFinish(GLcontext * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ if (intel->RenderIndex & INTEL_FALLBACK_BIT)
+ _swrast_flush(ctx);
+
+ INTEL_FIREVERTICES(intel);
+}
+
+
+
+
+ /* Flush DMA and emit state changes based on the rasterized
+ * primitive.
+ */
+static void
+intelRasterPrimitive(GLcontext * ctx, GLenum rprim, GLuint hwprim)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ if (0)
+ fprintf(stderr, "%s %s %x\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(rprim), hwprim);
+
+ intel->vtbl.reduced_primitive_state(intel, rprim);
+
+ /* Start a new primitive. Arrange to have it flushed later on.
+ */
+ if (hwprim != intel->prim.primitive) {
+ INTEL_FIREVERTICES(intel);
+
+ intelStartInlinePrimitive(intel, hwprim, INTEL_BATCH_CLIPRECTS);
+ }
+}
+
+
+ /* Notification from the tnl module of the GL primitive about to be
+ * rendered; may start a new hardware primitive.
+ */
+static void
+intelRenderPrimitive(GLcontext * ctx, GLenum prim)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ if (0)
+ fprintf(stderr, "%s %s\n", __FUNCTION__, _mesa_lookup_enum_by_nr(prim));
+
+ /* Let some clipping routines know which primitive they're dealing
+ * with.
+ */
+ intel->render_primitive = prim;
+
+ /* Shortcircuit this when called from t_dd_rendertmp.h for unfilled
+ * triangles. The rasterized primitive will always be reset by
+ * lower level functions in that case, potentially pingponging the
+ * state:
+ */
+ if (reduced_prim[prim] == GL_TRIANGLES &&
+ (ctx->_TriangleCaps & DD_TRI_UNFILLED))
+ return;
+
+ /* Set some primitive-dependent state and possibly start a new primitive.
+ */
+ intelRasterPrimitive(ctx, reduced_prim[prim], hw_prim[prim]);
+}
+
+
+ /**********************************************************************/
+ /* Transition to/from hardware rasterization. */
+ /**********************************************************************/
+
+static char *fallbackStrings[] = {
+ [0] = "Draw buffer",
+ [1] = "Read buffer",
+ [2] = "Depth buffer",
+ [3] = "Stencil buffer",
+ [4] = "User disable",
+ [5] = "Render mode",
+
+ [12] = "Texture",
+ [13] = "Color mask",
+ [14] = "Stencil",
+ [15] = "Stipple",
+ [16] = "Program",
+ [17] = "Logic op",
+ [18] = "Smooth polygon",
+ [19] = "Smooth point",
+};
+
+
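+/* Map a single fallback bit to its entry in fallbackStrings[]. */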
+static char *
+getFallbackString(GLuint bit)
+{
+ int i = 0;
+ while (bit > 1) {
+ i++;
+ bit >>= 1;
+ }
+ return fallbackStrings[i];
+}
+
+
+
+void
+intelFallback(struct intel_context *intel, GLuint bit, GLboolean mode)
+{
+ GLcontext *ctx = &intel->ctx;
+ TNLcontext *tnl = TNL_CONTEXT(ctx);
+ GLuint oldfallback = intel->Fallback;
+
+ if (mode) {
+ intel->Fallback |= bit;
+ if (oldfallback == 0) {
+ intelFlush(ctx);
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "ENTER FALLBACK %x: %s\n",
+ bit, getFallbackString(bit));
+ _swsetup_Wakeup(ctx);
+ intel->RenderIndex = ~0;
+ }
+ }
+ else {
+ intel->Fallback &= ~bit;
+ if (oldfallback == bit) {
+ _swrast_flush(ctx);
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "LEAVE FALLBACK %s\n", getFallbackString(bit));
+ tnl->Driver.Render.Start = intelRenderStart;
+ tnl->Driver.Render.PrimitiveNotify = intelRenderPrimitive;
+ tnl->Driver.Render.Finish = intelRenderFinish;
+ tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
+ tnl->Driver.Render.CopyPV = _tnl_copy_pv;
+ tnl->Driver.Render.Interp = _tnl_interp;
+
+ _tnl_invalidate_vertex_state(ctx, ~0);
+ _tnl_invalidate_vertices(ctx, ~0);
+ _tnl_install_attrs(ctx,
+ intel->vertex_attrs,
+ intel->vertex_attr_count,
+ intel->ViewportMatrix.m, 0);
+
+ intel->NewGLState |= _INTEL_NEW_RENDERSTATE;
+ }
+ }
+}
+
+union fi
+{
+ GLfloat f;
+ GLint i;
+};
+
+
+/**********************************************************************/
+/* Used only with the metaops callbacks. */
+/**********************************************************************/
+void
+intel_meta_draw_poly(struct intel_context *intel,
+ GLuint n,
+ GLfloat xy[][2],
+ GLfloat z, GLuint color, GLfloat tex[][2])
+{
+ union fi *vb;
+ GLint i;
+
+ /* All 3d primitives should be emitted with INTEL_BATCH_CLIPRECTS,
+ * otherwise the drawing origin (DR4) might not be set correctly.
+ */
+ intelStartInlinePrimitive(intel, PRIM3D_TRIFAN, INTEL_BATCH_CLIPRECTS);
+ vb = (union fi *) intelExtendInlinePrimitive(intel, n * 6);
+
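+ /* Each metaops vertex is six dwords: x, y, z, packed color, s, t. */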
+ for (i = 0; i < n; i++) {
+ vb[0].f = xy[i][0];
+ vb[1].f = xy[i][1];
+ vb[2].f = z;
+ vb[3].i = color;
+ vb[4].f = tex[i][0];
+ vb[5].f = tex[i][1];
+ vb += 6;
+ }
+
+ INTEL_FIREVERTICES(intel);
+}
+
+void
+intel_meta_draw_quad(struct intel_context *intel,
+ GLfloat x0, GLfloat x1,
+ GLfloat y0, GLfloat y1,
+ GLfloat z,
+ GLuint color,
+ GLfloat s0, GLfloat s1, GLfloat t0, GLfloat t1)
+{
+ GLfloat xy[4][2];
+ GLfloat tex[4][2];
+
+ xy[0][0] = x0;
+ xy[0][1] = y0;
+ xy[1][0] = x1;
+ xy[1][1] = y0;
+ xy[2][0] = x1;
+ xy[2][1] = y1;
+ xy[3][0] = x0;
+ xy[3][1] = y1;
+
+ tex[0][0] = s0;
+ tex[0][1] = t0;
+ tex[1][0] = s1;
+ tex[1][1] = t0;
+ tex[2][0] = s1;
+ tex[2][1] = t1;
+ tex[3][0] = s0;
+ tex[3][1] = t1;
+
+ intel_meta_draw_poly(intel, 4, xy, z, color, tex);
+}
+
+
+
+/**********************************************************************/
+/* Initialization. */
+/**********************************************************************/
+
+
+void
+intelInitTriFuncs(GLcontext * ctx)
+{
+ TNLcontext *tnl = TNL_CONTEXT(ctx);
+ static int firsttime = 1;
+
+ if (firsttime) {
+ init_rast_tab();
+ firsttime = 0;
+ }
+
+ tnl->Driver.RunPipeline = intelRunPipeline;
+ tnl->Driver.Render.Start = intelRenderStart;
+ tnl->Driver.Render.Finish = intelRenderFinish;
+ tnl->Driver.Render.PrimitiveNotify = intelRenderPrimitive;
+ tnl->Driver.Render.ResetLineStipple = _swrast_ResetLineStipple;
+ tnl->Driver.Render.BuildVertices = _tnl_build_vertices;
+ tnl->Driver.Render.CopyPV = _tnl_copy_pv;
+ tnl->Driver.Render.Interp = _tnl_interp;
+}
--- /dev/null
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTELTRIS_INC
+#define INTELTRIS_INC
+
+#include "mtypes.h"
+
+
+
+#define _INTEL_NEW_RENDERSTATE (_DD_NEW_LINE_STIPPLE | \
+ _DD_NEW_TRI_UNFILLED | \
+ _DD_NEW_TRI_LIGHT_TWOSIDE | \
+ _DD_NEW_TRI_OFFSET | \
+ _DD_NEW_TRI_STIPPLE | \
+ _NEW_PROGRAM | \
+ _NEW_POLYGONSTIPPLE)
+
+extern void intelInitTriFuncs(GLcontext * ctx);
+
+extern void intelChooseRenderState(GLcontext * ctx);
+
+extern void intelStartInlinePrimitive(struct intel_context *intel,
+ GLuint prim, GLuint flags);
+extern void intelWrapInlinePrimitive(struct intel_context *intel);
+
+GLuint *intelExtendInlinePrimitive(struct intel_context *intel,
+ GLuint dwords);
+
+
+void intel_meta_draw_quad(struct intel_context *intel,
+ GLfloat x0, GLfloat x1,
+ GLfloat y0, GLfloat y1,
+ GLfloat z,
+ GLuint color,
+ GLfloat s0, GLfloat s1, GLfloat t0, GLfloat t1);
+
+void intel_meta_draw_poly(struct intel_context *intel,
+ GLuint n,
+ GLfloat xy[][2],
+ GLfloat z, GLuint color, GLfloat tex[][2]);
+
+
+
+#endif
--- /dev/null
+/**************************************************************************
+
+Copyright 2001 VA Linux Systems Inc., Fremont, California.
+Copyright 2002 Tungsten Graphics Inc., Cedar Park, Texas.
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+on the rights to use, copy, modify, merge, publish, distribute, sub
+license, and/or sell copies of the Software, and to permit persons to whom
+the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the next
+paragraph) shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ATI, VA LINUX SYSTEMS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/i810/i830_common.h,v 1.1 2002/09/11 00:29:32 dawes Exp $ */
+
+#ifndef _I830_COMMON_H_
+#define _I830_COMMON_H_
+
+
+#define I830_NR_TEX_REGIONS 255 /* maximum due to use of chars for next/prev */
+#define I830_LOG_MIN_TEX_REGION_SIZE 14
+
+
+/* Driver specific DRM command indices
+ * NOTE: these are not OS specific, but they are driver specific
+ */
+#define DRM_I830_INIT 0x00
+#define DRM_I830_FLUSH 0x01
+#define DRM_I830_FLIP 0x02
+#define DRM_I830_BATCHBUFFER 0x03
+#define DRM_I830_IRQ_EMIT 0x04
+#define DRM_I830_IRQ_WAIT 0x05
+#define DRM_I830_GETPARAM 0x06
+#define DRM_I830_SETPARAM 0x07
+#define DRM_I830_ALLOC 0x08
+#define DRM_I830_FREE 0x09
+#define DRM_I830_INIT_HEAP 0x0a
+#define DRM_I830_CMDBUFFER 0x0b
+#define DRM_I830_DESTROY_HEAP 0x0c
+
+typedef struct {
+ enum {
+ I830_INIT_DMA = 0x01,
+ I830_CLEANUP_DMA = 0x02,
+ I830_RESUME_DMA = 0x03
+ } func;
+ unsigned int mmio_offset;
+ int sarea_priv_offset;
+ unsigned int ring_start;
+ unsigned int ring_end;
+ unsigned int ring_size;
+ unsigned int front_offset;
+ unsigned int back_offset;
+ unsigned int depth_offset;
+ unsigned int w;
+ unsigned int h;
+ unsigned int pitch;
+ unsigned int pitch_bits;
+ unsigned int back_pitch;
+ unsigned int depth_pitch;
+ unsigned int cpp;
+ unsigned int chipset;
+} drmI830Init;
+
+typedef struct {
+ drmTextureRegion texList[I830_NR_TEX_REGIONS+1];
+ int last_upload; /* last time texture was uploaded */
+ int last_enqueue; /* last time a buffer was enqueued */
+ int last_dispatch; /* age of the most recently dispatched buffer */
+ int ctxOwner; /* last context to upload state */
+ int texAge;
+ int pf_enabled; /* is pageflipping allowed? */
+ int pf_active;
+ int pf_current_page; /* which buffer is being displayed? */
+ int perf_boxes; /* performance boxes to be displayed */
+ int width, height; /* screen size in pixels */
+
+ drm_handle_t front_handle;
+ int front_offset;
+ int front_size;
+
+ drm_handle_t back_handle;
+ int back_offset;
+ int back_size;
+
+ drm_handle_t depth_handle;
+ int depth_offset;
+ int depth_size;
+
+ drm_handle_t tex_handle;
+ int tex_offset;
+ int tex_size;
+ int log_tex_granularity;
+ int pitch;
+ int rotation; /* 0, 90, 180 or 270 */
+ int rotated_offset;
+ int rotated_size;
+ int rotated_pitch;
+ int virtualX, virtualY;
+
+ unsigned int front_tiled;
+ unsigned int back_tiled;
+ unsigned int depth_tiled;
+ unsigned int rotated_tiled;
+ unsigned int rotated2_tiled;
+
+ int pipeA_x;
+ int pipeA_y;
+ int pipeA_w;
+ int pipeA_h;
+ int pipeB_x;
+ int pipeB_y;
+ int pipeB_w;
+ int pipeB_h;
+} drmI830Sarea;
+
+/* Flags for perf_boxes
+ */
+#define I830_BOX_RING_EMPTY 0x1 /* populated by kernel */
+#define I830_BOX_FLIP 0x2 /* populated by kernel */
+#define I830_BOX_WAIT 0x4 /* populated by kernel & client */
+#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */
+#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */
+
+
+typedef struct {
+ int start; /* agp offset */
+ int used; /* nr bytes in use */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO*/
+ int num_cliprects; /* multipass with multiple cliprects? */
+ drm_clip_rect_t *cliprects; /* pointer to userspace cliprects */
+} drmI830BatchBuffer;
+
+typedef struct {
+ char *buf; /* agp offset */
+ int sz; /* nr bytes in use */
+ int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
+ int DR4; /* window origin for GFX_OP_DRAWRECT_INFO*/
+ int num_cliprects; /* multipass with multiple cliprects? */
+ drm_clip_rect_t *cliprects; /* pointer to userspace cliprects */
+} drmI830CmdBuffer;
+
+typedef struct {
+ int *irq_seq;
+} drmI830IrqEmit;
+
+typedef struct {
+ int irq_seq;
+} drmI830IrqWait;
+
+typedef struct {
+ int param;
+ int *value;
+} drmI830GetParam;
+
+#define I830_PARAM_IRQ_ACTIVE 1
+#define I830_PARAM_ALLOW_BATCHBUFFER 2
+
+typedef struct {
+ int param;
+ int value;
+} drmI830SetParam;
+
+#define I830_SETPARAM_USE_MI_BATCHBUFFER_START 1
+#define I830_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
+#define I830_SETPARAM_ALLOW_BATCHBUFFER 3
+
+
+/* A memory manager for regions of shared memory:
+ */
+#define I830_MEM_REGION_AGP 1
+
+typedef struct {
+ int region;
+ int alignment;
+ int size;
+ int *region_offset; /* offset from start of fb or agp */
+} drmI830MemAlloc;
+
+typedef struct {
+ int region;
+ int region_offset;
+} drmI830MemFree;
+
+typedef struct {
+ int region;
+ int size;
+ int start;
+} drmI830MemInitHeap;
+
+typedef struct {
+ int region;
+} drmI830MemDestroyHeap;
+
+
+#endif /* _I830_COMMON_H_ */
--- /dev/null
+/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/i810/i830_dri.h,v 1.4 2002/10/30 12:52:18 alanh Exp $ */
+
+#ifndef _I830_DRI_H
+#define _I830_DRI_H
+
+#include "xf86drm.h"
+#include "i830_common.h"
+
+#define I830_MAX_DRAWABLES 256
+
+#define I830_MAJOR_VERSION 1
+#define I830_MINOR_VERSION 3
+#define I830_PATCHLEVEL 0
+
+#define I830_REG_SIZE 0x80000
+
+typedef struct _I830DRIRec {
+ drm_handle_t regs;
+ drmSize regsSize;
+
+ drmSize backbufferSize;
+ drm_handle_t backbuffer;
+
+ drmSize depthbufferSize;
+ drm_handle_t depthbuffer;
+
+ drmSize rotatedSize;
+ drm_handle_t rotatedbuffer;
+
+ drm_handle_t textures;
+ int textureSize;
+
+ drm_handle_t agp_buffers;
+ drmSize agp_buf_size;
+
+ int deviceID;
+ int width;
+ int height;
+ int mem;
+ int cpp;
+ int bitsPerPixel;
+
+ int fbOffset;
+ int fbStride;
+
+ int backOffset;
+ int backPitch;
+
+ int depthOffset;
+ int depthPitch;
+
+ int rotatedOffset;
+ int rotatedPitch;
+
+ int logTextureGranularity;
+ int textureOffset;
+
+ int irq;
+ int sarea_priv_offset;
+} I830DRIRec, *I830DRIPtr;
+
+typedef struct {
+ /* Nothing here yet */
+ int dummy;
+} I830ConfigPrivRec, *I830ConfigPrivPtr;
+
+typedef struct {
+ /* Nothing here yet */
+ int dummy;
+} I830DRIContextRec, *I830DRIContextPtr;
+
+
+#endif
--- /dev/null
+#ifndef _INTEL_H_
+#define _INTEL_H_
+
+#include "xf86drm.h" /* drm_handle_t, etc */
+
+/* Intel */
+#ifndef PCI_CHIP_I810
+#define PCI_CHIP_I810 0x7121
+#define PCI_CHIP_I810_DC100 0x7123
+#define PCI_CHIP_I810_E 0x7125
+#define PCI_CHIP_I815 0x1132
+#define PCI_CHIP_I810_BRIDGE 0x7120
+#define PCI_CHIP_I810_DC100_BRIDGE 0x7122
+#define PCI_CHIP_I810_E_BRIDGE 0x7124
+#define PCI_CHIP_I815_BRIDGE 0x1130
+#endif
+
+#define PCI_CHIP_845_G 0x2562
+#define PCI_CHIP_I830_M 0x3577
+
+#ifndef PCI_CHIP_I855_GM
+#define PCI_CHIP_I855_GM 0x3582
+#define PCI_CHIP_I855_GM_BRIDGE 0x3580
+#endif
+
+#ifndef PCI_CHIP_I865_G
+#define PCI_CHIP_I865_G 0x2572
+#define PCI_CHIP_I865_G_BRIDGE 0x2570
+#endif
+
+#ifndef PCI_CHIP_I915_G
+#define PCI_CHIP_I915_G 0x2582
+#define PCI_CHIP_I915_G_BRIDGE 0x2580
+#endif
+
+#ifndef PCI_CHIP_I915_GM
+#define PCI_CHIP_I915_GM 0x2592
+#define PCI_CHIP_I915_GM_BRIDGE 0x2590
+#endif
+
+#ifndef PCI_CHIP_E7221_G
+#define PCI_CHIP_E7221_G 0x258A
+/* Same as I915_G_BRIDGE */
+#define PCI_CHIP_E7221_G_BRIDGE 0x2580
+#endif
+
+#ifndef PCI_CHIP_I945_G
+#define PCI_CHIP_I945_G 0x2772
+#define PCI_CHIP_I945_G_BRIDGE 0x2770
+#endif
+
+#ifndef PCI_CHIP_I945_GM
+#define PCI_CHIP_I945_GM 0x27A2
+#define PCI_CHIP_I945_GM_BRIDGE 0x27A0
+#endif
+
+#define IS_I810(pI810) (pI810->Chipset == PCI_CHIP_I810 || \
+ pI810->Chipset == PCI_CHIP_I810_DC100 || \
+ pI810->Chipset == PCI_CHIP_I810_E)
+#define IS_I815(pI810) (pI810->Chipset == PCI_CHIP_I815)
+#define IS_I830(pI810) (pI810->Chipset == PCI_CHIP_I830_M)
+#define IS_845G(pI810) (pI810->Chipset == PCI_CHIP_845_G)
+#define IS_I85X(pI810) (pI810->Chipset == PCI_CHIP_I855_GM)
+#define IS_I852(pI810) (pI810->Chipset == PCI_CHIP_I855_GM && (pI810->variant == I852_GM || pI810->variant == I852_GME))
+#define IS_I855(pI810) (pI810->Chipset == PCI_CHIP_I855_GM && (pI810->variant == I855_GM || pI810->variant == I855_GME))
+#define IS_I865G(pI810) (pI810->Chipset == PCI_CHIP_I865_G)
+
+#define IS_I915G(pI810) (pI810->Chipset == PCI_CHIP_I915_G || pI810->Chipset == PCI_CHIP_E7221_G)
+#define IS_I915GM(pI810) (pI810->Chipset == PCI_CHIP_I915_GM)
+#define IS_I945G(pI810) (pI810->Chipset == PCI_CHIP_I945_G)
+#define IS_I945GM(pI810) (pI810->Chipset == PCI_CHIP_I945_GM)
+#define IS_I9XX(pI810) (IS_I915G(pI810) || IS_I915GM(pI810) || IS_I945G(pI810) || IS_I945GM(pI810))
+
+#define IS_MOBILE(pI810) (IS_I830(pI810) || IS_I85X(pI810) || IS_I915GM(pI810) || IS_I945GM(pI810))
+
+#define I830_GMCH_CTRL 0x52
+
+
+#define I830_GMCH_GMS_MASK 0x70
+#define I830_GMCH_GMS_DISABLED 0x00
+#define I830_GMCH_GMS_LOCAL 0x10
+#define I830_GMCH_GMS_STOLEN_512 0x20
+#define I830_GMCH_GMS_STOLEN_1024 0x30
+#define I830_GMCH_GMS_STOLEN_8192 0x40
+
+#define I855_GMCH_GMS_MASK (0x7 << 4)
+#define I855_GMCH_GMS_DISABLED 0x00
+#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+#define I915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define I915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
+
+typedef unsigned char Bool;
+#define TRUE 1
+#define FALSE 0
+
+#define PIPE_NONE 0<<0
+#define PIPE_CRT 1<<0
+#define PIPE_TV 1<<1
+#define PIPE_DFP 1<<2
+#define PIPE_LFP 1<<3
+#define PIPE_CRT2 1<<4
+#define PIPE_TV2 1<<5
+#define PIPE_DFP2 1<<6
+#define PIPE_LFP2 1<<7
+
+typedef struct _I830MemPool *I830MemPoolPtr;
+typedef struct _I830MemRange *I830MemRangePtr;
+typedef struct _I830MemRange {
+ long Start;
+ long End;
+ long Size;
+ unsigned long Physical;
+ unsigned long Offset; /* Offset of AGP-allocated portion */
+ unsigned long Alignment;
+ drm_handle_t Key;
+ unsigned long Pitch; // add pitch
+ I830MemPoolPtr Pool;
+} I830MemRange;
+
+typedef struct _I830MemPool {
+ I830MemRange Total;
+ I830MemRange Free;
+ I830MemRange Fixed;
+ I830MemRange Allocated;
+} I830MemPool;
+
+typedef struct {
+ int tail_mask;
+ I830MemRange mem;
+ unsigned char *virtual_start;
+ int head;
+ int tail;
+ int space;
+} I830RingBuffer;
+
+typedef struct _I830Rec {
+ unsigned char *MMIOBase;
+ unsigned char *FbBase;
+ int cpp;
+
+ unsigned int bios_version;
+
+ /* These are set in PreInit and never changed. */
+ long FbMapSize;
+ long TotalVideoRam;
+ I830MemRange StolenMemory; /* pre-allocated memory */
+ long BIOSMemorySize; /* min stolen pool size */
+ int BIOSMemSizeLoc;
+
+ /* These change according to what has been allocated. */
+ long FreeMemory;
+ I830MemRange MemoryAperture;
+ I830MemPool StolenPool;
+ long allocatedMemory;
+
+ /* Regions allocated either from the above pools, or from agpgart. */
+ /* for single and dual head configurations */
+ I830MemRange FrontBuffer;
+ I830MemRange FrontBuffer2;
+ I830MemRange Scratch;
+ I830MemRange Scratch2;
+
+ I830RingBuffer *LpRing;
+
+ I830MemRange BackBuffer;
+ I830MemRange DepthBuffer;
+ I830MemRange TexMem;
+ int TexGranularity;
+ I830MemRange ContextMem;
+ int drmMinor;
+ Bool have3DWindows;
+
+ Bool NeedRingBufferLow;
+ Bool allowPageFlip;
+ Bool disableTiling;
+
+ int Chipset;
+ unsigned long LinearAddr;
+ unsigned long MMIOAddr;
+
+ drmSize registerSize; /**< \brief MMIO register map size */
+ drm_handle_t registerHandle; /**< \brief MMIO register map handle */
+ // IOADDRESS ioBase;
+ int irq; /**< \brief IRQ number */
+ int GttBound;
+
+ drm_handle_t ring_map;
+ unsigned int Fence[8];
+
+} I830Rec;
+
+/*
+ * 12288 is set as the maximum, chosen because it is enough for
+ * 1920x1440@32bpp with a 2048-pixel line pitch, with some to spare.
+ */
+#define I830_MAXIMUM_VBIOS_MEM 12288
+#define I830_DEFAULT_VIDEOMEM_2D (MB(32) / 1024)
+#define I830_DEFAULT_VIDEOMEM_3D (MB(64) / 1024)
+
+/* Flags for memory allocation function */
+#define FROM_ANYWHERE 0x00000000
+#define FROM_POOL_ONLY 0x00000001
+#define FROM_NEW_ONLY 0x00000002
+#define FROM_MASK 0x0000000f
+
+#define ALLOCATE_AT_TOP 0x00000010
+#define ALLOCATE_AT_BOTTOM 0x00000020
+#define FORCE_GAPS 0x00000040
+
+#define NEED_PHYSICAL_ADDR 0x00000100
+#define ALIGN_BOTH_ENDS 0x00000200
+#define FORCE_LOW 0x00000400
+
+#define ALLOC_NO_TILING 0x00001000
+#define ALLOC_INITIAL 0x00002000
+
+#define ALLOCATE_DRY_RUN 0x80000000
+
+/* Chipset registers for VIDEO BIOS memory RW access */
+#define _855_DRAM_RW_CONTROL 0x58
+#define _845_DRAM_RW_CONTROL 0x90
+#define DRAM_WRITE 0x33330000
+
+#define KB(x) ((x) * 1024)
+#define MB(x) ((x) * KB(1024))
+
+#define GTT_PAGE_SIZE KB(4)
+#define ROUND_TO(x, y) (((x) + (y) - 1) / (y) * (y))
+#define ROUND_DOWN_TO(x, y) ((x) / (y) * (y))
+#define ROUND_TO_PAGE(x) ROUND_TO((x), GTT_PAGE_SIZE)
+#define ROUND_TO_MB(x) ROUND_TO((x), MB(1))
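+/* e.g. ROUND_TO_PAGE(5000) == 8192 and ROUND_DOWN_TO(5000, GTT_PAGE_SIZE) == 4096. */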
+#define PRIMARY_RINGBUFFER_SIZE KB(128)
+
+
+/* Ring buffer registers, p277, overview p19
+ */
+#define LP_RING 0x2030
+#define HP_RING 0x2040
+
+#define RING_TAIL 0x00
+#define TAIL_ADDR 0x000FFFF8
+#define I830_TAIL_MASK 0x001FFFF8
+
+#define RING_HEAD 0x04
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define I830_HEAD_MASK 0x001FFFFC
+
+#define RING_START 0x08
+#define START_ADDR 0x03FFFFF8
+#define I830_RING_START_MASK 0xFFFFF000
+
+#define RING_LEN 0x0C
+#define RING_NR_PAGES 0x001FF000
+#define I830_RING_NR_PAGES 0x001FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
+
+
+/* Fence/Tiling ranges [0..7]
+ */
+#define FENCE 0x2000
+#define FENCE_NR 8
+
+#define I915G_FENCE_START_MASK 0x0ff00000
+
+#define I830_FENCE_START_MASK 0x07f80000
+
+#define FENCE_START_MASK 0x03F80000
+#define FENCE_X_MAJOR 0x00000000
+#define FENCE_Y_MAJOR 0x00001000
+#define FENCE_SIZE_MASK 0x00000700
+#define FENCE_SIZE_512K 0x00000000
+#define FENCE_SIZE_1M 0x00000100
+#define FENCE_SIZE_2M 0x00000200
+#define FENCE_SIZE_4M 0x00000300
+#define FENCE_SIZE_8M 0x00000400
+#define FENCE_SIZE_16M 0x00000500
+#define FENCE_SIZE_32M 0x00000600
+#define FENCE_SIZE_64M 0x00000700
+#define I915G_FENCE_SIZE_1M 0x00000000
+#define I915G_FENCE_SIZE_2M 0x00000100
+#define I915G_FENCE_SIZE_4M 0x00000200
+#define I915G_FENCE_SIZE_8M 0x00000300
+#define I915G_FENCE_SIZE_16M 0x00000400
+#define I915G_FENCE_SIZE_32M 0x00000500
+#define I915G_FENCE_SIZE_64M 0x00000600
+#define I915G_FENCE_SIZE_128M 0x00000700
+#define FENCE_PITCH_1 0x00000000
+#define FENCE_PITCH_2 0x00000010
+#define FENCE_PITCH_4 0x00000020
+#define FENCE_PITCH_8 0x00000030
+#define FENCE_PITCH_16 0x00000040
+#define FENCE_PITCH_32 0x00000050
+#define FENCE_PITCH_64 0x00000060
+#define FENCE_VALID 0x00000001
+
+#include <mmio.h>
+
+# define MMIO_IN8(base, offset) \
+ *(volatile unsigned char *)(((unsigned char*)(base)) + (offset))
+# define MMIO_IN32(base, offset) \
+ read_MMIO_LE32(base, offset)
+# define MMIO_OUT8(base, offset, val) \
+ *(volatile unsigned char *)(((unsigned char*)(base)) + (offset)) = (val)
+# define MMIO_OUT32(base, offset, val) \
+ *(volatile unsigned int *)(void *)(((unsigned char*)(base)) + (offset)) = CPU_TO_LE32(val)
+
+
+ /* Memory mapped register access macros */
+#define INREG8(addr) MMIO_IN8(MMIO, addr)
+#define INREG(addr) MMIO_IN32(MMIO, addr)
+#define OUTREG8(addr, val) MMIO_OUT8(MMIO, addr, val)
+#define OUTREG(addr, val) MMIO_OUT32(MMIO, addr, val)
+
+#define DSPABASE 0x70184
+
+#endif
--- /dev/null
+/**
+ * \file server/intel_dri.c
+ * \brief File to perform the device-specific initialization tasks typically
+ * done in the X server.
+ *
+ * Here they are converted to run in the client (or perhaps a standalone
+ * process), and to work with the frame buffer device rather than the X
+ * server infrastructure.
+ *
+ * Copyright (C) 2006 Dave Airlie (airlied@linux.ie)
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sub license, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial portions
+ of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR
+ ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include "driver.h"
+#include "drm.h"
+
+#include "intel.h"
+#include "i830_dri.h"
+
+#include "memops.h"
+#include "pciaccess.h"
+
+static size_t drm_page_size;
+static int nextTile = 0;
+#define xf86DrvMsg(...) do {} while(0)
+
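+/* Candidate display pitches; i830InitFBDev() below snaps the virtual
+ * width up to the first entry that is large enough.  The list is
+ * zero-terminated.
+ */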
+static const int pitches[] = {
+ 128 * 8,
+ 128 * 16,
+ 128 * 32,
+ 128 * 64,
+ 0
+};
+
+static Bool I830DRIDoMappings(DRIDriverContext *ctx, I830Rec *pI830, drmI830Sarea *sarea);
+
+static unsigned long
+GetBestTileAlignment(unsigned long size)
+{
+ unsigned long i;
+
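+ /* Round the size up to the next power of two, clamped to the
+ * 512KB..64MB range that the FENCE_SIZE_* encodings can express.
+ */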
+ for (i = KB(512); i < size; i <<= 1)
+ ;
+
+ if (i > MB(64))
+ i = MB(64);
+
+ return i;
+}
+
+static void SetFenceRegs(const DRIDriverContext *ctx, I830Rec *pI830)
+{
+ int i;
+ unsigned char *MMIO = ctx->MMIOAddress;
+
+ for (i = 0; i < 8; i++) {
+ OUTREG(FENCE + i * 4, pI830->Fence[i]);
+ // if (I810_DEBUG & DEBUG_VERBOSE_VGA)
+ fprintf(stderr,"Fence Register : %x\n", pI830->Fence[i]);
+ }
+}
+
+/* Tiled memory is good... really, really good...
+ *
+ * Need to make it less likely that we miss out on this - probably
+ * need to move the frontbuffer away from the 'guaranteed' alignment
+ * of the first memory segment, or perhaps allocate a discontiguous
+ * framebuffer to get more alignment 'sweet spots'.
+ */
+static void
+SetFence(const DRIDriverContext *ctx, I830Rec *pI830,
+ int nr, unsigned int start, unsigned int pitch,
+ unsigned int size)
+{
+ unsigned int val;
+ unsigned int fence_mask = 0;
+ unsigned int fence_pitch;
+
+ if (nr < 0 || nr > 7) {
+ fprintf(stderr,
+ "SetFence: fence %d out of range\n",nr);
+ return;
+ }
+
+ pI830->Fence[nr] = 0;
+
+ if (IS_I9XX(pI830))
+ fence_mask = ~I915G_FENCE_START_MASK;
+ else
+ fence_mask = ~I830_FENCE_START_MASK;
+
+ if (start & fence_mask) {
+ fprintf(stderr,
+ "SetFence: %d: start (0x%08x) is not %s aligned\n",
+ nr, start, (IS_I9XX(pI830)) ? "1MB" : "512k");
+ return;
+ }
+
+ if (start % size) {
+ fprintf(stderr,
+ "SetFence: %d: start (0x%08x) is not size (%dk) aligned\n",
+ nr, start, size / 1024);
+ return;
+ }
+
+ if (pitch & 127) {
+ fprintf(stderr,
+ "SetFence: %d: pitch (%d) not a multiple of 128 bytes\n",
+ nr, pitch);
+ return;
+ }
+
+ val = (start | FENCE_X_MAJOR | FENCE_VALID);
+
+ if (IS_I9XX(pI830)) {
+ switch (size) {
+ case MB(1):
+ val |= I915G_FENCE_SIZE_1M;
+ break;
+ case MB(2):
+ val |= I915G_FENCE_SIZE_2M;
+ break;
+ case MB(4):
+ val |= I915G_FENCE_SIZE_4M;
+ break;
+ case MB(8):
+ val |= I915G_FENCE_SIZE_8M;
+ break;
+ case MB(16):
+ val |= I915G_FENCE_SIZE_16M;
+ break;
+ case MB(32):
+ val |= I915G_FENCE_SIZE_32M;
+ break;
+ case MB(64):
+ val |= I915G_FENCE_SIZE_64M;
+ break;
+ default:
+ fprintf(stderr,
+ "SetFence: %d: illegal size (%d kByte)\n", nr, size / 1024);
+ return;
+ }
+ } else {
+ switch (size) {
+ case KB(512):
+ val |= FENCE_SIZE_512K;
+ break;
+ case MB(1):
+ val |= FENCE_SIZE_1M;
+ break;
+ case MB(2):
+ val |= FENCE_SIZE_2M;
+ break;
+ case MB(4):
+ val |= FENCE_SIZE_4M;
+ break;
+ case MB(8):
+ val |= FENCE_SIZE_8M;
+ break;
+ case MB(16):
+ val |= FENCE_SIZE_16M;
+ break;
+ case MB(32):
+ val |= FENCE_SIZE_32M;
+ break;
+ case MB(64):
+ val |= FENCE_SIZE_64M;
+ break;
+ default:
+ fprintf(stderr,
+ "SetFence: %d: illegal size (%d kByte)\n", nr, size / 1024);
+ return;
+ }
+ }
+
+ if (IS_I9XX(pI830))
+ fence_pitch = pitch / 512;
+ else
+ fence_pitch = pitch / 128;
+
+ switch (fence_pitch) {
+ case 1:
+ val |= FENCE_PITCH_1;
+ break;
+ case 2:
+ val |= FENCE_PITCH_2;
+ break;
+ case 4:
+ val |= FENCE_PITCH_4;
+ break;
+ case 8:
+ val |= FENCE_PITCH_8;
+ break;
+ case 16:
+ val |= FENCE_PITCH_16;
+ break;
+ case 32:
+ val |= FENCE_PITCH_32;
+ break;
+ case 64:
+ val |= FENCE_PITCH_64;
+ break;
+ default:
+ fprintf(stderr,
+ "SetFence: %d: illegal pitch (%d)\n", nr, pitch);
+ return;
+ }
+
+ pI830->Fence[nr] = val;
+}
+
+static Bool
+MakeTiles(const DRIDriverContext *ctx, I830Rec *pI830, I830MemRange *pMem)
+{
+ int pitch, ntiles, i;
+
+ pitch = pMem->Pitch * ctx->cpp;
+ /*
+ * Simply try to break the region up into at most four pieces of size
+ * equal to the alignment.
+ */
+ ntiles = ROUND_TO(pMem->Size, pMem->Alignment) / pMem->Alignment;
+ if (ntiles >= 4) {
+ return FALSE;
+ }
+
+ for (i = 0; i < ntiles; i++, nextTile++) {
+ SetFence(ctx, pI830, nextTile, pMem->Start + i * pMem->Alignment,
+ pitch, pMem->Alignment);
+ }
+ return TRUE;
+}
+
+static void I830SetupMemoryTiling(const DRIDriverContext *ctx, I830Rec *pI830)
+{
+ int i;
+
+ /* Clear out */
+ for (i = 0; i < 8; i++)
+ pI830->Fence[i] = 0;
+
+ nextTile = 0;
+
+ if (pI830->BackBuffer.Alignment >= KB(512)) {
+ if (MakeTiles(ctx, pI830, &(pI830->BackBuffer))) {
+ fprintf(stderr,
+ "Activating tiled memory for the back buffer.\n");
+ } else {
+ fprintf(stderr,
+ "MakeTiles failed for the back buffer.\n");
+ pI830->allowPageFlip = FALSE;
+ }
+ }
+
+ if (pI830->DepthBuffer.Alignment >= KB(512)) {
+ if (MakeTiles(ctx, pI830, &(pI830->DepthBuffer))) {
+ fprintf(stderr,
+ "Activating tiled memory for the depth buffer.\n");
+ } else {
+ fprintf(stderr,
+ "MakeTiles failed for the depth buffer.\n");
+ }
+ }
+
+ return;
+}
+
+static int I830DetectMemory(const DRIDriverContext *ctx, I830Rec *pI830)
+{
+ struct pci_device host_bridge;
+ uint32_t gmch_ctrl;
+ int memsize = 0;
+ int range;
+
+ memset(&host_bridge, 0, sizeof(host_bridge));
+
+ pci_device_cfg_read_u32(&host_bridge, &gmch_ctrl, I830_GMCH_CTRL);
+
+ /* We need to reduce the stolen size by the space used by the GTT and
+ * the popup. The GTT size varies with FbMapSize, and the popup is 4KB. */
+ range = (ctx->shared.fbSize / (1024*1024)) + 4;
+
+ if (IS_I85X(pI830) || IS_I865G(pI830) || IS_I9XX(pI830)) {
+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+ case I855_GMCH_GMS_STOLEN_1M:
+ memsize = MB(1) - KB(range);
+ break;
+ case I855_GMCH_GMS_STOLEN_4M:
+ memsize = MB(4) - KB(range);
+ break;
+ case I855_GMCH_GMS_STOLEN_8M:
+ memsize = MB(8) - KB(range);
+ break;
+ case I855_GMCH_GMS_STOLEN_16M:
+ memsize = MB(16) - KB(range);
+ break;
+ case I855_GMCH_GMS_STOLEN_32M:
+ memsize = MB(32) - KB(range);
+ break;
+ case I915G_GMCH_GMS_STOLEN_48M:
+ if (IS_I9XX(pI830))
+ memsize = MB(48) - KB(range);
+ break;
+ case I915G_GMCH_GMS_STOLEN_64M:
+ if (IS_I9XX(pI830))
+ memsize = MB(64) - KB(range);
+ break;
+ }
+ } else {
+ switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+ case I830_GMCH_GMS_STOLEN_512:
+ memsize = KB(512) - KB(range);
+ break;
+ case I830_GMCH_GMS_STOLEN_1024:
+ memsize = MB(1) - KB(range);
+ break;
+ case I830_GMCH_GMS_STOLEN_8192:
+ memsize = MB(8) - KB(range);
+ break;
+ case I830_GMCH_GMS_LOCAL:
+ memsize = 0;
+ xf86DrvMsg(pScrn->scrnIndex, X_WARNING,
+ "Local memory found, but won't be used.\n");
+ break;
+ }
+ }
+ if (memsize > 0) {
+ fprintf(stderr,
+ "detected %d kB stolen memory.\n", memsize / 1024);
+ } else {
+ fprintf(stderr,
+ "no video memory detected.\n");
+ }
+ return memsize;
+}
+
+static int AgpInit(const DRIDriverContext *ctx, I830Rec *info)
+{
+ unsigned long mode = 0x4;
+
+ if (drmAgpAcquire(ctx->drmFD) < 0) {
+ fprintf(stderr, "[gart] AGP not available\n");
+ return 0;
+ }
+
+ if (drmAgpEnable(ctx->drmFD, mode) < 0) {
+ fprintf(stderr, "[gart] AGP not enabled\n");
+ drmAgpRelease(ctx->drmFD);
+ return 0;
+ }
+ else
+ fprintf(stderr, "[gart] AGP enabled at %dx\n", ctx->agpmode);
+
+ return 1;
+}
+
+/*
+ * Allocate memory from the given pool. Grow the pool if needed and if
+ * possible.
+ */
+static unsigned long
+AllocFromPool(const DRIDriverContext *ctx, I830Rec *pI830,
+ I830MemRange *result, I830MemPool *pool,
+ long size, unsigned long alignment, int flags)
+{
+ long needed, start, end;
+
+ if (!result || !pool || !size)
+ return 0;
+
+ /* Calculate how much space is needed. */
+ if (alignment <= GTT_PAGE_SIZE)
+ needed = size;
+ else {
+ start = ROUND_TO(pool->Free.Start, alignment);
+ end = ROUND_TO(start + size, alignment);
+ needed = end - pool->Free.Start;
+ }
+ if (needed > pool->Free.Size) {
+ return 0;
+ }
+
+ result->Start = ROUND_TO(pool->Free.Start, alignment);
+ pool->Free.Start += needed;
+ result->End = pool->Free.Start;
+
+ pool->Free.Size = pool->Free.End - pool->Free.Start;
+ result->Size = result->End - result->Start;
+ result->Pool = pool;
+ result->Alignment = alignment;
+ return needed;
+}
+
+static unsigned long AllocFromAGP(const DRIDriverContext *ctx, I830Rec *pI830, long size, unsigned long alignment, I830MemRange *result)
+{
+ unsigned long start, end;
+ unsigned long newApStart, newApEnd;
+ int ret;
+ if (!result || !size)
+ return 0;
+
+ if (!alignment)
+ alignment = 4;
+
+ start = ROUND_TO(pI830->MemoryAperture.Start, alignment);
+ end = ROUND_TO(start + size, alignment);
+ newApStart = end;
+ newApEnd = pI830->MemoryAperture.End;
+
+ ret=drmAgpAlloc(ctx->drmFD, size, 0, &(result->Physical), (drm_handle_t *)&(result->Key));
+
+ if (ret)
+ {
+ fprintf(stderr,"drmAgpAlloc failed %d\n", ret);
+ return 0;
+ }
+ pI830->allocatedMemory += size;
+ pI830->MemoryAperture.Start = newApStart;
+ pI830->MemoryAperture.End = newApEnd;
+ pI830->MemoryAperture.Size = newApEnd - newApStart;
+ // pI830->FreeMemory -= size;
+ result->Start = start;
+ result->End = start + size;
+ result->Size = size;
+ result->Offset = start;
+ result->Alignment = alignment;
+ result->Pool = NULL;
+
+ return size;
+}
+
+unsigned long
+I830AllocVidMem(const DRIDriverContext *ctx, I830Rec *pI830, I830MemRange *result, I830MemPool *pool, long size, unsigned long alignment, int flags)
+{
+ int ret;
+
+ if (!result)
+ return 0;
+
+ /* Make sure these are initialised. */
+ result->Size = 0;
+ result->Key = -1;
+
+ if (!size) {
+ return 0;
+ }
+
+ if (pool->Free.Size < size)
+ return AllocFromAGP(ctx, pI830, size, alignment, result);
+ else
+ {
+ ret = AllocFromPool(ctx, pI830, result, pool, size, alignment, flags);
+
+ if (ret==0)
+ return AllocFromAGP(ctx, pI830, size, alignment, result);
+ return ret;
+ }
+}
+
+static Bool BindAgpRange(const DRIDriverContext *ctx, I830MemRange *mem)
+{
+ if (!mem)
+ return FALSE;
+
+ if (mem->Key == -1)
+ return TRUE;
+
+ return !drmAgpBind(ctx->drmFD, mem->Key, mem->Offset);
+}
+
+/* Simple memory allocation routines: the ring buffer goes in low memory,
+ * and the front, back and depth buffers must be allocated with the
+ * correct alignment.
+ */
+
+static Bool
+I830AllocateMemory(const DRIDriverContext *ctx, I830Rec *pI830)
+{
+ unsigned long size, ret;
+ unsigned long lines, lineSize, align;
+
+ /* allocate ring buffer */
+ memset(pI830->LpRing, 0, sizeof(I830RingBuffer));
+ pI830->LpRing->mem.Key = -1;
+
+ size = PRIMARY_RINGBUFFER_SIZE;
+
+ ret = I830AllocVidMem(ctx, pI830, &pI830->LpRing->mem, &pI830->StolenPool, size, 0x1000, 0);
+
+ if (ret != size)
+ {
+ fprintf(stderr,"unable to allocate ring buffer %ld\n", ret);
+ return FALSE;
+ }
+
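+ /* PRIMARY_RINGBUFFER_SIZE is assumed to be a power of two, so the
+ * allocated size minus one can serve directly as the tail wrap mask.
+ */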
+ pI830->LpRing->tail_mask = pI830->LpRing->mem.Size - 1;
+
+
+ /* allocate front buffer */
+ memset(&(pI830->FrontBuffer), 0, sizeof(pI830->FrontBuffer));
+ pI830->FrontBuffer.Key = -1;
+ pI830->FrontBuffer.Pitch = ctx->shared.virtualWidth;
+
+ align = KB(512);
+
+ lineSize = ctx->shared.virtualWidth * ctx->cpp;
+ lines = (ctx->shared.virtualHeight + 15) / 16 * 16;
+ size = lineSize * lines;
+ size = ROUND_TO_PAGE(size);
+
+ align = GetBestTileAlignment(size);
+
+ ret = I830AllocVidMem(ctx, pI830, &pI830->FrontBuffer, &pI830->StolenPool, size, align, 0);
+ if (ret < size)
+ {
+ fprintf(stderr,"unable to allocate front buffer %ld\n", ret);
+ return FALSE;
+ }
+
+ memset(&(pI830->BackBuffer), 0, sizeof(pI830->BackBuffer));
+ pI830->BackBuffer.Key = -1;
+ pI830->BackBuffer.Pitch = ctx->shared.virtualWidth;
+
+ ret = I830AllocVidMem(ctx, pI830, &pI830->BackBuffer, &pI830->StolenPool, size, align, 0);
+ if (ret < size)
+ {
+ fprintf(stderr,"unable to allocate back buffer %ld\n", ret);
+ return FALSE;
+ }
+
+ memset(&(pI830->DepthBuffer), 0, sizeof(pI830->DepthBuffer));
+ pI830->DepthBuffer.Key = -1;
+ pI830->DepthBuffer.Pitch = ctx->shared.virtualWidth;
+
+ ret = I830AllocVidMem(ctx, pI830, &pI830->DepthBuffer, &pI830->StolenPool, size, align, 0);
+ if (ret < size)
+ {
+ fprintf(stderr,"unable to allocate depth buffer %ld\n", ret);
+ return FALSE;
+ }
+
+ memset(&(pI830->ContextMem), 0, sizeof(pI830->ContextMem));
+ pI830->ContextMem.Key = -1;
+ size = KB(32);
+
+ ret = I830AllocVidMem(ctx, pI830, &pI830->ContextMem, &pI830->StolenPool, size, align, 0);
+ if (ret < size)
+ {
+ fprintf(stderr,"unable to allocate context buffer %ld\n", ret);
+ return FALSE;
+ }
+
+ memset(&(pI830->TexMem), 0, sizeof(pI830->TexMem));
+ pI830->TexMem.Key = -1;
+
+ size = 32768 * 1024;
+ ret = AllocFromAGP(ctx, pI830, size, align, &pI830->TexMem);
+ if (ret < size)
+ {
+ fprintf(stderr,"unable to allocate texture memory %ld\n", ret);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static Bool
+I830BindMemory(const DRIDriverContext *ctx, I830Rec *pI830)
+{
+ if (!BindAgpRange(ctx, &pI830->LpRing->mem))
+ return FALSE;
+ if (!BindAgpRange(ctx, &pI830->FrontBuffer))
+ return FALSE;
+ if (!BindAgpRange(ctx, &pI830->BackBuffer))
+ return FALSE;
+ if (!BindAgpRange(ctx, &pI830->DepthBuffer))
+ return FALSE;
+ if (!BindAgpRange(ctx, &pI830->ContextMem))
+ return FALSE;
+ if (!BindAgpRange(ctx, &pI830->TexMem))
+ return FALSE;
+
+ return TRUE;
+}
+
+static Bool
+I830CleanupDma(const DRIDriverContext *ctx)
+{
+ drmI830Init info;
+
+ memset(&info, 0, sizeof(drmI830Init));
+ info.func = I830_CLEANUP_DMA;
+
+ if (drmCommandWrite(ctx->drmFD, DRM_I830_INIT,
+ &info, sizeof(drmI830Init))) {
+ fprintf(stderr, "I830 Dma Cleanup Failed\n");
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static Bool
+I830InitDma(const DRIDriverContext *ctx, I830Rec *pI830)
+{
+ I830RingBuffer *ring = pI830->LpRing;
+ drmI830Init info;
+
+ memset(&info, 0, sizeof(drmI830Init));
+ info.func = I830_INIT_DMA;
+
+ info.ring_start = ring->mem.Start + pI830->LinearAddr;
+ info.ring_end = ring->mem.End + pI830->LinearAddr;
+ info.ring_size = ring->mem.Size;
+
+ info.mmio_offset = (unsigned int)ctx->MMIOStart;
+
+ info.sarea_priv_offset = sizeof(drm_sarea_t);
+
+ info.front_offset = pI830->FrontBuffer.Start;
+ info.back_offset = pI830->BackBuffer.Start;
+ info.depth_offset = pI830->DepthBuffer.Start;
+ info.w = ctx->shared.virtualWidth;
+ info.h = ctx->shared.virtualHeight;
+ info.pitch = ctx->shared.virtualWidth;
+ info.back_pitch = pI830->BackBuffer.Pitch;
+ info.depth_pitch = pI830->DepthBuffer.Pitch;
+ info.cpp = ctx->cpp;
+
+ if (drmCommandWrite(ctx->drmFD, DRM_I830_INIT,
+ &info, sizeof(drmI830Init))) {
+ fprintf(stderr,
+ "I830 Dma Initialization Failed\n");
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static int I830CheckDRMVersion( const DRIDriverContext *ctx,
+ I830Rec *pI830 )
+{
+ drmVersionPtr version;
+
+ version = drmGetVersion(ctx->drmFD);
+
+ if (version) {
+ int req_minor, req_patch;
+
+ req_minor = 4;
+ req_patch = 0;
+
+ if (version->version_major != 1 ||
+ version->version_minor < req_minor ||
+ (version->version_minor == req_minor &&
+ version->version_patchlevel < req_patch)) {
+ /* Incompatible drm version */
+ fprintf(stderr,
+ "[dri] I830DRIScreenInit failed because of a version "
+ "mismatch.\n"
+ "[dri] i915.o kernel module version is %d.%d.%d "
+ "but version 1.%d.%d or newer is needed.\n"
+ "[dri] Disabling DRI.\n",
+ version->version_major,
+ version->version_minor,
+ version->version_patchlevel,
+ req_minor,
+ req_patch);
+ drmFreeVersion(version);
+ return 0;
+ }
+
+ pI830->drmMinor = version->version_minor;
+ drmFreeVersion(version);
+ }
+ return 1;
+}
+
+static void
+I830SetRingRegs(const DRIDriverContext *ctx, I830Rec *pI830)
+{
+ unsigned int itemp;
+ unsigned char *MMIO = ctx->MMIOAddress;
+
+ OUTREG(LP_RING + RING_LEN, 0);
+ OUTREG(LP_RING + RING_TAIL, 0);
+ OUTREG(LP_RING + RING_HEAD, 0);
+
+ if ((long)(pI830->LpRing->mem.Start & I830_RING_START_MASK) !=
+ pI830->LpRing->mem.Start) {
+ fprintf(stderr,
+ "I830SetRingRegs: Ring buffer start (%lx) violates its "
+ "mask (%x)\n", pI830->LpRing->mem.Start, I830_RING_START_MASK);
+ }
+ /* Don't care about the old value. Reserved bits must be zero anyway. */
+ itemp = pI830->LpRing->mem.Start & I830_RING_START_MASK;
+ OUTREG(LP_RING + RING_START, itemp);
+
+ if (((pI830->LpRing->mem.Size - 4096) & I830_RING_NR_PAGES) !=
+ pI830->LpRing->mem.Size - 4096) {
+ fprintf(stderr,
+ "I830SetRingRegs: Ring buffer size - 4096 (%lx) violates its "
+ "mask (%x)\n", pI830->LpRing->mem.Size - 4096,
+ I830_RING_NR_PAGES);
+ }
+ /* Don't care about the old value. Reserved bits must be zero anyway. */
+ itemp = (pI830->LpRing->mem.Size - 4096) & I830_RING_NR_PAGES;
+ itemp |= (RING_NO_REPORT | RING_VALID);
+ OUTREG(LP_RING + RING_LEN, itemp);
+
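+ /* Seed the software copies of head and tail from the hardware and
+ * compute the free space; the extra 8 bytes presumably keep the tail
+ * from being advanced right up to the head.
+ */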
+ pI830->LpRing->head = INREG(LP_RING + RING_HEAD) & I830_HEAD_MASK;
+ pI830->LpRing->tail = INREG(LP_RING + RING_TAIL);
+ pI830->LpRing->space = pI830->LpRing->head - (pI830->LpRing->tail + 8);
+ if (pI830->LpRing->space < 0)
+ pI830->LpRing->space += pI830->LpRing->mem.Size;
+
+ SetFenceRegs(ctx, pI830);
+
+ /* RESET THE DISPLAY PIPE TO POINT TO THE FRONTBUFFER - hacky
+ hacky hacky */
+ OUTREG(DSPABASE, pI830->FrontBuffer.Start + pI830->LinearAddr);
+
+}
+
+static Bool
+I830SetParam(const DRIDriverContext *ctx, int param, int value)
+{
+ drmI830SetParam sp;
+
+ memset(&sp, 0, sizeof(sp));
+ sp.param = param;
+ sp.value = value;
+
+ if (drmCommandWrite(ctx->drmFD, DRM_I830_SETPARAM, &sp, sizeof(sp))) {
+ fprintf(stderr, "I830 SetParam Failed\n");
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static Bool
+I830DRIMapScreenRegions(DRIDriverContext *ctx, I830Rec *pI830, drmI830Sarea *sarea)
+{
+ fprintf(stderr,
+ "[drm] Mapping front buffer\n");
+
+ if (drmAddMap(ctx->drmFD,
+ (drm_handle_t)(sarea->front_offset + pI830->LinearAddr),
+ sarea->front_size,
+ DRM_FRAME_BUFFER, /*DRM_AGP,*/
+ 0,
+ &sarea->front_handle) < 0) {
+ fprintf(stderr,
+ "[drm] drmAddMap(front_handle) failed. Disabling DRI\n");
+ return FALSE;
+ }
+ ctx->shared.hFrameBuffer = sarea->front_handle;
+ ctx->shared.fbSize = sarea->front_size;
+ fprintf(stderr, "[drm] Front Buffer = 0x%08x\n",
+ sarea->front_handle);
+
+ if (drmAddMap(ctx->drmFD,
+ (drm_handle_t)(sarea->back_offset),
+ sarea->back_size, DRM_AGP, 0,
+ &sarea->back_handle) < 0) {
+ fprintf(stderr,
+ "[drm] drmAddMap(back_handle) failed. Disabling DRI\n");
+ return FALSE;
+ }
+ fprintf(stderr, "[drm] Back Buffer = 0x%08x\n",
+ sarea->back_handle);
+
+ if (drmAddMap(ctx->drmFD,
+ (drm_handle_t)sarea->depth_offset,
+ sarea->depth_size, DRM_AGP, 0,
+ &sarea->depth_handle) < 0) {
+ fprintf(stderr,
+ "[drm] drmAddMap(depth_handle) failed. Disabling DRI\n");
+ return FALSE;
+ }
+ fprintf(stderr, "[drm] Depth Buffer = 0x%08x\n",
+ sarea->depth_handle);
+
+ if (drmAddMap(ctx->drmFD,
+ (drm_handle_t)sarea->tex_offset,
+ sarea->tex_size, DRM_AGP, 0,
+ &sarea->tex_handle) < 0) {
+ fprintf(stderr,
+ "[drm] drmAddMap(tex_handle) failed. Disabling DRI\n");
+ return FALSE;
+ }
+ fprintf(stderr, "[drm] textures = 0x%08x\n",
+ sarea->tex_handle);
+
+ return TRUE;
+}
+
+
+static void
+I830DRIUnmapScreenRegions(const DRIDriverContext *ctx, I830Rec *pI830, drmI830Sarea *sarea)
+{
+#if 1
+ if (sarea->front_handle) {
+ drmRmMap(ctx->drmFD, sarea->front_handle);
+ sarea->front_handle = 0;
+ }
+#endif
+ if (sarea->back_handle) {
+ drmRmMap(ctx->drmFD, sarea->back_handle);
+ sarea->back_handle = 0;
+ }
+ if (sarea->depth_handle) {
+ drmRmMap(ctx->drmFD, sarea->depth_handle);
+ sarea->depth_handle = 0;
+ }
+ if (sarea->tex_handle) {
+ drmRmMap(ctx->drmFD, sarea->tex_handle);
+ sarea->tex_handle = 0;
+ }
+}
+
+static void
+I830InitTextureHeap(const DRIDriverContext *ctx, I830Rec *pI830, drmI830Sarea *sarea)
+{
+ /* Start up the simple memory manager for agp space */
+ drmI830MemInitHeap drmHeap;
+ drmHeap.region = I830_MEM_REGION_AGP;
+ drmHeap.start = 0;
+ drmHeap.size = sarea->tex_size;
+
+ if (drmCommandWrite(ctx->drmFD, DRM_I830_INIT_HEAP,
+ &drmHeap, sizeof(drmHeap))) {
+ fprintf(stderr,
+ "[drm] Failed to initialized agp heap manager\n");
+ } else {
+ fprintf(stderr,
+ "[drm] Initialized kernel agp heap manager, %d\n",
+ sarea->tex_size);
+
+ I830SetParam(ctx, I830_SETPARAM_TEX_LRU_LOG_GRANULARITY,
+ sarea->log_tex_granularity);
+ }
+}
+
+static Bool
+I830DRIDoMappings(DRIDriverContext *ctx, I830Rec *pI830, drmI830Sarea *sarea)
+{
+ if (drmAddMap(ctx->drmFD,
+ (drm_handle_t)pI830->LpRing->mem.Start,
+ pI830->LpRing->mem.Size, DRM_AGP, 0,
+ &pI830->ring_map) < 0) {
+ fprintf(stderr,
+ "[drm] drmAddMap(ring_map) failed. Disabling DRI\n");
+ return FALSE;
+ }
+ fprintf(stderr, "[drm] ring buffer = 0x%08x\n",
+ pI830->ring_map);
+
+ if (I830InitDma(ctx, pI830) == FALSE) {
+ return FALSE;
+ }
+
+ /* init to zero to be safe */
+
+ I830DRIMapScreenRegions(ctx, pI830, sarea);
+ I830InitTextureHeap(ctx, pI830, sarea);
+
+ if (ctx->pciDevice != PCI_CHIP_845_G &&
+ ctx->pciDevice != PCI_CHIP_I830_M) {
+ I830SetParam(ctx, I830_SETPARAM_USE_MI_BATCHBUFFER_START, 1 );
+ }
+
+ /* Okay now initialize the dma engine */
+ {
+ pI830->irq = drmGetInterruptFromBusID(ctx->drmFD,
+ ctx->pciBus,
+ ctx->pciDevice,
+ ctx->pciFunc);
+
+ if (drmCtlInstHandler(ctx->drmFD, pI830->irq)) {
+ fprintf(stderr,
+ "[drm] failure adding irq handler\n");
+ pI830->irq = 0;
+ return FALSE;
+ }
+ else
+ fprintf(stderr,
+ "[drm] dma control initialized, using IRQ %d\n",
+ pI830->irq);
+ }
+
+ fprintf(stderr, "[dri] visual configs initialized\n");
+
+ return TRUE;
+}
+
+static Bool
+I830ClearScreen(DRIDriverContext *ctx, I830Rec *pI830, drmI830Sarea *sarea)
+{
+ /* need to drmMap front and back buffers and zero them */
+ drmAddress map_addr;
+ int ret;
+
+ ret = drmMap(ctx->drmFD,
+ sarea->front_handle,
+ sarea->front_size,
+ &map_addr);
+
+ if (ret)
+ {
+ fprintf(stderr, "Unable to map front buffer\n");
+ return FALSE;
+ }
+
+ drimemsetio((char *)map_addr,
+ 0,
+ sarea->front_size);
+ drmUnmap(map_addr, sarea->front_size);
+
+
+ ret = drmMap(ctx->drmFD,
+ sarea->back_handle,
+ sarea->back_size,
+ &map_addr);
+
+ if (ret)
+ {
+ fprintf(stderr, "Unable to map back buffer\n");
+ return FALSE;
+ }
+
+ drimemsetio((char *)map_addr,
+ 0,
+ sarea->back_size);
+ drmUnmap(map_addr, sarea->back_size);
+
+ return TRUE;
+}
+
+static Bool
+I830ScreenInit(DRIDriverContext *ctx, I830Rec *pI830)
+
+{
+ I830DRIPtr pI830DRI;
+ drmI830Sarea *pSAREAPriv;
+ int err;
+
+ drm_page_size = getpagesize();
+
+ pI830->registerSize = ctx->MMIOSize;
+ /* This is a hack for now. We have to have more than a 4k page here
+ * because of the size of the state. However, the state should be
+ * in a per-context mapping. This will be added in the Mesa 3.5 port
+ * of the I830 driver.
+ */
+ ctx->shared.SAREASize = SAREA_MAX;
+
+ /* Note that drmOpen will try to load the kernel module, if needed. */
+ ctx->drmFD = drmOpen("i915", NULL );
+ if (ctx->drmFD < 0) {
+ fprintf(stderr, "[drm] drmOpen failed\n");
+ return 0;
+ }
+
+ if ((err = drmSetBusid(ctx->drmFD, ctx->pciBusID)) < 0) {
+ fprintf(stderr, "[drm] drmSetBusid failed (%d, %s), %s\n",
+ ctx->drmFD, ctx->pciBusID, strerror(-err));
+ return 0;
+ }
+
+ if (drmAddMap( ctx->drmFD,
+ 0,
+ ctx->shared.SAREASize,
+ DRM_SHM,
+ DRM_CONTAINS_LOCK,
+ &ctx->shared.hSAREA) < 0)
+ {
+ fprintf(stderr, "[drm] drmAddMap failed\n");
+ return 0;
+ }
+
+ fprintf(stderr, "[drm] added %d byte SAREA at 0x%08x\n",
+ ctx->shared.SAREASize, ctx->shared.hSAREA);
+
+ if (drmMap( ctx->drmFD,
+ ctx->shared.hSAREA,
+ ctx->shared.SAREASize,
+ (drmAddressPtr)(&ctx->pSAREA)) < 0)
+ {
+ fprintf(stderr, "[drm] drmMap failed\n");
+ return 0;
+
+ }
+
+ memset(ctx->pSAREA, 0, ctx->shared.SAREASize);
+ fprintf(stderr, "[drm] mapped SAREA 0x%08x to %p, size %d\n",
+ ctx->shared.hSAREA, ctx->pSAREA, ctx->shared.SAREASize);
+
+
+ if (drmAddMap(ctx->drmFD,
+ ctx->MMIOStart,
+ ctx->MMIOSize,
+ DRM_REGISTERS,
+ DRM_READ_ONLY,
+ &pI830->registerHandle) < 0) {
+ fprintf(stderr, "[drm] drmAddMap mmio failed\n");
+ return 0;
+ }
+ fprintf(stderr,
+ "[drm] register handle = 0x%08x\n", pI830->registerHandle);
+
+
+ if (!I830CheckDRMVersion(ctx, pI830)) {
+ return FALSE;
+ }
+
+ /* Create a 'server' context so we can grab the lock for
+ * initialization ioctls.
+ */
+ if ((err = drmCreateContext(ctx->drmFD, &ctx->serverContext)) != 0) {
+ fprintf(stderr, "%s: drmCreateContext failed %d\n", __FUNCTION__, err);
+ return 0;
+ }
+
+ DRM_LOCK(ctx->drmFD, ctx->pSAREA, ctx->serverContext, 0);
+
+ /* Initialize the SAREA private data structure */
+ pSAREAPriv = (drmI830Sarea *)(((char*)ctx->pSAREA) +
+ sizeof(drm_sarea_t));
+ memset(pSAREAPriv, 0, sizeof(*pSAREAPriv));
+
+ pI830->StolenMemory.Size = I830DetectMemory(ctx, pI830);
+ pI830->StolenMemory.Start = 0;
+ pI830->StolenMemory.End = pI830->StolenMemory.Size;
+
+ pI830->MemoryAperture.Start = pI830->StolenMemory.End;
+ pI830->MemoryAperture.End = KB(40000);
+ pI830->MemoryAperture.Size = pI830->MemoryAperture.End - pI830->MemoryAperture.Start;
+
+ pI830->StolenPool.Fixed = pI830->StolenMemory;
+ pI830->StolenPool.Total = pI830->StolenMemory;
+ pI830->StolenPool.Free = pI830->StolenPool.Total;
+ pI830->FreeMemory = pI830->StolenPool.Total.Size;
+
+ if (!AgpInit(ctx, pI830))
+ return FALSE;
+
+ if (I830AllocateMemory(ctx, pI830) == FALSE)
+ {
+ return FALSE;
+ }
+
+ if (I830BindMemory(ctx, pI830) == FALSE)
+ {
+ return FALSE;
+ }
+
+ pSAREAPriv->front_offset = pI830->FrontBuffer.Start;
+ pSAREAPriv->front_size = pI830->FrontBuffer.Size;
+ pSAREAPriv->width = ctx->shared.virtualWidth;
+ pSAREAPriv->height = ctx->shared.virtualHeight;
+ pSAREAPriv->pitch = ctx->shared.virtualWidth;
+ pSAREAPriv->virtualX = ctx->shared.virtualWidth;
+ pSAREAPriv->virtualY = ctx->shared.virtualHeight;
+ pSAREAPriv->back_offset = pI830->BackBuffer.Start;
+ pSAREAPriv->back_size = pI830->BackBuffer.Size;
+ pSAREAPriv->depth_offset = pI830->DepthBuffer.Start;
+ pSAREAPriv->depth_size = pI830->DepthBuffer.Size;
+ pSAREAPriv->tex_offset = pI830->TexMem.Start;
+ pSAREAPriv->tex_size = pI830->TexMem.Size;
+ pSAREAPriv->log_tex_granularity = pI830->TexGranularity;
+
+ ctx->driverClientMsg = malloc(sizeof(I830DRIRec));
+ ctx->driverClientMsgSize = sizeof(I830DRIRec);
+ pI830DRI = (I830DRIPtr)ctx->driverClientMsg;
+ pI830DRI->deviceID = pI830->Chipset;
+ pI830DRI->regsSize = I830_REG_SIZE;
+ pI830DRI->width = ctx->shared.virtualWidth;
+ pI830DRI->height = ctx->shared.virtualHeight;
+ pI830DRI->mem = ctx->shared.fbSize;
+ pI830DRI->cpp = ctx->cpp;
+ pI830DRI->backOffset = pI830->BackBuffer.Start;
+ pI830DRI->backPitch = pI830->BackBuffer.Pitch;
+
+ pI830DRI->depthOffset = pI830->DepthBuffer.Start;
+ pI830DRI->depthPitch = pI830->DepthBuffer.Pitch;
+
+ pI830DRI->fbOffset = pI830->FrontBuffer.Start;
+ pI830DRI->fbStride = pI830->FrontBuffer.Pitch;
+
+ pI830DRI->bitsPerPixel = ctx->bpp;
+ pI830DRI->sarea_priv_offset = sizeof(drm_sarea_t);
+
+ err = I830DRIDoMappings(ctx, pI830, pSAREAPriv);
+ if (err == FALSE)
+ return FALSE;
+
+ I830SetupMemoryTiling(ctx, pI830);
+
+ /* Quick hack to clear the front & back buffers. Could also use
+ * the clear ioctl to do this, but would need to set up hw state
+ * first.
+ */
+ I830ClearScreen(ctx, pI830, pSAREAPriv);
+
+ I830SetRingRegs(ctx, pI830);
+
+ return TRUE;
+}
+
+
+/**
+ * \brief Validate the fbdev mode.
+ *
+ * \param ctx display handle.
+ *
+ * \return one on success, or zero on failure.
+ *
+ * Currently a no-op that simply returns 1.
+ *
+ * \sa radeonValidateMode().
+ */
+static int i830ValidateMode( const DRIDriverContext *ctx )
+{
+ return 1;
+}
+
+/**
+ * \brief Examine mode returned by fbdev.
+ *
+ * \param ctx display handle.
+ *
+ * \return one on success, or zero on failure.
+ *
+ * Restores registers that fbdev has clobbered and returns 1.
+ *
+ * \sa i810ValidateMode().
+ */
+static int i830PostValidateMode( const DRIDriverContext *ctx )
+{
+ I830Rec *pI830 = ctx->driverPrivate;
+
+ I830SetRingRegs(ctx, pI830);
+ return 1;
+}
+
+
+/**
+ * \brief Initialize the framebuffer device mode
+ *
+ * \param ctx display handle.
+ *
+ * \return one on success, or zero on failure.
+ *
+ * Rounds the virtual width in \p ctx up to a supported pitch and then
+ * calls I830ScreenInit() for the screen initialization.
+ *
+ * Before exiting, the framebuffer memory is cleared by accessing it directly.
+ */
+static int i830InitFBDev( DRIDriverContext *ctx )
+{
+ I830Rec *pI830 = calloc(1, sizeof(I830Rec));
+ int i;
+
+ {
+ int dummy = ctx->shared.virtualWidth;
+
+ switch (ctx->bpp / 8) {
+ case 1: dummy = (ctx->shared.virtualWidth + 127) & ~127; break;
+ case 2: dummy = (ctx->shared.virtualWidth + 31) & ~31; break;
+ case 3:
+ case 4: dummy = (ctx->shared.virtualWidth + 15) & ~15; break;
+ }
+
+ ctx->shared.virtualWidth = dummy;
+ ctx->shared.Width = ctx->shared.virtualWidth;
+ }
+
+
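+ /* Snap the virtual width up to the smallest pitches[] entry that is
+ * large enough; e.g. a width of 800 (already 16-aligned for 32bpp after
+ * the rounding above) would become 1024 here.
+ */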
+ for (i = 0; pitches[i] != 0; i++) {
+ if (pitches[i] >= ctx->shared.virtualWidth) {
+ ctx->shared.virtualWidth = pitches[i];
+ break;
+ }
+ }
+
+ ctx->driverPrivate = (void *)pI830;
+
+ pI830->LpRing = calloc(1, sizeof(I830RingBuffer));
+ pI830->Chipset = ctx->chipset;
+ pI830->LinearAddr = ctx->FBStart;
+
+ if (!I830ScreenInit( ctx, pI830 ))
+ return 0;
+
+
+ return 1;
+}
+
+
+/**
+ * \brief The screen is being closed, so clean up any state and free any
+ * resources used by the DRI.
+ *
+ * \param ctx display handle.
+ *
+ * Unmaps the SAREA, closes the DRM device file descriptor and frees the driver
+ * private data.
+ */
+static void i830HaltFBDev( DRIDriverContext *ctx )
+{
+ drmI830Sarea *pSAREAPriv;
+ I830Rec *pI830 = ctx->driverPrivate;
+
+ if (pI830->irq) {
+ drmCtlUninstHandler(ctx->drmFD);
+ pI830->irq = 0; }
+
+ I830CleanupDma(ctx);
+
+ pSAREAPriv = (drmI830Sarea *)(((char*)ctx->pSAREA) +
+ sizeof(drm_sarea_t));
+
+ I830DRIUnmapScreenRegions(ctx, pI830, pSAREAPriv);
+ drmUnmap( ctx->pSAREA, ctx->shared.SAREASize );
+ drmClose(ctx->drmFD);
+
+ if (ctx->driverPrivate) {
+ free(ctx->driverPrivate);
+ ctx->driverPrivate = 0;
+ }
+}
+
+
+extern void i810NotifyFocus( int );
+
+/**
+ * \brief Exported driver interface for Mini GLX.
+ *
+ * \sa DRIDriverRec.
+ */
+const struct DRIDriverRec __driDriver = {
+ i830ValidateMode,
+ i830PostValidateMode,
+ i830InitFBDev,
+ i830HaltFBDev,
+ NULL,//I830EngineShutdown,
+ NULL, //I830EngineRestore,
+#ifndef _EMBEDDED
+ 0,
+#else
+ i810NotifyFocus,
+#endif
+};
-void intelClearWithBlit(GLcontext *ctx, GLbitfield flags, GLboolean all,
- GLint cx1, GLint cy1, GLint cw, GLint ch)
+void intelClearWithBlit(GLcontext *ctx, GLbitfield flags)
{
struct intel_context *intel = intel_context( ctx );
intelScreenPrivate *intelScreen = intel->intelScreen;
GLuint clear_depth, clear_color;
- GLint cx, cy;
+ GLint cx, cy, cw, ch;
GLint cpp = intelScreen->cpp;
+ GLboolean all;
GLint i;
struct intel_region *front = intel->front_region;
struct intel_region *back = intel->back_region;
intelFlush( &intel->ctx );
LOCK_HARDWARE( intel );
{
- /* Refresh the cx/y/w/h values as they may have been invalidated
- * by a new window position or size picked up when we did
- * LOCK_HARDWARE above. The values passed by mesa are not
- * reliable.
- */
- {
- cx = ctx->DrawBuffer->_Xmin;
- cy = ctx->DrawBuffer->_Ymin;
- ch = ctx->DrawBuffer->_Ymax - ctx->DrawBuffer->_Ymin;
- cw = ctx->DrawBuffer->_Xmax - ctx->DrawBuffer->_Xmin;
- }
+ /* get clear bounds after locking */
+ cx = ctx->DrawBuffer->_Xmin;
+ cy = ctx->DrawBuffer->_Ymin;
+ ch = ctx->DrawBuffer->_Ymax - ctx->DrawBuffer->_Ymin;
+ cw = ctx->DrawBuffer->_Xmax - ctx->DrawBuffer->_Xmin;
+ all = (cw == ctx->DrawBuffer->Width && ch == ctx->DrawBuffer->Height);
/* flip top to bottom */
- cy = intel->driDrawable->h-cy1-ch;
- cx = cx1 + intel->drawX;
+ cy = intel->driDrawable->h - cy - ch;
+ cx = cx + intel->drawX;
cy += intel->drawY;
/* adjust for page flipping */
extern void intelCopyBuffer( const __DRIdrawablePrivate *dpriv,
const drm_clip_rect_t *rect );
-extern void intelClearWithBlit(GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint cx1, GLint cy1, GLint cw, GLint ch);
+extern void intelClearWithBlit(GLcontext *ctx, GLbitfield mask);
extern void intelEmitCopyBlit( struct intel_context *intel,
GLuint cpp,
* machine independent. Maybe we'll get there one day.
*/
static void intelClearWithTris(struct intel_context *intel,
- GLbitfield mask,
- GLboolean all,
- GLint cx, GLint cy,
- GLint cw, GLint ch)
+ GLbitfield mask)
{
+ GLcontext *ctx = &intel->ctx;
drm_clip_rect_t clear;
+ GLint cx, cy, cw, ch;
if (INTEL_DEBUG & DEBUG_DRI)
_mesa_printf("%s %x\n", __FUNCTION__, mask);
intel->vtbl.install_meta_state(intel);
- /* Refresh the cx/y/w/h values as they may have been invalidated
- * by a new window position or size picked up when we did
- * LOCK_HARDWARE above. The values passed by mesa are not
- * reliable.
- */
- {
- GLcontext *ctx = &intel->ctx;
- cx = ctx->DrawBuffer->_Xmin;
- cy = ctx->DrawBuffer->_Ymin;
- ch = ctx->DrawBuffer->_Ymax - ctx->DrawBuffer->_Ymin;
- cw = ctx->DrawBuffer->_Xmax - ctx->DrawBuffer->_Xmin;
- }
+ /* Get clear bounds after locking */
+ cx = ctx->DrawBuffer->_Xmin;
+ cy = ctx->DrawBuffer->_Ymin;
+ cw = ctx->DrawBuffer->_Xmax - ctx->DrawBuffer->_Xmin;
+ ch = ctx->DrawBuffer->_Ymax - ctx->DrawBuffer->_Ymin;
clear.x1 = cx;
clear.y1 = cy;
-static void intelClear(GLcontext *ctx,
- GLbitfield mask,
- GLboolean all,
- GLint cx, GLint cy,
- GLint cw, GLint ch)
+static void intelClear(GLcontext *ctx, GLbitfield mask)
{
struct intel_context *intel = intel_context( ctx );
const GLuint colorMask = *((GLuint *) &ctx->Color.ColorMask);
GLbitfield swrast_mask = 0;
if (INTEL_DEBUG & DEBUG_DRI)
- fprintf(stderr, "%s %x all %d dims %d,%d %dx%d\n", __FUNCTION__,
- mask, all, cx, cy, cw, ch);
+ fprintf(stderr, "%s %x\n", __FUNCTION__, mask);
if (mask & BUFFER_BIT_FRONT_LEFT) {
intelFlush( ctx );
if (blit_mask)
- intelClearWithBlit( ctx, blit_mask, all, cx, cy, cw, ch );
+ intelClearWithBlit( ctx, blit_mask );
if (tri_mask)
- intelClearWithTris( intel, tri_mask, all, cx, cy, cw, ch);
+ intelClearWithTris( intel, tri_mask );
if (swrast_mask)
- _swrast_Clear( ctx, swrast_mask, all, cx, cy, cw, ch );
+ _swrast_Clear( ctx, swrast_mask );
}
* Buffer clear
*/
-static void mach64DDClear( GLcontext *ctx, GLbitfield mask, GLboolean allFoo,
- GLint cxFoo, GLint cyFoo, GLint cwFoo, GLint chFoo )
+static void mach64DDClear( GLcontext *ctx, GLbitfield mask )
{
mach64ContextPtr mmesa = MACH64_CONTEXT( ctx );
__DRIdrawablePrivate *dPriv = mmesa->driDrawable;
}
if ( mask )
- _swrast_Clear( ctx, mask, 0, 0, 0, 0, 0 );
+ _swrast_Clear( ctx, mask );
if ( !flags )
return;
mmesa->driDrawable = driDrawPriv;
mmesa->dirty = ~0;
mmesa->dirty_cliprects = (MGA_FRONT|MGA_BACK);
- mmesa->mesa_drawable = driDrawPriv;
}
mmesa->driReadable = driReadPriv;
__DRIdrawablePrivate *driDrawable;
__DRIdrawablePrivate *driReadable;
- /**
- * Drawable used by Mesa for software fallbacks for reading and
- * writing. It is set by Mesa's \c SetBuffer callback, and will always be
- * either \c mga_context_t::driDrawable or \c mga_context_t::driReadable.
- */
- __DRIdrawablePrivate *mesa_drawable;
-
__DRIscreenPrivate *driScreen;
struct mga_screen_private_s *mgaScreen;
drm_mga_sarea_t *sarea;
#include "mga_xmesa.h"
#include "utils.h"
-#define DRIVER_DATE "20050609"
+#define DRIVER_DATE "20061030"
/***************************************
}
-static void mgaBufferSize(GLframebuffer *buffer, GLuint *width, GLuint *height)
-{
- GET_CURRENT_CONTEXT(ctx);
- mgaContextPtr mmesa = MGA_CONTEXT(ctx);
-
- /* Need to lock to make sure the driDrawable is uptodate. This
- * information is used to resize Mesa's software buffers, so it has
- * to be correct.
- */
- LOCK_HARDWARE( mmesa );
- *width = mmesa->driDrawable->w;
- *height = mmesa->driDrawable->h;
- UNLOCK_HARDWARE( mmesa );
-}
-
-
void mgaInitDriverFuncs( struct dd_function_table *functions )
{
- functions->GetBufferSize = mgaBufferSize;
functions->GetString = mgaGetString;
}
static void
-mgaClear( GLcontext *ctx, GLbitfield mask, GLboolean allFoo,
- GLint cxFoo, GLint cyFoo, GLint cwFoo, GLint chFoo )
+mgaClear( GLcontext *ctx, GLbitfield mask )
{
mgaContextPtr mmesa = MGA_CONTEXT(ctx);
__DRIdrawablePrivate *dPriv = mmesa->driDrawable;
}
if (mask)
- _swrast_Clear( ctx, mask, 0, 0, 0, 0, 0 );
+ _swrast_Clear( ctx, mask );
}
#define LOCAL_VARS \
mgaContextPtr mmesa = MGA_CONTEXT(ctx); \
- __DRIdrawablePrivate *dPriv = mmesa->mesa_drawable; \
__DRIscreenPrivate *sPriv = mmesa->driScreen; \
driRenderbuffer *drb = (driRenderbuffer *) rb; \
+ const __DRIdrawablePrivate *dPriv = drb->dPriv; \
GLuint pitch = drb->pitch; \
GLuint height = dPriv->h; \
char *buf = (char *)(sPriv->pFB + \
#define LOCAL_DEPTH_VARS \
mgaContextPtr mmesa = MGA_CONTEXT(ctx); \
- __DRIdrawablePrivate *dPriv = mmesa->mesa_drawable; \
__DRIscreenPrivate *sPriv = mmesa->driScreen; \
driRenderbuffer *drb = (driRenderbuffer *) rb; \
+ const __DRIdrawablePrivate *dPriv = drb->dPriv; \
GLuint pitch = drb->pitch; \
GLuint height = dPriv->h; \
char *buf = (char *)(sPriv->pFB + \
}
-static void mgaXMesaSetFrontClipRects( mgaContextPtr mmesa )
+static void mga_set_cliprects(mgaContextPtr mmesa)
{
__DRIdrawablePrivate *driDrawable = mmesa->driDrawable;
- if (driDrawable->numClipRects == 0) {
- static drm_clip_rect_t zeroareacliprect = {0,0,0,0};
- mmesa->numClipRects = 1;
- mmesa->pClipRects = &zeroareacliprect;
- } else {
- mmesa->numClipRects = driDrawable->numClipRects;
- mmesa->pClipRects = driDrawable->pClipRects;
- }
- mmesa->drawX = driDrawable->x;
- mmesa->drawY = driDrawable->y;
-
- mmesa->setup.dstorg = mmesa->drawOffset;
- mmesa->dirty |= MGA_UPLOAD_CONTEXT | MGA_UPLOAD_CLIPRECTS;
-}
-
-
-static void mgaXMesaSetBackClipRects( mgaContextPtr mmesa )
-{
- __DRIdrawablePrivate *driDrawable = mmesa->driDrawable;
-
- if (driDrawable->numBackClipRects == 0)
- {
+ if ((mmesa->draw_buffer != MGA_FRONT)
+ || (driDrawable->numBackClipRects == 0)) {
if (driDrawable->numClipRects == 0) {
static drm_clip_rect_t zeroareacliprect = {0,0,0,0};
mmesa->numClipRects = 1;
void mgaUpdateRects( mgaContextPtr mmesa, GLuint buffers )
{
- __DRIdrawablePrivate *driDrawable = mmesa->driDrawable;
+ __DRIdrawablePrivate *const driDrawable = mmesa->driDrawable;
+ __DRIdrawablePrivate *const driReadable = mmesa->driReadable;
drm_mga_sarea_t *sarea = mmesa->sarea;
- DRI_VALIDATE_DRAWABLE_INFO(mmesa->driScreen, driDrawable);
mmesa->dirty_cliprects = 0;
- if (mmesa->draw_buffer == MGA_FRONT)
- mgaXMesaSetFrontClipRects( mmesa );
- else
- mgaXMesaSetBackClipRects( mmesa );
+ driUpdateFramebufferSize(mmesa->glCtx, driDrawable);
+ if (driDrawable != driReadable) {
+ driUpdateFramebufferSize(mmesa->glCtx, driReadable);
+ }
+
+ mga_set_cliprects(mmesa);
sarea->req_drawable = driDrawable->draw;
sarea->req_draw_buffer = mmesa->draw_buffer;
mgaUpdateClipping( mmesa->glCtx );
mgaCalcViewport( mmesa->glCtx );
-
- mmesa->dirty |= MGA_UPLOAD_CLIPRECTS;
}
switch ( ctx->DrawBuffer->_ColorDrawBufferMask[0] ) {
case BUFFER_BIT_FRONT_LEFT:
mmesa->setup.dstorg = mmesa->mgaScreen->frontOffset;
- mmesa->dirty |= MGA_UPLOAD_CONTEXT;
mmesa->draw_buffer = MGA_FRONT;
- mgaXMesaSetFrontClipRects( mmesa );
- FALLBACK( ctx, MGA_FALLBACK_DRAW_BUFFER, GL_FALSE );
break;
case BUFFER_BIT_BACK_LEFT:
mmesa->setup.dstorg = mmesa->mgaScreen->backOffset;
mmesa->draw_buffer = MGA_BACK;
- mmesa->dirty |= MGA_UPLOAD_CONTEXT;
- mgaXMesaSetBackClipRects( mmesa );
- FALLBACK( ctx, MGA_FALLBACK_DRAW_BUFFER, GL_FALSE );
break;
default:
/* GL_NONE or GL_FRONT_AND_BACK or stereo left&right, etc */
FALLBACK( ctx, MGA_FALLBACK_DRAW_BUFFER, GL_TRUE );
return;
}
+
+ mmesa->dirty |= MGA_UPLOAD_CONTEXT;
+ mga_set_cliprects(mmesa);
+ FALLBACK(ctx, MGA_FALLBACK_DRAW_BUFFER, GL_FALSE);
}
* Buffer clear
*/
-static void r128Clear( GLcontext *ctx, GLbitfield mask, GLboolean allFoo,
- GLint cxFoo, GLint cyFoo, GLint cwFoo, GLint chFoo )
+static void r128Clear( GLcontext *ctx, GLbitfield mask )
{
r128ContextPtr rmesa = R128_CONTEXT(ctx);
__DRIdrawablePrivate *dPriv = rmesa->driDrawable;
}
if ( mask )
- _swrast_Clear( ctx, mask, 0, 0, 0, 0, 0 );
+ _swrast_Clear( ctx, mask );
}
/* ================================================================
* Buffer clear
*/
-static void r200Clear( GLcontext *ctx, GLbitfield mask, GLboolean allFoo,
- GLint cxFoo, GLint cyFoo, GLint cwFoo, GLint chFoo )
+static void r200Clear( GLcontext *ctx, GLbitfield mask )
{
r200ContextPtr rmesa = R200_CONTEXT(ctx);
__DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
if ( mask ) {
if (R200_DEBUG & DEBUG_FALLBACKS)
fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
- _swrast_Clear( ctx, mask, 0, 0, 0, 0, 0 );
+ _swrast_Clear( ctx, mask );
}
if ( !flags )
{ "LG2", 1, R300_FPI0_OUTC_REPL_ALPHA, R300_FPI2_OUTA_LG2 },
{ "RCP", 1, R300_FPI0_OUTC_REPL_ALPHA, R300_FPI2_OUTA_RCP },
{ "RSQ", 1, R300_FPI0_OUTC_REPL_ALPHA, R300_FPI2_OUTA_RSQ },
- { "REPL_ALPHA", 1, R300_FPI0_OUTC_REPL_ALPHA, PFS_INVAL }
+ { "REPL_ALPHA", 1, R300_FPI0_OUTC_REPL_ALPHA, PFS_INVAL },
+ { "CMPH", 3, R300_FPI0_OUTC_CMPH, PFS_INVAL },
};
#define MAKE_SWZ3(x, y, z) (MAKE_SWIZZLE4(SWIZZLE_##x, \
valid: GL_FALSE
};
-/* constant zero source */
+/* constant one source */
static const pfs_reg_t pfs_one = {
type: REG_TYPE_CONST,
index: 0,
valid: GL_TRUE
};
-/* constant one source */
+/* constant half source */
+static const pfs_reg_t pfs_half = {
+ type: REG_TYPE_CONST,
+ index: 0,
+ v_swz: SWIZZLE_HHH,
+ s_swz: SWIZZLE_HALF,
+ valid: GL_TRUE
+};
+
+/* constant zero source */
static const pfs_reg_t pfs_zero = {
type: REG_TYPE_CONST,
index: 0,
return r;
}
-#if 0
static pfs_reg_t emit_const4fv(struct r300_fragment_program *rp, GLfloat *cp)
{
pfs_reg_t r = undef;
ERROR("Out of hw constants!\n");
return r;
}
-
- COPY_4V(rp->constant[r.index], cp);
+ COPY_4V(rp->constant[r.index], cp);
r.valid = GL_TRUE;
return r;
}
-#endif
static __inline pfs_reg_t negate(pfs_reg_t r)
{
cs->dest_in_node = 0;
}
- if (rp->cur_node == 0) rp->first_node_has_tex = 1;
+ if (rp->cur_node == 0)
+ rp->first_node_has_tex = 1;
- rp->tex.inst[rp->tex.length++] = 0
- | (hwsrc << R300_FPITX_SRC_SHIFT)
- | (hwdest << R300_FPITX_DST_SHIFT)
- | (unit << R300_FPITX_IMAGE_SHIFT)
- | (opcode << R300_FPITX_OPCODE_SHIFT); /* not entirely sure about this */
+ rp->tex.inst[rp->tex.length++] = 0
+ | (hwsrc << R300_FPITX_SRC_SHIFT)
+ | (hwdest << R300_FPITX_DST_SHIFT)
+ | (unit << R300_FPITX_IMAGE_SHIFT)
+ /* not entirely sure about this */
+ | (opcode << R300_FPITX_OPCODE_SHIFT);
cs->dest_in_node |= (1 << hwdest);
if (coord.type != REG_TYPE_CONST)
vop = r300_fpop[op].v_op;
sop = r300_fpop[op].s_op;
- argc = r300_fpop[op].argc;
+ argc = r300_fpop[op].argc;
if ((mask & WRITEMASK_XYZ) || vop == R300_FPI0_OUTC_DP3)
emit_vop = GL_TRUE;
const struct prog_instruction *inst = mp->Base.Instructions;
struct prog_instruction *fpi;
pfs_reg_t src[3], dest, temp;
+ pfs_reg_t cnst;
int flags, mask = 0;
+ GLfloat cnstv[4] = {0.0, 0.0, 0.0, 0.0};
if (!inst || inst[0].Opcode == OPCODE_END) {
ERROR("empty program?\n");
flags);
break;
case OPCODE_LIT:
- ERROR("LIT not implemented\n");
+ /* LIT
+ * if (s.x < 0) t.x = 0; else t.x = s.x;
+ * if (s.y < 0) t.y = 0; else t.y = s.y;
+ * if (s.w > 128.0) t.w = 128.0; else t.w = s.w;
+ * if (s.w < -128.0) t.w = -128.0; else t.w = s.w;
+ * r.x = 1.0
+ * if (t.x > 0) r.y = pow(t.y, t.w); else r.y = 0;
+ * Also r.y = 0 if t.y < 0
+ * For the t.x > 0 test, FGLRX uses the CMPH opcode, which
+ * changes the compare to (t.x + 0.5) > 0.5; we may save one
+ * instruction by doing CMP -t.x instead.
+ */
+ cnstv[0] = cnstv[1] = cnstv[2] = cnstv[3] = 0.50001;
+ src[0] = t_src(rp, fpi->SrcReg[0]);
+ temp = get_temp_reg(rp);
+ cnst = emit_const4fv(rp, cnstv);
+ emit_arith(rp, PFS_OP_CMP, temp,
+ WRITEMASK_X | WRITEMASK_Y,
+ src[0], pfs_zero, src[0], flags);
+ emit_arith(rp, PFS_OP_MIN, temp, WRITEMASK_Z,
+ swizzle(keep(src[0]), W, W, W, W),
+ cnst, undef, flags);
+ emit_arith(rp, PFS_OP_LG2, temp, WRITEMASK_W,
+ swizzle(temp, Y, Y, Y, Y),
+ undef, undef, flags);
+ emit_arith(rp, PFS_OP_MAX, temp, WRITEMASK_Z,
+ temp, negate(cnst), undef, flags);
+ emit_arith(rp, PFS_OP_MAD, temp, WRITEMASK_W,
+ temp, swizzle(temp, Z, Z, Z, Z),
+ pfs_zero, flags);
+ emit_arith(rp, PFS_OP_EX2, temp, WRITEMASK_W,
+ temp, undef, undef, flags);
+ emit_arith(rp, PFS_OP_MAD, dest, WRITEMASK_Y,
+ swizzle(keep(temp), X, X, X, X),
+ pfs_one, pfs_zero, flags);
+#if 0
+ emit_arith(rp, PFS_OP_MAD, temp, WRITEMASK_X,
+ temp, pfs_one, pfs_half, flags);
+ emit_arith(rp, PFS_OP_CMPH, temp, WRITEMASK_Z,
+ swizzle(keep(temp), W, W, W, W),
+ pfs_zero, swizzle(keep(temp), X, X, X, X),
+ flags);
+#else
+ emit_arith(rp, PFS_OP_CMP, temp, WRITEMASK_Z,
+ pfs_zero,
+ swizzle(keep(temp), W, W, W, W),
+ negate(swizzle(keep(temp), X, X, X, X)),
+ flags);
+#endif
+ emit_arith(rp, PFS_OP_CMP, dest, WRITEMASK_Z,
+ pfs_zero, temp,
+ negate(swizzle(keep(temp), Y, Y, Y, Y)),
+ flags);
+ emit_arith(rp, PFS_OP_MAD, dest,
+ WRITEMASK_X | WRITEMASK_W,
+ pfs_one,
+ pfs_one,
+ pfs_zero,
+ flags);
+ free_temp(rp, temp);
break;
case OPCODE_LRP:
src[0] = t_src(rp, fpi->SrcReg[0]);
return GL_FALSE;
}
-
+
return GL_TRUE;
}
#define PFS_OP_RCP 9
#define PFS_OP_RSQ 10
#define PFS_OP_REPL_ALPHA 11
-#define MAX_PFS_OP 11
+#define PFS_OP_CMPH 12
+#define MAX_PFS_OP 12
#define PFS_FLAG_SAT (1 << 0)
#define PFS_FLAG_ABS (1 << 1)
/**
* Buffer clear
*/
-static void r300Clear(GLcontext * ctx, GLbitfield mask, GLboolean allFoo,
- GLint cxFoo, GLint cyFoo, GLint cwFoo, GLint chFoo)
+static void r300Clear(GLcontext * ctx, GLbitfield mask)
{
r300ContextPtr r300 = R300_CONTEXT(ctx);
__DRIdrawablePrivate *dPriv = r300->radeon.dri.drawable;
if (RADEON_DEBUG & DEBUG_FALLBACKS)
fprintf(stderr, "%s: swrast clear, mask: %x\n",
__FUNCTION__, mask);
- _swrast_Clear(ctx, mask, 0, 0, 0, 0, 0);
+ _swrast_Clear(ctx, mask);
}
swapped = r300->radeon.doPageFlip && (r300->radeon.sarea->pfCurrentPage == 1);
* - DP4: Use OUTC_DP4, OUTA_DP4
* - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
* - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
+ * - CMPH: If ARG2 > 0.5, return ARG0, else return ARG1
* - CMP: If ARG2 < 0, return ARG1, else return ARG0
* - FLR: use FRC+MAD
* - XPD: use MAD+MAD
# define R300_FPI0_OUTC_DP4 (2 << 23)
# define R300_FPI0_OUTC_MIN (4 << 23)
# define R300_FPI0_OUTC_MAX (5 << 23)
+# define R300_FPI0_OUTC_CMPH (7 << 23)
# define R300_FPI0_OUTC_CMP (8 << 23)
# define R300_FPI0_OUTC_FRC (9 << 23)
# define R300_FPI0_OUTC_REPL_ALPHA (10 << 23)
static void r300Enable(GLcontext* ctx, GLenum cap, GLboolean state)
{
r300ContextPtr r300 = R300_CONTEXT(ctx);
- uint32_t newval;
if (RADEON_DEBUG & DEBUG_STATE)
fprintf(stderr, "%s( %s = %s )\n", __FUNCTION__,
GLint i, texelBytes;
GLint numLevels;
GLint log2Width, log2Height, log2Depth;
- const GLuint ui = 1;
/* Set the hardware texture format
*/
}
}
-
-/* Return the width and height of the given buffer.
- */
-static void radeonGetBufferSize(GLframebuffer * buffer,
- GLuint * width, GLuint * height)
-{
- GET_CURRENT_CONTEXT(ctx);
- radeonContextPtr radeon = RADEON_CONTEXT(ctx);
-
- LOCK_HARDWARE(radeon);
- *width = radeon->dri.drawable->w;
- *height = radeon->dri.drawable->h;
- UNLOCK_HARDWARE(radeon);
-}
-
-
/* Initialize the driver's misc functions.
*/
static void radeonInitDriverFuncs(struct dd_function_table *functions)
{
- functions->GetBufferSize = radeonGetBufferSize;
+ functions->GetBufferSize = NULL;
functions->GetString = radeonGetString;
}
/* DRI fields */
radeon->dri.context = driContextPriv;
radeon->dri.screen = sPriv;
- radeon->dri.drawable = NULL; /* Set by XMesaMakeCurrent */
+ radeon->dri.drawable = NULL;
+ radeon->dri.readable = NULL;
radeon->dri.hwContext = driContextPriv->hHWContext;
radeon->dri.hwLock = &sPriv->pSAREA->lock;
radeon->dri.fd = sPriv->fd;
fprintf(stderr, "%s ctx %p\n", __FUNCTION__,
radeon->glCtx);
- if (radeon->dri.drawable != driDrawPriv) {
+ if ( (radeon->dri.drawable != driDrawPriv)
+ || (radeon->dri.readable != driReadPriv) ) {
+
driDrawableInitVBlank(driDrawPriv,
radeon->vblank_flags,
&radeon->vbl_seq);
radeon->dri.drawable = driDrawPriv;
-
+ radeon->dri.readable = driReadPriv;
+
r300UpdateWindow(radeon->glCtx);
r300UpdateViewportOffset(radeon->glCtx);
}
struct radeon_dri_mirror {
__DRIcontextPrivate *context; /* DRI context */
__DRIscreenPrivate *screen; /* DRI screen */
- __DRIdrawablePrivate *drawable; /* DRI drawable bound to this ctx */
+ /**
+ * DRI drawable bound to this context for drawing.
+ */
+ __DRIdrawablePrivate *drawable;
+
+ /**
+ * DRI drawable bound to this context for reading.
+ */
+ __DRIdrawablePrivate *readable;
drm_context_t hwContext;
drm_hw_lock_t *hwLock;
* Called by radeonGetLock() after the lock has been obtained.
*/
static void r300RegainedLock(radeonContextPtr radeon)
-{
- __DRIdrawablePrivate *dPriv = radeon->dri.drawable;
+{
int i;
+ __DRIdrawablePrivate *const drawable = radeon->dri.drawable;
r300ContextPtr r300 = (r300ContextPtr)radeon;
+ drm_radeon_sarea_t *sarea = radeon->sarea;
- if (radeon->lastStamp != dPriv->lastStamp) {
- _mesa_resize_framebuffer(radeon->glCtx,
- (GLframebuffer*)dPriv->driverPrivate,
- dPriv->w, dPriv->h);
-
+ if ( radeon->lastStamp != drawable->lastStamp ) {
radeonUpdatePageFlipping(radeon);
-
- if (radeon->glCtx->DrawBuffer->_ColorDrawBufferMask[0] == BUFFER_BIT_BACK_LEFT)
- radeonSetCliprects(radeon, GL_BACK_LEFT);
- else
- radeonSetCliprects(radeon, GL_FRONT_LEFT);
-
+ radeonSetCliprects(radeon);
#if 1
r300UpdateViewportOffset( radeon->glCtx );
- driUpdateFramebufferSize(radeon->glCtx, dPriv);
+ driUpdateFramebufferSize(radeon->glCtx, drawable);
#else
radeonUpdateScissor(radeon->glCtx);
#endif
- radeon->lastStamp = dPriv->lastStamp;
+ radeon->lastStamp = drawable->lastStamp;
}
- for (i = 0; i < r300->nr_heaps; i++) {
- DRI_AGE_TEXTURES(r300->texture_heaps[i]);
+ if (sarea->ctx_owner != radeon->dri.hwContext) {
+ sarea->ctx_owner = radeon->dri.hwContext;
+
+ for (i = 0; i < r300->nr_heaps; i++) {
+ DRI_AGE_TEXTURES(r300->texture_heaps[i]);
+ }
}
}
*/
void radeonGetLock(radeonContextPtr radeon, GLuint flags)
{
- __DRIdrawablePrivate *dPriv = radeon->dri.drawable;
+ __DRIdrawablePrivate *const drawable = radeon->dri.drawable;
+ __DRIdrawablePrivate *const readable = radeon->dri.readable;
__DRIscreenPrivate *sPriv = radeon->dri.screen;
- drm_radeon_sarea_t *sarea = radeon->sarea;
- assert (dPriv != NULL);
+ assert (drawable != NULL);
drmGetLock(radeon->dri.fd, radeon->dri.hwContext, flags);
* Since the hardware state depends on having the latest drawable
* clip rects, all state checking must be done _after_ this call.
*/
- DRI_VALIDATE_DRAWABLE_INFO(sPriv, dPriv);
-
- if (sarea->ctx_owner != radeon->dri.hwContext)
- sarea->ctx_owner = radeon->dri.hwContext;
+ DRI_VALIDATE_DRAWABLE_INFO( sPriv, drawable );
+ if (drawable != readable) {
+ DRI_VALIDATE_DRAWABLE_INFO( sPriv, readable );
+ }
if (IS_R300_CLASS(radeon->radeonScreen))
r300RegainedLock(radeon);
#include "radeon_ioctl.h"
#include "radeon_state.h"
#include "r300_ioctl.h"
-
+#include "framebuffer.h"
/* =============================================================
* Scissoring
if (ctx->Scissor.Enabled) {
/* We don't pipeline cliprect changes */
r300Flush(ctx);
-
radeonUpdateScissor(ctx);
}
}
/**
* Update cliprects and scissors.
*/
-void radeonSetCliprects(radeonContextPtr radeon, GLenum mode)
+void radeonSetCliprects(radeonContextPtr radeon)
{
- __DRIdrawablePrivate *dPriv = radeon->dri.drawable;
-
- switch (mode) {
- case GL_FRONT_LEFT:
- radeon->numClipRects = dPriv->numClipRects;
- radeon->pClipRects = dPriv->pClipRects;
- break;
- case GL_BACK_LEFT:
- /* Can't ignore 2d windows if we are page flipping.
- */
- if (dPriv->numBackClipRects == 0 || radeon->doPageFlip) {
- radeon->numClipRects = dPriv->numClipRects;
- radeon->pClipRects = dPriv->pClipRects;
+ __DRIdrawablePrivate *const drawable = radeon->dri.drawable;
+ __DRIdrawablePrivate *const readable = radeon->dri.readable;
+ GLframebuffer *const draw_fb = (GLframebuffer*)drawable->driverPrivate;
+ GLframebuffer *const read_fb = (GLframebuffer*)readable->driverPrivate;
+
+ if (draw_fb->_ColorDrawBufferMask[0] == BUFFER_BIT_BACK_LEFT) {
+ /* Can't ignore 2d windows if we are page flipping. */
+ if (drawable->numBackClipRects == 0 || radeon->doPageFlip) {
+ radeon->numClipRects = drawable->numClipRects;
+ radeon->pClipRects = drawable->pClipRects;
} else {
- radeon->numClipRects = dPriv->numBackClipRects;
- radeon->pClipRects = dPriv->pBackClipRects;
+ radeon->numClipRects = drawable->numBackClipRects;
+ radeon->pClipRects = drawable->pBackClipRects;
+ }
+ } else {
+ /* front buffer (or none, or multiple buffers) */
+ radeon->numClipRects = drawable->numClipRects;
+ radeon->pClipRects = drawable->pClipRects;
+ }
+
+ if ((draw_fb->Width != drawable->w) ||
+ (draw_fb->Height != drawable->h)) {
+ printf("w,h %d %d\n",
+ radeon->glCtx->DrawBuffer->Width,
+ radeon->glCtx->DrawBuffer->Height);
+
+ _mesa_resize_framebuffer(radeon->glCtx, draw_fb,
+ drawable->w, drawable->h);
+ draw_fb->Initialized = GL_TRUE;
+ }
+
+ if (drawable != readable) {
+ if ((read_fb->Width != readable->w) ||
+ (read_fb->Height != readable->h)) {
+ _mesa_resize_framebuffer(radeon->glCtx, read_fb,
+ readable->w, readable->h);
+ read_fb->Initialized = GL_TRUE;
}
- break;
- default:
- fprintf(stderr, "bad mode in radeonSetCliprects\n");
- radeon->numClipRects = 0;
- radeon->pClipRects = 0;
- return;
}
if (radeon->state.scissor.enabled)
#include "radeon_context.h"
extern void radeonRecalcScissorRects(radeonContextPtr radeon);
-extern void radeonSetCliprects(radeonContextPtr radeon, GLenum mode);
+extern void radeonSetCliprects(radeonContextPtr radeon);
extern void radeonUpdateScissor(GLcontext* ctx);
extern void radeonEnable(GLcontext* ctx, GLenum cap, GLboolean state);
*/
#define RADEON_MAX_CLEARS 256
-static void radeonClear( GLcontext *ctx, GLbitfield mask, GLboolean allFoo,
- GLint cxFoo, GLint cyFoo, GLint cwFoo, GLint chFoo )
+static void radeonClear( GLcontext *ctx, GLbitfield mask )
{
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
__DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
if ( mask ) {
if (RADEON_DEBUG & DEBUG_FALLBACKS)
fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
- _swrast_Clear( ctx, mask, 0, 0, 0, 0, 0 );
+ _swrast_Clear( ctx, mask );
}
if ( !flags )
#include "light.h"
#include "state.h"
#include "context.h"
+#include "framebuffer.h"
#include "vbo/vbo.h"
#include "tnl/tnl.h"
}
if ((draw_fb->Width != drawable->w) || (draw_fb->Height != drawable->h)) {
- _mesa_resize_framebuffer(&rmesa->glCtx, draw_fb,
+ _mesa_resize_framebuffer(rmesa->glCtx, draw_fb,
drawable->w, drawable->h);
draw_fb->Initialized = GL_TRUE;
}
if (drawable != readable) {
if ((read_fb->Width != readable->w) || (read_fb->Height != readable->h)) {
- _mesa_resize_framebuffer(&rmesa->glCtx, read_fb,
+ _mesa_resize_framebuffer(rmesa->glCtx, read_fb,
readable->w, readable->h);
read_fb->Initialized = GL_TRUE;
}
* Buffer clear
*/
-static void s3vDDClear( GLcontext *ctx, GLbitfield mask, GLboolean allFoo,
- GLint cxFoo, GLint cyFoo, GLint cwFoo, GLint chFoo )
+static void s3vDDClear( GLcontext *ctx, GLbitfield mask )
{
s3vContextPtr vmesa = S3V_CONTEXT(ctx);
unsigned int _stride;
if ( mask )
DEBUG(("still masked ;3(\n")); */ /* yes */
#else
- _swrast_Clear( ctx, mask, 0, 0, 0, 0, 0 );
+ _swrast_Clear( ctx, mask );
#endif
}
}
-static void savageDDClear( GLcontext *ctx, GLbitfield mask, GLboolean allFoo,
- GLint cxFoo, GLint cyFoo, GLint cwFoo, GLint chFoo )
+static void savageDDClear( GLcontext *ctx, GLbitfield mask )
{
savageContextPtr imesa = SAVAGE_CONTEXT( ctx );
GLuint colorMask, depthMask, clearColor, clearDepth, flags;
}
if (mask)
- _swrast_Clear( ctx, mask, 0, 0, 0, 0, 0 );
+ _swrast_Clear( ctx, mask );
}
/*
}
void
-sis6326DDClear(GLcontext *ctx, GLbitfield mask, GLboolean allFoo,
- GLint xFoo, GLint yFoo, GLint widthFoo, GLint heightFoo)
+sis6326DDClear(GLcontext *ctx, GLbitfield mask)
{
sisContextPtr smesa = SIS_CONTEXT(ctx);
GLint x1, y1, width1, height1;
UNLOCK_HARDWARE();
if (mask != 0)
- _swrast_Clear(ctx, mask, 0, 0, 0, 0, 0);
+ _swrast_Clear(ctx, mask);
}
}
void
-sisDDClear( GLcontext * ctx, GLbitfield mask, GLboolean allFoo,
- GLint xFoo, GLint yFoo, GLint widthFoo, GLint heightFoo )
+sisDDClear( GLcontext * ctx, GLbitfield mask )
{
sisContextPtr smesa = SIS_CONTEXT(ctx);
UNLOCK_HARDWARE();
if (mask != 0)
- _swrast_Clear( ctx, mask, 0, 0, 0, 0, 0);
+ _swrast_Clear( ctx, mask);
}
GLint width, GLint height )
{
sisContextPtr smesa = SIS_CONTEXT(ctx);
-
int count;
- GLuint depth = smesa->bytesPerPixel;
drm_clip_rect_t *pExtents = NULL;
GLint xx, yy;
GLint x0, y0, width0, height0;
if (width <= 0 || height <= 0)
continue;
- int cmd;
-
mWait3DCmdQueue (8);
MMIO(REG_SRC_PITCH, (smesa->bytesPerPixel == 4) ?
BLIT_DEPTH_32 : BLIT_DEPTH_16);
const __GLcontextModes *mesaVis,
GLboolean isPixmap )
{
- sisScreenPtr screen = (sisScreenPtr) driScrnPriv->private;
+ /*sisScreenPtr screen = (sisScreenPtr) driScrnPriv->private;*/
struct gl_framebuffer *fb;
if (isPixmap)
#include "sis_context.h"
/* sis6326_clear.c */
-extern void sis6326DDClear( GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint x, GLint y, GLint width, GLint height );
+extern void sis6326DDClear( GLcontext *ctx, GLbitfield mask );
extern void sis6326DDClearColor( GLcontext * ctx, const GLfloat color[4] );
extern void sis6326DDClearDepth( GLcontext * ctx, GLclampd d );
extern void sis6326UpdateZPattern(sisContextPtr smesa, GLclampd z);
/* sis_clear.c */
-extern void sisDDClear( GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint x, GLint y, GLint width, GLint height );
+extern void sisDDClear( GLcontext *ctx, GLbitfield mask );
extern void sisDDClearColor( GLcontext * ctx, const GLfloat color[4] );
extern void sisDDClearDepth( GLcontext * ctx, GLclampd d );
extern void sisDDClearStencil( GLcontext * ctx, GLint s );
/* Clear the color and/or depth buffers.
*/
-static void tdfxClear( GLcontext *ctx,
- GLbitfield mask, GLboolean all,
- GLint xFoo, GLint yFoo, GLint widthFoo, GLint heightFoo)
+static void tdfxClear( GLcontext *ctx, GLbitfield mask )
{
tdfxContextPtr fxMesa = (tdfxContextPtr) ctx->DriverCtx;
GLbitfield softwareMask = mask & (BUFFER_BIT_ACCUM);
}
if (softwareMask)
- _swrast_Clear( ctx, softwareMask, 0, 0, 0, 0, 0);
+ _swrast_Clear(ctx, softwareMask);
}
GLint mipWidth, mipHeight;
tdfxMipMapLevel *mip;
struct gl_texture_image *mipImage;
- const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
const GLint maxLevels = _mesa_max_texture_levels(ctx, texObj->Target);
assert(!texImage->IsCompressed);
mipWidth, mipHeight, border,
format, type,
NULL);
- mipImage = _mesa_select_tex_image(ctx, texUnit, target, level);
+ mipImage = _mesa_select_tex_image(ctx, texObj, target, level);
mip = TDFX_TEXIMAGE_DATA(mipImage);
_mesa_halve2x2_teximage2d(ctx,
texImage,
GLint mipWidth, mipHeight;
tdfxMipMapLevel *mip;
struct gl_texture_image *mipImage;
- const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
const GLint maxLevels = _mesa_max_texture_levels(ctx, texObj->Target);
assert(!texImage->IsCompressed);
break;
}
++level;
- mipImage = _mesa_select_tex_image(ctx, texUnit, target, level);
+ mipImage = _mesa_select_tex_image(ctx, texObj, target, level);
mip = TDFX_TEXIMAGE_DATA(mipImage);
_mesa_halve2x2_teximage2d(ctx,
texImage,
}
-static void tridentDDClear( GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint cxFoo, GLint cyFoo, GLint cwFoo, GLint chFoo)
+static void tridentDDClear( GLcontext *ctx, GLbitfield mask )
{
tridentContextPtr tmesa = TRIDENT_CONTEXT(ctx);
unsigned char *MMIO = tmesa->tridentScreen->mmio.map;
UNLOCK_HARDWARE(tmesa);
if ( mask )
- _swrast_Clear( ctx, mask, 0, 0, 0, 0, 0);
+ _swrast_Clear( ctx, mask );
}
static void tridentDDShadeModel( GLcontext *ctx, GLenum mode )
-static void viaClear(GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint cxFoo, GLint cyFoo, GLint cwFoo, GLint chFoo)
+static void viaClear(GLcontext *ctx, GLbitfield mask)
{
struct via_context *vmesa = VIA_CONTEXT(ctx);
__DRIdrawablePrivate *dPriv = vmesa->driDrawable;
drm_clip_rect_t *boxes, *tmp_boxes = 0;
int nr = 0;
GLint cx, cy, cw, ch;
+ GLboolean all;
LOCK_HARDWARE(vmesa);
cy = ctx->DrawBuffer->_Ymin;
cw = ctx->DrawBuffer->_Xmax - cx;
ch = ctx->DrawBuffer->_Ymax - cy;
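+ /* Driver.Clear() no longer receives an 'all' flag; recompute it locally. */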
+ all = (cw == ctx->DrawBuffer->Width && ch == ctx->DrawBuffer->Height);
/* flip top to bottom */
cy = dPriv->h - cy - ch;
}
if (mask)
- _swrast_Clear(ctx, mask, 0, 0, 0, 0, 0);
+ _swrast_Clear(ctx, mask);
}
ggi_ctx->clearcolor = col;
}
-static void gl_ggiClear(GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint x, GLint y, GLint width, GLint height)
+static void gl_ggiClear(GLcontext *ctx, GLbitfield mask)
{
ggi_mesa_context_t ggi_ctx = (ggi_mesa_context_t)ctx->DriverCtx;
+ int x = ctx->DrawBuffer->_Xmin;
+ int y = ctx->DrawBuffer->_Ymin;
+ int w = ctx->DrawBuffer->_Xmax - x;
+ int h = ctx->DrawBuffer->_Ymax - y;
+ GLboolean all = (w == ctx->DrawBuffer->Width && h == ctx->DrawBuffer->Height);
GGIMESADPRINT_CORE("gl_ggiClear() called\n");
mask &= ~(DD_FRONT_LEFT_BIT | DD_BACK_LEFT_BIT);
}
- _swrast_Clear(ctx, mask, all, x, y, width, height);
+ _swrast_Clear(ctx, mask);
}
/* Clear the color and/or depth buffers */
-static void fxDDClear( GLcontext *ctx,
- GLbitfield mask, GLboolean all,
- GLint x, GLint y, GLint width, GLint height )
+static void fxDDClear( GLcontext *ctx, GLbitfield mask )
{
fxMesaContext fxMesa = FX_CONTEXT(ctx);
GLbitfield softwareMask = mask & (BUFFER_BIT_ACCUM);
const FxU8 clearS = (FxU8) (ctx->Stencil.Clear & 0xff);
if ( TDFX_DEBUG & MESA_VERBOSE ) {
- fprintf( stderr, "fxDDClear( %d, %d, %d, %d )\n",
- (int) x, (int) y, (int) width, (int) height );
+ fprintf( stderr, "fxDDClear\n");
}
/* we can't clear accum buffers nor stereo */
grRenderBuffer(fxMesa->currentFB);
if (softwareMask)
- _swrast_Clear( ctx, softwareMask, all, x, y, width, height );
+ _swrast_Clear( ctx, softwareMask );
}
-/* $Id: svgamesa15.c,v 1.11 2002/11/11 18:42:39 brianp Exp $ */
+/* $Id: svgamesa15.c,v 1.11.36.1 2006/11/02 12:02:17 alanh Exp $ */
/*
* Mesa 3-D graphics library
/* SVGAMesa->clear_hicolor=(red)<<10 | (green)<<5 | (blue);*/
}
-void __clear15( GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint x, GLint y, GLint width, GLint height )
+void __clear15( GLcontext *ctx, GLbitfield mask )
{
int i, j;
+ int x = ctx->DrawBuffer->_Xmin;
+ int y = ctx->DrawBuffer->_Ymin;
+ int width = ctx->DrawBuffer->_Xmax - x;
+ int height = ctx->DrawBuffer->_Ymax - y;
+ GLboolean all = (width == ctx->DrawBuffer->Width && height == ctx->DrawBuffer->Height);
if (mask & DD_FRONT_LEFT_BIT) {
GLshort *shortBuffer=(void *)SVGABuffer.FrontBuffer;
}
if (mask)
- _swrast_Clear( ctx, mask, all, x, y, width, height );
+ _swrast_Clear( ctx, mask );
}
void __write_rgba_span15( const GLcontext *ctx, GLuint n, GLint x, GLint y,
-/* $Id: svgamesa16.c,v 1.11 2002/11/11 18:42:40 brianp Exp $ */
+/* $Id: svgamesa16.c,v 1.11.36.1 2006/11/02 12:02:17 alanh Exp $ */
/*
* Mesa 3-D graphics library
/* SVGAMesa->clear_hicolor=(red)<<11 | (green)<<5 | (blue); */
}
-void __clear16( GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint x, GLint y, GLint width, GLint height )
+void __clear16( GLcontext *ctx, GLbitfield mask )
{
int i,j;
+ int x = ctx->DrawBuffer->_Xmin;
+ int y = ctx->DrawBuffer->_Ymin;
+ int width = ctx->DrawBuffer->_Xmax - x;
+ int height = ctx->DrawBuffer->_Ymax - y;
+ GLboolean all = (width == ctx->DrawBuffer->Width && height == ctx->DrawBuffer->Height);
if (mask & DD_FRONT_LEFT_BIT) {
if (all) {
}
if (mask)
- _swrast_Clear( ctx, mask, all, x, y, width, height );
+ _swrast_Clear( ctx, mask );
}
void __write_rgba_span16( const GLcontext *ctx, GLuint n, GLint x, GLint y,
-/* $Id: svgamesa24.c,v 1.12 2002/11/11 18:42:41 brianp Exp $ */
+/* $Id: svgamesa24.c,v 1.12.36.1 2006/11/02 12:02:17 alanh Exp $ */
/*
* Mesa 3-D graphics library
/* SVGAMesa->clear_truecolor = red<<16 | green<<8 | blue; */
}
-void __clear24( GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint x, GLint y, GLint width, GLint height )
+void __clear24( GLcontext *ctx, GLbitfield mask )
{
int i,j;
+ int x = ctx->DrawBuffer->_Xmin;
+ int y = ctx->DrawBuffer->_Ymin;
+ int width = ctx->DrawBuffer->_Xmax - x;
+ int height = ctx->DrawBuffer->_Ymax - y;
+ GLboolean all = (width == ctx->DrawBuffer->Width && height == ctx->DrawBuffer->Height);
if (mask & DD_FRONT_LEFT_BIT) {
if (all) {
}
if (mask)
- _swrast_Clear( ctx, mask, all, x, y, width, height );
+ _swrast_Clear( ctx, mask );
}
void __write_rgba_span24( const GLcontext *ctx, GLuint n, GLint x, GLint y,
-/* $Id: svgamesa32.c,v 1.12 2002/11/11 18:42:42 brianp Exp $ */
+/* $Id: svgamesa32.c,v 1.12.36.1 2006/11/02 12:02:17 alanh Exp $ */
/*
* Mesa 3-D graphics library
SVGAMesa->clear_truecolor = (col[0] << 16) | (col[1] << 8) | col[2];
}
-void __clear32( GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint x, GLint y, GLint width, GLint height )
+void __clear32( GLcontext *ctx, GLbitfield mask )
{
int i,j;
+ int x = ctx->DrawBuffer->_Xmin;
+ int y = ctx->DrawBuffer->_Ymin;
+ int width = ctx->DrawBuffer->_Xmax - x;
+ int height = ctx->DrawBuffer->_Ymax - y;
+ GLboolean all = (width == ctx->DrawBuffer->Width && height == ctx->DrawBuffer->Height);
if (mask & DD_FRONT_LEFT_BIT) {
if (all) {
}
if (mask)
- _swrast_Clear( ctx, mask, all, x, y, width, height );
+ _swrast_Clear( ctx, mask );
}
void __write_rgba_span32( const GLcontext *ctx, GLuint n, GLint x, GLint y,
-/* $Id: svgamesa8.c,v 1.9 2005/05/04 20:11:39 brianp Exp $ */
+/* $Id: svgamesa8.c,v 1.9.10.1 2006/11/02 12:02:17 alanh Exp $ */
/*
* Mesa 3-D graphics library
SVGAMesa->clear_index = index;
}
-void __clear8( GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint x, GLint y, GLint width, GLint height )
+void __clear8( GLcontext *ctx, GLbitfield mask )
{
int i,j;
+ int x = ctx->DrawBuffer->_Xmin;
+ int y = ctx->DrawBuffer->_Ymin;
+ int width = ctx->DrawBuffer->_Xmax - x;
+ int height = ctx->DrawBuffer->_Ymax - y;
+ GLboolean all = (width == ctx->DrawBuffer->Width && height == ctx->DrawBuffer->Height);
if (mask & DD_FRONT_LEFT_BIT) {
if (all) {
}
if (mask)
- _swrast_Clear( ctx, mask, all, x, y, width, height );
+ _swrast_Clear( ctx, mask );
}
void __write_ci32_span8( const GLcontext *ctx, struct gl_renderbuffer *rb,
* Clearing of the other non-color buffers is left to the swrast.
*/
-static void clear(GLcontext *ctx,
- GLbitfield mask,
- GLboolean all,
- GLint xFoo, GLint yFoo,
- GLint widthFoo, GLint heightFoo)
+static void clear(GLcontext *ctx, GLbitfield mask)
{
#define FLIP(Y) (ctx->DrawBuffer->Height - (Y) - 1)
const GLint x = ctx->DrawBuffer->_Xmin;
ctx->Color.ColorMask[1] != 0xff ||
ctx->Color.ColorMask[2] != 0xff ||
ctx->Color.ColorMask[3] != 0xff) {
- _swrast_Clear(ctx, mask, all, x, y, width, height);
+ _swrast_Clear(ctx, mask);
return;
}
/* Try for a fast clear - clearing entire buffer with a single
* byte value. */
- if (all) { /* entire buffer */
+ if (width == ctx->DrawBuffer->Width &&
+ height == ctx->DrawBuffer->Height) { /* entire buffer */
/* Now check for an easy clear value */
switch (bytesPerPixel) {
case 1:
/* Call swrast if there is anything left to clear (like DEPTH) */
if (mask)
- _swrast_Clear(ctx, mask, all, x, y, width, height);
+ _swrast_Clear(ctx, mask);
#undef FLIP
}
static void
-clear_buffers( GLcontext *ctx, GLbitfield mask,
- GLboolean all, GLint xFoo, GLint yFoo,
- GLint widthFoo, GLint heightFoo )
+clear_buffers(GLcontext *ctx, GLbitfield buffers)
{
if (ctx->DrawBuffer->Name == 0) {
/* this is a window system framebuffer */
/* we can't handle color or index masking */
if (*colorMask == 0xffffffff && ctx->Color.IndexMask == 0xffffffff) {
- if (mask & BUFFER_BIT_FRONT_LEFT) {
+ if (buffers & BUFFER_BIT_FRONT_LEFT) {
/* clear front color buffer */
struct gl_renderbuffer *frontRb
= ctx->DrawBuffer->Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
if (b->frontxrb == xmesa_renderbuffer(frontRb)) {
/* renderbuffer is not wrapped - great! */
b->frontxrb->clearFunc(ctx, b->frontxrb, x, y, width, height);
- mask &= ~BUFFER_BIT_FRONT_LEFT;
+ buffers &= ~BUFFER_BIT_FRONT_LEFT;
}
else {
/* we can't directly clear an alpha-wrapped color buffer */
}
}
- if (mask & BUFFER_BIT_BACK_LEFT) {
+ if (buffers & BUFFER_BIT_BACK_LEFT) {
/* clear back color buffer */
struct gl_renderbuffer *backRb
= ctx->DrawBuffer->Attachment[BUFFER_BACK_LEFT].Renderbuffer;
if (b->backxrb == xmesa_renderbuffer(backRb)) {
/* renderbuffer is not wrapped - great! */
b->backxrb->clearFunc(ctx, b->backxrb, x, y, width, height);
- mask &= ~BUFFER_BIT_BACK_LEFT;
+ buffers &= ~BUFFER_BIT_BACK_LEFT;
}
}
}
}
- if (mask)
- _swrast_Clear( ctx, mask, 0, 0, 0, 0, 0);
+ if (buffers)
+ _swrast_Clear(ctx, buffers);
}
if (mask & GL_TEXTURE_BIT) {
struct gl_texture_attrib *attr;
GLuint u;
+
+ _mesa_lock_context_textures(ctx);
/* Bump the texture object reference counts so that they don't
* inadvertantly get deleted.
*/
_mesa_copy_texture_object(&attr->Unit[u].SavedRect,
attr->Unit[u].CurrentRect);
}
+
+ _mesa_unlock_context_textures(ctx);
+
newnode = new_attrib_node( GL_TEXTURE_BIT );
newnode->data = attr;
newnode->next = head;
}
if (ctx->RenderMode == GL_RENDER) {
- const GLint x = ctx->DrawBuffer->_Xmin;
- const GLint y = ctx->DrawBuffer->_Ymin;
- const GLint height = ctx->DrawBuffer->_Ymax - ctx->DrawBuffer->_Ymin;
- const GLint width = ctx->DrawBuffer->_Xmax - ctx->DrawBuffer->_Xmin;
GLbitfield bufferMask;
/* don't clear depth buffer if depth writing disabled */
}
ASSERT(ctx->Driver.Clear);
- ctx->Driver.Clear( ctx, bufferMask, (GLboolean) !ctx->Scissor.Enabled,
- x, y, width, height );
+ ctx->Driver.Clear(ctx, bufferMask);
}
}
ss->DefaultCubeMap->RefCount += MAX_TEXTURE_IMAGE_UNITS;
ss->DefaultRect->RefCount += MAX_TEXTURE_IMAGE_UNITS;
+ _glthread_INIT_MUTEX(ss->TexMutex);
+ ss->TextureStateStamp = 0;
+
+
#if FEATURE_EXT_framebuffer_object
ss->FrameBuffers = _mesa_NewHashTable();
if (!ss->FrameBuffers)
ctx->Const.VertexProgram.MaxLocalParams = MAX_PROGRAM_LOCAL_PARAMS;
ctx->Const.VertexProgram.MaxEnvParams = MAX_NV_VERTEX_PROGRAM_PARAMS;
ctx->Const.VertexProgram.MaxAddressRegs = MAX_VERTEX_PROGRAM_ADDRESS_REGS;
+ ctx->Const.VertexProgram.MaxUniformComponents = MAX_VERTEX_UNIFORM_COMPONENTS;
init_natives(&ctx->Const.VertexProgram);
#endif
#if FEATURE_ARB_fragment_program
ctx->Const.FragmentProgram.MaxLocalParams = MAX_PROGRAM_LOCAL_PARAMS;
ctx->Const.FragmentProgram.MaxEnvParams = MAX_NV_FRAGMENT_PROGRAM_PARAMS;
ctx->Const.FragmentProgram.MaxAddressRegs = MAX_FRAGMENT_PROGRAM_ADDRESS_REGS;
+ ctx->Const.FragmentProgram.MaxUniformComponents = MAX_FRAGMENT_UNIFORM_COMPONENTS;
init_natives(&ctx->Const.FragmentProgram);
#endif
ctx->Const.MaxProgramMatrices = MAX_PROGRAM_MATRICES;
ctx->Const.MaxRenderbufferSize = MAX_WIDTH;
#endif
+#if FEATURE_ARB_vertex_shader
+ ctx->Const.MaxVertexTextureImageUnits = MAX_VERTEX_TEXTURE_IMAGE_UNITS;
+ ctx->Const.MaxVaryingFloats = MAX_VARYING_FLOATS;
+#endif
+
/* sanity checks */
ASSERT(ctx->Const.MaxTextureUnits == MIN2(ctx->Const.MaxTextureImageUnits,
ctx->Const.MaxTextureCoordUnits));
/**
* Clear the color/depth/stencil/accum buffer(s).
- *
- * \param mask a bitmask of the DD_*_BIT values defined above that indicates
- * which buffers need to be cleared.
- * \param all if true then clear the whole buffer, else clear only the
- * region defined by <tt>(x, y, width, height)</tt>.
- *
- * This function must obey the glColorMask(), glIndexMask() and
- * glStencilMask() settings!
- * Software Mesa can do masked clears if the device driver can't.
+ * \param buffers a bitmask of BUFFER_BIT_* flags indicating which
+ * renderbuffers need to be cleared.
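+ *
+ * The clear region is no longer passed in; drivers that need it can take
+ * it from ctx->DrawBuffer->_Xmin/_Ymin/_Xmax/_Ymax, as the software
+ * drivers above do.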
*/
- void (*Clear)( GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint x, GLint y, GLint width, GLint height );
-
+ void (*Clear)( GLcontext *ctx, GLbitfield buffers );
- /**
- * \name For hardware accumulation buffer
- */
- /*@{*/
/**
* Execute glAccum command.
*/
void (*Accum)( GLcontext *ctx, GLenum op, GLfloat value );
- /*@}*/
/**
- * \name glDraw(), glRead(), glCopyPixels() and glBitmap() functions
+ * \name Image-related functions
*/
/*@{*/
/**
- * This is called by glDrawPixels().
- *
+ * Called by glDrawPixels().
* \p unpack describes how to unpack the source image data.
*/
void (*DrawPixels)( GLcontext *ctx,
GLvoid *dest );
/**
- * Do a glCopyPixels().
- *
- * This function must respect all rasterization state, glPixelTransfer(),
- * glPixelZoom(), etc.
+ * Called by glCopyPixels().
*/
void (*CopyPixels)( GLcontext *ctx, GLint srcx, GLint srcy,
GLsizei width, GLsizei height,
GLint dstx, GLint dsty, GLenum type );
/**
- * This is called by glBitmap().
- *
- * Works the same as dd_function_table::DrawPixels, above.
+ * Called by glBitmap().
*/
void (*Bitmap)( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
{ OFF, "GL_ATI_texture_env_combine3", F(ATI_texture_env_combine3)},
{ OFF, "GL_ATI_texture_mirror_once", F(ATI_texture_mirror_once)},
{ OFF, "GL_ATI_fragment_shader", F(ATI_fragment_shader)},
+ { OFF, "GL_ATI_separate_stencil", F(ATI_separate_stencil)},
{ OFF, "GL_IBM_multimode_draw_arrays", F(IBM_multimode_draw_arrays) },
{ ON, "GL_IBM_rasterpos_clip", F(IBM_rasterpos_clip) },
{ OFF, "GL_IBM_texture_mirrored_repeat", F(ARB_texture_mirrored_repeat)},
#endif
ctx->Extensions.ATI_texture_env_combine3 = GL_TRUE;
ctx->Extensions.ATI_texture_mirror_once = GL_TRUE;
+ ctx->Extensions.ATI_separate_stencil = GL_TRUE;
ctx->Extensions.EXT_blend_color = GL_TRUE;
ctx->Extensions.EXT_blend_equation_separate = GL_TRUE;
ctx->Extensions.EXT_blend_func_separate = GL_TRUE;
ctx->Extensions.EXT_secondary_color = GL_TRUE;
ctx->Extensions.EXT_shared_texture_palette = GL_TRUE;
ctx->Extensions.EXT_stencil_wrap = GL_TRUE;
- ctx->Extensions.EXT_stencil_two_side = GL_TRUE;
+ ctx->Extensions.EXT_stencil_two_side = GL_FALSE; /* obsolete */
ctx->Extensions.EXT_texture_env_add = GL_TRUE;
ctx->Extensions.EXT_texture_env_combine = GL_TRUE;
ctx->Extensions.EXT_texture_env_dot3 = GL_TRUE;
#if FEATURE_ARB_shading_language_100
ctx->Extensions.ARB_shading_language_100 = GL_TRUE;
#endif
- ctx->Extensions.EXT_stencil_two_side = GL_FALSE; /* yes, turn it off */
+ ctx->Extensions.ATI_separate_stencil = GL_TRUE;
+ ctx->Extensions.EXT_stencil_two_side = GL_FALSE; /* obsolete */
#if FEATURE_ARB_vertex_shader
ctx->Extensions.ARB_vertex_shader = GL_TRUE;
#endif
}
FLUSH_VERTICES(ctx, _NEW_BUFFERS);
-
+ if (ctx->Driver.Flush) {
+ ctx->Driver.Flush(ctx);
+ }
if (framebuffer) {
/* Binding a user-created framebuffer object */
newFb = _mesa_lookup_framebuffer(ctx, framebuffer);
texObj = _mesa_select_tex_object(ctx, texUnit, target);
/* XXX this might not handle cube maps correctly */
+ _mesa_lock_texture(ctx, texObj);
_mesa_generate_mipmap(ctx, target, texUnit, texObj);
+ _mesa_unlock_texture(ctx, texObj);
}
break;
case GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB:
CHECK_EXT1(ARB_fragment_shader, "GetBooleanv");
- params[0] = INT_TO_BOOLEAN(MAX_FRAGMENT_UNIFORM_COMPONENTS);
+ params[0] = INT_TO_BOOLEAN(ctx->Const.FragmentProgram.MaxUniformComponents);
break;
case GL_FRAGMENT_SHADER_DERIVATIVE_HINT_ARB:
CHECK_EXT1(ARB_fragment_shader, "GetBooleanv");
break;
case GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB:
CHECK_EXT1(ARB_vertex_shader, "GetBooleanv");
- params[0] = INT_TO_BOOLEAN(MAX_VERTEX_UNIFORM_COMPONENTS);
+ params[0] = INT_TO_BOOLEAN(ctx->Const.VertexProgram.MaxUniformComponents);
break;
case GL_MAX_VARYING_FLOATS_ARB:
CHECK_EXT1(ARB_vertex_shader, "GetBooleanv");
- params[0] = INT_TO_BOOLEAN(MAX_VARYING_FLOATS);
+ params[0] = INT_TO_BOOLEAN(ctx->Const.MaxVaryingFloats);
break;
case GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB:
CHECK_EXT1(ARB_vertex_shader, "GetBooleanv");
- params[0] = INT_TO_BOOLEAN(MAX_VERTEX_TEXTURE_IMAGE_UNITS);
+ params[0] = INT_TO_BOOLEAN(ctx->Const.MaxVertexTextureImageUnits);
break;
case GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB:
CHECK_EXT1(ARB_vertex_shader, "GetBooleanv");
break;
case GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB:
CHECK_EXT1(ARB_fragment_shader, "GetFloatv");
- params[0] = (GLfloat)(MAX_FRAGMENT_UNIFORM_COMPONENTS);
+ params[0] = (GLfloat)(ctx->Const.FragmentProgram.MaxUniformComponents);
break;
case GL_FRAGMENT_SHADER_DERIVATIVE_HINT_ARB:
CHECK_EXT1(ARB_fragment_shader, "GetFloatv");
break;
case GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB:
CHECK_EXT1(ARB_vertex_shader, "GetFloatv");
- params[0] = (GLfloat)(MAX_VERTEX_UNIFORM_COMPONENTS);
+ params[0] = (GLfloat)(ctx->Const.VertexProgram.MaxUniformComponents);
break;
case GL_MAX_VARYING_FLOATS_ARB:
CHECK_EXT1(ARB_vertex_shader, "GetFloatv");
- params[0] = (GLfloat)(MAX_VARYING_FLOATS);
+ params[0] = (GLfloat)(ctx->Const.MaxVaryingFloats);
break;
case GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB:
CHECK_EXT1(ARB_vertex_shader, "GetFloatv");
- params[0] = (GLfloat)(MAX_VERTEX_TEXTURE_IMAGE_UNITS);
+ params[0] = (GLfloat)(ctx->Const.MaxVertexTextureImageUnits);
break;
case GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB:
CHECK_EXT1(ARB_vertex_shader, "GetFloatv");
break;
case GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB:
CHECK_EXT1(ARB_fragment_shader, "GetIntegerv");
- params[0] = MAX_FRAGMENT_UNIFORM_COMPONENTS;
+ params[0] = ctx->Const.FragmentProgram.MaxUniformComponents;
break;
case GL_FRAGMENT_SHADER_DERIVATIVE_HINT_ARB:
CHECK_EXT1(ARB_fragment_shader, "GetIntegerv");
break;
case GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB:
CHECK_EXT1(ARB_vertex_shader, "GetIntegerv");
- params[0] = MAX_VERTEX_UNIFORM_COMPONENTS;
+ params[0] = ctx->Const.VertexProgram.MaxUniformComponents;
break;
case GL_MAX_VARYING_FLOATS_ARB:
CHECK_EXT1(ARB_vertex_shader, "GetIntegerv");
- params[0] = MAX_VARYING_FLOATS;
+ params[0] = ctx->Const.MaxVaryingFloats;
break;
case GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB:
CHECK_EXT1(ARB_vertex_shader, "GetIntegerv");
- params[0] = MAX_VERTEX_TEXTURE_IMAGE_UNITS;
+ params[0] = ctx->Const.MaxVertexTextureImageUnits;
break;
case GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB:
CHECK_EXT1(ARB_vertex_shader, "GetIntegerv");
# GL_ARB_fragment_shader
( "GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB", GLint,
- ["MAX_FRAGMENT_UNIFORM_COMPONENTS"], "", ["ARB_fragment_shader"] ),
+ ["ctx->Const.FragmentProgram.MaxUniformComponents"], "",
+ ["ARB_fragment_shader"] ),
( "GL_FRAGMENT_SHADER_DERIVATIVE_HINT_ARB", GLenum,
["ctx->Hint.FragmentShaderDerivative"], "", ["ARB_fragment_shader"] ),
# GL_ARB_vertex_shader
( "GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB", GLint,
- ["MAX_VERTEX_UNIFORM_COMPONENTS"], "", ["ARB_vertex_shader"] ),
+ ["ctx->Const.VertexProgram.MaxUniformComponents"], "",
+ ["ARB_vertex_shader"] ),
( "GL_MAX_VARYING_FLOATS_ARB", GLint,
- ["MAX_VARYING_FLOATS"], "", ["ARB_vertex_shader"] ),
+ ["ctx->Const.MaxVaryingFloats"], "", ["ARB_vertex_shader"] ),
( "GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB", GLint,
- ["MAX_VERTEX_TEXTURE_IMAGE_UNITS"], "", ["ARB_vertex_shader"] ),
+ ["ctx->Const.MaxVertexTextureImageUnits"], "", ["ARB_vertex_shader"] ),
( "GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB", GLint,
["MAX_COMBINED_TEXTURE_IMAGE_UNITS"], "", ["ARB_vertex_shader"] )
]
static const char *version_1_3 = "1.3 Mesa " MESA_VERSION_STRING;
static const char *version_1_4 = "1.4 Mesa " MESA_VERSION_STRING;
static const char *version_1_5 = "1.5 Mesa " MESA_VERSION_STRING;
- static const char *version_2_0 = "1.5 Mesa " MESA_VERSION_STRING;/*XXX FIX*/
+ static const char *version_2_0 = "1.5 Mesa " MESA_VERSION_STRING;
#if FEATURE_ARB_shading_language_100
static const char *sl_version_110 = "1.10 Mesa " MESA_VERSION_STRING;
if (ctx->Extensions.ARB_draw_buffers &&
ctx->Extensions.ARB_point_sprite &&
ctx->Extensions.ARB_texture_non_power_of_two &&
- ctx->Extensions.EXT_stencil_two_side) {
+ ctx->Extensions.ATI_separate_stencil) {
return (const GLubyte *) version_2_0;
}
else {
GLint border, bytesPerTexel;
/* get src image parameters */
- srcImage = _mesa_select_tex_image(ctx, texUnit, target, level);
+ srcImage = _mesa_select_tex_image(ctx, texObj, target, level);
ASSERT(srcImage);
srcWidth = srcImage->Width;
srcHeight = srcImage->Height;
}
/* get dest gl_texture_image */
- dstImage = _mesa_get_tex_image(ctx, texUnit, target, level + 1);
+ dstImage = _mesa_get_tex_image(ctx, texObj, target, level + 1);
if (!dstImage) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "generating mipmaps");
return;
};
-/**
- * Virtual vertex program machine state.
- * Only used during program execution (may be moved someday):
- */
-struct gl_vertex_program_machine
-{
- GLfloat Temporaries[MAX_NV_VERTEX_PROGRAM_TEMPS][4];
- GLfloat Inputs[MAX_NV_VERTEX_PROGRAM_INPUTS][4];
- GLuint InputsSize[MAX_NV_VERTEX_PROGRAM_INPUTS];
- GLfloat Outputs[MAX_NV_VERTEX_PROGRAM_OUTPUTS][4];
- GLint AddressReg[4];
-};
-
-
/**
* Context state for vertex programs.
*/
GLfloat Parameters[MAX_NV_VERTEX_PROGRAM_PARAMS][4]; /**< Env params */
- struct gl_vertex_program_machine Machine;
-
/* For GL_NV_vertex_program only: */
GLenum TrackMatrix[MAX_NV_VERTEX_PROGRAM_PARAMS / 4];
GLenum TrackMatrixTransform[MAX_NV_VERTEX_PROGRAM_PARAMS / 4];
struct gl_texture_object *DefaultRect;
/*@}*/
+ /**
+ * \name Thread safety and statechange notification for texture
+ * objects.
+ *
+ * \todo Improve the granularity of locking.
+ */
+ /*@{*/
+ _glthread_Mutex TexMutex; /**< texobj thread safety */
+ GLuint TextureStateStamp; /**< state notification for shared tex */
+ /*@}*/
+
+
+
/**
* \name Vertex/fragment programs
*/
GLuint MaxNativeTemps;
GLuint MaxNativeAddressRegs; /* vertex program only, for now */
GLuint MaxNativeParameters;
+ /* For shaders */
+ GLuint MaxUniformComponents;
};
/* GL_EXT_framebuffer_object */
GLuint MaxColorAttachments;
GLuint MaxRenderbufferSize;
+ /* GL_ARB_vertex_shader */
+ GLuint MaxVertexTextureImageUnits;
+ GLuint MaxVaryingFloats;
};
GLboolean ATI_texture_mirror_once;
GLboolean ATI_texture_env_combine3;
GLboolean ATI_fragment_shader;
+ GLboolean ATI_separate_stencil;
GLboolean IBM_rasterpos_clip;
GLboolean IBM_multimode_draw_arrays;
GLboolean MESA_pack_invert;
GLboolean _ForceEyeCoords;
GLenum _CurrentProgram; /* currently executing program */
+ GLuint TextureStateTimestamp; /* detect changes to shared state */
+
struct gl_shine_tab *_ShineTable[2]; /**< Active shine tables */
struct gl_shine_tab *_ShineTabList; /**< MRU list of inactive shine tables */
/**@}*/
* _mesa_update_lighting() and _mesa_update_tnl_spaces().
*/
void
-_mesa_update_state( GLcontext *ctx )
+_mesa_update_state_locked( GLcontext *ctx )
{
GLbitfield new_state = ctx->NewState;
ctx->Array.NewState = 0;
}
+
+/* This is the usual entrypoint for state updates:
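+ * It simply wraps _mesa_update_state_locked() with
+ * _mesa_lock_context_textures() / _mesa_unlock_context_textures().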
+ */
+void
+_mesa_update_state( GLcontext *ctx )
+{
+ _mesa_lock_context_textures(ctx);
+ _mesa_update_state_locked(ctx);
+ _mesa_unlock_context_textures(ctx);
+}
+
+
+
/*@}*/
extern void
_mesa_update_state( GLcontext *ctx );
+/* As above but can only be called between _mesa_lock_context_textures() and
+ * _mesa_unlock_context_textures().
+ */
+extern void
+_mesa_update_state_locked( GLcontext *ctx );
+
#endif
ref = CLAMP( ref, 0, stencilMax );
- if (ctx->Extensions.EXT_stencil_two_side) {
- /* only set active face state */
- const GLint face = ctx->Stencil.ActiveFace;
- if (ctx->Stencil.Function[face] == func &&
- ctx->Stencil.ValueMask[face] == mask &&
- ctx->Stencil.Ref[face] == ref)
- return;
- FLUSH_VERTICES(ctx, _NEW_STENCIL);
- ctx->Stencil.Function[face] = func;
- ctx->Stencil.Ref[face] = ref;
- ctx->Stencil.ValueMask[face] = mask;
- if (ctx->Driver.StencilFuncSeparate) {
- ctx->Driver.StencilFuncSeparate(ctx, face ? GL_BACK : GL_FRONT,
- func, ref, mask);
- }
- }
- else {
+ if (ctx->Extensions.ATI_separate_stencil) {
/* set both front and back state */
if (ctx->Stencil.Function[0] == func &&
ctx->Stencil.Function[1] == func &&
func, ref, mask);
}
}
+ else {
+ /* only set active face state */
+ const GLint face = ctx->Stencil.ActiveFace;
+ if (ctx->Stencil.Function[face] == func &&
+ ctx->Stencil.ValueMask[face] == mask &&
+ ctx->Stencil.Ref[face] == ref)
+ return;
+ FLUSH_VERTICES(ctx, _NEW_STENCIL);
+ ctx->Stencil.Function[face] = func;
+ ctx->Stencil.Ref[face] = ref;
+ ctx->Stencil.ValueMask[face] = mask;
+ if (ctx->Driver.StencilFuncSeparate) {
+ ctx->Driver.StencilFuncSeparate(ctx, face ? GL_BACK : GL_FRONT,
+ func, ref, mask);
+ }
+ }
}
GET_CURRENT_CONTEXT(ctx);
ASSERT_OUTSIDE_BEGIN_END(ctx);
- if (ctx->Extensions.EXT_stencil_two_side) {
- /* only set active face state */
- const GLint face = ctx->Stencil.ActiveFace;
- if (ctx->Stencil.WriteMask[face] == mask)
+ if (ctx->Extensions.ATI_separate_stencil) {
+ /* set both front and back state */
+ if (ctx->Stencil.WriteMask[0] == mask &&
+ ctx->Stencil.WriteMask[1] == mask)
return;
FLUSH_VERTICES(ctx, _NEW_STENCIL);
- ctx->Stencil.WriteMask[face] = mask;
+ ctx->Stencil.WriteMask[0] = ctx->Stencil.WriteMask[1] = mask;
if (ctx->Driver.StencilMaskSeparate) {
- ctx->Driver.StencilMaskSeparate(ctx, face ? GL_BACK : GL_FRONT, mask);
+ ctx->Driver.StencilMaskSeparate(ctx, GL_FRONT_AND_BACK, mask);
}
}
else {
- /* set both front and back state */
- if (ctx->Stencil.WriteMask[0] == mask &&
- ctx->Stencil.WriteMask[1] == mask)
+ /* only set active face state */
+ const GLint face = ctx->Stencil.ActiveFace;
+ if (ctx->Stencil.WriteMask[face] == mask)
return;
FLUSH_VERTICES(ctx, _NEW_STENCIL);
- ctx->Stencil.WriteMask[0] = ctx->Stencil.WriteMask[1] = mask;
+ ctx->Stencil.WriteMask[face] = mask;
if (ctx->Driver.StencilMaskSeparate) {
- ctx->Driver.StencilMaskSeparate(ctx, GL_FRONT_AND_BACK, mask);
+ ctx->Driver.StencilMaskSeparate(ctx, face ? GL_BACK : GL_FRONT, mask);
}
}
}
return;
}
- if (ctx->Extensions.EXT_stencil_two_side) {
- /* only set active face state */
- const GLint face = ctx->Stencil.ActiveFace;
- if (ctx->Stencil.ZFailFunc[face] == zfail &&
- ctx->Stencil.ZPassFunc[face] == zpass &&
- ctx->Stencil.FailFunc[face] == fail)
- return;
- FLUSH_VERTICES(ctx, _NEW_STENCIL);
- ctx->Stencil.ZFailFunc[face] = zfail;
- ctx->Stencil.ZPassFunc[face] = zpass;
- ctx->Stencil.FailFunc[face] = fail;
- if (ctx->Driver.StencilOpSeparate) {
- ctx->Driver.StencilOpSeparate(ctx, face ? GL_BACK : GL_FRONT,
- fail, zfail, zpass);
- }
- }
- else {
+ if (ctx->Extensions.ATI_separate_stencil) {
/* set both front and back state */
if (ctx->Stencil.ZFailFunc[0] == zfail &&
ctx->Stencil.ZFailFunc[1] == zfail &&
fail, zfail, zpass);
}
}
+ else {
+ /* only set active face state */
+ const GLint face = ctx->Stencil.ActiveFace;
+ if (ctx->Stencil.ZFailFunc[face] == zfail &&
+ ctx->Stencil.ZPassFunc[face] == zpass &&
+ ctx->Stencil.FailFunc[face] == fail)
+ return;
+ FLUSH_VERTICES(ctx, _NEW_STENCIL);
+ ctx->Stencil.ZFailFunc[face] = zfail;
+ ctx->Stencil.ZPassFunc[face] = zpass;
+ ctx->Stencil.FailFunc[face] = fail;
+ if (ctx->Driver.StencilOpSeparate) {
+ ctx->Driver.StencilOpSeparate(ctx, face ? GL_BACK : GL_FRONT,
+ fail, zfail, zpass);
+ }
+ }
}
FLUSH_VERTICES(ctx, _NEW_STENCIL);
- if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+ if (face != GL_BACK) {
ctx->Stencil.FailFunc[0] = fail;
ctx->Stencil.ZFailFunc[0] = zfail;
ctx->Stencil.ZPassFunc[0] = zpass;
}
- if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+ if (face != GL_FRONT) {
ctx->Stencil.FailFunc[1] = fail;
ctx->Stencil.ZFailFunc[1] = zfail;
ctx->Stencil.ZPassFunc[1] = zpass;
}
-
if (ctx->Driver.StencilOpSeparate) {
ctx->Driver.StencilOpSeparate(ctx, face, fail, zfail, zpass);
}
ctx->Stencil.Ref[1] = ref;
ctx->Stencil.ValueMask[1] = mask;
}
-
if (ctx->Driver.StencilFuncSeparate) {
ctx->Driver.StencilFuncSeparate(ctx, face, func, ref, mask);
}
FLUSH_VERTICES(ctx, _NEW_STENCIL);
- if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+ if (face != GL_BACK) {
ctx->Stencil.WriteMask[0] = mask;
}
- if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+ if (face != GL_FRONT) {
ctx->Stencil.WriteMask[1] = mask;
}
-
if (ctx->Driver.StencilMaskSeparate) {
ctx->Driver.StencilMaskSeparate(ctx, face, mask);
}
/*
* Mesa 3-D graphics library
- * Version: 6.5.1
+ * Version: 6.5.2
*
* Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
*
#ifdef __MINGW32__
#define DXTN_LIBNAME "dxtn.dll"
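+/* The RTLD_* flags are not used on MinGW (symbols are resolved with
+ * GetProcAddress(), see below); define them so the shared dlopen-style
+ * code compiles.
+ */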
+#define RTLD_LAZY 0
+#define RTLD_GLOBAL 0
#elif defined(__DJGPP__)
#define DXTN_LIBNAME "dxtn.dxe"
#else
{
#if USE_EXTERNAL_DXTN_LIB
#ifdef __MINGW32__
- return (GenericFunc) GetProcAddress(handle, fname)
+ return (GenericFunc) GetProcAddress(handle, fname);
#elif defined(__DJGPP__)
/* need '_' prefix on symbol names */
char fname2[1000];
#include "shader/program.h"
#include "shader/program_instruction.h"
-#define MAX_INSTRUCTIONS 100
+/**
+ * According to Glean's texCombine test, no more than 21 instructions
+ * are needed. Allow a few extra just in case.
+ */
+#define MAX_INSTRUCTIONS 24
#define DISASSEM (MESA_VERBOSE & VERBOSE_DISASSEM)
GLuint nr = p->program->Base.NumInstructions++;
struct prog_instruction *inst = &p->program->Base.Instructions[nr];
- _mesa_init_instruction(inst);
+ _mesa_init_instructions(inst, 1);
inst->Opcode = op;
emit_arg( &inst->SrcReg[0], src0 );
* current texture env/combine mode.
*/
static void
-create_new_program(struct state_key *key, GLcontext *ctx,
+create_new_program(GLcontext *ctx, struct state_key *key,
struct gl_fragment_program *program)
{
+ struct prog_instruction instBuffer[MAX_INSTRUCTIONS];
struct texenv_fragment_program p;
GLuint unit;
struct ureg cf, out;
p.state = key;
p.program = program;
- p.program->Base.Instructions =
- (struct prog_instruction*) _mesa_malloc(sizeof(struct prog_instruction) * MAX_INSTRUCTIONS);
- p.program->Base.NumInstructions = 0;
+ /* During code generation, use locally-allocated instruction buffer,
+ * then alloc dynamic storage below.
+ */
+ p.program->Base.Instructions = instBuffer;
p.program->Base.Target = GL_FRAGMENT_PROGRAM_ARB;
p.program->NumTexIndirections = 1; /* correct? */
p.program->NumTexInstructions = 0;
p.program->NumAluInstructions = 0;
p.program->Base.String = 0;
p.program->Base.NumInstructions =
- p.program->Base.NumTemporaries =
- p.program->Base.NumParameters =
- p.program->Base.NumAttributes = p.program->Base.NumAddressRegs = 0;
+ p.program->Base.NumTemporaries =
+ p.program->Base.NumParameters =
+ p.program->Base.NumAttributes = p.program->Base.NumAddressRegs = 0;
p.program->Base.Parameters = _mesa_new_parameter_list();
p.program->Base.InputsRead = 0;
ASSERT(p.program->Base.NumInstructions <= MAX_INSTRUCTIONS);
+ /* Allocate final instruction array */
+ program->Base.Instructions
+ = _mesa_alloc_instructions(program->Base.NumInstructions);
+ if (!program->Base.Instructions) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY,
+ "generating tex env program");
+ return;
+ }
+ _mesa_memcpy(program->Base.Instructions, instBuffer,
+ sizeof(struct prog_instruction)
+ * program->Base.NumInstructions);
+
/* Notify driver the fragment program has (actually) changed.
*/
- if (ctx->Driver.ProgramStringNotify || DISASSEM) {
- if (ctx->Driver.ProgramStringNotify)
- ctx->Driver.ProgramStringNotify( ctx, GL_FRAGMENT_PROGRAM_ARB,
- &p.program->Base );
-
- if (DISASSEM) {
- _mesa_print_program(&p.program->Base);
- _mesa_printf("\n");
- }
+ if (ctx->Driver.ProgramStringNotify) {
+ ctx->Driver.ProgramStringNotify( ctx, GL_FRAGMENT_PROGRAM_ARB,
+ &p.program->Base );
+ }
+
+ if (DISASSEM) {
+ _mesa_print_program(&p.program->Base);
+ _mesa_printf("\n");
}
}
(struct gl_fragment_program *)
ctx->Driver.NewProgram(ctx, GL_FRAGMENT_PROGRAM_ARB, 0);
- create_new_program(&key, ctx, ctx->_TexEnvProgram);
+ create_new_program(ctx, &key, ctx->_TexEnvProgram);
cache_item(&ctx->Texture.env_fp_cache, hash, &key, ctx->_TexEnvProgram);
} else {
void
_mesa_delete_texture_image( GLcontext *ctx, struct gl_texture_image *texImage )
{
- if (texImage->Data) {
- ctx->Driver.FreeTexImageData( ctx, texImage );
- }
+ /* Free texImage->Data and/or any other driver-specific texture
+ * image storage.
+ */
+ ASSERT(ctx->Driver.FreeTexImageData);
+ ctx->Driver.FreeTexImageData( ctx, texImage );
+
ASSERT(texImage->Data == NULL);
if (texImage->ImageOffsets)
_mesa_free(texImage->ImageOffsets);
* \sa gl_texture_unit.
*/
struct gl_texture_image *
-_mesa_select_tex_image(GLcontext *ctx, const struct gl_texture_unit *texUnit,
- GLenum target, GLint level)
+_mesa_select_tex_image(GLcontext *ctx, const struct gl_texture_object *texObj,
+ GLenum target, GLint level)
{
- ASSERT(texUnit);
- ASSERT(level < MAX_TEXTURE_LEVELS);
+ ASSERT(texObj);
+
+ if (level < 0 || level >= MAX_TEXTURE_LEVELS)
+ return NULL;
+
switch (target) {
case GL_TEXTURE_1D:
- return texUnit->Current1D->Image[0][level];
case GL_PROXY_TEXTURE_1D:
- return ctx->Texture.Proxy1D->Image[0][level];
case GL_TEXTURE_2D:
- return texUnit->Current2D->Image[0][level];
case GL_PROXY_TEXTURE_2D:
- return ctx->Texture.Proxy2D->Image[0][level];
case GL_TEXTURE_3D:
- return texUnit->Current3D->Image[0][level];
case GL_PROXY_TEXTURE_3D:
- return ctx->Texture.Proxy3D->Image[0][level];
+ return texObj->Image[0][level];
+
case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
if (ctx->Extensions.ARB_texture_cube_map) {
GLuint face = ((GLuint) target -
(GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X);
- return texUnit->CurrentCubeMap->Image[face][level];
+ return texObj->Image[face][level];
}
else
return NULL;
+
case GL_PROXY_TEXTURE_CUBE_MAP_ARB:
if (ctx->Extensions.ARB_texture_cube_map)
- return ctx->Texture.ProxyCubeMap->Image[0][level];
+ return texObj->Image[0][level];
else
return NULL;
+
case GL_TEXTURE_RECTANGLE_NV:
- if (ctx->Extensions.NV_texture_rectangle) {
- ASSERT(level == 0);
- return texUnit->CurrentRect->Image[0][level];
- }
- else {
- return NULL;
- }
case GL_PROXY_TEXTURE_RECTANGLE_NV:
- if (ctx->Extensions.NV_texture_rectangle) {
- ASSERT(level == 0);
- return ctx->Texture.ProxyRect->Image[0][level];
- }
- else {
+ if (ctx->Extensions.NV_texture_rectangle && level == 0)
+ return texObj->Image[0][level];
+ else
return NULL;
- }
+
default:
- _mesa_problem(ctx, "bad target in _mesa_select_tex_image()");
return NULL;
}
}
* out of memory.
*/
struct gl_texture_image *
-_mesa_get_tex_image(GLcontext *ctx, const struct gl_texture_unit *texUnit,
+_mesa_get_tex_image(GLcontext *ctx, struct gl_texture_object *texObj,
GLenum target, GLint level)
{
struct gl_texture_image *texImage;
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
+
+ if (!texObj)
+ return NULL;
+
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
if (!texImage) {
- struct gl_texture_object *texObj;
texImage = ctx->Driver.NewTextureImage(ctx);
if (!texImage) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "texture image allocation");
return NULL;
}
- texObj = _mesa_select_tex_object(ctx, texUnit, target);
- ASSERT(texObj);
+
_mesa_set_tex_image(texObj, target, level, texImage);
}
+
return texImage;
}
GLint width, GLint height, GLint depth,
GLenum format, GLenum type )
{
- struct gl_texture_unit *texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
- struct gl_texture_image *destTex;
-
/* Check target */
if (dimensions == 1) {
if (target != GL_TEXTURE_1D) {
return GL_TRUE;
}
}
- else if (ctx->Extensions.NV_texture_rectangle &&
- target == GL_TEXTURE_RECTANGLE_NV) {
+ else if (target == GL_TEXTURE_RECTANGLE_NV) {
if (!ctx->Extensions.NV_texture_rectangle) {
_mesa_error( ctx, GL_INVALID_ENUM, "glTexSubImage2D(target)" );
return GL_TRUE;
return GL_TRUE;
}
- destTex = _mesa_select_tex_image(ctx, texUnit, target, level);
+ if (!_mesa_is_legal_format_and_type(ctx, format, type)) {
+ _mesa_error(ctx, GL_INVALID_ENUM,
+ "glTexSubImage%dD(format or type)", dimensions);
+ return GL_TRUE;
+ }
+
+ return GL_FALSE;
+}
+
+static GLboolean
+subtexture_error_check2( GLcontext *ctx, GLuint dimensions,
+ GLenum target, GLint level,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLint width, GLint height, GLint depth,
+ GLenum format, GLenum type,
+ const struct gl_texture_image *destTex )
+{
if (!destTex) {
/* undefined image level */
_mesa_error(ctx, GL_INVALID_OPERATION, "glTexSubImage%dD", dimensions);
}
}
- if (!_mesa_is_legal_format_and_type(ctx, format, type)) {
- _mesa_error(ctx, GL_INVALID_ENUM,
- "glTexSubImage%dD(format or type)", dimensions);
- return GL_TRUE;
- }
-
#if FEATURE_EXT_texture_sRGB
if (destTex->InternalFormat == GL_COMPRESSED_SRGB_S3TC_DXT1_EXT ||
destTex->InternalFormat == GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT ||
#endif
if (destTex->IsCompressed) {
- const struct gl_texture_unit *texUnit;
- const struct gl_texture_image *texImage;
- texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
-
if (target == GL_TEXTURE_2D || target == GL_PROXY_TEXTURE_2D) {
/* OK */
}
return GL_TRUE;
}
/* size must be multiple of 4 or equal to whole texture size */
- if ((width & 3) && (GLuint) width != texImage->Width) {
+ if ((width & 3) && (GLuint) width != destTex->Width) {
_mesa_error(ctx, GL_INVALID_OPERATION,
"glTexSubImage%D(width)", dimensions);
return GL_TRUE;
}
- if ((height & 3) && (GLuint) height != texImage->Height) {
+ if ((height & 3) && (GLuint) height != destTex->Height) {
_mesa_error(ctx, GL_INVALID_OPERATION,
"glTexSubImage%D(width)", dimensions);
return GL_TRUE;
/* Basic level check (more checking in ctx->Driver.TestProxyTexImage) */
if (level < 0 || level >= MAX_TEXTURE_LEVELS) {
+ _mesa_error(ctx, GL_INVALID_VALUE,
+ "glCopyTexImage%dD(level=%d)", dimensions, level);
+ return GL_TRUE;
+ }
+
/* Check that the source buffer is complete */
if (ctx->ReadBuffer->Name) {
_mesa_test_framebuffer_completeness(ctx, ctx->ReadBuffer);
}
}
- _mesa_error(ctx, GL_INVALID_VALUE,
- "glCopyTexImage%dD(level=%d)", dimensions, level);
- return GL_TRUE;
- }
-
/* Check border */
if (border < 0 || border > 1 ||
((target == GL_TEXTURE_RECTANGLE_NV ||
copytexsubimage_error_check( GLcontext *ctx, GLuint dimensions,
GLenum target, GLint level,
GLint xoffset, GLint yoffset, GLint zoffset,
- GLsizei width, GLsizei height )
+ GLsizei width, GLsizei height)
{
- struct gl_texture_unit *texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
- struct gl_texture_image *teximage;
-
/* Check target */
/* Check that the source buffer is complete */
if (ctx->ReadBuffer->Name) {
return GL_TRUE;
}
- teximage = _mesa_select_tex_image(ctx, texUnit, target, level);
+ return GL_FALSE;
+}
+
+static GLboolean
+copytexsubimage_error_check2( GLcontext *ctx, GLuint dimensions,
+ GLenum target, GLint level,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLsizei width, GLsizei height,
+ const struct gl_texture_image *teximage )
+{
if (!teximage) {
_mesa_error(ctx, GL_INVALID_OPERATION,
"glCopyTexSubImage%dD(undefined texture level: %d)",
if (!pixels)
return;
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
- if (!texImage) {
- /* invalid mipmap level, not an error */
- return;
- }
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
+ if (!texImage) {
+ /* invalid mipmap level, not an error */
+ goto out;
+ }
- /* Make sure the requested image format is compatible with the
- * texture's format. Note that a color index texture can be converted
- * to RGBA so that combo is allowed.
- */
- if (is_color_format(format)
- && !is_color_format(texImage->TexFormat->BaseFormat)
- && !is_index_format(texImage->TexFormat->BaseFormat)) {
- _mesa_error(ctx, GL_INVALID_OPERATION, "glGetTexImage(format mismatch)");
- return;
- }
- else if (is_index_format(format)
- && !is_index_format(texImage->TexFormat->BaseFormat)) {
- _mesa_error(ctx, GL_INVALID_OPERATION, "glGetTexImage(format mismatch)");
- return;
- }
- else if (is_depth_format(format)
- && !is_depth_format(texImage->TexFormat->BaseFormat)
- && !is_depthstencil_format(texImage->TexFormat->BaseFormat)) {
- _mesa_error(ctx, GL_INVALID_OPERATION, "glGetTexImage(format mismatch)");
- return;
- }
- else if (is_ycbcr_format(format)
- && !is_ycbcr_format(texImage->TexFormat->BaseFormat)) {
- _mesa_error(ctx, GL_INVALID_OPERATION, "glGetTexImage(format mismatch)");
- return;
- }
- else if (is_depthstencil_format(format)
- && !is_depthstencil_format(texImage->TexFormat->BaseFormat)) {
- _mesa_error(ctx, GL_INVALID_OPERATION, "glGetTexImage(format mismatch)");
- return;
- }
- if (ctx->Pack.BufferObj->Name) {
- /* packing texture image into a PBO */
- const GLuint dimensions = (target == GL_TEXTURE_3D) ? 3 : 2;
- if (!_mesa_validate_pbo_access(dimensions, &ctx->Pack, texImage->Width,
- texImage->Height, texImage->Depth,
- format, type, pixels)) {
- _mesa_error(ctx, GL_INVALID_OPERATION,
- "glGetTexImage(invalid PBO access)");
- return;
+ /* Make sure the requested image format is compatible with the
+ * texture's format. Note that a color index texture can be converted
+ * to RGBA so that combo is allowed.
+ */
+ if (is_color_format(format)
+ && !is_color_format(texImage->TexFormat->BaseFormat)
+ && !is_index_format(texImage->TexFormat->BaseFormat)) {
+ _mesa_error(ctx, GL_INVALID_OPERATION, "glGetTexImage(format mismatch)");
+ goto out;
+ }
+ else if (is_index_format(format)
+ && !is_index_format(texImage->TexFormat->BaseFormat)) {
+ _mesa_error(ctx, GL_INVALID_OPERATION, "glGetTexImage(format mismatch)");
+ goto out;
+ }
+ else if (is_depth_format(format)
+ && !is_depth_format(texImage->TexFormat->BaseFormat)
+ && !is_depthstencil_format(texImage->TexFormat->BaseFormat)) {
+ _mesa_error(ctx, GL_INVALID_OPERATION, "glGetTexImage(format mismatch)");
+ goto out;
+ }
+ else if (is_ycbcr_format(format)
+ && !is_ycbcr_format(texImage->TexFormat->BaseFormat)) {
+ _mesa_error(ctx, GL_INVALID_OPERATION, "glGetTexImage(format mismatch)");
+ goto out;
+ }
+ else if (is_depthstencil_format(format)
+ && !is_depthstencil_format(texImage->TexFormat->BaseFormat)) {
+ _mesa_error(ctx, GL_INVALID_OPERATION, "glGetTexImage(format mismatch)");
+ goto out;
+ }
+
+ if (ctx->Pack.BufferObj->Name) {
+ /* packing texture image into a PBO */
+ const GLuint dimensions = (target == GL_TEXTURE_3D) ? 3 : 2;
+ if (!_mesa_validate_pbo_access(dimensions, &ctx->Pack, texImage->Width,
+ texImage->Height, texImage->Depth,
+ format, type, pixels)) {
+ _mesa_error(ctx, GL_INVALID_OPERATION,
+ "glGetTexImage(invalid PBO access)");
+ goto out;
+ }
}
- }
- /* typically, this will call _mesa_get_teximage() */
- ctx->Driver.GetTexImage(ctx, target, level, format, type, pixels,
- texObj, texImage);
+ /* typically, this will call _mesa_get_teximage() */
+ ctx->Driver.GetTexImage(ctx, target, level, format, type, pixels,
+ texObj, texImage);
+
+ }
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
return; /* error was recorded */
}
- texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
- texObj = _mesa_select_tex_object(ctx, texUnit, target);
- texImage = _mesa_get_tex_image(ctx, texUnit, target, level);
-
- if (!texImage) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage1D");
- return;
- }
- else if (texImage->Data) {
- ctx->Driver.FreeTexImageData( ctx, texImage );
- }
- ASSERT(texImage->Data == NULL);
- clear_teximage_fields(texImage); /* not really needed, but helpful */
- _mesa_init_teximage_fields(ctx, target, texImage,
- postConvWidth, 1, 1,
- border, internalFormat);
-
if (ctx->NewState & _IMAGE_NEW_TRANSFER_STATE)
- _mesa_update_state(ctx);
+ _mesa_update_state(ctx);
- ASSERT(ctx->Driver.TexImage1D);
+ texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
+ texObj = _mesa_select_tex_object(ctx, texUnit, target);
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_get_tex_image(ctx, texObj, target, level);
+ if (!texImage) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage1D");
+ goto out;
+ }
+
+ if (texImage->Data) {
+ ctx->Driver.FreeTexImageData( ctx, texImage );
+ }
- /* Give the texture to the driver! <pixels> may be null! */
- (*ctx->Driver.TexImage1D)(ctx, target, level, internalFormat,
- width, border, format, type, pixels,
- &ctx->Unpack, texObj, texImage);
+ ASSERT(texImage->Data == NULL);
- ASSERT(texImage->TexFormat);
+ clear_teximage_fields(texImage); /* not really needed, but helpful */
+ _mesa_init_teximage_fields(ctx, target, texImage,
+ postConvWidth, 1, 1,
+ border, internalFormat);
+
+ ASSERT(ctx->Driver.TexImage1D);
- update_fbo_texture(ctx, texObj, face, level);
+ /* Give the texture to the driver! <pixels> may be null! */
+ (*ctx->Driver.TexImage1D)(ctx, target, level, internalFormat,
+ width, border, format, type, pixels,
+ &ctx->Unpack, texObj, texImage);
+
+ ASSERT(texImage->TexFormat);
- /* state update */
- texObj->Complete = GL_FALSE;
- ctx->NewState |= _NEW_TEXTURE;
+ update_fbo_texture(ctx, texObj, face, level);
+
+ /* state update */
+ texObj->Complete = GL_FALSE;
+ ctx->NewState |= _NEW_TEXTURE;
+ }
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
else if (target == GL_PROXY_TEXTURE_1D) {
/* Proxy texture: check for errors and update proxy state */
return; /* error was recorded */
}
- texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
- texObj = _mesa_select_tex_object(ctx, texUnit, target);
- texImage = _mesa_get_tex_image(ctx, texUnit, target, level);
- if (!texImage) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage2D");
- return;
- }
- else if (texImage->Data) {
- ctx->Driver.FreeTexImageData( ctx, texImage );
- }
- ASSERT(texImage->Data == NULL);
- clear_teximage_fields(texImage); /* not really needed, but helpful */
- _mesa_init_teximage_fields(ctx, target, texImage,
- postConvWidth, postConvHeight, 1,
- border, internalFormat);
-
if (ctx->NewState & _IMAGE_NEW_TRANSFER_STATE)
- _mesa_update_state(ctx);
+ _mesa_update_state(ctx);
- ASSERT(ctx->Driver.TexImage2D);
-
- /* Give the texture to the driver! <pixels> may be null! */
- (*ctx->Driver.TexImage2D)(ctx, target, level, internalFormat,
- width, height, border, format, type, pixels,
- &ctx->Unpack, texObj, texImage);
-
- ASSERT(texImage->TexFormat);
-
- update_fbo_texture(ctx, texObj, face, level);
-
- /* state update */
- texObj->Complete = GL_FALSE;
- ctx->NewState |= _NEW_TEXTURE;
+ texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
+ texObj = _mesa_select_tex_object(ctx, texUnit, target);
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_get_tex_image(ctx, texObj, target, level);
+ if (!texImage) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage2D");
+ goto out;
+ }
+
+ if (texImage->Data) {
+ ctx->Driver.FreeTexImageData( ctx, texImage );
+ }
+
+ ASSERT(texImage->Data == NULL);
+ clear_teximage_fields(texImage); /* not really needed, but helpful */
+ _mesa_init_teximage_fields(ctx, target, texImage,
+ postConvWidth, postConvHeight, 1,
+ border, internalFormat);
+
+ ASSERT(ctx->Driver.TexImage2D);
+
+ /* Give the texture to the driver! <pixels> may be null! */
+ (*ctx->Driver.TexImage2D)(ctx, target, level, internalFormat,
+ width, height, border, format, type, pixels,
+ &ctx->Unpack, texObj, texImage);
+
+ ASSERT(texImage->TexFormat);
+
+ update_fbo_texture(ctx, texObj, face, level);
+
+ /* state update */
+ texObj->Complete = GL_FALSE;
+ ctx->NewState |= _NEW_TEXTURE;
+ }
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
else if (target == GL_PROXY_TEXTURE_2D ||
(target == GL_PROXY_TEXTURE_CUBE_MAP_ARB &&
return; /* error was recorded */
}
+ if (ctx->NewState & _IMAGE_NEW_TRANSFER_STATE)
+ _mesa_update_state(ctx);
+
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
- texImage = _mesa_get_tex_image(ctx, texUnit, target, level);
- if (!texImage) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage3D");
- return;
- }
- else if (texImage->Data) {
- ctx->Driver.FreeTexImageData( ctx, texImage );
- }
- ASSERT(texImage->Data == NULL);
- clear_teximage_fields(texImage); /* not really needed, but helpful */
- _mesa_init_teximage_fields(ctx, target, texImage,
- width, height, depth,
- border, internalFormat);
-
- if (ctx->NewState & _IMAGE_NEW_TRANSFER_STATE)
- _mesa_update_state(ctx);
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_get_tex_image(ctx, texObj, target, level);
+ if (!texImage) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage3D");
+ goto out;
+ }
+
+ if (texImage->Data) {
+ ctx->Driver.FreeTexImageData( ctx, texImage );
+ }
+
+ ASSERT(texImage->Data == NULL);
+ clear_teximage_fields(texImage); /* not really needed, but helpful */
+ _mesa_init_teximage_fields(ctx, target, texImage,
+ width, height, depth,
+ border, internalFormat);
- ASSERT(ctx->Driver.TexImage3D);
+ ASSERT(ctx->Driver.TexImage3D);
- /* Give the texture to the driver! <pixels> may be null! */
- (*ctx->Driver.TexImage3D)(ctx, target, level, internalFormat,
- width, height, depth, border, format, type,
- pixels, &ctx->Unpack, texObj, texImage);
+ /* Give the texture to the driver! <pixels> may be null! */
+ (*ctx->Driver.TexImage3D)(ctx, target, level, internalFormat,
+ width, height, depth, border, format, type,
+ pixels, &ctx->Unpack, texObj, texImage);
- ASSERT(texImage->TexFormat);
+ ASSERT(texImage->TexFormat);
- update_fbo_texture(ctx, texObj, face, level);
+ update_fbo_texture(ctx, texObj, face, level);
- /* state update */
- texObj->Complete = GL_FALSE;
- ctx->NewState |= _NEW_TEXTURE;
+ /* state update */
+ texObj->Complete = GL_FALSE;
+ ctx->NewState |= _NEW_TEXTURE;
+ }
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
else if (target == GL_PROXY_TEXTURE_3D) {
/* Proxy texture: check for errors and update proxy state */
GLsizei postConvWidth = width;
struct gl_texture_unit *texUnit;
struct gl_texture_object *texObj;
- struct gl_texture_image *texImage;
+ struct gl_texture_image *texImage = NULL;
GET_CURRENT_CONTEXT(ctx);
ASSERT_OUTSIDE_BEGIN_END_AND_FLUSH(ctx);
}
if (subtexture_error_check(ctx, 1, target, level, xoffset, 0, 0,
- postConvWidth, 1, 1, format, type)) {
+ postConvWidth, 1, 1, format, type)) {
return; /* error was detected */
}
+
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
- assert(texImage);
+ assert(texObj);
- if (width == 0)
- return; /* no-op, not an error */
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
- /* If we have a border, xoffset=-1 is legal. Bias by border width */
- xoffset += texImage->Border;
+ if (subtexture_error_check2(ctx, 1, target, level, xoffset, 0, 0,
+ postConvWidth, 1, 1, format, type, texImage)) {
+ goto out; /* error was detected */
+ }
+
+ if (width == 0)
+ goto out; /* no-op, not an error */
+
+ /* If we have a border, xoffset=-1 is legal. Bias by border width */
+ xoffset += texImage->Border;
- ASSERT(ctx->Driver.TexSubImage1D);
- (*ctx->Driver.TexSubImage1D)(ctx, target, level, xoffset, width,
- format, type, pixels, &ctx->Unpack,
- texObj, texImage);
- ctx->NewState |= _NEW_TEXTURE;
+ ASSERT(ctx->Driver.TexSubImage1D);
+ (*ctx->Driver.TexSubImage1D)(ctx, target, level, xoffset, width,
+ format, type, pixels, &ctx->Unpack,
+ texObj, texImage);
+ ctx->NewState |= _NEW_TEXTURE;
+ }
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
}
if (subtexture_error_check(ctx, 2, target, level, xoffset, yoffset, 0,
- postConvWidth, postConvHeight, 1, format, type)) {
+ postConvWidth, postConvHeight, 1, format, type)) {
return; /* error was detected */
}
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
- assert(texImage);
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
- if (width == 0 || height == 0)
- return; /* no-op, not an error */
+ if (subtexture_error_check2(ctx, 2, target, level, xoffset, yoffset, 0,
+ postConvWidth, postConvHeight, 1, format, type,
+ texImage)) {
+ goto out; /* error was detected */
+ }
- /* If we have a border, xoffset=-1 is legal. Bias by border width */
- xoffset += texImage->Border;
- yoffset += texImage->Border;
+ if (width == 0 || height == 0)
+ goto out; /* no-op, not an error */
- ASSERT(ctx->Driver.TexSubImage2D);
- (*ctx->Driver.TexSubImage2D)(ctx, target, level, xoffset, yoffset,
- width, height, format, type, pixels,
- &ctx->Unpack, texObj, texImage);
- ctx->NewState |= _NEW_TEXTURE;
+ /* If we have a border, xoffset=-1 is legal. Bias by border width */
+ xoffset += texImage->Border;
+ yoffset += texImage->Border;
+
+ ASSERT(ctx->Driver.TexSubImage2D);
+ (*ctx->Driver.TexSubImage2D)(ctx, target, level, xoffset, yoffset,
+ width, height, format, type, pixels,
+ &ctx->Unpack, texObj, texImage);
+ ctx->NewState |= _NEW_TEXTURE;
+ }
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
- assert(texImage);
-
- if (width == 0 || height == 0 || height == 0)
- return; /* no-op, not an error */
-
- /* If we have a border, xoffset=-1 is legal. Bias by border width */
- xoffset += texImage->Border;
- yoffset += texImage->Border;
- zoffset += texImage->Border;
-
- ASSERT(ctx->Driver.TexSubImage3D);
- (*ctx->Driver.TexSubImage3D)(ctx, target, level,
- xoffset, yoffset, zoffset,
- width, height, depth,
- format, type, pixels,
- &ctx->Unpack, texObj, texImage );
- ctx->NewState |= _NEW_TEXTURE;
+
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
+
+ if (subtexture_error_check2(ctx, 3, target, level, xoffset, yoffset, zoffset,
+ width, height, depth, format, type, texImage)) {
+ goto out; /* error was detected */
+ }
+
+ if (width == 0 || height == 0 || depth == 0)
+ goto out; /* no-op, not an error */
+
+ /* If we have a border, xoffset=-1 is legal. Bias by border width */
+ xoffset += texImage->Border;
+ yoffset += texImage->Border;
+ zoffset += texImage->Border;
+
+ ASSERT(ctx->Driver.TexSubImage3D);
+ (*ctx->Driver.TexSubImage3D)(ctx, target, level,
+ xoffset, yoffset, zoffset,
+ width, height, depth,
+ format, type, pixels,
+ &ctx->Unpack, texObj, texImage );
+ ctx->NewState |= _NEW_TEXTURE;
+ }
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
- texImage = _mesa_get_tex_image(ctx, texUnit, target, level);
- if (!texImage) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "glCopyTexImage1D");
- return;
- }
- else if (texImage->Data) {
- ctx->Driver.FreeTexImageData( ctx, texImage );
- }
- ASSERT(texImage->Data == NULL);
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_get_tex_image(ctx, texObj, target, level);
+ if (!texImage) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glCopyTexImage1D");
+ goto out;
+ }
+
+ if (texImage->Data) {
+ ctx->Driver.FreeTexImageData( ctx, texImage );
+ }
+
+ ASSERT(texImage->Data == NULL);
- clear_teximage_fields(texImage); /* not really needed, but helpful */
- _mesa_init_teximage_fields(ctx, target, texImage, postConvWidth, 1, 1,
- border, internalFormat);
+ clear_teximage_fields(texImage); /* not really needed, but helpful */
+ _mesa_init_teximage_fields(ctx, target, texImage, postConvWidth, 1, 1,
+ border, internalFormat);
- ASSERT(ctx->Driver.CopyTexImage1D);
- (*ctx->Driver.CopyTexImage1D)(ctx, target, level, internalFormat,
- x, y, width, border);
+ ASSERT(ctx->Driver.CopyTexImage1D);
+ (*ctx->Driver.CopyTexImage1D)(ctx, target, level, internalFormat,
+ x, y, width, border);
- ASSERT(texImage->TexFormat);
+ ASSERT(texImage->TexFormat);
- update_fbo_texture(ctx, texObj, face, level);
+ update_fbo_texture(ctx, texObj, face, level);
- /* state update */
- texObj->Complete = GL_FALSE;
- ctx->NewState |= _NEW_TEXTURE;
+ /* state update */
+ texObj->Complete = GL_FALSE;
+ ctx->NewState |= _NEW_TEXTURE;
+ }
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
- texImage = _mesa_get_tex_image(ctx, texUnit, target, level);
- if (!texImage) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "glCopyTexImage2D");
- return;
- }
- else if (texImage->Data) {
- ctx->Driver.FreeTexImageData( ctx, texImage );
- }
- ASSERT(texImage->Data == NULL);
- clear_teximage_fields(texImage); /* not really needed, but helpful */
- _mesa_init_teximage_fields(ctx, target, texImage,
- postConvWidth, postConvHeight, 1,
- border, internalFormat);
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_get_tex_image(ctx, texObj, target, level);
- ASSERT(ctx->Driver.CopyTexImage2D);
- (*ctx->Driver.CopyTexImage2D)(ctx, target, level, internalFormat,
- x, y, width, height, border);
+ if (!texImage) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glCopyTexImage2D");
+ goto out;
+ }
+
+ if (texImage->Data) {
+ ctx->Driver.FreeTexImageData( ctx, texImage );
+ }
+
+ ASSERT(texImage->Data == NULL);
- ASSERT(texImage->TexFormat);
+ clear_teximage_fields(texImage); /* not really needed, but helpful */
+ _mesa_init_teximage_fields(ctx, target, texImage,
+ postConvWidth, postConvHeight, 1,
+ border, internalFormat);
+
+ ASSERT(ctx->Driver.CopyTexImage2D);
+ (*ctx->Driver.CopyTexImage2D)(ctx, target, level, internalFormat,
+ x, y, width, height, border);
+
+ ASSERT(texImage->TexFormat);
- update_fbo_texture(ctx, texObj, face, level);
+ update_fbo_texture(ctx, texObj, face, level);
- /* state update */
- texObj->Complete = GL_FALSE;
- ctx->NewState |= _NEW_TEXTURE;
+ /* state update */
+ texObj->Complete = GL_FALSE;
+ ctx->NewState |= _NEW_TEXTURE;
+ }
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
-
void GLAPIENTRY
_mesa_CopyTexSubImage1D( GLenum target, GLint level,
GLint xoffset, GLint x, GLint y, GLsizei width )
{
struct gl_texture_unit *texUnit;
+ struct gl_texture_object *texObj;
struct gl_texture_image *texImage;
GLsizei postConvWidth = width;
GET_CURRENT_CONTEXT(ctx);
return;
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
- ASSERT(texImage);
+ texObj = _mesa_select_tex_object(ctx, texUnit, target);
+
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
+
+ if (copytexsubimage_error_check2(ctx, 1, target, level,
+ xoffset, 0, 0, postConvWidth, 1,
+ texImage))
+ goto out;
+
- /* If we have a border, xoffset=-1 is legal. Bias by border width */
- xoffset += texImage->Border;
+ /* If we have a border, xoffset=-1 is legal. Bias by border width */
+ xoffset += texImage->Border;
- ASSERT(ctx->Driver.CopyTexSubImage1D);
- (*ctx->Driver.CopyTexSubImage1D)(ctx, target, level, xoffset, x, y, width);
- ctx->NewState |= _NEW_TEXTURE;
+ ASSERT(ctx->Driver.CopyTexSubImage1D);
+ (*ctx->Driver.CopyTexSubImage1D)(ctx, target, level, xoffset, x, y, width);
+ ctx->NewState |= _NEW_TEXTURE;
+ }
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
GLint x, GLint y, GLsizei width, GLsizei height )
{
struct gl_texture_unit *texUnit;
+ struct gl_texture_object *texObj;
struct gl_texture_image *texImage;
GLsizei postConvWidth = width, postConvHeight = height;
GET_CURRENT_CONTEXT(ctx);
return;
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
- ASSERT(texImage);
+ texObj = _mesa_select_tex_object(ctx, texUnit, target);
+
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
- /* If we have a border, xoffset=-1 is legal. Bias by border width */
- xoffset += texImage->Border;
- yoffset += texImage->Border;
+ if (copytexsubimage_error_check2(ctx, 2, target, level, xoffset, yoffset, 0,
+ postConvWidth, postConvHeight, texImage))
+ goto out;
- ASSERT(ctx->Driver.CopyTexSubImage2D);
- (*ctx->Driver.CopyTexSubImage2D)(ctx, target, level,
- xoffset, yoffset, x, y, width, height);
- ctx->NewState |= _NEW_TEXTURE;
+ /* If we have a border, xoffset=-1 is legal. Bias by border width */
+ xoffset += texImage->Border;
+ yoffset += texImage->Border;
+
+ ASSERT(ctx->Driver.CopyTexSubImage2D);
+ (*ctx->Driver.CopyTexSubImage2D)(ctx, target, level,
+ xoffset, yoffset, x, y, width, height);
+ ctx->NewState |= _NEW_TEXTURE;
+ }
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
GLint x, GLint y, GLsizei width, GLsizei height )
{
struct gl_texture_unit *texUnit;
+ struct gl_texture_object *texObj;
struct gl_texture_image *texImage;
GLsizei postConvWidth = width, postConvHeight = height;
GET_CURRENT_CONTEXT(ctx);
return;
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
- ASSERT(texImage);
+ texObj = _mesa_select_tex_object(ctx, texUnit, target);
- /* If we have a border, xoffset=-1 is legal. Bias by border width */
- xoffset += texImage->Border;
- yoffset += texImage->Border;
- zoffset += texImage->Border;
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
- ASSERT(ctx->Driver.CopyTexSubImage3D);
- (*ctx->Driver.CopyTexSubImage3D)(ctx, target, level,
- xoffset, yoffset, zoffset,
- x, y, width, height);
- ctx->NewState |= _NEW_TEXTURE;
+ if (copytexsubimage_error_check2(ctx, 3, target, level, xoffset, yoffset,
+ zoffset, postConvWidth, postConvHeight,
+ texImage))
+ goto out;
+
+ /* If we have a border, xoffset=-1 is legal. Bias by border width */
+ xoffset += texImage->Border;
+ yoffset += texImage->Border;
+ zoffset += texImage->Border;
+
+ ASSERT(ctx->Driver.CopyTexSubImage3D);
+ (*ctx->Driver.CopyTexSubImage3D)(ctx, target, level,
+ xoffset, yoffset, zoffset,
+ x, y, width, height);
+ ctx->NewState |= _NEW_TEXTURE;
+ }
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
- texImage = _mesa_get_tex_image(ctx, texUnit, target, level);
- if (!texImage) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "glCompressedTexImage1D");
- return;
- }
- else if (texImage->Data) {
- ctx->Driver.FreeTexImageData( ctx, texImage );
- }
- ASSERT(texImage->Data == NULL);
- _mesa_init_teximage_fields(ctx, target, texImage, width, 1, 1,
- border, internalFormat);
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_get_tex_image(ctx, texObj, target, level);
+ if (!texImage) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glCompressedTexImage1D");
+ goto out;
+ }
+
+ if (texImage->Data) {
+ ctx->Driver.FreeTexImageData( ctx, texImage );
+ }
+ ASSERT(texImage->Data == NULL);
+
+ _mesa_init_teximage_fields(ctx, target, texImage, width, 1, 1,
+ border, internalFormat);
- ASSERT(ctx->Driver.CompressedTexImage1D);
- (*ctx->Driver.CompressedTexImage1D)(ctx, target, level,
- internalFormat, width, border,
- imageSize, data,
- texObj, texImage);
+ ASSERT(ctx->Driver.CompressedTexImage1D);
+ (*ctx->Driver.CompressedTexImage1D)(ctx, target, level,
+ internalFormat, width, border,
+ imageSize, data,
+ texObj, texImage);
- /* state update */
- texObj->Complete = GL_FALSE;
- ctx->NewState |= _NEW_TEXTURE;
+ /* state update */
+ texObj->Complete = GL_FALSE;
+ ctx->NewState |= _NEW_TEXTURE;
+ }
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
else if (target == GL_PROXY_TEXTURE_1D) {
/* Proxy texture: check for errors and update proxy state */
else {
/* store the teximage parameters */
struct gl_texture_unit *texUnit;
+ struct gl_texture_object *texObj;
struct gl_texture_image *texImage;
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
- _mesa_init_teximage_fields(ctx, target, texImage, width, 1, 1,
- border, internalFormat);
+ texObj = _mesa_select_tex_object(ctx, texUnit, target);
+
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
+ _mesa_init_teximage_fields(ctx, target, texImage, width, 1, 1,
+ border, internalFormat);
+ }
+ _mesa_unlock_texture(ctx, texObj);
}
}
else {
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
- texImage = _mesa_get_tex_image(ctx, texUnit, target, level);
- if (!texImage) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "glCompressedTexImage2D");
- return;
- }
- else if (texImage->Data) {
- ctx->Driver.FreeTexImageData( ctx, texImage );
- }
- ASSERT(texImage->Data == NULL);
- _mesa_init_teximage_fields(ctx, target, texImage, width, height, 1,
- border, internalFormat);
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_get_tex_image(ctx, texObj, target, level);
+ if (!texImage) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glCompressedTexImage2D");
+ goto out;
+ }
+
+ if (texImage->Data) {
+ ctx->Driver.FreeTexImageData( ctx, texImage );
+ }
+ ASSERT(texImage->Data == NULL);
- ASSERT(ctx->Driver.CompressedTexImage2D);
- (*ctx->Driver.CompressedTexImage2D)(ctx, target, level,
- internalFormat, width, height,
- border, imageSize, data,
- texObj, texImage);
+ _mesa_init_teximage_fields(ctx, target, texImage, width, height, 1,
+ border, internalFormat);
- /* state update */
- texObj->Complete = GL_FALSE;
- ctx->NewState |= _NEW_TEXTURE;
+ ASSERT(ctx->Driver.CompressedTexImage2D);
+ (*ctx->Driver.CompressedTexImage2D)(ctx, target, level,
+ internalFormat, width, height,
+ border, imageSize, data,
+ texObj, texImage);
+
+ /* state update */
+ texObj->Complete = GL_FALSE;
+ ctx->NewState |= _NEW_TEXTURE;
+ }
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
else if (target == GL_PROXY_TEXTURE_2D ||
(target == GL_PROXY_TEXTURE_CUBE_MAP_ARB &&
else {
/* store the teximage parameters */
struct gl_texture_unit *texUnit;
+ struct gl_texture_object *texObj;
struct gl_texture_image *texImage;
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
- _mesa_init_teximage_fields(ctx, target, texImage, width, height, 1,
- border, internalFormat);
+ texObj = _mesa_select_tex_object(ctx, texUnit, target);
+
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
+ _mesa_init_teximage_fields(ctx, target, texImage, width, height, 1,
+ border, internalFormat);
+ }
+ _mesa_unlock_texture(ctx, texObj);
}
}
else {
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
- texImage = _mesa_get_tex_image(ctx, texUnit, target, level);
- if (!texImage) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "glCompressedTexImage3D");
- return;
- }
- else if (texImage->Data) {
- ctx->Driver.FreeTexImageData( ctx, texImage );
- }
- ASSERT(texImage->Data == NULL);
-
- _mesa_init_teximage_fields(ctx, target, texImage, width, height, depth,
- border, internalFormat);
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_get_tex_image(ctx, texObj, target, level);
+ if (!texImage) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glCompressedTexImage3D");
+ goto out;
+ }
+
+ if (texImage->Data) {
+ ctx->Driver.FreeTexImageData( ctx, texImage );
+ }
+ ASSERT(texImage->Data == NULL);
- ASSERT(ctx->Driver.CompressedTexImage3D);
- (*ctx->Driver.CompressedTexImage3D)(ctx, target, level,
- internalFormat,
- width, height, depth,
- border, imageSize, data,
- texObj, texImage);
+ _mesa_init_teximage_fields(ctx, target, texImage, width, height, depth,
+ border, internalFormat);
- /* state update */
- texObj->Complete = GL_FALSE;
- ctx->NewState |= _NEW_TEXTURE;
+ ASSERT(ctx->Driver.CompressedTexImage3D);
+ (*ctx->Driver.CompressedTexImage3D)(ctx, target, level,
+ internalFormat,
+ width, height, depth,
+ border, imageSize, data,
+ texObj, texImage);
+
+ /* state update */
+ texObj->Complete = GL_FALSE;
+ ctx->NewState |= _NEW_TEXTURE;
+ }
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
else if (target == GL_PROXY_TEXTURE_3D) {
/* Proxy texture: check for errors and update proxy state */
else {
/* store the teximage parameters */
struct gl_texture_unit *texUnit;
+ struct gl_texture_object *texObj;
struct gl_texture_image *texImage;
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
- _mesa_init_teximage_fields(ctx, target, texImage, width, height,
- depth, border, internalFormat);
+ texObj = _mesa_select_tex_object(ctx, texUnit, target);
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
+ _mesa_init_teximage_fields(ctx, target, texImage, width, height,
+ depth, border, internalFormat);
+ }
+ _mesa_unlock_texture(ctx, texObj);
}
}
else {
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
- assert(texImage);
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
+ assert(texImage);
- if ((GLint) format != texImage->InternalFormat) {
- _mesa_error(ctx, GL_INVALID_OPERATION,
- "glCompressedTexSubImage1D(format)");
- return;
- }
+ if ((GLint) format != texImage->InternalFormat) {
+ _mesa_error(ctx, GL_INVALID_OPERATION,
+ "glCompressedTexSubImage1D(format)");
+ goto out;
+ }
- if ((width == 1 || width == 2) && (GLuint) width != texImage->Width) {
- _mesa_error(ctx, GL_INVALID_VALUE, "glCompressedTexSubImage1D(width)");
- return;
- }
+ if ((width == 1 || width == 2) && (GLuint) width != texImage->Width) {
+ _mesa_error(ctx, GL_INVALID_VALUE, "glCompressedTexSubImage1D(width)");
+ goto out;
+ }
- if (width == 0)
- return; /* no-op, not an error */
+ if (width == 0)
+ goto out; /* no-op, not an error */
- if (ctx->Driver.CompressedTexSubImage1D) {
- (*ctx->Driver.CompressedTexSubImage1D)(ctx, target, level,
- xoffset, width,
- format, imageSize, data,
- texObj, texImage);
+ if (ctx->Driver.CompressedTexSubImage1D) {
+ (*ctx->Driver.CompressedTexSubImage1D)(ctx, target, level,
+ xoffset, width,
+ format, imageSize, data,
+ texObj, texImage);
+ }
+ ctx->NewState |= _NEW_TEXTURE;
}
- ctx->NewState |= _NEW_TEXTURE;
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
- assert(texImage);
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
+ assert(texImage);
- if ((GLint) format != texImage->InternalFormat) {
- _mesa_error(ctx, GL_INVALID_OPERATION,
- "glCompressedTexSubImage2D(format)");
- return;
- }
+ if ((GLint) format != texImage->InternalFormat) {
+ _mesa_error(ctx, GL_INVALID_OPERATION,
+ "glCompressedTexSubImage2D(format)");
+ goto out;
+ }
- if (((width == 1 || width == 2) && (GLuint) width != texImage->Width) ||
- ((height == 1 || height == 2) && (GLuint) height != texImage->Height)) {
- _mesa_error(ctx, GL_INVALID_VALUE, "glCompressedTexSubImage2D(size)");
- return;
- }
+ if (((width == 1 || width == 2) && (GLuint) width != texImage->Width) ||
+ ((height == 1 || height == 2) && (GLuint) height != texImage->Height)) {
+ _mesa_error(ctx, GL_INVALID_VALUE, "glCompressedTexSubImage2D(size)");
+ goto out;
+ }
- if (width == 0 || height == 0)
- return; /* no-op, not an error */
+ if (width == 0 || height == 0)
+ goto out; /* no-op, not an error */
- if (ctx->Driver.CompressedTexSubImage2D) {
- (*ctx->Driver.CompressedTexSubImage2D)(ctx, target, level,
- xoffset, yoffset, width, height,
- format, imageSize, data,
- texObj, texImage);
+ if (ctx->Driver.CompressedTexSubImage2D) {
+ (*ctx->Driver.CompressedTexSubImage2D)(ctx, target, level,
+ xoffset, yoffset, width, height,
+ format, imageSize, data,
+ texObj, texImage);
+ }
+ ctx->NewState |= _NEW_TEXTURE;
}
- ctx->NewState |= _NEW_TEXTURE;
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
- assert(texImage);
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
+ assert(texImage);
- if ((GLint) format != texImage->InternalFormat) {
- _mesa_error(ctx, GL_INVALID_OPERATION,
- "glCompressedTexSubImage3D(format)");
- return;
- }
+ if ((GLint) format != texImage->InternalFormat) {
+ _mesa_error(ctx, GL_INVALID_OPERATION,
+ "glCompressedTexSubImage3D(format)");
+ goto out;
+ }
- if (((width == 1 || width == 2) && (GLuint) width != texImage->Width) ||
- ((height == 1 || height == 2) && (GLuint) height != texImage->Height) ||
- ((depth == 1 || depth == 2) && (GLuint) depth != texImage->Depth)) {
- _mesa_error(ctx, GL_INVALID_VALUE, "glCompressedTexSubImage3D(size)");
- return;
- }
+ if (((width == 1 || width == 2) && (GLuint) width != texImage->Width) ||
+ ((height == 1 || height == 2) && (GLuint) height != texImage->Height) ||
+ ((depth == 1 || depth == 2) && (GLuint) depth != texImage->Depth)) {
+ _mesa_error(ctx, GL_INVALID_VALUE, "glCompressedTexSubImage3D(size)");
+ goto out;
+ }
- if (width == 0 || height == 0 || depth == 0)
- return; /* no-op, not an error */
+ if (width == 0 || height == 0 || depth == 0)
+ goto out; /* no-op, not an error */
- if (ctx->Driver.CompressedTexSubImage3D) {
- (*ctx->Driver.CompressedTexSubImage3D)(ctx, target, level,
- xoffset, yoffset, zoffset,
- width, height, depth,
- format, imageSize, data,
- texObj, texImage);
+ if (ctx->Driver.CompressedTexSubImage3D) {
+ (*ctx->Driver.CompressedTexSubImage3D)(ctx, target, level,
+ xoffset, yoffset, zoffset,
+ width, height, depth,
+ format, imageSize, data,
+ texObj, texImage);
+ }
+ ctx->NewState |= _NEW_TEXTURE;
}
- ctx->NewState |= _NEW_TEXTURE;
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
_mesa_GetCompressedTexImageARB(GLenum target, GLint level, GLvoid *img)
{
const struct gl_texture_unit *texUnit;
- const struct gl_texture_object *texObj;
+ struct gl_texture_object *texObj;
struct gl_texture_image *texImage;
GLint maxLevels;
GET_CURRENT_CONTEXT(ctx);
ASSERT_OUTSIDE_BEGIN_END_AND_FLUSH(ctx);
+
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
if (!texObj) {
return;
}
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
- if (!texImage) {
- /* probably invalid mipmap level */
- _mesa_error(ctx, GL_INVALID_VALUE, "glGetCompressedTexImageARB(level)");
- return;
- }
- if (!texImage->IsCompressed) {
- _mesa_error(ctx, GL_INVALID_OPERATION, "glGetCompressedTexImageARB");
- return;
- }
+ _mesa_lock_texture(ctx, texObj);
+ {
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
+ if (!texImage) {
+ /* probably invalid mipmap level */
+ _mesa_error(ctx, GL_INVALID_VALUE, "glGetCompressedTexImageARB(level)");
+ goto out;
+ }
+
+ if (!texImage->IsCompressed) {
+ _mesa_error(ctx, GL_INVALID_OPERATION, "glGetCompressedTexImageARB");
+ goto out;
+ }
- /* this typically calls _mesa_get_compressed_teximage() */
- ctx->Driver.GetCompressedTexImage(ctx, target, level, img, texObj,texImage);
+ /* this typically calls _mesa_get_compressed_teximage() */
+ ctx->Driver.GetCompressedTexImage(ctx, target, level, img, texObj,texImage);
+ }
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
extern struct gl_texture_image *
-_mesa_select_tex_image(GLcontext *ctx, const struct gl_texture_unit *texUnit,
+_mesa_select_tex_image(GLcontext *ctx, const struct gl_texture_object *texObj,
GLenum target, GLint level);
extern struct gl_texture_image *
-_mesa_get_tex_image(GLcontext *ctx, const struct gl_texture_unit *texUnit,
+_mesa_get_tex_image(GLcontext *ctx, struct gl_texture_object *texObj,
GLenum target, GLint level);
GLint internalFormat, GLenum format, GLenum type,
GLint width, GLint height, GLint depth, GLint border);
+
+/* Lock a texture for updating. See also _mesa_lock_context_textures().
+ */
+static INLINE void _mesa_lock_texture(GLcontext *ctx,
+ struct gl_texture_object *texObj)
+{
+ _glthread_LOCK_MUTEX(ctx->Shared->TexMutex);
+ ctx->Shared->TextureStateStamp++;
+ (void) texObj;
+}
+
+static INLINE void _mesa_unlock_texture(GLcontext *ctx,
+ struct gl_texture_object *texObj)
+{
+ _glthread_UNLOCK_MUTEX(ctx->Shared->TexMutex);
+}
+
/*@}*/
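
The entry points converted above all follow the same lock / select image / check / modify / unlock shape. A minimal sketch of that pattern, for orientation only (update_image_example is a made-up helper written as it would sit inside teximage.c, with the argument checking elided):

static void
update_image_example(GLcontext *ctx, GLenum target, GLint level)
{
   struct gl_texture_unit *texUnit =
      &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
   struct gl_texture_object *texObj =
      _mesa_select_tex_object(ctx, texUnit, target);
   struct gl_texture_image *texImage;

   _mesa_lock_texture(ctx, texObj);   /* also bumps Shared->TextureStateStamp */
   {
      texImage = _mesa_select_tex_image(ctx, texObj, target, level);
      if (!texImage)
         goto out;                     /* always leave through the unlock */

      /* ... validate parameters, update texImage or call the driver ... */

      ctx->NewState |= _NEW_TEXTURE;
   }
 out:
   _mesa_unlock_texture(ctx, texObj);
}
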
if (textures[i] > 0) {
struct gl_texture_object *delObj
= _mesa_lookup_texture(ctx, textures[i]);
+
if (delObj) {
+ GLboolean delete;
+
+ _mesa_lock_texture(ctx, delObj);
/* Check if texture is bound to any framebuffer objects.
* If so, unbind.
* XXX all RefCount accesses should be protected by a mutex.
*/
delObj->RefCount--;
- if (delObj->RefCount == 0) {
+ delete = (delObj->RefCount == 0);
+ _mesa_unlock_texture(ctx, delObj);
+
+	    /* If the refcount went to zero above, this is the only
+	     * pointer left to delObj, so we no longer have to worry
+	     * about locking:
+	     */
+ if (delete) {
ASSERT(delObj->Name != 0); /* Never delete default tex objs */
ASSERT(ctx->Driver.DeleteTexture);
(*ctx->Driver.DeleteTexture)(ctx, delObj);
return t && t->Target;
}
+/* Simplest implementation of texture locking: Grab the new mutex in
+ * the shared context. Examine the shared context state timestamp and
+ * if there has been a change, set the appropriate bits in
+ * ctx->NewState.
+ *
+ * See also _mesa_lock/unlock_texture in texobj.h
+ */
+void _mesa_lock_context_textures( GLcontext *ctx )
+{
+ _glthread_LOCK_MUTEX(ctx->Shared->TexMutex);
+
+ if (ctx->Shared->TextureStateStamp != ctx->TextureStateTimestamp) {
+ ctx->NewState |= _NEW_TEXTURE;
+ ctx->TextureStateTimestamp = ctx->Shared->TextureStateStamp;
+ }
+}
+
+
+void _mesa_unlock_context_textures( GLcontext *ctx )
+{
+ assert(ctx->Shared->TextureStateStamp == ctx->TextureStateTimestamp);
+ _glthread_UNLOCK_MUTEX(ctx->Shared->TexMutex);
+}
+
/*@}*/
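
The context-level lock is intended to bracket texture state validation in the rendering path. A rough sketch of a caller, purely illustrative (validate_then_draw is a made-up function, and _mesa_update_state stands in for whatever revalidation the real caller performs):

static void
validate_then_draw(GLcontext *ctx)
{
   /* Taking the shared lock may set _NEW_TEXTURE if another context
    * touched a shared texture since we last looked.
    */
   _mesa_lock_context_textures(ctx);

   if (ctx->NewState)
      _mesa_update_state(ctx);        /* revalidate derived texture state */

   /* ... issue drawing commands that reference the bound textures ... */

   _mesa_unlock_context_textures(ctx);
}
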
+
+
_mesa_test_texobj_completeness( const GLcontext *ctx,
struct gl_texture_object *obj );
+extern void _mesa_unlock_context_textures( GLcontext *ctx );
+extern void _mesa_lock_context_textures( GLcontext *ctx );
+
/*@}*/
/*@}*/
+
#endif
dst->Texture.Unit[i].Combine.ScaleShiftA = src->Texture.Unit[i].Combine.ScaleShiftA;
/* copy texture object bindings, not contents of texture objects */
+ _mesa_lock_context_textures(dst);
+
copy_texture_binding(src, &dst->Texture.Unit[i].Current1D,
src->Texture.Unit[i].Current1D);
copy_texture_binding(src, &dst->Texture.Unit[i].Current2D,
src->Texture.Unit[i].CurrentCubeMap);
copy_texture_binding(src, &dst->Texture.Unit[i].CurrentRect,
src->Texture.Unit[i].CurrentRect);
+
+ _mesa_unlock_context_textures(dst);
}
}
GLenum pname, GLint *params )
{
const struct gl_texture_unit *texUnit;
+ struct gl_texture_object *texObj;
const struct gl_texture_image *img = NULL;
GLuint dimensions;
GLboolean isProxy;
return;
}
- img = _mesa_select_tex_image(ctx, texUnit, target, level);
+ texObj = _mesa_select_tex_object(ctx, texUnit, target);
+ _mesa_lock_texture(ctx, texObj);
+
+ img = _mesa_select_tex_image(ctx, texObj, target, level);
if (!img || !img->TexFormat) {
/* undefined texture image */
if (pname == GL_TEXTURE_COMPONENTS)
*params = 1;
else
*params = 0;
- return;
+ goto out;
}
isProxy = _mesa_is_proxy_texture(target);
switch (pname) {
case GL_TEXTURE_WIDTH:
*params = img->Width;
- return;
+ break;
case GL_TEXTURE_HEIGHT:
*params = img->Height;
- return;
+ break;
case GL_TEXTURE_DEPTH:
*params = img->Depth;
- return;
+ break;
case GL_TEXTURE_INTERNAL_FORMAT:
*params = img->InternalFormat;
- return;
+ break;
case GL_TEXTURE_BORDER:
*params = img->Border;
- return;
+ break;
case GL_TEXTURE_RED_SIZE:
if (img->_BaseFormat == GL_RGB || img->_BaseFormat == GL_RGBA)
*params = img->TexFormat->RedBits;
else
*params = 0;
- return;
+ break;
case GL_TEXTURE_GREEN_SIZE:
if (img->_BaseFormat == GL_RGB || img->_BaseFormat == GL_RGBA)
*params = img->TexFormat->GreenBits;
else
*params = 0;
- return;
+ break;
case GL_TEXTURE_BLUE_SIZE:
if (img->_BaseFormat == GL_RGB || img->_BaseFormat == GL_RGBA)
*params = img->TexFormat->BlueBits;
else
*params = 0;
- return;
+ break;
case GL_TEXTURE_ALPHA_SIZE:
if (img->_BaseFormat == GL_ALPHA ||
img->_BaseFormat == GL_LUMINANCE_ALPHA ||
*params = img->TexFormat->AlphaBits;
else
*params = 0;
- return;
+ break;
case GL_TEXTURE_INTENSITY_SIZE:
if (img->_BaseFormat != GL_INTENSITY)
*params = 0;
*params = img->TexFormat->IntensityBits;
else /* intensity probably stored as rgb texture */
*params = MIN2(img->TexFormat->RedBits, img->TexFormat->GreenBits);
- return;
+ break;
case GL_TEXTURE_LUMINANCE_SIZE:
if (img->_BaseFormat != GL_LUMINANCE &&
img->_BaseFormat != GL_LUMINANCE_ALPHA)
*params = img->TexFormat->LuminanceBits;
else /* luminance probably stored as rgb texture */
*params = MIN2(img->TexFormat->RedBits, img->TexFormat->GreenBits);
- return;
+ break;
case GL_TEXTURE_INDEX_SIZE_EXT:
if (img->_BaseFormat == GL_COLOR_INDEX)
*params = img->TexFormat->IndexBits;
else
*params = 0;
- return;
+ break;
case GL_TEXTURE_DEPTH_SIZE_ARB:
if (ctx->Extensions.SGIX_depth_texture ||
ctx->Extensions.ARB_depth_texture)
else
_mesa_error(ctx, GL_INVALID_ENUM,
"glGetTexLevelParameter[if]v(pname)");
- return;
+ break;
case GL_TEXTURE_STENCIL_SIZE_EXT:
if (ctx->Extensions.EXT_packed_depth_stencil) {
*params = img->TexFormat->StencilBits;
_mesa_error(ctx, GL_INVALID_ENUM,
"glGetTexLevelParameter[if]v(pname)");
}
- return;
+ break;
/* GL_ARB_texture_compression */
case GL_TEXTURE_COMPRESSED_IMAGE_SIZE:
_mesa_error(ctx, GL_INVALID_ENUM,
"glGetTexLevelParameter[if]v(pname)");
}
- return;
+ break;
case GL_TEXTURE_COMPRESSED:
if (ctx->Extensions.ARB_texture_compression) {
*params = (GLint) img->IsCompressed;
_mesa_error(ctx, GL_INVALID_ENUM,
"glGetTexLevelParameter[if]v(pname)");
}
- return;
+ break;
/* GL_ARB_texture_float */
case GL_TEXTURE_RED_TYPE_ARB:
_mesa_error(ctx, GL_INVALID_ENUM,
"glGetTexLevelParameter[if]v(pname)");
}
- return;
+ break;
case GL_TEXTURE_GREEN_TYPE_ARB:
if (ctx->Extensions.ARB_texture_float) {
*params = img->TexFormat->GreenBits ? img->TexFormat->DataType : GL_NONE;
_mesa_error(ctx, GL_INVALID_ENUM,
"glGetTexLevelParameter[if]v(pname)");
}
- return;
+ break;
case GL_TEXTURE_BLUE_TYPE_ARB:
if (ctx->Extensions.ARB_texture_float) {
*params = img->TexFormat->BlueBits ? img->TexFormat->DataType : GL_NONE;
_mesa_error(ctx, GL_INVALID_ENUM,
"glGetTexLevelParameter[if]v(pname)");
}
- return;
+ break;
case GL_TEXTURE_ALPHA_TYPE_ARB:
if (ctx->Extensions.ARB_texture_float) {
*params = img->TexFormat->AlphaBits ? img->TexFormat->DataType : GL_NONE;
_mesa_error(ctx, GL_INVALID_ENUM,
"glGetTexLevelParameter[if]v(pname)");
}
- return;
+ break;
case GL_TEXTURE_LUMINANCE_TYPE_ARB:
if (ctx->Extensions.ARB_texture_float) {
*params = img->TexFormat->LuminanceBits ? img->TexFormat->DataType : GL_NONE;
_mesa_error(ctx, GL_INVALID_ENUM,
"glGetTexLevelParameter[if]v(pname)");
}
- return;
+ break;
case GL_TEXTURE_INTENSITY_TYPE_ARB:
if (ctx->Extensions.ARB_texture_float) {
*params = img->TexFormat->IntensityBits ? img->TexFormat->DataType : GL_NONE;
_mesa_error(ctx, GL_INVALID_ENUM,
"glGetTexLevelParameter[if]v(pname)");
}
- return;
+ break;
case GL_TEXTURE_DEPTH_TYPE_ARB:
if (ctx->Extensions.ARB_texture_float) {
*params = img->TexFormat->DepthBits ? img->TexFormat->DataType : GL_NONE;
_mesa_error(ctx, GL_INVALID_ENUM,
"glGetTexLevelParameter[if]v(pname)");
}
- return;
+ break;
default:
_mesa_error(ctx, GL_INVALID_ENUM,
"glGetTexLevelParameter[if]v(pname)");
}
+
+ out:
+ _mesa_unlock_texture(ctx, texObj);
}
{
struct gl_texture_unit *texUnit;
struct gl_texture_object *obj;
+ GLboolean error = GL_FALSE;
GET_CURRENT_CONTEXT(ctx);
ASSERT_OUTSIDE_BEGIN_END(ctx);
return;
}
+ _mesa_lock_texture(ctx, obj);
switch (pname) {
case GL_TEXTURE_MAG_FILTER:
*params = ENUM_TO_FLOAT(obj->MagFilter);
- return;
+ break;
case GL_TEXTURE_MIN_FILTER:
*params = ENUM_TO_FLOAT(obj->MinFilter);
- return;
+ break;
case GL_TEXTURE_WRAP_S:
*params = ENUM_TO_FLOAT(obj->WrapS);
- return;
+ break;
case GL_TEXTURE_WRAP_T:
*params = ENUM_TO_FLOAT(obj->WrapT);
- return;
+ break;
case GL_TEXTURE_WRAP_R:
*params = ENUM_TO_FLOAT(obj->WrapR);
- return;
+ break;
case GL_TEXTURE_BORDER_COLOR:
params[0] = CLAMP(obj->BorderColor[0], 0.0F, 1.0F);
params[1] = CLAMP(obj->BorderColor[1], 0.0F, 1.0F);
params[2] = CLAMP(obj->BorderColor[2], 0.0F, 1.0F);
params[3] = CLAMP(obj->BorderColor[3], 0.0F, 1.0F);
- return;
+ break;
case GL_TEXTURE_RESIDENT:
{
GLboolean resident;
resident = GL_TRUE;
*params = ENUM_TO_FLOAT(resident);
}
- return;
+ break;
case GL_TEXTURE_PRIORITY:
*params = obj->Priority;
- return;
+ break;
case GL_TEXTURE_MIN_LOD:
*params = obj->MinLod;
- return;
+ break;
case GL_TEXTURE_MAX_LOD:
*params = obj->MaxLod;
- return;
+ break;
case GL_TEXTURE_BASE_LEVEL:
*params = (GLfloat) obj->BaseLevel;
- return;
+ break;
case GL_TEXTURE_MAX_LEVEL:
*params = (GLfloat) obj->MaxLevel;
- return;
+ break;
case GL_TEXTURE_MAX_ANISOTROPY_EXT:
if (ctx->Extensions.EXT_texture_filter_anisotropic) {
*params = obj->MaxAnisotropy;
- return;
}
+ else
+ error = 1;
break;
case GL_TEXTURE_COMPARE_SGIX:
if (ctx->Extensions.SGIX_shadow) {
*params = (GLfloat) obj->CompareFlag;
- return;
}
+ else
+ error = 1;
break;
case GL_TEXTURE_COMPARE_OPERATOR_SGIX:
if (ctx->Extensions.SGIX_shadow) {
*params = (GLfloat) obj->CompareOperator;
- return;
}
+ else
+ error = 1;
break;
case GL_SHADOW_AMBIENT_SGIX: /* aka GL_TEXTURE_COMPARE_FAIL_VALUE_ARB */
if (ctx->Extensions.SGIX_shadow_ambient) {
*params = obj->ShadowAmbient;
- return;
}
+ else
+ error = 1;
break;
case GL_GENERATE_MIPMAP_SGIS:
if (ctx->Extensions.SGIS_generate_mipmap) {
*params = (GLfloat) obj->GenerateMipmap;
- return;
}
+ else
+ error = 1;
break;
case GL_TEXTURE_COMPARE_MODE_ARB:
if (ctx->Extensions.ARB_shadow) {
*params = (GLfloat) obj->CompareMode;
- return;
}
+ else
+ error = 1;
break;
case GL_TEXTURE_COMPARE_FUNC_ARB:
if (ctx->Extensions.ARB_shadow) {
*params = (GLfloat) obj->CompareFunc;
- return;
}
+ else
+ error = 1;
break;
case GL_DEPTH_TEXTURE_MODE_ARB:
if (ctx->Extensions.ARB_depth_texture) {
*params = (GLfloat) obj->DepthMode;
- return;
}
+ else
+ error = 1;
break;
case GL_TEXTURE_LOD_BIAS:
if (ctx->Extensions.EXT_texture_lod_bias) {
*params = obj->LodBias;
- return;
}
+ else
+ error = 1;
break;
default:
- ; /* silence warnings */
+ error = 1;
+ break;
}
- /* If we get here, pname was an unrecognized enum */
- _mesa_error(ctx, GL_INVALID_ENUM, "glGetTexParameterfv(pname=0x%x)",
- pname);
+ if (error)
+ _mesa_error(ctx, GL_INVALID_ENUM, "glGetTexParameterfv(pname=0x%x)",
+ pname);
+
+ _mesa_unlock_texture(ctx, obj);
}
/*
* Mesa 3-D graphics library
- * Version: 6.5.1
+ * Version: 6.5.2
*
* Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
*
reg->File = file;
reg->Index = index;
- reg->Abs = 0; /* NV only */
- reg->NegateAbs = 0; /* NV only */
reg->NegateBase = negate;
reg->Swizzle = MAKE_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
return 0;
if (parse_masked_dst_reg (ctx, inst, vc_head, Program, &file, &idx, &mask))
return 1;
- reg->CondMask = 0; /* NV only */
- reg->CondSwizzle = 0; /* NV only */
reg->File = file;
reg->Index = idx;
reg->WriteMask = mask;
reg->File = File;
reg->Index = Index;
- reg->Abs = 0; /* NV only */
- reg->NegateAbs = 0; /* NV only */
reg->NegateBase = Negate;
reg->Swizzle = (Swizzle[0] << 0);
GLubyte instClass, type, code;
GLboolean rel;
- _mesa_init_instruction(fp);
+ _mesa_init_instructions(fp, 1);
/* Record the position in the program string for debugging */
fp->StringPos = Program->Position;
/* The actual opcode name */
code = *(*inst)++;
- _mesa_init_instruction(vp);
+ _mesa_init_instructions(vp, 1);
/* Record the position in the program string for debugging */
vp->StringPos = Program->Position;
/* Finally, tag on an OPCODE_END instruction */
{
const GLuint numInst = Program->Base.NumInstructions;
- _mesa_init_instruction(Program->Base.Instructions + numInst);
+ _mesa_init_instructions(Program->Base.Instructions + numInst, 1);
Program->Base.Instructions[numInst].Opcode = OPCODE_END;
/* YYY Wrong Position in program, whatever, at least not random -> crash
Program->Position = parse_position (&inst);
GLubyte token[100];
/* Initialize the instruction */
- _mesa_init_instruction(inst);
+ _mesa_init_instructions(inst, 1);
/* special instructions */
if (Parse_String(parseState, "DEFINE")) {
/*
* Mesa 3-D graphics library
- * Version: 6.5.1
+ * Version: 6.5.2
*
* Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
*
return;
}
- _mesa_init_vp_per_vertex_registers(ctx);
- _mesa_init_vp_per_primitive_registers(ctx);
- COPY_4V(ctx->VertexProgram.Machine.Inputs[VERT_ATTRIB_POS], params);
- _mesa_exec_vertex_program(ctx, vprog);
+ _mesa_exec_vertex_state_program(ctx, vprog, params);
}
/*
* Mesa 3-D graphics library
- * Version: 6.5
+ * Version: 6.5.2
*
* Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
*
* per-vertex.
*/
void
-_mesa_init_vp_per_vertex_registers(GLcontext *ctx)
+_mesa_init_vp_per_vertex_registers(GLcontext *ctx, struct vp_machine *machine)
{
/* Input registers get initialized from the current vertex attribs */
- MEMCPY(ctx->VertexProgram.Machine.Inputs, ctx->Current.Attrib,
+ MEMCPY(machine->Inputs, ctx->Current.Attrib,
MAX_VERTEX_PROGRAM_ATTRIBS * 4 * sizeof(GLfloat));
if (ctx->VertexProgram.Current->IsNVProgram) {
GLuint i;
/* Output/result regs are initialized to [0,0,0,1] */
for (i = 0; i < MAX_NV_VERTEX_PROGRAM_OUTPUTS; i++) {
- ASSIGN_4V(ctx->VertexProgram.Machine.Outputs[i], 0.0F, 0.0F, 0.0F, 1.0F);
+ ASSIGN_4V(machine->Outputs[i], 0.0F, 0.0F, 0.0F, 1.0F);
}
/* Temp regs are initialized to [0,0,0,0] */
for (i = 0; i < MAX_NV_VERTEX_PROGRAM_TEMPS; i++) {
- ASSIGN_4V(ctx->VertexProgram.Machine.Temporaries[i], 0.0F, 0.0F, 0.0F, 0.0F);
+ ASSIGN_4V(machine->Temporaries[i], 0.0F, 0.0F, 0.0F, 0.0F);
}
- ASSIGN_4V(ctx->VertexProgram.Machine.AddressReg, 0, 0, 0, 0);
+ ASSIGN_4V(machine->AddressReg, 0, 0, 0, 0);
}
}
continue;
}
- /* load the matrix */
+ /* load the matrix values into sequential registers */
if (ctx->VertexProgram.TrackMatrixTransform[i] == GL_IDENTITY_NV) {
load_matrix(ctx->VertexProgram.Parameters, i*4, mat->m);
}
* For debugging. Dump the current vertex program machine registers.
*/
void
-_mesa_dump_vp_state( const struct gl_vertex_program_state *state )
+_mesa_dump_vp_state( const struct gl_vertex_program_state *state,
+ const struct vp_machine *machine)
{
int i;
_mesa_printf("VertexIn:\n");
for (i = 0; i < MAX_NV_VERTEX_PROGRAM_INPUTS; i++) {
_mesa_printf("%d: %f %f %f %f ", i,
- state->Machine.Inputs[i][0],
- state->Machine.Inputs[i][1],
- state->Machine.Inputs[i][2],
- state->Machine.Inputs[i][3]);
+ machine->Inputs[i][0],
+ machine->Inputs[i][1],
+ machine->Inputs[i][2],
+ machine->Inputs[i][3]);
}
_mesa_printf("\n");
_mesa_printf("VertexOut:\n");
for (i = 0; i < MAX_NV_VERTEX_PROGRAM_OUTPUTS; i++) {
_mesa_printf("%d: %f %f %f %f ", i,
- state->Machine.Outputs[i][0],
- state->Machine.Outputs[i][1],
- state->Machine.Outputs[i][2],
- state->Machine.Outputs[i][3]);
+ machine->Outputs[i][0],
+ machine->Outputs[i][1],
+ machine->Outputs[i][2],
+ machine->Outputs[i][3]);
}
_mesa_printf("\n");
_mesa_printf("Registers:\n");
for (i = 0; i < MAX_NV_VERTEX_PROGRAM_TEMPS; i++) {
_mesa_printf("%d: %f %f %f %f ", i,
- state->Machine.Temporaries[i][0],
- state->Machine.Temporaries[i][1],
- state->Machine.Temporaries[i][2],
- state->Machine.Temporaries[i][3]);
+ machine->Temporaries[i][0],
+ machine->Temporaries[i][1],
+ machine->Temporaries[i][2],
+ machine->Temporaries[i][3]);
}
_mesa_printf("\n");
* source register.
*/
static INLINE const GLfloat *
-get_register_pointer( const struct prog_src_register *source,
- const struct gl_vertex_program_state *state )
+get_register_pointer( GLcontext *ctx,
+ const struct prog_src_register *source,
+ struct vp_machine *machine,
+ const struct gl_vertex_program *program )
{
if (source->RelAddr) {
- const GLint reg = source->Index + state->Machine.AddressReg[0];
+ const GLint reg = source->Index + machine->AddressReg[0];
ASSERT( (source->File == PROGRAM_ENV_PARAM) ||
(source->File == PROGRAM_STATE_VAR) );
if (reg < 0 || reg > MAX_NV_VERTEX_PROGRAM_PARAMS)
return ZeroVec;
else if (source->File == PROGRAM_ENV_PARAM)
- return state->Parameters[reg];
- else
- return state->Current->Base.Parameters->ParameterValues[reg];
+ return ctx->VertexProgram.Parameters[reg];
+ else {
+         ASSERT(source->File == PROGRAM_STATE_VAR);
+ return program->Base.Parameters->ParameterValues[reg];
+ }
}
else {
switch (source->File) {
case PROGRAM_TEMPORARY:
ASSERT(source->Index < MAX_NV_VERTEX_PROGRAM_TEMPS);
- return state->Machine.Temporaries[source->Index];
+ return machine->Temporaries[source->Index];
case PROGRAM_INPUT:
ASSERT(source->Index < MAX_NV_VERTEX_PROGRAM_INPUTS);
- return state->Machine.Inputs[source->Index];
+ return machine->Inputs[source->Index];
case PROGRAM_OUTPUT:
/* This is only needed for the PRINT instruction */
ASSERT(source->Index < MAX_NV_VERTEX_PROGRAM_OUTPUTS);
- return state->Machine.Outputs[source->Index];
+ return machine->Outputs[source->Index];
case PROGRAM_LOCAL_PARAM:
ASSERT(source->Index < MAX_PROGRAM_LOCAL_PARAMS);
- return state->Current->Base.LocalParams[source->Index];
+ return program->Base.LocalParams[source->Index];
case PROGRAM_ENV_PARAM:
ASSERT(source->Index < MAX_NV_VERTEX_PROGRAM_PARAMS);
- return state->Parameters[source->Index];
+ return ctx->VertexProgram.Parameters[source->Index];
case PROGRAM_STATE_VAR:
- ASSERT(source->Index < state->Current->Base.Parameters->NumParameters);
- return state->Current->Base.Parameters->ParameterValues[source->Index];
+ ASSERT(source->Index < program->Base.Parameters->NumParameters);
+ return program->Base.Parameters->ParameterValues[source->Index];
default:
_mesa_problem(NULL,
"Bad source register file in get_register_pointer");
* Apply swizzling and negating as needed.
*/
static INLINE void
-fetch_vector4( const struct prog_src_register *source,
- const struct gl_vertex_program_state *state,
+fetch_vector4( GLcontext *ctx,
+ const struct prog_src_register *source,
+ struct vp_machine *machine,
+ const struct gl_vertex_program *program,
GLfloat result[4] )
{
- const GLfloat *src = get_register_pointer(source, state);
-
+ const GLfloat *src = get_register_pointer(ctx, source, machine, program);
+ ASSERT(src);
+ result[0] = src[GET_SWZ(source->Swizzle, 0)];
+ result[1] = src[GET_SWZ(source->Swizzle, 1)];
+ result[2] = src[GET_SWZ(source->Swizzle, 2)];
+ result[3] = src[GET_SWZ(source->Swizzle, 3)];
if (source->NegateBase) {
- result[0] = -src[GET_SWZ(source->Swizzle, 0)];
- result[1] = -src[GET_SWZ(source->Swizzle, 1)];
- result[2] = -src[GET_SWZ(source->Swizzle, 2)];
- result[3] = -src[GET_SWZ(source->Swizzle, 3)];
- }
- else {
- result[0] = src[GET_SWZ(source->Swizzle, 0)];
- result[1] = src[GET_SWZ(source->Swizzle, 1)];
- result[2] = src[GET_SWZ(source->Swizzle, 2)];
- result[3] = src[GET_SWZ(source->Swizzle, 3)];
+ result[0] = -result[0];
+ result[1] = -result[1];
+ result[2] = -result[2];
+ result[3] = -result[3];
}
}
* As above, but only return result[0] element.
*/
static INLINE void
-fetch_vector1( const struct prog_src_register *source,
- const struct gl_vertex_program_state *state,
+fetch_vector1( GLcontext *ctx,
+ const struct prog_src_register *source,
+ struct vp_machine *machine,
+ const struct gl_vertex_program *program,
GLfloat result[4] )
{
- const GLfloat *src = get_register_pointer(source, state);
-
+ const GLfloat *src = get_register_pointer(ctx, source, machine, program);
+ ASSERT(src);
+ result[0] = src[GET_SWZ(source->Swizzle, 0)];
if (source->NegateBase) {
- result[0] = -src[GET_SWZ(source->Swizzle, 0)];
- }
- else {
- result[0] = src[GET_SWZ(source->Swizzle, 0)];
+ result[0] = -result[0];
}
}
* Store 4 floats into a register.
*/
static void
-store_vector4( const struct prog_dst_register *dest,
- struct gl_vertex_program_state *state,
+store_vector4( const struct prog_instruction *inst,
+ struct vp_machine *machine,
const GLfloat value[4] )
{
+ const struct prog_dst_register *dest = &(inst->DstReg);
GLfloat *dst;
switch (dest->File) {
- case PROGRAM_TEMPORARY:
- dst = state->Machine.Temporaries[dest->Index];
- break;
case PROGRAM_OUTPUT:
- dst = state->Machine.Outputs[dest->Index];
+ dst = machine->Outputs[dest->Index];
+ break;
+ case PROGRAM_TEMPORARY:
+ dst = machine->Temporaries[dest->Index];
break;
case PROGRAM_ENV_PARAM:
+ /* Only for VP state programs */
{
/* a slight hack */
GET_CURRENT_CONTEXT(ctx);
* Execute the given vertex program
*/
void
-_mesa_exec_vertex_program(GLcontext *ctx, const struct gl_vertex_program *program)
+_mesa_exec_vertex_program(GLcontext *ctx,
+ struct vp_machine *machine,
+ const struct gl_vertex_program *program)
{
- struct gl_vertex_program_state *state = &ctx->VertexProgram;
const struct prog_instruction *inst;
ctx->_CurrentProgram = GL_VERTEX_PROGRAM_ARB; /* or NV, doesn't matter */
* by the MVP matrix and store in the vertex position result register.
*/
if (ctx->VertexProgram.Current->IsPositionInvariant) {
- TRANSFORM_POINT( ctx->VertexProgram.Machine.Outputs[VERT_RESULT_HPOS],
+ TRANSFORM_POINT( machine->Outputs[VERT_RESULT_HPOS],
ctx->_ModelProjectMatrix.m,
- ctx->VertexProgram.Machine.Inputs[VERT_ATTRIB_POS]);
+ machine->Inputs[VERT_ATTRIB_POS]);
/* XXX: This could go elsewhere */
ctx->VertexProgram.Current->Base.OutputsWritten |= VERT_BIT_POS;
case OPCODE_MOV:
{
GLfloat t[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
- store_vector4( &inst->DstReg, state, t );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
+ store_vector4( inst, machine, t );
}
break;
case OPCODE_LIT:
{
const GLfloat epsilon = 1.0F / 256.0F; /* per NV spec */
GLfloat t[4], lit[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
t[0] = MAX2(t[0], 0.0F);
t[1] = MAX2(t[1], 0.0F);
t[3] = CLAMP(t[3], -(128.0F - epsilon), (128.0F - epsilon));
lit[1] = t[0];
lit[2] = (t[0] > 0.0) ? (GLfloat) _mesa_pow(t[1], t[3]) : 0.0F;
lit[3] = 1.0;
- store_vector4( &inst->DstReg, state, lit );
+ store_vector4( inst, machine, lit );
}
break;
case OPCODE_RCP:
{
GLfloat t[4];
- fetch_vector1( &inst->SrcReg[0], state, t );
+ fetch_vector1( ctx, &inst->SrcReg[0], machine, program, t );
if (t[0] != 1.0F)
t[0] = 1.0F / t[0]; /* div by zero is infinity! */
t[1] = t[2] = t[3] = t[0];
- store_vector4( &inst->DstReg, state, t );
+ store_vector4( inst, machine, t );
}
break;
case OPCODE_RSQ:
{
GLfloat t[4];
- fetch_vector1( &inst->SrcReg[0], state, t );
+ fetch_vector1( ctx, &inst->SrcReg[0], machine, program, t );
t[0] = INV_SQRTF(FABSF(t[0]));
t[1] = t[2] = t[3] = t[0];
- store_vector4( &inst->DstReg, state, t );
+ store_vector4( inst, machine, t );
}
break;
case OPCODE_EXP:
{
GLfloat t[4], q[4], floor_t0;
- fetch_vector1( &inst->SrcReg[0], state, t );
+ fetch_vector1( ctx, &inst->SrcReg[0], machine, program, t );
floor_t0 = FLOORF(t[0]);
if (floor_t0 > FLT_MAX_EXP) {
SET_POS_INFINITY(q[0]);
}
q[1] = t[0] - floor_t0;
q[3] = 1.0F;
- store_vector4( &inst->DstReg, state, q );
+ store_vector4( inst, machine, q );
}
break;
case OPCODE_LOG:
{
GLfloat t[4], q[4], abs_t0;
- fetch_vector1( &inst->SrcReg[0], state, t );
+ fetch_vector1( ctx, &inst->SrcReg[0], machine, program, t );
abs_t0 = FABSF(t[0]);
if (abs_t0 != 0.0F) {
/* Since we really can't handle infinite values on VMS
SET_NEG_INFINITY(q[2]);
}
q[3] = 1.0;
- store_vector4( &inst->DstReg, state, q );
+ store_vector4( inst, machine, q );
}
break;
case OPCODE_MUL:
{
GLfloat t[4], u[4], prod[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
- fetch_vector4( &inst->SrcReg[1], state, u );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
+ fetch_vector4( ctx, &inst->SrcReg[1], machine, program, u );
prod[0] = t[0] * u[0];
prod[1] = t[1] * u[1];
prod[2] = t[2] * u[2];
prod[3] = t[3] * u[3];
- store_vector4( &inst->DstReg, state, prod );
+ store_vector4( inst, machine, prod );
}
break;
case OPCODE_ADD:
{
GLfloat t[4], u[4], sum[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
- fetch_vector4( &inst->SrcReg[1], state, u );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
+ fetch_vector4( ctx, &inst->SrcReg[1], machine, program, u );
sum[0] = t[0] + u[0];
sum[1] = t[1] + u[1];
sum[2] = t[2] + u[2];
sum[3] = t[3] + u[3];
- store_vector4( &inst->DstReg, state, sum );
+ store_vector4( inst, machine, sum );
}
break;
case OPCODE_DP3:
{
GLfloat t[4], u[4], dot[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
- fetch_vector4( &inst->SrcReg[1], state, u );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
+ fetch_vector4( ctx, &inst->SrcReg[1], machine, program, u );
dot[0] = t[0] * u[0] + t[1] * u[1] + t[2] * u[2];
dot[1] = dot[2] = dot[3] = dot[0];
- store_vector4( &inst->DstReg, state, dot );
+ store_vector4( inst, machine, dot );
}
break;
case OPCODE_DP4:
{
GLfloat t[4], u[4], dot[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
- fetch_vector4( &inst->SrcReg[1], state, u );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
+ fetch_vector4( ctx, &inst->SrcReg[1], machine, program, u );
dot[0] = t[0] * u[0] + t[1] * u[1] + t[2] * u[2] + t[3] * u[3];
dot[1] = dot[2] = dot[3] = dot[0];
- store_vector4( &inst->DstReg, state, dot );
+ store_vector4( inst, machine, dot );
}
break;
case OPCODE_DST:
{
GLfloat t[4], u[4], dst[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
- fetch_vector4( &inst->SrcReg[1], state, u );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
+ fetch_vector4( ctx, &inst->SrcReg[1], machine, program, u );
dst[0] = 1.0F;
dst[1] = t[1] * u[1];
dst[2] = t[2];
dst[3] = u[3];
- store_vector4( &inst->DstReg, state, dst );
+ store_vector4( inst, machine, dst );
}
break;
case OPCODE_MIN:
{
GLfloat t[4], u[4], min[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
- fetch_vector4( &inst->SrcReg[1], state, u );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
+ fetch_vector4( ctx, &inst->SrcReg[1], machine, program, u );
min[0] = (t[0] < u[0]) ? t[0] : u[0];
min[1] = (t[1] < u[1]) ? t[1] : u[1];
min[2] = (t[2] < u[2]) ? t[2] : u[2];
min[3] = (t[3] < u[3]) ? t[3] : u[3];
- store_vector4( &inst->DstReg, state, min );
+ store_vector4( inst, machine, min );
}
break;
case OPCODE_MAX:
{
GLfloat t[4], u[4], max[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
- fetch_vector4( &inst->SrcReg[1], state, u );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
+ fetch_vector4( ctx, &inst->SrcReg[1], machine, program, u );
max[0] = (t[0] > u[0]) ? t[0] : u[0];
max[1] = (t[1] > u[1]) ? t[1] : u[1];
max[2] = (t[2] > u[2]) ? t[2] : u[2];
max[3] = (t[3] > u[3]) ? t[3] : u[3];
- store_vector4( &inst->DstReg, state, max );
+ store_vector4( inst, machine, max );
}
break;
case OPCODE_SLT:
{
GLfloat t[4], u[4], slt[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
- fetch_vector4( &inst->SrcReg[1], state, u );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
+ fetch_vector4( ctx, &inst->SrcReg[1], machine, program, u );
slt[0] = (t[0] < u[0]) ? 1.0F : 0.0F;
slt[1] = (t[1] < u[1]) ? 1.0F : 0.0F;
slt[2] = (t[2] < u[2]) ? 1.0F : 0.0F;
slt[3] = (t[3] < u[3]) ? 1.0F : 0.0F;
- store_vector4( &inst->DstReg, state, slt );
+ store_vector4( inst, machine, slt );
}
break;
case OPCODE_SGE:
{
GLfloat t[4], u[4], sge[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
- fetch_vector4( &inst->SrcReg[1], state, u );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
+ fetch_vector4( ctx, &inst->SrcReg[1], machine, program, u );
sge[0] = (t[0] >= u[0]) ? 1.0F : 0.0F;
sge[1] = (t[1] >= u[1]) ? 1.0F : 0.0F;
sge[2] = (t[2] >= u[2]) ? 1.0F : 0.0F;
sge[3] = (t[3] >= u[3]) ? 1.0F : 0.0F;
- store_vector4( &inst->DstReg, state, sge );
+ store_vector4( inst, machine, sge );
}
break;
case OPCODE_MAD:
{
GLfloat t[4], u[4], v[4], sum[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
- fetch_vector4( &inst->SrcReg[1], state, u );
- fetch_vector4( &inst->SrcReg[2], state, v );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
+ fetch_vector4( ctx, &inst->SrcReg[1], machine, program, u );
+ fetch_vector4( ctx, &inst->SrcReg[2], machine, program, v );
sum[0] = t[0] * u[0] + v[0];
sum[1] = t[1] * u[1] + v[1];
sum[2] = t[2] * u[2] + v[2];
sum[3] = t[3] * u[3] + v[3];
- store_vector4( &inst->DstReg, state, sum );
+ store_vector4( inst, machine, sum );
}
break;
case OPCODE_ARL:
{
GLfloat t[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
- state->Machine.AddressReg[0] = (GLint) FLOORF(t[0]);
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
+ machine->AddressReg[0] = (GLint) FLOORF(t[0]);
}
break;
case OPCODE_DPH:
{
GLfloat t[4], u[4], dot[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
- fetch_vector4( &inst->SrcReg[1], state, u );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
+ fetch_vector4( ctx, &inst->SrcReg[1], machine, program, u );
dot[0] = t[0] * u[0] + t[1] * u[1] + t[2] * u[2] + u[3];
dot[1] = dot[2] = dot[3] = dot[0];
- store_vector4( &inst->DstReg, state, dot );
+ store_vector4( inst, machine, dot );
}
break;
case OPCODE_RCC:
{
GLfloat t[4], u;
- fetch_vector1( &inst->SrcReg[0], state, t );
+ fetch_vector1( ctx, &inst->SrcReg[0], machine, program, t );
if (t[0] == 1.0F)
u = 1.0F;
else
}
}
t[0] = t[1] = t[2] = t[3] = u;
- store_vector4( &inst->DstReg, state, t );
+ store_vector4( inst, machine, t );
}
break;
case OPCODE_SUB: /* GL_NV_vertex_program1_1 */
{
GLfloat t[4], u[4], sum[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
- fetch_vector4( &inst->SrcReg[1], state, u );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
+ fetch_vector4( ctx, &inst->SrcReg[1], machine, program, u );
sum[0] = t[0] - u[0];
sum[1] = t[1] - u[1];
sum[2] = t[2] - u[2];
sum[3] = t[3] - u[3];
- store_vector4( &inst->DstReg, state, sum );
+ store_vector4( inst, machine, sum );
}
break;
case OPCODE_ABS: /* GL_NV_vertex_program1_1 */
{
GLfloat t[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
if (t[0] < 0.0) t[0] = -t[0];
if (t[1] < 0.0) t[1] = -t[1];
if (t[2] < 0.0) t[2] = -t[2];
if (t[3] < 0.0) t[3] = -t[3];
- store_vector4( &inst->DstReg, state, t );
+ store_vector4( inst, machine, t );
}
break;
case OPCODE_FLR: /* GL_ARB_vertex_program */
{
GLfloat t[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
t[0] = FLOORF(t[0]);
t[1] = FLOORF(t[1]);
t[2] = FLOORF(t[2]);
t[3] = FLOORF(t[3]);
- store_vector4( &inst->DstReg, state, t );
+ store_vector4( inst, machine, t );
}
break;
case OPCODE_FRC: /* GL_ARB_vertex_program */
{
GLfloat t[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
t[0] = t[0] - FLOORF(t[0]);
t[1] = t[1] - FLOORF(t[1]);
t[2] = t[2] - FLOORF(t[2]);
t[3] = t[3] - FLOORF(t[3]);
- store_vector4( &inst->DstReg, state, t );
+ store_vector4( inst, machine, t );
}
break;
case OPCODE_EX2: /* GL_ARB_vertex_program */
{
GLfloat t[4];
- fetch_vector1( &inst->SrcReg[0], state, t );
+ fetch_vector1( ctx, &inst->SrcReg[0], machine, program, t );
t[0] = t[1] = t[2] = t[3] = (GLfloat)_mesa_pow(2.0, t[0]);
- store_vector4( &inst->DstReg, state, t );
+ store_vector4( inst, machine, t );
}
break;
case OPCODE_LG2: /* GL_ARB_vertex_program */
{
GLfloat t[4];
- fetch_vector1( &inst->SrcReg[0], state, t );
+ fetch_vector1( ctx, &inst->SrcReg[0], machine, program, t );
t[0] = t[1] = t[2] = t[3] = LOG2(t[0]);
- store_vector4( &inst->DstReg, state, t );
+ store_vector4( inst, machine, t );
}
break;
case OPCODE_POW: /* GL_ARB_vertex_program */
{
GLfloat t[4], u[4];
- fetch_vector1( &inst->SrcReg[0], state, t );
- fetch_vector1( &inst->SrcReg[1], state, u );
+ fetch_vector1( ctx, &inst->SrcReg[0], machine, program, t );
+ fetch_vector1( ctx, &inst->SrcReg[1], machine, program, u );
t[0] = t[1] = t[2] = t[3] = (GLfloat)_mesa_pow(t[0], u[0]);
- store_vector4( &inst->DstReg, state, t );
+ store_vector4( inst, machine, t );
}
break;
case OPCODE_XPD: /* GL_ARB_vertex_program */
{
GLfloat t[4], u[4], cross[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
- fetch_vector4( &inst->SrcReg[1], state, u );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
+ fetch_vector4( ctx, &inst->SrcReg[1], machine, program, u );
cross[0] = t[1] * u[2] - t[2] * u[1];
cross[1] = t[2] * u[0] - t[0] * u[2];
cross[2] = t[0] * u[1] - t[1] * u[0];
- store_vector4( &inst->DstReg, state, cross );
+ store_vector4( inst, machine, cross );
}
break;
case OPCODE_SWZ: /* GL_ARB_vertex_program */
{
const struct prog_src_register *source = &inst->SrcReg[0];
- const GLfloat *src = get_register_pointer(source, state);
+ const GLfloat *src = get_register_pointer(ctx, source,
+ machine, program);
GLfloat result[4];
GLuint i;
/* do extended swizzling here */
for (i = 0; i < 4; i++) {
- if (GET_SWZ(source->Swizzle, i) == SWIZZLE_ZERO)
+ const GLuint swz = GET_SWZ(source->Swizzle, i);
+ if (swz == SWIZZLE_ZERO)
result[i] = 0.0;
- else if (GET_SWZ(source->Swizzle, i) == SWIZZLE_ONE)
+ else if (swz == SWIZZLE_ONE)
result[i] = 1.0;
- else
- result[i] = src[GET_SWZ(source->Swizzle, i)];
+ else {
+               ASSERT(swz <= 3);
+ result[i] = src[swz];
+ }
if (source->NegateBase & (1 << i))
result[i] = -result[i];
}
- store_vector4( &inst->DstReg, state, result );
+ store_vector4( inst, machine, result );
}
break;
case OPCODE_PRINT:
if (inst->SrcReg[0].File) {
GLfloat t[4];
- fetch_vector4( &inst->SrcReg[0], state, t );
+ fetch_vector4( ctx, &inst->SrcReg[0], machine, program, t );
_mesa_printf("%s%g, %g, %g, %g\n",
(char *) inst->Data, t[0], t[1], t[2], t[3]);
}
}
-
/**
-Thoughts on vertex program optimization:
-
-The obvious thing to do is to compile the vertex program into X86/SSE/3DNow!
-assembly code. That will probably be a lot of work.
-
-Another approach might be to replace the vp_instruction->Opcode field with
-a pointer to a specialized C function which executes the instruction.
-In particular we can write functions which skip swizzling, negating,
-masking, relative addressing, etc. when they're not needed.
-
-For example:
-
-void simple_add( struct prog_instruction *inst )
+ * Execute a vertex state program.
+ * \sa _mesa_ExecuteProgramNV
+ */
+void
+_mesa_exec_vertex_state_program(GLcontext *ctx,
+ struct gl_vertex_program *vprog,
+ const GLfloat *params)
{
- GLfloat *sum = machine->Registers[inst->DstReg.Register];
- GLfloat *a = machine->Registers[inst->SrcReg[0].Register];
- GLfloat *b = machine->Registers[inst->SrcReg[1].Register];
- sum[0] = a[0] + b[0];
- sum[1] = a[1] + b[1];
- sum[2] = a[2] + b[2];
- sum[3] = a[3] + b[3];
+ struct vp_machine machine;
+ _mesa_init_vp_per_vertex_registers(ctx, &machine);
+ _mesa_init_vp_per_primitive_registers(ctx);
+ COPY_4V(machine.Inputs[VERT_ATTRIB_POS], params);
+ _mesa_exec_vertex_program(ctx, &machine, vprog);
}
-
-*/
-
-/*
-
-KW:
-
-A first step would be to 'vectorize' the programs in the same way as
-the normal transformation code in the tnl module. Thus each opcode
-takes zero or more input vectors (registers) and produces one or more
-output vectors.
-
-These operations would intially be coded in C, with machine-specific
-assembly following, as is currently the case for matrix
-transformations in the math/ directory. The preprocessing scheme for
-selecting simpler operations Brian describes above would also work
-here.
-
-This should give reasonable performance without excessive effort.
-
-*/
/*
* Mesa 3-D graphics library
- * Version: 6.1
+ * Version: 6.5.2
*
- * Copyright (C) 1999-2004 Brian Paul All Rights Reserved.
+ * Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
#ifndef NVVERTEXEC_H
#define NVVERTEXEC_H
+
+/**
+ * Virtual vertex program machine state.
+ * Only used during program execution.
+ */
+struct vp_machine
+{
+ GLfloat Temporaries[MAX_NV_VERTEX_PROGRAM_TEMPS][4];
+ GLfloat Inputs[MAX_NV_VERTEX_PROGRAM_INPUTS][4];
+ GLuint InputsSize[MAX_NV_VERTEX_PROGRAM_INPUTS];
+ GLfloat Outputs[MAX_NV_VERTEX_PROGRAM_OUTPUTS][4];
+ GLint AddressReg[4];
+};
+
+
+
extern void
-_mesa_init_vp_per_vertex_registers(GLcontext *ctx);
+_mesa_init_vp_per_vertex_registers(GLcontext *ctx, struct vp_machine *machine);
extern void
_mesa_init_vp_per_primitive_registers(GLcontext *ctx);
extern void
-_mesa_exec_vertex_program(GLcontext *ctx, const struct gl_vertex_program *program);
+_mesa_exec_vertex_program(GLcontext *ctx,
+ struct vp_machine *machine,
+ const struct gl_vertex_program *program);
+
+extern void
+_mesa_exec_vertex_state_program(GLcontext *ctx,
+ struct gl_vertex_program *vprog,
+ const GLfloat *params);
extern void
-_mesa_dump_vp_state( const struct gl_vertex_program_state *state );
+_mesa_dump_vp_state( const struct gl_vertex_program_state *state,
+ const struct vp_machine *machine);
#endif
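
For reference, a minimal sketch (not part of the patch; the helper name is made up) of how a caller is expected to use the interface declared above: the caller now owns a vp_machine, initializes the per-vertex registers into it, fills the input attributes, and passes it to _mesa_exec_vertex_program. This mirrors the new _mesa_exec_vertex_state_program and the tnl changes later in this diff. It assumes the usual Mesa internal headers (context.h, macros.h, nvvertexec.h).

/* Sketch only: run a vertex program for a single position attribute. */
static void
run_one_vertex(GLcontext *ctx, const struct gl_vertex_program *prog,
               const GLfloat pos[4])
{
   struct vp_machine machine;                 /* caller-owned machine state */
   _mesa_init_vp_per_vertex_registers(ctx, &machine);
   _mesa_init_vp_per_primitive_registers(ctx);
   COPY_4V(machine.Inputs[VERT_ATTRIB_POS], pos);
   _mesa_exec_vertex_program(ctx, &machine, prog);
   /* results are now in machine.Outputs[VERT_RESULT_*] */
}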
struct prog_instruction *inst = program + parseState->numInst;
/* Initialize the instruction */
- _mesa_init_instruction(inst);
+ _mesa_init_instructions(inst, 1);
if (Parse_String(parseState, "MOV")) {
if (!Parse_UnaryOpInstruction(parseState, inst, OPCODE_MOV))
/*
* Mesa 3-D graphics library
- * Version: 6.5.1
+ * Version: 6.5.2
*
* Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
*
static const char *
make_state_string(const GLint stateTokens[6]);
-static GLuint
+static GLbitfield
make_state_flags(const GLint state[]);
paramList->Parameters[index].StateIndexes[i]
= (enum state_index) stateTokens[i];
}
- paramList->StateFlags |=
- make_state_flags(stateTokens);
+ paramList->StateFlags |= make_state_flags(stateTokens);
}
/* free name string here since we duplicated it in add_parameter() */
{
/* state[1] is either 0=front or 1=back side */
const GLuint face = (GLuint) state[1];
+ const struct gl_material *mat = &ctx->Light.Material;
+ ASSERT(face == 0 || face == 1);
+ /* We rely on the material attribute tokens being numbered so that _BACK_ == _FRONT_ + 1 */
+ ASSERT(MAT_ATTRIB_FRONT_AMBIENT + 1 == MAT_ATTRIB_BACK_AMBIENT);
+ /* XXX we could get rid of this switch entirely with a little
+ * work in arbprogparse.c's parse_state_single_item().
+ */
/* state[2] is the material attribute */
switch (state[2]) {
case STATE_AMBIENT:
- if (face == 0)
- COPY_4V(value, ctx->Light.Material.Attrib[MAT_ATTRIB_FRONT_AMBIENT]);
- else
- COPY_4V(value, ctx->Light.Material.Attrib[MAT_ATTRIB_BACK_AMBIENT]);
+ COPY_4V(value, mat->Attrib[MAT_ATTRIB_FRONT_AMBIENT + face]);
return;
case STATE_DIFFUSE:
- if (face == 0)
- COPY_4V(value, ctx->Light.Material.Attrib[MAT_ATTRIB_FRONT_DIFFUSE]);
- else
- COPY_4V(value, ctx->Light.Material.Attrib[MAT_ATTRIB_BACK_DIFFUSE]);
+ COPY_4V(value, mat->Attrib[MAT_ATTRIB_FRONT_DIFFUSE + face]);
return;
case STATE_SPECULAR:
- if (face == 0)
- COPY_4V(value, ctx->Light.Material.Attrib[MAT_ATTRIB_FRONT_SPECULAR]);
- else
- COPY_4V(value, ctx->Light.Material.Attrib[MAT_ATTRIB_BACK_SPECULAR]);
+ COPY_4V(value, mat->Attrib[MAT_ATTRIB_FRONT_SPECULAR + face]);
return;
case STATE_EMISSION:
- if (face == 0)
- COPY_4V(value, ctx->Light.Material.Attrib[MAT_ATTRIB_FRONT_EMISSION]);
- else
- COPY_4V(value, ctx->Light.Material.Attrib[MAT_ATTRIB_BACK_EMISSION]);
+ COPY_4V(value, mat->Attrib[MAT_ATTRIB_FRONT_EMISSION + face]);
return;
case STATE_SHININESS:
- if (face == 0)
- value[0] = ctx->Light.Material.Attrib[MAT_ATTRIB_FRONT_SHININESS][0];
- else
- value[0] = ctx->Light.Material.Attrib[MAT_ATTRIB_BACK_SHININESS][0];
+ value[0] = mat->Attrib[MAT_ATTRIB_FRONT_SHININESS + face][0];
value[1] = 0.0F;
value[2] = 0.0F;
value[3] = 1.0F;
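
The "+ face" indexing above depends on the attribute token ordering that the ASSERT checks. A small illustration (enum names and values are assumed for the example, not copied from mtypes.h):

/* Illustration only: front/back tokens must be interleaved so that each
 * _BACK_ value is exactly its _FRONT_ counterpart plus one. */
enum example_mat_attrib {
   EX_MAT_ATTRIB_FRONT_AMBIENT = 0,
   EX_MAT_ATTRIB_BACK_AMBIENT  = 1,   /* FRONT_AMBIENT + 1 */
   EX_MAT_ATTRIB_FRONT_DIFFUSE = 2,
   EX_MAT_ATTRIB_BACK_DIFFUSE  = 3    /* FRONT_DIFFUSE + 1 */
};
/* With face = 0 (front) or 1 (back), FRONT_DIFFUSE + face selects the
 * correct side, which is what lets the switch above drop its per-case
 * if/else branches. */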
/**
- * Return a bit mask of the Mesa state flags under which a parameter's
- * value might change.
+ * Return a bitmask of the Mesa state flags (_NEW_* values) which would
+ * indicate that the given context state may have changed.
+ * The bitmask is used during validation to determine if we need to update
+ * vertex/fragment program parameters (like "state.material.color") when
+ * some GL state has changed.
*/
-static GLuint make_state_flags(const GLint state[])
+static GLbitfield
+make_state_flags(const GLint state[])
{
switch (state[0]) {
case STATE_MATERIAL:
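
To show how the returned GLbitfield is meant to be consumed, here is a sketch (helper name illustrative; it assumes the parameter list type is struct gl_program_parameter_list, as used elsewhere in this diff): a validation pass can skip the parameter reload entirely when none of the relevant _NEW_* bits are set.

/* Sketch: only reload program parameters when dependent state changed. */
static void
update_program_parameters(GLcontext *ctx,
                          struct gl_program_parameter_list *params,
                          GLbitfield newState)
{
   if (params->StateFlags & newState)
      _mesa_load_state_parameters(ctx, params);
}

This is the same pattern applied to the swrast fragment-program update later in this diff.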
/**
* Initialize program instruction fields to defaults.
+ * \param inst first instruction to initialize
+ * \param count number of instructions to initialize
*/
void
-_mesa_init_instruction(struct prog_instruction *inst)
+_mesa_init_instructions(struct prog_instruction *inst, GLuint count)
{
- _mesa_bzero(inst, sizeof(struct prog_instruction));
-
- inst->SrcReg[0].File = PROGRAM_UNDEFINED;
- inst->SrcReg[0].Swizzle = SWIZZLE_NOOP;
- inst->SrcReg[1].File = PROGRAM_UNDEFINED;
- inst->SrcReg[1].Swizzle = SWIZZLE_NOOP;
- inst->SrcReg[2].File = PROGRAM_UNDEFINED;
- inst->SrcReg[2].Swizzle = SWIZZLE_NOOP;
-
- inst->DstReg.File = PROGRAM_UNDEFINED;
- inst->DstReg.WriteMask = WRITEMASK_XYZW;
- inst->DstReg.CondMask = COND_TR;
- inst->DstReg.CondSwizzle = SWIZZLE_NOOP;
-
- inst->SaturateMode = SATURATE_OFF;
- inst->Precision = FLOAT32;
+ GLuint i;
+
+ _mesa_bzero(inst, count * sizeof(struct prog_instruction));
+
+ for (i = 0; i < count; i++) {
+ inst[i].SrcReg[0].File = PROGRAM_UNDEFINED;
+ inst[i].SrcReg[0].Swizzle = SWIZZLE_NOOP;
+ inst[i].SrcReg[1].File = PROGRAM_UNDEFINED;
+ inst[i].SrcReg[1].Swizzle = SWIZZLE_NOOP;
+ inst[i].SrcReg[2].File = PROGRAM_UNDEFINED;
+ inst[i].SrcReg[2].Swizzle = SWIZZLE_NOOP;
+
+ inst[i].DstReg.File = PROGRAM_UNDEFINED;
+ inst[i].DstReg.WriteMask = WRITEMASK_XYZW;
+ inst[i].DstReg.CondMask = COND_TR;
+ inst[i].DstReg.CondSwizzle = SWIZZLE_NOOP;
+
+ inst[i].SaturateMode = SATURATE_OFF;
+ inst[i].Precision = FLOAT32;
+ }
}
"glGetProgramRegisterfvMESA(registerName)");
return;
}
- COPY_4V(v, ctx->VertexProgram.Machine.Temporaries[i]);
+#if 0 /* FIX ME */
+ ctx->Driver.GetVertexProgramRegister(ctx, PROGRAM_TEMPORARY, i, v);
+#endif
}
else if (reg[0] == 'v' && reg[1] == '[') {
/* Vertex Input attribute */
_mesa_sprintf(number, "%d", i);
if (_mesa_strncmp(reg + 2, name, 4) == 0 ||
_mesa_strncmp(reg + 2, number, _mesa_strlen(number)) == 0) {
- COPY_4V(v, ctx->VertexProgram.Machine.Inputs[i]);
+#if 0 /* FIX ME */
+ ctx->Driver.GetVertexProgramRegister(ctx, PROGRAM_INPUT,
+ i, v);
+#endif
return;
}
}
extern void
-_mesa_init_instruction(struct prog_instruction *inst);
+_mesa_init_instructions(struct prog_instruction *inst, GLuint count);
extern GLuint
_mesa_num_inst_src_regs(enum prog_opcode opcode);
* newInst[2] = DP4 result.position.z, mvp.row[2], vertex.position;
* newInst[3] = DP4 result.position.w, mvp.row[3], vertex.position;
*/
+ _mesa_init_instructions(newInst, 4);
for (i = 0; i < 4; i++) {
- _mesa_init_instruction(newInst + i);
newInst[i].Opcode = OPCODE_DP4;
newInst[i].DstReg.File = PROGRAM_OUTPUT;
newInst[i].DstReg.Index = VERT_RESULT_HPOS;
GLfloat fogVals[4];
GLuint fogConsts; /* constant values for EXP, EXP2 mode */
- if (fprog->FogOption != GL_NONE) {
+ if (fprog->FogOption == GL_NONE) {
_mesa_problem(ctx, "_mesa_append_fog_code() called for fragment program"
" with FogOption == GL_NONE");
return;
}
assert(inst->Opcode == OPCODE_END); /* we'll overwrite this inst */
- for (i = 0; i < 6; i++)
- _mesa_init_instruction(inst + i);
+ _mesa_init_instructions(inst, 6);
/* emit instructions to compute fog blending factor */
if (fprog->FogOption == GL_LINEAR) {
/*
* Mesa 3-D graphics library
- * Version: 6.5
+ * Version: 6.6
*
* Copyright (C) 2006 Brian Paul All Rights Reserved.
*
(**pro).UpdateFixedVarying(pro, SLANG_FRAGMENT_FIXED_FRAGCOLOR,
vec, 0, 4 * sizeof(GLfloat), GL_FALSE);
COPY_4V(span->array->color.sz4.rgba[i], vec);
+
+ (**pro).UpdateFixedVarying(pro, SLANG_FRAGMENT_FIXED_FRAGDEPTH, vec, 0,
+ sizeof (GLfloat), GL_FALSE);
+ if (vec[0] <= 0.0f)
+ span->array->z[i] = 0;
+ else if (vec[0] >= 1.0f)
+ span->array->z[i] = ctx->DrawBuffer->_DepthMax;
+ else
+ span->array->z[i] = IROUND(vec[0] * ctx->DrawBuffer->_DepthMaxF);
}
}
}
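
The new fragment-depth path above clamps the shader-written value to [0, 1] before scaling it to the depth buffer's range. A worked example (values illustrative; assumes IROUND rounds to nearest):

/* For a 16-bit depth buffer: _DepthMax = 0xffff, _DepthMaxF = 65535.0f */
GLfloat depth = 0.5f;
GLuint z;
if (depth <= 0.0f)
   z = 0;
else if (depth >= 1.0f)
   z = 0xffff;                              /* _DepthMax */
else
   z = (GLuint) (depth * 65535.0f + 0.5f);  /* IROUND(0.5 * _DepthMaxF) == 32768 */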
/**
* Called via the device driver's ctx->Driver.Clear() function if the
* device driver can't clear one or more of the buffers itself.
- * \param mask bitfield of BUFER_BIT_* values indicating which renderbuffers
- * are to be cleared.
+ * \param buffers bitfield of BUFFER_BIT_* values indicating which
+ * renderbuffers are to be cleared.
- * \param all if GL_TRUE, clear whole buffer, else clear specified region.
*/
void
-_swrast_Clear(GLcontext *ctx, GLbitfield mask,
- GLboolean all, GLint x, GLint y, GLint width, GLint height)
+_swrast_Clear(GLcontext *ctx, GLbitfield buffers)
{
SWcontext *swrast = SWRAST_CONTEXT(ctx);
- (void) all; (void) x; (void) y; (void) width; (void) height;
-
#ifdef DEBUG_FOO
{
const GLbitfield legalBits =
BUFFER_BIT_AUX1 |
BUFFER_BIT_AUX2 |
BUFFER_BIT_AUX3;
- assert((mask & (~legalBits)) == 0);
+ assert((buffers & (~legalBits)) == 0);
}
#endif
RENDER_START(swrast,ctx);
/* do software clearing here */
- if (mask) {
- if (mask & ctx->DrawBuffer->_ColorDrawBufferMask[0]) {
+ if (buffers) {
+ if (buffers & ctx->DrawBuffer->_ColorDrawBufferMask[0]) {
clear_color_buffers(ctx);
}
- if (mask & BUFFER_BIT_DEPTH) {
+ if (buffers & BUFFER_BIT_DEPTH) {
_swrast_clear_depth_buffer(ctx, ctx->DrawBuffer->_DepthBuffer);
}
- if (mask & BUFFER_BIT_ACCUM) {
+ if (buffers & BUFFER_BIT_ACCUM) {
_swrast_clear_accum_buffer(ctx,
ctx->DrawBuffer->Attachment[BUFFER_ACCUM].Renderbuffer);
}
- if (mask & BUFFER_BIT_STENCIL) {
+ if (buffers & BUFFER_BIT_STENCIL) {
_swrast_clear_stencil_buffer(ctx, ctx->DrawBuffer->_StencilBuffer);
}
}
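
With the simplified signature, a driver that can clear only some buffers in hardware can hand the remainder to swrast. A hypothetical fallback, for illustration only (the driver-side function names are made up):

static void
example_driver_clear(GLcontext *ctx, GLbitfield buffers)
{
   const GLbitfield hwBits = BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL;
   if (buffers & hwBits)
      example_hw_clear_depth_stencil(ctx, buffers & hwBits);  /* hypothetical */
   if (buffers & ~hwBits)
      _swrast_Clear(ctx, buffers & ~hwBits);
}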
* program parameters with current state values.
*/
static void
-_swrast_update_fragment_program( GLcontext *ctx )
+_swrast_update_fragment_program(GLcontext *ctx, GLbitfield newState)
{
if (ctx->FragmentProgram._Enabled) {
const struct gl_fragment_program *fp = ctx->FragmentProgram._Current;
- _mesa_load_state_parameters(ctx, fp->Base.Parameters);
+ if (fp->Base.Parameters->StateFlags & newState)
+ _mesa_load_state_parameters(ctx, fp->Base.Parameters);
}
}
if (swrast->NewState & (_NEW_FOG | _NEW_PROGRAM))
_swrast_update_fog_state( ctx );
- if (swrast->NewState & _NEW_PROGRAM)
- _swrast_update_fragment_program( ctx );
+ if (swrast->NewState & (_NEW_MODELVIEW |
+ _NEW_PROJECTION |
+ _NEW_TEXTURE_MATRIX |
+ _NEW_FOG |
+ _NEW_LIGHT |
+ _NEW_LINE |
+ _NEW_TEXTURE |
+ _NEW_TRANSFORM |
+ _NEW_POINT |
+ _NEW_VIEWPORT |
+ _NEW_PROGRAM))
+ _swrast_update_fragment_program( ctx, swrast->NewState );
if (swrast->NewState & _NEW_TEXTURE)
_swrast_update_texture_samplers( ctx );
#include "s_span.h"
-/* if 1, print some debugging info */
+/* See comments below for info about this */
+#define LAMBDA_ZERO 1
+
+/* debug predicate */
#define DEBUG_FRAG 0
ASSERT(source->Index < (GLint) program->Base.Parameters->NumParameters);
return program->Base.Parameters->ParameterValues[source->Index];
default:
- _mesa_problem(ctx, "Invalid input register file %d in fetch_vector4",
- source->File);
+ _mesa_problem(ctx, "Invalid input register file %d in fp "
+ "get_register_pointer", source->File);
return NULL;
}
}
return;
}
-#if DEBUG_FRAG
+#if 0
if (value[0] > 1.0e10 ||
IS_INF_OR_NAN(value[0]) ||
IS_INF_OR_NAN(value[1]) ||
{
GLuint pc;
-#if DEBUG_FRAG
- printf("execute fragment program --------------------\n");
-#endif
+ if (DEBUG_FRAG) {
+ printf("execute fragment program --------------------\n");
+ }
for (pc = 0; pc < maxInst; pc++) {
const struct prog_instruction *inst = program->Base.Instructions + pc;
ctx->FragmentProgram.CallbackData);
}
+ if (DEBUG_FRAG) {
+ _mesa_print_instruction(inst);
+ }
+
switch (inst->Opcode) {
case OPCODE_ABS:
{
result[2] = a[2] + b[2];
result[3] = a[3] + b[3];
store_vector4( inst, machine, result );
+ if (DEBUG_FRAG) {
+ printf("ADD (%g %g %g %g) = (%g %g %g %g) + (%g %g %g %g)\n",
+ result[0], result[1], result[2], result[3],
+ a[0], a[1], a[2], a[3],
+ b[0], b[1], b[2], b[3]);
+ }
}
break;
case OPCODE_CMP:
fetch_vector4( ctx, &inst->SrcReg[1], machine, program, b );
result[0] = result[1] = result[2] = result[3] = DOT3(a, b);
store_vector4( inst, machine, result );
-#if DEBUG_FRAG
- printf("DP3 %g = (%g %g %g) . (%g %g %g)\n",
- result[0], a[0], a[1], a[2], b[0], b[1], b[2]);
-#endif
+ if (DEBUG_FRAG) {
+ printf("DP3 %g = (%g %g %g) . (%g %g %g)\n",
+ result[0], a[0], a[1], a[2], b[0], b[1], b[2]);
+ }
}
break;
case OPCODE_DP4:
fetch_vector4( ctx, &inst->SrcReg[1], machine, program, b );
result[0] = result[1] = result[2] = result[3] = DOT4(a,b);
store_vector4( inst, machine, result );
-#if DEBUG_FRAG
- printf("DP4 %g = (%g, %g %g %g) . (%g, %g %g %g)\n",
- result[0], a[0], a[1], a[2], a[3], b[0], b[1], b[2], b[3]);
-#endif
+ if (DEBUG_FRAG) {
+ printf("DP4 %g = (%g, %g %g %g) . (%g, %g %g %g)\n",
+ result[0], a[0], a[1], a[2], a[3],
+ b[0], b[1], b[2], b[3]);
+ }
}
break;
case OPCODE_DPH:
}
result[3] = 1.0F;
store_vector4( inst, machine, result );
+ if (DEBUG_FRAG) {
+ printf("LIT (%g %g %g %g) : (%g %g %g %g)\n",
+ result[0], result[1], result[2], result[3],
+ a[0], a[1], a[2], a[3]);
+ }
}
break;
case OPCODE_LRP:
result[2] = a[2] * b[2] + (1.0F - a[2]) * c[2];
result[3] = a[3] * b[3] + (1.0F - a[3]) * c[3];
store_vector4( inst, machine, result );
-#if DEBUG_FRAG
- printf("LRP (%g %g %g %g) = (%g %g %g %g), "
- "(%g %g %g %g), (%g %g %g %g)\n",
- result[0], result[1], result[2], result[3],
- a[0], a[1], a[2], a[3],
- b[0], b[1], b[2], b[3],
- c[0], c[1], c[2], c[3]);
-#endif
+ if (DEBUG_FRAG) {
+ printf("LRP (%g %g %g %g) = (%g %g %g %g), "
+ "(%g %g %g %g), (%g %g %g %g)\n",
+ result[0], result[1], result[2], result[3],
+ a[0], a[1], a[2], a[3],
+ b[0], b[1], b[2], b[3],
+ c[0], c[1], c[2], c[3]);
+ }
}
break;
case OPCODE_MAD:
result[2] = a[2] * b[2] + c[2];
result[3] = a[3] * b[3] + c[3];
store_vector4( inst, machine, result );
+ if (DEBUG_FRAG) {
+ printf("MAD (%g %g %g %g) = (%g %g %g %g) * "
+ "(%g %g %g %g) + (%g %g %g %g)\n",
+ result[0], result[1], result[2], result[3],
+ a[0], a[1], a[2], a[3],
+ b[0], b[1], b[2], b[3],
+ c[0], c[1], c[2], c[3]);
+ }
}
break;
case OPCODE_MAX:
result[2] = MAX2(a[2], b[2]);
result[3] = MAX2(a[3], b[3]);
store_vector4( inst, machine, result );
-#if DEBUG_FRAG
- printf("MAX (%g %g %g %g) = (%g %g %g %g), (%g %g %g %g)\n",
- result[0], result[1], result[2], result[3],
- a[0], a[1], a[2], a[3],
- b[0], b[1], b[2], b[3]);
-#endif
+ if (DEBUG_FRAG) {
+ printf("MAX (%g %g %g %g) = (%g %g %g %g), (%g %g %g %g)\n",
+ result[0], result[1], result[2], result[3],
+ a[0], a[1], a[2], a[3],
+ b[0], b[1], b[2], b[3]);
+ }
}
break;
case OPCODE_MIN:
GLfloat result[4];
fetch_vector4( ctx, &inst->SrcReg[0], machine, program, result );
store_vector4( inst, machine, result );
-#if DEBUG_FRAG
- printf("MOV (%g %g %g %g)\n",
- result[0], result[1], result[2], result[3]);
-#endif
+ if (DEBUG_FRAG) {
+ printf("MOV (%g %g %g %g)\n",
+ result[0], result[1], result[2], result[3]);
+ }
}
break;
case OPCODE_MUL:
result[2] = a[2] * b[2];
result[3] = a[3] * b[3];
store_vector4( inst, machine, result );
-#if DEBUG_FRAG
- printf("MUL (%g %g %g %g) = (%g %g %g %g) * (%g %g %g %g)\n",
- result[0], result[1], result[2], result[3],
- a[0], a[1], a[2], a[3],
- b[0], b[1], b[2], b[3]);
-#endif
+ if (DEBUG_FRAG) {
+ printf("MUL (%g %g %g %g) = (%g %g %g %g) * (%g %g %g %g)\n",
+ result[0], result[1], result[2], result[3],
+ a[0], a[1], a[2], a[3],
+ b[0], b[1], b[2], b[3]);
+ }
}
break;
case OPCODE_PK2H: /* pack two 16-bit floats in one 32-bit float */
{
GLfloat a[4], result[4];
fetch_vector1( ctx, &inst->SrcReg[0], machine, program, a );
-#if DEBUG_FRAG
- if (a[0] == 0)
- printf("RCP(0)\n");
- else if (IS_INF_OR_NAN(a[0]))
- printf("RCP(inf)\n");
-#endif
+ if (DEBUG_FRAG) {
+ if (a[0] == 0)
+ printf("RCP(0)\n");
+ else if (IS_INF_OR_NAN(a[0]))
+ printf("RCP(inf)\n");
+ }
result[0] = result[1] = result[2] = result[3] = 1.0F / a[0];
store_vector4( inst, machine, result );
}
a[0] = FABSF(a[0]);
result[0] = result[1] = result[2] = result[3] = INV_SQRTF(a[0]);
store_vector4( inst, machine, result );
-#if DEBUG_FRAG
- printf("RSQ %g = 1/sqrt(|%g|)\n", result[0], a[0]);
-#endif
+ if (DEBUG_FRAG) {
+ printf("RSQ %g = 1/sqrt(|%g|)\n", result[0], a[0]);
+ }
}
break;
case OPCODE_SCS: /* sine and cos */
result[2] = a[2] - b[2];
result[3] = a[3] - b[3];
store_vector4( inst, machine, result );
-#if DEBUG_FRAG
- printf("SUB (%g %g %g %g) = (%g %g %g %g) - (%g %g %g %g)\n",
- result[0], result[1], result[2], result[3],
- a[0], a[1], a[2], a[3], b[0], b[1], b[2], b[3]);
-#endif
+ if (DEBUG_FRAG) {
+ printf("SUB (%g %g %g %g) = (%g %g %g %g) - (%g %g %g %g)\n",
+ result[0], result[1], result[2], result[3],
+ a[0], a[1], a[2], a[3], b[0], b[1], b[2], b[3]);
+ }
}
break;
case OPCODE_SWZ: /* extended swizzle */
case OPCODE_TEX: /* Both ARB and NV frag prog */
/* Texel lookup */
{
- GLfloat texcoord[4], color[4];
- fetch_vector4( ctx, &inst->SrcReg[0], machine, program, texcoord );
- /* Note: we pass 0 for LOD. The ARB extension requires it
- * while the NV extension says it's implementation dependant.
+ /* Note: only use the precomputed lambda value when we're
+ * sampling texture unit [K] with texcoord[K].
+ * Otherwise, the lambda value may have no relation to the
+ * instruction's texcoord or texture image. Using the wrong
+ * lambda is usually bad news.
+ * The rest of the time, just use zero (until we get a more
+ * sophisticated way of computing lambda).
*/
- /* KW: Previously lambda was passed as zero, but I
- * believe this is incorrect, the spec seems to
- * indicate rather that lambda should not be
- * changed/biased, unlike TXB where texcoord[3] is
- * added to the lambda calculations. The lambda should
- * still be calculated normally for TEX & TXP though,
- * not set to zero. Otherwise it's very difficult to
- * implement normal GL semantics through the fragment
- * shader.
- */
- fetch_texel( ctx, texcoord,
- span->array->lambda[inst->TexSrcUnit][column],
- inst->TexSrcUnit, color );
-#if DEBUG_FRAG
- if (color[3])
- printf("color[3] = %f\n", color[3]);
-#endif
+ GLfloat coord[4], color[4], lambda;
+ if (inst->SrcReg[0].File == PROGRAM_INPUT &&
+ inst->SrcReg[0].Index == FRAG_ATTRIB_TEX0+inst->TexSrcUnit)
+ lambda = span->array->lambda[inst->TexSrcUnit][column];
+ else
+ lambda = 0.0;
+ fetch_vector4(ctx, &inst->SrcReg[0], machine, program, coord);
+ fetch_texel( ctx, coord, lambda, inst->TexSrcUnit, color );
+ if (DEBUG_FRAG) {
+ printf("TEX (%g, %g, %g, %g) = texture[%d][%g, %g, %g, %g], "
+ "lod %f\n",
+ color[0], color[1], color[2], color[3],
+ inst->TexSrcUnit,
+ coord[0], coord[1], coord[2], coord[3], lambda);
+ }
store_vector4( inst, machine, color );
}
break;
case OPCODE_TXB: /* GL_ARB_fragment_program only */
/* Texel lookup with LOD bias */
{
- GLfloat texcoord[4], color[4], bias, lambda;
-
- fetch_vector4( ctx, &inst->SrcReg[0], machine, program, texcoord );
- /* texcoord[3] is the bias to add to lambda */
+ GLfloat coord[4], color[4], lambda, bias;
+ if (inst->SrcReg[0].File == PROGRAM_INPUT &&
+ inst->SrcReg[0].Index == FRAG_ATTRIB_TEX0+inst->TexSrcUnit)
+ lambda = span->array->lambda[inst->TexSrcUnit][column];
+ else
+ lambda = 0.0;
+ fetch_vector4(ctx, &inst->SrcReg[0], machine, program, coord);
+ /* coord[3] is the bias to add to lambda */
bias = ctx->Texture.Unit[inst->TexSrcUnit].LodBias
+ ctx->Texture.Unit[inst->TexSrcUnit]._Current->LodBias
- + texcoord[3];
- lambda = span->array->lambda[inst->TexSrcUnit][column] + bias;
- fetch_texel( ctx, texcoord, lambda,
- inst->TexSrcUnit, color );
+ + coord[3];
+ fetch_texel(ctx, coord, lambda + bias, inst->TexSrcUnit, color);
store_vector4( inst, machine, color );
}
break;
case OPCODE_TXP: /* GL_ARB_fragment_program only */
/* Texture lookup w/ projective divide */
{
- GLfloat texcoord[4], color[4];
- fetch_vector4( ctx, &inst->SrcReg[0], machine, program, texcoord );
+ GLfloat texcoord[4], color[4], lambda;
+ if (inst->SrcReg[0].File == PROGRAM_INPUT &&
+ inst->SrcReg[0].Index == FRAG_ATTRIB_TEX0+inst->TexSrcUnit)
+ lambda = span->array->lambda[inst->TexSrcUnit][column];
+ else
+ lambda = 0.0;
+ fetch_vector4(ctx, &inst->SrcReg[0], machine, program, texcoord);
/* Not so sure about this test - if texcoord[3] is
* zero, we'd probably be fine except for an ASSERT in
* IROUND_POS() which gets triggered by the inf values created.
texcoord[1] /= texcoord[3];
texcoord[2] /= texcoord[3];
}
- /* KW: Previously lambda was passed as zero, but I
- * believe this is incorrect, the spec seems to
- * indicate rather that lambda should not be
- * changed/biased, unlike TXB where texcoord[3] is
- * added to the lambda calculations. The lambda should
- * still be calculated normally for TEX & TXP though,
- * not set to zero.
- */
- fetch_texel( ctx, texcoord,
- span->array->lambda[inst->TexSrcUnit][column],
- inst->TexSrcUnit, color );
+ fetch_texel( ctx, texcoord, lambda, inst->TexSrcUnit, color );
store_vector4( inst, machine, color );
}
break;
case OPCODE_TXP_NV: /* GL_NV_fragment_program only */
/* Texture lookup w/ projective divide */
{
- GLfloat texcoord[4], color[4];
- fetch_vector4( ctx, &inst->SrcReg[0], machine, program, texcoord );
+ GLfloat texcoord[4], color[4], lambda;
+ if (inst->SrcReg[0].File == PROGRAM_INPUT &&
+ inst->SrcReg[0].Index == FRAG_ATTRIB_TEX0+inst->TexSrcUnit)
+ lambda = span->array->lambda[inst->TexSrcUnit][column];
+ else
+ lambda = 0.0;
+ fetch_vector4(ctx, &inst->SrcReg[0], machine, program, texcoord);
if (inst->TexSrcTarget != TEXTURE_CUBE_INDEX &&
texcoord[3] != 0.0) {
texcoord[0] /= texcoord[3];
texcoord[1] /= texcoord[3];
texcoord[2] /= texcoord[3];
}
- fetch_texel( ctx, texcoord,
- span->array->lambda[inst->TexSrcUnit][column],
- inst->TexSrcUnit, color );
+ fetch_texel( ctx, texcoord, lambda, inst->TexSrcUnit, color );
store_vector4( inst, machine, color );
}
break;
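
The lambda selection rule introduced above is repeated verbatim in the TEX, TXB, TXP and TXP_NV cases; it could be factored into a small helper, sketched here (helper name hypothetical; it takes the precomputed per-span lambda as an argument so the sketch does not depend on the span type):

/* Only trust the precomputed lambda when texture unit K is sampled with
 * texcoord[K]; otherwise fall back to zero. */
static GLfloat
select_lambda(const struct prog_instruction *inst, GLfloat precomputed)
{
   if (inst->SrcReg[0].File == PROGRAM_INPUT &&
       inst->SrcReg[0].Index == FRAG_ATTRIB_TEX0 + inst->TexSrcUnit)
      return precomputed;
   return 0.0F;
}
/* e.g. lambda = select_lambda(inst, span->array->lambda[inst->TexSrcUnit][column]); */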
ctx->_CurrentProgram = GL_FRAGMENT_PROGRAM_ARB; /* or NV, doesn't matter */
- if (program->Base.Parameters) {
- _mesa_load_state_parameters(ctx, program->Base.Parameters);
- }
-
run_program(ctx, span, 0, span->end);
if (program->Base.OutputsWritten & (1 << FRAG_RESULT_DEPR)) {
case GL_FLOAT:
{
GLfloat (*spec)[4] = span->array->color.sz4.spec;
+#if CHAN_BITS <= 16
+ GLfloat r = CHAN_TO_FLOAT(FixedToChan(span->specRed));
+ GLfloat g = CHAN_TO_FLOAT(FixedToChan(span->specGreen));
+ GLfloat b = CHAN_TO_FLOAT(FixedToChan(span->specBlue));
+#else
+ GLfloat r = span->specRed;
+ GLfloat g = span->specGreen;
+ GLfloat b = span->specBlue;
+#endif
+ GLfloat dr, dg, db;
if (span->interpMask & SPAN_FLAT) {
- GLfloat color[4];
- color[RCOMP] = span->specRed;
- color[GCOMP] = span->specGreen;
- color[BCOMP] = span->specBlue;
- color[ACOMP] = 0.0F;
- for (i = 0; i < n; i++) {
- COPY_4V(spec[i], color);
- }
+ dr = dg = db = 0.0;
}
else {
- GLfloat r = span->specRed;
- GLfloat g = span->specGreen;
- GLfloat b = span->specBlue;
- GLfloat dr = span->specRedStep;
- GLfloat dg = span->specGreenStep;
- GLfloat db = span->specBlueStep;
- for (i = 0; i < n; i++) {
- spec[i][RCOMP] = r;
- spec[i][GCOMP] = g;
- spec[i][BCOMP] = b;
- spec[i][ACOMP] = 0.0F;
- r += dr;
- g += dg;
- b += db;
- }
+#if CHAN_BITS <= 16
+ dr = CHAN_TO_FLOAT(FixedToChan(span->specRedStep));
+ dg = CHAN_TO_FLOAT(FixedToChan(span->specGreenStep));
+ db = CHAN_TO_FLOAT(FixedToChan(span->specBlueStep));
+#else
+ dr = span->specRedStep;
+ dg = span->specGreenStep;
+ db = span->specBlueStep;
+#endif
+ }
+ for (i = 0; i < n; i++) {
+ spec[i][RCOMP] = r;
+ spec[i][GCOMP] = g;
+ spec[i][BCOMP] = b;
+ spec[i][ACOMP] = 0.0F;
+ r += dr;
+ g += dg;
+ b += db;
}
}
break;
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
ASSERT(texObj);
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
ASSERT(texImage);
ASSERT(ctx->Driver.TexImage1D);
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
ASSERT(texObj);
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
ASSERT(texImage);
ASSERT(ctx->Driver.TexImage2D);
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
ASSERT(texObj);
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
ASSERT(texImage);
ASSERT(ctx->Driver.TexImage1D);
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
ASSERT(texObj);
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
ASSERT(texImage);
ASSERT(ctx->Driver.TexImage2D);
texUnit = &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
texObj = _mesa_select_tex_object(ctx, texUnit, target);
ASSERT(texObj);
- texImage = _mesa_select_tex_image(ctx, texUnit, target, level);
+ texImage = _mesa_select_tex_image(ctx, texObj, target, level);
ASSERT(texImage);
ASSERT(ctx->Driver.TexImage3D);
GLbitfield mask, GLenum filter);
extern void
-_swrast_Clear( GLcontext *ctx, GLbitfield mask, GLboolean all,
- GLint x, GLint y, GLint width, GLint height );
+_swrast_Clear(GLcontext *ctx, GLbitfield buffers);
extern void
_swrast_Accum(GLcontext *ctx, GLenum op, GLfloat value);
/*
* Mesa 3-D graphics library
- * Version: 6.5
+ * Version: 6.5.2
*
* Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
*
if (ctx->Fog.Enabled ||
(ctx->FragmentProgram._Active &&
- ctx->FragmentProgram._Current->FogOption != GL_NONE))
+ (ctx->FragmentProgram._Current->FogOption != GL_NONE ||
+ ctx->FragmentProgram._Current->Base.InputsRead & FRAG_BIT_FOGC)))
RENDERINPUTS_SET( tnl->render_inputs_bitset, _TNL_ATTRIB_FOG );
if (ctx->Polygon.FrontMode != GL_FILL ||
struct vp_stage_data *store = VP_STAGE_DATA(stage);
struct vertex_buffer *VB = &tnl->vb;
struct gl_vertex_program *program = ctx->VertexProgram.Current;
+ struct vp_machine machine;
GLuint i;
if (ctx->ShaderObjects._VertexShaderPresent)
for (i = 0; i < VB->Count; i++) {
GLuint attr;
- _mesa_init_vp_per_vertex_registers(ctx);
+ _mesa_init_vp_per_vertex_registers(ctx, &machine);
#if 0
printf("Input %d: %f, %f, %f, %f\n", i,
const GLuint size = VB->AttribPtr[attr]->size;
const GLuint stride = VB->AttribPtr[attr]->stride;
const GLfloat *data = (GLfloat *) (ptr + stride * i);
- COPY_CLEAN_4V(ctx->VertexProgram.Machine.Inputs[attr], size, data);
+ COPY_CLEAN_4V(machine.Inputs[attr], size, data);
}
}
/* execute the program */
ASSERT(program);
- _mesa_exec_vertex_program(ctx, program);
+ _mesa_exec_vertex_program(ctx, &machine, program);
/* Fixup fog and point size results if needed */
if (ctx->Fog.Enabled &&
(program->Base.OutputsWritten & (1 << VERT_RESULT_FOGC)) == 0) {
- ctx->VertexProgram.Machine.Outputs[VERT_RESULT_FOGC][0] = 1.0;
+ machine.Outputs[VERT_RESULT_FOGC][0] = 1.0;
}
if (ctx->VertexProgram.PointSizeEnabled &&
(program->Base.OutputsWritten & (1 << VERT_RESULT_PSIZ)) == 0) {
- ctx->VertexProgram.Machine.Outputs[VERT_RESULT_PSIZ][0] = ctx->Point.Size;
+ machine.Outputs[VERT_RESULT_PSIZ][0] = ctx->Point.Size;
}
/* copy the output registers into the VB->attribs arrays */
/* XXX (optimize) could use a conditional and smaller loop limit here */
for (attr = 0; attr < 15; attr++) {
- COPY_4V(store->attribs[attr].data[i],
- ctx->VertexProgram.Machine.Outputs[attr]);
+ COPY_4V(store->attribs[attr].data[i], machine.Outputs[attr]);
}
}