TOP = ../../../../../..
include $(TOP)/configs/current
-LIBNAME = i915_dri.so
+LIBNAME = i965_dri.so
PIPE_DRIVERS = \
$(TOP)/src/gallium/state_trackers/dri/libdridrm.a \
$(TOP)/src/gallium/drivers/trace/libtrace.a \
$(TOP)/src/gallium/drivers/softpipe/libsoftpipe.a \
$(TOP)/src/gallium/drivers/identity/libidentity.a \
- $(TOP)/src/gallium/drivers/i915/libi915.a
+ $(TOP)/src/gallium/drivers/i965/libi965.a
DRIVER_SOURCES =
symlinks: $(TOP)/$(LIB_DIR)/gallium
@rm -f $(TOP)/$(LIB_DIR)/gallium/i965_dri.so
- ln -s i915_dri.so $(TOP)/$(LIB_DIR)/gallium/i965_dri.so
+ ln -s i965_dri.so $(TOP)/$(LIB_DIR)/gallium/i965_dri.so
drivers = [
st_dri,
- inteldrm,
- softpipe,
- i915,
+ i965drm,
+ i965,
trace,
]
env.LoadableModule(
- target ='i915_dri.so',
+ target ='i965_dri.so',
source = COMMON_GALLIUM_SOURCES,
LIBS = drivers + mesa + auxiliaries + env['LIBS'],
SHLIBPREFIX = '',
GALLIUMDIR = ../../../..
include $(TOP)/configs/current
-LIBNAME = EGL_i915.so
+LIBNAME = EGL_i965.so
PIPE_DRIVERS = \
$(TOP)/src/gallium/state_trackers/egl/libegldrm.a \
- $(GALLIUMDIR)/winsys/drm/intel/gem/libinteldrm.a \
+ $(GALLIUMDIR)/winsys/drm/i965/gem/libi965drm.a \
$(TOP)/src/gallium/drivers/softpipe/libsoftpipe.a \
$(TOP)/src/gallium/drivers/trace/libtrace.a \
- $(TOP)/src/gallium/drivers/i915/libi915.a
+ $(TOP)/src/gallium/drivers/i965/libi965.a
DRIVER_SOURCES =
-LIBNAME = inteldrm
+LIBNAME = i965drm
C_SOURCES = \
- intel_drm_batchbuffer.c \
- intel_drm_buffer.c \
- intel_drm_fence.c \
- intel_drm_api.c
+ i965_drm_batchbuffer.c \
+ i965_drm_buffer.c \
+ i965_drm_fence.c \
+ i965_drm_api.c
LIBRARY_INCLUDES = $(shell pkg-config libdrm --cflags-only-I)
env = drienv.Clone()
-inteldrm_sources = [
- 'intel_drm_api.c',
- 'intel_drm_batchbuffer.c',
- 'intel_drm_buffer.c',
- 'intel_drm_fence.c',
+i965drm_sources = [
+ 'i965_drm_api.c',
+ 'i965_drm_batchbuffer.c',
+ 'i965_drm_buffer.c',
+ 'i965_drm_fence.c',
]
-inteldrm = env.ConvenienceLibrary(
- target ='inteldrm',
- source = inteldrm_sources,
+i965drm = env.ConvenienceLibrary(
+ target ='i965drm',
+ source = i965drm_sources,
)
-Export('inteldrm')
+Export('i965drm')
--- /dev/null
+
+#include "state_tracker/drm_api.h"
+
+#include "i965_drm_winsys.h"
+#include "util/u_memory.h"
+
+#include "brw/brw_context.h" /* XXX: shouldn't be doing this */
+#include "brw/brw_screen.h" /* XXX: shouldn't be doing this */
+
+#include "trace/tr_drm.h"
+
+/*
+ * Helper functions
+ */
+
+
+static void
+i965_drm_get_device_id(unsigned int *device_id)
+{
+ char path[512];
+ FILE *file;
+ void *shutup_gcc;
+
+ /*
+ * FIXME: Fix this up to use a drm ioctl or whatever.
+ */
+
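+ /* Read the PCI device ID of card0 from sysfs (a hex string). */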
+ snprintf(path, sizeof(path), "/sys/class/drm/card0/device/device");
+ file = fopen(path, "r");
+ if (!file) {
+ return;
+ }
+
+ shutup_gcc = fgets(path, sizeof(path), file);
+ sscanf(path, "%x", device_id);
+ fclose(file);
+}
+
+static struct intel_buffer *
+i965_drm_buffer_from_handle(struct intel_drm_winsys *idws,
+ const char* name, unsigned handle)
+{
+ struct intel_drm_buffer *buf = CALLOC_STRUCT(intel_drm_buffer);
+ uint32_t tile = 0, swizzle = 0;
+
+ if (!buf)
+ return NULL;
+
+ buf->magic = 0xDEAD1337;
+ buf->bo = drm_intel_bo_gem_create_from_name(idws->pools.gem, name, handle);
+ buf->flinked = TRUE;
+ buf->flink = handle;
+
+ if (!buf->bo)
+ goto err;
+
+ drm_intel_bo_get_tiling(buf->bo, &tile, &swizzle);
+ if (tile != INTEL_TILE_NONE)
+ buf->map_gtt = TRUE;
+
+ return (struct intel_buffer *)buf;
+
+err:
+ FREE(buf);
+ return NULL;
+}
+
+
+/*
+ * Exported functions
+ */
+
+
+static struct pipe_texture *
+i965_drm_texture_from_shared_handle(struct drm_api *api,
+ struct pipe_screen *screen,
+ struct pipe_texture *templ,
+ const char* name,
+ unsigned pitch,
+ unsigned handle)
+{
+ struct intel_drm_winsys *idws = intel_drm_winsys(i965_screen(screen)->iws);
+ struct intel_buffer *buffer;
+
+ buffer = i965_drm_buffer_from_handle(idws, name, handle);
+ if (!buffer)
+ return NULL;
+
+ return i965_texture_blanket_i965(screen, templ, pitch, buffer);
+}
+
+static boolean
+i965_drm_shared_handle_from_texture(struct drm_api *api,
+ struct pipe_screen *screen,
+ struct pipe_texture *texture,
+ unsigned *pitch,
+ unsigned *handle)
+{
+ struct intel_drm_buffer *buf = NULL;
+ struct intel_buffer *buffer = NULL;
+ if (!i965_get_texture_buffer_i965(texture, &buffer, pitch))
+ return FALSE;
+
+ buf = intel_drm_buffer(buffer);
+ if (!buf->flinked) {
+ if (drm_intel_bo_flink(buf->bo, &buf->flink))
+ return FALSE;
+ buf->flinked = TRUE;
+ }
+
+ *handle = buf->flink;
+
+ return TRUE;
+}
+
+static boolean
+i965_drm_local_handle_from_texture(struct drm_api *api,
+ struct pipe_screen *screen,
+ struct pipe_texture *texture,
+ unsigned *pitch,
+ unsigned *handle)
+{
+ struct intel_buffer *buffer = NULL;
+ if (!i965_get_texture_buffer_i965(texture, &buffer, pitch))
+ return FALSE;
+
+ *handle = intel_drm_buffer(buffer)->bo->handle;
+
+ return TRUE;
+}
+
+static void
+i965_drm_winsys_destroy(struct intel_winsys *iws)
+{
+ struct intel_drm_winsys *idws = intel_drm_winsys(iws);
+
+ drm_intel_bufmgr_destroy(idws->pools.gem);
+
+ FREE(idws);
+}
+
+static struct pipe_screen *
+i965_drm_create_screen(struct drm_api *api, int drmFD,
+ struct drm_create_screen_arg *arg)
+{
+ struct intel_drm_winsys *idws;
+ unsigned int deviceID;
+
+ if (arg != NULL) {
+ switch(arg->mode) {
+ case DRM_CREATE_NORMAL:
+ break;
+ default:
+ return NULL;
+ }
+ }
+
+ idws = CALLOC_STRUCT(intel_drm_winsys);
+ if (!idws)
+ return NULL;
+
+ i965_drm_get_device_id(&deviceID);
+
+ intel_drm_winsys_init_batchbuffer_functions(idws);
+ intel_drm_winsys_init_buffer_functions(idws);
+ intel_drm_winsys_init_fence_functions(idws);
+
+ idws->fd = drmFD;
+ idws->id = deviceID;
+ idws->max_batch_size = 16 * 4096;
+
+ idws->base.destroy = i965_drm_winsys_destroy;
+
+ idws->pools.gem = drm_intel_bufmgr_gem_init(idws->fd, idws->max_batch_size);
+ drm_intel_bufmgr_gem_enable_reuse(idws->pools.gem);
+
+ idws->softpipe = FALSE;
+ idws->dump_cmd = debug_get_bool_option("I965_DUMP_CMD", FALSE);
+
+ return i965_create_screen(&idws->base, deviceID);
+}
+
+static struct pipe_context *
+i965_drm_create_context(struct drm_api *api, struct pipe_screen *screen)
+{
+ return i965_create_context(screen);
+}
+
+static void
+destroy(struct drm_api *api)
+{
+
+}
+
+struct drm_api i965_drm_api =
+{
+ .create_context = i965_drm_create_context,
+ .create_screen = i965_drm_create_screen,
+ .texture_from_shared_handle = i965_drm_texture_from_shared_handle,
+ .shared_handle_from_texture = i965_drm_shared_handle_from_texture,
+ .local_handle_from_texture = i965_drm_local_handle_from_texture,
+ .destroy = destroy,
+};
+
+struct drm_api *
+drm_api_create()
+{
+ return trace_drm_create(&i965_drm_api);
+}
--- /dev/null
+
+#include "intel_drm_winsys.h"
+#include "util/u_memory.h"
+
+#include "i915_drm.h"
+
+#define BATCH_RESERVED 16
+
+#define INTEL_DEFAULT_RELOCS 100
+#define INTEL_MAX_RELOCS 400
+
+#define INTEL_BATCH_NO_CLIPRECTS 0x1
+#define INTEL_BATCH_CLIPRECTS 0x2
+
+#undef INTEL_RUN_SYNC
+#undef INTEL_MAP_BATCHBUFFER
+#undef INTEL_MAP_GTT
+#define INTEL_ALWAYS_FLUSH
+
+struct intel_drm_batchbuffer
+{
+ struct intel_batchbuffer base;
+
+ size_t actual_size;
+
+ drm_intel_bo *bo;
+};
+
+static INLINE struct intel_drm_batchbuffer *
+intel_drm_batchbuffer(struct intel_batchbuffer *batch)
+{
+ return (struct intel_drm_batchbuffer *)batch;
+}
+
+static void
+intel_drm_batchbuffer_reset(struct intel_drm_batchbuffer *batch)
+{
+ struct intel_drm_winsys *idws = intel_drm_winsys(batch->base.iws);
+ int ret;
+
+ if (batch->bo)
+ drm_intel_bo_unreference(batch->bo);
+ batch->bo = drm_intel_bo_alloc(idws->pools.gem,
+ "gallium3d_batchbuffer",
+ batch->actual_size,
+ 4096);
+
+#ifdef INTEL_MAP_BATCHBUFFER
+#ifdef INTEL_MAP_GTT
+ ret = drm_intel_gem_bo_map_gtt(batch->bo);
+#else
+ ret = drm_intel_bo_map(batch->bo, TRUE);
+#endif
+ assert(ret == 0);
+ batch->base.map = batch->bo->virtual;
+#else
+ (void)ret;
+#endif
+
+ memset(batch->base.map, 0, batch->actual_size);
+ batch->base.ptr = batch->base.map;
+ batch->base.size = batch->actual_size - BATCH_RESERVED;
+ batch->base.relocs = 0;
+}
+
+static struct intel_batchbuffer *
+intel_drm_batchbuffer_create(struct intel_winsys *iws)
+{
+ struct intel_drm_winsys *idws = intel_drm_winsys(iws);
+ struct intel_drm_batchbuffer *batch = CALLOC_STRUCT(intel_drm_batchbuffer);
+
+ batch->actual_size = idws->max_batch_size;
+
+#ifdef INTEL_MAP_BATCHBUFFER
+ batch->base.map = NULL;
+#else
+ batch->base.map = MALLOC(batch->actual_size);
+#endif
+ batch->base.ptr = NULL;
+ batch->base.size = 0;
+
+ batch->base.relocs = 0;
+ batch->base.max_relocs = 300;/*INTEL_DEFAULT_RELOCS;*/
+
+ batch->base.iws = iws;
+
+ intel_drm_batchbuffer_reset(batch);
+
+ return &batch->base;
+}
+
+static int
+intel_drm_batchbuffer_reloc(struct intel_batchbuffer *ibatch,
+ struct intel_buffer *buffer,
+ enum intel_buffer_usage usage,
+ unsigned pre_add)
+{
+ struct intel_drm_batchbuffer *batch = intel_drm_batchbuffer(ibatch);
+ unsigned write_domain = 0;
+ unsigned read_domain = 0;
+ unsigned offset;
+ int ret = 0;
+
+ assert(batch->base.relocs < batch->base.max_relocs);
+
+ if (usage == INTEL_USAGE_SAMPLER) {
+ write_domain = 0;
+ read_domain = I915_GEM_DOMAIN_SAMPLER;
+
+ } else if (usage == INTEL_USAGE_RENDER) {
+ write_domain = I915_GEM_DOMAIN_RENDER;
+ read_domain = I915_GEM_DOMAIN_RENDER;
+
+ } else if (usage == INTEL_USAGE_2D_TARGET) {
+ write_domain = I915_GEM_DOMAIN_RENDER;
+ read_domain = I915_GEM_DOMAIN_RENDER;
+
+ } else if (usage == INTEL_USAGE_2D_SOURCE) {
+ write_domain = 0;
+ read_domain = I915_GEM_DOMAIN_RENDER;
+
+ } else if (usage == INTEL_USAGE_VERTEX) {
+ write_domain = 0;
+ read_domain = I915_GEM_DOMAIN_VERTEX;
+
+ } else {
+ assert(0);
+ return -1;
+ }
+
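+ /* Record the relocation with libdrm and write the bo's presumed offset into the batch. */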
+ offset = (unsigned)(batch->base.ptr - batch->base.map);
+
+ ret = drm_intel_bo_emit_reloc(batch->bo, offset,
+ intel_bo(buffer), pre_add,
+ read_domain,
+ write_domain);
+
+ ((uint32_t*)batch->base.ptr)[0] = intel_bo(buffer)->offset + pre_add;
+ batch->base.ptr += 4;
+
+ if (!ret)
+ batch->base.relocs++;
+
+ return ret;
+}
+
+static void
+intel_drm_batchbuffer_flush(struct intel_batchbuffer *ibatch,
+ struct pipe_fence_handle **fence)
+{
+ struct intel_drm_batchbuffer *batch = intel_drm_batchbuffer(ibatch);
+ unsigned used = 0;
+ int ret = 0;
+ int i;
+
+ assert(intel_batchbuffer_space(ibatch) >= 0);
+
+ used = batch->base.ptr - batch->base.map;
+ assert((used & 3) == 0);
+
+
+#ifdef INTEL_ALWAYS_FLUSH
+ /* MI_FLUSH | FLUSH_MAP_CACHE */
+ intel_batchbuffer_dword(ibatch, (0x4<<23)|(1<<0));
+ used += 4;
+#endif
+
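+ /* Pad with MI_NOOP if needed so the final batch length is qword (8 byte) aligned. */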
+ if ((used & 4) == 0) {
+ /* MI_NOOP */
+ intel_batchbuffer_dword(ibatch, 0);
+ }
+ /* MI_BATCH_BUFFER_END */
+ intel_batchbuffer_dword(ibatch, (0xA<<23));
+
+ used = batch->base.ptr - batch->base.map;
+ assert((used & 4) == 0);
+
+#ifdef INTEL_MAP_BATCHBUFFER
+#ifdef INTEL_MAP_GTT
+ drm_intel_gem_bo_unmap_gtt(batch->bo);
+#else
+ drm_intel_bo_unmap(batch->bo);
+#endif
+#else
+ drm_intel_bo_subdata(batch->bo, 0, used, batch->base.map);
+#endif
+
+ /* Do the sending to HW */
+ ret = drm_intel_bo_exec(batch->bo, used, NULL, 0, 0);
+ assert(ret == 0);
+
+ if (intel_drm_winsys(ibatch->iws)->dump_cmd) {
+ unsigned *ptr;
+ drm_intel_bo_map(batch->bo, FALSE);
+ ptr = (unsigned*)batch->bo->virtual;
+
+ debug_printf("%s:\n", __func__);
+ for (i = 0; i < used / 4; i++, ptr++) {
+ debug_printf("\t%08x: %08x\n", i*4, *ptr);
+ }
+
+ drm_intel_bo_unmap(batch->bo);
+ } else {
+#ifdef INTEL_RUN_SYNC
+ drm_intel_bo_map(batch->bo, FALSE);
+ drm_intel_bo_unmap(batch->bo);
+#endif
+ }
+
+ if (fence) {
+ ibatch->iws->fence_reference(ibatch->iws, fence, NULL);
+
+#ifdef INTEL_RUN_SYNC
+ /* we run synced to GPU so just pass null */
+ (*fence) = intel_drm_fence_create(NULL);
+#else
+ (*fence) = intel_drm_fence_create(batch->bo);
+#endif
+ }
+
+ intel_drm_batchbuffer_reset(batch);
+}
+
+static void
+intel_drm_batchbuffer_destroy(struct intel_batchbuffer *ibatch)
+{
+ struct intel_drm_batchbuffer *batch = intel_drm_batchbuffer(ibatch);
+
+ if (batch->bo)
+ drm_intel_bo_unreference(batch->bo);
+
+#ifndef INTEL_MAP_BATCHBUFFER
+ FREE(batch->base.map);
+#endif
+ FREE(batch);
+}
+
+void intel_drm_winsys_init_batchbuffer_functions(struct intel_drm_winsys *idws)
+{
+ idws->base.batchbuffer_create = intel_drm_batchbuffer_create;
+ idws->base.batchbuffer_reloc = intel_drm_batchbuffer_reloc;
+ idws->base.batchbuffer_flush = intel_drm_batchbuffer_flush;
+ idws->base.batchbuffer_destroy = intel_drm_batchbuffer_destroy;
+}
--- /dev/null
+
+#include "i965_drm_winsys.h"
+#include "util/u_memory.h"
+
+#include "i915_drm.h"
+
+static struct intel_buffer *
+intel_drm_buffer_create(struct intel_winsys *iws,
+ unsigned size, unsigned alignment,
+ enum intel_buffer_type type)
+{
+ struct intel_drm_buffer *buf = CALLOC_STRUCT(intel_drm_buffer);
+ struct intel_drm_winsys *idws = intel_drm_winsys(iws);
+ drm_intel_bufmgr *pool;
+ char *name;
+
+ if (!buf)
+ return NULL;
+
+ buf->magic = 0xDEAD1337;
+ buf->flinked = FALSE;
+ buf->flink = 0;
+ buf->map_gtt = FALSE;
+
+ if (type == INTEL_NEW_TEXTURE) {
+ name = "gallium3d_texture";
+ pool = idws->pools.gem;
+ } else if (type == INTEL_NEW_VERTEX) {
+ name = "gallium3d_vertex";
+ pool = idws->pools.gem;
+ buf->map_gtt = TRUE;
+ } else if (type == INTEL_NEW_SCANOUT) {
+ name = "gallium3d_scanout";
+ pool = idws->pools.gem;
+ buf->map_gtt = TRUE;
+ } else {
+ assert(0);
+ name = "gallium3d_unknown";
+ pool = idws->pools.gem;
+ }
+
+ buf->bo = drm_intel_bo_alloc(pool, name, size, alignment);
+
+ if (!buf->bo)
+ goto err;
+
+ return (struct intel_buffer *)buf;
+
+err:
+ assert(0);
+ FREE(buf);
+ return NULL;
+}
+
+static int
+intel_drm_buffer_set_fence_reg(struct intel_winsys *iws,
+ struct intel_buffer *buffer,
+ unsigned stride,
+ enum intel_buffer_tile tile)
+{
+ struct intel_drm_buffer *buf = intel_drm_buffer(buffer);
+ assert(I915_TILING_NONE == INTEL_TILE_NONE);
+ assert(I915_TILING_X == INTEL_TILE_X);
+ assert(I915_TILING_Y == INTEL_TILE_Y);
+
+ if (tile != INTEL_TILE_NONE) {
+ assert(buf->map_count == 0);
+ buf->map_gtt = TRUE;
+ }
+
+ return drm_intel_bo_set_tiling(buf->bo, &tile, stride);
+}
+
+static void *
+intel_drm_buffer_map(struct intel_winsys *iws,
+ struct intel_buffer *buffer,
+ boolean write)
+{
+ struct intel_drm_buffer *buf = intel_drm_buffer(buffer);
+ drm_intel_bo *bo = intel_bo(buffer);
+ int ret = 0;
+
+ assert(bo);
+
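+ /* If the buffer is already mapped, reuse the existing mapping. */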
+ if (buf->map_count)
+ goto out;
+
+ if (buf->map_gtt)
+ ret = drm_intel_gem_bo_map_gtt(bo);
+ else
+ ret = drm_intel_bo_map(bo, write);
+
+ buf->ptr = bo->virtual;
+
+ assert(ret == 0);
+out:
+ if (ret)
+ return NULL;
+
+ buf->map_count++;
+ return buf->ptr;
+}
+
+static void
+intel_drm_buffer_unmap(struct intel_winsys *iws,
+ struct intel_buffer *buffer)
+{
+ struct intel_drm_buffer *buf = intel_drm_buffer(buffer);
+
+ if (--buf->map_count)
+ return;
+
+ if (buf->map_gtt)
+ drm_intel_gem_bo_unmap_gtt(intel_bo(buffer));
+ else
+ drm_intel_bo_unmap(intel_bo(buffer));
+}
+
+static int
+intel_drm_buffer_write(struct intel_winsys *iws,
+ struct intel_buffer *buffer,
+ size_t offset,
+ size_t size,
+ const void *data)
+{
+ struct intel_drm_buffer *buf = intel_drm_buffer(buffer);
+
+ return drm_intel_bo_subdata(buf->bo, offset, size, (void*)data);
+}
+
+static void
+intel_drm_buffer_destroy(struct intel_winsys *iws,
+ struct intel_buffer *buffer)
+{
+ drm_intel_bo_unreference(intel_bo(buffer));
+
+#ifdef DEBUG
+ intel_drm_buffer(buffer)->magic = 0;
+ intel_drm_buffer(buffer)->bo = NULL;
+#endif
+
+ FREE(buffer);
+}
+
+void
+intel_drm_winsys_init_buffer_functions(struct intel_drm_winsys *idws)
+{
+ idws->base.buffer_create = intel_drm_buffer_create;
+ idws->base.buffer_set_fence_reg = intel_drm_buffer_set_fence_reg;
+ idws->base.buffer_map = intel_drm_buffer_map;
+ idws->base.buffer_unmap = intel_drm_buffer_unmap;
+ idws->base.buffer_write = intel_drm_buffer_write;
+ idws->base.buffer_destroy = intel_drm_buffer_destroy;
+}
--- /dev/null
+
+#include "intel_drm_winsys.h"
+#include "util/u_memory.h"
+#include "pipe/p_refcnt.h"
+
+/**
+ * Because GEM does not have fences, we have to create our own.
+ *
+ * They work by keeping the batchbuffer around and checking whether it
+ * has gone idle. If bo is NULL the fence has already expired.
+ */
+struct intel_drm_fence
+{
+ struct pipe_reference reference;
+ drm_intel_bo *bo;
+};
+
+
+struct pipe_fence_handle *
+intel_drm_fence_create(drm_intel_bo *bo)
+{
+ struct intel_drm_fence *fence = CALLOC_STRUCT(intel_drm_fence);
+
+ pipe_reference_init(&fence->reference, 1);
+ /* bo is null if fence already expired */
+ if (bo) {
+ drm_intel_bo_reference(bo);
+ fence->bo = bo;
+ }
+
+ return (struct pipe_fence_handle *)fence;
+}
+
+static void
+intel_drm_fence_reference(struct intel_winsys *iws,
+ struct pipe_fence_handle **ptr,
+ struct pipe_fence_handle *fence)
+{
+ struct intel_drm_fence *old = (struct intel_drm_fence *)*ptr;
+ struct intel_drm_fence *f = (struct intel_drm_fence *)fence;
+
+ if (pipe_reference((struct pipe_reference**)ptr, &f->reference)) {
+ if (old->bo)
+ drm_intel_bo_unreference(old->bo);
+ FREE(old);
+ }
+}
+
+static int
+intel_drm_fence_signalled(struct intel_winsys *iws,
+ struct pipe_fence_handle *fence)
+{
+ assert(0);
+
+ return 0;
+}
+
+static int
+intel_drm_fence_finish(struct intel_winsys *iws,
+ struct pipe_fence_handle *fence)
+{
+ struct intel_drm_fence *f = (struct intel_drm_fence *)fence;
+
+ /* fence already expired */
+ if (!f->bo)
+ return 0;
+
+ drm_intel_bo_wait_rendering(f->bo);
+ drm_intel_bo_unreference(f->bo);
+ f->bo = NULL;
+
+ return 0;
+}
+
+void
+intel_drm_winsys_init_fence_functions(struct intel_drm_winsys *idws)
+{
+ idws->base.fence_reference = intel_drm_fence_reference;
+ idws->base.fence_signalled = intel_drm_fence_signalled;
+ idws->base.fence_finish = intel_drm_fence_finish;
+}
--- /dev/null
+
+#ifndef INTEL_DRM_WINSYS_H
+#define INTEL_DRM_WINSYS_H
+
+#include "i965/intel_batchbuffer.h"
+
+#include "drm.h"
+#include "intel_bufmgr.h"
+
+
+/*
+ * Winsys
+ */
+
+
+struct intel_drm_winsys
+{
+ struct intel_winsys base;
+
+ boolean softpipe;
+ boolean dump_cmd;
+
+ int fd; /**< DRM file descriptor */
+
+ unsigned id;
+
+ size_t max_batch_size;
+
+ struct {
+ drm_intel_bufmgr *gem;
+ } pools;
+};
+
+static INLINE struct intel_drm_winsys *
+intel_drm_winsys(struct intel_winsys *iws)
+{
+ return (struct intel_drm_winsys *)iws;
+}
+
+struct intel_drm_winsys * intel_drm_winsys_create(int fd, unsigned pci_id);
+struct pipe_fence_handle * intel_drm_fence_create(drm_intel_bo *bo);
+
+void intel_drm_winsys_init_batchbuffer_functions(struct intel_drm_winsys *idws);
+void intel_drm_winsys_init_buffer_functions(struct intel_drm_winsys *idws);
+void intel_drm_winsys_init_fence_functions(struct intel_drm_winsys *idws);
+
+
+/*
+ * Buffer
+ */
+
+
+struct intel_drm_buffer {
+ unsigned magic;
+
+ drm_intel_bo *bo;
+
+ void *ptr;
+ unsigned map_count;
+ boolean map_gtt;
+
+ boolean flinked;
+ unsigned flink;
+};
+
+static INLINE struct intel_drm_buffer *
+intel_drm_buffer(struct intel_buffer *buffer)
+{
+ return (struct intel_drm_buffer *)buffer;
+}
+
+static INLINE drm_intel_bo *
+intel_bo(struct intel_buffer *buffer)
+{
+ return intel_drm_buffer(buffer)->bo;
+}
+
+#endif
+++ /dev/null
-
-#include "state_tracker/drm_api.h"
-
-#include "intel_drm_winsys.h"
-#include "util/u_memory.h"
-
-#include "i915/i915_context.h"
-#include "i915/i915_screen.h"
-
-#include "trace/tr_drm.h"
-
-/*
- * Helper functions
- */
-
-
-static void
-intel_drm_get_device_id(unsigned int *device_id)
-{
- char path[512];
- FILE *file;
- void *shutup_gcc;
-
- /*
- * FIXME: Fix this up to use a drm ioctl or whatever.
- */
-
- snprintf(path, sizeof(path), "/sys/class/drm/card0/device/device");
- file = fopen(path, "r");
- if (!file) {
- return;
- }
-
- shutup_gcc = fgets(path, sizeof(path), file);
- sscanf(path, "%x", device_id);
- fclose(file);
-}
-
-static struct intel_buffer *
-intel_drm_buffer_from_handle(struct intel_drm_winsys *idws,
- const char* name, unsigned handle)
-{
- struct intel_drm_buffer *buf = CALLOC_STRUCT(intel_drm_buffer);
- uint32_t tile = 0, swizzle = 0;
-
- if (!buf)
- return NULL;
-
- buf->magic = 0xDEAD1337;
- buf->bo = drm_intel_bo_gem_create_from_name(idws->pools.gem, name, handle);
- buf->flinked = TRUE;
- buf->flink = handle;
-
- if (!buf->bo)
- goto err;
-
- drm_intel_bo_get_tiling(buf->bo, &tile, &swizzle);
- if (tile != INTEL_TILE_NONE)
- buf->map_gtt = TRUE;
-
- return (struct intel_buffer *)buf;
-
-err:
- FREE(buf);
- return NULL;
-}
-
-
-/*
- * Exported functions
- */
-
-
-static struct pipe_texture *
-intel_drm_texture_from_shared_handle(struct drm_api *api,
- struct pipe_screen *screen,
- struct pipe_texture *templ,
- const char* name,
- unsigned pitch,
- unsigned handle)
-{
- struct intel_drm_winsys *idws = intel_drm_winsys(i915_screen(screen)->iws);
- struct intel_buffer *buffer;
-
- buffer = intel_drm_buffer_from_handle(idws, name, handle);
- if (!buffer)
- return NULL;
-
- return i915_texture_blanket_intel(screen, templ, pitch, buffer);
-}
-
-static boolean
-intel_drm_shared_handle_from_texture(struct drm_api *api,
- struct pipe_screen *screen,
- struct pipe_texture *texture,
- unsigned *pitch,
- unsigned *handle)
-{
- struct intel_drm_buffer *buf = NULL;
- struct intel_buffer *buffer = NULL;
- if (!i915_get_texture_buffer_intel(texture, &buffer, pitch))
- return FALSE;
-
- buf = intel_drm_buffer(buffer);
- if (!buf->flinked) {
- if (drm_intel_bo_flink(buf->bo, &buf->flink))
- return FALSE;
- buf->flinked = TRUE;
- }
-
- *handle = buf->flink;
-
- return TRUE;
-}
-
-static boolean
-intel_drm_local_handle_from_texture(struct drm_api *api,
- struct pipe_screen *screen,
- struct pipe_texture *texture,
- unsigned *pitch,
- unsigned *handle)
-{
- struct intel_buffer *buffer = NULL;
- if (!i915_get_texture_buffer_intel(texture, &buffer, pitch))
- return FALSE;
-
- *handle = intel_drm_buffer(buffer)->bo->handle;
-
- return TRUE;
-}
-
-static void
-intel_drm_winsys_destroy(struct intel_winsys *iws)
-{
- struct intel_drm_winsys *idws = intel_drm_winsys(iws);
-
- drm_intel_bufmgr_destroy(idws->pools.gem);
-
- FREE(idws);
-}
-
-static struct pipe_screen *
-intel_drm_create_screen(struct drm_api *api, int drmFD,
- struct drm_create_screen_arg *arg)
-{
- struct intel_drm_winsys *idws;
- unsigned int deviceID;
-
- if (arg != NULL) {
- switch(arg->mode) {
- case DRM_CREATE_NORMAL:
- break;
- default:
- return NULL;
- }
- }
-
- idws = CALLOC_STRUCT(intel_drm_winsys);
- if (!idws)
- return NULL;
-
- intel_drm_get_device_id(&deviceID);
-
- intel_drm_winsys_init_batchbuffer_functions(idws);
- intel_drm_winsys_init_buffer_functions(idws);
- intel_drm_winsys_init_fence_functions(idws);
-
- idws->fd = drmFD;
- idws->id = deviceID;
- idws->max_batch_size = 16 * 4096;
-
- idws->base.destroy = intel_drm_winsys_destroy;
-
- idws->pools.gem = drm_intel_bufmgr_gem_init(idws->fd, idws->max_batch_size);
- drm_intel_bufmgr_gem_enable_reuse(idws->pools.gem);
-
- idws->softpipe = FALSE;
- idws->dump_cmd = debug_get_bool_option("INTEL_DUMP_CMD", FALSE);
-
- return i915_create_screen(&idws->base, deviceID);
-}
-
-static struct pipe_context *
-intel_drm_create_context(struct drm_api *api, struct pipe_screen *screen)
-{
- return i915_create_context(screen);
-}
-
-static void
-destroy(struct drm_api *api)
-{
-
-}
-
-struct drm_api intel_drm_api =
-{
- .create_context = intel_drm_create_context,
- .create_screen = intel_drm_create_screen,
- .texture_from_shared_handle = intel_drm_texture_from_shared_handle,
- .shared_handle_from_texture = intel_drm_shared_handle_from_texture,
- .local_handle_from_texture = intel_drm_local_handle_from_texture,
- .destroy = destroy,
-};
-
-struct drm_api *
-drm_api_create()
-{
- return trace_drm_create(&intel_drm_api);
-}
+++ /dev/null
-
-#include "intel_drm_winsys.h"
-#include "util/u_memory.h"
-
-#include "i915_drm.h"
-
-#define BATCH_RESERVED 16
-
-#define INTEL_DEFAULT_RELOCS 100
-#define INTEL_MAX_RELOCS 400
-
-#define INTEL_BATCH_NO_CLIPRECTS 0x1
-#define INTEL_BATCH_CLIPRECTS 0x2
-
-#undef INTEL_RUN_SYNC
-#undef INTEL_MAP_BATCHBUFFER
-#undef INTEL_MAP_GTT
-#define INTEL_ALWAYS_FLUSH
-
-struct intel_drm_batchbuffer
-{
- struct intel_batchbuffer base;
-
- size_t actual_size;
-
- drm_intel_bo *bo;
-};
-
-static INLINE struct intel_drm_batchbuffer *
-intel_drm_batchbuffer(struct intel_batchbuffer *batch)
-{
- return (struct intel_drm_batchbuffer *)batch;
-}
-
-static void
-intel_drm_batchbuffer_reset(struct intel_drm_batchbuffer *batch)
-{
- struct intel_drm_winsys *idws = intel_drm_winsys(batch->base.iws);
- int ret;
-
- if (batch->bo)
- drm_intel_bo_unreference(batch->bo);
- batch->bo = drm_intel_bo_alloc(idws->pools.gem,
- "gallium3d_batchbuffer",
- batch->actual_size,
- 4096);
-
-#ifdef INTEL_MAP_BATCHBUFFER
-#ifdef INTEL_MAP_GTT
- ret = drm_intel_gem_bo_map_gtt(batch->bo);
-#else
- ret = drm_intel_bo_map(batch->bo, TRUE);
-#endif
- assert(ret == 0);
- batch->base.map = batch->bo->virtual;
-#else
- (void)ret;
-#endif
-
- memset(batch->base.map, 0, batch->actual_size);
- batch->base.ptr = batch->base.map;
- batch->base.size = batch->actual_size - BATCH_RESERVED;
- batch->base.relocs = 0;
-}
-
-static struct intel_batchbuffer *
-intel_drm_batchbuffer_create(struct intel_winsys *iws)
-{
- struct intel_drm_winsys *idws = intel_drm_winsys(iws);
- struct intel_drm_batchbuffer *batch = CALLOC_STRUCT(intel_drm_batchbuffer);
-
- batch->actual_size = idws->max_batch_size;
-
-#ifdef INTEL_MAP_BATCHBUFFER
- batch->base.map = NULL;
-#else
- batch->base.map = MALLOC(batch->actual_size);
-#endif
- batch->base.ptr = NULL;
- batch->base.size = 0;
-
- batch->base.relocs = 0;
- batch->base.max_relocs = 300;/*INTEL_DEFAULT_RELOCS;*/
-
- batch->base.iws = iws;
-
- intel_drm_batchbuffer_reset(batch);
-
- return &batch->base;
-}
-
-static int
-intel_drm_batchbuffer_reloc(struct intel_batchbuffer *ibatch,
- struct intel_buffer *buffer,
- enum intel_buffer_usage usage,
- unsigned pre_add)
-{
- struct intel_drm_batchbuffer *batch = intel_drm_batchbuffer(ibatch);
- unsigned write_domain = 0;
- unsigned read_domain = 0;
- unsigned offset;
- int ret = 0;
-
- assert(batch->base.relocs < batch->base.max_relocs);
-
- if (usage == INTEL_USAGE_SAMPLER) {
- write_domain = 0;
- read_domain = I915_GEM_DOMAIN_SAMPLER;
-
- } else if (usage == INTEL_USAGE_RENDER) {
- write_domain = I915_GEM_DOMAIN_RENDER;
- read_domain = I915_GEM_DOMAIN_RENDER;
-
- } else if (usage == INTEL_USAGE_2D_TARGET) {
- write_domain = I915_GEM_DOMAIN_RENDER;
- read_domain = I915_GEM_DOMAIN_RENDER;
-
- } else if (usage == INTEL_USAGE_2D_SOURCE) {
- write_domain = 0;
- read_domain = I915_GEM_DOMAIN_RENDER;
-
- } else if (usage == INTEL_USAGE_VERTEX) {
- write_domain = 0;
- read_domain = I915_GEM_DOMAIN_VERTEX;
-
- } else {
- assert(0);
- return -1;
- }
-
- offset = (unsigned)(batch->base.ptr - batch->base.map);
-
- ret = drm_intel_bo_emit_reloc(batch->bo, offset,
- intel_bo(buffer), pre_add,
- read_domain,
- write_domain);
-
- ((uint32_t*)batch->base.ptr)[0] = intel_bo(buffer)->offset + pre_add;
- batch->base.ptr += 4;
-
- if (!ret)
- batch->base.relocs++;
-
- return ret;
-}
-
-static void
-intel_drm_batchbuffer_flush(struct intel_batchbuffer *ibatch,
- struct pipe_fence_handle **fence)
-{
- struct intel_drm_batchbuffer *batch = intel_drm_batchbuffer(ibatch);
- unsigned used = 0;
- int ret = 0;
- int i;
-
- assert(intel_batchbuffer_space(ibatch) >= 0);
-
- used = batch->base.ptr - batch->base.map;
- assert((used & 3) == 0);
-
-
-#ifdef INTEL_ALWAYS_FLUSH
- /* MI_FLUSH | FLUSH_MAP_CACHE */
- intel_batchbuffer_dword(ibatch, (0x4<<23)|(1<<0));
- used += 4;
-#endif
-
- if ((used & 4) == 0) {
- /* MI_NOOP */
- intel_batchbuffer_dword(ibatch, 0);
- }
- /* MI_BATCH_BUFFER_END */
- intel_batchbuffer_dword(ibatch, (0xA<<23));
-
- used = batch->base.ptr - batch->base.map;
- assert((used & 4) == 0);
-
-#ifdef INTEL_MAP_BATCHBUFFER
-#ifdef INTEL_MAP_GTT
- drm_intel_gem_bo_unmap_gtt(batch->bo);
-#else
- drm_intel_bo_unmap(batch->bo);
-#endif
-#else
- drm_intel_bo_subdata(batch->bo, 0, used, batch->base.map);
-#endif
-
- /* Do the sending to HW */
- ret = drm_intel_bo_exec(batch->bo, used, NULL, 0, 0);
- assert(ret == 0);
-
- if (intel_drm_winsys(ibatch->iws)->dump_cmd) {
- unsigned *ptr;
- drm_intel_bo_map(batch->bo, FALSE);
- ptr = (unsigned*)batch->bo->virtual;
-
- debug_printf("%s:\n", __func__);
- for (i = 0; i < used / 4; i++, ptr++) {
- debug_printf("\t%08x: %08x\n", i*4, *ptr);
- }
-
- drm_intel_bo_unmap(batch->bo);
- } else {
-#ifdef INTEL_RUN_SYNC
- drm_intel_bo_map(batch->bo, FALSE);
- drm_intel_bo_unmap(batch->bo);
-#endif
- }
-
- if (fence) {
- ibatch->iws->fence_reference(ibatch->iws, fence, NULL);
-
-#ifdef INTEL_RUN_SYNC
- /* we run synced to GPU so just pass null */
- (*fence) = intel_drm_fence_create(NULL);
-#else
- (*fence) = intel_drm_fence_create(batch->bo);
-#endif
- }
-
- intel_drm_batchbuffer_reset(batch);
-}
-
-static void
-intel_drm_batchbuffer_destroy(struct intel_batchbuffer *ibatch)
-{
- struct intel_drm_batchbuffer *batch = intel_drm_batchbuffer(ibatch);
-
- if (batch->bo)
- drm_intel_bo_unreference(batch->bo);
-
-#ifndef INTEL_MAP_BATCHBUFFER
- FREE(batch->base.map);
-#endif
- FREE(batch);
-}
-
-void intel_drm_winsys_init_batchbuffer_functions(struct intel_drm_winsys *idws)
-{
- idws->base.batchbuffer_create = intel_drm_batchbuffer_create;
- idws->base.batchbuffer_reloc = intel_drm_batchbuffer_reloc;
- idws->base.batchbuffer_flush = intel_drm_batchbuffer_flush;
- idws->base.batchbuffer_destroy = intel_drm_batchbuffer_destroy;
-}
+++ /dev/null
-
-#include "intel_drm_winsys.h"
-#include "util/u_memory.h"
-
-#include "i915_drm.h"
-
-static struct intel_buffer *
-intel_drm_buffer_create(struct intel_winsys *iws,
- unsigned size, unsigned alignment,
- enum intel_buffer_type type)
-{
- struct intel_drm_buffer *buf = CALLOC_STRUCT(intel_drm_buffer);
- struct intel_drm_winsys *idws = intel_drm_winsys(iws);
- drm_intel_bufmgr *pool;
- char *name;
-
- if (!buf)
- return NULL;
-
- buf->magic = 0xDEAD1337;
- buf->flinked = FALSE;
- buf->flink = 0;
- buf->map_gtt = FALSE;
-
- if (type == INTEL_NEW_TEXTURE) {
- name = "gallium3d_texture";
- pool = idws->pools.gem;
- } else if (type == INTEL_NEW_VERTEX) {
- name = "gallium3d_vertex";
- pool = idws->pools.gem;
- buf->map_gtt = TRUE;
- } else if (type == INTEL_NEW_SCANOUT) {
- name = "gallium3d_scanout";
- pool = idws->pools.gem;
- buf->map_gtt = TRUE;
- } else {
- assert(0);
- name = "gallium3d_unknown";
- pool = idws->pools.gem;
- }
-
- buf->bo = drm_intel_bo_alloc(pool, name, size, alignment);
-
- if (!buf->bo)
- goto err;
-
- return (struct intel_buffer *)buf;
-
-err:
- assert(0);
- FREE(buf);
- return NULL;
-}
-
-static int
-intel_drm_buffer_set_fence_reg(struct intel_winsys *iws,
- struct intel_buffer *buffer,
- unsigned stride,
- enum intel_buffer_tile tile)
-{
- struct intel_drm_buffer *buf = intel_drm_buffer(buffer);
- assert(I915_TILING_NONE == INTEL_TILE_NONE);
- assert(I915_TILING_X == INTEL_TILE_X);
- assert(I915_TILING_Y == INTEL_TILE_Y);
-
- if (tile != INTEL_TILE_NONE) {
- assert(buf->map_count == 0);
- buf->map_gtt = TRUE;
- }
-
- return drm_intel_bo_set_tiling(buf->bo, &tile, stride);
-}
-
-static void *
-intel_drm_buffer_map(struct intel_winsys *iws,
- struct intel_buffer *buffer,
- boolean write)
-{
- struct intel_drm_buffer *buf = intel_drm_buffer(buffer);
- drm_intel_bo *bo = intel_bo(buffer);
- int ret = 0;
-
- assert(bo);
-
- if (buf->map_count)
- goto out;
-
- if (buf->map_gtt)
- ret = drm_intel_gem_bo_map_gtt(bo);
- else
- ret = drm_intel_bo_map(bo, write);
-
- buf->ptr = bo->virtual;
-
- assert(ret == 0);
-out:
- if (ret)
- return NULL;
-
- buf->map_count++;
- return buf->ptr;
-}
-
-static void
-intel_drm_buffer_unmap(struct intel_winsys *iws,
- struct intel_buffer *buffer)
-{
- struct intel_drm_buffer *buf = intel_drm_buffer(buffer);
-
- if (--buf->map_count)
- return;
-
- if (buf->map_gtt)
- drm_intel_gem_bo_unmap_gtt(intel_bo(buffer));
- else
- drm_intel_bo_unmap(intel_bo(buffer));
-}
-
-static int
-intel_drm_buffer_write(struct intel_winsys *iws,
- struct intel_buffer *buffer,
- size_t offset,
- size_t size,
- const void *data)
-{
- struct intel_drm_buffer *buf = intel_drm_buffer(buffer);
-
- return drm_intel_bo_subdata(buf->bo, offset, size, (void*)data);
-}
-
-static void
-intel_drm_buffer_destroy(struct intel_winsys *iws,
- struct intel_buffer *buffer)
-{
- drm_intel_bo_unreference(intel_bo(buffer));
-
-#ifdef DEBUG
- intel_drm_buffer(buffer)->magic = 0;
- intel_drm_buffer(buffer)->bo = NULL;
-#endif
-
- FREE(buffer);
-}
-
-void
-intel_drm_winsys_init_buffer_functions(struct intel_drm_winsys *idws)
-{
- idws->base.buffer_create = intel_drm_buffer_create;
- idws->base.buffer_set_fence_reg = intel_drm_buffer_set_fence_reg;
- idws->base.buffer_map = intel_drm_buffer_map;
- idws->base.buffer_unmap = intel_drm_buffer_unmap;
- idws->base.buffer_write = intel_drm_buffer_write;
- idws->base.buffer_destroy = intel_drm_buffer_destroy;
-}
+++ /dev/null
-
-#include "intel_drm_winsys.h"
-#include "util/u_memory.h"
-#include "pipe/p_refcnt.h"
-
-/**
- * Because gem does not have fence's we have to create our own fences.
- *
- * They work by keeping the batchbuffer around and checking if that has
- * been idled. If bo is NULL fence has expired.
- */
-struct intel_drm_fence
-{
- struct pipe_reference reference;
- drm_intel_bo *bo;
-};
-
-
-struct pipe_fence_handle *
-intel_drm_fence_create(drm_intel_bo *bo)
-{
- struct intel_drm_fence *fence = CALLOC_STRUCT(intel_drm_fence);
-
- pipe_reference_init(&fence->reference, 1);
- /* bo is null if fence already expired */
- if (bo) {
- drm_intel_bo_reference(bo);
- fence->bo = bo;
- }
-
- return (struct pipe_fence_handle *)fence;
-}
-
-static void
-intel_drm_fence_reference(struct intel_winsys *iws,
- struct pipe_fence_handle **ptr,
- struct pipe_fence_handle *fence)
-{
- struct intel_drm_fence *old = (struct intel_drm_fence *)*ptr;
- struct intel_drm_fence *f = (struct intel_drm_fence *)fence;
-
- if (pipe_reference((struct pipe_reference**)ptr, &f->reference)) {
- if (old->bo)
- drm_intel_bo_unreference(old->bo);
- FREE(old);
- }
-}
-
-static int
-intel_drm_fence_signalled(struct intel_winsys *iws,
- struct pipe_fence_handle *fence)
-{
- assert(0);
-
- return 0;
-}
-
-static int
-intel_drm_fence_finish(struct intel_winsys *iws,
- struct pipe_fence_handle *fence)
-{
- struct intel_drm_fence *f = (struct intel_drm_fence *)fence;
-
- /* fence already expired */
- if (!f->bo)
- return 0;
-
- drm_intel_bo_wait_rendering(f->bo);
- drm_intel_bo_unreference(f->bo);
- f->bo = NULL;
-
- return 0;
-}
-
-void
-intel_drm_winsys_init_fence_functions(struct intel_drm_winsys *idws)
-{
- idws->base.fence_reference = intel_drm_fence_reference;
- idws->base.fence_signalled = intel_drm_fence_signalled;
- idws->base.fence_finish = intel_drm_fence_finish;
-}
+++ /dev/null
-
-#ifndef INTEL_DRM_WINSYS_H
-#define INTEL_DRM_WINSYS_H
-
-#include "i915/intel_batchbuffer.h"
-
-#include "drm.h"
-#include "intel_bufmgr.h"
-
-
-/*
- * Winsys
- */
-
-
-struct intel_drm_winsys
-{
- struct intel_winsys base;
-
- boolean softpipe;
- boolean dump_cmd;
-
- int fd; /**< Drm file discriptor */
-
- unsigned id;
-
- size_t max_batch_size;
-
- struct {
- drm_intel_bufmgr *gem;
- } pools;
-};
-
-static INLINE struct intel_drm_winsys *
-intel_drm_winsys(struct intel_winsys *iws)
-{
- return (struct intel_drm_winsys *)iws;
-}
-
-struct intel_drm_winsys * intel_drm_winsys_create(int fd, unsigned pci_id);
-struct pipe_fence_handle * intel_drm_fence_create(drm_intel_bo *bo);
-
-void intel_drm_winsys_init_batchbuffer_functions(struct intel_drm_winsys *idws);
-void intel_drm_winsys_init_buffer_functions(struct intel_drm_winsys *idws);
-void intel_drm_winsys_init_fence_functions(struct intel_drm_winsys *idws);
-
-
-/*
- * Buffer
- */
-
-
-struct intel_drm_buffer {
- unsigned magic;
-
- drm_intel_bo *bo;
-
- void *ptr;
- unsigned map_count;
- boolean map_gtt;
-
- boolean flinked;
- unsigned flink;
-};
-
-static INLINE struct intel_drm_buffer *
-intel_drm_buffer(struct intel_buffer *buffer)
-{
- return (struct intel_drm_buffer *)buffer;
-}
-
-static INLINE drm_intel_bo *
-intel_bo(struct intel_buffer *buffer)
-{
- return intel_drm_buffer(buffer)->bo;
-}
-
-#endif
LIBS = \
$(TOP)/src/gallium/state_trackers/xorg/libxorgtracker.a \
- $(TOP)/src/gallium/winsys/drm/intel/gem/libinteldrm.a \
- $(TOP)/src/gallium/drivers/i915/libi915.a \
+ $(TOP)/src/gallium/winsys/drm/i965/gem/libi965drm.a \
+ $(TOP)/src/gallium/drivers/i965/libi965.a \
$(TOP)/src/gallium/drivers/trace/libtrace.a \
$(TOP)/src/gallium/drivers/softpipe/libsoftpipe.a \
$(GALLIUM_AUXILIARIES)
$(TARGET): $(OBJECTS) Makefile $(TOP)/src/gallium/state_trackers/xorg/libxorgtracker.a $(LIBS)
$(TOP)/bin/mklib -noprefix -o $@ \
$(OBJECTS) $(LIBS) $(shell pkg-config --libs libdrm) -ldrm_intel
clean:
rm -rf $(OBJECTS) $(TARGET)