LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/include \
- $(GALLIUM_TOP)/winsys/intel/drm
+ $(GALLIUM_TOP)/winsys/intel
LOCAL_SRC_FILES := $(C_SOURCES)
AM_CPPFLAGS = \
-Iinclude \
- -I$(top_srcdir)/src/gallium/winsys/intel/drm \
+ -I$(top_srcdir)/src/gallium/winsys/intel \
$(GALLIUM_CFLAGS)
AM_CFLAGS = \
/* in pairs */
assert(q->reg_read % 2 == 0);
- q->bo->map(q->bo, false);
- vals = q->bo->get_virtual(q->bo);
+ intel_bo_map(q->bo, false);
+ vals = intel_bo_get_virtual(q->bo);
for (i = 1; i < q->reg_read; i += 2)
depth_count += vals[i] - vals[i - 1];
- q->bo->unmap(q->bo);
+ intel_bo_unmap(q->bo);
/* accumulate so that the query can be resumed if wanted */
q->data.u64 += depth_count;
assert(q->reg_read == 1);
- q->bo->map(q->bo, false);
- vals = q->bo->get_virtual(q->bo);
+ intel_bo_map(q->bo, false);
+ vals = intel_bo_get_virtual(q->bo);
timestamp = vals[0];
- q->bo->unmap(q->bo);
+ intel_bo_unmap(q->bo);
q->data.u64 = timestamp_to_ns(timestamp);
q->reg_read = 0;
/* in pairs */
assert(q->reg_read % 2 == 0);
- q->bo->map(q->bo, false);
- vals = q->bo->get_virtual(q->bo);
+ intel_bo_map(q->bo, false);
+ vals = intel_bo_get_virtual(q->bo);
for (i = 1; i < q->reg_read; i += 2)
elapsed += vals[i] - vals[i - 1];
- q->bo->unmap(q->bo);
+ intel_bo_unmap(q->bo);
/* accumulate so that the query can be resumed if wanted */
q->data.u64 += timestamp_to_ns(elapsed);
p->invalidate_flags = ILO_3D_PIPELINE_INVALIDATE_ALL;
- p->workaround_bo = p->cp->winsys->alloc_buffer(p->cp->winsys,
+ p->workaround_bo = intel_winsys_alloc_buffer(p->cp->winsys,
"PIPE_CONTROL workaround", 4096, 0);
if (!p->workaround_bo) {
ilo_warn("failed to allocate PIPE_CONTROL workaround bo\n");
ilo_3d_pipeline_destroy(struct ilo_3d_pipeline *p)
{
if (p->workaround_bo)
- p->workaround_bo->unreference(p->workaround_bo);
+ intel_bo_unreference(p->workaround_bo);
FREE(p);
}
p->emit_draw(p, ilo, info);
ilo_cp_assert_no_implicit_flush(p->cp, false);
- err = ilo->winsys->check_aperture_space(ilo->winsys, &p->cp->bo, 1);
+ err = intel_winsys_check_aperture_space(ilo->winsys, &p->cp->bo, 1);
if (!err) {
success = true;
break;
init_brw(struct brw_context *brw, struct ilo_3d_pipeline *p)
{
brw->intel.gen = ILO_GEN_GET_MAJOR(p->dev->gen);
- brw->intel.batch.bo_dst.virtual = p->cp->bo->get_virtual(p->cp->bo);
+ brw->intel.batch.bo_dst.virtual = intel_bo_get_virtual(p->cp->bo);
brw->intel.batch.bo = &brw->intel.batch.bo_dst;
}
ilo_cp_dump(p->cp);
- err = p->cp->bo->map(p->cp->bo, false);
+ err = intel_bo_map(p->cp->bo, false);
if (!err) {
dump_3d_state(p);
- p->cp->bo->unmap(p->cp->bo);
+ intel_bo_unmap(p->cp->bo);
}
}
aper_check[0] = ilo->cp->bo;
aper_check[1] = dst->bo;
aper_check[2] = src->bo;
- if (ilo->winsys->check_aperture_space(ilo->winsys, aper_check, 3))
+ if (intel_winsys_check_aperture_space(ilo->winsys, aper_check, 3))
ilo_cp_flush(ilo->cp);
swctrl = 0x0;
aper_check[0] = ilo->cp->bo;
aper_check[1] = dst->bo;
aper_check[2] = src->bo;
- if (ilo->winsys->check_aperture_space(ilo->winsys, aper_check, 3))
+ if (intel_winsys_check_aperture_space(ilo->winsys, aper_check, 3))
ilo_cp_flush(ilo->cp);
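This aperture check is a recurring idiom in the blitter paths: collect every bo the upcoming commands will reference, ask the winsys whether they still fit alongside the current batch, and flush first if they might not. A hedged sketch of the same check wrapped in a hypothetical helper (make_room_for() is illustrative, not a function in the driver):

static void
make_room_for(struct ilo_context *ilo, struct intel_bo *dst, struct intel_bo *src)
{
   struct intel_bo *aper_check[3];

   /* the batch buffer itself always counts against the aperture */
   aper_check[0] = ilo->cp->bo;
   aper_check[1] = dst;
   aper_check[2] = src;

   /* non-zero means the set may not fit; submit what we have and retry */
   if (intel_winsys_check_aperture_space(ilo->winsys, aper_check, 3))
      ilo_cp_flush(ilo->cp);
}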
while (size) {
/* make room if necessary */
aper_check[0] = ilo->cp->bo;
aper_check[1] = tex->bo;
- if (ilo->winsys->check_aperture_space(ilo->winsys, aper_check, 2))
+ if (intel_winsys_check_aperture_space(ilo->winsys, aper_check, 2))
ilo_cp_flush(ilo->cp);
gen6_XY_COLOR_BLT(ilo,
struct ilo_context *ilo = ilo_context(data);
if (ilo->last_cp_bo)
- ilo->last_cp_bo->unreference(ilo->last_cp_bo);
+ intel_bo_unreference(ilo->last_cp_bo);
/* remember the just flushed bo, on which fences could wait */
ilo->last_cp_bo = cp->bo;
- ilo->last_cp_bo->reference(ilo->last_cp_bo);
+ intel_bo_reference(ilo->last_cp_bo);
ilo_3d_cp_flushed(ilo->hw3d);
}
fence->bo = ilo->cp->bo;
if (fence->bo)
- fence->bo->reference(fence->bo);
+ intel_bo_reference(fence->bo);
}
*f = (struct pipe_fence_handle *) fence;
ilo_cleanup_states(ilo);
if (ilo->last_cp_bo)
- ilo->last_cp_bo->unreference(ilo->last_cp_bo);
+ intel_bo_unreference(ilo->last_cp_bo);
util_slab_destroy(&ilo->transfer_mempool);
{
ilo_printf("dumping %d bytes\n", cp->used * 4);
if (cp->used)
- cp->winsys->decode_commands(cp->winsys, cp->bo, cp->used * 4);
+ intel_winsys_decode_commands(cp->winsys, cp->bo, cp->used * 4);
}
/**
jmp->used = cp->used;
jmp->stolen = cp->stolen;
/* save reloc count to rewind ilo_cp_write_bo() */
- jmp->reloc_count = cp->bo->get_reloc_count(cp->bo);
+ jmp->reloc_count = intel_bo_get_reloc_count(cp->bo);
}
/**
cp->size = jmp->size;
cp->used = jmp->used;
cp->stolen = jmp->stolen;
- cp->bo->clear_relocs(cp->bo, jmp->reloc_count);
+ intel_bo_clear_relocs(cp->bo, jmp->reloc_count);
}
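The two hunks above form a save/rewind pair: one records how much of the batch is used and how many relocations exist, the other rolls both back, with intel_bo_clear_relocs() discarding the relocations added after the snapshot (undoing ilo_cp_write_bo(), as the comment notes). A minimal sketch of the idiom with illustrative names (the snapshot struct and helpers are assumptions, not the driver's):

struct cp_snapshot {
   int used;
   int stolen;
   int reloc_count;
};

static void
snapshot_save(struct ilo_cp *cp, struct cp_snapshot *snap)
{
   snap->used = cp->used;
   snap->stolen = cp->stolen;
   /* remember how many relocations the batch bo holds right now */
   snap->reloc_count = intel_bo_get_reloc_count(cp->bo);
}

static void
snapshot_rewind(struct ilo_cp *cp, const struct cp_snapshot *snap)
{
   cp->used = snap->used;
   cp->stolen = snap->stolen;
   /* drop every relocation emitted since snapshot_save() */
   intel_bo_clear_relocs(cp->bo, snap->reloc_count);
}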
/**
int err;
if (!cp->sys) {
- cp->bo->unmap(cp->bo);
+ intel_bo_unmap(cp->bo);
return 0;
}
- err = cp->bo->pwrite(cp->bo, 0, cp->used * 4, cp->ptr);
+ err = intel_bo_pwrite(cp->bo, 0, cp->used * 4, cp->ptr);
if (likely(!err && cp->stolen)) {
const int offset = cp->bo_size - cp->stolen;
- err = cp->bo->pwrite(cp->bo, offset * 4,
+ err = intel_bo_pwrite(cp->bo, offset * 4,
cp->stolen * 4, &cp->ptr[offset]);
}
* allocate the new bo before unreferencing the old one so that they
* won't point at the same address, which is needed for jmpbuf
*/
- bo = cp->winsys->alloc_buffer(cp->winsys,
+ bo = intel_winsys_alloc_buffer(cp->winsys,
"batch buffer", cp->bo_size * 4, 0);
if (unlikely(!bo)) {
/* reuse the old one */
bo = cp->bo;
- bo->reference(bo);
+ intel_bo_reference(bo);
}
if (cp->bo)
- cp->bo->unreference(cp->bo);
+ intel_bo_unreference(cp->bo);
cp->bo = bo;
if (!cp->sys) {
- cp->bo->map(cp->bo, true);
- cp->ptr = cp->bo->get_virtual(cp->bo);
+ intel_bo_map(cp->bo, true);
+ cp->ptr = intel_bo_get_virtual(cp->bo);
}
}
flags |= cp->one_off_flags;
if (likely(do_exec))
- err = cp->bo->exec(cp->bo, cp->used * 4, ctx, flags);
+ err = intel_bo_exec(cp->bo, cp->used * 4, ctx, flags);
else
err = 0;
{
if (cp->bo) {
if (!cp->sys)
- cp->bo->unmap(cp->bo);
+ intel_bo_unmap(cp->bo);
- cp->bo->unreference(cp->bo);
+ intel_bo_unreference(cp->bo);
}
if (cp->render_ctx)
- cp->winsys->destroy_context(cp->winsys, cp->render_ctx);
+ intel_winsys_destroy_context(cp->winsys, cp->render_ctx);
FREE(cp->sys);
FREE(cp);
return NULL;
cp->winsys = winsys;
- cp->render_ctx = winsys->create_context(winsys);
+ cp->render_ctx = intel_winsys_create_context(winsys);
cp->ring = ILO_CP_RING_RENDER;
cp->no_implicit_flush = false;
uint32_t read_domains, uint32_t write_domain)
{
if (bo) {
- cp->bo->emit_reloc(cp->bo, cp->cmd_cur * 4,
+ intel_bo_emit_reloc(cp->bo, cp->cmd_cur * 4,
bo, val, read_domains, write_domain);
- ilo_cp_write(cp, val + bo->get_offset(bo));
+ ilo_cp_write(cp, val + intel_bo_get_offset(bo));
}
else {
ilo_cp_write(cp, val);
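Writing a bo address into the command stream is a two-step pattern: intel_bo_emit_reloc() records the relocation so the kernel can patch the dword if the target bo moves at execbuf time, while the dword itself is filled with the target's presumed offset plus the delta so the batch is already correct when nothing moves. A hedged sketch of the same pattern as a hypothetical helper (emit_bo_address() and the domain choice are illustrative):

static void
emit_bo_address(struct ilo_cp *cp, struct intel_bo *target_bo, uint32_t delta)
{
   /* record the relocation at the current command dword */
   intel_bo_emit_reloc(cp->bo, cp->cmd_cur * 4, target_bo, delta,
                       INTEL_DOMAIN_RENDER, INTEL_DOMAIN_RENDER);

   /* write the presumed address; the kernel rewrites it only if needed */
   ilo_cp_write(cp, intel_bo_get_offset(target_bo) + delta);
}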
struct ilo_query *q = ilo_query(query);
if (q->bo)
- q->bo->unreference(q->bo);
+ intel_bo_unreference(q->bo);
FREE(q);
}
return false;
if (q->bo) {
- if (ilo->cp->bo->references(ilo->cp->bo, q->bo))
+ if (intel_bo_references(ilo->cp->bo, q->bo))
ilo_cp_flush(ilo->cp);
if (!wait && intel_bo_is_busy(q->bo))
const int size = reg_total * sizeof(uint64_t);
if (q->bo)
- q->bo->unreference(q->bo);
+ intel_bo_unreference(q->bo);
- q->bo = winsys->alloc_buffer(winsys, name, size, 0);
+ q->bo = intel_winsys_alloc_buffer(winsys, name, size, 0);
q->reg_total = (q->bo) ? reg_total : 0;
}
}
if (handle) {
- bo = is->winsys->import_handle(is->winsys, name,
+ bo = intel_winsys_import_handle(is->winsys, name,
tex->bo_width, tex->bo_height, tex->bo_cpp, handle);
}
else {
- bo = is->winsys->alloc(is->winsys, name,
+ bo = intel_winsys_alloc(is->winsys, name,
tex->bo_width, tex->bo_height, tex->bo_cpp,
tex->tiling, tex->bo_flags);
}
return false;
if (tex->bo)
- tex->bo->unreference(tex->bo);
+ intel_bo_unreference(tex->bo);
tex->bo = bo;
-
- /* winsys may decide to use a different tiling */
- tex->tiling = tex->bo->get_tiling(tex->bo);
- tex->bo_stride = tex->bo->get_pitch(tex->bo);
+ tex->tiling = intel_bo_get_tiling(bo);
+ tex->bo_stride = intel_bo_get_pitch(bo);
return true;
}
if (tex->separate_s8)
tex_destroy(tex->separate_s8);
- tex->bo->unreference(tex->bo);
+ intel_bo_unreference(tex->bo);
tex_free_slices(tex);
FREE(tex);
}
{
int err;
- err = tex->bo->export_handle(tex->bo, handle);
+ err = intel_bo_export_handle(tex->bo, handle);
return !err;
}
break;
}
- bo = is->winsys->alloc_buffer(is->winsys,
+ bo = intel_winsys_alloc_buffer(is->winsys,
name, buf->bo_size, buf->bo_flags);
if (!bo)
return false;
if (buf->bo)
- buf->bo->unreference(buf->bo);
+ intel_bo_unreference(buf->bo);
buf->bo = bo;
static void
buf_destroy(struct ilo_buffer *buf)
{
- buf->bo->unreference(buf->bo);
+ intel_bo_unreference(buf->bo);
FREE(buf);
}
uint32_t dw[2];
} timestamp;
- is->winsys->read_reg(is->winsys, TIMESTAMP, &timestamp.val);
+ intel_winsys_read_reg(is->winsys, TIMESTAMP, &timestamp.val);
/*
* From the Ivy Bridge PRM, volume 1 part 3, page 107:
struct ilo_fence *old = *ptr;
if (old->bo)
- old->bo->unreference(old->bo);
+ intel_bo_unreference(old->bo);
FREE(old);
}
/* mark signalled if the bo is idle */
if (fence->bo && !intel_bo_is_busy(fence->bo)) {
- fence->bo->unreference(fence->bo);
+ intel_bo_unreference(fence->bo);
fence->bo = NULL;
}
return true;
/* wait and see if it returns error */
- if (fence->bo->wait(fence->bo, wait_timeout))
+ if (intel_bo_wait(fence->bo, wait_timeout))
return false;
/* mark signalled */
- fence->bo->unreference(fence->bo);
+ intel_bo_unreference(fence->bo);
fence->bo = NULL;
return true;
struct ilo_screen *is = ilo_screen(screen);
/* as it seems, winsys is owned by the screen */
- is->winsys->destroy(is->winsys);
+ intel_winsys_destroy(is->winsys);
FREE(is);
}
is->winsys = ws;
- is->winsys->enable_reuse(is->winsys);
+ intel_winsys_enable_reuse(is->winsys);
- info = is->winsys->get_info(is->winsys);
+ info = intel_winsys_get_info(is->winsys);
if (!init_dev(&is->dev, info)) {
FREE(is);
return NULL;
ilo_shader_cache_reset(struct ilo_shader_cache *shc)
{
if (shc->bo)
- shc->bo->unreference(shc->bo);
+ intel_bo_unreference(shc->bo);
- shc->bo = shc->winsys->alloc_buffer(shc->winsys,
+ shc->bo = intel_winsys_alloc_buffer(shc->winsys,
"shader cache", shc->size, 0);
shc->busy = false;
shc->cur = 0;
ilo_shader_cache_destroy(struct ilo_shader_cache *shc)
{
if (shc->bo)
- shc->bo->unreference(shc->bo);
+ intel_bo_unreference(shc->bo);
FREE(shc);
}
if (shaders[i]->cache_seqno != shc->seqno) {
/* kernels must be aligned to 64-byte */
shc->cur = align(shc->cur, 64);
- shc->bo->pwrite(shc->bo, shc->cur,
+ intel_bo_pwrite(shc->bo, shc->cur,
shaders[i]->kernel_size, shaders[i]->kernel);
shaders[i]->cache_seqno = shc->seqno;
static bool
is_bo_busy(struct ilo_context *ilo, struct intel_bo *bo, bool *need_flush)
{
- const bool referenced = ilo->cp->bo->references(ilo->cp->bo, bo);
+ const bool referenced = intel_bo_references(ilo->cp->bo, bo);
if (need_flush)
*need_flush = referenced;
switch (xfer->method) {
case ILO_TRANSFER_MAP_CPU:
- err = bo->map(bo, (xfer->base.usage & PIPE_TRANSFER_WRITE));
+ err = intel_bo_map(bo, (xfer->base.usage & PIPE_TRANSFER_WRITE));
break;
case ILO_TRANSFER_MAP_GTT:
- err = bo->map_gtt(bo);
+ err = intel_bo_map_gtt(bo);
break;
case ILO_TRANSFER_MAP_UNSYNC:
- err = bo->map_unsynchronized(bo);
+ err = intel_bo_map_unsynchronized(bo);
break;
default:
assert(!"unknown mapping method");
{
const bool swizzle = ilo->dev->has_address_swizzling;
const struct pipe_box *box = &xfer->base.box;
- const uint8_t *src = tex->bo->get_virtual(tex->bo);
+ const uint8_t *src = intel_bo_get_virtual(tex->bo);
tex_tile_offset_func tile_offset;
unsigned tiles_per_row;
int slice;
if (tex->separate_s8) {
struct ilo_texture *s8_tex = tex->separate_s8;
- const uint8_t *s8_src = s8_tex->bo->get_virtual(s8_tex->bo);
+ const uint8_t *s8_src = intel_bo_get_virtual(s8_tex->bo);
tex_tile_offset_func s8_tile_offset;
unsigned s8_tiles_per_row;
int dst_cpp, dst_s8_pos, src_cpp_used;
{
const bool swizzle = ilo->dev->has_address_swizzling;
const struct pipe_box *box = &xfer->base.box;
- uint8_t *dst = tex->bo->get_virtual(tex->bo);
+ uint8_t *dst = intel_bo_get_virtual(tex->bo);
tex_tile_offset_func tile_offset;
unsigned tiles_per_row;
int slice;
if (tex->separate_s8) {
struct ilo_texture *s8_tex = tex->separate_s8;
- uint8_t *s8_dst = s8_tex->bo->get_virtual(s8_tex->bo);
+ uint8_t *s8_dst = intel_bo_get_virtual(s8_tex->bo);
tex_tile_offset_func s8_tile_offset;
unsigned s8_tiles_per_row;
int src_cpp, src_s8_pos, dst_cpp_used;
void *dst;
int slice;
- dst = tex->bo->get_virtual(tex->bo);
+ dst = intel_bo_get_virtual(tex->bo);
dst += tex_get_box_offset(tex, xfer->base.level, box);
/* slice stride is not always available */
int err;
if (prefer_cpu && (tex->tiling == INTEL_TILING_NONE || !linear_view))
- err = tex->bo->map(tex->bo, !for_read_back);
+ err = intel_bo_map(tex->bo, !for_read_back);
else
- err = tex->bo->map_gtt(tex->bo);
+ err = intel_bo_map_gtt(tex->bo);
if (!tex->separate_s8)
return !err;
- err = tex->separate_s8->bo->map(tex->separate_s8->bo, !for_read_back);
+ err = intel_bo_map(tex->separate_s8->bo, !for_read_back);
if (err)
- tex->bo->unmap(tex->bo);
+ intel_bo_unmap(tex->bo);
return !err;
}
const struct ilo_texture *tex)
{
if (tex->separate_s8)
- tex->separate_s8->bo->unmap(tex->separate_s8->bo);
+ intel_bo_unmap(tex->separate_s8->bo);
- tex->bo->unmap(tex->bo);
+ intel_bo_unmap(tex->bo);
}
static void
struct ilo_texture *tex,
struct ilo_transfer *xfer)
{
- tex->bo->unmap(tex->bo);
+ intel_bo_unmap(tex->bo);
}
static bool
else
xfer->base.layer_stride = 0;
- xfer->ptr = tex->bo->get_virtual(tex->bo);
+ xfer->ptr = intel_bo_get_virtual(tex->bo);
xfer->ptr += tex_get_box_offset(tex, xfer->base.level, &xfer->base.box);
return true;
xfer->base.stride = 0;
xfer->base.layer_stride = 0;
- xfer->ptr = buf->bo->get_virtual(buf->bo);
+ xfer->ptr = intel_bo_get_virtual(buf->bo);
xfer->ptr += xfer->base.box.x;
return true;
{
struct ilo_buffer *buf = ilo_buffer(xfer->base.resource);
- buf->bo->unmap(buf->bo);
+ intel_bo_unmap(buf->bo);
}
static void
ilo_cp_flush(ilo->cp);
}
- buf->bo->pwrite(buf->bo, offset, size, data);
+ intel_bo_pwrite(buf->bo, offset, size, data);
}
static void
#include "state_tracker/drm_driver.h"
#include "target-helpers/inline_debug_helper.h"
-#include "intel/drm/intel_drm_public.h"
-#include "intel/drm/intel_winsys.h"
+#include "intel/intel_winsys.h"
#include "ilo/ilo_public.h"
static struct pipe_screen *
struct intel_winsys *iws;
struct pipe_screen *screen;
- iws = intel_drm_winsys_create(fd);
+ iws = intel_winsys_create_for_fd(fd);
if (!iws)
return NULL;
screen = ilo_screen_create(iws);
if (!screen) {
- iws->destroy(iws);
+ intel_winsys_destroy(iws);
return NULL;
}
#include "i915/i915_public.h"
#include "target-helpers/inline_wrapper_sw_helper.h"
/* for ilo */
-#include "intel/drm/intel_drm_public.h"
+#include "intel/intel_winsys.h"
#include "ilo/ilo_public.h"
/* for nouveau */
#include "nouveau/drm/nouveau_drm_public.h"
struct intel_winsys *iws;
struct pipe_screen *screen;
- iws = intel_drm_winsys_create(fd);
+ iws = intel_winsys_create_for_fd(fd);
if (!iws)
return NULL;
+++ /dev/null
-/*
- * Mesa 3-D graphics library
- *
- * Copyright (C) 2012-2013 LunarG, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Chia-I Wu <olv@lunarg.com>
- */
-
-#ifndef INTEL_DRM_PUBLIC_H
-#define INTEL_DRM_PUBLIC_H
-
-struct intel_winsys;
-
-struct intel_winsys *intel_drm_winsys_create(int fd);
-
-#endif /* INTEL_DRM_PUBLIC_H */
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_debug.h"
-#include "intel_drm_public.h"
-#include "intel_winsys.h"
+#include "../intel_winsys.h"
#define BATCH_SZ (8192 * sizeof(uint32_t))
-struct intel_drm_winsys {
- struct intel_winsys base;
-
+struct intel_winsys {
int fd;
drm_intel_bufmgr *bufmgr;
struct drm_intel_decode *decode;
int array_size;
};
-struct intel_drm_bo {
- struct intel_bo base;
+struct intel_bo {
struct pipe_reference reference;
drm_intel_bo *bo;
unsigned long pitch;
};
-static inline struct intel_drm_winsys *
-intel_drm_winsys(struct intel_winsys *ws)
-{
- return (struct intel_drm_winsys *) ws;
-}
-
-static inline struct intel_drm_bo *
-intel_drm_bo(struct intel_bo *bo)
-{
- return (struct intel_drm_bo *) bo;
-}
-
-static int
-intel_drm_bo_export_handle(struct intel_bo *bo, struct winsys_handle *handle)
+int
+intel_bo_export_handle(struct intel_bo *bo, struct winsys_handle *handle)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
int err = 0;
switch (handle->type) {
{
uint32_t name;
- err = drm_intel_bo_flink(drm_bo->bo, &name);
+ err = drm_intel_bo_flink(bo->bo, &name);
if (!err)
handle->handle = name;
}
break;
case DRM_API_HANDLE_TYPE_KMS:
- handle->handle = drm_bo->bo->handle;
+ handle->handle = bo->bo->handle;
break;
#if 0
case DRM_API_HANDLE_TYPE_PRIME:
{
int fd;
- err = drm_intel_bo_gem_export_to_prime(drm_bo->bo, &fd);
+ err = drm_intel_bo_gem_export_to_prime(bo->bo, &fd);
if (!err)
handle->handle = fd;
}
if (err)
return err;
- handle->stride = drm_bo->pitch;
+ handle->stride = bo->pitch;
return 0;
}
-static int
-intel_drm_bo_exec(struct intel_bo *bo, int used,
- struct intel_context *ctx, unsigned long flags)
+int
+intel_bo_exec(struct intel_bo *bo, int used,
+ struct intel_context *ctx, unsigned long flags)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
-
if (ctx) {
- return drm_intel_gem_bo_context_exec(drm_bo->bo,
+ return drm_intel_gem_bo_context_exec(bo->bo,
(drm_intel_context *) ctx, used, flags);
}
else {
- return drm_intel_bo_mrb_exec(drm_bo->bo, used, NULL, 0, 0, flags);
+ return drm_intel_bo_mrb_exec(bo->bo, used, NULL, 0, 0, flags);
}
}
-static int
-intel_drm_bo_wait(struct intel_bo *bo, int64_t timeout)
+int
+intel_bo_wait(struct intel_bo *bo, int64_t timeout)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
int err;
- err = drm_intel_gem_bo_wait(drm_bo->bo, timeout);
+ err = drm_intel_gem_bo_wait(bo->bo, timeout);
/* consider the bo idle on errors */
if (err && err != -ETIME)
err = 0;
return err;
}
-static int
-intel_drm_bo_emit_reloc(struct intel_bo *bo, uint32_t offset,
- struct intel_bo *target_bo, uint32_t target_offset,
- uint32_t read_domains, uint32_t write_domain)
+int
+intel_bo_emit_reloc(struct intel_bo *bo, uint32_t offset,
+ struct intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
- struct intel_drm_bo *target = intel_drm_bo(target_bo);
-
- return drm_intel_bo_emit_reloc(drm_bo->bo, offset,
- target->bo, target_offset, read_domains, write_domain);
+ return drm_intel_bo_emit_reloc(bo->bo, offset,
+ target_bo->bo, target_offset, read_domains, write_domain);
}
-static int
-intel_drm_bo_get_reloc_count(struct intel_bo *bo)
+int
+intel_bo_get_reloc_count(struct intel_bo *bo)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
- return drm_intel_gem_bo_get_reloc_count(drm_bo->bo);
+ return drm_intel_gem_bo_get_reloc_count(bo->bo);
}
-static void
-intel_drm_bo_clear_relocs(struct intel_bo *bo, int start)
+void
+intel_bo_clear_relocs(struct intel_bo *bo, int start)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
- return drm_intel_gem_bo_clear_relocs(drm_bo->bo, start);
+ return drm_intel_gem_bo_clear_relocs(bo->bo, start);
}
-static bool
-intel_drm_bo_references(struct intel_bo *bo, struct intel_bo *target_bo)
+bool
+intel_bo_references(struct intel_bo *bo, struct intel_bo *target_bo)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
- struct intel_drm_bo *target = intel_drm_bo(target_bo);
-
- return drm_intel_bo_references(drm_bo->bo, target->bo);
+ return drm_intel_bo_references(bo->bo, target_bo->bo);
}
-static int
-intel_drm_bo_map(struct intel_bo *bo, bool write_enable)
+int
+intel_bo_map(struct intel_bo *bo, bool write_enable)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
- return drm_intel_bo_map(drm_bo->bo, write_enable);
+ return drm_intel_bo_map(bo->bo, write_enable);
}
-static int
-intel_drm_bo_map_gtt(struct intel_bo *bo)
+int
+intel_bo_map_gtt(struct intel_bo *bo)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
- return drm_intel_gem_bo_map_gtt(drm_bo->bo);
+ return drm_intel_gem_bo_map_gtt(bo->bo);
}
-static int
-intel_drm_bo_map_unsynchronized(struct intel_bo *bo)
+int
+intel_bo_map_unsynchronized(struct intel_bo *bo)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
- return drm_intel_gem_bo_map_unsynchronized(drm_bo->bo);
+ return drm_intel_gem_bo_map_unsynchronized(bo->bo);
}
-static int
-intel_drm_bo_unmap(struct intel_bo *bo)
+int
+intel_bo_unmap(struct intel_bo *bo)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
- return drm_intel_bo_unmap(drm_bo->bo);
+ return drm_intel_bo_unmap(bo->bo);
}
-static int
-intel_drm_bo_pwrite(struct intel_bo *bo, unsigned long offset,
- unsigned long size, const void *data)
+int
+intel_bo_pwrite(struct intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
- return drm_intel_bo_subdata(drm_bo->bo, offset, size, data);
+ return drm_intel_bo_subdata(bo->bo, offset, size, data);
}
-static int
-intel_drm_bo_pread(struct intel_bo *bo, unsigned long offset,
- unsigned long size, void *data)
+int
+intel_bo_pread(struct intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
- return drm_intel_bo_get_subdata(drm_bo->bo, offset, size, data);
+ return drm_intel_bo_get_subdata(bo->bo, offset, size, data);
}
-static unsigned long
-intel_drm_bo_get_size(struct intel_bo *bo)
+unsigned long
+intel_bo_get_size(const struct intel_bo *bo)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
- return drm_bo->bo->size;
+ return bo->bo->size;
}
-static unsigned long
-intel_drm_bo_get_offset(struct intel_bo *bo)
+unsigned long
+intel_bo_get_offset(const struct intel_bo *bo)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
- return drm_bo->bo->offset;
+ return bo->bo->offset;
}
-static void *
-intel_drm_bo_get_virtual(struct intel_bo *bo)
+void *
+intel_bo_get_virtual(const struct intel_bo *bo)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
- return drm_bo->bo->virtual;
+ return bo->bo->virtual;
}
-static enum intel_tiling_mode
-intel_drm_bo_get_tiling(struct intel_bo *bo)
+enum intel_tiling_mode
+intel_bo_get_tiling(const struct intel_bo *bo)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
- return drm_bo->tiling;
+ return bo->tiling;
}
-static unsigned long
-intel_drm_bo_get_pitch(struct intel_bo *bo)
+unsigned long
+intel_bo_get_pitch(const struct intel_bo *bo)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
- return drm_bo->pitch;
+ return bo->pitch;
}
-static void
-intel_drm_bo_reference(struct intel_bo *bo)
+void
+intel_bo_reference(struct intel_bo *bo)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
-
- pipe_reference(NULL, &drm_bo->reference);
+ pipe_reference(NULL, &bo->reference);
}
-static void
-intel_drm_bo_unreference(struct intel_bo *bo)
+void
+intel_bo_unreference(struct intel_bo *bo)
{
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
-
- if (pipe_reference(&drm_bo->reference, NULL)) {
- drm_intel_bo_unreference(drm_bo->bo);
- FREE(drm_bo);
+ if (pipe_reference(&bo->reference, NULL)) {
+ drm_intel_bo_unreference(bo->bo);
+ FREE(bo);
}
}
-static struct intel_drm_bo *
+static struct intel_bo *
create_bo(void)
{
- struct intel_drm_bo *drm_bo;
+ struct intel_bo *bo;
- drm_bo = CALLOC_STRUCT(intel_drm_bo);
- if (!drm_bo)
+ bo = CALLOC_STRUCT(intel_bo);
+ if (!bo)
return NULL;
- pipe_reference_init(&drm_bo->reference, 1);
- drm_bo->tiling = INTEL_TILING_NONE;
- drm_bo->pitch = 0;
+ pipe_reference_init(&bo->reference, 1);
+ bo->tiling = INTEL_TILING_NONE;
+ bo->pitch = 0;
- drm_bo->base.reference = intel_drm_bo_reference;
- drm_bo->base.unreference = intel_drm_bo_unreference;
-
- drm_bo->base.get_size = intel_drm_bo_get_size;
- drm_bo->base.get_offset = intel_drm_bo_get_offset;
- drm_bo->base.get_virtual = intel_drm_bo_get_virtual;
- drm_bo->base.get_tiling = intel_drm_bo_get_tiling;
- drm_bo->base.get_pitch = intel_drm_bo_get_pitch;
-
- drm_bo->base.map = intel_drm_bo_map;
- drm_bo->base.map_gtt = intel_drm_bo_map_gtt;
- drm_bo->base.map_unsynchronized = intel_drm_bo_map_unsynchronized;
- drm_bo->base.unmap = intel_drm_bo_unmap;
-
- drm_bo->base.pwrite = intel_drm_bo_pwrite;
- drm_bo->base.pread = intel_drm_bo_pread;
-
- drm_bo->base.emit_reloc = intel_drm_bo_emit_reloc;
- drm_bo->base.get_reloc_count = intel_drm_bo_get_reloc_count;
- drm_bo->base.clear_relocs = intel_drm_bo_clear_relocs;
- drm_bo->base.references = intel_drm_bo_references;
-
- drm_bo->base.exec = intel_drm_bo_exec;
- drm_bo->base.wait = intel_drm_bo_wait;
-
- drm_bo->base.export_handle = intel_drm_bo_export_handle;
-
- return drm_bo;
+ return bo;
}
-static struct intel_bo *
-intel_drm_winsys_alloc(struct intel_winsys *ws,
- const char *name,
- int width, int height, int cpp,
- enum intel_tiling_mode tiling,
- unsigned long flags)
-{
- struct intel_drm_winsys *drm_ws = intel_drm_winsys(ws);
- struct intel_drm_bo *drm_bo;
+struct intel_bo *
+intel_winsys_alloc(struct intel_winsys *winsys,
+ const char *name,
+ int width, int height, int cpp,
+ enum intel_tiling_mode tiling,
+ unsigned long flags)
+{
+ struct intel_bo *bo;
uint32_t real_tiling = tiling;
unsigned long pitch;
- drm_bo = create_bo();
- if (!drm_bo)
+ bo = create_bo();
+ if (!bo)
return NULL;
- drm_bo->bo = drm_intel_bo_alloc_tiled(drm_ws->bufmgr, name,
+ bo->bo = drm_intel_bo_alloc_tiled(winsys->bufmgr, name,
width, height, cpp, &real_tiling, &pitch, flags);
- if (!drm_bo->bo) {
- FREE(drm_bo);
+ if (!bo->bo) {
+ FREE(bo);
return NULL;
}
- drm_bo->tiling = real_tiling;
- drm_bo->pitch = pitch;
+ bo->tiling = real_tiling;
+ bo->pitch = pitch;
- return &drm_bo->base;
+ return bo;
}
-static struct intel_bo *
-intel_drm_winsys_alloc_buffer(struct intel_winsys *ws,
- const char *name,
- unsigned long size,
- unsigned long flags)
+struct intel_bo *
+intel_winsys_alloc_buffer(struct intel_winsys *winsys,
+ const char *name,
+ unsigned long size,
+ unsigned long flags)
{
const int alignment = 4096; /* always page-aligned */
- struct intel_drm_winsys *drm_ws = intel_drm_winsys(ws);
- struct intel_drm_bo *drm_bo;
+ struct intel_bo *bo;
- drm_bo = create_bo();
- if (!drm_bo)
+ bo = create_bo();
+ if (!bo)
return NULL;
if (flags == INTEL_ALLOC_FOR_RENDER) {
- drm_bo->bo = drm_intel_bo_alloc_for_render(drm_ws->bufmgr,
+ bo->bo = drm_intel_bo_alloc_for_render(winsys->bufmgr,
name, size, alignment);
}
else {
assert(!flags);
- drm_bo->bo = drm_intel_bo_alloc(drm_ws->bufmgr, name, size, alignment);
+ bo->bo = drm_intel_bo_alloc(winsys->bufmgr, name, size, alignment);
}
- if (!drm_bo->bo) {
- FREE(drm_bo);
+ if (!bo->bo) {
+ FREE(bo);
return NULL;
}
- return &drm_bo->base;
+ return bo;
}
-static struct intel_bo *
-intel_drm_winsys_import_handle(struct intel_winsys *ws,
- const char *name,
- int width, int height, int cpp,
- const struct winsys_handle *handle)
+struct intel_bo *
+intel_winsys_import_handle(struct intel_winsys *winsys,
+ const char *name,
+ int width, int height, int cpp,
+ const struct winsys_handle *handle)
{
- struct intel_drm_winsys *drm_ws = intel_drm_winsys(ws);
- struct intel_drm_bo *drm_bo;
+ struct intel_bo *bo;
const unsigned long pitch = handle->stride;
uint32_t tiling, swizzle;
int err;
- drm_bo = create_bo();
- if (!drm_bo)
+ bo = create_bo();
+ if (!bo)
return NULL;
switch (handle->type) {
case DRM_API_HANDLE_TYPE_SHARED:
{
const uint32_t gem_name = handle->handle;
- drm_bo->bo = drm_intel_bo_gem_create_from_name(drm_ws->bufmgr,
+ bo->bo = drm_intel_bo_gem_create_from_name(winsys->bufmgr,
name, gem_name);
}
break;
case DRM_API_HANDLE_TYPE_PRIME:
{
const int fd = (int) handle->handle;
- drm_bo->bo = drm_intel_bo_gem_create_from_prime(drm_ws->bufmgr,
+ bo->bo = drm_intel_bo_gem_create_from_prime(winsys->bufmgr,
fd, height * pitch);
}
break;
break;
}
- if (!drm_bo->bo) {
- FREE(drm_bo);
+ if (!bo->bo) {
+ FREE(bo);
return NULL;
}
- err = drm_intel_bo_get_tiling(drm_bo->bo, &tiling, &swizzle);
+ err = drm_intel_bo_get_tiling(bo->bo, &tiling, &swizzle);
if (err) {
- drm_intel_bo_unreference(drm_bo->bo);
- FREE(drm_bo);
+ drm_intel_bo_unreference(bo->bo);
+ FREE(bo);
return NULL;
}
- drm_bo->tiling = tiling;
- drm_bo->pitch = pitch;
+ bo->tiling = tiling;
+ bo->pitch = pitch;
- return &drm_bo->base;
+ return bo;
}
-static int
-intel_drm_winsys_check_aperture_space(struct intel_winsys *ws,
- struct intel_bo **bo_array,
- int count)
+int
+intel_winsys_check_aperture_space(struct intel_winsys *winsys,
+ struct intel_bo **bo_array,
+ int count)
{
- struct intel_drm_winsys *drm_ws = intel_drm_winsys(ws);
drm_intel_bo *drm_bo_array[8];
int i;
if (likely(count <= Elements(drm_bo_array))) {
for (i = 0; i < count; i++)
- drm_bo_array[i] = ((struct intel_drm_bo *) bo_array[i])->bo;
+ drm_bo_array[i] = bo_array[i]->bo;
return drm_intel_bufmgr_check_aperture_space(drm_bo_array, count);
}
/* resize bo array if necessary */
- if (drm_ws->array_size < count) {
- void *tmp = MALLOC(count * sizeof(*drm_ws->drm_bo_array));
+ if (winsys->array_size < count) {
+ void *tmp = MALLOC(count * sizeof(*winsys->drm_bo_array));
if (!tmp)
return -1;
- FREE(drm_ws->drm_bo_array);
- drm_ws->drm_bo_array = tmp;
- drm_ws->array_size = count;
+ FREE(winsys->drm_bo_array);
+ winsys->drm_bo_array = tmp;
+ winsys->array_size = count;
}
for (i = 0; i < count; i++)
- drm_ws->drm_bo_array[i] = ((struct intel_drm_bo *) bo_array[i])->bo;
+ winsys->drm_bo_array[i] = bo_array[i]->bo;
- return drm_intel_bufmgr_check_aperture_space(drm_ws->drm_bo_array, count);
+ return drm_intel_bufmgr_check_aperture_space(winsys->drm_bo_array, count);
}
-static void
-intel_drm_winsys_decode_commands(struct intel_winsys *ws,
- struct intel_bo *bo, int used)
+void
+intel_winsys_decode_commands(struct intel_winsys *winsys,
+ struct intel_bo *bo, int used)
{
- struct intel_drm_winsys *drm_ws = intel_drm_winsys(ws);
- struct intel_drm_bo *drm_bo = intel_drm_bo(bo);
int err;
- if (!drm_ws->decode) {
- drm_ws->decode = drm_intel_decode_context_alloc(drm_ws->info.devid);
- if (!drm_ws->decode)
+ if (!winsys->decode) {
+ winsys->decode = drm_intel_decode_context_alloc(winsys->info.devid);
+ if (!winsys->decode)
return;
/* debug_printf()/debug_error() uses stderr by default */
- drm_intel_decode_set_output_file(drm_ws->decode, stderr);
+ drm_intel_decode_set_output_file(winsys->decode, stderr);
}
- err = drm_intel_bo_map(drm_bo->bo, false);
+ err = drm_intel_bo_map(bo->bo, false);
if (err) {
debug_printf("failed to map buffer for decoding\n");
return;
/* in dwords */
used /= 4;
- drm_intel_decode_set_batch_pointer(drm_ws->decode,
- drm_bo->bo->virtual,
- drm_bo->bo->offset,
+ drm_intel_decode_set_batch_pointer(winsys->decode,
+ bo->bo->virtual,
+ bo->bo->offset,
used);
- drm_intel_decode(drm_ws->decode);
+ drm_intel_decode(winsys->decode);
- drm_intel_bo_unmap(drm_bo->bo);
+ drm_intel_bo_unmap(bo->bo);
}
-static int
-intel_drm_winsys_read_reg(struct intel_winsys *ws,
- uint32_t reg, uint64_t *val)
+int
+intel_winsys_read_reg(struct intel_winsys *winsys,
+ uint32_t reg, uint64_t *val)
{
- struct intel_drm_winsys *drm_ws = intel_drm_winsys(ws);
- return drm_intel_reg_read(drm_ws->bufmgr, reg, val);
+ return drm_intel_reg_read(winsys->bufmgr, reg, val);
}
-static void
-intel_drm_winsys_enable_reuse(struct intel_winsys *ws)
+void
+intel_winsys_enable_reuse(struct intel_winsys *winsys)
{
- struct intel_drm_winsys *drm_ws = intel_drm_winsys(ws);
- drm_intel_bufmgr_gem_enable_reuse(drm_ws->bufmgr);
+ drm_intel_bufmgr_gem_enable_reuse(winsys->bufmgr);
}
-static void
-intel_drm_winsys_destroy_context(struct intel_winsys *ws,
- struct intel_context *ctx)
+void
+intel_winsys_destroy_context(struct intel_winsys *winsys,
+ struct intel_context *ctx)
{
drm_intel_gem_context_destroy((drm_intel_context *) ctx);
}
-static struct intel_context *
-intel_drm_winsys_create_context(struct intel_winsys *ws)
+struct intel_context *
+intel_winsys_create_context(struct intel_winsys *winsys)
{
- struct intel_drm_winsys *drm_ws = intel_drm_winsys(ws);
-
return (struct intel_context *)
- drm_intel_gem_context_create(drm_ws->bufmgr);
+ drm_intel_gem_context_create(winsys->bufmgr);
}
-static const struct intel_winsys_info *
-intel_drm_winsys_get_info(struct intel_winsys *ws)
+const struct intel_winsys_info *
+intel_winsys_get_info(const struct intel_winsys *winsys)
{
- struct intel_drm_winsys *drm_ws = intel_drm_winsys(ws);
- return &drm_ws->info;
+ return &winsys->info;
}
-static void
-intel_drm_winsys_destroy(struct intel_winsys *ws)
+void
+intel_winsys_destroy(struct intel_winsys *winsys)
{
- struct intel_drm_winsys *drm_ws = intel_drm_winsys(ws);
-
- if (drm_ws->decode)
- drm_intel_decode_context_free(drm_ws->decode);
+ if (winsys->decode)
+ drm_intel_decode_context_free(winsys->decode);
- drm_intel_bufmgr_destroy(drm_ws->bufmgr);
- FREE(drm_ws->drm_bo_array);
- FREE(drm_ws);
+ drm_intel_bufmgr_destroy(winsys->bufmgr);
+ FREE(winsys->drm_bo_array);
+ FREE(winsys);
}
static bool
-get_param(struct intel_drm_winsys *drm_ws, int param, int *value)
+get_param(struct intel_winsys *winsys, int param, int *value)
{
struct drm_i915_getparam gp;
int err;
gp.param = param;
gp.value = value;
- err = drmCommandWriteRead(drm_ws->fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
+ err = drmCommandWriteRead(winsys->fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
if (err) {
*value = 0;
return false;
}
static bool
-test_address_swizzling(struct intel_drm_winsys *drm_ws)
+test_address_swizzling(struct intel_winsys *winsys)
{
drm_intel_bo *bo;
uint32_t tiling = I915_TILING_X, swizzle;
unsigned long pitch;
- bo = drm_intel_bo_alloc_tiled(drm_ws->bufmgr,
+ bo = drm_intel_bo_alloc_tiled(winsys->bufmgr,
"address swizzling test", 64, 64, 4, &tiling, &pitch, 0);
if (bo) {
drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
}
static bool
-init_info(struct intel_drm_winsys *drm_ws)
+init_info(struct intel_winsys *winsys)
{
- struct intel_winsys_info *info = &drm_ws->info;
+ struct intel_winsys_info *info = &winsys->info;
int val;
/* follow the classic driver here */
- get_param(drm_ws, I915_PARAM_HAS_RELAXED_DELTA, &val);
+ get_param(winsys, I915_PARAM_HAS_RELAXED_DELTA, &val);
if (!val) {
debug_error("kernel 2.6.39 required");
return false;
}
- info->devid = drm_intel_bufmgr_gem_get_devid(drm_ws->bufmgr);
+ info->devid = drm_intel_bufmgr_gem_get_devid(winsys->bufmgr);
- get_param(drm_ws, I915_PARAM_HAS_LLC, &val);
+ get_param(winsys, I915_PARAM_HAS_LLC, &val);
info->has_llc = val;
- get_param(drm_ws, I915_PARAM_HAS_GEN7_SOL_RESET, &val);
+ get_param(winsys, I915_PARAM_HAS_GEN7_SOL_RESET, &val);
info->has_gen7_sol_reset = val;
- info->has_address_swizzling = test_address_swizzling(drm_ws);
+ info->has_address_swizzling = test_address_swizzling(winsys);
return true;
}
struct intel_winsys *
-intel_drm_winsys_create(int fd)
+intel_winsys_create_for_fd(int fd)
{
- struct intel_drm_winsys *drm_ws;
+ struct intel_winsys *winsys;
- drm_ws = CALLOC_STRUCT(intel_drm_winsys);
- if (!drm_ws)
+ winsys = CALLOC_STRUCT(intel_winsys);
+ if (!winsys)
return NULL;
- drm_ws->fd = fd;
+ winsys->fd = fd;
- drm_ws->bufmgr = drm_intel_bufmgr_gem_init(drm_ws->fd, BATCH_SZ);
- if (!drm_ws->bufmgr) {
+ winsys->bufmgr = drm_intel_bufmgr_gem_init(winsys->fd, BATCH_SZ);
+ if (!winsys->bufmgr) {
debug_error("failed to create GEM buffer manager");
- FREE(drm_ws);
+ FREE(winsys);
return NULL;
}
- if (!init_info(drm_ws)) {
- drm_intel_bufmgr_destroy(drm_ws->bufmgr);
- FREE(drm_ws);
+ if (!init_info(winsys)) {
+ drm_intel_bufmgr_destroy(winsys->bufmgr);
+ FREE(winsys);
return NULL;
}
- drm_intel_bufmgr_gem_enable_fenced_relocs(drm_ws->bufmgr);
-
- drm_ws->base.destroy = intel_drm_winsys_destroy;
- drm_ws->base.get_info = intel_drm_winsys_get_info;
- drm_ws->base.enable_reuse = intel_drm_winsys_enable_reuse;
- drm_ws->base.create_context = intel_drm_winsys_create_context;
- drm_ws->base.destroy_context = intel_drm_winsys_destroy_context;
- drm_ws->base.read_reg = intel_drm_winsys_read_reg;
- drm_ws->base.alloc = intel_drm_winsys_alloc;
- drm_ws->base.alloc_buffer = intel_drm_winsys_alloc_buffer;
- drm_ws->base.import_handle = intel_drm_winsys_import_handle;
- drm_ws->base.check_aperture_space = intel_drm_winsys_check_aperture_space;
- drm_ws->base.decode_commands = intel_drm_winsys_decode_commands;
-
- return &drm_ws->base;
+ drm_intel_bufmgr_gem_enable_fenced_relocs(winsys->bufmgr);
+
+ return winsys;
}
+++ /dev/null
-/*
- * Mesa 3-D graphics library
- *
- * Copyright (C) 2012-2013 LunarG, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Chia-I Wu <olv@lunarg.com>
- */
-
-#ifndef INTEL_WINSYS_H
-#define INTEL_WINSYS_H
-
-#include "pipe/p_compiler.h"
-
-/* this is compatible with i915_drm.h's definitions */
-enum intel_exec_flag {
- /* bits[2:0]: ring type */
- INTEL_EXEC_DEFAULT = 0 << 0,
- INTEL_EXEC_RENDER = 1 << 0,
- INTEL_EXEC_BSD = 2 << 0,
- INTEL_EXEC_BLT = 3 << 0,
-
- /* bits[7:6]: constant buffer addressing mode */
-
- /* bits[8]: reset SO write offset register on GEN7+ */
- INTEL_EXEC_GEN7_SOL_RESET = 1 << 8,
-};
-
-/* this is compatible with i915_drm.h's definitions */
-enum intel_domain_flag {
- INTEL_DOMAIN_CPU = 0x00000001,
- INTEL_DOMAIN_RENDER = 0x00000002,
- INTEL_DOMAIN_SAMPLER = 0x00000004,
- INTEL_DOMAIN_COMMAND = 0x00000008,
- INTEL_DOMAIN_INSTRUCTION = 0x00000010,
- INTEL_DOMAIN_VERTEX = 0x00000020,
- INTEL_DOMAIN_GTT = 0x00000040,
-};
-
-/* this is compatible with i915_drm.h's definitions */
-enum intel_tiling_mode {
- INTEL_TILING_NONE = 0,
- INTEL_TILING_X = 1,
- INTEL_TILING_Y = 2,
-};
-
-/* this is compatible with intel_bufmgr.h's definitions */
-enum intel_alloc_flag {
- INTEL_ALLOC_FOR_RENDER = 1 << 0,
-};
-
-struct winsys_handle;
-struct intel_context;
-
-struct intel_winsys_info {
- int devid;
- bool has_llc;
- bool has_gen7_sol_reset;
- bool has_address_swizzling;
-};
-
-/**
- * Buffer objects.
- */
-struct intel_bo {
- void (*reference)(struct intel_bo *bo);
- void (*unreference)(struct intel_bo *bo);
-
- /* accessors */
- unsigned long (*get_size)(struct intel_bo *bo);
- unsigned long (*get_offset)(struct intel_bo *bo);
- void *(*get_virtual)(struct intel_bo *bo);
- enum intel_tiling_mode (*get_tiling)(struct intel_bo *bo);
- unsigned long (*get_pitch)(struct intel_bo *bo);
-
- /**
- * Map/unmap \p bo for CPU access.
- *
- * map() maps the backing store into CPU address space, cached. This
- * variant allows for fast random reads and writes. But the caller needs
- * handle tiling or swizzling manually if the bo is tiled or swizzled. If
- * write is enabled and there is no shared last-level cache (LLC), unmap()
- * needs to flush the cache, which is rather expensive.
- *
- * map_gtt() maps the bo for MMIO access, uncached but write-combined.
- * This variant promises a reasonable speed for sequential writes, but
- * reads would be very slow. Callers always have a linear view of the bo.
- *
- * map_unsynchronized() is similar to map_gtt(), except that it does not
- * wait until the bo is idle.
- */
- int (*map)(struct intel_bo *bo, bool write_enable);
- int (*map_gtt)(struct intel_bo *bo);
- int (*map_unsynchronized)(struct intel_bo *bo);
- int (*unmap)(struct intel_bo *bo);
-
- /**
- * Move data in to or out of the bo.
- */
- int (*pwrite)(struct intel_bo *bo, unsigned long offset,
- unsigned long size, const void *data);
- int (*pread)(struct intel_bo *bo, unsigned long offset,
- unsigned long size, void *data);
-
- /**
- * Add \p target_bo to the relocation list.
- *
- * When \p bo is submitted for execution, and if \p target_bo has moved,
- * the kernel will patch \p bo at \p offset to \p target_bo->offset plus
- * \p target_offset.
- */
- int (*emit_reloc)(struct intel_bo *bo, uint32_t offset,
- struct intel_bo *target_bo, uint32_t target_offset,
- uint32_t read_domains, uint32_t write_domain);
-
- /**
- * Return the current number of relocations.
- */
- int (*get_reloc_count)(struct intel_bo *bo);
-
- /**
- * Discard all relocations except the first \p start ones.
- *
- * Combined with \p get_reloc_count(), they can be used to undo
- * the \p emit_reloc() calls that were just made.
- */
- void (*clear_relocs)(struct intel_bo *bo, int start);
-
- /**
- * Return true if \p target_bo is on the relocation list of \p bo, or on
- * the relocation list of some bo that is referenced by \p bo.
- */
- bool (*references)(struct intel_bo *bo, struct intel_bo *target_bo);
-
- /**
- * Submit \p bo for execution.
- *
- * \p bo and all bos referenced by \p bo will be considered busy until all
- * commands are parsed and executed.
- */
- int (*exec)(struct intel_bo *bo, int used,
- struct intel_context *ctx, unsigned long flags);
-
- /**
- * Wait until \bo is idle, or \p timeout nanoseconds have passed. A
- * negative timeout means to wait indefinitely.
- *
- * \return 0 only when \p bo is idle
- */
- int (*wait)(struct intel_bo *bo, int64_t timeout);
-
- /**
- * Export a handle for inter-process sharing.
- */
- int (*export_handle)(struct intel_bo *bo, struct winsys_handle *handle);
-};
-
-/*
- * Interface to OS functions. This allows the pipe drivers to be OS agnostic.
- *
- * Check libdrm_intel out for documentation.
- */
-struct intel_winsys {
- void (*destroy)(struct intel_winsys *ws);
-
- const struct intel_winsys_info *(*get_info)(struct intel_winsys *ws);
-
- void (*enable_reuse)(struct intel_winsys *ws);
-
- struct intel_context *(*create_context)(struct intel_winsys *ws);
- void (*destroy_context)(struct intel_winsys *ws,
- struct intel_context *ctx);
-
- int (*read_reg)(struct intel_winsys *ws, uint32_t reg, uint64_t *val);
-
- struct intel_bo *(*alloc)(struct intel_winsys *ws,
- const char *name,
- int width, int height, int cpp,
- enum intel_tiling_mode tiling,
- unsigned long flags);
-
- struct intel_bo *(*alloc_buffer)(struct intel_winsys *ws,
- const char *name,
- unsigned long size,
- unsigned long flags);
-
- struct intel_bo *(*import_handle)(struct intel_winsys *ws,
- const char *name,
- int width, int height, int cpp,
- const struct winsys_handle *handle);
-
- int (*check_aperture_space)(struct intel_winsys *ws,
- struct intel_bo **bo_array,
- int count);
-
- void (*decode_commands)(struct intel_winsys *ws,
- struct intel_bo *bo, int used);
-};
-
-/**
- * Return true if \p bo is busy.
- */
-static inline bool
-intel_bo_is_busy(struct intel_bo *bo)
-{
- return (bo->wait(bo, 0) != 0);
-}
-
-#endif /* INTEL_WINSYS_H */
--- /dev/null
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 2012-2013 LunarG, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Chia-I Wu <olv@lunarg.com>
+ */
+
+#ifndef INTEL_WINSYS_H
+#define INTEL_WINSYS_H
+
+#include "pipe/p_compiler.h"
+
+/* this is compatible with i915_drm.h's definitions */
+enum intel_exec_flag {
+ /* bits[2:0]: ring type */
+ INTEL_EXEC_DEFAULT = 0 << 0,
+ INTEL_EXEC_RENDER = 1 << 0,
+ INTEL_EXEC_BSD = 2 << 0,
+ INTEL_EXEC_BLT = 3 << 0,
+
+ /* bits[7:6]: constant buffer addressing mode */
+
+ /* bits[8]: reset SO write offset register on GEN7+ */
+ INTEL_EXEC_GEN7_SOL_RESET = 1 << 8,
+};
+
+/* this is compatible with i915_drm.h's definitions */
+enum intel_domain_flag {
+ INTEL_DOMAIN_CPU = 0x00000001,
+ INTEL_DOMAIN_RENDER = 0x00000002,
+ INTEL_DOMAIN_SAMPLER = 0x00000004,
+ INTEL_DOMAIN_COMMAND = 0x00000008,
+ INTEL_DOMAIN_INSTRUCTION = 0x00000010,
+ INTEL_DOMAIN_VERTEX = 0x00000020,
+ INTEL_DOMAIN_GTT = 0x00000040,
+};
+
+/* this is compatible with i915_drm.h's definitions */
+enum intel_tiling_mode {
+ INTEL_TILING_NONE = 0,
+ INTEL_TILING_X = 1,
+ INTEL_TILING_Y = 2,
+};
+
+/* this is compatible with intel_bufmgr.h's definitions */
+enum intel_alloc_flag {
+ INTEL_ALLOC_FOR_RENDER = 1 << 0,
+};
+
+struct winsys_handle;
+struct intel_winsys;
+struct intel_context;
+struct intel_bo;
+
+struct intel_winsys_info {
+ int devid;
+ bool has_llc;
+ bool has_gen7_sol_reset;
+ bool has_address_swizzling;
+};
+
+struct intel_winsys *
+intel_winsys_create_for_fd(int fd);
+
+void
+intel_winsys_destroy(struct intel_winsys *winsys);
+
+const struct intel_winsys_info *
+intel_winsys_get_info(const struct intel_winsys *winsys);
+
+void
+intel_winsys_enable_reuse(struct intel_winsys *winsys);
+
+struct intel_context *
+intel_winsys_create_context(struct intel_winsys *winsys);
+
+void
+intel_winsys_destroy_context(struct intel_winsys *winsys,
+ struct intel_context *ctx);
+
+int
+intel_winsys_read_reg(struct intel_winsys *winsys,
+ uint32_t reg, uint64_t *val);
+
+struct intel_bo *
+intel_winsys_alloc_buffer(struct intel_winsys *winsys,
+ const char *name,
+ unsigned long size,
+ unsigned long flags);
+
+struct intel_bo *
+intel_winsys_alloc(struct intel_winsys *winsys,
+ const char *name,
+ int width, int height, int cpp,
+ enum intel_tiling_mode tiling,
+ unsigned long flags);
+
+struct intel_bo *
+intel_winsys_import_handle(struct intel_winsys *winsys,
+ const char *name,
+ int width, int height, int cpp,
+ const struct winsys_handle *handle);
+
+int
+intel_winsys_check_aperture_space(struct intel_winsys *winsys,
+ struct intel_bo **bo_array,
+ int count);
+
+void
+intel_winsys_decode_commands(struct intel_winsys *winsys,
+ struct intel_bo *bo, int used);
+
+void
+intel_bo_reference(struct intel_bo *bo);
+
+void
+intel_bo_unreference(struct intel_bo *bo);
+
+unsigned long
+intel_bo_get_size(const struct intel_bo *bo);
+
+unsigned long
+intel_bo_get_offset(const struct intel_bo *bo);
+
+void *
+intel_bo_get_virtual(const struct intel_bo *bo);
+
+enum intel_tiling_mode
+intel_bo_get_tiling(const struct intel_bo *bo);
+
+unsigned long
+intel_bo_get_pitch(const struct intel_bo *bo);
+
+/**
+ * Map/unmap \p bo for CPU access.
+ *
+ * map() maps the backing store into CPU address space, cached. This
+ * variant allows for fast random reads and writes. But the caller needs to
+ * handle tiling or swizzling manually if the bo is tiled or swizzled. If
+ * write is enabled and there is no shared last-level cache (LLC), unmap()
+ * needs to flush the cache, which is rather expensive.
+ *
+ * map_gtt() maps the bo for MMIO access, uncached but write-combined.
+ * This variant promises a reasonable speed for sequential writes, but
+ * reads would be very slow. Callers always have a linear view of the bo.
+ *
+ * map_unsynchronized() is similar to map_gtt(), except that it does not
+ * wait until the bo is idle.
+ */
+int
+intel_bo_map(struct intel_bo *bo, bool write_enable);
+
+int
+intel_bo_map_gtt(struct intel_bo *bo);
+
+int
+intel_bo_map_unsynchronized(struct intel_bo *bo);
+
+int
+intel_bo_unmap(struct intel_bo *bo);
+
+/**
+ * Move data in to or out of the bo.
+ */
+int
+intel_bo_pwrite(struct intel_bo *bo, unsigned long offset,
+ unsigned long size, const void *data);
+int
+intel_bo_pread(struct intel_bo *bo, unsigned long offset,
+ unsigned long size, void *data);
+
+/**
+ * Add \p target_bo to the relocation list.
+ *
+ * When \p bo is submitted for execution, and if \p target_bo has moved,
+ * the kernel will patch \p bo at \p offset to \p target_bo->offset plus
+ * \p target_offset.
+ */
+int
+intel_bo_emit_reloc(struct intel_bo *bo, uint32_t offset,
+ struct intel_bo *target_bo, uint32_t target_offset,
+ uint32_t read_domains, uint32_t write_domain);
+
+/**
+ * Return the current number of relocations.
+ */
+int
+intel_bo_get_reloc_count(struct intel_bo *bo);
+
+/**
+ * Discard all relocations except the first \p start ones.
+ *
+ * Combined with \p get_reloc_count(), they can be used to undo
+ * the \p emit_reloc() calls that were just made.
+ */
+void
+intel_bo_clear_relocs(struct intel_bo *bo, int start);
+
+/**
+ * Return true if \p target_bo is on the relocation list of \p bo, or on
+ * the relocation list of some bo that is referenced by \p bo.
+ */
+bool
+intel_bo_references(struct intel_bo *bo, struct intel_bo *target_bo);
+
+/**
+ * Submit \p bo for execution.
+ *
+ * \p bo and all bos referenced by \p bo will be considered busy until all
+ * commands are parsed and executed.
+ */
+int
+intel_bo_exec(struct intel_bo *bo, int used,
+ struct intel_context *ctx, unsigned long flags);
+
+/**
+ * Wait until \p bo is idle, or \p timeout nanoseconds have passed. A
+ * negative timeout means to wait indefinitely.
+ *
+ * \return 0 only when \p bo is idle
+ */
+int
+intel_bo_wait(struct intel_bo *bo, int64_t timeout);
+
+/**
+ * Export a handle for inter-process sharing.
+ */
+int
+intel_bo_export_handle(struct intel_bo *bo,
+ struct winsys_handle *handle);
+
+/**
+ * Return true if \p bo is busy.
+ */
+static inline bool
+intel_bo_is_busy(struct intel_bo *bo)
+{
+ return (intel_bo_wait(bo, 0) != 0);
+}
+
+#endif /* INTEL_WINSYS_H */
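For reference, a minimal, self-contained sketch of the flattened interface declared above: create a winsys from an already-open DRM fd, allocate a linear buffer, write to it through a cached mapping, and tear everything down. Error handling is reduced to early returns and the fd is assumed valid:

#include <string.h>
#include "intel_winsys.h"

static int
write_hello(int fd)
{
   struct intel_winsys *winsys;
   struct intel_bo *bo;
   void *ptr;

   winsys = intel_winsys_create_for_fd(fd);
   if (!winsys)
      return -1;

   /* a page-aligned, linear 4 KB buffer object */
   bo = intel_winsys_alloc_buffer(winsys, "example", 4096, 0);
   if (!bo) {
      intel_winsys_destroy(winsys);
      return -1;
   }

   /* cached CPU mapping with writes enabled; 0 means success */
   if (!intel_bo_map(bo, true)) {
      ptr = intel_bo_get_virtual(bo);
      memcpy(ptr, "hello", 5);
      intel_bo_unmap(bo);
   }

   intel_bo_unreference(bo);
   intel_winsys_destroy(winsys);
   return 0;
}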