core/ilo_state_viewport.h \
core/ilo_state_zs.c \
core/ilo_state_zs.h \
+ core/ilo_vma.h \
core/intel_winsys.h \
ilo_blit.c \
ilo_blit.h \
ilo_public.h \
ilo_query.c \
ilo_query.h \
- ilo_resource.c \
- ilo_resource.h \
ilo_render.c \
ilo_render.h \
ilo_render_gen.h \
ilo_render_gen8.c \
ilo_render_media.c \
ilo_render_surface.c \
+ ilo_resource.c \
+ ilo_resource.h \
ilo_screen.c \
ilo_screen.h \
ilo_shader.c \
#ifndef ILO_BUFFER_H
#define ILO_BUFFER_H
-#include "intel_winsys.h"
-
#include "ilo_core.h"
#include "ilo_debug.h"
#include "ilo_dev.h"
struct ilo_buffer {
unsigned bo_size;
-
- /* managed by users */
- struct intel_bo *bo;
};
static inline void
#include "ilo_state_shader.h"
#include "ilo_state_viewport.h"
#include "ilo_state_zs.h"
+#include "ilo_vma.h"
#include "ilo_builder.h"
#include "ilo_builder_3d_top.h"
dw[5] |= builder->mocs << GEN8_DEPTH_DW5_MOCS__SHIFT;
- if (zs->depth_bo) {
- ilo_builder_batch_reloc64(builder, pos + 2, zs->depth_bo,
- zs->depth[1], (zs->z_readonly) ? 0 : INTEL_RELOC_WRITE);
+ if (zs->z_vma) {
+ ilo_builder_batch_reloc64(builder, pos + 2, zs->z_vma->bo,
+ zs->z_vma->bo_offset + zs->depth[1],
+ (zs->z_readonly) ? 0 : INTEL_RELOC_WRITE);
}
} else {
dw[1] = zs->depth[0];
else
dw[6] |= builder->mocs << GEN6_DEPTH_DW6_MOCS__SHIFT;
- if (zs->depth_bo) {
- ilo_builder_batch_reloc(builder, pos + 2, zs->depth_bo,
- zs->depth[1], (zs->z_readonly) ? 0 : INTEL_RELOC_WRITE);
+ if (zs->z_vma) {
+ ilo_builder_batch_reloc(builder, pos + 2, zs->z_vma->bo,
+ zs->z_vma->bo_offset + zs->depth[1],
+ (zs->z_readonly) ? 0 : INTEL_RELOC_WRITE);
}
}
}
dw[1] |= builder->mocs << GEN8_STENCIL_DW1_MOCS__SHIFT;
- if (zs->stencil_bo) {
- ilo_builder_batch_reloc64(builder, pos + 2, zs->stencil_bo,
- zs->stencil[1], (zs->s_readonly) ? 0 : INTEL_RELOC_WRITE);
+ if (zs->s_vma) {
+ ilo_builder_batch_reloc64(builder, pos + 2, zs->s_vma->bo,
+ zs->s_vma->bo_offset + zs->stencil[1],
+ (zs->s_readonly) ? 0 : INTEL_RELOC_WRITE);
}
} else {
dw[1] = zs->stencil[0];
dw[1] |= builder->mocs << GEN6_STENCIL_DW1_MOCS__SHIFT;
- if (zs->stencil_bo) {
- ilo_builder_batch_reloc(builder, pos + 2, zs->stencil_bo,
- zs->stencil[1], (zs->s_readonly) ? 0 : INTEL_RELOC_WRITE);
+ if (zs->s_vma) {
+ ilo_builder_batch_reloc(builder, pos + 2, zs->s_vma->bo,
+ zs->s_vma->bo_offset + zs->stencil[1],
+ (zs->s_readonly) ? 0 : INTEL_RELOC_WRITE);
}
}
}
dw[1] |= builder->mocs << GEN8_HIZ_DW1_MOCS__SHIFT;
- if (zs->hiz_bo) {
- ilo_builder_batch_reloc64(builder, pos + 2, zs->hiz_bo,
- zs->hiz[1], (zs->z_readonly) ? 0 : INTEL_RELOC_WRITE);
+ if (zs->hiz_vma) {
+ ilo_builder_batch_reloc64(builder, pos + 2, zs->hiz_vma->bo,
+ zs->hiz_vma->bo_offset + zs->hiz[1],
+ (zs->z_readonly) ? 0 : INTEL_RELOC_WRITE);
}
} else {
dw[1] = zs->hiz[0];
dw[1] |= builder->mocs << GEN6_HIZ_DW1_MOCS__SHIFT;
- if (zs->hiz_bo) {
- ilo_builder_batch_reloc(builder, pos + 2, zs->hiz_bo,
- zs->hiz[1], (zs->z_readonly) ? 0 : INTEL_RELOC_WRITE);
+ if (zs->hiz_vma) {
+ ilo_builder_batch_reloc(builder, pos + 2, zs->hiz_vma->bo,
+ zs->hiz_vma->bo_offset + zs->hiz[1],
+ (zs->z_readonly) ? 0 : INTEL_RELOC_WRITE);
}
}
}
#include "ilo_state_surface.h"
#include "ilo_state_urb.h"
#include "ilo_state_vf.h"
+#include "ilo_vma.h"
#include "ilo_builder.h"
static inline void
dw[3] = 0;
if (ilo_dev_gen(builder->dev) >= ILO_GEN(8)) {
- if (b->need_bo)
- ilo_builder_batch_reloc64(builder, pos + 1, b->bo, b->vb[1], 0);
+ if (b->vma) {
+ ilo_builder_batch_reloc64(builder, pos + 1, b->vma->bo,
+ b->vma->bo_offset + b->vb[1], 0);
+ }
dw[3] |= b->vb[2];
} else {
dw[3] |= vf->user_instancing[elem][1];
}
- if (b->need_bo) {
- ilo_builder_batch_reloc(builder, pos + 1, b->bo, b->vb[1], 0);
- ilo_builder_batch_reloc(builder, pos + 2, b->bo, b->vb[2], 0);
+ if (b->vma) {
+ ilo_builder_batch_reloc(builder, pos + 1, b->vma->bo,
+ b->vma->bo_offset + b->vb[1], 0);
+ ilo_builder_batch_reloc(builder, pos + 2, b->vma->bo,
+ b->vma->bo_offset + b->vb[2], 0);
}
}
pos = ilo_builder_batch_pointer(builder, cmd_len, &dw);
dw[0] = dw0;
- if (ib->need_bo) {
- ilo_builder_batch_reloc(builder, pos + 1, ib->bo, ib->ib[1], 0);
- ilo_builder_batch_reloc(builder, pos + 2, ib->bo, ib->ib[2], 0);
+ if (ib->vma) {
+ ilo_builder_batch_reloc(builder, pos + 1, ib->vma->bo,
+ ib->vma->bo_offset + ib->ib[1], 0);
+ ilo_builder_batch_reloc(builder, pos + 2, ib->vma->bo,
+ ib->vma->bo_offset + ib->ib[2], 0);
} else {
dw[1] = 0;
dw[2] = 0;
dw[1] = ib->ib[0] |
builder->mocs << GEN8_IB_DW1_MOCS__SHIFT;
- if (ib->need_bo) {
- ilo_builder_batch_reloc64(builder, pos + 2, ib->bo, ib->ib[1], 0);
+ if (ib->vma) {
+ ilo_builder_batch_reloc64(builder, pos + 2, ib->vma->bo,
+ ib->vma->bo_offset + ib->ib[1], 0);
} else {
dw[2] = 0;
dw[3] = 0;
builder->mocs << GEN7_SO_BUF_DW1_MOCS__SHIFT |
sol->strides[buffer] << GEN7_SO_BUF_DW1_PITCH__SHIFT;
- if (sb->need_bo) {
- ilo_builder_batch_reloc(builder, pos + 2, sb->bo,
- sb->so_buf[0], INTEL_RELOC_WRITE);
- ilo_builder_batch_reloc(builder, pos + 3, sb->bo,
- sb->so_buf[1], INTEL_RELOC_WRITE);
+ if (sb->vma) {
+ ilo_builder_batch_reloc(builder, pos + 2, sb->vma->bo,
+ sb->vma->bo_offset + sb->so_buf[0], INTEL_RELOC_WRITE);
+ ilo_builder_batch_reloc(builder, pos + 3, sb->vma->bo,
+ sb->vma->bo_offset + sb->so_buf[1], INTEL_RELOC_WRITE);
} else {
dw[2] = 0;
dw[3] = 0;
buffer << GEN7_SO_BUF_DW1_INDEX__SHIFT |
builder->mocs << GEN8_SO_BUF_DW1_MOCS__SHIFT;
- if (sb->need_bo) {
- ilo_builder_batch_reloc64(builder, pos + 2, sb->bo,
- sb->so_buf[1], INTEL_RELOC_WRITE);
+ if (sb->vma) {
+ ilo_builder_batch_reloc64(builder, pos + 2, sb->vma->bo,
+ sb->vma->bo_offset + sb->so_buf[1], INTEL_RELOC_WRITE);
} else {
dw[2] = 0;
dw[3] = 0;
dw[4] = sb->so_buf[2];
- if (sb->need_write_offset_bo) {
- ilo_builder_batch_reloc64(builder, pos + 5, sb->write_offset_bo,
- sizeof(uint32_t) * buffer, INTEL_RELOC_WRITE);
+ if (sb->write_offset_vma) {
+ ilo_builder_batch_reloc64(builder, pos + 5, sb->write_offset_vma->bo,
+ sb->write_offset_vma->bo_offset + sizeof(uint32_t) * buffer,
+ INTEL_RELOC_WRITE);
} else {
dw[5] = 0;
dw[6] = 0;
ILO_BUILDER_ITEM_SURFACE, state_align, state_len, &dw);
memcpy(dw, surf->surface, state_len << 2);
- if (surf->bo) {
+ if (surf->vma) {
const uint32_t mocs = (surf->scanout) ?
(GEN8_MOCS_MT_PTE | GEN8_MOCS_CT_L3) : builder->mocs;
dw[1] |= mocs << GEN8_SURFACE_DW1_MOCS__SHIFT;
- ilo_builder_surface_reloc64(builder, state_offset, 8, surf->bo,
- surf->surface[8], (surf->readonly) ? 0 : INTEL_RELOC_WRITE);
+ ilo_builder_surface_reloc64(builder, state_offset, 8, surf->vma->bo,
+ surf->vma->bo_offset + surf->surface[8],
+ (surf->readonly) ? 0 : INTEL_RELOC_WRITE);
}
} else {
state_align = 32;
ILO_BUILDER_ITEM_SURFACE, state_align, state_len, &dw);
memcpy(dw, surf->surface, state_len << 2);
- if (surf->bo) {
+ if (surf->vma) {
/*
* For scanouts, we should not enable caching in LLC. Since we only
* enable that on Gen8+, we are fine here.
*/
dw[5] |= builder->mocs << GEN6_SURFACE_DW5_MOCS__SHIFT;
- ilo_builder_surface_reloc(builder, state_offset, 1, surf->bo,
- surf->surface[1], (surf->readonly) ? 0 : INTEL_RELOC_WRITE);
+ ilo_builder_surface_reloc(builder, state_offset, 1, surf->vma->bo,
+ surf->vma->bo_offset + surf->surface[1],
+ (surf->readonly) ? 0 : INTEL_RELOC_WRITE);
}
}
#define ILO_IMAGE_H
#include "genhw/genhw.h"
-#include "intel_winsys.h"
#include "ilo_core.h"
#include "ilo_dev.h"
unsigned walk_layer_height;
unsigned bo_stride;
unsigned bo_height;
-
- /* managed by users */
- struct intel_bo *bo;
} aux;
-
- /* managed by users */
- struct intel_bo *bo;
};
struct pipe_resource;
*/
#include "ilo_debug.h"
-#include "ilo_buffer.h"
+#include "ilo_vma.h"
#include "ilo_state_sol.h"
static bool
{
ILO_DEV_ASSERT(dev, 7, 8);
- if (info->buf)
- assert(info->offset < info->buf->bo_size && info->size);
-
/*
* From the Ivy Bridge PRM, volume 2 part 1, page 208:
*
*/
assert(info->offset % 4 == 0);
+ if (info->vma) {
+ assert(info->vma->vm_alignment % 4 == 0);
+ assert(info->size && info->offset + info->size <= info->vma->vm_size);
+ }
+
/* Gen8+ only */
- if (info->write_offset_load || info->write_offset_save)
- assert(ilo_dev_gen(dev) >= ILO_GEN(8));
+ if (info->write_offset_load || info->write_offset_save) {
+ assert(ilo_dev_gen(dev) >= ILO_GEN(8) && info->write_offset_vma);
+ assert(info->write_offset_offset + sizeof(uint32_t) <=
+ info->write_offset_vma->vm_size);
+ }
/*
* From the Broadwell PRM, volume 2b, page 206:
sol_buffer_get_gen6_size(const struct ilo_dev *dev,
const struct ilo_state_sol_buffer_info *info)
{
- uint32_t size;
-
ILO_DEV_ASSERT(dev, 6, 8);
- if (!info->buf)
- return 0;
-
- size = (info->offset + info->size <= info->buf->bo_size) ? info->size :
- info->buf->bo_size - info->offset;
-
/*
* From the Ivy Bridge PRM, volume 2 part 1, page 208:
*
* "(Surface End Address) This field specifies the ending DWord
* address..."
*/
- size &= ~3;
-
- return size;
+ return (info->vma) ? info->size & ~3 : 0;
}
static bool
dw1 = 0;
- if (info->buf)
+ if (info->vma)
dw1 |= GEN8_SO_BUF_DW1_ENABLE;
if (info->write_offset_load)
dw1 |= GEN8_SO_BUF_DW1_OFFSET_WRITE_ENABLE;
else
ret &= sol_buffer_set_gen7_3dstate_so_buffer(sb, dev, info);
- sb->need_bo = (info->size > 0);
- sb->need_write_offset_bo = (info->write_offset_save ||
- (info->write_offset_load && !info->write_offset_imm_enable));
+ sb->vma = info->vma;
+ sb->write_offset_vma = info->write_offset_vma;
assert(ret);
uint8_t decl_count;
};
-struct ilo_buffer;
+struct ilo_vma;
struct ilo_state_sol_buffer_info {
- const struct ilo_buffer *buf;
+ const struct ilo_vma *vma;
uint32_t offset;
uint32_t size;
- /*
- * Gen8+ only. When enabled, require a write offset bo of at least
- * (sizeof(uint32_t) * ILO_STATE_SOL_MAX_BUFFER_COUNT) bytes
- */
+ /* Gen8+ only; at least sizeof(uint32_t) bytes */
+ const struct ilo_vma *write_offset_vma;
+ uint32_t write_offset_offset;
+
bool write_offset_load;
bool write_offset_save;
};
struct ilo_state_sol_buffer {
- uint32_t so_buf[4];
-
- bool need_bo;
- bool need_write_offset_bo;
+ uint32_t so_buf[5];
- /* managed by users */
- struct intel_bo *bo;
- struct intel_bo *write_offset_bo;
+ const struct ilo_vma *vma;
+ const struct ilo_vma *write_offset_vma;
};
static inline size_t
*/
#include "ilo_debug.h"
-#include "ilo_buffer.h"
#include "ilo_image.h"
+#include "ilo_vma.h"
#include "ilo_state_surface.h"
static bool
if (ilo_dev_gen(dev) >= ILO_GEN(7))
assert(info->access != ILO_STATE_SURFACE_ACCESS_DP_SVB);
- if (info->offset + info->size > info->buf->bo_size) {
+ if (info->offset + info->size > info->vma->vm_size) {
ilo_warn("invalid buffer range\n");
return false;
}
if (info->access != ILO_STATE_SURFACE_ACCESS_DP_SVB) {
assert(info->struct_size % info->format_size == 0);
- if (info->offset % info->struct_size) {
+ if (info->offset % info->struct_size ||
+ info->vma->vm_alignment % info->struct_size) {
ilo_warn("bad buffer offset\n");
return false;
}
* Nothing is said about Untyped* messages, but I guess they require the
* base address to be DWord aligned.
*/
- if (info->offset % 4) {
+ if (info->offset % 4 || info->vma->vm_alignment % 4) {
ilo_warn("bad RAW buffer offset\n");
return false;
}
break;
}
+ assert(info->img && info->vma);
+
+ if (info->img->tiling != GEN6_TILING_NONE)
+ assert(info->vma->vm_alignment % 4096 == 0);
+
+ if (info->aux_vma) {
+ assert(ilo_image_can_enable_aux(info->img, info->level_base));
+ /* always tiled */
+ assert(info->aux_vma->vm_alignment % 4096 == 0);
+ }
+
/*
* From the Sandy Bridge PRM, volume 4 part 1, page 78:
*
else
ret &= surface_set_gen6_null_SURFACE_STATE(surf, dev);
+ surf->vma = NULL;
surf->type = GEN6_SURFTYPE_NULL;
surf->readonly = true;
else
ret &= surface_set_gen6_buffer_SURFACE_STATE(surf, dev, info);
+ surf->vma = info->vma;
surf->readonly = info->readonly;
assert(ret);
else
ret &= surface_set_gen6_image_SURFACE_STATE(surf, dev, info);
+ surf->vma = info->vma;
+ surf->aux_vma = info->aux_vma;
+
surf->is_integer = info->is_integer;
surf->readonly = info->readonly;
surf->scanout = info->img->scanout;
#define ILO_STATE_SURFACE_H
#include "genhw/genhw.h"
-#include "intel_winsys.h"
#include "ilo_core.h"
#include "ilo_dev.h"
-struct ilo_buffer;
-struct ilo_image;
-
enum ilo_state_surface_access {
ILO_STATE_SURFACE_ACCESS_SAMPLER, /* sampling engine surfaces */
ILO_STATE_SURFACE_ACCESS_DP_RENDER, /* render target surfaces */
ILO_STATE_SURFACE_ACCESS_DP_SVB,
};
+struct ilo_vma;
+struct ilo_image;
+
struct ilo_state_surface_buffer_info {
- const struct ilo_buffer *buf;
+ const struct ilo_vma *vma;
+ uint32_t offset;
+ uint32_t size;
enum ilo_state_surface_access access;
bool readonly;
uint16_t struct_size;
-
- uint32_t offset;
- uint32_t size;
};
struct ilo_state_surface_image_info {
const struct ilo_image *img;
+ uint8_t level_base;
+ uint8_t level_count;
+ uint16_t slice_base;
+ uint16_t slice_count;
+
+ const struct ilo_vma *vma;
+ const struct ilo_vma *aux_vma;
enum ilo_state_surface_access access;
bool readonly;
bool is_cube_map;
bool is_array;
-
- uint8_t level_base;
- uint8_t level_count;
- uint16_t slice_base;
- uint16_t slice_count;
};
struct ilo_state_surface {
uint32_t surface[13];
+ const struct ilo_vma *vma;
+ const struct ilo_vma *aux_vma;
+
enum gen_surface_type type;
uint8_t min_lod;
uint8_t mip_count;
bool readonly;
bool scanout;
-
- /* managed by users */
- struct intel_bo *bo;
};
bool
*/
#include "ilo_debug.h"
-#include "ilo_buffer.h"
+#include "ilo_vma.h"
#include "ilo_state_vf.h"
static bool
{
ILO_DEV_ASSERT(dev, 6, 8);
- if (info->buf)
- assert(info->offset < info->buf->bo_size && info->size);
+ if (info->vma)
+ assert(info->size && info->offset + info->size <= info->vma->vm_size);
/*
* From the Sandy Bridge PRM, volume 2 part 1, page 86:
* aligned address, and BufferPitch must be a multiple of 64-bits."
*/
if (info->cv_has_double) {
+ if (info->vma)
+ assert(info->vma->vm_alignment % 8 == 0);
+
assert(info->stride % 8 == 0);
assert((info->offset + info->cv_double_vertex_offset_mod_8) % 8 == 0);
}
const struct ilo_state_vertex_buffer_info *info)
{
ILO_DEV_ASSERT(dev, 6, 8);
-
- if (!info->buf)
- return 0;
-
- return (info->offset + info->size <= info->buf->bo_size) ? info->size :
- info->buf->bo_size - info->offset;
+ return (info->vma) ? info->size : 0;
}
static bool
if (ilo_dev_gen(dev) >= ILO_GEN(7))
dw0 |= GEN7_VB_DW0_ADDR_MODIFIED;
- if (!info->buf)
+ if (!info->vma)
dw0 |= GEN6_VB_DW0_IS_NULL;
STATIC_ASSERT(ARRAY_SIZE(vb->vb) >= 3);
vb->vb[2] = (size) ? info->offset + size - 1 : 0;
}
- vb->need_bo = (info->buf != NULL);
+ vb->vma = info->vma;
return true;
}
*/
assert(info->offset % format_size == 0);
- if (info->buf)
- assert(info->offset < info->buf->bo_size && info->size);
+ if (info->vma) {
+ assert(info->vma->vm_alignment % format_size == 0);
+ assert(info->size && info->offset + info->size <= info->vma->vm_size);
+ }
return true;
}
ILO_DEV_ASSERT(dev, 6, 8);
- if (!info->buf)
+ if (!info->vma)
return 0;
- size = (info->offset + info->size <= info->buf->bo_size) ? info->size :
- info->buf->bo_size - info->offset;
-
+ size = info->size;
if (ilo_dev_gen(dev) < ILO_GEN(8)) {
const uint32_t format_size = get_index_format_size(info->format);
size -= (size % format_size);
ib->ib[2] = (size) ? info->offset + size - 1 : 0;
}
- ib->need_bo = (info->buf != NULL);
+ ib->vma = info->vma;
return true;
}
uint32_t dirty;
};
-struct ilo_buffer;
+struct ilo_vma;
struct ilo_state_vertex_buffer_info {
- const struct ilo_buffer *buf;
+ const struct ilo_vma *vma;
uint32_t offset;
uint32_t size;
struct ilo_state_vertex_buffer {
uint32_t vb[3];
- bool need_bo;
-
- /* managed by users */
- struct intel_bo *bo;
+ const struct ilo_vma *vma;
};
struct ilo_state_index_buffer_info {
- const struct ilo_buffer *buf;
+ const struct ilo_vma *vma;
uint32_t offset;
uint32_t size;
struct ilo_state_index_buffer {
uint32_t ib[3];
- bool need_bo;
-
- /* managed by users */
- struct intel_bo *bo;
+ const struct ilo_vma *vma;
};
static inline size_t
* Chia-I Wu <olv@lunarg.com>
*/
-#include "intel_winsys.h"
-
#include "ilo_debug.h"
#include "ilo_image.h"
+#include "ilo_vma.h"
#include "ilo_state_zs.h"
static bool
ILO_DEV_ASSERT(dev, 6, 8);
+ assert(!info->z_img == !info->z_vma);
+ assert(!info->s_img == !info->s_vma);
+
+ /* all tiled */
+ if (info->z_img) {
+ assert(info->z_img->tiling == GEN6_TILING_Y);
+ assert(info->z_vma->vm_alignment % 4096 == 0);
+ }
+ if (info->s_img) {
+ assert(info->s_img->tiling == GEN8_TILING_W);
+ assert(info->s_vma->vm_alignment % 4096 == 0);
+ }
+ if (info->hiz_vma) {
+ assert(info->z_img &&
+ ilo_image_can_enable_aux(info->z_img, info->level));
+ assert(info->z_vma->vm_alignment % 4096 == 0);
+ }
+
/*
* From the Ivy Bridge PRM, volume 2 part 1, page 315:
*
assert(info->level < img->level_count);
assert(img->bo_stride);
- if (info->hiz_enable) {
- assert(info->z_img &&
- ilo_image_can_enable_aux(info->z_img, info->level));
- }
-
if (info->is_cube_map) {
assert(get_gen6_surface_type(dev, img) == GEN6_SURFTYPE_2D);
assert(img->width0 == img->height0);
}
- if (info->z_img)
- assert(info->z_img->tiling == GEN6_TILING_Y);
- if (info->s_img)
- assert(info->s_img->tiling == GEN8_TILING_W);
-
return true;
}
w = img->width0;
h = img->height0;
- if (info->hiz_enable) {
+ if (info->hiz_vma) {
uint16_t align_w, align_h;
get_gen6_hiz_alignments(dev, info->z_img, &align_w, &align_h);
* to the same value (enabled or disabled) as Hierarchical Depth
* Buffer Enable."
*/
- if (!info->hiz_enable && format == GEN6_ZFORMAT_D24_UNORM_X8_UINT)
+ if (!info->hiz_vma && format == GEN6_ZFORMAT_D24_UNORM_X8_UINT)
format = GEN6_ZFORMAT_D24_UNORM_S8_UINT;
/* info->z_readonly and info->s_readonly are ignored on Gen6 */
if (info->z_img)
dw1 |= (info->z_img->bo_stride - 1) << GEN6_DEPTH_DW1_PITCH__SHIFT;
- if (info->hiz_enable || !info->z_img) {
+ if (info->hiz_vma || !info->z_img) {
dw1 |= GEN6_DEPTH_DW1_HIZ_ENABLE |
GEN6_DEPTH_DW1_SEPARATE_STENCIL;
}
if (info->z_img) {
if (!info->z_readonly)
dw1 |= GEN7_DEPTH_DW1_DEPTH_WRITE_ENABLE;
- if (info->hiz_enable)
+ if (info->hiz_vma)
dw1 |= GEN7_DEPTH_DW1_HIZ_ENABLE;
dw1 |= (info->z_img->bo_stride - 1) << GEN7_DEPTH_DW1_PITCH__SHIFT;
else
ret &= zs_set_gen6_null_3DSTATE_STENCIL_BUFFER(zs, dev);
- if (info->z_img && info->hiz_enable)
+ if (info->z_img && info->hiz_vma)
ret &= zs_set_gen6_3DSTATE_HIER_DEPTH_BUFFER(zs, dev, info);
else
ret &= zs_set_gen6_null_3DSTATE_HIER_DEPTH_BUFFER(zs, dev);
+ zs->z_vma = info->z_vma;
+ zs->s_vma = info->s_vma;
+ zs->hiz_vma = info->hiz_vma;
+
zs->z_readonly = info->z_readonly;
zs->s_readonly = info->s_readonly;
*/
assert(ilo_dev_gen(dev) >= ILO_GEN(7));
- zs->depth[0] &= ~GEN7_DEPTH_DW1_HIZ_ENABLE;
- zs_set_gen6_null_3DSTATE_HIER_DEPTH_BUFFER(zs, dev);
+ if (zs->hiz_vma) {
+ zs->depth[0] &= ~GEN7_DEPTH_DW1_HIZ_ENABLE;
+ zs_set_gen6_null_3DSTATE_HIER_DEPTH_BUFFER(zs, dev);
+ zs->hiz_vma = NULL;
+ }
return true;
}
#define ILO_STATE_ZS_H
#include "genhw/genhw.h"
-#include "intel_winsys.h"
#include "ilo_core.h"
#include "ilo_dev.h"
+struct ilo_vma;
struct ilo_image;
struct ilo_state_zs_info {
- /* both are optional */
+ /* both optional */
const struct ilo_image *z_img;
const struct ilo_image *s_img;
+ uint8_t level;
+ uint16_t slice_base;
+ uint16_t slice_count;
+
+ const struct ilo_vma *z_vma;
+ const struct ilo_vma *s_vma;
+ const struct ilo_vma *hiz_vma;
/* ignored prior to Gen7 */
bool z_readonly;
bool s_readonly;
- bool hiz_enable;
bool is_cube_map;
-
- uint8_t level;
- uint16_t slice_base;
- uint16_t slice_count;
};
struct ilo_state_zs {
uint32_t stencil[3];
uint32_t hiz[3];
+ const struct ilo_vma *z_vma;
+ const struct ilo_vma *s_vma;
+ const struct ilo_vma *hiz_vma;
+
/* TODO move this to ilo_image */
enum gen_depth_format depth_format;
bool z_readonly;
bool s_readonly;
-
- /* managed by users */
- struct intel_bo *depth_bo;
- struct intel_bo *stencil_bo;
- struct intel_bo *hiz_bo;
};
bool
--- /dev/null
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 2015 LunarG, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Chia-I Wu <olv@lunarg.com>
+ */
+
+#ifndef ILO_VMA_H
+#define ILO_VMA_H
+
+#include "ilo_core.h"
+#include "ilo_debug.h"
+#include "ilo_dev.h"
+
+struct intel_bo;
+
+/**
+ * A virtual memory area: an address range of vm_size bytes, aligned to
+ * vm_alignment, optionally backed by a bo at bo_offset.
+ */
+struct ilo_vma {
+   /* address space: range size and required alignment, in bytes */
+   uint32_t vm_size;
+   uint32_t vm_alignment;
+
+   /* backing storage; set via ilo_vma_set_bo() and managed by users */
+   struct intel_bo *bo;
+   uint32_t bo_offset;
+};
+
+/**
+ * Initialize \p vma with the given size and alignment.  \p vma must be
+ * zeroed beforehand, and both \p size and \p alignment must be non-zero.
+ * No backing bo is set up here; use ilo_vma_set_bo() for that.
+ *
+ * \p dev is currently unused.  Always returns true.
+ */
+static inline bool
+ilo_vma_init(struct ilo_vma *vma, const struct ilo_dev *dev,
+             uint32_t size, uint32_t alignment)
+{
+   assert(ilo_is_zeroed(vma, sizeof(*vma)));
+   assert(size && alignment);
+
+   vma->vm_alignment = alignment;
+   vma->vm_size = size;
+
+   return true;
+}
+
+/**
+ * Set or replace the backing bo of \p vma.  \p offset is the start of the
+ * area within \p bo and must be a multiple of the VMA alignment.
+ *
+ * No reference is taken on \p bo here; callers own the bo and are
+ * responsible for unreferencing any previously set bo.  \p dev is
+ * currently unused.
+ */
+static inline void
+ilo_vma_set_bo(struct ilo_vma *vma, const struct ilo_dev *dev,
+               struct intel_bo *bo, uint32_t offset)
+{
+   assert(offset % vma->vm_alignment == 0);
+
+   vma->bo = bo;
+   vma->bo_offset = offset;
+}
+
+#endif /* ILO_VMA_H */
static bool
buf_clear_region(struct ilo_blitter *blitter,
- struct ilo_buffer *buf, unsigned offset,
+ struct ilo_buffer_resource *buf, unsigned offset,
uint32_t val, unsigned size,
enum gen6_blt_mask value_mask,
enum gen6_blt_mask write_mask)
if (offset % cpp || size % cpp)
return false;
- dst.bo = buf->bo;
- dst.offset = offset;
+ dst.bo = buf->vma.bo;
+ dst.offset = buf->vma.bo_offset + offset;
ilo_blitter_blt_begin(blitter, GEN6_COLOR_BLT__SIZE *
(1 + size / 32764 / gen6_blt_max_scanlines),
static bool
buf_copy_region(struct ilo_blitter *blitter,
- struct ilo_buffer *dst_buf, unsigned dst_offset,
- struct ilo_buffer *src_buf, unsigned src_offset,
+ struct ilo_buffer_resource *dst_buf, unsigned dst_offset,
+ struct ilo_buffer_resource *src_buf, unsigned src_offset,
unsigned size)
{
const uint8_t rop = 0xcc; /* SRCCOPY */
struct ilo_builder *builder = &blitter->ilo->cp->builder;
struct gen6_blt_bo dst, src;
- dst.bo = dst_buf->bo;
- dst.offset = dst_offset;
+ dst.bo = dst_buf->vma.bo;
+ dst.offset = dst_buf->vma.bo_offset + dst_offset;
dst.pitch = 0;
- src.bo = src_buf->bo;
- src.offset = src_offset;
+ src.bo = src_buf->vma.bo;
+ src.offset = src_buf->vma.bo_offset + src_offset;
src.pitch = 0;
ilo_blitter_blt_begin(blitter, GEN6_SRC_COPY_BLT__SIZE *
(1 + size / 32764 / gen6_blt_max_scanlines),
- dst_buf->bo, GEN6_TILING_NONE, src_buf->bo, GEN6_TILING_NONE);
+ dst_buf->vma.bo, GEN6_TILING_NONE,
+ src_buf->vma.bo, GEN6_TILING_NONE);
while (size) {
unsigned width, height;
if (dst_box->width * cpp > gen6_blt_max_bytes_per_scanline)
return false;
- dst.bo = dst_tex->image.bo;
- dst.offset = 0;
+ dst.bo = dst_tex->vma.bo;
+ dst.offset = dst_tex->vma.bo_offset;
dst.pitch = dst_tex->image.bo_stride;
dst.tiling = dst_tex->image.tiling;
swctrl = ilo_blitter_blt_begin(blitter,
GEN6_XY_COLOR_BLT__SIZE * dst_box->depth,
- dst_tex->image.bo, dst_tex->image.tiling, NULL, GEN6_TILING_NONE);
+ dst_tex->vma.bo, dst_tex->image.tiling, NULL, GEN6_TILING_NONE);
for (slice = 0; slice < dst_box->depth; slice++) {
unsigned x, y;
break;
}
- dst.bo = dst_tex->image.bo;
- dst.offset = 0;
+ dst.bo = dst_tex->vma.bo;
+ dst.offset = dst_tex->vma.bo_offset;
dst.pitch = dst_tex->image.bo_stride;
dst.tiling = dst_tex->image.tiling;
- src.bo = src_tex->image.bo;
- src.offset = 0;
+ src.bo = src_tex->vma.bo;
+ src.offset = src_tex->vma.bo_offset;
src.pitch = src_tex->image.bo_stride;
src.tiling = src_tex->image.tiling;
src_box->height == 1 &&
src_box->depth == 1);
- success = buf_copy_region(blitter,
- ilo_buffer(dst), dst_offset, ilo_buffer(src), src_offset, size);
+ success = buf_copy_region(blitter, ilo_buffer_resource(dst), dst_offset,
+ ilo_buffer_resource(src), src_offset, size);
}
else if (dst->target != PIPE_BUFFER && src->target != PIPE_BUFFER) {
success = tex_copy_region(blitter,
if (offset + size > end)
size = end - offset;
- success = buf_clear_region(blitter, ilo_buffer(rt->texture),
+ success = buf_clear_region(blitter, ilo_buffer_resource(rt->texture),
offset, packed.ui[0], size, mask, mask);
}
else {
const struct pipe_draw_info *info)
{
const struct ilo_ib_state *ib = &ilo->state_vector.ib;
+ const struct ilo_vma *vma;
union {
const void *ptr;
const uint8_t *u8;
/* we will draw with IB mapped */
if (ib->state.buffer) {
- u.ptr = intel_bo_map(ilo_buffer(ib->state.buffer)->bo, false);
+ vma = ilo_resource_get_vma(ib->state.buffer);
+ u.ptr = intel_bo_map(vma->bo, false);
if (u.ptr)
- u.u8 += ib->state.offset;
+ u.u8 += vma->bo_offset + ib->state.offset;
} else {
+ vma = NULL;
u.ptr = ib->state.user_buffer;
}
#undef DRAW_VBO_WITH_SW_RESTART
- if (ib->state.buffer)
- intel_bo_unmap(ilo_buffer(ib->state.buffer)->bo);
+ if (vma)
+ intel_bo_unmap(vma->bo);
}
static bool
const struct pipe_stream_output_info *so_info,
int so_index)
{
- struct ilo_buffer *buf = ilo_buffer(so->buffer);
struct ilo_state_surface_buffer_info info;
struct ilo_state_surface surf;
ILO_DEV_ASSERT(builder->dev, 6, 6);
memset(&info, 0, sizeof(info));
- info.buf = buf;
+
+ info.vma = ilo_resource_get_vma(so->buffer);
+ info.offset = so->buffer_offset + so_info->output[so_index].dst_offset * 4;
+ info.size = so->buffer_size - so_info->output[so_index].dst_offset * 4;
+
info.access = ILO_STATE_SURFACE_ACCESS_DP_SVB;
switch (so_info->output[so_index].num_components) {
info.struct_size =
so_info->stride[so_info->output[so_index].output_buffer] * 4;
- info.offset = so->buffer_offset + so_info->output[so_index].dst_offset * 4;
- info.size = so->buffer_size - so_info->output[so_index].dst_offset * 4;
memset(&surf, 0, sizeof(surf));
ilo_state_surface_init_for_buffer(&surf, builder->dev, &info);
- surf.bo = info.buf->bo;
return gen6_SURFACE_STATE(builder, &surf);
}
return;
memset(&info, 0, sizeof(info));
- info.buf = ilo_buffer(session->input->buffer);
+
+ info.vma = ilo_resource_get_vma(session->input->buffer);
+ info.offset = session->input->buffer_offset;
+ info.size = session->input->buffer_size;
+
info.access = ILO_STATE_SURFACE_ACCESS_DP_UNTYPED;
info.format = GEN6_FORMAT_RAW;
info.format_size = 1;
info.struct_size = 1;
info.readonly = true;
- info.offset = session->input->buffer_offset;
- info.size = session->input->buffer_size;
memset(&surf, 0, sizeof(surf));
ilo_state_surface_init_for_buffer(&surf, r->dev, &info);
- surf.bo = info.buf->bo;
assert(count == 1 && session->input->buffer);
surface_state[base] = gen6_SURFACE_STATE(r->builder, &surf);
surface_state += base;
for (i = 0; i < count; i++) {
if (i < vec->global_binding.count && bindings[i].resource) {
- const struct ilo_buffer *buf = ilo_buffer(bindings[i].resource);
struct ilo_state_surface_buffer_info info;
struct ilo_state_surface surf;
assert(bindings[i].resource->target == PIPE_BUFFER);
memset(&info, 0, sizeof(info));
- info.buf = buf;
+
+ info.vma = ilo_resource_get_vma(bindings[i].resource);
+ info.size = info.vma->vm_size;
+
info.access = ILO_STATE_SURFACE_ACCESS_DP_UNTYPED;
info.format = GEN6_FORMAT_RAW;
info.format_size = 1;
info.struct_size = 1;
- info.size = buf->bo_size;
memset(&surf, 0, sizeof(surf));
ilo_state_surface_init_for_buffer(&surf, r->dev, &info);
- surf.bo = info.buf->bo;
surface_state[i] = gen6_SURFACE_STATE(r->builder, &surf);
} else {
if (!bo)
return false;
- intel_bo_unref(tex->image.bo);
- tex->image.bo = bo;
+ intel_bo_unref(tex->vma.bo);
+ ilo_vma_set_bo(&tex->vma, &is->dev, bo, 0);
return true;
}
tex_create_hiz(struct ilo_texture *tex)
{
const struct pipe_resource *templ = &tex->base;
+ const uint32_t size = tex->image.aux.bo_stride * tex->image.aux.bo_height;
struct ilo_screen *is = ilo_screen(tex->base.screen);
struct intel_bo *bo;
- bo = intel_winsys_alloc_bo(is->dev.winsys, "hiz texture",
- tex->image.aux.bo_stride * tex->image.aux.bo_height, false);
+ bo = intel_winsys_alloc_bo(is->dev.winsys, "hiz texture", size, false);
if (!bo)
return false;
- tex->image.aux.bo = bo;
+ ilo_vma_init(&tex->aux_vma, &is->dev, size, 4096);
+ ilo_vma_set_bo(&tex->aux_vma, &is->dev, bo, 0);
if (tex->imported) {
unsigned lv;
static bool
tex_create_mcs(struct ilo_texture *tex)
{
+ const uint32_t size = tex->image.aux.bo_stride * tex->image.aux.bo_height;
struct ilo_screen *is = ilo_screen(tex->base.screen);
struct intel_bo *bo;
assert(tex->image.aux.enables == (1 << (tex->base.last_level + 1)) - 1);
- bo = intel_winsys_alloc_bo(is->dev.winsys, "mcs texture",
- tex->image.aux.bo_stride * tex->image.aux.bo_height, false);
+ bo = intel_winsys_alloc_bo(is->dev.winsys, "mcs texture", size, false);
if (!bo)
return false;
- tex->image.aux.bo = bo;
+ ilo_vma_init(&tex->aux_vma, &is->dev, size, 4096);
+ ilo_vma_set_bo(&tex->aux_vma, &is->dev, bo, 0);
return true;
}
if (tex->separate_s8)
tex_destroy(tex->separate_s8);
- intel_bo_unref(tex->image.bo);
- intel_bo_unref(tex->image.aux.bo);
+ intel_bo_unref(tex->vma.bo);
+ intel_bo_unref(tex->aux_vma.bo);
tex_free_slices(tex);
FREE(tex);
return false;
}
- tex->image.bo = bo;
+ ilo_vma_init(&tex->vma, &is->dev,
+ tex->image.bo_stride * tex->image.bo_height, 4096);
+ ilo_vma_set_bo(&tex->vma, &is->dev, bo, 0);
tex->imported = true;
return false;
} else {
ilo_image_init(img, &is->dev, templ);
+ ilo_vma_init(&tex->vma, &is->dev,
+ img->bo_stride * img->bo_height, 4096);
}
if (img->bo_height > ilo_max_resource_size / img->bo_stride)
else
tiling = surface_to_winsys_tiling(tex->image.tiling);
- err = intel_winsys_export_handle(is->dev.winsys, tex->image.bo, tiling,
+ err = intel_winsys_export_handle(is->dev.winsys, tex->vma.bo, tiling,
tex->image.bo_stride, tex->image.bo_height, handle);
return !err;
if (!bo)
return false;
- intel_bo_unref(buf->buffer.bo);
- buf->buffer.bo = bo;
+ intel_bo_unref(buf->vma.bo);
+ ilo_vma_set_bo(&buf->vma, &is->dev, bo, 0);
return true;
}
static void
buf_destroy(struct ilo_buffer_resource *buf)
{
- intel_bo_unref(buf->buffer.bo);
+ intel_bo_unref(buf->vma.bo);
FREE(buf);
}
size = align(size, 4096);
ilo_buffer_init(&buf->buffer, &is->dev, size, templ->bind, templ->flags);
+ ilo_vma_init(&buf->vma, &is->dev, buf->buffer.bo_size, 4096);
if (buf->buffer.bo_size < templ->width0 ||
buf->buffer.bo_size > ilo_max_resource_size ||
#include "core/intel_winsys.h"
#include "core/ilo_buffer.h"
#include "core/ilo_image.h"
+#include "core/ilo_vma.h"
#include "ilo_common.h"
#include "ilo_screen.h"
bool imported;
struct ilo_image image;
+ struct ilo_vma vma;
+ struct ilo_vma aux_vma;
/* XXX thread-safety */
struct ilo_texture_slice *slices[PIPE_MAX_TEXTURE_LEVELS];
struct pipe_resource base;
struct ilo_buffer buffer;
+ struct ilo_vma vma;
};
-static inline struct ilo_buffer *
-ilo_buffer(struct pipe_resource *res)
+static inline struct ilo_buffer_resource *
+ilo_buffer_resource(struct pipe_resource *res)
{
- return (res && res->target == PIPE_BUFFER) ?
- &((struct ilo_buffer_resource *) res)->buffer : NULL;
+ return (struct ilo_buffer_resource *)
+ ((res && res->target == PIPE_BUFFER) ? res : NULL);
}
static inline struct ilo_texture *
ilo_resource_rename_bo(struct pipe_resource *res);
/**
- * Return the bo of the resource.
+ * Return the VMA of the resource.
*/
-static inline struct intel_bo *
-ilo_resource_get_bo(struct pipe_resource *res)
+static inline const struct ilo_vma *
+ilo_resource_get_vma(struct pipe_resource *res)
{
return (res->target == PIPE_BUFFER) ?
- ilo_buffer(res)->bo : ilo_texture(res)->image.bo;
+ &((struct ilo_buffer_resource *) res)->vma :
+ &((struct ilo_texture *) res)->vma;
}
static inline struct ilo_texture_slice *
u_upload_data(ilo->uploader, 0, cbuf->cso[i].info.size,
cbuf->cso[i].user_buffer, &offset, &cbuf->cso[i].resource);
- cbuf->cso[i].info.buf = ilo_buffer(cbuf->cso[i].resource);
+ cbuf->cso[i].info.vma = ilo_resource_get_vma(cbuf->cso[i].resource);
cbuf->cso[i].info.offset = offset;
memset(&cbuf->cso[i].surface, 0, sizeof(cbuf->cso[i].surface));
ilo_state_surface_init_for_buffer(&cbuf->cso[i].surface,
ilo->dev, &cbuf->cso[i].info);
- cbuf->cso[i].surface.bo = cbuf->cso[i].info.buf->bo;
ilo->state_vector.dirty |= ILO_DIRTY_CBUF;
}
memset(&info, 0, sizeof(info));
if (vec->ib.hw_resource) {
- info.buf = ilo_buffer(vec->ib.hw_resource);
- info.size = info.buf->bo_size;
+ info.vma = ilo_resource_get_vma(vec->ib.hw_resource);
+ info.size = info.vma->vm_size;
info.format = ilo_translate_index_size(vec->ib.hw_index_size);
-
- vec->ib.ib.bo = info.buf->bo;
}
ilo_state_index_buffer_set_info(&vec->ib.ib, dev, &info);
const struct pipe_vertex_buffer *cso = &vec->vb.states[pipe_idx];
if (cso->buffer) {
- info.buf = ilo_buffer(cso->buffer);
+ info.vma = ilo_resource_get_vma(cso->buffer);
info.offset = cso->buffer_offset;
- info.size = info.buf->bo_size;
+ info.size = info.vma->vm_size - cso->buffer_offset;
info.stride = cso->stride;
-
- vec->vb.vb[i].bo = info.buf->bo;
} else {
memset(&info, 0, sizeof(info));
}
cso->info.size = buf[i].buffer_size;
if (buf[i].buffer) {
- cso->info.buf = ilo_buffer(buf[i].buffer);
+ cso->info.vma = ilo_resource_get_vma(buf[i].buffer);
cso->info.offset = buf[i].buffer_offset;
memset(&cso->surface, 0, sizeof(cso->surface));
ilo_state_surface_init_for_buffer(&cso->surface, dev, &cso->info);
- cso->surface.bo = cso->info.buf->bo;
cso->user_buffer = NULL;
cbuf->enabled_mask |= 1 << (index + i);
} else if (buf[i].user_buffer) {
- cso->info.buf = NULL;
+ cso->info.vma = NULL;
/* buffer_offset does not apply for user buffer */
cso->user_buffer = buf[i].user_buffer;
cbuf->enabled_mask |= 1 << (index + i);
} else {
- cso->info.buf = NULL;
+ cso->info.vma = NULL;
cso->info.size = 0;
cso->user_buffer = NULL;
pipe_resource_reference(&cso->resource, NULL);
- cso->info.buf = NULL;
+ cso->info.vma = NULL;
cso->info.size = 0;
cso->user_buffer = NULL;
const struct ilo_surface_cso *cso =
(const struct ilo_surface_cso *) state->zsbuf;
- fb->has_hiz = cso->u.zs.hiz_bo;
+ fb->has_hiz = cso->u.zs.hiz_vma;
fb->depth_offset_format =
ilo_state_zs_get_depth_format(&cso->u.zs, dev);
} else {
target->base.buffer_size = buffer_size;
memset(&info, 0, sizeof(info));
- info.buf = ilo_buffer(res);
+ info.vma = ilo_resource_get_vma(res);
info.offset = buffer_offset;
info.size = buffer_size;
ilo_state_sol_buffer_init(&target->sb, dev, &info);
- target->sb.bo = info.buf->bo;
return &target->base;
}
struct ilo_state_surface_buffer_info info;
memset(&info, 0, sizeof(info));
- info.buf = ilo_buffer(res);
+ info.vma = ilo_resource_get_vma(res);
info.access = ILO_STATE_SURFACE_ACCESS_SAMPLER;
info.format = ilo_format_translate_color(dev, templ->format);
info.format_size = util_format_get_blocksize(templ->format);
info.struct_size = info.format_size;
info.readonly = true;
info.offset = templ->u.buf.first_element * info.struct_size;
info.size = (templ->u.buf.last_element -
templ->u.buf.first_element + 1) * info.struct_size;
ilo_state_surface_init_for_buffer(&view->surface, dev, &info);
- view->surface.bo = info.buf->bo;
} else {
struct ilo_texture *tex = ilo_texture(res);
struct ilo_state_surface_image_info info;
}
memset(&info, 0, sizeof(info));
+
info.img = &tex->image;
+ info.level_base = templ->u.tex.first_level;
+ info.level_count = templ->u.tex.last_level -
+ templ->u.tex.first_level + 1;
+ info.slice_base = templ->u.tex.first_layer;
+ info.slice_count = templ->u.tex.last_layer -
+ templ->u.tex.first_layer + 1;
+ info.vma = &tex->vma;
info.access = ILO_STATE_SURFACE_ACCESS_SAMPLER;
if (templ->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT &&
info.is_array = util_resource_is_array_texture(&tex->base);
info.readonly = true;
- info.level_base = templ->u.tex.first_level;
- info.level_count = templ->u.tex.last_level -
- templ->u.tex.first_level + 1;
- info.slice_base = templ->u.tex.first_layer;
- info.slice_count = templ->u.tex.last_layer -
- templ->u.tex.first_layer + 1;
-
ilo_state_surface_init_for_image(&view->surface, dev, &info);
- view->surface.bo = info.img->bo;
}
return &view->base;
assert(tex->base.target != PIPE_BUFFER);
memset(&info, 0, sizeof(info));
+
info.img = &tex->image;
- info.access = ILO_STATE_SURFACE_ACCESS_DP_RENDER;
- info.format = ilo_format_translate_render(dev, templ->format);
- info.is_array = util_resource_is_array_texture(&tex->base);
info.level_base = templ->u.tex.level;
info.level_count = 1;
info.slice_base = templ->u.tex.first_layer;
info.slice_count = templ->u.tex.last_layer -
templ->u.tex.first_layer + 1;
+ info.vma = &tex->vma;
+ if (ilo_image_can_enable_aux(&tex->image, templ->u.tex.level))
+ info.aux_vma = &tex->aux_vma;
+
+ info.access = ILO_STATE_SURFACE_ACCESS_DP_RENDER;
+ info.format = ilo_format_translate_render(dev, templ->format);
+ info.is_array = util_resource_is_array_texture(&tex->base);
+
ilo_state_surface_init_for_image(&surf->u.rt, dev, &info);
- surf->u.rt.bo = info.img->bo;
} else {
struct ilo_state_zs_info info;
memset(&info, 0, sizeof(info));
if (templ->format == PIPE_FORMAT_S8_UINT) {
+ info.s_vma = &tex->vma;
info.s_img = &tex->image;
} else {
+ info.z_vma = &tex->vma;
info.z_img = &tex->image;
- info.s_img = (tex->separate_s8) ? &tex->separate_s8->image : NULL;
- info.hiz_enable =
- ilo_image_can_enable_aux(&tex->image, templ->u.tex.level);
+ if (tex->separate_s8) {
+ info.s_vma = &tex->separate_s8->vma;
+ info.s_img = &tex->separate_s8->image;
+ }
+
+ if (ilo_image_can_enable_aux(&tex->image, templ->u.tex.level))
+ info.hiz_vma = &tex->aux_vma;
}
info.level = templ->u.tex.level;
templ->u.tex.first_layer + 1;
ilo_state_zs_init(&surf->u.zs, dev, &info);
-
- if (info.z_img) {
- surf->u.zs.depth_bo = info.z_img->bo;
- if (info.hiz_enable)
- surf->u.zs.hiz_bo = info.z_img->aux.bo;
- }
-
- if (info.s_img)
- surf->u.zs.stencil_bo = info.s_img->bo;
}
return &surf->base;
ilo_state_vector_resource_renamed(struct ilo_state_vector *vec,
struct pipe_resource *res)
{
- struct intel_bo *bo = ilo_resource_get_bo(res);
uint32_t states = 0;
unsigned sh, i;
for (i = 0; i < vec->so.count; i++) {
if (vec->so.states[i]->buffer == res) {
- struct ilo_stream_output_target *target =
- (struct ilo_stream_output_target *) vec->so.states[i];
-
- target->sb.bo = ilo_buffer(res)->bo;
states |= ILO_DIRTY_SO;
break;
}
[PIPE_SHADER_GEOMETRY] = ILO_DIRTY_VIEW_GS,
[PIPE_SHADER_COMPUTE] = ILO_DIRTY_VIEW_CS,
};
- cso->surface.bo = bo;
states |= view_dirty_bits[sh];
break;
struct ilo_cbuf_cso *cbuf = &vec->cbuf[sh].cso[i];
if (cbuf->resource == res) {
- cbuf->surface.bo = bo;
states |= ILO_DIRTY_CBUF;
break;
}
(struct ilo_surface_cso *) vec->resource.states[i];
if (cso->base.texture == res) {
- cso->u.rt.bo = bo;
states |= ILO_DIRTY_RESOURCE;
break;
}
struct ilo_surface_cso *cso =
(struct ilo_surface_cso *) vec->fb.state.cbufs[i];
if (cso && cso->base.texture == res) {
- cso->u.rt.bo = bo;
states |= ILO_DIRTY_FB;
break;
}
}
- if (vec->fb.state.zsbuf && vec->fb.state.zsbuf->texture == res) {
- struct ilo_surface_cso *cso =
- (struct ilo_surface_cso *) vec->fb.state.zsbuf;
-
- cso->u.zs.depth_bo = bo;
-
+ if (vec->fb.state.zsbuf && vec->fb.state.zsbuf->texture == res)
states |= ILO_DIRTY_FB;
- }
}
for (i = 0; i < vec->cs_resource.count; i++) {
struct ilo_surface_cso *cso =
(struct ilo_surface_cso *) vec->cs_resource.states[i];
if (cso->base.texture == res) {
- cso->u.rt.bo = bo;
states |= ILO_DIRTY_CS_RESOURCE;
break;
}
static void *
xfer_map(struct ilo_transfer *xfer)
{
+ const struct ilo_vma *vma;
void *ptr;
switch (xfer->method) {
case ILO_TRANSFER_MAP_CPU:
- ptr = intel_bo_map(ilo_resource_get_bo(xfer->base.resource),
- xfer->base.usage & PIPE_TRANSFER_WRITE);
+ vma = ilo_resource_get_vma(xfer->base.resource);
+ ptr = intel_bo_map(vma->bo, xfer->base.usage & PIPE_TRANSFER_WRITE);
break;
case ILO_TRANSFER_MAP_GTT:
- ptr = intel_bo_map_gtt(ilo_resource_get_bo(xfer->base.resource));
+ vma = ilo_resource_get_vma(xfer->base.resource);
+ ptr = intel_bo_map_gtt(vma->bo);
break;
case ILO_TRANSFER_MAP_GTT_ASYNC:
- ptr = intel_bo_map_gtt_async(ilo_resource_get_bo(xfer->base.resource));
+ vma = ilo_resource_get_vma(xfer->base.resource);
+ ptr = intel_bo_map_gtt_async(vma->bo);
break;
case ILO_TRANSFER_MAP_STAGING:
{
const struct ilo_screen *is = ilo_screen(xfer->staging.res->screen);
- struct intel_bo *bo = ilo_resource_get_bo(xfer->staging.res);
+
+ vma = ilo_resource_get_vma(xfer->staging.res);
/*
* We want a writable, optionally persistent and coherent, mapping
* this turns out to be fairly simple.
*/
if (is->dev.has_llc)
- ptr = intel_bo_map(bo, true);
+ ptr = intel_bo_map(vma->bo, true);
else
- ptr = intel_bo_map_gtt(bo);
+ ptr = intel_bo_map_gtt(vma->bo);
if (ptr && xfer->staging.res->target == PIPE_BUFFER)
ptr += (xfer->base.box.x % ILO_TRANSFER_MAP_BUFFER_ALIGNMENT);
-
}
break;
case ILO_TRANSFER_MAP_SW_CONVERT:
case ILO_TRANSFER_MAP_SW_ZS:
+ vma = NULL;
ptr = xfer->staging.sys;
break;
default:
assert(!"unknown mapping method");
+ vma = NULL;
ptr = NULL;
break;
}
+ if (ptr && vma)
+ ptr = (void *) ((char *) ptr + vma->bo_offset);
+
return ptr;
}
case ILO_TRANSFER_MAP_CPU:
case ILO_TRANSFER_MAP_GTT:
case ILO_TRANSFER_MAP_GTT_ASYNC:
- intel_bo_unmap(ilo_resource_get_bo(xfer->base.resource));
+ intel_bo_unmap(ilo_resource_get_vma(xfer->base.resource)->bo);
break;
case ILO_TRANSFER_MAP_STAGING:
- intel_bo_unmap(ilo_resource_get_bo(xfer->staging.res));
+ intel_bo_unmap(ilo_resource_get_vma(xfer->staging.res)->bo);
break;
default:
break;
if (prefer_cpu && (tex->image.tiling == GEN6_TILING_NONE ||
!linear_view))
- ptr = intel_bo_map(tex->image.bo, !for_read_back);
+ ptr = intel_bo_map(tex->vma.bo, !for_read_back);
else
- ptr = intel_bo_map_gtt(tex->image.bo);
+ ptr = intel_bo_map_gtt(tex->vma.bo);
+
+ if (ptr)
+ ptr = (void *) ((char *) ptr + tex->vma.bo_offset);
return ptr;
}
static void
tex_staging_sys_unmap_bo(struct ilo_texture *tex)
{
- intel_bo_unmap(tex->image.bo);
+ intel_bo_unmap(tex->vma.bo);
}
static bool
return false;
/* see if we can avoid blocking */
- if (is_bo_busy(ilo, ilo_resource_get_bo(res), &need_submit)) {
+ if (is_bo_busy(ilo, ilo_resource_get_vma(res)->bo, &need_submit)) {
bool resource_renamed;
if (!xfer_unblock(xfer, &resource_renamed)) {
buf_pwrite(struct ilo_context *ilo, struct pipe_resource *res,
unsigned usage, int offset, int size, const void *data)
{
- struct ilo_buffer *buf = ilo_buffer(res);
+ struct ilo_buffer_resource *buf = ilo_buffer_resource(res);
bool need_submit;
/* see if we can avoid blocking */
- if (is_bo_busy(ilo, buf->bo, &need_submit)) {
+ if (is_bo_busy(ilo, buf->vma.bo, &need_submit)) {
bool unblocked = false;
if ((usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) &&
templ.bind = PIPE_BIND_TRANSFER_WRITE;
staging = ilo->base.screen->resource_create(ilo->base.screen, &templ);
if (staging) {
+ const struct ilo_vma *staging_vma = ilo_resource_get_vma(staging);
struct pipe_box staging_box;
- intel_bo_pwrite(ilo_buffer(staging)->bo, 0, size, data);
+ /* offset by staging_vma->bo_offset for pwrite */
+ intel_bo_pwrite(staging_vma->bo, staging_vma->bo_offset,
+ size, data);
u_box_1d(0, size, &staging_box);
ilo_blitter_blt_copy_resource(ilo->blitter,
ilo_cp_submit(ilo->cp, "syncing for pwrites");
}
- intel_bo_pwrite(buf->bo, offset, size, data);
+ /* offset by buf->vma.bo_offset for pwrite */
+ intel_bo_pwrite(buf->vma.bo, buf->vma.bo_offset + offset, size, data);
}
static void