/* use PIPE_BIND_CUSTOM to indicate MCS */
#define ILO_BIND_MCS PIPE_BIND_CUSTOM
-static struct intel_bo *
-alloc_buf_bo(const struct ilo_texture *tex)
-{
- struct ilo_screen *is = ilo_screen(tex->base.screen);
- struct intel_bo *bo;
- const char *name;
- const unsigned size = tex->bo_width;
-
- switch (tex->base.bind) {
- case PIPE_BIND_VERTEX_BUFFER:
- name = "vertex buffer";
- break;
- case PIPE_BIND_INDEX_BUFFER:
- name = "index buffer";
- break;
- case PIPE_BIND_CONSTANT_BUFFER:
- name = "constant buffer";
- break;
- case PIPE_BIND_STREAM_OUTPUT:
- name = "stream output";
- break;
- default:
- name = "unknown buffer";
- break;
- }
-
- /* this is what a buffer supposed to be like */
- assert(tex->bo_width * tex->bo_height * tex->bo_cpp == size);
- assert(tex->tiling == INTEL_TILING_NONE);
- assert(tex->bo_stride == 0);
-
- if (tex->handle) {
- bo = is->winsys->import_handle(is->winsys, name,
- tex->bo_width, tex->bo_height, tex->bo_cpp, tex->handle);
-
- /* since the bo is shared to us, make sure it meets the expectations */
- if (bo) {
- assert(bo->get_size(tex->bo) == size);
- assert(bo->get_tiling(tex->bo) == tex->tiling);
- assert(bo->get_pitch(tex->bo) == tex->bo_stride);
- }
- }
- else {
- bo = is->winsys->alloc_buffer(is->winsys, name, size, 0);
- }
+struct tex_layout {
+ const struct ilo_dev_info *dev;
+ const struct pipe_resource *templ;
- return bo;
-}
+ enum pipe_format format;
+ unsigned block_width, block_height, block_size;
+ bool compressed;
+ bool has_depth, has_stencil;
-static struct intel_bo *
-alloc_tex_bo(const struct ilo_texture *tex)
-{
- struct ilo_screen *is = ilo_screen(tex->base.screen);
- struct intel_bo *bo;
- const char *name;
+ enum intel_tiling_mode tiling;
+ bool can_be_linear;
- switch (tex->base.target) {
- case PIPE_TEXTURE_1D:
- name = "1D texture";
- break;
- case PIPE_TEXTURE_2D:
- name = "2D texture";
- break;
- case PIPE_TEXTURE_3D:
- name = "3D texture";
- break;
- case PIPE_TEXTURE_CUBE:
- name = "cube texture";
- break;
- case PIPE_TEXTURE_RECT:
- name = "rectangle texture";
- break;
- case PIPE_TEXTURE_1D_ARRAY:
- name = "1D array texture";
- break;
- case PIPE_TEXTURE_2D_ARRAY:
- name = "2D array texture";
- break;
- case PIPE_TEXTURE_CUBE_ARRAY:
- name = "cube array texture";
- break;
- default:
- name ="unknown texture";
- break;
- }
+ bool array_spacing_full;
+ bool interleaved;
- if (tex->handle) {
- bo = is->winsys->import_handle(is->winsys, name,
- tex->bo_width, tex->bo_height, tex->bo_cpp, tex->handle);
- }
- else {
- const bool for_render =
- (tex->base.bind & (PIPE_BIND_DEPTH_STENCIL |
- PIPE_BIND_RENDER_TARGET));
- const unsigned long flags =
- (for_render) ? INTEL_ALLOC_FOR_RENDER : 0;
+ struct {
+ int w, h, d;
+ struct ilo_texture_slice *slices;
+ } levels[PIPE_MAX_TEXTURE_LEVELS];
- bo = is->winsys->alloc(is->winsys, name,
- tex->bo_width, tex->bo_height, tex->bo_cpp,
- tex->tiling, flags);
- }
+ int align_i, align_j;
+ int qpitch;
- return bo;
-}
+ int width, height;
+};
-bool
-ilo_texture_alloc_bo(struct ilo_texture *tex)
+static void
+tex_layout_init_qpitch(struct tex_layout *layout)
{
- struct intel_bo *old_bo = tex->bo;
+ const struct pipe_resource *templ = layout->templ;
+ int h0, h1;
- /* a shared bo cannot be reallocated */
- if (old_bo && tex->handle)
- return false;
+ if (templ->array_size <= 1)
+ return;
- if (tex->base.target == PIPE_BUFFER)
- tex->bo = alloc_buf_bo(tex);
- else
- tex->bo = alloc_tex_bo(tex);
+ h0 = align(layout->levels[0].h, layout->align_j);
- if (!tex->bo) {
- tex->bo = old_bo;
- return false;
+ if (!layout->array_spacing_full) {
+ layout->qpitch = h0;
+ return;
}
- /* winsys may decide to use a different tiling */
- tex->tiling = tex->bo->get_tiling(tex->bo);
- tex->bo_stride = tex->bo->get_pitch(tex->bo);
-
- if (old_bo)
- old_bo->unreference(old_bo);
-
- return true;
-}
-
-static bool
-alloc_slice_offsets(struct ilo_texture *tex)
-{
- int depth, lv;
-
- /* sum the depths of all levels */
- depth = 0;
- for (lv = 0; lv <= tex->base.last_level; lv++)
- depth += u_minify(tex->base.depth0, lv);
+ h1 = align(layout->levels[1].h, layout->align_j);
/*
- * There are (depth * tex->base.array_size) slices. Either depth is one
- * (non-3D) or tex->base.array_size is one (non-array), but it does not
- * matter.
+ * From the Sandy Bridge PRM, volume 1 part 1, page 115:
+ *
+ * "The following equation is used for surface formats other than
+ * compressed textures:
+ *
+ * QPitch = (h0 + h1 + 11j)"
+ *
+ * "The equation for compressed textures (BC* and FXT1 surface formats)
+ * follows:
+ *
+ * QPitch = (h0 + h1 + 11j) / 4"
+ *
+ * "[DevSNB] Errata: Sampler MSAA Qpitch will be 4 greater than the
+ * value calculated in the equation above, for every other odd Surface
+ * Height starting from 1 i.e. 1,5,9,13"
+ *
+ * From the Ivy Bridge PRM, volume 1 part 1, page 111-112:
+ *
+ * "If Surface Array Spacing is set to ARYSPC_FULL (note that the depth
+ * buffer and stencil buffer have an implied value of ARYSPC_FULL):
+ *
+ * QPitch = (h0 + h1 + 12j)
+ * QPitch = (h0 + h1 + 12j) / 4 (compressed)
+ *
+ * (There are many typos or missing words here...)"
+ *
+ * To access the N-th slice, an offset of (Stride * QPitch * N) is added to
+ * the base address. The PRM divides QPitch by 4 for compressed formats
+ * because the block height for those formats is 4, and it wants QPitch to
+ * mean the number of memory rows, as opposed to texel rows, between
+ * slices. Since we use texel rows in tex->slice_offsets, we do not need
+ * to divide QPitch by 4.
*/
- tex->slice_offsets[0] =
- CALLOC(depth * tex->base.array_size, sizeof(tex->slice_offsets[0][0]));
- if (!tex->slice_offsets[0])
- return false;
-
- /* point to the respective positions in the buffer */
- for (lv = 1; lv <= tex->base.last_level; lv++) {
- tex->slice_offsets[lv] = tex->slice_offsets[lv - 1] +
- u_minify(tex->base.depth0, lv - 1) * tex->base.array_size;
- }
-
- return true;
-}
-
-static void
-free_slice_offsets(struct ilo_texture *tex)
-{
- int lv;
+ layout->qpitch = h0 + h1 +
+ ((layout->dev->gen >= ILO_GEN(7)) ? 12 : 11) * layout->align_j;
- FREE(tex->slice_offsets[0]);
- for (lv = 0; lv <= tex->base.last_level; lv++)
- tex->slice_offsets[lv] = NULL;
+ if (layout->dev->gen == ILO_GEN(6) && templ->nr_samples > 1 &&
+ templ->height0 % 4 == 1)
+ layout->qpitch += 4;
}
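+
+/*
+ * A rough worked example of the equations above (values are assumed for
+ * illustration, not taken from the PRM): on GEN6 with align_j = 4, h0 = 64,
+ * and h1 = 32,
+ *
+ *   QPitch = 64 + 32 + 11 * 4 = 140 texel rows
+ *
+ * while the same surface on GEN7 with ARYSPC_FULL gives
+ * 64 + 32 + 12 * 4 = 144.  Slice N then starts qpitch * N texel rows below
+ * slice 0.
+ */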
-struct layout_tex_info {
- bool compressed;
- int block_width, block_height;
- int align_i, align_j;
- bool array_spacing_full;
- bool interleaved;
- int qpitch;
-
- struct {
- int w, h, d;
- } sizes[PIPE_MAX_TEXTURE_LEVELS];
-};
-
-/**
- * Prepare for texture layout.
- */
static void
-layout_tex_init(const struct ilo_texture *tex, struct layout_tex_info *info)
+tex_layout_init_alignments(struct tex_layout *layout)
{
- struct ilo_screen *is = ilo_screen(tex->base.screen);
- const enum pipe_format bo_format = tex->bo_format;
- const enum intel_tiling_mode tiling = tex->tiling;
- const struct pipe_resource *templ = &tex->base;
- int last_level, lv;
-
- memset(info, 0, sizeof(*info));
-
- info->compressed = util_format_is_compressed(bo_format);
- info->block_width = util_format_get_blockwidth(bo_format);
- info->block_height = util_format_get_blockheight(bo_format);
+ const struct pipe_resource *templ = layout->templ;
/*
* From the Sandy Bridge PRM, volume 1 part 1, page 113:
* others 4 or 8 2 or 4
*/
- if (info->compressed) {
+ if (layout->compressed) {
/* this happens to be the case */
- info->align_i = info->block_width;
- info->align_j = info->block_height;
+ layout->align_i = layout->block_width;
+ layout->align_j = layout->block_height;
}
- else if (util_format_is_depth_or_stencil(bo_format)) {
- if (is->dev.gen >= ILO_GEN(7)) {
- switch (bo_format) {
+ else if (layout->has_depth || layout->has_stencil) {
+ if (layout->dev->gen >= ILO_GEN(7)) {
+ switch (layout->format) {
case PIPE_FORMAT_Z16_UNORM:
- info->align_i = 8;
- info->align_j = 4;
+ layout->align_i = 8;
+ layout->align_j = 4;
break;
case PIPE_FORMAT_S8_UINT:
- info->align_i = 8;
- info->align_j = 8;
+ layout->align_i = 8;
+ layout->align_j = 8;
break;
default:
/*
 * We will make use of them and setting align_i to 8 helps us meet
* the requirement.
*/
- info->align_i = (templ->last_level > 0) ? 8 : 4;
- info->align_j = 4;
+ layout->align_i = (templ->last_level > 0) ? 8 : 4;
+ layout->align_j = 4;
break;
}
}
else {
- switch (bo_format) {
+ switch (layout->format) {
case PIPE_FORMAT_S8_UINT:
- info->align_i = 4;
- info->align_j = 2;
+ layout->align_i = 4;
+ layout->align_j = 2;
break;
default:
- info->align_i = 4;
- info->align_j = 4;
+ layout->align_i = 4;
+ layout->align_j = 4;
break;
}
}
}
else {
const bool valign_4 = (templ->nr_samples > 1) ||
- (is->dev.gen >= ILO_GEN(7) &&
- (templ->bind & PIPE_BIND_RENDER_TARGET) &&
- tiling == INTEL_TILING_Y);
+ (layout->dev->gen >= ILO_GEN(7) &&
+ layout->tiling == INTEL_TILING_Y &&
+ (templ->bind & PIPE_BIND_RENDER_TARGET));
if (valign_4)
- assert(util_format_get_blocksizebits(bo_format) != 96);
+ assert(layout->block_size != 12);
- info->align_i = 4;
- info->align_j = (valign_4) ? 4 : 2;
+ layout->align_i = 4;
+ layout->align_j = (valign_4) ? 4 : 2;
}
/*
* size, slices start at block boundaries, and many of the computations
* work.
*/
- assert(info->align_i % info->block_width == 0);
- assert(info->align_j % info->block_height == 0);
+ assert(layout->align_i % layout->block_width == 0);
+ assert(layout->align_j % layout->block_height == 0);
/* make sure align() works */
- assert(util_is_power_of_two(info->align_i) &&
- util_is_power_of_two(info->align_j));
- assert(util_is_power_of_two(info->block_width) &&
- util_is_power_of_two(info->block_height));
-
- if (is->dev.gen >= ILO_GEN(7)) {
- /*
- * It is not explicitly states, but render targets are expected to be
- * UMS/CMS (samples non-interleaved) and depth/stencil buffers are
- * expected to be IMS (samples interleaved).
- *
- * See "Multisampled Surface Storage Format" field of SURFACE_STATE.
- */
- if (util_format_is_depth_or_stencil(bo_format)) {
- info->interleaved = true;
-
- /*
- * From the Ivy Bridge PRM, volume 1 part 1, page 111:
- *
- * "note that the depth buffer and stencil buffer have an implied
- * value of ARYSPC_FULL"
- */
- info->array_spacing_full = true;
- }
- else {
- info->interleaved = false;
-
- /*
- * From the Ivy Bridge PRM, volume 4 part 1, page 66:
- *
- * "If Multisampled Surface Storage Format is MSFMT_MSS and
- * Number of Multisamples is not MULTISAMPLECOUNT_1, this field
- * (Surface Array Spacing) must be set to ARYSPC_LOD0."
- *
- * As multisampled resources are not mipmapped, we never use
- * ARYSPC_FULL for them.
- */
- if (templ->nr_samples > 1)
- assert(templ->last_level == 0);
- info->array_spacing_full = (templ->last_level > 0);
- }
- }
- else {
- /* GEN6 supports only interleaved samples */
- info->interleaved = true;
+ assert(util_is_power_of_two(layout->align_i) &&
+ util_is_power_of_two(layout->align_j));
+ assert(util_is_power_of_two(layout->block_width) &&
+ util_is_power_of_two(layout->block_height));
+}
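+
+/*
+ * A few illustrative cases of the rules above (the values follow directly
+ * from the code; the formats are only examples):
+ *
+ *   - compressed (e.g. DXT1): align_i/align_j equal the 4x4 block size
+ *   - PIPE_FORMAT_S8_UINT: 8x8 on GEN7, 4x2 on GEN6
+ *   - mipmapped non-Z16 depth on GEN7: 8x4
+ *   - ordinary color, no MSAA, not a GEN7 Y-tiled render target: 4x2
+ */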
- /*
- * From the Sandy Bridge PRM, volume 1 part 1, page 115:
- *
- * "The separate stencil buffer does not support mip mapping, thus
- * the storage for LODs other than LOD 0 is not needed. The
- * following QPitch equation applies only to the separate stencil
- * buffer:
- *
- * QPitch = h_0"
- *
- * GEN6 does not support compact spacing otherwise.
- */
- info->array_spacing_full = (bo_format != PIPE_FORMAT_S8_UINT);
- }
+static void
+tex_layout_init_levels(struct tex_layout *layout)
+{
+ const struct pipe_resource *templ = layout->templ;
+ int last_level, lv;
last_level = templ->last_level;
/* need at least 2 levels to compute full qpitch */
- if (last_level == 0 && templ->array_size > 1 && info->array_spacing_full)
+ if (last_level == 0 && templ->array_size > 1 && layout->array_spacing_full)
last_level++;
/* compute mip level sizes */
* above. Then, if necessary, they are padded out to compression
* block boundaries."
*/
- w = align(w, info->block_width);
- h = align(h, info->block_height);
+ w = align(w, layout->block_width);
+ h = align(h, layout->block_height);
/*
* From the Sandy Bridge PRM, volume 1 part 1, page 111:
* w = align(w, 2) * 2;
* y = align(y, 2) * 2;
*/
- if (info->interleaved) {
+ if (layout->interleaved) {
switch (templ->nr_samples) {
case 0:
case 1:
}
}
- info->sizes[lv].w = w;
- info->sizes[lv].h = h;
- info->sizes[lv].d = d;
+ layout->levels[lv].w = w;
+ layout->levels[lv].h = h;
+ layout->levels[lv].d = d;
}
+}
- if (templ->array_size > 1) {
- const int h0 = align(info->sizes[0].h, info->align_j);
+static void
+tex_layout_init_spacing(struct tex_layout *layout)
+{
+ const struct pipe_resource *templ = layout->templ;
- if (info->array_spacing_full) {
- const int h1 = align(info->sizes[1].h, info->align_j);
+ if (layout->dev->gen >= ILO_GEN(7)) {
+ /*
+ * It is not explicitly stated, but render targets are expected to be
+ * UMS/CMS (samples non-interleaved) and depth/stencil buffers are
+ * expected to be IMS (samples interleaved).
+ *
+ * See "Multisampled Surface Storage Format" field of SURFACE_STATE.
+ */
+ if (layout->has_depth || layout->has_stencil) {
+ layout->interleaved = true;
/*
- * From the Sandy Bridge PRM, volume 1 part 1, page 115:
- *
- * "The following equation is used for surface formats other than
- * compressed textures:
- *
- * QPitch = (h0 + h1 + 11j)"
- *
- * "The equation for compressed textures (BC* and FXT1 surface
- * formats) follows:
- *
- * QPitch = (h0 + h1 + 11j) / 4"
- *
- * "[DevSNB] Errata: Sampler MSAA Qpitch will be 4 greater than
- * the value calculated in the equation above, for every other
- * odd Surface Height starting from 1 i.e. 1,5,9,13"
- *
- * From the Ivy Bridge PRM, volume 1 part 1, page 111-112:
- *
- * "If Surface Array Spacing is set to ARYSPC_FULL (note that the
- * depth buffer and stencil buffer have an implied value of
- * ARYSPC_FULL):
- *
- * QPitch = (h0 + h1 + 12j)
- * QPitch = (h0 + h1 + 12j) / 4 (compressed)
- *
- * (There are many typos or missing words here...)"
+ * From the Ivy Bridge PRM, volume 1 part 1, page 111:
*
- * To access the N-th slice, an offset of (Stride * QPitch * N) is
- * added to the base address. The PRM divides QPitch by 4 for
- * compressed formats because the block height for those formats are
- * 4, and it wants QPitch to mean the number of memory rows, as
- * opposed to texel rows, between slices. Since we use texel rows in
- * tex->slice_offsets, we do not need to divide QPitch by 4.
+ * "note that the depth buffer and stencil buffer have an implied
+ * value of ARYSPC_FULL"
*/
- info->qpitch = h0 + h1 +
- ((is->dev.gen >= ILO_GEN(7)) ? 12 : 11) * info->align_j;
-
- if (is->dev.gen == ILO_GEN(6) && templ->nr_samples > 1 &&
- templ->height0 % 4 == 1)
- info->qpitch += 4;
+ layout->array_spacing_full = true;
}
else {
- info->qpitch = h0;
+ layout->interleaved = false;
+
+ /*
+ * From the Ivy Bridge PRM, volume 4 part 1, page 66:
+ *
+ * "If Multisampled Surface Storage Format is MSFMT_MSS and
+ * Number of Multisamples is not MULTISAMPLECOUNT_1, this field
+ * (Surface Array Spacing) must be set to ARYSPC_LOD0."
+ *
+ * As multisampled resources are not mipmapped, we never use
+ * ARYSPC_FULL for them.
+ */
+ if (templ->nr_samples > 1)
+ assert(templ->last_level == 0);
+ layout->array_spacing_full = (templ->last_level > 0);
}
}
+ else {
+ /* GEN6 supports only interleaved samples */
+ layout->interleaved = true;
+
+ /*
+ * From the Sandy Bridge PRM, volume 1 part 1, page 115:
+ *
+ * "The separate stencil buffer does not support mip mapping, thus
+ * the storage for LODs other than LOD 0 is not needed. The
+ * following QPitch equation applies only to the separate stencil
+ * buffer:
+ *
+ * QPitch = h_0"
+ *
+ * GEN6 does not support compact spacing otherwise.
+ */
+ layout->array_spacing_full = (layout->format != PIPE_FORMAT_S8_UINT);
+ }
}
-/**
- * Layout a 2D texture.
- */
static void
-layout_tex_2d(struct ilo_texture *tex, const struct layout_tex_info *info)
+tex_layout_init_tiling(struct tex_layout *layout)
{
- const struct pipe_resource *templ = &tex->base;
- unsigned int level_x, level_y, num_slices;
- int lv;
+ const struct pipe_resource *templ = layout->templ;
+ const enum pipe_format format = layout->format;
+ const unsigned tile_none = 1 << INTEL_TILING_NONE;
+ const unsigned tile_x = 1 << INTEL_TILING_X;
+ const unsigned tile_y = 1 << INTEL_TILING_Y;
+ unsigned valid_tilings = tile_none | tile_x | tile_y;
- tex->bo_width = 0;
- tex->bo_height = 0;
-
- level_x = 0;
- level_y = 0;
- for (lv = 0; lv <= templ->last_level; lv++) {
- const unsigned int level_w = info->sizes[lv].w;
- const unsigned int level_h = info->sizes[lv].h;
+ /*
+ * From the Sandy Bridge PRM, volume 1 part 2, page 32:
+ *
+ * "Display/Overlay Y-Major not supported.
+ * X-Major required for Async Flips"
+ */
+ if (unlikely(templ->bind & PIPE_BIND_SCANOUT))
+ valid_tilings &= tile_x;
+
+ /*
+ * From the Sandy Bridge PRM, volume 3 part 2, page 158:
+ *
+ * "The cursor surface address must be 4K byte aligned. The cursor must
+ * be in linear memory, it cannot be tiled."
+ */
+ if (unlikely(templ->bind & PIPE_BIND_CURSOR))
+ valid_tilings &= tile_none;
+
+ /*
+ * From the Ivy Bridge PRM, volume 4 part 1, page 76:
+ *
+ * "The MCS surface must be stored as Tile Y."
+ */
+ if (templ->bind & ILO_BIND_MCS)
+ valid_tilings &= tile_y;
+
+ /*
+ * From the Sandy Bridge PRM, volume 2 part 1, page 318:
+ *
+ * "[DevSNB+]: This field (Tiled Surface) must be set to TRUE. Linear
+ * Depth Buffer is not supported."
+ *
+ * "The Depth Buffer, if tiled, must use Y-Major tiling."
+ *
+ * From the Sandy Bridge PRM, volume 1 part 2, page 22:
+ *
+ * "W-Major Tile Format is used for separate stencil."
+ *
+ * Since the HW does not support W-tiled fencing, we have to do it in the
+ * driver.
+ */
+ if (templ->bind & PIPE_BIND_DEPTH_STENCIL) {
+ switch (format) {
+ case PIPE_FORMAT_S8_UINT:
+ valid_tilings &= tile_none;
+ break;
+ default:
+ valid_tilings &= tile_y;
+ break;
+ }
+ }
+
+ if (templ->bind & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW)) {
+ if (templ->bind & PIPE_BIND_RENDER_TARGET) {
+ /*
+ * From the Sandy Bridge PRM, volume 1 part 2, page 32:
+ *
+ * "NOTE: 128BPE Format Color buffer ( render target ) MUST be
+ * either TileX or Linear."
+ */
+ if (layout->block_size == 16)
+ valid_tilings &= ~tile_y;
+
+ /*
+ * From the Ivy Bridge PRM, volume 4 part 1, page 63:
+ *
+ * "This field (Surface Vertical Aligment) must be set to
+ * VALIGN_4 for all tiled Y Render Target surfaces."
+ *
+ * "VALIGN_4 is not supported for surface format
+ * R32G32B32_FLOAT."
+ */
+ if (layout->dev->gen >= ILO_GEN(7) && layout->block_size == 12)
+ valid_tilings &= ~tile_y;
+ }
+
+ /*
+ * Also, heuristically set a minimum width/height for enabling tiling.
+ */
+ if (templ->width0 < 64 && (valid_tilings & ~tile_x))
+ valid_tilings &= ~tile_x;
+
+ if ((templ->width0 < 32 || templ->height0 < 16) &&
+ (templ->width0 < 16 || templ->height0 < 32) &&
+ (valid_tilings & ~tile_y))
+ valid_tilings &= ~tile_y;
+ }
+ else {
+ /* force linear if we are not sure where the texture will be bound */
+ if (valid_tilings & tile_none)
+ valid_tilings &= tile_none;
+ }
+
+ /* no conflicting binding flags */
+ assert(valid_tilings);
+
+ /* prefer tiled over linear */
+ if (valid_tilings & tile_y)
+ layout->tiling = INTEL_TILING_Y;
+ else if (valid_tilings & tile_x)
+ layout->tiling = INTEL_TILING_X;
+ else
+ layout->tiling = INTEL_TILING_NONE;
+
+ layout->can_be_linear = valid_tilings & tile_none;
+}
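+
+/*
+ * Illustrative outcomes of the rules above (the surfaces are examples, not
+ * an exhaustive list):
+ *
+ *   - scanout render target: only tile_x survives, so X-tiled
+ *   - separate stencil (S8_UINT): linear, with W-tiling handled in the
+ *     driver as noted above
+ *   - 128bpp (block_size == 16) render target: Y is masked out, X preferred
+ *   - 16x16 sampler view: both size heuristics strip X and Y, so it stays
+ *     linear
+ */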
+
+static void
+tex_layout_init_format(struct tex_layout *layout)
+{
+ const struct pipe_resource *templ = layout->templ;
+ enum pipe_format format;
+ const struct util_format_description *desc;
+
+ switch (templ->format) {
+ case PIPE_FORMAT_ETC1_RGB8:
+ format = PIPE_FORMAT_R8G8B8X8_UNORM;
+ break;
+ default:
+ format = templ->format;
+ break;
+ }
+
+ layout->format = format;
+
+ layout->block_width = util_format_get_blockwidth(format);
+ layout->block_height = util_format_get_blockheight(format);
+ layout->block_size = util_format_get_blocksize(format);
+ layout->compressed = util_format_is_compressed(format);
+
+ desc = util_format_description(format);
+ layout->has_depth = util_format_has_depth(desc);
+ layout->has_stencil = util_format_has_stencil(desc);
+}
+
+static void
+tex_layout_init(struct tex_layout *layout,
+ struct pipe_screen *screen,
+ const struct pipe_resource *templ,
+ struct ilo_texture_slice **slices)
+{
+ struct ilo_screen *is = ilo_screen(screen);
+
+ memset(layout, 0, sizeof(*layout));
+
+ layout->dev = &is->dev;
+ layout->templ = templ;
+
+ /* note that there are dependencies between these functions */
+ tex_layout_init_format(layout);
+ tex_layout_init_tiling(layout);
+ tex_layout_init_spacing(layout);
+ tex_layout_init_levels(layout);
+ tex_layout_init_alignments(layout);
+ tex_layout_init_qpitch(layout);
+
+ if (slices) {
+ int lv;
+
+ for (lv = 0; lv <= templ->last_level; lv++)
+ layout->levels[lv].slices = slices[lv];
+ }
+}
+
+static bool
+tex_layout_force_linear(struct tex_layout *layout)
+{
+ if (!layout->can_be_linear)
+ return false;
+
+ /*
+ * we may be able to switch from VALIGN_4 to VALIGN_2 when the layout was
+ * Y-tiled, but let's keep it simple
+ */
+ layout->tiling = INTEL_TILING_NONE;
+
+ return true;
+}
+
+/**
+ * Layout a 2D texture.
+ */
+static void
+tex_layout_2d(struct tex_layout *layout)
+{
+ const struct pipe_resource *templ = layout->templ;
+ unsigned int level_x, level_y, num_slices;
+ int lv;
+
+ level_x = 0;
+ level_y = 0;
+ for (lv = 0; lv <= templ->last_level; lv++) {
+ const unsigned int level_w = layout->levels[lv].w;
+ const unsigned int level_h = layout->levels[lv].h;
int slice;
- for (slice = 0; slice < templ->array_size; slice++) {
- tex->slice_offsets[lv][slice].x = level_x;
- /* slices are qpitch apart in Y-direction */
- tex->slice_offsets[lv][slice].y = level_y + info->qpitch * slice;
+ /* set slice offsets */
+ if (layout->levels[lv].slices) {
+ for (slice = 0; slice < templ->array_size; slice++) {
+ layout->levels[lv].slices[slice].x = level_x;
+ /* slices are qpitch apart in Y-direction */
+ layout->levels[lv].slices[slice].y =
+ level_y + layout->qpitch * slice;
+ }
}
/* extend the size of the monolithic bo to cover this mip level */
- if (tex->bo_width < level_x + level_w)
- tex->bo_width = level_x + level_w;
- if (tex->bo_height < level_y + level_h)
- tex->bo_height = level_y + level_h;
+ if (layout->width < level_x + level_w)
+ layout->width = level_x + level_w;
+ if (layout->height < level_y + level_h)
+ layout->height = level_y + level_h;
/* MIPLAYOUT_BELOW */
if (lv == 1)
- level_x += align(level_w, info->align_i);
+ level_x += align(level_w, layout->align_i);
else
- level_y += align(level_h, info->align_j);
+ level_y += align(level_h, layout->align_j);
}
num_slices = templ->array_size;
/* samples of the same index are stored in a slice */
- if (templ->nr_samples > 1 && !info->interleaved)
+ if (templ->nr_samples > 1 && !layout->interleaved)
num_slices *= templ->nr_samples;
/* we did not take slices into consideration in the computation above */
- tex->bo_height += info->qpitch * (num_slices - 1);
+ layout->height += layout->qpitch * (num_slices - 1);
}
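+
+/*
+ * A sketch of the resulting MIPLAYOUT_BELOW placement (coordinates assume a
+ * mipmapped, non-array surface): level 0 sits at (0, 0), level 1 goes below
+ * it at (0, align(h0, align_j)), level 2 goes to the right of level 1 at
+ * (align(w1, align_i), align(h0, align_j)), and levels 3+ keep stacking
+ * downward at that same x offset.  For arrays, each slice repeats this
+ * layout qpitch texel rows below the previous one.
+ */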
/**
* Layout a 3D texture.
*/
static void
-layout_tex_3d(struct ilo_texture *tex, const struct layout_tex_info *info)
+tex_layout_3d(struct tex_layout *layout)
{
- const struct pipe_resource *templ = &tex->base;
+ const struct pipe_resource *templ = layout->templ;
unsigned int level_y;
int lv;
- tex->bo_width = 0;
- tex->bo_height = 0;
-
level_y = 0;
for (lv = 0; lv <= templ->last_level; lv++) {
- const unsigned int level_w = info->sizes[lv].w;
- const unsigned int level_h = info->sizes[lv].h;
- const unsigned int level_d = info->sizes[lv].d;
- const unsigned int slice_pitch = align(level_w, info->align_i);
- const unsigned int slice_qpitch = align(level_h, info->align_j);
+ const unsigned int level_w = layout->levels[lv].w;
+ const unsigned int level_h = layout->levels[lv].h;
+ const unsigned int level_d = layout->levels[lv].d;
+ const unsigned int slice_pitch = align(level_w, layout->align_i);
+ const unsigned int slice_qpitch = align(level_h, layout->align_j);
const unsigned int num_slices_per_row = 1 << lv;
int slice;
for (slice = 0; slice < level_d; slice += num_slices_per_row) {
int i;
- for (i = 0; i < num_slices_per_row && slice + i < level_d; i++) {
- tex->slice_offsets[lv][slice + i].x = slice_pitch * i;
- tex->slice_offsets[lv][slice + i].y = level_y;
+ /* set slice offsets */
+ if (layout->levels[lv].slices) {
+ for (i = 0; i < num_slices_per_row && slice + i < level_d; i++) {
+ layout->levels[lv].slices[slice + i].x = slice_pitch * i;
+ layout->levels[lv].slices[slice + i].y = level_y;
+ }
}
/* move on to the next slice row */
slice = MIN2(num_slices_per_row, level_d) - 1;
/* extend the size of the monolithic bo to cover this slice */
- if (tex->bo_width < slice_pitch * slice + level_w)
- tex->bo_width = slice_pitch * slice + level_w;
+ if (layout->width < slice_pitch * slice + level_w)
+ layout->width = slice_pitch * slice + level_w;
if (lv == templ->last_level)
- tex->bo_height = (level_y - slice_qpitch) + level_h;
+ layout->height = (level_y - slice_qpitch) + level_h;
}
}
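+
+/*
+ * A worked example of the 3D layout above, assuming depth0 = 8 and
+ * last_level = 3: level 0 places its 8 slices in rows of 1 (1 << 0),
+ * level 1 places its 4 slices in rows of 2, level 2 places its 2 slices in
+ * a single row (its row width of 4 is capped by level_d), and level 3 has
+ * one slice.  Each slice row is expected to advance level_y by
+ * slice_qpitch, which is how the final height above is derived.
+ */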
-/**
- * Guess the texture size. For large textures, the errors are relative small.
- */
-static size_t
-guess_tex_size(const struct pipe_resource *templ,
- enum intel_tiling_mode tiling)
+static void
+tex_layout_validate(struct tex_layout *layout)
{
- int bo_width, bo_height, bo_stride;
-
- /* HALIGN_8 and VALIGN_4 */
- bo_width = align(templ->width0, 8);
- bo_height = align(templ->height0, 4);
-
- if (templ->target == PIPE_TEXTURE_3D) {
- const int num_rows = util_next_power_of_two(templ->depth0);
- int lv, sum;
+ /*
+ * From the Sandy Bridge PRM, volume 1 part 2, page 22:
+ *
+ * "A 4KB tile is subdivided into 8-high by 8-wide array of Blocks for
+ * W-Major Tiles (W Tiles). Each Block is 8 rows by 8 bytes."
+ *
+ * Since we ask for INTEL_TILING_NONE instead of the non-existent
+ * INTEL_TILING_W, we need to manually align the width and height to the
+ * tile boundaries.
+ */
+ if (layout->templ->format == PIPE_FORMAT_S8_UINT) {
+ layout->width = align(layout->width, 64);
+ layout->height = align(layout->height, 64);
+ }
- sum = bo_height * templ->depth0;
- for (lv = 1; lv <= templ->last_level; lv++)
- sum += u_minify(bo_height, lv) * u_minify(num_rows, lv);
+ assert(layout->width % layout->block_width == 0);
+ assert(layout->height % layout->block_height == 0);
+ assert(layout->qpitch % layout->block_height == 0);
+}
- bo_height = sum;
- }
- else if (templ->last_level > 0) {
- /* MIPLAYOUT_BELOW, ignore qpich */
- bo_height = (bo_height + u_minify(bo_height, 1)) * templ->array_size;
- }
+static size_t
+tex_layout_estimate_size(const struct tex_layout *layout)
+{
+ unsigned stride, height;
- bo_stride = util_format_get_stride(templ->format, bo_width);
+ stride = (layout->width / layout->block_width) * layout->block_size;
+ height = layout->height / layout->block_height;
- switch (tiling) {
+ switch (layout->tiling) {
case INTEL_TILING_X:
- bo_stride = align(bo_stride, 512);
- bo_height = align(bo_height, 8);
+ stride = align(stride, 512);
+ height = align(height, 8);
break;
case INTEL_TILING_Y:
- bo_stride = align(bo_stride, 128);
- bo_height = align(bo_height, 32);
+ stride = align(stride, 128);
+ height = align(height, 32);
break;
default:
- bo_height = align(bo_height, 2);
+ height = align(height, 2);
break;
}
- return util_format_get_2d_size(templ->format, bo_stride, bo_height);
+ return stride * height;
}
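+
+/*
+ * A rough worked example (an illustration of the estimate, not a guaranteed
+ * allocation size): a non-mipmapped 512x512 RGBA8 surface laid out Y-tiled
+ * has block_width = 1 and block_size = 4, so stride = 512 * 4 = 2048
+ * (already a multiple of 128) and height = 512 (already a multiple of 32),
+ * giving an estimate of 2048 * 512 = 1 MB.
+ */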
-static enum intel_tiling_mode
-get_tex_tiling(const struct ilo_texture *tex)
+static void
+tex_layout_apply(const struct tex_layout *layout, struct ilo_texture *tex)
{
- const struct pipe_resource *templ = &tex->base;
- const enum pipe_format bo_format = tex->bo_format;
+ tex->bo_format = layout->format;
- /*
- * From the Sandy Bridge PRM, volume 1 part 2, page 32:
- *
- * "Display/Overlay Y-Major not supported.
- * X-Major required for Async Flips"
- */
- if (unlikely(templ->bind & PIPE_BIND_SCANOUT))
- return INTEL_TILING_X;
+ /* in blocks */
+ tex->bo_width = layout->width / layout->block_width;
+ tex->bo_height = layout->height / layout->block_height;
+ tex->bo_cpp = layout->block_size;
+ tex->tiling = layout->tiling;
+
+ tex->compressed = layout->compressed;
+ tex->block_width = layout->block_width;
+ tex->block_height = layout->block_height;
+
+ tex->halign_8 = (layout->align_i == 8);
+ tex->valign_4 = (layout->align_j == 4);
+ tex->array_spacing_full = layout->array_spacing_full;
+ tex->interleaved = layout->interleaved;
+}
- /*
- * From the Sandy Bridge PRM, volume 3 part 2, page 158:
- *
- * "The cursor surface address must be 4K byte aligned. The cursor must
- * be in linear memory, it cannot be tiled."
- */
- if (unlikely(templ->bind & PIPE_BIND_CURSOR))
- return INTEL_TILING_NONE;
+static void
+tex_free_slices(struct ilo_texture *tex)
+{
+ FREE(tex->slice_offsets[0]);
+}
- /*
- * From the Ivy Bridge PRM, volume 4 part 1, page 76:
- *
- * "The MCS surface must be stored as Tile Y."
- */
- if (templ->bind & ILO_BIND_MCS)
- return INTEL_TILING_Y;
+static bool
+tex_alloc_slices(struct ilo_texture *tex)
+{
+ const struct pipe_resource *templ = &tex->base;
+ struct ilo_texture_slice *slices;
+ int depth, lv;
+
+ /* sum the depths of all levels */
+ depth = 0;
+ for (lv = 0; lv <= templ->last_level; lv++)
+ depth += u_minify(templ->depth0, lv);
/*
- * From the Sandy Bridge PRM, volume 2 part 1, page 318:
- *
- * "[DevSNB+]: This field (Tiled Surface) must be set to TRUE. Linear
- * Depth Buffer is not supported."
- *
- * "The Depth Buffer, if tiled, must use Y-Major tiling."
+ * There are (depth * templ->array_size) slices in total. Either depth
+ * is one (non-3D) or templ->array_size is one (non-array), but it does
+ * not matter.
*/
- if (templ->bind & PIPE_BIND_DEPTH_STENCIL) {
- /* separate stencil uses W-tiling but we do not know how to specify that */
- return (bo_format == PIPE_FORMAT_S8_UINT) ?
- INTEL_TILING_NONE : INTEL_TILING_Y;
- }
-
- if (templ->bind & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_SAMPLER_VIEW)) {
- enum intel_tiling_mode tiling = INTEL_TILING_NONE;
-
- /*
- * From the Sandy Bridge PRM, volume 1 part 2, page 32:
- *
- * "NOTE: 128BPE Format Color buffer ( render target ) MUST be
- * either TileX or Linear."
- *
- * Also, heuristically set a minimum width/height for enabling tiling.
- */
- if (util_format_get_blocksizebits(bo_format) == 128 &&
- (templ->bind & PIPE_BIND_RENDER_TARGET) && templ->width0 >= 64)
- tiling = INTEL_TILING_X;
- else if ((templ->width0 >= 32 && templ->height0 >= 16) ||
- (templ->width0 >= 16 && templ->height0 >= 32))
- tiling = INTEL_TILING_Y;
-
- /* make sure the bo can be mapped through GTT if tiled */
- if (tiling != INTEL_TILING_NONE) {
- /*
- * Usually only the first 256MB of the GTT is mappable.
- *
- * See also how intel_context::max_gtt_map_object_size is calculated.
- */
- const size_t mappable_gtt_size = 256 * 1024 * 1024;
- const size_t size = guess_tex_size(templ, tiling);
+ slices = CALLOC(depth * templ->array_size, sizeof(*slices));
+ if (!slices)
+ return false;
- /* be conservative */
- if (size > mappable_gtt_size / 4)
- tiling = INTEL_TILING_NONE;
- }
+ tex->slice_offsets[0] = slices;
- return tiling;
+ /* point to the respective positions in the buffer */
+ for (lv = 1; lv <= templ->last_level; lv++) {
+ tex->slice_offsets[lv] = tex->slice_offsets[lv - 1] +
+ u_minify(templ->depth0, lv - 1) * templ->array_size;
}
- return INTEL_TILING_NONE;
+ return true;
}
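+
+/*
+ * For illustration (values assumed): a 3D texture with depth0 = 8 and
+ * last_level = 3 sums depth = 8 + 4 + 2 + 1 = 15, so 15 slice structs are
+ * allocated in one block; slice_offsets[1] then points 8 entries past
+ * slice_offsets[0], slice_offsets[2] another 4 past that, and so on.
+ */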
-static void
-init_texture(struct ilo_texture *tex)
+static struct intel_bo *
+tex_create_bo(const struct ilo_texture *tex,
+ const struct winsys_handle *handle)
{
- struct layout_tex_info info;
-
- switch (tex->base.format) {
- case PIPE_FORMAT_ETC1_RGB8:
- tex->bo_format = PIPE_FORMAT_R8G8B8X8_UNORM;
- break;
- default:
- tex->bo_format = tex->base.format;
- break;
- }
-
- /* determine tiling first as it may affect the layout */
- tex->tiling = get_tex_tiling(tex);
-
- layout_tex_init(tex, &info);
-
- tex->compressed = info.compressed;
- tex->block_width = info.block_width;
- tex->block_height = info.block_height;
-
- tex->halign_8 = (info.align_i == 8);
- tex->valign_4 = (info.align_j == 4);
- tex->array_spacing_full = info.array_spacing_full;
- tex->interleaved = info.interleaved;
+ struct ilo_screen *is = ilo_screen(tex->base.screen);
+ const char *name;
+ struct intel_bo *bo;
switch (tex->base.target) {
case PIPE_TEXTURE_1D:
+ name = "1D texture";
+ break;
case PIPE_TEXTURE_2D:
+ name = "2D texture";
+ break;
+ case PIPE_TEXTURE_3D:
+ name = "3D texture";
+ break;
case PIPE_TEXTURE_CUBE:
+ name = "cube texture";
+ break;
case PIPE_TEXTURE_RECT:
+ name = "rectangle texture";
+ break;
case PIPE_TEXTURE_1D_ARRAY:
+ name = "1D array texture";
+ break;
case PIPE_TEXTURE_2D_ARRAY:
- case PIPE_TEXTURE_CUBE_ARRAY:
- layout_tex_2d(tex, &info);
+ name = "2D array texture";
break;
- case PIPE_TEXTURE_3D:
- layout_tex_3d(tex, &info);
+ case PIPE_TEXTURE_CUBE_ARRAY:
+ name = "cube array texture";
break;
default:
- assert(!"unknown resource target");
+ name ="unknown texture";
break;
}
- /*
- * From the Sandy Bridge PRM, volume 1 part 2, page 22:
- *
- * "A 4KB tile is subdivided into 8-high by 8-wide array of Blocks for
- * W-Major Tiles (W Tiles). Each Block is 8 rows by 8 bytes."
- *
- * Since we ask for INTEL_TILING_NONE instead lf INTEL_TILING_W, we need to
- * manually align the bo width and height to the tile boundaries.
- */
- if (tex->bo_format == PIPE_FORMAT_S8_UINT) {
- tex->bo_width = align(tex->bo_width, 64);
- tex->bo_height = align(tex->bo_height, 64);
+ if (handle) {
+ bo = is->winsys->import_handle(is->winsys, name,
+ tex->bo_width, tex->bo_height, tex->bo_cpp, handle);
+ }
+ else {
+ bo = is->winsys->alloc(is->winsys, name,
+ tex->bo_width, tex->bo_height, tex->bo_cpp,
+ tex->tiling, tex->bo_flags);
}
- /* in blocks */
- assert(tex->bo_width % info.block_width == 0);
- assert(tex->bo_height % info.block_height == 0);
- tex->bo_width /= info.block_width;
- tex->bo_height /= info.block_height;
- tex->bo_cpp = util_format_get_blocksize(tex->bo_format);
+ return bo;
+}
+
+static void
+tex_set_bo(struct ilo_texture *tex, struct intel_bo *bo)
+{
+ if (tex->bo)
+ tex->bo->unreference(tex->bo);
+
+ tex->bo = bo;
+
+ /* winsys may decide to use a different tiling */
+ tex->tiling = tex->bo->get_tiling(tex->bo);
+ tex->bo_stride = tex->bo->get_pitch(tex->bo);
}
static void
-init_buffer(struct ilo_texture *tex)
+tex_destroy(struct ilo_texture *tex)
{
- tex->bo_format = tex->base.format;
- tex->bo_width = tex->base.width0;
- tex->bo_height = 1;
- tex->bo_cpp = 1;
- tex->bo_stride = 0;
- tex->tiling = INTEL_TILING_NONE;
-
- tex->compressed = false;
- tex->block_width = 1;
- tex->block_height = 1;
-
- tex->halign_8 = false;
- tex->valign_4 = false;
- tex->array_spacing_full = false;
- tex->interleaved = false;
+ tex->bo->unreference(tex->bo);
+ tex_free_slices(tex);
+ FREE(tex);
}
static struct pipe_resource *
-create_resource(struct pipe_screen *screen,
- const struct pipe_resource *templ,
- struct winsys_handle *handle)
+tex_create(struct pipe_screen *screen,
+ const struct pipe_resource *templ,
+ const struct winsys_handle *handle)
{
+ struct tex_layout layout;
struct ilo_texture *tex;
+ struct intel_bo *bo;
tex = CALLOC_STRUCT(ilo_texture);
if (!tex)
tex->base = *templ;
tex->base.screen = screen;
pipe_reference_init(&tex->base.reference, 1);
- tex->handle = handle;
- if (!alloc_slice_offsets(tex)) {
+ if (!tex_alloc_slices(tex)) {
FREE(tex);
return NULL;
}
- if (templ->target == PIPE_BUFFER)
- init_buffer(tex);
- else
- init_texture(tex);
+ tex->imported = (handle != NULL);
+
+ if (tex->base.bind & (PIPE_BIND_DEPTH_STENCIL |
+ PIPE_BIND_RENDER_TARGET))
+ tex->bo_flags |= INTEL_ALLOC_FOR_RENDER;
- if (!ilo_texture_alloc_bo(tex)) {
- free_slice_offsets(tex);
+ tex_layout_init(&layout, screen, templ, tex->slice_offsets);
+
+ switch (templ->target) {
+ case PIPE_TEXTURE_1D:
+ case PIPE_TEXTURE_2D:
+ case PIPE_TEXTURE_CUBE:
+ case PIPE_TEXTURE_RECT:
+ case PIPE_TEXTURE_1D_ARRAY:
+ case PIPE_TEXTURE_2D_ARRAY:
+ case PIPE_TEXTURE_CUBE_ARRAY:
+ tex_layout_2d(&layout);
+ break;
+ case PIPE_TEXTURE_3D:
+ tex_layout_3d(&layout);
+ break;
+ default:
+ assert(!"unknown resource target");
+ break;
+ }
+
+ tex_layout_validate(&layout);
+
+ /* make sure the bo can be mapped through GTT if tiled */
+ if (layout.tiling != INTEL_TILING_NONE) {
+ /*
+ * Usually only the first 256MB of the GTT is mappable.
+ *
+ * See also how intel_context::max_gtt_map_object_size is calculated.
+ */
+ const size_t mappable_gtt_size = 256 * 1024 * 1024;
+ const size_t size = tex_layout_estimate_size(&layout);
+
+ /* be conservative */
+ if (size > mappable_gtt_size / 4)
+ tex_layout_force_linear(&layout);
+ }
+
+ tex_layout_apply(&layout, tex);
+
+ bo = tex_create_bo(tex, handle);
+ if (!bo) {
+ tex_free_slices(tex);
FREE(tex);
return NULL;
}
+ tex_set_bo(tex, bo);
+
return &tex->base;
}
+static bool
+tex_get_handle(struct ilo_texture *tex, struct winsys_handle *handle)
+{
+ int err;
+
+ err = tex->bo->export_handle(tex->bo, handle);
+
+ return !err;
+}
+
+/**
+ * Estimate the texture size. For large textures, the errors should be pretty
+ * small.
+ */
+static size_t
+tex_estimate_size(struct pipe_screen *screen,
+ const struct pipe_resource *templ)
+{
+ struct tex_layout layout;
+
+ tex_layout_init(&layout, screen, templ, NULL);
+
+ switch (templ->target) {
+ case PIPE_TEXTURE_3D:
+ tex_layout_3d(&layout);
+ break;
+ default:
+ tex_layout_2d(&layout);
+ break;
+ }
+
+ tex_layout_validate(&layout);
+
+ return tex_layout_estimate_size(&layout);
+}
+
+static struct intel_bo *
+buf_create_bo(const struct ilo_buffer *buf)
+{
+ struct ilo_screen *is = ilo_screen(buf->base.screen);
+ const char *name;
+
+ switch (buf->base.bind) {
+ case PIPE_BIND_VERTEX_BUFFER:
+ name = "vertex buffer";
+ break;
+ case PIPE_BIND_INDEX_BUFFER:
+ name = "index buffer";
+ break;
+ case PIPE_BIND_CONSTANT_BUFFER:
+ name = "constant buffer";
+ break;
+ case PIPE_BIND_STREAM_OUTPUT:
+ name = "stream output";
+ break;
+ default:
+ name = "unknown buffer";
+ break;
+ }
+
+ return is->winsys->alloc_buffer(is->winsys,
+ name, buf->bo_size, buf->bo_flags);
+}
+
+static void
+buf_set_bo(struct ilo_buffer *buf, struct intel_bo *bo)
+{
+ if (buf->bo)
+ buf->bo->unreference(buf->bo);
+
+ buf->bo = bo;
+}
+
+static void
+buf_destroy(struct ilo_buffer *buf)
+{
+ buf->bo->unreference(buf->bo);
+ FREE(buf);
+}
+
+static struct pipe_resource *
+buf_create(struct pipe_screen *screen, const struct pipe_resource *templ)
+{
+ struct ilo_buffer *buf;
+ struct intel_bo *bo;
+
+ buf = CALLOC_STRUCT(ilo_buffer);
+ if (!buf)
+ return NULL;
+
+ buf->base = *templ;
+ buf->base.screen = screen;
+ pipe_reference_init(&buf->base.reference, 1);
+
+ buf->bo_size = templ->width0;
+ buf->bo_flags = 0;
+
+ bo = buf_create_bo(buf);
+ if (!bo) {
+ FREE(buf);
+ return NULL;
+ }
+
+ buf_set_bo(buf, bo);
+
+ return &buf->base;
+}
+
static boolean
ilo_can_create_resource(struct pipe_screen *screen,
const struct pipe_resource *templ)
* So just set a limit on the texture size.
*/
const size_t max_size = 1 * 1024 * 1024 * 1024;
- const size_t size = guess_tex_size(templ, INTEL_TILING_Y);
+ size_t size;
+
+ if (templ->target == PIPE_BUFFER)
+ size = templ->width0;
+ else
+ size = tex_estimate_size(screen, templ);
return (size <= max_size);
}
ilo_resource_create(struct pipe_screen *screen,
const struct pipe_resource *templ)
{
- return create_resource(screen, templ, NULL);
+ if (templ->target == PIPE_BUFFER)
+ return buf_create(screen, templ);
+ else
+ return tex_create(screen, templ, NULL);
}
static struct pipe_resource *
const struct pipe_resource *templ,
struct winsys_handle *handle)
{
- return create_resource(screen, templ, handle);
+ if (templ->target == PIPE_BUFFER)
+ return NULL;
+ else
+ return tex_create(screen, templ, handle);
}
static boolean
struct pipe_resource *res,
struct winsys_handle *handle)
{
- struct ilo_texture *tex = ilo_texture(res);
- int err;
-
- err = tex->bo->export_handle(tex->bo, handle);
+ if (res->target == PIPE_BUFFER)
+ return false;
+ else
+ return tex_get_handle(ilo_texture(res), handle);
- return !err;
}
static void
ilo_resource_destroy(struct pipe_screen *screen,
struct pipe_resource *res)
{
- struct ilo_texture *tex = ilo_texture(res);
-
- free_slice_offsets(tex);
- tex->bo->unreference(tex->bo);
- FREE(tex);
+ if (res->target == PIPE_BUFFER)
+ buf_destroy(ilo_buffer(res));
+ else
+ tex_destroy(ilo_texture(res));
}
/**
is->base.resource_destroy = ilo_resource_destroy;
}
+bool
+ilo_buffer_alloc_bo(struct ilo_buffer *buf)
+{
+ struct intel_bo *bo;
+
+ bo = buf_create_bo(buf);
+ if (!bo)
+ return false;
+
+ buf_set_bo(buf, bo);
+
+ return true;
+}
+
+bool
+ilo_texture_alloc_bo(struct ilo_texture *tex)
+{
+ struct intel_bo *bo;
+
+ /* a shared bo cannot be reallocated */
+ if (tex->imported)
+ return false;
+
+ bo = tex_create_bo(tex, NULL);
+ if (!bo)
+ return false;
+
+ tex_set_bo(tex, bo);
+
+ return true;
+}
+
/**
* Return the offset (in bytes) to a slice within the bo.
*
return (struct ilo_transfer *) transfer;
}
-static void
-ilo_transfer_inline_write(struct pipe_context *pipe,
- struct pipe_resource *res,
- unsigned level,
- unsigned usage,
- const struct pipe_box *box,
- const void *data,
- unsigned stride,
- unsigned layer_stride)
+/**
+ * Choose the best mapping method, depending on the transfer usage and whether
+ * the bo is busy.
+ */
+static bool
+transfer_choose_method(struct ilo_context *ilo, struct ilo_transfer *xfer)
{
- struct ilo_context *ilo = ilo_context(pipe);
- struct ilo_texture *tex = ilo_texture(res);
- int offset, size;
- bool will_be_busy;
+ struct pipe_resource *res = xfer->base.resource;
+ struct ilo_texture *tex;
+ struct ilo_buffer *buf;
+ struct intel_bo *bo;
+ bool will_be_busy, will_stall;
- /*
- * Fall back to map(), memcpy(), and unmap(). We use this path for
- * unsynchronized write, as the buffer is likely to be busy and pwrite()
- * will stall.
- */
- if (unlikely(tex->base.target != PIPE_BUFFER) ||
- (usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
- u_default_transfer_inline_write(pipe, res,
- level, usage, box, data, stride, layer_stride);
+ if (res->target == PIPE_BUFFER) {
+ tex = NULL;
- return;
+ buf = ilo_buffer(res);
+ bo = buf->bo;
}
+ else {
+ buf = NULL;
- /*
- * XXX With hardware context support, the bo may be needed by GPU without
- * being referenced by ilo->cp->bo. We have to flush unconditionally, and
- * that is bad.
- */
- if (ilo->cp->hw_ctx)
- ilo_cp_flush(ilo->cp);
-
- will_be_busy = ilo->cp->bo->references(ilo->cp->bo, tex->bo);
+ tex = ilo_texture(res);
+ bo = tex->bo;
- /* see if we can avoid stalling */
- if (will_be_busy || intel_bo_is_busy(tex->bo)) {
- bool will_stall = true;
+ /* need to convert on-the-fly */
+ if (tex->bo_format != tex->base.format &&
+ !(xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
+ xfer->method = ILO_TRANSFER_MAP_STAGING_SYS;
- if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
- /* old data not needed so discard the old bo to avoid stalling */
- if (ilo_texture_alloc_bo(tex))
- will_stall = false;
- }
- else {
- /*
- * We could allocate a temporary bo to hold the data and emit
- * pipelined copy blit to move them to tex->bo. But for now, do
- * nothing.
- */
+ return true;
}
+ }
- /* flush to make bo busy (so that pwrite() stalls as it should be) */
- if (will_stall && will_be_busy)
+ xfer->method = ILO_TRANSFER_MAP_DIRECT;
+
+ /* unsynchronized map does not stall */
+ if (xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
+ return true;
+
+ will_be_busy = ilo->cp->bo->references(ilo->cp->bo, bo);
+ if (!will_be_busy) {
+ /*
+ * XXX With hardware context support, the bo may be needed by the GPU
+ * without being referenced by ilo->cp->bo. We have to flush
+ * unconditionally, and that is bad.
+ */
+ if (ilo->cp->hw_ctx)
ilo_cp_flush(ilo->cp);
+
+ if (!intel_bo_is_busy(bo))
+ return true;
}
- /* for PIPE_BUFFERs, conversion should not be needed */
- assert(tex->bo_format == tex->base.format);
+ /* bo is busy and mapping it will stall */
+ will_stall = true;
- /* they should specify just an offset and a size */
- assert(level == 0);
- assert(box->y == 0);
- assert(box->z == 0);
- assert(box->height == 1);
- assert(box->depth == 1);
- offset = box->x;
- size = box->width;
+ if (xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY) {
+ /* nothing we can do */
+ }
+ else if (xfer->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+ /* discard old bo and allocate a new one for mapping */
+ if ((tex && ilo_texture_alloc_bo(tex)) ||
+ (buf && ilo_buffer_alloc_bo(buf)))
+ will_stall = false;
+ }
+ else if (xfer->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT) {
+ /*
+ * We could allocate and return a system buffer here. When a region of
+ * the buffer is explicitly flushed, we pwrite() the region to a
+ * temporary bo and emit a pipelined copy blit.
+ *
+ * For now, do nothing.
+ */
+ }
+ else if (xfer->base.usage & PIPE_TRANSFER_DISCARD_RANGE) {
+ /*
+ * We could allocate a temporary bo for mapping, and emit a pipelined copy
+ * blit upon unmapping.
+ *
+ * For now, do nothing.
+ */
+ }
+
+ if (will_stall) {
+ if (xfer->base.usage & PIPE_TRANSFER_DONTBLOCK)
+ return false;
+
+ /* flush to make bo busy (so that map() stalls as it should) */
+ if (will_be_busy)
+ ilo_cp_flush(ilo->cp);
+ }
- tex->bo->pwrite(tex->bo, offset, size, data);
+ return true;
}
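+
+/*
+ * Illustrative outcomes of the policy above (the scenarios are examples
+ * only): a PIPE_TRANSFER_UNSYNCHRONIZED map never stalls; a map with
+ * PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE of a busy resource swaps in a fresh
+ * bo so the map does not stall; and when the stall cannot be avoided,
+ * PIPE_TRANSFER_DONTBLOCK makes the map fail instead of stalling.
+ */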
static void
-transfer_unmap_sys_convert(enum pipe_format dst_fmt,
- const struct pipe_transfer *dst_xfer,
- void *dst,
- enum pipe_format src_fmt,
- const struct pipe_transfer *src_xfer,
- const void *src)
+tex_unmap_sys_convert(enum pipe_format dst_fmt,
+ const struct pipe_transfer *dst_xfer,
+ void *dst,
+ enum pipe_format src_fmt,
+ const struct pipe_transfer *src_xfer,
+ const void *src)
{
int i;
}
static void
-transfer_unmap_sys(struct ilo_context *ilo,
- struct ilo_texture *tex,
- struct ilo_transfer *xfer)
+tex_unmap_sys(struct ilo_context *ilo,
+ struct ilo_texture *tex,
+ struct ilo_transfer *xfer)
{
const void *src = xfer->ptr;
struct pipe_transfer *dst_xfer;
}
if (likely(tex->bo_format != tex->base.format)) {
- transfer_unmap_sys_convert(tex->bo_format, dst_xfer, dst,
+ tex_unmap_sys_convert(tex->bo_format, dst_xfer, dst,
tex->base.format, &xfer->base, src);
}
else {
}
static bool
-transfer_map_sys(struct ilo_context *ilo,
- struct ilo_texture *tex,
- struct ilo_transfer *xfer)
+tex_map_sys(struct ilo_context *ilo,
+ struct ilo_texture *tex,
+ struct ilo_transfer *xfer)
{
const struct pipe_box *box = &xfer->base.box;
const size_t stride = util_format_get_stride(tex->base.format, box->width);
}
static void
-transfer_unmap_direct(struct ilo_context *ilo,
- struct ilo_texture *tex,
- struct ilo_transfer *xfer)
+tex_unmap_direct(struct ilo_context *ilo,
+ struct ilo_texture *tex,
+ struct ilo_transfer *xfer)
{
tex->bo->unmap(tex->bo);
}
static bool
-transfer_map_direct(struct ilo_context *ilo,
- struct ilo_texture *tex,
- struct ilo_transfer *xfer)
+tex_map_direct(struct ilo_context *ilo,
+ struct ilo_texture *tex,
+ struct ilo_transfer *xfer)
{
- int x, y, err;
+ int err, x, y;
if (xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
err = tex->bo->map_unsynchronized(tex->bo);
return true;
}
-/**
- * Choose the best mapping method, depending on the transfer usage and whether
- * the bo is busy.
- */
static bool
-transfer_map_choose_method(struct ilo_context *ilo,
- struct ilo_texture *tex,
- struct ilo_transfer *xfer)
+tex_map(struct ilo_context *ilo, struct ilo_transfer *xfer)
{
- bool will_be_busy, will_stall;
+ struct ilo_texture *tex = ilo_texture(xfer->base.resource);
+ bool success;
- /* need to convert on-the-fly */
- if (tex->bo_format != tex->base.format &&
- !(xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
- xfer->method = ILO_TRANSFER_MAP_STAGING_SYS;
+ success = transfer_choose_method(ilo, xfer);
+ if (!success)
+ return false;
- return true;
+ switch (xfer->method) {
+ case ILO_TRANSFER_MAP_DIRECT:
+ success = tex_map_direct(ilo, tex, xfer);
+ break;
+ case ILO_TRANSFER_MAP_STAGING_SYS:
+ success = tex_map_sys(ilo, tex, xfer);
+ break;
+ default:
+ assert(!"unknown mapping method");
+ success = false;
+ break;
}
- xfer->method = ILO_TRANSFER_MAP_DIRECT;
+ return success;
+}
+
+static void
+tex_unmap(struct ilo_context *ilo, struct ilo_transfer *xfer)
+{
+ struct ilo_texture *tex = ilo_texture(xfer->base.resource);
+
+ switch (xfer->method) {
+ case ILO_TRANSFER_MAP_DIRECT:
+ tex_unmap_direct(ilo, tex, xfer);
+ break;
+ case ILO_TRANSFER_MAP_STAGING_SYS:
+ tex_unmap_sys(ilo, tex, xfer);
+ break;
+ default:
+ assert(!"unknown mapping method");
+ break;
+ }
+}
+
+static bool
+buf_map(struct ilo_context *ilo, struct ilo_transfer *xfer)
+{
+ struct ilo_buffer *buf = ilo_buffer(xfer->base.resource);
+ int err;
+
+ if (!transfer_choose_method(ilo, xfer))
+ return false;
+
+ assert(xfer->method == ILO_TRANSFER_MAP_DIRECT);
- /* unsynchronized map does not stall */
if (xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
- return true;
+ err = buf->bo->map_unsynchronized(buf->bo);
+ else if (ilo->dev->has_llc || (xfer->base.usage & PIPE_TRANSFER_READ))
+ err = buf->bo->map(buf->bo, (xfer->base.usage & PIPE_TRANSFER_WRITE));
+ else
+ err = buf->bo->map_gtt(buf->bo);
- will_be_busy = ilo->cp->bo->references(ilo->cp->bo, tex->bo);
- if (!will_be_busy) {
- /*
- * XXX With hardware context support, the bo may be needed by GPU
- * without being referenced by ilo->cp->bo. We have to flush
- * unconditionally, and that is bad.
- */
- if (ilo->cp->hw_ctx)
- ilo_cp_flush(ilo->cp);
+ if (err)
+ return false;
- if (!intel_bo_is_busy(tex->bo))
- return true;
- }
+ assert(xfer->base.level == 0);
+ assert(xfer->base.box.y == 0);
+ assert(xfer->base.box.z == 0);
+ assert(xfer->base.box.height == 1);
+ assert(xfer->base.box.depth == 1);
- /* bo is busy and mapping it will stall */
- will_stall = true;
+ xfer->base.stride = 0;
+ xfer->base.layer_stride = 0;
- if (xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY) {
- /* nothing we can do */
- }
- else if (xfer->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
- /* discard old bo and allocate a new one for mapping */
- if (ilo_texture_alloc_bo(tex))
- will_stall = false;
- }
- else if (xfer->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT) {
- /*
- * We could allocate and return a system buffer here. When a region of
- * the buffer is explicitly flushed, we pwrite() the region to a
- * temporary bo and emit pipelined copy blit.
- *
- * For now, do nothing.
- */
- }
- else if (xfer->base.usage & PIPE_TRANSFER_DISCARD_RANGE) {
- /*
- * We could allocate a temporary bo for mapping, and emit pipelined copy
- * blit upon unmapping.
- *
- * For now, do nothing.
- */
- }
+ xfer->ptr = buf->bo->get_virtual(buf->bo);
+ xfer->ptr += xfer->base.box.x;
- if (will_stall) {
- if (xfer->base.usage & PIPE_TRANSFER_DONTBLOCK)
- return false;
+ return true;
+}
- /* flush to make bo busy (so that map() stalls as it should be) */
- if (will_be_busy)
+static void
+buf_unmap(struct ilo_context *ilo, struct ilo_transfer *xfer)
+{
+ struct ilo_buffer *buf = ilo_buffer(xfer->base.resource);
+
+ buf->bo->unmap(buf->bo);
+}
+
+static void
+buf_pwrite(struct ilo_context *ilo, struct ilo_buffer *buf,
+ unsigned usage, int offset, int size, const void *data)
+{
+ bool will_be_busy;
+
+ /*
+ * XXX With hardware context support, the bo may be needed by the GPU without
+ * being referenced by ilo->cp->bo. We have to flush unconditionally, and
+ * that is bad.
+ */
+ if (ilo->cp->hw_ctx)
+ ilo_cp_flush(ilo->cp);
+
+ will_be_busy = ilo->cp->bo->references(ilo->cp->bo, buf->bo);
+
+ /* see if we can avoid stalling */
+ if (will_be_busy || intel_bo_is_busy(buf->bo)) {
+ bool will_stall = true;
+
+ if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+ /* old data not needed so discard the old bo to avoid stalling */
+ if (ilo_buffer_alloc_bo(buf))
+ will_stall = false;
+ }
+ else {
+ /*
+ * We could allocate a temporary bo to hold the data and emit
+ * a pipelined copy blit to move them to buf->bo. But for now, do
+ * nothing.
+ */
+ }
+
+ /* flush to make bo busy (so that pwrite() stalls as it should) */
+ if (will_stall && will_be_busy)
ilo_cp_flush(ilo->cp);
}
- return true;
+ buf->bo->pwrite(buf->bo, offset, size, data);
}
static void
struct pipe_transfer *transfer)
{
struct ilo_context *ilo = ilo_context(pipe);
- struct ilo_texture *tex = ilo_texture(transfer->resource);
struct ilo_transfer *xfer = ilo_transfer(transfer);
- switch (xfer->method) {
- case ILO_TRANSFER_MAP_DIRECT:
- transfer_unmap_direct(ilo, tex, xfer);
- break;
- case ILO_TRANSFER_MAP_STAGING_SYS:
- transfer_unmap_sys(ilo, tex, xfer);
- break;
- default:
- assert(!"unknown mapping method");
- break;
- }
+ if (xfer->base.resource->target == PIPE_BUFFER)
+ buf_unmap(ilo, xfer);
+ else
+ tex_unmap(ilo, xfer);
pipe_resource_reference(&xfer->base.resource, NULL);
FREE(xfer);
struct pipe_transfer **transfer)
{
struct ilo_context *ilo = ilo_context(pipe);
- struct ilo_texture *tex = ilo_texture(res);
struct ilo_transfer *xfer;
- int ok;
+ bool success;
xfer = MALLOC_STRUCT(ilo_transfer);
if (!xfer) {
}
xfer->base.resource = NULL;
- pipe_resource_reference(&xfer->base.resource, &tex->base);
+ pipe_resource_reference(&xfer->base.resource, res);
xfer->base.level = level;
xfer->base.usage = usage;
xfer->base.box = *box;
- ok = transfer_map_choose_method(ilo, tex, xfer);
- if (ok) {
- switch (xfer->method) {
- case ILO_TRANSFER_MAP_DIRECT:
- ok = transfer_map_direct(ilo, tex, xfer);
- break;
- case ILO_TRANSFER_MAP_STAGING_SYS:
- ok = transfer_map_sys(ilo, tex, xfer);
- break;
- default:
- assert(!"unknown mapping method");
- ok = false;
- break;
- }
- }
+ if (res->target == PIPE_BUFFER)
+ success = buf_map(ilo, xfer);
+ else
+ success = tex_map(ilo, xfer);
- if (!ok) {
+ if (!success) {
pipe_resource_reference(&xfer->base.resource, NULL);
FREE(xfer);
-
*transfer = NULL;
-
return NULL;
}
return xfer->ptr;
}
+static void
+ilo_transfer_inline_write(struct pipe_context *pipe,
+ struct pipe_resource *res,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ const void *data,
+ unsigned stride,
+ unsigned layer_stride)
+{
+ if (likely(res->target == PIPE_BUFFER) &&
+ !(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ /* they should specify just an offset and a size */
+ assert(level == 0);
+ assert(box->y == 0);
+ assert(box->z == 0);
+ assert(box->height == 1);
+ assert(box->depth == 1);
+
+ buf_pwrite(ilo_context(pipe), ilo_buffer(res),
+ usage, box->x, box->width, data);
+ }
+ else {
+ u_default_transfer_inline_write(pipe, res,
+ level, usage, box, data, stride, layer_stride);
+ }
+}
+
/**
* Initialize transfer-related functions.
*/