Rename struct ilo_resource to struct ilo_texture (and the associated ilo_resource_* helpers to ilo_texture_*), in preparation for the introduction of ilo_buffer.
static bool
blitter_xy_color_blt(struct pipe_context *pipe,
- struct pipe_resource *r,
+ struct pipe_resource *res,
int16_t x1, int16_t y1,
int16_t x2, int16_t y2,
uint32_t color)
{
const int cmd_len = 6;
struct ilo_context *ilo = ilo_context(pipe);
- struct ilo_resource *res = ilo_resource(r);
+ struct ilo_texture *tex = ilo_texture(res);
uint32_t cmd, br13;
int cpp, stride;
struct intel_bo *bo_check[2];
/* how to support Y-tiling? */
- if (res->tiling == INTEL_TILING_Y)
+ if (tex->tiling == INTEL_TILING_Y)
return false;
/* nothing to clear */
cmd = XY_COLOR_BLT_CMD | (cmd_len - 2);
br13 = 0xf0 << 16;
- cpp = util_format_get_blocksize(res->base.format);
+ cpp = util_format_get_blocksize(tex->base.format);
switch (cpp) {
case 4:
cmd |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
break;
}
- stride = res->bo_stride;
- if (res->tiling != INTEL_TILING_NONE) {
- assert(res->tiling == INTEL_TILING_X);
+ stride = tex->bo_stride;
+ if (tex->tiling != INTEL_TILING_NONE) {
+ assert(tex->tiling == INTEL_TILING_X);
cmd |= XY_DST_TILED;
/* in dwords */
/* make room if necessary */
bo_check[0] = ilo->cp->bo;
- bo_check[1] = res->bo;
+ bo_check[1] = tex->bo;
if (ilo->winsys->check_aperture_space(ilo->winsys, bo_check, 2))
ilo_cp_flush(ilo->cp);
ilo_cp_write(ilo->cp, br13 | stride);
ilo_cp_write(ilo->cp, (y1 << 16) | x1);
ilo_cp_write(ilo->cp, (y2 << 16) | x2);
- ilo_cp_write_bo(ilo->cp, 0, res->bo,
+ ilo_cp_write_bo(ilo->cp, 0, tex->bo,
INTEL_DOMAIN_RENDER,
INTEL_DOMAIN_RENDER);
ilo_cp_write(ilo->cp, color);
/* use null vb if there is no buffer or the stride is out of range */
if (vb->buffer && vb->stride <= 2048) {
- const struct ilo_resource *res = ilo_resource(vb->buffer);
+ const struct ilo_texture *tex = ilo_texture(vb->buffer);
const uint32_t start_offset = vb->buffer_offset;
- const uint32_t end_offset = res->bo->get_size(res->bo) - 1;
+ const uint32_t end_offset = tex->bo->get_size(tex->bo) - 1;
dw |= vb->stride << BRW_VB0_PITCH_SHIFT;
ilo_cp_write(cp, dw);
- ilo_cp_write_bo(cp, start_offset, res->bo, INTEL_DOMAIN_VERTEX, 0);
- ilo_cp_write_bo(cp, end_offset, res->bo, INTEL_DOMAIN_VERTEX, 0);
+ ilo_cp_write_bo(cp, start_offset, tex->bo, INTEL_DOMAIN_VERTEX, 0);
+ ilo_cp_write_bo(cp, end_offset, tex->bo, INTEL_DOMAIN_VERTEX, 0);
ilo_cp_write(cp, instance_divisor);
}
else {
{
const uint32_t cmd = ILO_GPE_CMD(0x3, 0x0, 0x0a);
const uint8_t cmd_len = 3;
- const struct ilo_resource *res = ilo_resource(ib->buffer);
+ const struct ilo_texture *tex = ilo_texture(ib->buffer);
uint32_t start_offset, end_offset;
int format;
ILO_GPE_VALID_GEN(dev, 6, 7);
- if (!res)
+ if (!tex)
return;
format = gen6_translate_index_size(ib->index_size);
}
/* end_offset must also be aligned */
- end_offset = res->bo->get_size(res->bo);
+ end_offset = tex->bo->get_size(tex->bo);
end_offset -= (end_offset % ib->index_size);
/* it is inclusive */
end_offset -= 1;
ilo_cp_write(cp, cmd | (cmd_len - 2) |
((enable_cut_index) ? BRW_CUT_INDEX_ENABLE : 0) |
format << 8);
- ilo_cp_write_bo(cp, start_offset, res->bo, INTEL_DOMAIN_VERTEX, 0);
- ilo_cp_write_bo(cp, end_offset, res->bo, INTEL_DOMAIN_VERTEX, 0);
+ ilo_cp_write_bo(cp, start_offset, tex->bo, INTEL_DOMAIN_VERTEX, 0);
+ ilo_cp_write_bo(cp, end_offset, tex->bo, INTEL_DOMAIN_VERTEX, 0);
ilo_cp_end(cp);
}
ILO_GPE_CMD(0x3, 0x0, 0x05) : ILO_GPE_CMD(0x3, 0x1, 0x05);
const uint8_t cmd_len = 7;
const int max_2d_size = (dev->gen >= ILO_GEN(7)) ? 16384 : 8192;
- struct ilo_resource *res;
+ struct ilo_texture *tex;
uint32_t dw1, dw3;
uint32_t slice_offset, x_offset, y_offset;
int surface_type, depth_format, width, height;
return;
}
- res = ilo_resource(surface->texture);
- surface_type = ilo_gpe_gen6_translate_texture(res->base.target);
+ tex = ilo_texture(surface->texture);
+ surface_type = ilo_gpe_gen6_translate_texture(tex->base.target);
width = surface->width;
height = surface->height;
* we always treat the resource as non-mipmapped and set the slice/x/y
* offsets manually
*/
- slice_offset = ilo_resource_get_slice_offset(res,
+ slice_offset = ilo_texture_get_slice_offset(tex,
surface->u.tex.level, surface->u.tex.first_layer,
true, &x_offset, &y_offset);
height += y_offset;
/* required for GEN6+ */
- assert(res->tiling == INTEL_TILING_Y);
+ assert(tex->tiling == INTEL_TILING_Y);
- assert(res->bo_stride > 0 && res->bo_stride < 128 * 1024 &&
- res->bo_stride % 128 == 0);
+ assert(tex->bo_stride > 0 && tex->bo_stride < 128 * 1024 &&
+ tex->bo_stride % 128 == 0);
assert(surface->u.tex.first_layer == surface->u.tex.last_layer);
- assert(width <= res->bo_stride);
+ assert(width <= tex->bo_stride);
/* we have to treat them as 2D surfaces */
if (surface_type == BRW_SURFACE_CUBE) {
dw1 = surface_type << 29 |
depth_format << 18 |
- (res->bo_stride - 1);
+ (tex->bo_stride - 1);
if (dev->gen >= ILO_GEN(7)) {
if (has_depth) {
(width - 1) << 4;
}
else {
- dw1 |= (res->tiling != INTEL_TILING_NONE) << 27 |
- (res->tiling == INTEL_TILING_Y) << 26;
+ dw1 |= (tex->tiling != INTEL_TILING_NONE) << 27 |
+ (tex->tiling == INTEL_TILING_Y) << 26;
if (hiz) {
dw1 |= 1 << 22 |
ilo_cp_write(cp, dw1);
if (has_depth) {
- ilo_cp_write_bo(cp, slice_offset, res->bo,
+ ilo_cp_write_bo(cp, slice_offset, tex->bo,
INTEL_DOMAIN_RENDER, INTEL_DOMAIN_RENDER);
}
else {
ILO_GPE_CMD(0x3, 0x0, 0x06) :
ILO_GPE_CMD(0x3, 0x1, 0x0e);
const uint8_t cmd_len = 3;
- struct ilo_resource *res;
+ struct ilo_texture *tex;
uint32_t slice_offset;
int pitch;
return;
}
- res = ilo_resource(surface->texture);
+ tex = ilo_texture(surface->texture);
/* TODO */
slice_offset = 0;
* "The pitch must be set to 2x the value computed based on width, as
* the stencil buffer is stored with two rows interleaved."
*/
- pitch = 2 * res->bo_stride;
+ pitch = 2 * tex->bo_stride;
assert(pitch > 0 && pitch < 128 * 1024 && pitch % 128 == 0);
ilo_cp_begin(cp, cmd_len);
ilo_cp_write(cp, cmd | (cmd_len - 2));
ilo_cp_write(cp, pitch - 1);
- ilo_cp_write_bo(cp, slice_offset, res->bo,
+ ilo_cp_write_bo(cp, slice_offset, tex->bo,
INTEL_DOMAIN_RENDER, INTEL_DOMAIN_RENDER);
ilo_cp_end(cp);
}
ILO_GPE_CMD(0x3, 0x0, 0x07) :
ILO_GPE_CMD(0x3, 0x1, 0x0f);
const uint8_t cmd_len = 3;
- struct ilo_resource *res;
+ struct ilo_texture *tex;
uint32_t slice_offset;
ILO_GPE_VALID_GEN(dev, 6, 7);
return;
}
- res = ilo_resource(surface->texture);
+ tex = ilo_texture(surface->texture);
/* TODO */
slice_offset = 0;
- assert(res->bo_stride > 0 && res->bo_stride < 128 * 1024 &&
- res->bo_stride % 128 == 0);
+ assert(tex->bo_stride > 0 && tex->bo_stride < 128 * 1024 &&
+ tex->bo_stride % 128 == 0);
ilo_cp_begin(cp, cmd_len);
ilo_cp_write(cp, cmd | (cmd_len - 2));
- ilo_cp_write(cp, res->bo_stride - 1);
- ilo_cp_write_bo(cp, slice_offset, res->bo,
+ ilo_cp_write(cp, tex->bo_stride - 1);
+ ilo_cp_write_bo(cp, slice_offset, tex->bo,
INTEL_DOMAIN_RENDER, INTEL_DOMAIN_RENDER);
ilo_cp_end(cp);
}
static void
gen6_fill_buffer_SURFACE_STATE(const struct ilo_dev_info *dev,
- const struct ilo_resource *res,
+ const struct ilo_texture *tex,
unsigned offset, unsigned size,
unsigned struct_size,
enum pipe_format elem_format,
* "If Surface Type is SURFTYPE_BUFFER, this field (Tiled Surface) must
* be false (buffers are supported only in linear memory)"
*/
- assert(res->tiling == INTEL_TILING_NONE);
+ assert(tex->tiling == INTEL_TILING_NONE);
pitch--;
num_entries--;
static void
gen6_fill_normal_SURFACE_STATE(const struct ilo_dev_info *dev,
- struct ilo_resource *res,
+ struct ilo_texture *tex,
enum pipe_format format,
unsigned first_level, unsigned num_levels,
unsigned first_layer, unsigned num_layers,
ILO_GPE_VALID_GEN(dev, 6, 6);
assert(num_dwords == 6);
- surface_type = ilo_gpe_gen6_translate_texture(res->base.target);
+ surface_type = ilo_gpe_gen6_translate_texture(tex->base.target);
assert(surface_type != BRW_SURFACE_BUFFER);
if (is_rt)
surface_format = ilo_translate_texture_format(format);
assert(surface_format >= 0);
- width = res->base.width0;
- height = res->base.height0;
- pitch = res->bo_stride;
+ width = tex->base.width0;
+ height = tex->base.height0;
+ pitch = tex->bo_stride;
- switch (res->base.target) {
+ switch (tex->base.target) {
case PIPE_TEXTURE_3D:
- depth = res->base.depth0;
+ depth = tex->base.depth0;
break;
case PIPE_TEXTURE_CUBE:
case PIPE_TEXTURE_CUBE_ARRAY:
}
/* non-full array spacing is supported only on GEN7+ */
- assert(res->array_spacing_full);
+ assert(tex->array_spacing_full);
/* non-interleaved samples are supported only on GEN7+ */
- if (res->base.nr_samples > 1)
- assert(res->interleaved);
+ if (tex->base.nr_samples > 1)
+ assert(tex->interleaved);
/*
* Compute the offset to the layer manually.
/* we lose the capability for layered rendering */
assert(num_levels == 1 && num_layers == 1);
- layer_offset = ilo_resource_get_slice_offset(res,
+ layer_offset = ilo_texture_get_slice_offset(tex,
first_level, first_layer, true, &x_offset, &y_offset);
assert(x_offset % 4 == 0);
y_offset /= 2;
/* derive the size for the LOD */
- width = u_minify(res->base.width0, first_level);
- height = u_minify(res->base.height0, first_level);
+ width = u_minify(tex->base.width0, first_level);
+ height = u_minify(tex->base.height0, first_level);
if (surface_type == BRW_SURFACE_3D)
- depth = u_minify(res->base.depth0, first_level);
+ depth = u_minify(tex->base.depth0, first_level);
first_level = 0;
first_layer = 0;
*
* "For linear surfaces, this field (X Offset) must be zero"
*/
- if (res->tiling == INTEL_TILING_NONE) {
+ if (tex->tiling == INTEL_TILING_NONE) {
if (is_rt) {
const int elem_size = util_format_get_blocksize(format);
assert(layer_offset % elem_size == 0);
dw[3] = (depth - 1) << BRW_SURFACE_DEPTH_SHIFT |
(pitch - 1) << BRW_SURFACE_PITCH_SHIFT |
- ilo_gpe_gen6_translate_winsys_tiling(res->tiling);
+ ilo_gpe_gen6_translate_winsys_tiling(tex->tiling);
dw[4] = first_level << BRW_SURFACE_MIN_LOD_SHIFT |
first_layer << 17 |
(depth - 1) << 8 |
- ((res->base.nr_samples > 1) ? BRW_SURFACE_MULTISAMPLECOUNT_4 :
+ ((tex->base.nr_samples > 1) ? BRW_SURFACE_MULTISAMPLECOUNT_4 :
BRW_SURFACE_MULTISAMPLECOUNT_1);
dw[5] = x_offset << BRW_SURFACE_X_OFFSET_SHIFT |
y_offset << BRW_SURFACE_Y_OFFSET_SHIFT;
- if (res->valign_4)
+ if (tex->valign_4)
dw[5] |= BRW_SURFACE_VERTICAL_ALIGN_ENABLE;
}
ILO_GPE_VALID_GEN(dev, 6, 6);
if (surface && surface->texture) {
- struct ilo_resource *res = ilo_resource(surface->texture);
+ struct ilo_texture *tex = ilo_texture(surface->texture);
- bo = res->bo;
+ bo = tex->bo;
/*
* classic i965 sets render_cache_rw for constant buffers and sol
* surfaces but not render buffers. Why?
*/
- gen6_fill_normal_SURFACE_STATE(dev, res, surface->format,
+ gen6_fill_normal_SURFACE_STATE(dev, tex, surface->format,
surface->u.tex.level, 1,
surface->u.tex.first_layer,
surface->u.tex.last_layer - surface->u.tex.first_layer + 1,
const struct pipe_sampler_view *view,
struct ilo_cp *cp)
{
- struct ilo_resource *res = ilo_resource(view->texture);
+ struct ilo_texture *tex = ilo_texture(view->texture);
uint32_t dw[6];
ILO_GPE_VALID_GEN(dev, 6, 6);
- gen6_fill_normal_SURFACE_STATE(dev, res, view->format,
+ gen6_fill_normal_SURFACE_STATE(dev, tex, view->format,
view->u.tex.first_level,
view->u.tex.last_level - view->u.tex.first_level + 1,
view->u.tex.first_layer,
view->u.tex.last_layer - view->u.tex.first_layer + 1,
false, false, dw, Elements(dw));
- return gen6_emit_SURFACE_STATE(dev, res->bo, false, dw, Elements(dw), cp);
+ return gen6_emit_SURFACE_STATE(dev, tex->bo, false, dw, Elements(dw), cp);
}
static uint32_t
struct ilo_cp *cp)
{
const enum pipe_format elem_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
- struct ilo_resource *res = ilo_resource(cbuf->buffer);
+ struct ilo_texture *tex = ilo_texture(cbuf->buffer);
uint32_t dw[6];
ILO_GPE_VALID_GEN(dev, 6, 6);
- gen6_fill_buffer_SURFACE_STATE(dev, res,
+ gen6_fill_buffer_SURFACE_STATE(dev, tex,
cbuf->buffer_offset, cbuf->buffer_size,
util_format_get_blocksize(elem_format), elem_format,
false, false, dw, Elements(dw));
- return gen6_emit_SURFACE_STATE(dev, res->bo, false, dw, Elements(dw), cp);
+ return gen6_emit_SURFACE_STATE(dev, tex->bo, false, dw, Elements(dw), cp);
}
static uint32_t
int so_index,
struct ilo_cp *cp)
{
- struct ilo_resource *res = ilo_resource(so->buffer);
+ struct ilo_texture *tex = ilo_texture(so->buffer);
unsigned bo_offset, struct_size;
enum pipe_format elem_format;
uint32_t dw[6];
break;
}
- gen6_fill_buffer_SURFACE_STATE(dev, res, bo_offset, so->buffer_size,
+ gen6_fill_buffer_SURFACE_STATE(dev, tex, bo_offset, so->buffer_size,
struct_size, elem_format, false, true, dw, Elements(dw));
- return gen6_emit_SURFACE_STATE(dev, res->bo, false, dw, Elements(dw), cp);
+ return gen6_emit_SURFACE_STATE(dev, tex->bo, false, dw, Elements(dw), cp);
}
static uint32_t
struct intel_bo;
struct ilo_cp;
-struct ilo_resource;
+struct ilo_texture;
struct ilo_shader;
typedef void
{
const uint32_t cmd = ILO_GPE_CMD(0x3, 0x1, 0x18);
const uint8_t cmd_len = 4;
- struct ilo_resource *res;
+ struct ilo_texture *tex;
int end;
ILO_GPE_VALID_GEN(dev, 7, 7);
return;
}
- res = ilo_resource(so_target->buffer);
+ tex = ilo_texture(so_target->buffer);
/* DWord-aligned */
assert(stride % 4 == 0 && base % 4 == 0);
ilo_cp_write(cp, cmd | (cmd_len - 2));
ilo_cp_write(cp, index << SO_BUFFER_INDEX_SHIFT |
stride);
- ilo_cp_write_bo(cp, base, res->bo, INTEL_DOMAIN_RENDER, INTEL_DOMAIN_RENDER);
- ilo_cp_write_bo(cp, end, res->bo, INTEL_DOMAIN_RENDER, INTEL_DOMAIN_RENDER);
+ ilo_cp_write_bo(cp, base, tex->bo, INTEL_DOMAIN_RENDER, INTEL_DOMAIN_RENDER);
+ ilo_cp_write_bo(cp, end, tex->bo, INTEL_DOMAIN_RENDER, INTEL_DOMAIN_RENDER);
ilo_cp_end(cp);
}
static void
gen7_fill_buffer_SURFACE_STATE(const struct ilo_dev_info *dev,
- const struct ilo_resource *res,
+ const struct ilo_texture *tex,
unsigned offset, unsigned size,
unsigned struct_size,
enum pipe_format elem_format,
* "If Surface Type is SURFTYPE_BUFFER, this field (Tiled Surface) must
* be false (because buffers are supported only in linear memory)."
*/
- assert(res->tiling == INTEL_TILING_NONE);
+ assert(tex->tiling == INTEL_TILING_NONE);
pitch--;
num_entries--;
static void
gen7_fill_normal_SURFACE_STATE(const struct ilo_dev_info *dev,
- struct ilo_resource *res,
+ struct ilo_texture *tex,
enum pipe_format format,
unsigned first_level, unsigned num_levels,
unsigned first_layer, unsigned num_layers,
ILO_GPE_VALID_GEN(dev, 7, 7);
assert(num_dwords == 8);
- surface_type = ilo_gpe_gen6_translate_texture(res->base.target);
+ surface_type = ilo_gpe_gen6_translate_texture(tex->base.target);
assert(surface_type != BRW_SURFACE_BUFFER);
if (is_rt)
surface_format = ilo_translate_texture_format(format);
assert(surface_format >= 0);
- width = res->base.width0;
- height = res->base.height0;
- pitch = res->bo_stride;
+ width = tex->base.width0;
+ height = tex->base.height0;
+ pitch = tex->bo_stride;
- switch (res->base.target) {
+ switch (tex->base.target) {
case PIPE_TEXTURE_3D:
- depth = res->base.depth0;
+ depth = tex->base.depth0;
break;
case PIPE_TEXTURE_CUBE:
case PIPE_TEXTURE_CUBE_ARRAY:
/* we lose the capability for layered rendering */
assert(num_levels == 1 && num_layers == 1);
- layer_offset = ilo_resource_get_slice_offset(res,
+ layer_offset = ilo_texture_get_slice_offset(tex,
first_level, first_layer, true, &x_offset, &y_offset);
assert(x_offset % 4 == 0);
y_offset /= 2;
/* derive the size for the LOD */
- width = u_minify(res->base.width0, first_level);
- height = u_minify(res->base.height0, first_level);
+ width = u_minify(tex->base.width0, first_level);
+ height = u_minify(tex->base.height0, first_level);
if (surface_type == BRW_SURFACE_3D)
- depth = u_minify(res->base.depth0, first_level);
+ depth = u_minify(tex->base.depth0, first_level);
first_level = 0;
first_layer = 0;
*
* "For linear surfaces, this field (X Offset) must be zero."
*/
- if (res->tiling == INTEL_TILING_NONE) {
+ if (tex->tiling == INTEL_TILING_NONE) {
if (is_rt) {
const int elem_size = util_format_get_blocksize(format);
assert(layer_offset % elem_size == 0);
dw[0] = surface_type << BRW_SURFACE_TYPE_SHIFT |
surface_format << BRW_SURFACE_FORMAT_SHIFT |
- ilo_gpe_gen6_translate_winsys_tiling(res->tiling) << 13;
+ ilo_gpe_gen6_translate_winsys_tiling(tex->tiling) << 13;
if (surface_type != BRW_SURFACE_3D && depth > 1)
dw[0] |= GEN7_SURFACE_IS_ARRAY;
- if (res->valign_4)
+ if (tex->valign_4)
dw[0] |= GEN7_SURFACE_VALIGN_4;
- if (res->halign_8)
+ if (tex->halign_8)
dw[0] |= GEN7_SURFACE_HALIGN_8;
- if (res->array_spacing_full)
+ if (tex->array_spacing_full)
dw[0] |= GEN7_SURFACE_ARYSPC_FULL;
else
dw[0] |= GEN7_SURFACE_ARYSPC_LOD0;
* means the samples are interleaved. The layouts are the same when the
* number of samples is 1.
*/
- if (res->interleaved && res->base.nr_samples > 1) {
+ if (tex->interleaved && tex->base.nr_samples > 1) {
assert(!is_rt);
dw[4] |= GEN7_SURFACE_MSFMT_DEPTH_STENCIL;
}
dw[4] |= GEN7_SURFACE_MSFMT_MSS;
}
- if (res->base.nr_samples > 4)
+ if (tex->base.nr_samples > 4)
dw[4] |= GEN7_SURFACE_MULTISAMPLECOUNT_8;
- else if (res->base.nr_samples > 2)
+ else if (tex->base.nr_samples > 2)
dw[4] |= GEN7_SURFACE_MULTISAMPLECOUNT_4;
else
dw[4] |= GEN7_SURFACE_MULTISAMPLECOUNT_1;
ILO_GPE_VALID_GEN(dev, 7, 7);
if (surface && surface->texture) {
- struct ilo_resource *res = ilo_resource(surface->texture);
+ struct ilo_texture *tex = ilo_texture(surface->texture);
- bo = res->bo;
+ bo = tex->bo;
/*
* classic i965 sets render_cache_rw for constant buffers and sol
* surfaces but not render buffers. Why?
*/
- gen7_fill_normal_SURFACE_STATE(dev, res, surface->format,
+ gen7_fill_normal_SURFACE_STATE(dev, tex, surface->format,
surface->u.tex.level, 1,
surface->u.tex.first_layer,
surface->u.tex.last_layer - surface->u.tex.first_layer + 1,
const struct pipe_sampler_view *view,
struct ilo_cp *cp)
{
- struct ilo_resource *res = ilo_resource(view->texture);
+ struct ilo_texture *tex = ilo_texture(view->texture);
uint32_t dw[8];
ILO_GPE_VALID_GEN(dev, 7, 7);
- gen7_fill_normal_SURFACE_STATE(dev, res, view->format,
+ gen7_fill_normal_SURFACE_STATE(dev, tex, view->format,
view->u.tex.first_level,
view->u.tex.last_level - view->u.tex.first_level + 1,
view->u.tex.first_layer,
view->u.tex.last_layer - view->u.tex.first_layer + 1,
false, false, dw, Elements(dw));
- return gen7_emit_SURFACE_STATE(dev, res->bo, false, dw, Elements(dw), cp);
+ return gen7_emit_SURFACE_STATE(dev, tex->bo, false, dw, Elements(dw), cp);
}
static uint32_t
struct ilo_cp *cp)
{
const enum pipe_format elem_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
- struct ilo_resource *res = ilo_resource(cbuf->buffer);
+ struct ilo_texture *tex = ilo_texture(cbuf->buffer);
uint32_t dw[8];
ILO_GPE_VALID_GEN(dev, 7, 7);
- gen7_fill_buffer_SURFACE_STATE(dev, res,
+ gen7_fill_buffer_SURFACE_STATE(dev, tex,
cbuf->buffer_offset, cbuf->buffer_size,
util_format_get_blocksize(elem_format), elem_format,
false, false, dw, Elements(dw));
- return gen7_emit_SURFACE_STATE(dev, res->bo, false, dw, Elements(dw), cp);
+ return gen7_emit_SURFACE_STATE(dev, tex->bo, false, dw, Elements(dw), cp);
}
static uint32_t
#define ILO_BIND_MCS PIPE_BIND_CUSTOM
static struct intel_bo *
-alloc_buf_bo(const struct ilo_resource *res)
+alloc_buf_bo(const struct ilo_texture *tex)
{
- struct ilo_screen *is = ilo_screen(res->base.screen);
+ struct ilo_screen *is = ilo_screen(tex->base.screen);
struct intel_bo *bo;
const char *name;
- const unsigned size = res->bo_width;
+ const unsigned size = tex->bo_width;
- switch (res->base.bind) {
+ switch (tex->base.bind) {
case PIPE_BIND_VERTEX_BUFFER:
name = "vertex buffer";
break;
}
/* this is what a buffer supposed to be like */
- assert(res->bo_width * res->bo_height * res->bo_cpp == size);
- assert(res->tiling == INTEL_TILING_NONE);
- assert(res->bo_stride == 0);
+ assert(tex->bo_width * tex->bo_height * tex->bo_cpp == size);
+ assert(tex->tiling == INTEL_TILING_NONE);
+ assert(tex->bo_stride == 0);
- if (res->handle) {
+ if (tex->handle) {
bo = is->winsys->import_handle(is->winsys, name,
- res->bo_width, res->bo_height, res->bo_cpp, res->handle);
+ tex->bo_width, tex->bo_height, tex->bo_cpp, tex->handle);
/* since the bo is shared to us, make sure it meets the expectations */
if (bo) {
- assert(bo->get_size(res->bo) == size);
- assert(bo->get_tiling(res->bo) == res->tiling);
- assert(bo->get_pitch(res->bo) == res->bo_stride);
+ assert(bo->get_size(tex->bo) == size);
+ assert(bo->get_tiling(tex->bo) == tex->tiling);
+ assert(bo->get_pitch(tex->bo) == tex->bo_stride);
}
}
else {
}
static struct intel_bo *
-alloc_tex_bo(const struct ilo_resource *res)
+alloc_tex_bo(const struct ilo_texture *tex)
{
- struct ilo_screen *is = ilo_screen(res->base.screen);
+ struct ilo_screen *is = ilo_screen(tex->base.screen);
struct intel_bo *bo;
const char *name;
- switch (res->base.target) {
+ switch (tex->base.target) {
case PIPE_TEXTURE_1D:
name = "1D texture";
break;
break;
}
- if (res->handle) {
+ if (tex->handle) {
bo = is->winsys->import_handle(is->winsys, name,
- res->bo_width, res->bo_height, res->bo_cpp, res->handle);
+ tex->bo_width, tex->bo_height, tex->bo_cpp, tex->handle);
}
else {
const bool for_render =
- (res->base.bind & (PIPE_BIND_DEPTH_STENCIL |
+ (tex->base.bind & (PIPE_BIND_DEPTH_STENCIL |
PIPE_BIND_RENDER_TARGET));
const unsigned long flags =
(for_render) ? INTEL_ALLOC_FOR_RENDER : 0;
bo = is->winsys->alloc(is->winsys, name,
- res->bo_width, res->bo_height, res->bo_cpp,
- res->tiling, flags);
+ tex->bo_width, tex->bo_height, tex->bo_cpp,
+ tex->tiling, flags);
}
return bo;
}
bool
-ilo_resource_alloc_bo(struct ilo_resource *res)
+ilo_texture_alloc_bo(struct ilo_texture *tex)
{
- struct intel_bo *old_bo = res->bo;
+ struct intel_bo *old_bo = tex->bo;
/* a shared bo cannot be reallocated */
- if (old_bo && res->handle)
+ if (old_bo && tex->handle)
return false;
- if (res->base.target == PIPE_BUFFER)
- res->bo = alloc_buf_bo(res);
+ if (tex->base.target == PIPE_BUFFER)
+ tex->bo = alloc_buf_bo(tex);
else
- res->bo = alloc_tex_bo(res);
+ tex->bo = alloc_tex_bo(tex);
- if (!res->bo) {
- res->bo = old_bo;
+ if (!tex->bo) {
+ tex->bo = old_bo;
return false;
}
/* winsys may decide to use a different tiling */
- res->tiling = res->bo->get_tiling(res->bo);
- res->bo_stride = res->bo->get_pitch(res->bo);
+ tex->tiling = tex->bo->get_tiling(tex->bo);
+ tex->bo_stride = tex->bo->get_pitch(tex->bo);
if (old_bo)
old_bo->unreference(old_bo);
}
static bool
-alloc_slice_offsets(struct ilo_resource *res)
+alloc_slice_offsets(struct ilo_texture *tex)
{
int depth, lv;
/* sum the depths of all levels */
depth = 0;
- for (lv = 0; lv <= res->base.last_level; lv++)
- depth += u_minify(res->base.depth0, lv);
+ for (lv = 0; lv <= tex->base.last_level; lv++)
+ depth += u_minify(tex->base.depth0, lv);
/*
- * There are (depth * res->base.array_size) slices. Either depth is one
- * (non-3D) or res->base.array_size is one (non-array), but it does not
+ * There are (depth * tex->base.array_size) slices. Either depth is one
+ * (non-3D) or tex->base.array_size is one (non-array), but it does not
* matter.
*/
- res->slice_offsets[0] =
- CALLOC(depth * res->base.array_size, sizeof(res->slice_offsets[0][0]));
- if (!res->slice_offsets[0])
+ tex->slice_offsets[0] =
+ CALLOC(depth * tex->base.array_size, sizeof(tex->slice_offsets[0][0]));
+ if (!tex->slice_offsets[0])
return false;
/* point to the respective positions in the buffer */
- for (lv = 1; lv <= res->base.last_level; lv++) {
- res->slice_offsets[lv] = res->slice_offsets[lv - 1] +
- u_minify(res->base.depth0, lv - 1) * res->base.array_size;
+ for (lv = 1; lv <= tex->base.last_level; lv++) {
+ tex->slice_offsets[lv] = tex->slice_offsets[lv - 1] +
+ u_minify(tex->base.depth0, lv - 1) * tex->base.array_size;
}
return true;
}
static void
-free_slice_offsets(struct ilo_resource *res)
+free_slice_offsets(struct ilo_texture *tex)
{
int lv;
- FREE(res->slice_offsets[0]);
- for (lv = 0; lv <= res->base.last_level; lv++)
- res->slice_offsets[lv] = NULL;
+ FREE(tex->slice_offsets[0]);
+ for (lv = 0; lv <= tex->base.last_level; lv++)
+ tex->slice_offsets[lv] = NULL;
}
struct layout_tex_info {
* Prepare for texture layout.
*/
static void
-layout_tex_init(const struct ilo_resource *res, struct layout_tex_info *info)
+layout_tex_init(const struct ilo_texture *tex, struct layout_tex_info *info)
{
- struct ilo_screen *is = ilo_screen(res->base.screen);
- const enum pipe_format bo_format = res->bo_format;
- const enum intel_tiling_mode tiling = res->tiling;
- const struct pipe_resource *templ = &res->base;
+ struct ilo_screen *is = ilo_screen(tex->base.screen);
+ const enum pipe_format bo_format = tex->bo_format;
+ const enum intel_tiling_mode tiling = tex->tiling;
+ const struct pipe_resource *templ = &tex->base;
int last_level, lv;
memset(info, 0, sizeof(*info));
* compressed formats because the block height for those formats are
* 4, and it wants QPitch to mean the number of memory rows, as
* opposed to texel rows, between slices. Since we use texel rows in
- * res->slice_offsets, we do not need to divide QPitch by 4.
+ * tex->slice_offsets, we do not need to divide QPitch by 4.
*/
info->qpitch = h0 + h1 +
((is->dev.gen >= ILO_GEN(7)) ? 12 : 11) * info->align_j;
* Layout a 2D texture.
*/
static void
-layout_tex_2d(struct ilo_resource *res, const struct layout_tex_info *info)
+layout_tex_2d(struct ilo_texture *tex, const struct layout_tex_info *info)
{
- const struct pipe_resource *templ = &res->base;
+ const struct pipe_resource *templ = &tex->base;
unsigned int level_x, level_y, num_slices;
int lv;
- res->bo_width = 0;
- res->bo_height = 0;
+ tex->bo_width = 0;
+ tex->bo_height = 0;
level_x = 0;
level_y = 0;
int slice;
for (slice = 0; slice < templ->array_size; slice++) {
- res->slice_offsets[lv][slice].x = level_x;
+ tex->slice_offsets[lv][slice].x = level_x;
/* slices are qpitch apart in Y-direction */
- res->slice_offsets[lv][slice].y = level_y + info->qpitch * slice;
+ tex->slice_offsets[lv][slice].y = level_y + info->qpitch * slice;
}
/* extend the size of the monolithic bo to cover this mip level */
- if (res->bo_width < level_x + level_w)
- res->bo_width = level_x + level_w;
- if (res->bo_height < level_y + level_h)
- res->bo_height = level_y + level_h;
+ if (tex->bo_width < level_x + level_w)
+ tex->bo_width = level_x + level_w;
+ if (tex->bo_height < level_y + level_h)
+ tex->bo_height = level_y + level_h;
/* MIPLAYOUT_BELOW */
if (lv == 1)
num_slices *= templ->nr_samples;
/* we did not take slices into consideration in the computation above */
- res->bo_height += info->qpitch * (num_slices - 1);
+ tex->bo_height += info->qpitch * (num_slices - 1);
}
/**
* Layout a 3D texture.
*/
static void
-layout_tex_3d(struct ilo_resource *res, const struct layout_tex_info *info)
+layout_tex_3d(struct ilo_texture *tex, const struct layout_tex_info *info)
{
- const struct pipe_resource *templ = &res->base;
+ const struct pipe_resource *templ = &tex->base;
unsigned int level_y;
int lv;
- res->bo_width = 0;
- res->bo_height = 0;
+ tex->bo_width = 0;
+ tex->bo_height = 0;
level_y = 0;
for (lv = 0; lv <= templ->last_level; lv++) {
int i;
for (i = 0; i < num_slices_per_row && slice + i < level_d; i++) {
- res->slice_offsets[lv][slice + i].x = slice_pitch * i;
- res->slice_offsets[lv][slice + i].y = level_y;
+ tex->slice_offsets[lv][slice + i].x = slice_pitch * i;
+ tex->slice_offsets[lv][slice + i].y = level_y;
}
/* move on to the next slice row */
slice = MIN2(num_slices_per_row, level_d) - 1;
/* extend the size of the monolithic bo to cover this slice */
- if (res->bo_width < slice_pitch * slice + level_w)
- res->bo_width = slice_pitch * slice + level_w;
+ if (tex->bo_width < slice_pitch * slice + level_w)
+ tex->bo_width = slice_pitch * slice + level_w;
if (lv == templ->last_level)
- res->bo_height = (level_y - slice_qpitch) + level_h;
+ tex->bo_height = (level_y - slice_qpitch) + level_h;
}
}
}
static enum intel_tiling_mode
-get_tex_tiling(const struct ilo_resource *res)
+get_tex_tiling(const struct ilo_texture *tex)
{
- const struct pipe_resource *templ = &res->base;
- const enum pipe_format bo_format = res->bo_format;
+ const struct pipe_resource *templ = &tex->base;
+ const enum pipe_format bo_format = tex->bo_format;
/*
* From the Sandy Bridge PRM, volume 1 part 2, page 32:
}
static void
-init_texture(struct ilo_resource *res)
+init_texture(struct ilo_texture *tex)
{
struct layout_tex_info info;
- switch (res->base.format) {
+ switch (tex->base.format) {
case PIPE_FORMAT_ETC1_RGB8:
- res->bo_format = PIPE_FORMAT_R8G8B8X8_UNORM;
+ tex->bo_format = PIPE_FORMAT_R8G8B8X8_UNORM;
break;
default:
- res->bo_format = res->base.format;
+ tex->bo_format = tex->base.format;
break;
}
/* determine tiling first as it may affect the layout */
- res->tiling = get_tex_tiling(res);
+ tex->tiling = get_tex_tiling(tex);
- layout_tex_init(res, &info);
+ layout_tex_init(tex, &info);
- res->compressed = info.compressed;
- res->block_width = info.block_width;
- res->block_height = info.block_height;
+ tex->compressed = info.compressed;
+ tex->block_width = info.block_width;
+ tex->block_height = info.block_height;
- res->halign_8 = (info.align_i == 8);
- res->valign_4 = (info.align_j == 4);
- res->array_spacing_full = info.array_spacing_full;
- res->interleaved = info.interleaved;
+ tex->halign_8 = (info.align_i == 8);
+ tex->valign_4 = (info.align_j == 4);
+ tex->array_spacing_full = info.array_spacing_full;
+ tex->interleaved = info.interleaved;
- switch (res->base.target) {
+ switch (tex->base.target) {
case PIPE_TEXTURE_1D:
case PIPE_TEXTURE_2D:
case PIPE_TEXTURE_CUBE:
case PIPE_TEXTURE_1D_ARRAY:
case PIPE_TEXTURE_2D_ARRAY:
case PIPE_TEXTURE_CUBE_ARRAY:
- layout_tex_2d(res, &info);
+ layout_tex_2d(tex, &info);
break;
case PIPE_TEXTURE_3D:
- layout_tex_3d(res, &info);
+ layout_tex_3d(tex, &info);
break;
default:
assert(!"unknown resource target");
* Since we ask for INTEL_TILING_NONE instead lf INTEL_TILING_W, we need to
* manually align the bo width and height to the tile boundaries.
*/
- if (res->bo_format == PIPE_FORMAT_S8_UINT) {
- res->bo_width = align(res->bo_width, 64);
- res->bo_height = align(res->bo_height, 64);
+ if (tex->bo_format == PIPE_FORMAT_S8_UINT) {
+ tex->bo_width = align(tex->bo_width, 64);
+ tex->bo_height = align(tex->bo_height, 64);
}
/* in blocks */
- assert(res->bo_width % info.block_width == 0);
- assert(res->bo_height % info.block_height == 0);
- res->bo_width /= info.block_width;
- res->bo_height /= info.block_height;
- res->bo_cpp = util_format_get_blocksize(res->bo_format);
+ assert(tex->bo_width % info.block_width == 0);
+ assert(tex->bo_height % info.block_height == 0);
+ tex->bo_width /= info.block_width;
+ tex->bo_height /= info.block_height;
+ tex->bo_cpp = util_format_get_blocksize(tex->bo_format);
}
static void
-init_buffer(struct ilo_resource *res)
+init_buffer(struct ilo_texture *tex)
{
- res->bo_format = res->base.format;
- res->bo_width = res->base.width0;
- res->bo_height = 1;
- res->bo_cpp = 1;
- res->bo_stride = 0;
- res->tiling = INTEL_TILING_NONE;
-
- res->compressed = false;
- res->block_width = 1;
- res->block_height = 1;
-
- res->halign_8 = false;
- res->valign_4 = false;
- res->array_spacing_full = false;
- res->interleaved = false;
+ tex->bo_format = tex->base.format;
+ tex->bo_width = tex->base.width0;
+ tex->bo_height = 1;
+ tex->bo_cpp = 1;
+ tex->bo_stride = 0;
+ tex->tiling = INTEL_TILING_NONE;
+
+ tex->compressed = false;
+ tex->block_width = 1;
+ tex->block_height = 1;
+
+ tex->halign_8 = false;
+ tex->valign_4 = false;
+ tex->array_spacing_full = false;
+ tex->interleaved = false;
}
static struct pipe_resource *
const struct pipe_resource *templ,
struct winsys_handle *handle)
{
- struct ilo_resource *res;
+ struct ilo_texture *tex;
- res = CALLOC_STRUCT(ilo_resource);
- if (!res)
+ tex = CALLOC_STRUCT(ilo_texture);
+ if (!tex)
return NULL;
- res->base = *templ;
- res->base.screen = screen;
- pipe_reference_init(&res->base.reference, 1);
- res->handle = handle;
+ tex->base = *templ;
+ tex->base.screen = screen;
+ pipe_reference_init(&tex->base.reference, 1);
+ tex->handle = handle;
- if (!alloc_slice_offsets(res)) {
- FREE(res);
+ if (!alloc_slice_offsets(tex)) {
+ FREE(tex);
return NULL;
}
if (templ->target == PIPE_BUFFER)
- init_buffer(res);
+ init_buffer(tex);
else
- init_texture(res);
+ init_texture(tex);
- if (!ilo_resource_alloc_bo(res)) {
- free_slice_offsets(res);
- FREE(res);
+ if (!ilo_texture_alloc_bo(tex)) {
+ free_slice_offsets(tex);
+ FREE(tex);
return NULL;
}
- return &res->base;
+ return &tex->base;
}
static boolean
static boolean
ilo_resource_get_handle(struct pipe_screen *screen,
- struct pipe_resource *r,
+ struct pipe_resource *res,
struct winsys_handle *handle)
{
- struct ilo_resource *res = ilo_resource(r);
+ struct ilo_texture *tex = ilo_texture(res);
int err;
- err = res->bo->export_handle(res->bo, handle);
+ err = tex->bo->export_handle(tex->bo, handle);
return !err;
}
static void
ilo_resource_destroy(struct pipe_screen *screen,
- struct pipe_resource *r)
+ struct pipe_resource *res)
{
- struct ilo_resource *res = ilo_resource(r);
+ struct ilo_texture *tex = ilo_texture(res);
- free_slice_offsets(res);
- res->bo->unreference(res->bo);
- FREE(res);
+ free_slice_offsets(tex);
+ tex->bo->unreference(tex->bo);
+ FREE(tex);
}
/**
* y_offset is always a multiple of 2.
*/
unsigned
-ilo_resource_get_slice_offset(const struct ilo_resource *res,
- int level, int slice, bool tile_aligned,
- unsigned *x_offset, unsigned *y_offset)
+ilo_texture_get_slice_offset(const struct ilo_texture *tex,
+ int level, int slice, bool tile_aligned,
+ unsigned *x_offset, unsigned *y_offset)
{
- const unsigned x = res->slice_offsets[level][slice].x / res->block_width;
- const unsigned y = res->slice_offsets[level][slice].y / res->block_height;
+ const unsigned x = tex->slice_offsets[level][slice].x / tex->block_width;
+ const unsigned y = tex->slice_offsets[level][slice].y / tex->block_height;
unsigned tile_w, tile_h, tile_size, row_size;
unsigned slice_offset;
/* see the Sandy Bridge PRM, volume 1 part 2, page 24 */
- switch (res->tiling) {
+ switch (tex->tiling) {
case INTEL_TILING_NONE:
- tile_w = res->bo_cpp;
+ tile_w = tex->bo_cpp;
tile_h = 1;
break;
case INTEL_TILING_X:
break;
default:
assert(!"unknown tiling");
- tile_w = res->bo_cpp;
+ tile_w = tex->bo_cpp;
tile_h = 1;
break;
}
tile_size = tile_w * tile_h;
- row_size = res->bo_stride * tile_h;
+ row_size = tex->bo_stride * tile_h;
/*
* for non-tiled resources, this is equivalent to
*
- * slice_offset = y * res->bo_stride + x * res->bo_cpp;
+ * slice_offset = y * tex->bo_stride + x * tex->bo_cpp;
*/
slice_offset =
- row_size * (y / tile_h) + tile_size * (x * res->bo_cpp / tile_w);
+ row_size * (y / tile_h) + tile_size * (x * tex->bo_cpp / tile_w);
/*
- * Since res->bo_stride is a multiple of tile_w, slice_offset should be
+ * Since tex->bo_stride is a multiple of tile_w, slice_offset should be
* aligned at this point.
*/
assert(slice_offset % tile_size == 0);
* be a multiple of 2.
*/
if (x_offset) {
- assert(tile_w % res->bo_cpp == 0);
- *x_offset = (x % (tile_w / res->bo_cpp)) * res->block_width;
+ assert(tile_w % tex->bo_cpp == 0);
+ *x_offset = (x % (tile_w / tex->bo_cpp)) * tex->block_width;
assert(*x_offset % 4 == 0);
}
if (y_offset) {
- *y_offset = (y % tile_h) * res->block_height;
+ *y_offset = (y % tile_h) * tex->block_height;
assert(*y_offset % 2 == 0);
}
}
else {
- const unsigned tx = (x * res->bo_cpp) % tile_w;
+ const unsigned tx = (x * tex->bo_cpp) % tile_w;
const unsigned ty = y % tile_h;
- switch (res->tiling) {
+ switch (tex->tiling) {
case INTEL_TILING_NONE:
assert(tx == 0 && ty == 0);
break;
* - ilo_texture
* - ilo_global_binding
*/
-struct ilo_resource {
+struct ilo_texture {
struct pipe_resource base;
struct winsys_handle *handle;
} *slice_offsets[PIPE_MAX_TEXTURE_LEVELS];
};
-static inline struct ilo_resource *
-ilo_resource(struct pipe_resource *res)
+static inline struct ilo_texture *
+ilo_texture(struct pipe_resource *res)
{
- return (struct ilo_resource *) res;
+ return (struct ilo_texture *) res;
}
void
ilo_init_resource_functions(struct ilo_screen *is);
bool
-ilo_resource_alloc_bo(struct ilo_resource *res);
+ilo_texture_alloc_bo(struct ilo_texture *tex);
unsigned
-ilo_resource_get_slice_offset(const struct ilo_resource *res,
- int level, int slice, bool tile_aligned,
- unsigned *x_offset, unsigned *y_offset);
+ilo_texture_get_slice_offset(const struct ilo_texture *tex,
+ int level, int slice, bool tile_aligned,
+ unsigned *x_offset, unsigned *y_offset);
#endif /* ILO_RESOURCE_H */
static void
ilo_transfer_inline_write(struct pipe_context *pipe,
- struct pipe_resource *r,
+ struct pipe_resource *res,
unsigned level,
unsigned usage,
const struct pipe_box *box,
unsigned layer_stride)
{
struct ilo_context *ilo = ilo_context(pipe);
- struct ilo_resource *res = ilo_resource(r);
+ struct ilo_texture *tex = ilo_texture(res);
int offset, size;
bool will_be_busy;
* unsynchronized write, as the buffer is likely to be busy and pwrite()
* will stall.
*/
- if (unlikely(res->base.target != PIPE_BUFFER) ||
+ if (unlikely(tex->base.target != PIPE_BUFFER) ||
(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
- u_default_transfer_inline_write(pipe, r,
+ u_default_transfer_inline_write(pipe, res,
level, usage, box, data, stride, layer_stride);
return;
if (ilo->cp->hw_ctx)
ilo_cp_flush(ilo->cp);
- will_be_busy = ilo->cp->bo->references(ilo->cp->bo, res->bo);
+ will_be_busy = ilo->cp->bo->references(ilo->cp->bo, tex->bo);
/* see if we can avoid stalling */
- if (will_be_busy || intel_bo_is_busy(res->bo)) {
+ if (will_be_busy || intel_bo_is_busy(tex->bo)) {
bool will_stall = true;
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
/* old data not needed so discard the old bo to avoid stalling */
- if (ilo_resource_alloc_bo(res))
+ if (ilo_texture_alloc_bo(tex))
will_stall = false;
}
else {
/*
* We could allocate a temporary bo to hold the data and emit
- * pipelined copy blit to move them to res->bo. But for now, do
+ * pipelined copy blit to move them to tex->bo. But for now, do
* nothing.
*/
}
}
/* for PIPE_BUFFERs, conversion should not be needed */
- assert(res->bo_format == res->base.format);
+ assert(tex->bo_format == tex->base.format);
/* they should specify just an offset and a size */
assert(level == 0);
offset = box->x;
size = box->width;
- res->bo->pwrite(res->bo, offset, size, data);
+ tex->bo->pwrite(tex->bo, offset, size, data);
}
static void
static void
transfer_unmap_sys(struct ilo_context *ilo,
- struct ilo_resource *res,
+ struct ilo_texture *tex,
struct ilo_transfer *xfer)
{
const void *src = xfer->ptr;
return;
}
- if (likely(res->bo_format != res->base.format)) {
- transfer_unmap_sys_convert(res->bo_format, dst_xfer, dst,
- res->base.format, &xfer->base, src);
+ if (likely(tex->bo_format != tex->base.format)) {
+ transfer_unmap_sys_convert(tex->bo_format, dst_xfer, dst,
+ tex->base.format, &xfer->base, src);
}
else {
- util_copy_box(dst, res->bo_format,
+ util_copy_box(dst, tex->bo_format,
dst_xfer->stride, dst_xfer->layer_stride, 0, 0, 0,
dst_xfer->box.width, dst_xfer->box.height, dst_xfer->box.depth,
src, xfer->base.stride, xfer->base.layer_stride, 0, 0, 0);
static bool
transfer_map_sys(struct ilo_context *ilo,
- struct ilo_resource *res,
+ struct ilo_texture *tex,
struct ilo_transfer *xfer)
{
const struct pipe_box *box = &xfer->base.box;
- const size_t stride = util_format_get_stride(res->base.format, box->width);
+ const size_t stride = util_format_get_stride(tex->base.format, box->width);
const size_t size =
- util_format_get_2d_size(res->base.format, stride, box->height);
+ util_format_get_2d_size(tex->base.format, stride, box->height);
bool read_back = false;
if (xfer->base.usage & PIPE_TRANSFER_READ) {
static void
transfer_unmap_direct(struct ilo_context *ilo,
- struct ilo_resource *res,
+ struct ilo_texture *tex,
struct ilo_transfer *xfer)
{
- res->bo->unmap(res->bo);
+ tex->bo->unmap(tex->bo);
}
static bool
transfer_map_direct(struct ilo_context *ilo,
- struct ilo_resource *res,
+ struct ilo_texture *tex,
struct ilo_transfer *xfer)
{
int x, y, err;
if (xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
- err = res->bo->map_unsynchronized(res->bo);
+ err = tex->bo->map_unsynchronized(tex->bo);
/* prefer map() when there is the last-level cache */
- else if (res->tiling == INTEL_TILING_NONE &&
+ else if (tex->tiling == INTEL_TILING_NONE &&
(ilo->dev->has_llc || (xfer->base.usage & PIPE_TRANSFER_READ)))
- err = res->bo->map(res->bo, (xfer->base.usage & PIPE_TRANSFER_WRITE));
+ err = tex->bo->map(tex->bo, (xfer->base.usage & PIPE_TRANSFER_WRITE));
else
- err = res->bo->map_gtt(res->bo);
+ err = tex->bo->map_gtt(tex->bo);
if (err)
return false;
/* note that stride is for a block row, not a texel row */
- xfer->base.stride = res->bo_stride;
+ xfer->base.stride = tex->bo_stride;
/*
* we can walk through layers when the resource is a texture array or
* when this is the first level of a 3D texture being mapped
*/
- if (res->base.array_size > 1 ||
- (res->base.target == PIPE_TEXTURE_3D && xfer->base.level == 0)) {
- const unsigned qpitch = res->slice_offsets[xfer->base.level][1].y -
- res->slice_offsets[xfer->base.level][0].y;
+ if (tex->base.array_size > 1 ||
+ (tex->base.target == PIPE_TEXTURE_3D && xfer->base.level == 0)) {
+ const unsigned qpitch = tex->slice_offsets[xfer->base.level][1].y -
+ tex->slice_offsets[xfer->base.level][0].y;
- assert(qpitch % res->block_height == 0);
- xfer->base.layer_stride = (qpitch / res->block_height) * xfer->base.stride;
+ assert(qpitch % tex->block_height == 0);
+ xfer->base.layer_stride = (qpitch / tex->block_height) * xfer->base.stride;
}
else {
xfer->base.layer_stride = 0;
}
- x = res->slice_offsets[xfer->base.level][xfer->base.box.z].x;
- y = res->slice_offsets[xfer->base.level][xfer->base.box.z].y;
+ x = tex->slice_offsets[xfer->base.level][xfer->base.box.z].x;
+ y = tex->slice_offsets[xfer->base.level][xfer->base.box.z].y;
x += xfer->base.box.x;
y += xfer->base.box.y;
/* in blocks */
- assert(x % res->block_width == 0 && y % res->block_height == 0);
- x /= res->block_width;
- y /= res->block_height;
+ assert(x % tex->block_width == 0 && y % tex->block_height == 0);
+ x /= tex->block_width;
+ y /= tex->block_height;
- xfer->ptr = res->bo->get_virtual(res->bo);
- xfer->ptr += y * res->bo_stride + x * res->bo_cpp;
+ xfer->ptr = tex->bo->get_virtual(tex->bo);
+ xfer->ptr += y * tex->bo_stride + x * tex->bo_cpp;
return true;
}
*/
static bool
transfer_map_choose_method(struct ilo_context *ilo,
- struct ilo_resource *res,
+ struct ilo_texture *tex,
struct ilo_transfer *xfer)
{
bool will_be_busy, will_stall;
/* need to convert on-the-fly */
- if (res->bo_format != res->base.format &&
+ if (tex->bo_format != tex->base.format &&
!(xfer->base.usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
xfer->method = ILO_TRANSFER_MAP_STAGING_SYS;
if (xfer->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED)
return true;
- will_be_busy = ilo->cp->bo->references(ilo->cp->bo, res->bo);
+ will_be_busy = ilo->cp->bo->references(ilo->cp->bo, tex->bo);
if (!will_be_busy) {
/*
* XXX With hardware context support, the bo may be needed by GPU
if (ilo->cp->hw_ctx)
ilo_cp_flush(ilo->cp);
- if (!intel_bo_is_busy(res->bo))
+ if (!intel_bo_is_busy(tex->bo))
return true;
}
}
else if (xfer->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
/* discard old bo and allocate a new one for mapping */
- if (ilo_resource_alloc_bo(res))
+ if (ilo_texture_alloc_bo(tex))
will_stall = false;
}
else if (xfer->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT) {
struct pipe_transfer *transfer)
{
struct ilo_context *ilo = ilo_context(pipe);
- struct ilo_resource *res = ilo_resource(transfer->resource);
+ struct ilo_texture *tex = ilo_texture(transfer->resource);
struct ilo_transfer *xfer = ilo_transfer(transfer);
switch (xfer->method) {
case ILO_TRANSFER_MAP_DIRECT:
- transfer_unmap_direct(ilo, res, xfer);
+ transfer_unmap_direct(ilo, tex, xfer);
break;
case ILO_TRANSFER_MAP_STAGING_SYS:
- transfer_unmap_sys(ilo, res, xfer);
+ transfer_unmap_sys(ilo, tex, xfer);
break;
default:
assert(!"unknown mapping method");
static void *
ilo_transfer_map(struct pipe_context *pipe,
- struct pipe_resource *r,
+ struct pipe_resource *res,
unsigned level,
unsigned usage,
const struct pipe_box *box,
struct pipe_transfer **transfer)
{
struct ilo_context *ilo = ilo_context(pipe);
- struct ilo_resource *res = ilo_resource(r);
+ struct ilo_texture *tex = ilo_texture(res);
struct ilo_transfer *xfer;
int ok;
}
xfer->base.resource = NULL;
- pipe_resource_reference(&xfer->base.resource, &res->base);
+ pipe_resource_reference(&xfer->base.resource, &tex->base);
xfer->base.level = level;
xfer->base.usage = usage;
xfer->base.box = *box;
- ok = transfer_map_choose_method(ilo, res, xfer);
+ ok = transfer_map_choose_method(ilo, tex, xfer);
if (ok) {
switch (xfer->method) {
case ILO_TRANSFER_MAP_DIRECT:
- ok = transfer_map_direct(ilo, res, xfer);
+ ok = transfer_map_direct(ilo, tex, xfer);
break;
case ILO_TRANSFER_MAP_STAGING_SYS:
- ok = transfer_map_sys(ilo, res, xfer);
+ ok = transfer_map_sys(ilo, tex, xfer);
break;
default:
assert(!"unknown mapping method");