#include "util/u_transfer.h"
#include "util/u_format_etc.h"
+#include "ilo_blit.h"
#include "ilo_cp.h"
#include "ilo_context.h"
#include "ilo_resource.h"
+#include "ilo_state.h"
#include "ilo_transfer.h"
static bool
is_bo_busy(struct ilo_context *ilo, struct intel_bo *bo, bool *need_flush)
{
- const bool referenced = ilo->cp->bo->references(ilo->cp->bo, bo);
+ const bool referenced = intel_bo_references(ilo->cp->bo, bo);
if (need_flush)
*need_flush = referenced;
if (referenced)
return true;
- /*
- * XXX With hardware context support, the bo may be needed by GPU
- * without being referenced by ilo->cp->bo. We have to flush
- * unconditionally, and that is bad.
- */
- if (ilo->cp->render_ctx)
- ilo_cp_flush(ilo->cp);
-
return intel_bo_is_busy(bo);
}
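+/*
+ * Callers combine the two answers roughly as follows (an illustrative
+ * sketch; map_synchronously() is a stand-in, not a real function):
+ *
+ *   bool need_flush;
+ *   if (is_bo_busy(ilo, bo, &need_flush)) {
+ *      if (need_flush)
+ *         ilo_cp_flush(ilo->cp, "before stalling map");
+ *      map_synchronously(bo);
+ *   }
+ *
+ * Without the flush, commands queued in the batch would not be known
+ * to the kernel and the map would not wait for them.
+ */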
switch (xfer->method) {
case ILO_TRANSFER_MAP_CPU:
- err = bo->map(bo, (xfer->base.usage & PIPE_TRANSFER_WRITE));
+ err = intel_bo_map(bo, (xfer->base.usage & PIPE_TRANSFER_WRITE));
break;
case ILO_TRANSFER_MAP_GTT:
- err = bo->map_gtt(bo);
+ err = intel_bo_map_gtt(bo);
break;
case ILO_TRANSFER_MAP_UNSYNC:
- err = bo->map_unsynchronized(bo);
+ err = intel_bo_map_unsynchronized(bo);
break;
default:
assert(!"unknown mapping method");
else if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
/* discard old bo and allocate a new one for mapping */
if ((tex && ilo_texture_alloc_bo(tex)) ||
- (buf && ilo_buffer_alloc_bo(buf)))
+ (buf && ilo_buffer_alloc_bo(buf))) {
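+ /*
+ * the replacement bo has a new handle: any state that embedded the
+ * old bo (surface states, vertex buffers, and so on) is stale and
+ * must be re-emitted, which is what the call below arranges
+ */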
+ ilo_mark_states_with_resource_dirty(ilo, res);
will_stall = false;
+ }
}
else if (usage & PIPE_TRANSFER_FLUSH_EXPLICIT) {
/*
/* flush to make bo busy (so that map() stalls as it should) */
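+ /*
+ * (until the batch is submitted, the kernel has no record of the
+ * pending GPU access, so a synchronous map would not wait at all)
+ */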
if (need_flush)
- ilo_cp_flush(ilo->cp);
+ ilo_cp_flush(ilo->cp, "syncing for transfers");
}
}
const struct pipe_box *box,
unsigned *mem_x, unsigned *mem_y)
{
+ const struct ilo_texture_slice *s =
+ ilo_texture_get_slice(tex, level, slice + box->z);
unsigned x, y;
- x = tex->slice_offsets[level][slice + box->z].x + box->x;
- y = tex->slice_offsets[level][slice + box->z].y + box->y;
+ x = s->x + box->x;
+ y = s->y + box->y;
assert(x % tex->block_width == 0 && y % tex->block_height == 0);
static unsigned
tex_get_slice_stride(const struct ilo_texture *tex, unsigned level)
{
+ const struct ilo_texture_slice *s0, *s1;
unsigned qpitch;
/* there are no 3D array textures */
}
}
- qpitch = tex->slice_offsets[level][1].y - tex->slice_offsets[level][0].y;
+ s0 = ilo_texture_get_slice(tex, level, 0);
+ s1 = ilo_texture_get_slice(tex, level, 1);
+ qpitch = s1->y - s0->y;
assert(qpitch % tex->block_height == 0);
return (qpitch / tex->block_height) * tex->bo_stride;
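+/*
+ * For example (made-up numbers): an uncompressed level with
+ * qpitch = 128 rows, block_height = 1, and bo_stride = 1024 bytes has
+ * a slice stride of 128 * 1024 = 131072 bytes; a DXT1 level with the
+ * same qpitch and block_height = 4 has (128 / 4) * 1024 = 32768
+ * bytes, since qpitch counts pixel rows while the bo is addressed in
+ * rows of blocks.
+ */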
{
const bool swizzle = ilo->dev->has_address_swizzling;
const struct pipe_box *box = &xfer->base.box;
- const uint8_t *src = tex->bo->get_virtual(tex->bo);
+ const uint8_t *src = intel_bo_get_virtual(tex->bo);
tex_tile_offset_func tile_offset;
unsigned tiles_per_row;
int slice;
if (tex->separate_s8) {
struct ilo_texture *s8_tex = tex->separate_s8;
- const uint8_t *s8_src = s8_tex->bo->get_virtual(s8_tex->bo);
+ const uint8_t *s8_src = intel_bo_get_virtual(s8_tex->bo);
tex_tile_offset_func s8_tile_offset;
unsigned s8_tiles_per_row;
int dst_cpp, dst_s8_pos, src_cpp_used;
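+ /*
+ * depth and stencil live in separate bos, but the transfer hands the
+ * caller a packed format, so the readback has to interleave the S8
+ * bytes into the destination; the dst_s8_pos/src_cpp_used bookkeeping
+ * below tracks where the stencil byte lands in each packed pixel
+ */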
{
const bool swizzle = ilo->dev->has_address_swizzling;
const struct pipe_box *box = &xfer->base.box;
- uint8_t *dst = tex->bo->get_virtual(tex->bo);
+ uint8_t *dst = intel_bo_get_virtual(tex->bo);
tex_tile_offset_func tile_offset;
unsigned tiles_per_row;
int slice;
if (tex->separate_s8) {
struct ilo_texture *s8_tex = tex->separate_s8;
- uint8_t *s8_dst = s8_tex->bo->get_virtual(s8_tex->bo);
+ uint8_t *s8_dst = intel_bo_get_virtual(s8_tex->bo);
tex_tile_offset_func s8_tile_offset;
unsigned s8_tiles_per_row;
int src_cpp, src_s8_pos, dst_cpp_used;
void *dst;
int slice;
- dst = tex->bo->get_virtual(tex->bo);
+ dst = intel_bo_get_virtual(tex->bo);
dst += tex_get_box_offset(tex, xfer->base.level, box);
/* slice stride is not always available */
int err;
if (prefer_cpu && (tex->tiling == INTEL_TILING_NONE || !linear_view))
- err = tex->bo->map(tex->bo, !for_read_back);
+ err = intel_bo_map(tex->bo, !for_read_back);
else
- err = tex->bo->map_gtt(tex->bo);
+ err = intel_bo_map_gtt(tex->bo);
if (!tex->separate_s8)
return !err;
- err = tex->separate_s8->bo->map(tex->separate_s8->bo, !for_read_back);
+ err = intel_bo_map(tex->separate_s8->bo, !for_read_back);
if (err)
- tex->bo->unmap(tex->bo);
+ intel_bo_unmap(tex->bo);
return !err;
}
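+/*
+ * The choice above: a CPU map sees the bo exactly as stored, tiled
+ * layout included, through a cacheable mapping, while a GTT map goes
+ * through the aperture, which detiles on the fly but is
+ * write-combined and slow to read back.  The CPU map is therefore
+ * only usable when the bo is linear or when the caller can handle
+ * the tiled layout itself (!linear_view).
+ */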
const struct ilo_texture *tex)
{
if (tex->separate_s8)
- tex->separate_s8->bo->unmap(tex->separate_s8->bo);
+ intel_bo_unmap(tex->separate_s8->bo);
- tex->bo->unmap(tex->bo);
+ intel_bo_unmap(tex->bo);
}
static void
struct ilo_texture *tex,
struct ilo_transfer *xfer)
{
- tex->bo->unmap(tex->bo);
+ intel_bo_unmap(tex->bo);
}
static bool
else
xfer->base.layer_stride = 0;
- xfer->ptr = tex->bo->get_virtual(tex->bo);
+ xfer->ptr = intel_bo_get_virtual(tex->bo);
xfer->ptr += tex_get_box_offset(tex, xfer->base.level, &xfer->base.box);
return true;
xfer->base.stride = 0;
xfer->base.layer_stride = 0;
- xfer->ptr = buf->bo->get_virtual(buf->bo);
+ xfer->ptr = intel_bo_get_virtual(buf->bo);
xfer->ptr += xfer->base.box.x;
return true;
{
struct ilo_buffer *buf = ilo_buffer(xfer->base.resource);
- buf->bo->unmap(buf->bo);
+ intel_bo_unmap(buf->bo);
}
static void
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
/* old data is not needed, so discard the old bo to avoid stalling */
- if (ilo_buffer_alloc_bo(buf))
+ if (ilo_buffer_alloc_bo(buf)) {
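+ /* as in the texture path above: states that embedded the old bo are stale */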
+ ilo_mark_states_with_resource_dirty(ilo, &buf->base);
will_stall = false;
+ }
}
else {
/*
/* flush to make bo busy (so that pwrite() stalls as it should) */
if (will_stall && need_flush)
- ilo_cp_flush(ilo->cp);
+ ilo_cp_flush(ilo->cp, "syncing for pwrites");
}
- buf->bo->pwrite(buf->bo, offset, size, data);
+ intel_bo_pwrite(buf->bo, offset, size, data);
}
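+/*
+ * intel_bo_pwrite() copies the user data into the bo on the caller's
+ * behalf, no mapping needed; the kernel handles coherency and blocks
+ * while the bo is busy, which is the stall the flush above makes
+ * happen at the right point.
+ */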
static void
xfer->base.usage = usage;
xfer->base.box = *box;
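+ /*
+ * resolve the resource first so that data living only in auxiliary
+ * buffers (HiZ, for example) is written back to the bo before the
+ * CPU reads or writes it
+ */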
+ ilo_blit_resolve_transfer(ilo, &xfer->base);
+
if (res->target == PIPE_BUFFER)
success = buf_map(ilo, xfer);
else