* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Marek Olšák
*/
+#include "radeonsi/si_pipe.h"
#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
-#include "util/u_threaded_context.h"
#include <inttypes.h>
#include <stdio.h>
-bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
- struct pb_buffer *buf,
- enum radeon_bo_usage usage)
+bool si_rings_is_buffer_referenced(struct r600_common_context *ctx,
+ struct pb_buffer *buf,
+ enum radeon_bo_usage usage)
{
	if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) {
		return true;
	}
	if (radeon_emitted(ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, buf, usage)) {
		return true;
	}
	return false;
}
-void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
- struct r600_resource *resource,
- unsigned usage)
+void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
+ struct r600_resource *resource,
+ unsigned usage)
{
enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
bool busy = false;
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
return NULL;
} else {
ctx->gfx.flush(ctx, 0, NULL);
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
return NULL;
} else {
ctx->dma.flush(ctx, 0, NULL);
return ctx->ws->buffer_map(resource->buf, NULL, usage);
}
-void r600_init_resource_fields(struct r600_common_screen *rscreen,
- struct r600_resource *res,
- uint64_t size, unsigned alignment)
+void si_init_resource_fields(struct si_screen *sscreen,
+ struct r600_resource *res,
+ uint64_t size, unsigned alignment)
{
struct r600_texture *rtex = (struct r600_texture*)res;
res->bo_size = size;
res->bo_alignment = alignment;
res->flags = 0;
+ res->texture_handle_allocated = false;
+ res->image_handle_allocated = false;
switch (res->b.b.usage) {
case PIPE_USAGE_STREAM:
/* Older kernels didn't always flush the HDP cache before
* CS execution
*/
- if (rscreen->info.drm_major == 2 &&
- rscreen->info.drm_minor < 40) {
+ if (sscreen->info.drm_major == 2 &&
+ sscreen->info.drm_minor < 40) {
res->domains = RADEON_DOMAIN_GTT;
res->flags |= RADEON_FLAG_GTT_WC;
break;
}
- res->flags |= RADEON_FLAG_CPU_ACCESS;
/* fall through */
case PIPE_USAGE_DEFAULT:
case PIPE_USAGE_IMMUTABLE:
* ensures all CPU writes finish before the GPU
* executes a command stream.
*/
- if (rscreen->info.drm_major == 2 &&
- rscreen->info.drm_minor < 40)
+ if (sscreen->info.drm_major == 2 &&
+ sscreen->info.drm_minor < 40)
res->domains = RADEON_DOMAIN_GTT;
- else if (res->domains & RADEON_DOMAIN_VRAM)
- res->flags |= RADEON_FLAG_CPU_ACCESS;
}
/* Tiled textures are unmappable. Always put them in VRAM. */
if ((res->b.b.target != PIPE_BUFFER && !rtex->surface.is_linear) ||
res->flags & R600_RESOURCE_FLAG_UNMAPPABLE) {
res->domains = RADEON_DOMAIN_VRAM;
- res->flags &= ~RADEON_FLAG_CPU_ACCESS;
res->flags |= RADEON_FLAG_NO_CPU_ACCESS |
RADEON_FLAG_GTT_WC;
}
- /* If VRAM is just stolen system memory, allow both VRAM and
- * GTT, whichever has free space. If a buffer is evicted from
- * VRAM to GTT, it will stay there.
- *
- * DRM 3.6.0 has good BO move throttling, so we can allow VRAM-only
- * placements even with a low amount of stolen VRAM.
- */
- if (!rscreen->info.has_dedicated_vram &&
- (rscreen->info.drm_major < 3 || rscreen->info.drm_minor < 6) &&
- res->domains == RADEON_DOMAIN_VRAM)
- res->domains = RADEON_DOMAIN_VRAM_GTT;
+ /* Displayable and shareable surfaces are not suballocated. */
+ if (res->b.b.bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
+ res->flags |= RADEON_FLAG_NO_SUBALLOC; /* shareable */
+ else
+ res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;
- if (rscreen->debug_flags & DBG_NO_WC)
+ if (sscreen->debug_flags & DBG(NO_WC))
res->flags &= ~RADEON_FLAG_GTT_WC;
/* Set expected VRAM and GART usage for the buffer. */
res->vram_usage = 0;
res->gart_usage = 0;
+ res->max_forced_staging_uploads = 0;
+ res->b.max_forced_staging_uploads = 0;
- if (res->domains & RADEON_DOMAIN_VRAM)
+ if (res->domains & RADEON_DOMAIN_VRAM) {
res->vram_usage = size;
- else if (res->domains & RADEON_DOMAIN_GTT)
+
+ res->max_forced_staging_uploads =
+ res->b.max_forced_staging_uploads =
+ sscreen->info.has_dedicated_vram &&
+ size >= sscreen->info.vram_vis_size / 4 ? 1 : 0;
+ } else if (res->domains & RADEON_DOMAIN_GTT) {
res->gart_usage = size;
+ }
}
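
[Editor's note, not part of the patch] The forced-staging assignment chain added above packs the heuristic into one ?: expression; a minimal sketch that factors it out for readability, mirroring the expression in si_init_resource_fields() (the helper name is hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: number of forced staging-upload credits a freshly
 * allocated VRAM buffer gets -- 1 for large buffers on dedicated-VRAM GPUs,
 * 0 otherwise -- mirroring the expression in the patch above. */
static unsigned forced_staging_uploads(bool has_dedicated_vram,
				       uint64_t vram_vis_size, uint64_t size)
{
	return has_dedicated_vram && size >= vram_vis_size / 4 ? 1 : 0;
}

For example, with a 256 MiB CPU-visible VRAM window, a 64 MiB buffer gets one credit and a 32 MiB buffer gets none; without dedicated VRAM (APUs), the result is always 0.
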
-bool r600_alloc_resource(struct r600_common_screen *rscreen,
- struct r600_resource *res)
+bool si_alloc_resource(struct si_screen *sscreen,
+ struct r600_resource *res)
{
struct pb_buffer *old_buf, *new_buf;
/* Allocate a new resource. */
- new_buf = rscreen->ws->buffer_create(rscreen->ws, res->bo_size,
+ new_buf = sscreen->ws->buffer_create(sscreen->ws, res->bo_size,
res->bo_alignment,
res->domains, res->flags);
if (!new_buf) {
old_buf = res->buf;
res->buf = new_buf; /* should be atomic */
- if (rscreen->info.has_virtual_memory)
- res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);
+ if (sscreen->info.has_virtual_memory)
+ res->gpu_address = sscreen->ws->buffer_get_virtual_address(res->buf);
else
res->gpu_address = 0;
res->TC_L2_dirty = false;
/* Print debug information. */
- if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
+ if (sscreen->debug_flags & DBG(VM) && res->b.b.target == PIPE_BUFFER) {
fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
res->gpu_address, res->gpu_address + res->buf->size,
res->buf->size);
{
struct r600_resource *rbuffer = r600_resource(buf);
+ threaded_resource_deinit(buf);
util_range_destroy(&rbuffer->valid_buffer_range);
pb_reference(&rbuffer->buf, NULL);
FREE(rbuffer);
struct r600_resource *rbuffer)
{
/* Shared buffers can't be reallocated. */
- if (rbuffer->is_shared)
+ if (rbuffer->b.is_shared)
return false;
/* Sparse buffers can't be reallocated. */
/* In AMD_pinned_memory, the user pointer association only gets
* broken when the buffer is explicitly re-allocated.
*/
- if (rctx->ws->buffer_is_user_ptr(rbuffer->buf))
+ if (rbuffer->b.is_user_ptr)
return false;
/* Check if mapping this buffer would cause waiting for the GPU. */
- if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
+ if (si_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
!rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
} else {
return true;
}
-void r600_invalidate_resource(struct pipe_context *ctx,
- struct pipe_resource *resource)
+/* Replace the storage of dst with src. */
+void si_replace_buffer_storage(struct pipe_context *ctx,
+ struct pipe_resource *dst,
+ struct pipe_resource *src)
+{
+ struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+ struct r600_resource *rdst = r600_resource(dst);
+ struct r600_resource *rsrc = r600_resource(src);
+ uint64_t old_gpu_address = rdst->gpu_address;
+
+ pb_reference(&rdst->buf, rsrc->buf);
+ rdst->gpu_address = rsrc->gpu_address;
+ rdst->b.b.bind = rsrc->b.b.bind;
+ rdst->b.max_forced_staging_uploads = rsrc->b.max_forced_staging_uploads;
+ rdst->max_forced_staging_uploads = rsrc->max_forced_staging_uploads;
+ rdst->flags = rsrc->flags;
+
+ assert(rdst->vram_usage == rsrc->vram_usage);
+ assert(rdst->gart_usage == rsrc->gart_usage);
+ assert(rdst->bo_size == rsrc->bo_size);
+ assert(rdst->bo_alignment == rsrc->bo_alignment);
+ assert(rdst->domains == rsrc->domains);
+
+ rctx->rebind_buffer(ctx, dst, old_gpu_address);
+}
+
+static void si_invalidate_resource(struct pipe_context *ctx,
+ struct pipe_resource *resource)
{
struct r600_common_context *rctx = (struct r600_common_context*)ctx;
struct r600_resource *rbuffer = r600_resource(resource);
else
transfer = slab_alloc(&rctx->pool_transfers);
- transfer->transfer.resource = NULL;
- pipe_resource_reference(&transfer->transfer.resource, resource);
- transfer->transfer.level = 0;
- transfer->transfer.usage = usage;
- transfer->transfer.box = *box;
- transfer->transfer.stride = 0;
- transfer->transfer.layer_stride = 0;
+ transfer->b.b.resource = NULL;
+ pipe_resource_reference(&transfer->b.b.resource, resource);
+ transfer->b.b.level = 0;
+ transfer->b.b.usage = usage;
+ transfer->b.b.box = *box;
+ transfer->b.b.stride = 0;
+ transfer->b.b.layer_stride = 0;
+ transfer->b.staging = NULL;
transfer->offset = offset;
transfer->staging = staging;
- *ptransfer = &transfer->transfer;
+ *ptransfer = &transfer->b.b;
return data;
}
-static bool r600_can_dma_copy_buffer(struct r600_common_context *rctx,
- unsigned dstx, unsigned srcx, unsigned size)
-{
- bool dword_aligned = !(dstx % 4) && !(srcx % 4) && !(size % 4);
-
- return rctx->screen->has_cp_dma ||
- (dword_aligned && (rctx->dma.cs ||
- rctx->screen->has_streamout));
-
-}
-
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
struct pipe_resource *resource,
unsigned level,
struct pipe_transfer **ptransfer)
{
struct r600_common_context *rctx = (struct r600_common_context*)ctx;
- struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
struct r600_resource *rbuffer = r600_resource(resource);
uint8_t *data;
*
* So don't ever use staging buffers.
*/
- if (rscreen->ws->buffer_is_user_ptr(rbuffer->buf))
+ if (rbuffer->b.is_user_ptr)
usage |= PIPE_TRANSFER_PERSISTENT;
/* See if the buffer range being mapped has never been initialized,
* in which case it can be mapped unsynchronized. */
if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
- TC_TRANSFER_MAP_IGNORE_VALID_RANGE)) &&
+ TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
usage & PIPE_TRANSFER_WRITE &&
- !rbuffer->is_shared &&
+ !rbuffer->b.is_shared &&
!util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
}
usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
}
+ /* If a buffer in VRAM is too large and the range is discarded, don't
+ * map it directly. This makes sure that the buffer stays in VRAM.
+ */
+ bool force_discard_range = false;
+ if (usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
+ PIPE_TRANSFER_DISCARD_RANGE) &&
+ !(usage & PIPE_TRANSFER_PERSISTENT) &&
+ /* Try not to decrement the counter if it's not positive. Still racy,
+ * but it makes it harder to wrap the counter from INT_MIN to INT_MAX. */
+ rbuffer->max_forced_staging_uploads > 0 &&
+ p_atomic_dec_return(&rbuffer->max_forced_staging_uploads) >= 0) {
+ usage &= ~(PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
+ PIPE_TRANSFER_UNSYNCHRONIZED);
+ usage |= PIPE_TRANSFER_DISCARD_RANGE;
+ force_discard_range = true;
+ }
+
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
TC_TRANSFER_MAP_NO_INVALIDATE))) {
}
if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
- !(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
- PIPE_TRANSFER_PERSISTENT)) &&
- r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) ||
+ PIPE_TRANSFER_PERSISTENT))) ||
(rbuffer->flags & RADEON_FLAG_SPARSE))) {
assert(usage & PIPE_TRANSFER_WRITE);
/* Check if mapping this buffer would cause waiting for the GPU.
*/
if (rbuffer->flags & RADEON_FLAG_SPARSE ||
- r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
+ force_discard_range ||
+ si_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
!rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
/* Do a wait-free write-only transfer using a temporary buffer. */
unsigned offset;
else if (((usage & PIPE_TRANSFER_READ) &&
!(usage & PIPE_TRANSFER_PERSISTENT) &&
(rbuffer->domains & RADEON_DOMAIN_VRAM ||
- rbuffer->flags & RADEON_FLAG_GTT_WC) &&
- r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) ||
+ rbuffer->flags & RADEON_FLAG_GTT_WC)) ||
(rbuffer->flags & RADEON_FLAG_SPARSE)) {
struct r600_resource *staging;
box->x % R600_MAP_BUFFER_ALIGNMENT,
0, 0, resource, 0, box);
- data = r600_buffer_map_sync_with_rings(rctx, staging,
+ data = si_buffer_map_sync_with_rings(rctx, staging,
usage & ~PIPE_TRANSFER_UNSYNCHRONIZED);
if (!data) {
r600_resource_reference(&staging, NULL);
}
}
- data = r600_buffer_map_sync_with_rings(rctx, rbuffer, usage);
+ data = si_buffer_map_sync_with_rings(rctx, rbuffer, usage);
if (!data) {
return NULL;
}
!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
r600_buffer_do_flush_region(ctx, transfer, &transfer->box);
- if (rtransfer->staging)
- r600_resource_reference(&rtransfer->staging, NULL);
-
+ r600_resource_reference(&rtransfer->staging, NULL);
+ assert(rtransfer->b.staging == NULL); /* for threaded context only */
pipe_resource_reference(&transfer->resource, NULL);
/* Don't use pool_transfers_unsync. We are always in the driver
slab_free(&rctx->pool_transfers, transfer);
}
-void r600_buffer_subdata(struct pipe_context *ctx,
- struct pipe_resource *buffer,
- unsigned usage, unsigned offset,
- unsigned size, const void *data)
+static void si_buffer_subdata(struct pipe_context *ctx,
+ struct pipe_resource *buffer,
+ unsigned usage, unsigned offset,
+ unsigned size, const void *data)
{
struct pipe_transfer *transfer = NULL;
struct pipe_box box;
rbuffer->b.b.next = NULL;
pipe_reference_init(&rbuffer->b.b.reference, 1);
rbuffer->b.b.screen = screen;
+
rbuffer->b.vtbl = &r600_buffer_vtbl;
+ threaded_resource_init(&rbuffer->b.b);
+
rbuffer->buf = NULL;
rbuffer->bind_history = 0;
rbuffer->TC_L2_dirty = false;
- rbuffer->is_shared = false;
util_range_init(&rbuffer->valid_buffer_range);
return rbuffer;
}
-struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
- const struct pipe_resource *templ,
- unsigned alignment)
+static struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
+ const struct pipe_resource *templ,
+ unsigned alignment)
{
- struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
+ struct si_screen *sscreen = (struct si_screen*)screen;
struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);
- r600_init_resource_fields(rscreen, rbuffer, templ->width0, alignment);
+ si_init_resource_fields(sscreen, rbuffer, templ->width0, alignment);
- if (templ->bind & PIPE_BIND_SHARED)
- rbuffer->flags |= RADEON_FLAG_HANDLE;
if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
rbuffer->flags |= RADEON_FLAG_SPARSE;
- if (!r600_alloc_resource(rscreen, rbuffer)) {
+ if (!si_alloc_resource(sscreen, rbuffer)) {
FREE(rbuffer);
return NULL;
}
return &rbuffer->b.b;
}
-struct pipe_resource *r600_aligned_buffer_create(struct pipe_screen *screen,
- unsigned flags,
- unsigned usage,
- unsigned size,
- unsigned alignment)
+struct pipe_resource *si_aligned_buffer_create(struct pipe_screen *screen,
+ unsigned flags,
+ unsigned usage,
+ unsigned size,
+ unsigned alignment)
{
struct pipe_resource buffer;
buffer.height0 = 1;
buffer.depth0 = 1;
buffer.array_size = 1;
- return r600_buffer_create(screen, &buffer, alignment);
+ return si_buffer_create(screen, &buffer, alignment);
}
-struct pipe_resource *
-r600_buffer_from_user_memory(struct pipe_screen *screen,
- const struct pipe_resource *templ,
- void *user_memory)
+static struct pipe_resource *
+si_buffer_from_user_memory(struct pipe_screen *screen,
+ const struct pipe_resource *templ,
+ void *user_memory)
{
- struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
- struct radeon_winsys *ws = rscreen->ws;
+ struct si_screen *sscreen = (struct si_screen*)screen;
+ struct radeon_winsys *ws = sscreen->ws;
struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);
rbuffer->domains = RADEON_DOMAIN_GTT;
rbuffer->flags = 0;
+ rbuffer->b.is_user_ptr = true;
util_range_add(&rbuffer->valid_buffer_range, 0, templ->width0);
+ util_range_add(&rbuffer->b.valid_buffer_range, 0, templ->width0);
/* Convert a user pointer to a buffer. */
rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
return NULL;
}
- if (rscreen->info.has_virtual_memory)
+ if (sscreen->info.has_virtual_memory)
rbuffer->gpu_address =
ws->buffer_get_virtual_address(rbuffer->buf);
else
return &rbuffer->b.b;
}
+
+static struct pipe_resource *si_resource_create(struct pipe_screen *screen,
+ const struct pipe_resource *templ)
+{
+ if (templ->target == PIPE_BUFFER) {
+ return si_buffer_create(screen, templ, 256);
+ } else {
+ return si_texture_create(screen, templ);
+ }
+}
+
+void si_init_screen_buffer_functions(struct si_screen *sscreen)
+{
+ sscreen->b.resource_create = si_resource_create;
+ sscreen->b.resource_destroy = u_resource_destroy_vtbl;
+ sscreen->b.resource_from_user_memory = si_buffer_from_user_memory;
+}
+
+void si_init_buffer_functions(struct si_context *sctx)
+{
+ sctx->b.b.invalidate_resource = si_invalidate_resource;
+ sctx->b.b.transfer_map = u_transfer_map_vtbl;
+ sctx->b.b.transfer_flush_region = u_transfer_flush_region_vtbl;
+ sctx->b.b.transfer_unmap = u_transfer_unmap_vtbl;
+ sctx->b.b.texture_subdata = u_default_texture_subdata;
+ sctx->b.b.buffer_subdata = si_buffer_subdata;
+}
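
[Editor's note, not part of the patch] A minimal usage sketch showing how the hooks installed by si_init_screen_buffer_functions() and si_init_buffer_functions() are reached through the generic gallium interfaces; the helper name upload_constants and the template settings are illustrative assumptions, and screen/pipe are assumed to come from the usual radeonsi screen/context creation paths:

#include <string.h>
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"

/* Create a small constant buffer and fill it: resource_create lands in
 * si_resource_create -> si_buffer_create, and buffer_subdata lands in
 * si_buffer_subdata (both installed by this patch). */
static struct pipe_resource *upload_constants(struct pipe_screen *screen,
					      struct pipe_context *pipe,
					      const void *data, unsigned size)
{
	struct pipe_resource templ;
	struct pipe_resource *buf;

	memset(&templ, 0, sizeof(templ));
	templ.target = PIPE_BUFFER;
	templ.format = PIPE_FORMAT_R8_UNORM;
	templ.bind = PIPE_BIND_CONSTANT_BUFFER;
	templ.usage = PIPE_USAGE_DEFAULT;
	templ.width0 = size;
	templ.height0 = 1;
	templ.depth0 = 1;
	templ.array_size = 1;

	buf = screen->resource_create(screen, &templ);
	if (!buf)
		return NULL;

	pipe->buffer_subdata(pipe, buf, PIPE_TRANSFER_WRITE, 0, size, data);
	return buf;
}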