* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Marek Olšák
*/
#include "r600_cs.h"
RADEON_FLAG_GTT_WC;
}
- /* Only displayable single-sample textures can be shared between
- * processes. */
- if (!(res->b.b.bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT)) &&
- (res->b.b.target == PIPE_BUFFER ||
- res->b.b.nr_samples >= 2 ||
- rtex->surface.micro_tile_mode != RADEON_MICRO_MODE_DISPLAY))
+ /* Displayable and shareable surfaces are not suballocated: they need
+ * their own backing allocation so that the buffer handle can be
+ * exported to other processes. */
+ if (res->b.b.bind & (PIPE_BIND_SHARED | PIPE_BIND_SCANOUT))
+ res->flags |= RADEON_FLAG_NO_SUBALLOC; /* shareable */
+ else
res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;
/* If VRAM is just stolen system memory, allow both VRAM and
res->flags &= ~RADEON_FLAG_NO_CPU_ACCESS; /* disallowed with VRAM_GTT */
}
- if (rscreen->debug_flags & DBG_NO_WC)
+ if (rscreen->debug_flags & DBG(NO_WC))
res->flags &= ~RADEON_FLAG_GTT_WC;
- if (res->b.b.bind & PIPE_BIND_SHARED)
- res->flags |= RADEON_FLAG_NO_SUBALLOC;
-
/* Set expected VRAM and GART usage for the buffer. */
res->vram_usage = 0;
res->gart_usage = 0;
- if (res->domains & RADEON_DOMAIN_VRAM)
+ if (res->domains & RADEON_DOMAIN_VRAM) {
res->vram_usage = size;
- else if (res->domains & RADEON_DOMAIN_GTT)
+
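+ /* Heuristic: on GPUs with dedicated VRAM, a buffer that would take
+ * a quarter or more of the CPU-visible VRAM gets one forced staging
+ * upload, so that mapping it directly doesn't push it out of VRAM
+ * (see the force_discard_range logic in r600_buffer_transfer_map). */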
+ res->max_forced_staging_uploads =
+ res->b.max_forced_staging_uploads =
+ rscreen->info.has_dedicated_vram &&
+ size >= rscreen->info.vram_vis_size / 4 ? 1 : 0;
+ } else if (res->domains & RADEON_DOMAIN_GTT) {
res->gart_usage = size;
+ }
}
bool si_alloc_resource(struct r600_common_screen *rscreen,
res->TC_L2_dirty = false;
/* Print debug information. */
- if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
+ if (rscreen->debug_flags & DBG(VM) && res->b.b.target == PIPE_BUFFER) {
fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
res->gpu_address, res->gpu_address + res->buf->size,
res->buf->size);
pb_reference(&rdst->buf, rsrc->buf);
rdst->gpu_address = rsrc->gpu_address;
rdst->b.b.bind = rsrc->b.b.bind;
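+ /* Also carry over the remaining forced staging uploads, so the
+ * replacement storage keeps the same upload behavior. */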
+ rdst->b.max_forced_staging_uploads = rsrc->b.max_forced_staging_uploads;
+ rdst->max_forced_staging_uploads = rsrc->max_forced_staging_uploads;
rdst->flags = rsrc->flags;
assert(rdst->vram_usage == rsrc->vram_usage);
return data;
}
-static bool r600_can_dma_copy_buffer(struct r600_common_context *rctx,
- unsigned dstx, unsigned srcx, unsigned size)
-{
- bool dword_aligned = !(dstx % 4) && !(srcx % 4) && !(size % 4);
-
- return rctx->screen->has_cp_dma ||
- (dword_aligned && (rctx->dma.cs ||
- rctx->screen->has_streamout));
-
-}
-
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
struct pipe_resource *resource,
unsigned level,
struct pipe_transfer **ptransfer)
{
struct r600_common_context *rctx = (struct r600_common_context*)ctx;
- struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
struct r600_resource *rbuffer = r600_resource(resource);
uint8_t *data;
usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
}
+ /* If a buffer in VRAM is too large and the range is discarded, don't
+ * map it directly: a CPU access to VRAM that isn't CPU-visible can
+ * make the kernel move the buffer to GTT. Uploading through a staging
+ * buffer makes sure that the buffer stays in VRAM.
+ */
+ bool force_discard_range = false;
+ if (usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
+ PIPE_TRANSFER_DISCARD_RANGE) &&
+ !(usage & PIPE_TRANSFER_PERSISTENT) &&
+ /* Try not to decrement the counter if it's not positive. Still racy,
+ * but it makes it harder to wrap the counter from INT_MIN to INT_MAX. */
+ rbuffer->max_forced_staging_uploads > 0 &&
+ p_atomic_dec_return(&rbuffer->max_forced_staging_uploads) >= 0) {
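+ /* Drop the whole-resource discard so the storage isn't reallocated
+ * below, and drop UNSYNCHRONIZED so that force_discard_range takes
+ * the staging-upload path instead of a direct mapping. */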
+ usage &= ~(PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
+ PIPE_TRANSFER_UNSYNCHRONIZED);
+ usage |= PIPE_TRANSFER_DISCARD_RANGE;
+ force_discard_range = true;
+ }
+
if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
TC_TRANSFER_MAP_NO_INVALIDATE))) {
}
if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
- !(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
- PIPE_TRANSFER_PERSISTENT)) &&
- r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) ||
+ PIPE_TRANSFER_PERSISTENT))) ||
(rbuffer->flags & RADEON_FLAG_SPARSE))) {
assert(usage & PIPE_TRANSFER_WRITE);
/* Check if mapping this buffer would cause waiting for the GPU.
*/
if (rbuffer->flags & RADEON_FLAG_SPARSE ||
+ force_discard_range ||
si_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
!rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
/* Do a wait-free write-only transfer using a temporary buffer. */
else if (((usage & PIPE_TRANSFER_READ) &&
!(usage & PIPE_TRANSFER_PERSISTENT) &&
(rbuffer->domains & RADEON_DOMAIN_VRAM ||
- rbuffer->flags & RADEON_FLAG_GTT_WC) &&
- r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) ||
+ rbuffer->flags & RADEON_FLAG_GTT_WC)) ||
(rbuffer->flags & RADEON_FLAG_SPARSE)) {
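+ /* CPU reads from VRAM and from write-combined GTT are uncached and
+ * very slow, and sparse buffers can't be mapped directly, so read
+ * through a staging buffer instead. */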
struct r600_resource *staging;