const struct pipe_box *box)
{
struct i915_context *i915 = i915_context(pipe);
- struct pipe_transfer *transfer;
+ struct pipe_transfer *transfer = util_slab_alloc(&i915->transfer_pool);
- if (usage & PIPE_TRANSFER_MAP_PERMANENTLY) {
- return NULL;
- }
-
- transfer = util_slab_alloc(&i915->transfer_pool);
if (transfer == NULL)
return NULL;
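
Every driver hunk in this patch repeats the shape shown above: with PIPE_TRANSFER_MAP_PERMANENTLY gone, the early-return check disappears and the transfer allocation folds into the declaration. A minimal sketch of the resulting get_transfer shape, assuming the gallium headers of this era; the pool and the unused state are placeholders, not any driver's real code:

    #include "pipe/p_state.h"
    #include "util/u_slab.h"

    static struct util_slab_mempool transfer_pool;  /* util_slab_create()'d at init */

    static struct pipe_transfer *
    my_get_transfer(struct pipe_context *pipe,
                    struct pipe_resource *resource,
                    unsigned level, unsigned usage,
                    const struct pipe_box *box)
    {
       /* No MAP_PERMANENTLY early-out any more: allocate right away. */
       struct pipe_transfer *transfer = util_slab_alloc(&transfer_pool);

       if (transfer == NULL)
          return NULL;

       transfer->resource = resource;
       transfer->level = level;
       transfer->usage = usage;
       transfer->box = *box;
       return transfer;
    }
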
{
struct i915_context *i915 = i915_context(pipe);
struct i915_texture *tex = i915_texture(resource);
- struct i915_transfer *transfer;
+ struct i915_transfer *transfer = util_slab_alloc(&i915->texture_transfer_pool);
boolean use_staging_texture = FALSE;
- if (usage & PIPE_TRANSFER_MAP_PERMANENTLY) {
- return NULL;
- }
-
- transfer = util_slab_alloc(&i915->texture_transfer_pool);
if (transfer == NULL)
return NULL;
assert(resource);
assert(level <= resource->last_level);
- if (usage & PIPE_TRANSFER_MAP_PERMANENTLY) {
- return NULL;
- }
-
/*
* Transfers, like other pipe operations, must happen in order, so flush the
* context if necessary.
*/
{
struct nv04_resource *buf = nv04_resource(resource);
struct nouveau_context *nv = nouveau_context(pipe);
- struct nouveau_transfer *xfr;
-
- if (usage & PIPE_TRANSFER_MAP_PERMANENTLY) {
- return NULL;
- }
-
- xfr = CALLOC_STRUCT(nouveau_transfer);
+ struct nouveau_transfer *xfr = CALLOC_STRUCT(nouveau_transfer);
if (!xfr)
return NULL;
uint32_t size;
int ret;
- if (usage & (PIPE_TRANSFER_MAP_DIRECTLY | PIPE_TRANSFER_MAP_PERMANENTLY))
+ if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
return NULL;
tx = CALLOC_STRUCT(nv50_transfer);
uint32_t size;
int ret;
- if (usage & (PIPE_TRANSFER_MAP_DIRECTLY | PIPE_TRANSFER_MAP_PERMANENTLY))
+ if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
return NULL;
tx = CALLOC_STRUCT(nvc0_transfer);
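
nv50 and nvc0 keep rejecting PIPE_TRANSFER_MAP_DIRECTLY: a tiled miptree cannot hand the CPU a linear pointer. Callers must treat that NULL as "retry without the flag". A hedged sketch of that caller-side fallback, using the two-step get_transfer/transfer_map interface of this era (the retry policy itself is illustrative, not a fixed gallium rule):

    struct pipe_transfer *xfer =
       pipe->get_transfer(pipe, res, level,
                          PIPE_TRANSFER_READ | PIPE_TRANSFER_MAP_DIRECTLY, box);
    if (!xfer) {
       /* Direct mapping refused (e.g. tiled storage); take the normal
        * path, which is free to bounce through a staging copy. */
       xfer = pipe->get_transfer(pipe, res, level, PIPE_TRANSFER_READ, box);
    }
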
unsigned usage,
const struct pipe_box *box)
{
- if (usage & PIPE_TRANSFER_MAP_PERMANENTLY) {
- return NULL;
- }
if((usage & (PIPE_TRANSFER_UNSYNCHRONIZED | PIPE_TRANSFER_DONTBLOCK)) == PIPE_TRANSFER_DONTBLOCK)
{
struct nouveau_bo* bo = ((struct nvfx_resource*)pt)->bo;
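
The mask test above is easy to misread: it fires only when PIPE_TRANSFER_DONTBLOCK is set while PIPE_TRANSFER_UNSYNCHRONIZED is clear, since an unsynchronized map never waits on the GPU anyway. The same condition, spelled out (a sketch, not the driver's code):

    boolean may_sync   = !(usage & PIPE_TRANSFER_UNSYNCHRONIZED);
    boolean dont_block = (usage & PIPE_TRANSFER_DONTBLOCK) != 0;

    if (may_sync && dont_block) {
       /* Only now can the map stall on the GPU, so the driver checks
        * whether the BO is still busy before deciding to map or fail. */
    }
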
struct pipe_resource base;
boolean referenced_cs, referenced_hw;
- if (usage & (PIPE_TRANSFER_MAP_DIRECTLY | PIPE_TRANSFER_MAP_PERMANENTLY)) {
- return NULL;
- }
-
referenced_cs =
r300->rws->cs_is_buffer_referenced(r300->cs, tex->cs_buf);
if (referenced_cs) {
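
This is the synchronization step the ordering comment earlier in the patch talks about: if the buffer is still referenced by commands sitting in the unsubmitted command stream, those commands must be flushed before the CPU may map it. A sketch of the usual shape, with ws/cs/buf as illustrative stand-ins for the driver's winsys state:

    if (ws->cs_is_buffer_referenced(cs, buf)) {
       if (usage & PIPE_TRANSFER_DONTBLOCK)
          return NULL;            /* would have to stall; caller said no */
       pipe->flush(pipe, NULL);   /* submit pending commands, then map   */
    }
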
int r;
boolean use_staging_texture = FALSE;
- if (usage & PIPE_TRANSFER_MAP_PERMANENTLY) {
- return NULL;
- }
-
/* We cannot map a tiled texture directly because the data is
* in a different order, therefore we do detiling using a blit.
*
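
The truncated comment above is the crux of the texture path: tiled layouts reorder texels, so a straight CPU map would read garbage, and the driver instead blits into a linear staging texture and maps that. The decision reduces to something like this sketch (is_tiled is an illustrative predicate, not the driver's real field):

    boolean use_staging_texture = FALSE;

    if (is_tiled(resource, level)) {
       /* Detile with a blit into a linear copy; map the copy instead. */
       use_staging_texture = TRUE;
    }
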
struct svga_buffer *sbuf = svga_buffer(resource);
struct pipe_transfer *transfer;
- if (usage & PIPE_TRANSFER_MAP_PERMANENTLY) {
- return NULL;
- }
-
transfer = CALLOC_STRUCT(pipe_transfer);
if (transfer == NULL) {
return NULL;
unsigned nblocksy = util_format_get_nblocksy(texture->format, box->height);
/* We can't map texture storage directly */
- if (usage & (PIPE_TRANSFER_MAP_DIRECTLY | PIPE_TRANSFER_MAP_PERMANENTLY))
+ if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
return NULL;
assert(box->depth == 1);
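
The util_format_get_nblocksy() call above counts block rows, not pixel rows; for compressed formats one block covers several texels. A quick worked example with the u_format helpers (the commented values follow from DXT1's 4x4 blocks at 8 bytes each):

    #include "util/u_format.h"

    unsigned bw = util_format_get_blockwidth(PIPE_FORMAT_DXT1_RGB);     /* 4   */
    unsigned by = util_format_get_nblocksy(PIPE_FORMAT_DXT1_RGB, 64);   /* 16  */
    unsigned st = util_format_get_stride(PIPE_FORMAT_DXT1_RGB, 64);     /* 128 */
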
*/
PIPE_TRANSFER_MAP_DIRECTLY = (1 << 2),
- /**
- * The transfer should map the resource storage directly and the GPU should
- * be able to see what the CPU has written. Such a storage may stay mapped
- * while issuing draw commands which use it. The only allowed usage is
- * non-overlapping writes which are suballocated out of a big buffer.
- * The minimum allowed alignment of suballocations is 256 bytes (this is
- * a subject to change).
- * The flag is intended to be used to avoid mapping and unmapping
- * resources repeatedly when doing uploads and draws intermixed.
- *
- * The driver may return NULL if that isn't possible, and the state
- * tracker needs to cope with that and use an alternative path
- * without this flag.
- */
- PIPE_TRANSFER_MAP_PERMANENTLY = (1 << 3),
-
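
For code that relied on the deleted flag, the replacement is simply to bracket each upload with its own map/unmap instead of writing through one long-lived pointer; PIPE_TRANSFER_UNSYNCHRONIZED keeps the repeated maps cheap for the non-overlapping writes the deleted comment describes. A sketch against the two-step transfer interface of this era (offset/size/data are placeholders; assumes string.h and util/u_box.h):

    struct pipe_box box;
    struct pipe_transfer *t;

    u_box_1d(offset, size, &box);
    t = pipe->get_transfer(pipe, buf, 0,
                           PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED,
                           &box);
    if (t) {
       memcpy(pipe->transfer_map(pipe, t), data, size);
       pipe->transfer_unmap(pipe, t);
       pipe->transfer_destroy(pipe, t);
    }
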
/**
* Discards the memory within the mapped region.
*