ilo: enable PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT and reorder caps a bit.
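
For reference, a persistent coherent mapping is what st/mesa uses to expose
GL_ARB_buffer_storage.  A rough client-side sketch against the Gallium
interface (not part of this patch; screen and pipe are an existing
pipe_screen/pipe_context pair, the size and bind flags are arbitrary, and the
usual headers such as pipe/p_context.h, util/u_box.h are assumed):

  struct pipe_resource templ;
  memset(&templ, 0, sizeof(templ));
  templ.target = PIPE_BUFFER;
  templ.format = PIPE_FORMAT_R8_UNORM;
  templ.width0 = 64 * 1024;          /* arbitrary size */
  templ.height0 = 1;
  templ.depth0 = 1;
  templ.array_size = 1;
  templ.usage = PIPE_USAGE_DYNAMIC;
  templ.bind = PIPE_BIND_VERTEX_BUFFER;
  templ.flags = PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
                PIPE_RESOURCE_FLAG_MAP_COHERENT;

  struct pipe_resource *buf = screen->resource_create(screen, &templ);

  struct pipe_box box;
  struct pipe_transfer *xfer;
  u_box_1d(0, templ.width0, &box);

  /* map once; the pointer stays valid across draws that use the buffer */
  void *ptr = pipe->transfer_map(pipe, buf, 0,
                                 PIPE_TRANSFER_WRITE |
                                 PIPE_TRANSFER_PERSISTENT |
                                 PIPE_TRANSFER_COHERENT,
                                 &box, &xfer);
  /* ... write data through ptr, issue draws ... */
  pipe->transfer_unmap(pipe, xfer);
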
}
}
-static void
+static bool
tex_layout_init(struct tex_layout *layout,
struct pipe_screen *screen,
const struct pipe_resource *templ,
tex_layout_init_alignments(layout);
tex_layout_init_qpitch(layout);
+ if (templ->flags & PIPE_RESOURCE_FLAG_MAP_PERSISTENT) {
+ /* fail: these need on-the-fly tiling/untiling or format conversion */
+ if (layout->separate_stencil ||
+ layout->format == PIPE_FORMAT_S8_UINT ||
+ layout->format != templ->format)
+ return false;
+ }
+
if (slices) {
int lv;
for (lv = 0; lv <= templ->last_level; lv++)
layout->levels[lv].slices = slices[lv];
}
+
+ return true;
}
static void
tex->imported = (handle != NULL);
- tex_layout_init(&layout, screen, templ, tex->slices);
+ if (!tex_layout_init(&layout, screen, templ, tex->slices)) {
+ tex_destroy(tex);
+ return NULL;
+ }
switch (templ->target) {
case PIPE_TEXTURE_1D:
return ILO_MAX_SO_BINDINGS / ILO_MAX_SO_BUFFERS;
case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
return ILO_MAX_SO_BINDINGS;
- case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
- case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
- case PIPE_CAP_MAX_VERTEX_STREAMS:
- return 0;
case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
if (is->dev.gen >= ILO_GEN(7))
return is->dev.has_gen7_sol_reset;
case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
return true;
case PIPE_CAP_TGSI_VS_LAYER_VIEWPORT:
+ case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
+ case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
case PIPE_CAP_TEXTURE_GATHER_SM5:
+ return 0;
case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
+ return true;
case PIPE_CAP_FAKE_SW_MSAA:
case PIPE_CAP_TEXTURE_QUERY_LOD:
case PIPE_CAP_SAMPLE_SHADING:
case PIPE_CAP_TEXTURE_GATHER_OFFSETS:
case PIPE_CAP_TGSI_VS_WINDOW_SPACE_POSITION:
+ case PIPE_CAP_MAX_VERTEX_STREAMS:
+ case PIPE_CAP_DRAW_INDIRECT:
return 0;
default:
* synchronization at all on mapping.
* - When PIPE_TRANSFER_MAP_DIRECTLY is set, no staging area is allowed.
* - When PIPE_TRANSFER_DONTBLOCK is set, we should fail if we have to block.
+ * - When PIPE_TRANSFER_PERSISTENT is set, the GPU may access the buffer
+ * while it is mapped.  Synchronization is done with memory barriers, issued
+ * explicitly via memory_barrier() or implicitly via
+ * transfer_flush_region(), as well as with GPU fences.
+ * - When PIPE_TRANSFER_COHERENT is set, updates by either the CPU or the GPU
+ * should be made visible to the other side immediately.  Since the kernel
+ * flushes GPU caches at the end of each batch buffer, the CPU always sees
+ * GPU updates.  We could use a coherent mapping to make every persistent
+ * mapping coherent.
*
* These also apply to textures, except that we may additionally need to do
* format conversion or tiling/untiling.
need_convert = false;
if (need_convert) {
- if (usage & PIPE_TRANSFER_MAP_DIRECTLY)
+ if (usage & (PIPE_TRANSFER_MAP_DIRECTLY | PIPE_TRANSFER_PERSISTENT))
return false;
*method = m;
m = ILO_TRANSFER_MAP_GTT; /* to have a linear view */
else if (is->dev.has_llc)
m = ILO_TRANSFER_MAP_CPU; /* fast and mostly coherent */
+ else if (usage & PIPE_TRANSFER_PERSISTENT)
+ m = ILO_TRANSFER_MAP_GTT; /* for coherency */
else if (usage & PIPE_TRANSFER_READ)
m = ILO_TRANSFER_MAP_CPU; /* gtt read is too slow */
else
PIPE_TRANSFER_DISCARD_RANGE |
PIPE_TRANSFER_FLUSH_EXPLICIT);
const unsigned reasons_against = (PIPE_TRANSFER_READ |
- PIPE_TRANSFER_MAP_DIRECTLY);
+ PIPE_TRANSFER_MAP_DIRECTLY |
+ PIPE_TRANSFER_PERSISTENT);
return (usage & can_writeback) && !(usage & reasons_against);
}
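
For the persistent-but-not-coherent case described in the comment above, the
client is responsible for the barriers.  A sketch of that path (again not part
of this patch; buf, box and xfer are as in the first sketch, offset, data and
data_size are placeholders, and the box passed to transfer_flush_region() is
relative to the mapped range):

  void *ptr = pipe->transfer_map(pipe, buf, 0,
                                 PIPE_TRANSFER_WRITE |
                                 PIPE_TRANSFER_PERSISTENT |
                                 PIPE_TRANSFER_FLUSH_EXPLICIT,
                                 &box, &xfer);

  memcpy((char *) ptr + offset, data, data_size);

  /* make the written range visible to the GPU */
  struct pipe_box flush_box;
  u_box_1d(offset, data_size, &flush_box);
  pipe->transfer_flush_region(pipe, xfer, &flush_box);

  /* in the other direction, a barrier (typically followed by a fence wait)
   * makes earlier GPU writes visible through the mapped pointer */
  pipe->memory_barrier(pipe, PIPE_BARRIER_MAPPED_BUFFER);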