}
void *
-brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, int write_enable)
+brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
bo->map_cpu);
set_domain(brw, "CPU mapping", bo, I915_GEM_DOMAIN_CPU,
- write_enable ? I915_GEM_DOMAIN_CPU : 0);
+ (flags & MAP_WRITE) ? I915_GEM_DOMAIN_CPU : 0);
bo_mark_mmaps_incoherent(bo);
VG(VALGRIND_MAKE_MEM_DEFINED(bo->map_cpu, bo->size));
}
void *
-brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo)
+brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
* does reasonable things.
*/
if (!bufmgr->has_llc)
- return brw_bo_map_gtt(brw, bo);
+ return brw_bo_map_gtt(brw, bo, MAP_READ | MAP_WRITE);
pthread_mutex_lock(&bufmgr->lock);
*/
void brw_bo_unreference(struct brw_bo *bo);
+/* Must match MapBufferRange interface (for convenience) */
+#define MAP_READ          GL_MAP_READ_BIT
+#define MAP_WRITE         GL_MAP_WRITE_BIT
+#define MAP_ASYNC         GL_MAP_UNSYNCHRONIZED_BIT
+#define MAP_PERSISTENT    GL_MAP_PERSISTENT_BIT
+#define MAP_COHERENT      GL_MAP_COHERENT_BIT
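+/* Because these flags are literally the GL bits, a MapBufferRange access
+ * mask can be handed to the map functions unchanged. */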
+/* Driver-internal flags, kept in the top byte so they cannot collide with
+ * the GL_MAP_* bits above. */
+#define MAP_INTERNAL_MASK (0xffu << 24)
+#define MAP_RAW           (0x01 << 24)
+
/**
* Maps the buffer into userspace.
*
* This function will block waiting for any existing execution on the
* buffer to complete, first. The resulting mapping is returned.
*/
-MUST_CHECK void *brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, int write_enable);
+MUST_CHECK void *brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, unsigned flags);
/**
* Reduces the refcount on the userspace mapping of the buffer
unsigned int handle);
void brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr);
MUST_CHECK void *brw_bo_map_unsynchronized(struct brw_context *brw, struct brw_bo *bo);
-MUST_CHECK void *brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo);
+MUST_CHECK void *brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags);
int brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns);
if (!read_oa_samples(brw))
goto error;
- query_buffer = brw_bo_map_cpu(brw, obj->oa.bo, false);
+ query_buffer = brw_bo_map_cpu(brw, obj->oa.bo, MAP_READ);
start = last = query_buffer;
end = query_buffer + (MI_RPC_BO_END_OFFSET_BYTES / sizeof(uint32_t));
MI_RPC_BO_SIZE, 64);
#ifdef DEBUG
/* Pre-filling the BO helps debug whether writes landed. */
- void *map = brw_bo_map_cpu(brw, obj->oa.bo, true);
+ void *map = brw_bo_map_cpu(brw, obj->oa.bo, MAP_READ | MAP_WRITE);
memset(map, 0x80, MI_RPC_BO_SIZE);
brw_bo_unmap(obj->oa.bo);
#endif
int n_counters = obj->query->n_counters;
uint8_t *p = data;
- uint64_t *start = brw_bo_map_cpu(brw, obj->pipeline_stats.bo, false);
+ uint64_t *start = brw_bo_map_cpu(brw, obj->pipeline_stats.bo, MAP_READ);
uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
for (int i = 0; i < n_counters; i++) {
* delaying reading the reports, but it doesn't look like it's a big
* overhead compared to the cost of tracking the time in the first place.
*/
- void *bo_map = brw_bo_map_cpu(brw, brw->shader_time.bo, true);
+ void *bo_map = brw_bo_map_cpu(brw, brw->shader_time.bo, MAP_READ | MAP_WRITE);
for (int i = 0; i < brw->shader_time.num_entries; i++) {
uint32_t *times = bo_map + i * 3 * BRW_SHADER_TIME_STRIDE;
if (brw->has_llc) {
memcpy(llc_map, cache->map, cache->next_offset);
} else {
- void *map = brw_bo_map_cpu(brw, cache->bo, false);
+ void *map = brw_bo_map_cpu(brw, cache->bo, MAP_READ);
brw_bo_subdata(new_bo, 0, cache->next_offset, map);
brw_bo_unmap(cache->bo);
}
void *map;
if (!brw->has_llc)
- map = brw_bo_map_cpu(brw, cache->bo, false);
+ map = brw_bo_map_cpu(brw, cache->bo, MAP_READ);
else
map = cache->map;
void *map;
if (!brw->has_llc)
- map = brw_bo_map_cpu(brw, cache->bo, false);
+ map = brw_bo_map_cpu(brw, cache->bo, MAP_READ);
else
map = cache->map;
}
}
- results = brw_bo_map_cpu(brw, query->bo, false);
+ results = brw_bo_map_cpu(brw, query->bo, MAP_READ);
switch (query->Base.Target) {
case GL_TIME_ELAPSED_EXT:
/* The query BO contains the starting and ending timestamps.
if (query->bo == NULL)
return;
- uint64_t *results = brw_bo_map_cpu(brw, query->bo, false);
+ uint64_t *results = brw_bo_map_cpu(brw, query->bo, MAP_READ);
switch (query->Base.Target) {
case GL_TIME_ELAPSED:
/* The query BO contains the starting and ending timestamps.
if (unlikely(brw->perf_debug && brw_bo_busy(obj->prim_count_bo)))
perf_debug("Stalling for # of transform feedback primitives written.\n");
- uint64_t *prim_counts = brw_bo_map_cpu(brw, obj->prim_count_bo, false);
+ uint64_t *prim_counts = brw_bo_map_cpu(brw, obj->prim_count_bo, MAP_READ);
assert(obj->prim_count_buffer_index % (2 * streams) == 0);
int pairs = obj->prim_count_buffer_index / (2 * streams);
batch->bo = brw_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
if (has_llc) {
- batch->map = brw_bo_map_cpu(NULL, batch->bo, true);
+ batch->map = brw_bo_map_cpu(NULL, batch->bo, MAP_READ | MAP_WRITE);
}
batch->map_next = batch->map;
if (batch->ring != RENDER_RING)
return;
- void *map = brw_bo_map_cpu(brw, batch->bo, false);
+ void *map = brw_bo_map_cpu(brw, batch->bo, MAP_READ);
if (map == NULL) {
fprintf(stderr,
"WARNING: failed to map batchbuffer, "
assert(intel_obj);
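+ /* The MAP_* flags are defined to the matching GL_MAP_* bits, so the GL
+  * access mask can be passed to the bufmgr unchanged; verify that here.
+  */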
+ STATIC_ASSERT(GL_MAP_UNSYNCHRONIZED_BIT == MAP_ASYNC);
+ STATIC_ASSERT(GL_MAP_WRITE_BIT == MAP_WRITE);
+ STATIC_ASSERT(GL_MAP_READ_BIT == MAP_READ);
+ STATIC_ASSERT(GL_MAP_PERSISTENT_BIT == MAP_PERSISTENT);
+ STATIC_ASSERT(GL_MAP_COHERENT_BIT == MAP_COHERENT);
+ assert((access & MAP_INTERNAL_MASK) == 0);
+
/* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
* internally uses our functions directly.
*/
alignment);
void *map;
if (brw->has_llc) {
- map = brw_bo_map_cpu(brw, intel_obj->range_map_bo[index],
- (access & GL_MAP_WRITE_BIT) != 0);
+ map = brw_bo_map_cpu(brw, intel_obj->range_map_bo[index], access);
} else {
- map = brw_bo_map_gtt(brw, intel_obj->range_map_bo[index]);
+ map = brw_bo_map_gtt(brw, intel_obj->range_map_bo[index], access);
}
obj->Mappings[index].Pointer = map + intel_obj->map_extra[index];
return obj->Mappings[index].Pointer;
map = brw_bo_map_unsynchronized(brw, intel_obj->buffer);
} else if (!brw->has_llc && (!(access & GL_MAP_READ_BIT) ||
(access & GL_MAP_PERSISTENT_BIT))) {
- map = brw_bo_map_gtt(brw, intel_obj->buffer);
+ map = brw_bo_map_gtt(brw, intel_obj->buffer, access);
mark_buffer_inactive(intel_obj);
} else {
- map = brw_bo_map_cpu(brw, intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
+ map = brw_bo_map_cpu(brw, intel_obj->buffer, access);
mark_buffer_inactive(intel_obj);
}
*
* Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
*/
- void *map = brw_bo_map_gtt(brw, mt->mcs_buf->bo);
+ void *map = brw_bo_map_gtt(brw, mt->mcs_buf->bo, MAP_READ | MAP_WRITE);
if (unlikely(map == NULL)) {
fprintf(stderr, "Failed to map mcs buffer into GTT\n");
brw_bo_unreference(mt->mcs_buf->bo);
* long as cache consistency is maintained).
*/
if (mt->tiling != I915_TILING_NONE || mt->is_scanout)
- return brw_bo_map_gtt(brw, bo);
+ return brw_bo_map_gtt(brw, bo, mode);
else
- return brw_bo_map_cpu(brw, bo, mode & GL_MAP_WRITE_BIT);
+ return brw_bo_map_cpu(brw, bo, mode);
}
static void
intel_batchbuffer_flush(brw);
}
- void *map = brw_bo_map_cpu(brw, bo, false /* write enable */);
+ void *map = brw_bo_map_cpu(brw, bo, MAP_READ);
if (map == NULL) {
DBG("%s: failed to map bo\n", __func__);
return false;
if (bo == NULL)
goto err_results;
- map = brw_bo_map_cpu(NULL, bo, 1);
+ map = brw_bo_map_cpu(NULL, bo, MAP_READ | MAP_WRITE);
if (!map)
goto err_batch;
drmIoctl(dri_screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
/* Check whether the value got written. */
- void *results_map = brw_bo_map_cpu(NULL, results, false);
+ void *results_map = brw_bo_map_cpu(NULL, results, MAP_READ);
if (results_map) {
success = *((uint32_t *)results_map + offset) == expected_value;
brw_bo_unmap(results);
intel_batchbuffer_flush(brw);
}
- void *map = brw_bo_map_cpu(brw, bo, false /* write enable */);
+ void *map = brw_bo_map_cpu(brw, bo, MAP_READ);
if (map == NULL) {
DBG("%s: failed to map bo\n", __func__);
return false;
intel_batchbuffer_flush(brw);
}
- void *map = brw_bo_map_cpu(brw, bo, true /* write enable */);
+ void *map = brw_bo_map_cpu(brw, bo, MAP_READ | MAP_WRITE);
if (map == NULL) {
DBG("%s: failed to map bo\n", __func__);
return false;
brw->upload.bo = brw_bo_alloc(brw->bufmgr, "streamed data",
MAX2(INTEL_UPLOAD_SIZE, size), 4096);
if (brw->has_llc)
- brw->upload.map = brw_bo_map_cpu(brw, brw->upload.bo, true);
+ brw->upload.map = brw_bo_map_cpu(brw, brw->upload.bo, MAP_READ | MAP_WRITE);
else
- brw->upload.map = brw_bo_map_gtt(brw, brw->upload.bo);
+ brw->upload.map = brw_bo_map_gtt(brw, brw->upload.bo, MAP_READ | MAP_WRITE);
}
brw->upload.next_offset = offset + size;