bo->size = open_arg.size;
bo->offset64 = 0;
- bo->virtual = NULL;
bo->bufmgr = bufmgr;
bo->gem_handle = open_arg.handle;
bo->name = name;
}
}
-int
+void *
brw_bo_map(struct brw_context *brw, struct brw_bo *bo, int write_enable)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
- int ret;
pthread_mutex_lock(&bufmgr->lock);
memclear(mmap_arg);
mmap_arg.handle = bo->gem_handle;
mmap_arg.size = bo->size;
- ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+ int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
if (ret != 0) {
-      ret = -errno;
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
pthread_mutex_unlock(&bufmgr->lock);
- return ret;
+ return NULL;
}
bo->map_count++;
VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
bo->mem_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
}
DBG("bo_map: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->mem_virtual);
- bo->virtual = bo->mem_virtual;
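+   /* Moving the BO to the CPU domain waits for any outstanding GPU access
+    * and makes the data coherent with this CPU mapping.
+    */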
set_domain(brw, "CPU mapping", bo, I915_GEM_DOMAIN_CPU,
write_enable ? I915_GEM_DOMAIN_CPU : 0);
VG(VALGRIND_MAKE_MEM_DEFINED(bo->mem_virtual, bo->size));
pthread_mutex_unlock(&bufmgr->lock);
- return 0;
+ return bo->mem_virtual;
}
-static int
+static void *
map_gtt(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
- int ret;
/* Get a mapping of the buffer if we haven't before. */
if (bo->gtt_virtual == NULL) {
mmap_arg.handle = bo->gem_handle;
/* Get the fake offset back... */
- ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
+ int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
if (ret != 0) {
- ret = -errno;
DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
- return ret;
+ return NULL;
}
/* and mmap it */
MAP_SHARED, bufmgr->fd, mmap_arg.offset);
if (bo->gtt_virtual == MAP_FAILED) {
bo->gtt_virtual = NULL;
- ret = -errno;
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
- return ret;
+ return NULL;
}
}
- bo->map_count++;
- bo->virtual = bo->gtt_virtual;
-
DBG("bo_map_gtt: %d (%s) -> %p\n", bo->gem_handle, bo->name,
bo->gtt_virtual);
- return 0;
+ bo->map_count++;
+ return bo->gtt_virtual;
}
-int
+void *
brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
- int ret;
pthread_mutex_lock(&bufmgr->lock);
- ret = map_gtt(bo);
- if (ret) {
+ void *map = map_gtt(bo);
+ if (map == NULL) {
pthread_mutex_unlock(&bufmgr->lock);
- return ret;
+ return NULL;
}
/* Now move it to the GTT domain so that the GPU and CPU
VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size));
pthread_mutex_unlock(&bufmgr->lock);
- return 0;
+ return map;
}
/**
* undefined).
*/
-int
+void *
brw_bo_map_unsynchronized(struct brw_context *brw, struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
- int ret;
/* If the CPU cache isn't coherent with the GTT, then use a
* regular synchronized mapping. The problem is that we don't
pthread_mutex_lock(&bufmgr->lock);
- ret = map_gtt(bo);
- if (ret == 0) {
+ void *map = map_gtt(bo);
+ if (map != NULL) {
bo_mark_mmaps_incoherent(bo);
VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size));
}
pthread_mutex_unlock(&bufmgr->lock);
- return ret;
+ return map;
}
int
if (--bo->map_count == 0) {
bo_mark_mmaps_incoherent(bo);
- bo->virtual = NULL;
}
pthread_mutex_unlock(&bufmgr->lock);
*/
uint64_t align;
- /**
- * Virtual address for accessing the buffer data. Only valid while
- * mapped.
- */
-#ifdef __cplusplus
- void *virt;
-#else
- void *virtual;
-#endif
-
/** Buffer manager context associated with this buffer object */
struct brw_bufmgr *bufmgr;
* Maps the buffer into userspace.
*
* This function will block waiting for any existing execution on the
- * buffer to complete, first. The resulting mapping is available at
- * buf->virtual.
+ * buffer to complete, first. Returns the mapping on success, or NULL
+ * on failure.
*/
-int brw_bo_map(struct brw_context *brw, struct brw_bo *bo, int write_enable);
+MUST_CHECK void *brw_bo_map(struct brw_context *brw, struct brw_bo *bo, int write_enable);
/**
* Reduces the refcount on the userspace mapping of the buffer
const char *name,
unsigned int handle);
void brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr);
-int brw_bo_map_unsynchronized(struct brw_context *brw, struct brw_bo *bo);
-int brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo);
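+/* Like brw_bo_map(), these return the mapping on success and NULL on failure. */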
+MUST_CHECK void *brw_bo_map_unsynchronized(struct brw_context *brw, struct brw_bo *bo);
+MUST_CHECK void *brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo);
int brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns);
struct brw_cache_item **items;
struct brw_bo *bo;
+ void *map;
GLuint size, n_items;
uint32_t next_offset;
struct {
struct brw_bo *bo;
+ void *map;
uint32_t next_offset;
} upload;
if (!read_oa_samples(brw))
goto error;
- brw_bo_map(brw, obj->oa.bo, false);
- query_buffer = obj->oa.bo->virtual;
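+ /* Map the OA counter BO so the report snapshots can be read below. */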
+ query_buffer = brw_bo_map(brw, obj->oa.bo, false);
start = last = query_buffer;
end = query_buffer + (MI_RPC_BO_END_OFFSET_BYTES / sizeof(uint32_t));
MI_RPC_BO_SIZE, 64);
#ifdef DEBUG
/* Pre-filling the BO helps debug whether writes landed. */
- brw_bo_map(brw, obj->oa.bo, true);
- memset((char *) obj->oa.bo->virtual, 0x80, MI_RPC_BO_SIZE);
+ void *map = brw_bo_map(brw, obj->oa.bo, true);
+ memset(map, 0x80, MI_RPC_BO_SIZE);
brw_bo_unmap(obj->oa.bo);
#endif
int n_counters = obj->query->n_counters;
uint8_t *p = data;
- brw_bo_map(brw, obj->pipeline_stats.bo, false);
- uint64_t *start = obj->pipeline_stats.bo->virtual;
+ uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, false);
uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
for (int i = 0; i < n_counters; i++) {
* delaying reading the reports, but it doesn't look like it's a big
* overhead compared to the cost of tracking the time in the first place.
*/
- brw_bo_map(brw, brw->shader_time.bo, true);
- void *bo_map = brw->shader_time.bo->virtual;
+ void *bo_map = brw_bo_map(brw, brw->shader_time.bo, true);
for (int i = 0; i < brw->shader_time.num_entries; i++) {
uint32_t *times = bo_map + i * 3 * BRW_SHADER_TIME_STRIDE;
{
struct brw_context *brw = cache->brw;
struct brw_bo *new_bo;
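+ /* CPU pointer for the new cache BO; only set and used on LLC platforms,
+  * where the BO stays mapped.
+  */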
+ void *llc_map = NULL;
new_bo = brw_bo_alloc(brw->bufmgr, "program cache", new_size, 64);
if (can_do_exec_capture(brw->screen))
new_bo->kflags = EXEC_OBJECT_CAPTURE;
if (brw->has_llc)
- brw_bo_map_unsynchronized(brw, new_bo);
+ llc_map = brw_bo_map_unsynchronized(brw, new_bo);
/* Copy any existing data that needs to be saved. */
if (cache->next_offset != 0) {
if (brw->has_llc) {
- memcpy(new_bo->virtual, cache->bo->virtual, cache->next_offset);
+ memcpy(llc_map, cache->map, cache->next_offset);
} else {
- brw_bo_map(brw, cache->bo, false);
- brw_bo_subdata(new_bo, 0, cache->next_offset,
- cache->bo->virtual);
+ void *map = brw_bo_map(brw, cache->bo, false);
+ brw_bo_subdata(new_bo, 0, cache->next_offset, map);
brw_bo_unmap(cache->bo);
}
}
brw_bo_unmap(cache->bo);
brw_bo_unreference(cache->bo);
cache->bo = new_bo;
+ cache->map = brw->has_llc ? llc_map : NULL;
cache->bo_used_by_gpu = false;
/* Since we have a new BO in place, we need to signal the units
if (item->cache_id != cache_id || item->size != data_size)
continue;
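+ /* Compare against the existing cache contents: use the persistent map on
+  * LLC platforms, otherwise map the BO just for the comparison.
+  */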
+ void *map;
if (!brw->has_llc)
- brw_bo_map(brw, cache->bo, false);
- ret = memcmp(cache->bo->virtual + item->offset, data, item->size);
+ map = brw_bo_map(brw, cache->bo, false);
+ else
+ map = cache->map;
+
+ ret = memcmp(map + item->offset, data, item->size);
if (!brw->has_llc)
brw_bo_unmap(cache->bo);
if (ret)
/* Copy data to the buffer */
if (brw->has_llc) {
- memcpy((char *)cache->bo->virtual + item->offset, data, data_size);
+ memcpy(cache->map + item->offset, data, data_size);
} else {
brw_bo_subdata(cache->bo, item->offset, data_size, data);
}
if (can_do_exec_capture(brw->screen))
cache->bo->kflags = EXEC_OBJECT_CAPTURE;
if (brw->has_llc)
- brw_bo_map_unsynchronized(brw, cache->bo);
+ cache->map = brw_bo_map_unsynchronized(brw, cache->bo);
}
static void
brw_bo_unmap(cache->bo);
brw_bo_unreference(cache->bo);
cache->bo = NULL;
+ cache->map = NULL;
}
brw_clear_cache(brw, cache);
free(cache->items);
{
const struct brw_cache *cache = &brw->cache;
struct brw_cache_item *item;
+ void *map;
if (!brw->has_llc)
- brw_bo_map(brw, cache->bo, false);
+ map = brw_bo_map(brw, cache->bo, false);
+ else
+ map = cache->map;
for (unsigned i = 0; i < cache->size; i++) {
for (item = cache->items[i]; item; item = item->next) {
fprintf(stderr, "%s:\n", cache_name(i));
- brw_disassemble(&brw->screen->devinfo, cache->bo->virtual,
+ brw_disassemble(&brw->screen->devinfo, map,
item->offset, item->size, stderr);
}
}
}
}
- brw_bo_map(brw, query->bo, false);
- results = query->bo->virtual;
+ results = brw_bo_map(brw, query->bo, false);
switch (query->Base.Target) {
case GL_TIME_ELAPSED_EXT:
/* The query BO contains the starting and ending timestamps.
if (query->bo == NULL)
return;
- brw_bo_map(brw, query->bo, false);
- uint64_t *results = query->bo->virtual;
+ uint64_t *results = brw_bo_map(brw, query->bo, false);
switch (query->Base.Target) {
case GL_TIME_ELAPSED:
/* The query BO contains the starting and ending timestamps.
if (unlikely(brw->perf_debug && brw_bo_busy(obj->prim_count_bo)))
perf_debug("Stalling for # of transform feedback primitives written.\n");
- brw_bo_map(brw, obj->prim_count_bo, false);
- uint64_t *prim_counts = obj->prim_count_bo->virtual;
+ uint64_t *prim_counts = brw_bo_map(brw, obj->prim_count_bo, false);
assert(obj->prim_count_buffer_index % (2 * streams) == 0);
int pairs = obj->prim_count_buffer_index / (2 * streams);
batch->bo = brw_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
if (has_llc) {
- brw_bo_map(NULL, batch->bo, true);
- batch->map = batch->bo->virtual;
+ batch->map = brw_bo_map(NULL, batch->bo, true);
}
batch->map_next = batch->map;
if (batch->ring != RENDER_RING)
return;
- int ret = brw_bo_map(brw, batch->bo, false);
- if (ret != 0) {
+ void *map = brw_bo_map(brw, batch->bo, false);
+ if (map == NULL) {
fprintf(stderr,
- "WARNING: failed to map batchbuffer (%s), "
- "dumping uploaded data instead.\n", strerror(ret));
+ "WARNING: failed to map batchbuffer, "
+ "dumping uploaded data instead.\n");
}
- uint32_t *data = batch->bo->virtual ? batch->bo->virtual : batch->map;
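+ /* If the BO couldn't be mapped, dump the local batch copy instead. */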
+ uint32_t *data = map ? map : batch->map;
uint32_t *end = data + USED_BATCH(*batch);
- uint32_t gtt_offset = batch->bo->virtual ? batch->bo->offset64 : 0;
+ uint32_t gtt_offset = map ? batch->bo->offset64 : 0;
int length;
bool color = INTEL_DEBUG & DEBUG_COLOR;
}
}
- if (ret == 0) {
+ if (map != NULL) {
brw_bo_unmap(batch->bo);
}
}
if (offset + size <= intel_obj->gpu_active_start ||
intel_obj->gpu_active_end <= offset) {
if (brw->has_llc) {
- brw_bo_map_unsynchronized(brw, intel_obj->buffer);
- memcpy(intel_obj->buffer->virtual + offset, data, size);
+ void *map = brw_bo_map_unsynchronized(brw, intel_obj->buffer);
+ memcpy(map + offset, data, size);
brw_bo_unmap(intel_obj->buffer);
if (intel_obj->gpu_active_end > intel_obj->gpu_active_start)
length +
intel_obj->map_extra[index],
alignment);
+ void *map;
if (brw->has_llc) {
- brw_bo_map(brw, intel_obj->range_map_bo[index],
- (access & GL_MAP_WRITE_BIT) != 0);
+ map = brw_bo_map(brw, intel_obj->range_map_bo[index],
+ (access & GL_MAP_WRITE_BIT) != 0);
} else {
- brw_bo_map_gtt(brw, intel_obj->range_map_bo[index]);
+ map = brw_bo_map_gtt(brw, intel_obj->range_map_bo[index]);
}
- obj->Mappings[index].Pointer =
- intel_obj->range_map_bo[index]->virtual + intel_obj->map_extra[index];
+ obj->Mappings[index].Pointer = map + intel_obj->map_extra[index];
return obj->Mappings[index].Pointer;
}
+ void *map;
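+ /* Pick the mapping: unsynchronized if the app requested it, through the
+  * GTT for write-only or persistent maps on non-LLC platforms, and a
+  * regular CPU map otherwise.
+  */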
if (access & GL_MAP_UNSYNCHRONIZED_BIT) {
if (!brw->has_llc && brw->perf_debug &&
brw_bo_busy(intel_obj->buffer)) {
perf_debug("MapBufferRange with GL_MAP_UNSYNCHRONIZED_BIT stalling (it's actually synchronized on non-LLC platforms)\n");
}
- brw_bo_map_unsynchronized(brw, intel_obj->buffer);
+ map = brw_bo_map_unsynchronized(brw, intel_obj->buffer);
} else if (!brw->has_llc && (!(access & GL_MAP_READ_BIT) ||
(access & GL_MAP_PERSISTENT_BIT))) {
- brw_bo_map_gtt(brw, intel_obj->buffer);
+ map = brw_bo_map_gtt(brw, intel_obj->buffer);
mark_buffer_inactive(intel_obj);
} else {
- brw_bo_map(brw, intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
+ map = brw_bo_map(brw, intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
mark_buffer_inactive(intel_obj);
}
- obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset;
+ obj->Mappings[index].Pointer = map + offset;
return obj->Mappings[index].Pointer;
}
*
* Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
*/
- const int ret = brw_bo_map_gtt(brw, mt->mcs_buf->bo);
- if (unlikely(ret)) {
+ void *map = brw_bo_map_gtt(brw, mt->mcs_buf->bo);
+ if (unlikely(map == NULL)) {
fprintf(stderr, "Failed to map mcs buffer into GTT\n");
brw_bo_unreference(mt->mcs_buf->bo);
free(mt->mcs_buf);
return;
}
- void *data = mt->mcs_buf->bo->virtual;
+ void *data = map;
memset(data, init_value, mt->mcs_buf->size);
brw_bo_unmap(mt->mcs_buf->bo);
}
* long as cache consistency is maintained).
*/
if (mt->tiling != I915_TILING_NONE || mt->is_scanout)
- brw_bo_map_gtt(brw, bo);
+ return brw_bo_map_gtt(brw, bo);
else
- brw_bo_map(brw, bo, true);
-
- return bo->virtual;
+ return brw_bo_map(brw, bo, true);
}
static void
/* The miptree's buffer. */
struct brw_bo *bo;
- int error = 0;
-
uint32_t cpp;
mem_copy_fn mem_copy = NULL;
intel_batchbuffer_flush(brw);
}
- error = brw_bo_map(brw, bo, false /* write enable */);
- if (error) {
+ void *map = brw_bo_map(brw, bo, false /* write enable */);
+ if (map == NULL) {
DBG("%s: failed to map bo\n", __func__);
return false;
}
xoffset * cpp, (xoffset + width) * cpp,
yoffset, yoffset + height,
pixels - (ptrdiff_t) yoffset * dst_pitch - (ptrdiff_t) xoffset * cpp,
- bo->virtual + irb->mt->offset,
+ map + irb->mt->offset,
dst_pitch, irb->mt->pitch,
brw->has_swizzling,
irb->mt->tiling,
struct brw_bo *results, *bo;
uint32_t *batch;
uint32_t offset = 0;
+ void *map;
bool success = false;
/* Create a zero'ed temporary buffer for reading our results */
if (bo == NULL)
goto err_results;
- if (brw_bo_map(NULL, bo, 1))
+ map = brw_bo_map(NULL, bo, 1);
+ if (!map)
goto err_batch;
- batch = bo->virtual;
+ batch = map;
/* Write the register. */
*batch++ = MI_LOAD_REGISTER_IMM | (3 - 2);
*batch++ = MI_STORE_REGISTER_MEM | (3 - 2);
*batch++ = reg;
struct drm_i915_gem_relocation_entry reloc = {
- .offset = (char *) batch - (char *) bo->virtual,
+ .offset = (char *) batch - (char *) map,
.delta = offset * sizeof(uint32_t),
.target_handle = results->gem_handle,
.read_domains = I915_GEM_DOMAIN_INSTRUCTION,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = (uintptr_t) exec_objects,
.buffer_count = 2,
- .batch_len = ALIGN((char *) batch - (char *) bo->virtual, 8),
+ .batch_len = ALIGN((char *) batch - (char *) map, 8),
.flags = I915_EXEC_RENDER,
};
drmIoctl(dri_screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
/* Check whether the value got written. */
- if (brw_bo_map(NULL, results, false) == 0) {
- success = *((uint32_t *)results->virtual + offset) == expected_value;
+ void *results_map = brw_bo_map(NULL, results, false);
+ if (results_map) {
+ success = *((uint32_t *)results_map + offset) == expected_value;
brw_bo_unmap(results);
}
/* The miptree's buffer. */
struct brw_bo *bo;
- int error = 0;
-
uint32_t cpp;
mem_copy_fn mem_copy = NULL;
intel_batchbuffer_flush(brw);
}
- error = brw_bo_map(brw, bo, false /* write enable */);
- if (error) {
+ void *map = brw_bo_map(brw, bo, false /* write enable */);
+ if (map == NULL) {
DBG("%s: failed to map bo\n", __func__);
return false;
}
xoffset * cpp, (xoffset + width) * cpp,
yoffset, yoffset + height,
pixels - (ptrdiff_t) yoffset * dst_pitch - (ptrdiff_t) xoffset * cpp,
- bo->virtual,
+ map,
dst_pitch, image->mt->pitch,
brw->has_swizzling,
image->mt->tiling,
/* The miptree's buffer. */
struct brw_bo *bo;
- int error = 0;
-
uint32_t cpp;
mem_copy_fn mem_copy = NULL;
intel_batchbuffer_flush(brw);
}
- error = brw_bo_map(brw, bo, true /* write enable */);
- if (error || bo->virtual == NULL) {
+ void *map = brw_bo_map(brw, bo, true /* write enable */);
+ if (map == NULL) {
DBG("%s: failed to map bo\n", __func__);
return false;
}
linear_to_tiled(
xoffset * cpp, (xoffset + width) * cpp,
yoffset, yoffset + height,
- bo->virtual,
+ map,
pixels - (ptrdiff_t) yoffset * src_pitch - (ptrdiff_t) xoffset * cpp,
image->mt->pitch, src_pitch,
brw->has_swizzling,
void
intel_upload_finish(struct brw_context *brw)
{
+ assert((brw->upload.bo == NULL) == (brw->upload.map == NULL));
if (!brw->upload.bo)
return;
brw_bo_unmap(brw->upload.bo);
brw_bo_unreference(brw->upload.bo);
brw->upload.bo = NULL;
+ brw->upload.map = NULL;
brw->upload.next_offset = 0;
}
offset = 0;
}
+ assert((brw->upload.bo == NULL) == (brw->upload.map == NULL));
if (!brw->upload.bo) {
brw->upload.bo = brw_bo_alloc(brw->bufmgr, "streamed data",
MAX2(INTEL_UPLOAD_SIZE, size), 4096);
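+ /* The upload BO stays mapped for its whole lifetime: a cached CPU map on
+  * LLC platforms, a GTT map otherwise.
+  */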
if (brw->has_llc)
- brw_bo_map(brw, brw->upload.bo, true);
+ brw->upload.map = brw_bo_map(brw, brw->upload.bo, true);
else
- brw_bo_map_gtt(brw, brw->upload.bo);
+ brw->upload.map = brw_bo_map_gtt(brw, brw->upload.bo);
}
brw->upload.next_offset = offset + size;
brw_bo_reference(brw->upload.bo);
}
- return brw->upload.bo->virtual + offset;
+ return brw->upload.map + offset;
}
/**