From 5dc35e1664f7858679424f2c8ab606e6c0d8b768 Mon Sep 17 00:00:00 2001 From: Matt Turner Date: Fri, 5 May 2017 11:20:05 -0700 Subject: [PATCH] i965: Remove brw_bo's virtual member Just return the map from brw_map_bo_* Reviewed-by: Kenneth Graunke --- src/mesa/drivers/dri/i965/brw_bufmgr.c | 49 +++++++------------ src/mesa/drivers/dri/i965/brw_bufmgr.h | 19 ++----- src/mesa/drivers/dri/i965/brw_context.h | 2 + .../drivers/dri/i965/brw_performance_query.c | 10 ++-- src/mesa/drivers/dri/i965/brw_program.c | 3 +- src/mesa/drivers/dri/i965/brw_program_cache.c | 31 +++++++----- src/mesa/drivers/dri/i965/brw_queryobj.c | 3 +- src/mesa/drivers/dri/i965/gen6_queryobj.c | 3 +- src/mesa/drivers/dri/i965/gen6_sol.c | 3 +- src/mesa/drivers/dri/i965/intel_batchbuffer.c | 17 +++---- .../drivers/dri/i965/intel_buffer_objects.c | 23 ++++----- src/mesa/drivers/dri/i965/intel_mipmap_tree.c | 12 ++--- src/mesa/drivers/dri/i965/intel_pixel_read.c | 8 ++- src/mesa/drivers/dri/i965/intel_screen.c | 15 +++--- src/mesa/drivers/dri/i965/intel_tex_image.c | 8 ++- .../drivers/dri/i965/intel_tex_subimage.c | 8 ++- src/mesa/drivers/dri/i965/intel_upload.c | 9 ++-- 17 files changed, 102 insertions(+), 121 deletions(-) diff --git a/src/mesa/drivers/dri/i965/brw_bufmgr.c b/src/mesa/drivers/dri/i965/brw_bufmgr.c index 6ea69787d20..9a65d32dd0a 100644 --- a/src/mesa/drivers/dri/i965/brw_bufmgr.c +++ b/src/mesa/drivers/dri/i965/brw_bufmgr.c @@ -468,7 +468,6 @@ brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr, bo->size = open_arg.size; bo->offset64 = 0; - bo->virtual = NULL; bo->bufmgr = bufmgr; bo->gem_handle = open_arg.handle; bo->name = name; @@ -658,11 +657,10 @@ set_domain(struct brw_context *brw, const char *action, } } -int +void * brw_bo_map(struct brw_context *brw, struct brw_bo *bo, int write_enable) { struct brw_bufmgr *bufmgr = bo->bufmgr; - int ret; pthread_mutex_lock(&bufmgr->lock); @@ -675,20 +673,19 @@ brw_bo_map(struct brw_context *brw, struct brw_bo *bo, int write_enable) memclear(mmap_arg); mmap_arg.handle = bo->gem_handle; mmap_arg.size = bo->size; - ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg); + int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg); if (ret != 0) { ret = -errno; DBG("%s:%d: Error mapping buffer %d (%s): %s .\n", __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno)); pthread_mutex_unlock(&bufmgr->lock); - return ret; + return NULL; } bo->map_count++; VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1)); bo->mem_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr; } DBG("bo_map: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->mem_virtual); - bo->virtual = bo->mem_virtual; set_domain(brw, "CPU mapping", bo, I915_GEM_DOMAIN_CPU, write_enable ? I915_GEM_DOMAIN_CPU : 0); @@ -697,14 +694,13 @@ brw_bo_map(struct brw_context *brw, struct brw_bo *bo, int write_enable) VG(VALGRIND_MAKE_MEM_DEFINED(bo->mem_virtual, bo->size)); pthread_mutex_unlock(&bufmgr->lock); - return 0; + return bo->mem_virtual; } -static int +static void * map_gtt(struct brw_bo *bo) { struct brw_bufmgr *bufmgr = bo->bufmgr; - int ret; /* Get a mapping of the buffer if we haven't before. */ if (bo->gtt_virtual == NULL) { @@ -717,12 +713,11 @@ map_gtt(struct brw_bo *bo) mmap_arg.handle = bo->gem_handle; /* Get the fake offset back... 
*/ - ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg); + int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg); if (ret != 0) { - ret = -errno; DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n", __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno)); - return ret; + return NULL; } /* and mmap it */ @@ -730,34 +725,30 @@ map_gtt(struct brw_bo *bo) MAP_SHARED, bufmgr->fd, mmap_arg.offset); if (bo->gtt_virtual == MAP_FAILED) { bo->gtt_virtual = NULL; - ret = -errno; DBG("%s:%d: Error mapping buffer %d (%s): %s .\n", __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno)); - return ret; + return NULL; } } - bo->map_count++; - bo->virtual = bo->gtt_virtual; - DBG("bo_map_gtt: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->gtt_virtual); - return 0; + bo->map_count++; + return bo->gtt_virtual; } -int +void * brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo) { struct brw_bufmgr *bufmgr = bo->bufmgr; - int ret; pthread_mutex_lock(&bufmgr->lock); - ret = map_gtt(bo); - if (ret) { + void *map = map_gtt(bo); + if (map == NULL) { pthread_mutex_unlock(&bufmgr->lock); - return ret; + return NULL; } /* Now move it to the GTT domain so that the GPU and CPU @@ -776,7 +767,7 @@ brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo) VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size)); pthread_mutex_unlock(&bufmgr->lock); - return 0; + return map; } /** @@ -793,11 +784,10 @@ brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo) * undefined). */ -int +void * brw_bo_map_unsynchronized(struct brw_context *brw, struct brw_bo *bo) { struct brw_bufmgr *bufmgr = bo->bufmgr; - int ret; /* If the CPU cache isn't coherent with the GTT, then use a * regular synchronized mapping. The problem is that we don't @@ -811,15 +801,15 @@ brw_bo_map_unsynchronized(struct brw_context *brw, struct brw_bo *bo) pthread_mutex_lock(&bufmgr->lock); - ret = map_gtt(bo); - if (ret == 0) { + void *map = map_gtt(bo); + if (map != NULL) { bo_mark_mmaps_incoherent(bo); VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size)); } pthread_mutex_unlock(&bufmgr->lock); - return ret; + return map; } int @@ -841,7 +831,6 @@ brw_bo_unmap(struct brw_bo *bo) if (--bo->map_count == 0) { bo_mark_mmaps_incoherent(bo); - bo->virtual = NULL; } pthread_mutex_unlock(&bufmgr->lock); diff --git a/src/mesa/drivers/dri/i965/brw_bufmgr.h b/src/mesa/drivers/dri/i965/brw_bufmgr.h index d0b88fbcfda..1e3e8cff456 100644 --- a/src/mesa/drivers/dri/i965/brw_bufmgr.h +++ b/src/mesa/drivers/dri/i965/brw_bufmgr.h @@ -62,16 +62,6 @@ struct brw_bo { */ uint64_t align; - /** - * Virtual address for accessing the buffer data. Only valid while - * mapped. - */ -#ifdef __cplusplus - void *virt; -#else - void *virtual; -#endif - /** Buffer manager context associated with this buffer object */ struct brw_bufmgr *bufmgr; @@ -182,10 +172,9 @@ void brw_bo_unreference(struct brw_bo *bo); * Maps the buffer into userspace. * * This function will block waiting for any existing execution on the - * buffer to complete, first. The resulting mapping is available at - * buf->virtual. + * buffer to complete, first. The resulting mapping is returned. 
*/ -int brw_bo_map(struct brw_context *brw, struct brw_bo *bo, int write_enable); +MUST_CHECK void *brw_bo_map(struct brw_context *brw, struct brw_bo *bo, int write_enable); /** * Reduces the refcount on the userspace mapping of the buffer @@ -258,8 +247,8 @@ struct brw_bo *brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr, const char *name, unsigned int handle); void brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr); -int brw_bo_map_unsynchronized(struct brw_context *brw, struct brw_bo *bo); -int brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo); +MUST_CHECK void *brw_bo_map_unsynchronized(struct brw_context *brw, struct brw_bo *bo); +MUST_CHECK void *brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo); int brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns); diff --git a/src/mesa/drivers/dri/i965/brw_context.h b/src/mesa/drivers/dri/i965/brw_context.h index c15abe1d48f..0081a318af6 100644 --- a/src/mesa/drivers/dri/i965/brw_context.h +++ b/src/mesa/drivers/dri/i965/brw_context.h @@ -368,6 +368,7 @@ struct brw_cache { struct brw_cache_item **items; struct brw_bo *bo; + void *map; GLuint size, n_items; uint32_t next_offset; @@ -672,6 +673,7 @@ struct brw_context struct { struct brw_bo *bo; + void *map; uint32_t next_offset; } upload; diff --git a/src/mesa/drivers/dri/i965/brw_performance_query.c b/src/mesa/drivers/dri/i965/brw_performance_query.c index d1c4b6514bc..1342ece8add 100644 --- a/src/mesa/drivers/dri/i965/brw_performance_query.c +++ b/src/mesa/drivers/dri/i965/brw_performance_query.c @@ -713,8 +713,7 @@ accumulate_oa_reports(struct brw_context *brw, if (!read_oa_samples(brw)) goto error; - brw_bo_map(brw, obj->oa.bo, false); - query_buffer = obj->oa.bo->virtual; + query_buffer = brw_bo_map(brw, obj->oa.bo, false); start = last = query_buffer; end = query_buffer + (MI_RPC_BO_END_OFFSET_BYTES / sizeof(uint32_t)); @@ -993,8 +992,8 @@ brw_begin_perf_query(struct gl_context *ctx, MI_RPC_BO_SIZE, 64); #ifdef DEBUG /* Pre-filling the BO helps debug whether writes landed. */ - brw_bo_map(brw, obj->oa.bo, true); - memset((char *) obj->oa.bo->virtual, 0x80, MI_RPC_BO_SIZE); + void *map = brw_bo_map(brw, obj->oa.bo, true); + memset(map, 0x80, MI_RPC_BO_SIZE); brw_bo_unmap(obj->oa.bo); #endif @@ -1215,8 +1214,7 @@ get_pipeline_stats_data(struct brw_context *brw, int n_counters = obj->query->n_counters; uint8_t *p = data; - brw_bo_map(brw, obj->pipeline_stats.bo, false); - uint64_t *start = obj->pipeline_stats.bo->virtual; + uint64_t *start = brw_bo_map(brw, obj->pipeline_stats.bo, false); uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t)); for (int i = 0; i < n_counters; i++) { diff --git a/src/mesa/drivers/dri/i965/brw_program.c b/src/mesa/drivers/dri/i965/brw_program.c index d26dce07f97..e5c36f13083 100644 --- a/src/mesa/drivers/dri/i965/brw_program.c +++ b/src/mesa/drivers/dri/i965/brw_program.c @@ -578,8 +578,7 @@ brw_collect_shader_time(struct brw_context *brw) * delaying reading the reports, but it doesn't look like it's a big * overhead compared to the cost of tracking the time in the first place. 
*/ - brw_bo_map(brw, brw->shader_time.bo, true); - void *bo_map = brw->shader_time.bo->virtual; + void *bo_map = brw_bo_map(brw, brw->shader_time.bo, true); for (int i = 0; i < brw->shader_time.num_entries; i++) { uint32_t *times = bo_map + i * 3 * BRW_SHADER_TIME_STRIDE; diff --git a/src/mesa/drivers/dri/i965/brw_program_cache.c b/src/mesa/drivers/dri/i965/brw_program_cache.c index d3555b42391..9c209b8c455 100644 --- a/src/mesa/drivers/dri/i965/brw_program_cache.c +++ b/src/mesa/drivers/dri/i965/brw_program_cache.c @@ -214,21 +214,21 @@ brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size) { struct brw_context *brw = cache->brw; struct brw_bo *new_bo; + void *llc_map; new_bo = brw_bo_alloc(brw->bufmgr, "program cache", new_size, 64); if (can_do_exec_capture(brw->screen)) new_bo->kflags = EXEC_OBJECT_CAPTURE; if (brw->has_llc) - brw_bo_map_unsynchronized(brw, new_bo); + llc_map = brw_bo_map_unsynchronized(brw, new_bo); /* Copy any existing data that needs to be saved. */ if (cache->next_offset != 0) { if (brw->has_llc) { - memcpy(new_bo->virtual, cache->bo->virtual, cache->next_offset); + memcpy(llc_map, cache->map, cache->next_offset); } else { - brw_bo_map(brw, cache->bo, false); - brw_bo_subdata(new_bo, 0, cache->next_offset, - cache->bo->virtual); + void *map = brw_bo_map(brw, cache->bo, false); + brw_bo_subdata(new_bo, 0, cache->next_offset, map); brw_bo_unmap(cache->bo); } } @@ -237,6 +237,7 @@ brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size) brw_bo_unmap(cache->bo); brw_bo_unreference(cache->bo); cache->bo = new_bo; + cache->map = brw->has_llc ? llc_map : NULL; cache->bo_used_by_gpu = false; /* Since we have a new BO in place, we need to signal the units @@ -265,9 +266,13 @@ brw_lookup_prog(const struct brw_cache *cache, if (item->cache_id != cache_id || item->size != data_size) continue; + void *map; if (!brw->has_llc) - brw_bo_map(brw, cache->bo, false); - ret = memcmp(cache->bo->virtual + item->offset, data, item->size); + map = brw_bo_map(brw, cache->bo, false); + else + map = cache->map; + + ret = memcmp(map + item->offset, data, item->size); if (!brw->has_llc) brw_bo_unmap(cache->bo); if (ret) @@ -369,7 +374,7 @@ brw_upload_cache(struct brw_cache *cache, /* Copy data to the buffer */ if (brw->has_llc) { - memcpy((char *)cache->bo->virtual + item->offset, data, data_size); + memcpy(cache->map + item->offset, data, data_size); } else { brw_bo_subdata(cache->bo, item->offset, data_size, data); } @@ -412,7 +417,7 @@ brw_init_caches(struct brw_context *brw) if (can_do_exec_capture(brw->screen)) cache->bo->kflags = EXEC_OBJECT_CAPTURE; if (brw->has_llc) - brw_bo_map_unsynchronized(brw, cache->bo); + cache->map = brw_bo_map_unsynchronized(brw, cache->bo); } static void @@ -495,6 +500,7 @@ brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache) brw_bo_unmap(cache->bo); brw_bo_unreference(cache->bo); cache->bo = NULL; + cache->map = NULL; } brw_clear_cache(brw, cache); free(cache->items); @@ -541,14 +547,17 @@ brw_print_program_cache(struct brw_context *brw) { const struct brw_cache *cache = &brw->cache; struct brw_cache_item *item; + void *map; if (!brw->has_llc) - brw_bo_map(brw, cache->bo, false); + map = brw_bo_map(brw, cache->bo, false); + else + map = cache->map; for (unsigned i = 0; i < cache->size; i++) { for (item = cache->items[i]; item; item = item->next) { fprintf(stderr, "%s:\n", cache_name(i)); - brw_disassemble(&brw->screen->devinfo, cache->bo->virtual, + brw_disassemble(&brw->screen->devinfo, map, item->offset, item->size, stderr); } } 
diff --git a/src/mesa/drivers/dri/i965/brw_queryobj.c b/src/mesa/drivers/dri/i965/brw_queryobj.c index de93b8bad74..50f30a33893 100644 --- a/src/mesa/drivers/dri/i965/brw_queryobj.c +++ b/src/mesa/drivers/dri/i965/brw_queryobj.c @@ -146,8 +146,7 @@ brw_queryobj_get_results(struct gl_context *ctx, } } - brw_bo_map(brw, query->bo, false); - results = query->bo->virtual; + results = brw_bo_map(brw, query->bo, false); switch (query->Base.Target) { case GL_TIME_ELAPSED_EXT: /* The query BO contains the starting and ending timestamps. diff --git a/src/mesa/drivers/dri/i965/gen6_queryobj.c b/src/mesa/drivers/dri/i965/gen6_queryobj.c index a28f83af1d6..f8329bbefba 100644 --- a/src/mesa/drivers/dri/i965/gen6_queryobj.c +++ b/src/mesa/drivers/dri/i965/gen6_queryobj.c @@ -221,8 +221,7 @@ gen6_queryobj_get_results(struct gl_context *ctx, if (query->bo == NULL) return; - brw_bo_map(brw, query->bo, false); - uint64_t *results = query->bo->virtual; + uint64_t *results = brw_bo_map(brw, query->bo, false); switch (query->Base.Target) { case GL_TIME_ELAPSED: /* The query BO contains the starting and ending timestamps. diff --git a/src/mesa/drivers/dri/i965/gen6_sol.c b/src/mesa/drivers/dri/i965/gen6_sol.c index 436775a4c0f..00b29bd6fd6 100644 --- a/src/mesa/drivers/dri/i965/gen6_sol.c +++ b/src/mesa/drivers/dri/i965/gen6_sol.c @@ -247,8 +247,7 @@ tally_prims_generated(struct brw_context *brw, if (unlikely(brw->perf_debug && brw_bo_busy(obj->prim_count_bo))) perf_debug("Stalling for # of transform feedback primitives written.\n"); - brw_bo_map(brw, obj->prim_count_bo, false); - uint64_t *prim_counts = obj->prim_count_bo->virtual; + uint64_t *prim_counts = brw_bo_map(brw, obj->prim_count_bo, false); assert(obj->prim_count_buffer_index % (2 * streams) == 0); int pairs = obj->prim_count_buffer_index / (2 * streams); diff --git a/src/mesa/drivers/dri/i965/intel_batchbuffer.c b/src/mesa/drivers/dri/i965/intel_batchbuffer.c index 36faa7ae6f5..01511b17391 100644 --- a/src/mesa/drivers/dri/i965/intel_batchbuffer.c +++ b/src/mesa/drivers/dri/i965/intel_batchbuffer.c @@ -100,8 +100,7 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch, batch->bo = brw_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096); if (has_llc) { - brw_bo_map(NULL, batch->bo, true); - batch->map = batch->bo->virtual; + batch->map = brw_bo_map(NULL, batch->bo, true); } batch->map_next = batch->map; @@ -240,16 +239,16 @@ do_batch_dump(struct brw_context *brw) if (batch->ring != RENDER_RING) return; - int ret = brw_bo_map(brw, batch->bo, false); - if (ret != 0) { + void *map = brw_bo_map(brw, batch->bo, false); + if (map == NULL) { fprintf(stderr, - "WARNING: failed to map batchbuffer (%s), " - "dumping uploaded data instead.\n", strerror(ret)); + "WARNING: failed to map batchbuffer, " + "dumping uploaded data instead.\n"); } - uint32_t *data = batch->bo->virtual ? batch->bo->virtual : batch->map; + uint32_t *data = map ? map : batch->map; uint32_t *end = data + USED_BATCH(*batch); - uint32_t gtt_offset = batch->bo->virtual ? batch->bo->offset64 : 0; + uint32_t gtt_offset = map ? 
batch->bo->offset64 : 0; int length; bool color = INTEL_DEBUG & DEBUG_COLOR; @@ -370,7 +369,7 @@ do_batch_dump(struct brw_context *brw) } } - if (ret == 0) { + if (map != NULL) { brw_bo_unmap(batch->bo); } } diff --git a/src/mesa/drivers/dri/i965/intel_buffer_objects.c b/src/mesa/drivers/dri/i965/intel_buffer_objects.c index 9f1f7932ea7..be9a2b54c6d 100644 --- a/src/mesa/drivers/dri/i965/intel_buffer_objects.c +++ b/src/mesa/drivers/dri/i965/intel_buffer_objects.c @@ -217,8 +217,8 @@ brw_buffer_subdata(struct gl_context *ctx, if (offset + size <= intel_obj->gpu_active_start || intel_obj->gpu_active_end <= offset) { if (brw->has_llc) { - brw_bo_map_unsynchronized(brw, intel_obj->buffer); - memcpy(intel_obj->buffer->virtual + offset, data, size); + void *map = brw_bo_map_unsynchronized(brw, intel_obj->buffer); + memcpy(map + offset, data, size); brw_bo_unmap(intel_obj->buffer); if (intel_obj->gpu_active_end > intel_obj->gpu_active_start) @@ -388,33 +388,34 @@ brw_map_buffer_range(struct gl_context *ctx, length + intel_obj->map_extra[index], alignment); + void *map; if (brw->has_llc) { - brw_bo_map(brw, intel_obj->range_map_bo[index], - (access & GL_MAP_WRITE_BIT) != 0); + map = brw_bo_map(brw, intel_obj->range_map_bo[index], + (access & GL_MAP_WRITE_BIT) != 0); } else { - brw_bo_map_gtt(brw, intel_obj->range_map_bo[index]); + map = brw_bo_map_gtt(brw, intel_obj->range_map_bo[index]); } - obj->Mappings[index].Pointer = - intel_obj->range_map_bo[index]->virtual + intel_obj->map_extra[index]; + obj->Mappings[index].Pointer = map + intel_obj->map_extra[index]; return obj->Mappings[index].Pointer; } + void *map; if (access & GL_MAP_UNSYNCHRONIZED_BIT) { if (!brw->has_llc && brw->perf_debug && brw_bo_busy(intel_obj->buffer)) { perf_debug("MapBufferRange with GL_MAP_UNSYNCHRONIZED_BIT stalling (it's actually synchronized on non-LLC platforms)\n"); } - brw_bo_map_unsynchronized(brw, intel_obj->buffer); + map = brw_bo_map_unsynchronized(brw, intel_obj->buffer); } else if (!brw->has_llc && (!(access & GL_MAP_READ_BIT) || (access & GL_MAP_PERSISTENT_BIT))) { - brw_bo_map_gtt(brw, intel_obj->buffer); + map = brw_bo_map_gtt(brw, intel_obj->buffer); mark_buffer_inactive(intel_obj); } else { - brw_bo_map(brw, intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0); + map = brw_bo_map(brw, intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0); mark_buffer_inactive(intel_obj); } - obj->Mappings[index].Pointer = intel_obj->buffer->virtual + offset; + obj->Mappings[index].Pointer = map + offset; return obj->Mappings[index].Pointer; } diff --git a/src/mesa/drivers/dri/i965/intel_mipmap_tree.c b/src/mesa/drivers/dri/i965/intel_mipmap_tree.c index b77b7fdadd2..ebf77b65501 100644 --- a/src/mesa/drivers/dri/i965/intel_mipmap_tree.c +++ b/src/mesa/drivers/dri/i965/intel_mipmap_tree.c @@ -1382,14 +1382,14 @@ intel_miptree_init_mcs(struct brw_context *brw, * * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff. */ - const int ret = brw_bo_map_gtt(brw, mt->mcs_buf->bo); - if (unlikely(ret)) { + void *map = brw_bo_map_gtt(brw, mt->mcs_buf->bo); + if (unlikely(map == NULL)) { fprintf(stderr, "Failed to map mcs buffer into GTT\n"); brw_bo_unreference(mt->mcs_buf->bo); free(mt->mcs_buf); return; } - void *data = mt->mcs_buf->bo->virtual; + void *data = map; memset(data, init_value, mt->mcs_buf->size); brw_bo_unmap(mt->mcs_buf->bo); } @@ -2433,11 +2433,9 @@ intel_miptree_map_raw(struct brw_context *brw, struct intel_mipmap_tree *mt) * long as cache consistency is maintained). 
*/ if (mt->tiling != I915_TILING_NONE || mt->is_scanout) - brw_bo_map_gtt(brw, bo); + return brw_bo_map_gtt(brw, bo); else - brw_bo_map(brw, bo, true); - - return bo->virtual; + return brw_bo_map(brw, bo, true); } static void diff --git a/src/mesa/drivers/dri/i965/intel_pixel_read.c b/src/mesa/drivers/dri/i965/intel_pixel_read.c index 8793c3e4c0f..3eca28a4b18 100644 --- a/src/mesa/drivers/dri/i965/intel_pixel_read.c +++ b/src/mesa/drivers/dri/i965/intel_pixel_read.c @@ -84,8 +84,6 @@ intel_readpixels_tiled_memcpy(struct gl_context * ctx, /* The miptree's buffer. */ struct brw_bo *bo; - int error = 0; - uint32_t cpp; mem_copy_fn mem_copy = NULL; @@ -147,8 +145,8 @@ intel_readpixels_tiled_memcpy(struct gl_context * ctx, intel_batchbuffer_flush(brw); } - error = brw_bo_map(brw, bo, false /* write enable */); - if (error) { + void *map = brw_bo_map(brw, bo, false /* write enable */); + if (map == NULL) { DBG("%s: failed to map bo\n", __func__); return false; } @@ -188,7 +186,7 @@ intel_readpixels_tiled_memcpy(struct gl_context * ctx, xoffset * cpp, (xoffset + width) * cpp, yoffset, yoffset + height, pixels - (ptrdiff_t) yoffset * dst_pitch - (ptrdiff_t) xoffset * cpp, - bo->virtual + irb->mt->offset, + map + irb->mt->offset, dst_pitch, irb->mt->pitch, brw->has_swizzling, irb->mt->tiling, diff --git a/src/mesa/drivers/dri/i965/intel_screen.c b/src/mesa/drivers/dri/i965/intel_screen.c index 22f6d9af03c..3399c96132b 100644 --- a/src/mesa/drivers/dri/i965/intel_screen.c +++ b/src/mesa/drivers/dri/i965/intel_screen.c @@ -1408,6 +1408,7 @@ intel_detect_pipelined_register(struct intel_screen *screen, struct brw_bo *results, *bo; uint32_t *batch; uint32_t offset = 0; + void *map; bool success = false; /* Create a zero'ed temporary buffer for reading our results */ @@ -1419,10 +1420,11 @@ intel_detect_pipelined_register(struct intel_screen *screen, if (bo == NULL) goto err_results; - if (brw_bo_map(NULL, bo, 1)) + map = brw_bo_map(NULL, bo, 1); + if (!map) goto err_batch; - batch = bo->virtual; + batch = map; /* Write the register. */ *batch++ = MI_LOAD_REGISTER_IMM | (3 - 2); @@ -1433,7 +1435,7 @@ intel_detect_pipelined_register(struct intel_screen *screen, *batch++ = MI_STORE_REGISTER_MEM | (3 - 2); *batch++ = reg; struct drm_i915_gem_relocation_entry reloc = { - .offset = (char *) batch - (char *) bo->virtual, + .offset = (char *) batch - (char *) map, .delta = offset * sizeof(uint32_t), .target_handle = results->gem_handle, .read_domains = I915_GEM_DOMAIN_INSTRUCTION, @@ -1464,7 +1466,7 @@ intel_detect_pipelined_register(struct intel_screen *screen, struct drm_i915_gem_execbuffer2 execbuf = { .buffers_ptr = (uintptr_t) exec_objects, .buffer_count = 2, - .batch_len = ALIGN((char *) batch - (char *) bo->virtual, 8), + .batch_len = ALIGN((char *) batch - (char *) map, 8), .flags = I915_EXEC_RENDER, }; @@ -1475,8 +1477,9 @@ intel_detect_pipelined_register(struct intel_screen *screen, drmIoctl(dri_screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf); /* Check whether the value got written. 
*/ - if (brw_bo_map(NULL, results, false) == 0) { - success = *((uint32_t *)results->virtual + offset) == expected_value; + void *results_map = brw_bo_map(NULL, results, false); + if (results_map) { + success = *((uint32_t *)results_map + offset) == expected_value; brw_bo_unmap(results); } diff --git a/src/mesa/drivers/dri/i965/intel_tex_image.c b/src/mesa/drivers/dri/i965/intel_tex_image.c index 649b3907d13..ffc98b6c42a 100644 --- a/src/mesa/drivers/dri/i965/intel_tex_image.c +++ b/src/mesa/drivers/dri/i965/intel_tex_image.c @@ -473,8 +473,6 @@ intel_gettexsubimage_tiled_memcpy(struct gl_context *ctx, /* The miptree's buffer. */ struct brw_bo *bo; - int error = 0; - uint32_t cpp; mem_copy_fn mem_copy = NULL; @@ -536,8 +534,8 @@ intel_gettexsubimage_tiled_memcpy(struct gl_context *ctx, intel_batchbuffer_flush(brw); } - error = brw_bo_map(brw, bo, false /* write enable */); - if (error) { + void *map = brw_bo_map(brw, bo, false /* write enable */); + if (map == NULL) { DBG("%s: failed to map bo\n", __func__); return false; } @@ -562,7 +560,7 @@ intel_gettexsubimage_tiled_memcpy(struct gl_context *ctx, xoffset * cpp, (xoffset + width) * cpp, yoffset, yoffset + height, pixels - (ptrdiff_t) yoffset * dst_pitch - (ptrdiff_t) xoffset * cpp, - bo->virtual, + map, dst_pitch, image->mt->pitch, brw->has_swizzling, image->mt->tiling, diff --git a/src/mesa/drivers/dri/i965/intel_tex_subimage.c b/src/mesa/drivers/dri/i965/intel_tex_subimage.c index 912622253a4..54c0bfe705d 100644 --- a/src/mesa/drivers/dri/i965/intel_tex_subimage.c +++ b/src/mesa/drivers/dri/i965/intel_tex_subimage.c @@ -86,8 +86,6 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx, /* The miptree's buffer. */ struct brw_bo *bo; - int error = 0; - uint32_t cpp; mem_copy_fn mem_copy = NULL; @@ -148,8 +146,8 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx, intel_batchbuffer_flush(brw); } - error = brw_bo_map(brw, bo, true /* write enable */); - if (error || bo->virtual == NULL) { + void *map = brw_bo_map(brw, bo, true /* write enable */); + if (map == NULL) { DBG("%s: failed to map bo\n", __func__); return false; } @@ -177,7 +175,7 @@ intel_texsubimage_tiled_memcpy(struct gl_context * ctx, linear_to_tiled( xoffset * cpp, (xoffset + width) * cpp, yoffset, yoffset + height, - bo->virtual, + map, pixels - (ptrdiff_t) yoffset * src_pitch - (ptrdiff_t) xoffset * cpp, image->mt->pitch, src_pitch, brw->has_swizzling, diff --git a/src/mesa/drivers/dri/i965/intel_upload.c b/src/mesa/drivers/dri/i965/intel_upload.c index bb1e7d014b2..1b8353097dd 100644 --- a/src/mesa/drivers/dri/i965/intel_upload.c +++ b/src/mesa/drivers/dri/i965/intel_upload.c @@ -47,12 +47,14 @@ void intel_upload_finish(struct brw_context *brw) { + assert((brw->upload.bo == NULL) == (brw->upload.map == NULL)); if (!brw->upload.bo) return; brw_bo_unmap(brw->upload.bo); brw_bo_unreference(brw->upload.bo); brw->upload.bo = NULL; + brw->upload.map = NULL; brw->upload.next_offset = 0; } @@ -94,13 +96,14 @@ intel_upload_space(struct brw_context *brw, offset = 0; } + assert((brw->upload.bo == NULL) == (brw->upload.map == NULL)); if (!brw->upload.bo) { brw->upload.bo = brw_bo_alloc(brw->bufmgr, "streamed data", MAX2(INTEL_UPLOAD_SIZE, size), 4096); if (brw->has_llc) - brw_bo_map(brw, brw->upload.bo, true); + brw->upload.map = brw_bo_map(brw, brw->upload.bo, true); else - brw_bo_map_gtt(brw, brw->upload.bo); + brw->upload.map = brw_bo_map_gtt(brw, brw->upload.bo); } brw->upload.next_offset = offset + size; @@ -112,7 +115,7 @@ intel_upload_space(struct brw_context *brw, 
brw_bo_reference(brw->upload.bo); } - return brw->upload.bo->virtual + offset; + return brw->upload.map + offset; } /** -- 2.30.2
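
A minimal sketch of the calling convention this patch converts callers to, assuming a valid struct brw_context *brw and an already-allocated struct brw_bo *bo; brw_bo_map() and brw_bo_unmap() are the functions from the diff above, while the error path is illustrative (modeled on the intel_pixel_read.c caller) rather than lifted from any one site:

   /* The mapping is now the return value of brw_bo_map(); there is no
    * bo->virtual to read back afterwards, so a NULL check replaces the
    * old "did the ioctl return 0" check.
    */
   void *map = brw_bo_map(brw, bo, false /* write enable */);
   if (map == NULL) {
      DBG("%s: failed to map bo\n", __func__);
      return false;
   }

   /* ... read or write through 'map' ... */

   brw_bo_unmap(bo);   /* unmapping is unchanged and still pairs with the map */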