i965/drm: Rename drm_bacon_bo to brw_bo.

The bacon is all gone.
This renames both the class and the related functions. We're about to
run indent on the bufmgr code, so no need to worry about fixing bad
indentation.
Acked-by: Jason Ekstrand <jason@jlekstrand.net>
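
The rename is mechanical at every call site; only the spelling changes, and
since the typedef is gone, callers now write out "struct brw_bo *". A minimal
before/after sketch (the bufmgr comes from brw_bufmgr_init(); the buffer name
and sizes here are illustrative, not taken from this patch):

   /* Before: */
   drm_bacon_bo *bo = drm_bacon_bo_alloc(bufmgr, "example", 4096, 4096);
   drm_bacon_bo_unreference(bo);

   /* After: */
   struct brw_bo *bo = brw_bo_alloc(bufmgr, "example", 4096, 4096);
   brw_bo_unreference(bo);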
struct gen_device_info;
-typedef struct _drm_bacon_bo drm_bacon_bo;
-
-struct _drm_bacon_bo {
+struct brw_bo {
/**
* Size in bytes of the buffer object.
*
/**
* Last seen card virtual address (offset from the beginning of the
* aperture) for the object. This should be used to fill relocation
- * entries when calling drm_bacon_bo_emit_reloc()
+ * entries when calling brw_bo_emit_reloc()
*/
uint64_t offset64;
*
* Buffer objects are not necessarily initially mapped into CPU virtual
* address space or graphics device aperture. They must be mapped
- * using bo_map() or drm_bacon_gem_bo_map_gtt() to be used by the CPU.
+ * using bo_map() or brw_bo_map_gtt() to be used by the CPU.
*/
-drm_bacon_bo *drm_bacon_bo_alloc(struct brw_bufmgr *bufmgr, const char *name,
+struct brw_bo *brw_bo_alloc(struct brw_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
/**
* Allocate a buffer object, hinting that it will be used as a
*
* This is otherwise the same as bo_alloc.
*/
-drm_bacon_bo *drm_bacon_bo_alloc_for_render(struct brw_bufmgr *bufmgr,
+struct brw_bo *brw_bo_alloc_for_render(struct brw_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
* 'tiling_mode' field on return, as well as the pitch value, which
 * may have been rounded up to accommodate tiling restrictions.
*/
-drm_bacon_bo *drm_bacon_bo_alloc_tiled(struct brw_bufmgr *bufmgr,
+struct brw_bo *brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr,
const char *name,
int x, int y, int cpp,
uint32_t *tiling_mode,
unsigned long flags);
/** Takes a reference on a buffer object */
-void drm_bacon_bo_reference(drm_bacon_bo *bo);
+void brw_bo_reference(struct brw_bo *bo);
/**
* Releases a reference on a buffer object, freeing the data if
* no references remain.
*/
-void drm_bacon_bo_unreference(drm_bacon_bo *bo);
+void brw_bo_unreference(struct brw_bo *bo);
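
A sketch of the ownership rule these two imply: take a reference before
publishing a pointer you did not allocate, drop the old one afterwards. The
helper below is hypothetical; bo is assumed non-NULL, and unreference is
assumed to tolerate NULL, as the unconditional calls later in this patch
suggest:

   /* Hypothetical helper: replace a cached BO pointer we don't own. */
   static void
   example_set_bo(struct brw_bo **slot, struct brw_bo *bo)
   {
      brw_bo_reference(bo);       /* we become a new owner of bo */
      brw_bo_unreference(*slot);  /* drop the old reference (assumed NULL-safe) */
      *slot = bo;
   }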
/**
* Maps the buffer into userspace.
 * buffer to complete first. The resulting mapping is available at
* buf->virtual.
*/
-int drm_bacon_bo_map(drm_bacon_bo *bo, int write_enable);
+int brw_bo_map(struct brw_bo *bo, int write_enable);
/**
* Reduces the refcount on the userspace mapping of the buffer
* object.
*/
-int drm_bacon_bo_unmap(drm_bacon_bo *bo);
+int brw_bo_unmap(struct brw_bo *bo);
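
brw_bo_map() leaves the mapping at bo->virtual (see the comment above), so a
write cycle looks like this sketch; memset() needs <string.h>:

   static void
   example_clear_bo(struct brw_bo *bo)
   {
      if (brw_bo_map(bo, 1 /* write_enable */) != 0)
         return; /* mapping failed */
      memset(bo->virtual, 0, bo->size);
      brw_bo_unmap(bo);
   }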
/** Write data into an object. */
-int drm_bacon_bo_subdata(drm_bacon_bo *bo, unsigned long offset,
+int brw_bo_subdata(struct brw_bo *bo, unsigned long offset,
unsigned long size, const void *data);
/** Read data from an object. */
-int drm_bacon_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
+int brw_bo_get_subdata(struct brw_bo *bo, unsigned long offset,
unsigned long size, void *data);
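
These two copy without an explicit map/unmap; a round-trip sketch, assuming
the BO is at least 16 bytes:

   static void
   example_roundtrip(struct brw_bo *bo)
   {
      const uint32_t in[4] = { 1, 2, 3, 4 };
      uint32_t out[4];

      brw_bo_subdata(bo, 0, sizeof(in), in);       /* CPU -> BO */
      brw_bo_get_subdata(bo, 0, sizeof(out), out); /* BO -> CPU */
   }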
/**
* Waits for rendering to an object by the GPU to have completed.
* bo_subdata, etc. It is merely a way for the driver to implement
* glFinish.
*/
-void drm_bacon_bo_wait_rendering(drm_bacon_bo *bo);
+void brw_bo_wait_rendering(struct brw_bo *bo);
/**
* Tears down the buffer manager instance.
* \param buf Buffer to set tiling mode for
* \param tiling_mode desired, and returned tiling mode
*/
-int drm_bacon_bo_set_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
+int brw_bo_set_tiling(struct brw_bo *bo, uint32_t * tiling_mode,
uint32_t stride);
/**
* Get the current tiling (and resulting swizzling) mode for the bo.
* \param tiling_mode returned tiling mode
* \param swizzle_mode returned swizzling mode
*/
-int drm_bacon_bo_get_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
+int brw_bo_get_tiling(struct brw_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode);
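
Note that tiling_mode is in/out on the set path (the kernel may not grant the
requested mode). A sketch using the I915_TILING_* values from i915_drm.h, with
an illustrative pitch:

   static void
   example_tiling(struct brw_bo *bo, uint32_t pitch)
   {
      uint32_t tiling = I915_TILING_X;
      uint32_t swizzle;

      brw_bo_set_tiling(bo, &tiling, pitch); /* tiling may come back different */
      brw_bo_get_tiling(bo, &tiling, &swizzle);
   }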
/**
* \param buf Buffer to create a name for
* \param name Returned name
*/
-int drm_bacon_bo_flink(drm_bacon_bo *bo, uint32_t * name);
+int brw_bo_flink(struct brw_bo *bo, uint32_t * name);
/**
* Returns 1 if mapping the buffer for write could cause the process
* to block, due to the object being active in the GPU.
*/
-int drm_bacon_bo_busy(drm_bacon_bo *bo);
+int brw_bo_busy(struct brw_bo *bo);
/**
* Specify the volatility of the buffer.
* Returns 1 if the buffer was retained, or 0 if it was discarded whilst
* marked as I915_MADV_DONTNEED.
*/
-int drm_bacon_bo_madvise(drm_bacon_bo *bo, int madv);
+int brw_bo_madvise(struct brw_bo *bo, int madv);
/**
* Disable buffer reuse for buffers which will be shared in some way,
*
* \param bo Buffer to disable reuse for
*/
-int drm_bacon_bo_disable_reuse(drm_bacon_bo *bo);
+int brw_bo_disable_reuse(struct brw_bo *bo);
/**
* Query whether a buffer is reusable.
*
* \param bo Buffer to query
*/
-int drm_bacon_bo_is_reusable(drm_bacon_bo *bo);
+int brw_bo_is_reusable(struct brw_bo *bo);
/* drm_bacon_bufmgr_gem.c */
struct brw_bufmgr *brw_bufmgr_init(struct gen_device_info *devinfo,
int fd, int batch_size);
-drm_bacon_bo *drm_bacon_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
- const char *name,
- unsigned int handle);
+struct brw_bo *brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
+ const char *name,
+ unsigned int handle);
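
Paired with brw_bo_flink() above, this gives sharing by global (flink) name; a
sketch with error handling reduced to NULL returns, and a second bufmgr
instance assumed:

   static struct brw_bo *
   example_share_by_name(struct brw_bo *bo, struct brw_bufmgr *other_bufmgr)
   {
      uint32_t name;

      if (brw_bo_flink(bo, &name) != 0)
         return NULL;
      return brw_bo_gem_create_from_name(other_bufmgr, "shared bo", name);
   }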
void brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr);
void brw_bufmgr_gem_set_vma_cache_size(struct brw_bufmgr *bufmgr,
int limit);
-int drm_bacon_gem_bo_map_unsynchronized(drm_bacon_bo *bo);
-int drm_bacon_gem_bo_map_gtt(drm_bacon_bo *bo);
+int brw_bo_map_unsynchronized(struct brw_bo *bo);
+int brw_bo_map_gtt(struct brw_bo *bo);
-void *drm_bacon_gem_bo_map__cpu(drm_bacon_bo *bo);
-void *drm_bacon_gem_bo_map__gtt(drm_bacon_bo *bo);
-void *drm_bacon_gem_bo_map__wc(drm_bacon_bo *bo);
+void *brw_bo_map__cpu(struct brw_bo *bo);
+void *brw_bo_map__gtt(struct brw_bo *bo);
+void *brw_bo_map__wc(struct brw_bo *bo);
-void drm_bacon_gem_bo_start_gtt_access(drm_bacon_bo *bo, int write_enable);
+void brw_bo_start_gtt_access(struct brw_bo *bo, int write_enable);
-int drm_bacon_gem_bo_wait(drm_bacon_bo *bo, int64_t timeout_ns);
+int brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns);
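
Unlike brw_bo_wait_rendering(), this wait is bounded. A sketch of a 1 ms poll
that falls back to the unbounded wait; a nonzero return is assumed to mean
timeout or error, which is how the fence code later in this patch treats it:

   static void
   example_bounded_wait(struct brw_bo *bo)
   {
      if (brw_bo_wait(bo, 1000000 /* 1 ms, in ns */) != 0)
         brw_bo_wait_rendering(bo); /* still busy; block until idle */
   }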
uint32_t brw_create_hw_context(struct brw_bufmgr *bufmgr);
void brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id);
-int drm_bacon_bo_gem_export_to_prime(drm_bacon_bo *bo, int *prime_fd);
-drm_bacon_bo *drm_bacon_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr,
+int brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd);
+struct brw_bo *brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr,
int prime_fd, int size);
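
And the dma-buf equivalent; close() needs <unistd.h>, and it is assumed the
import holds its own reference so the fd can be closed immediately:

   static struct brw_bo *
   example_share_by_prime(struct brw_bo *bo, struct brw_bufmgr *other_bufmgr)
   {
      int fd = -1;

      if (brw_bo_gem_export_to_prime(bo, &fd) != 0)
         return NULL;

      struct brw_bo *imported =
         brw_bo_gem_create_from_prime(other_bufmgr, fd, (int) bo->size);
      close(fd);
      return imported;
   }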
int brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset, uint64_t *result);
prepare_indirect_gpgpu_walker(struct brw_context *brw)
{
GLintptr indirect_offset = brw->compute.num_work_groups_offset;
- drm_bacon_bo *bo = brw->compute.num_work_groups_bo;
+ struct brw_bo *bo = brw->compute.num_work_groups_bo;
brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMX, bo,
I915_GEM_DOMAIN_VERTEX, 0,
struct brw_context *brw = brw_context(ctx);
static const GLuint indirect_group_counts[3] = { 0, 0, 0 };
struct gl_buffer_object *indirect_buffer = ctx->DispatchIndirectBuffer;
- drm_bacon_bo *bo =
+ struct brw_bo *bo =
intel_bufferobj_buffer(brw,
intel_buffer_object(indirect_buffer),
indirect, 3 * sizeof(GLuint));
}
static bool
-intel_disable_rb_aux_buffer(struct brw_context *brw, const drm_bacon_bo *bo)
+intel_disable_rb_aux_buffer(struct brw_context *brw, const struct brw_bo *bo)
{
const struct gl_framebuffer *fb = brw->ctx.DrawBuffer;
bool found = false;
intel_glFlush(ctx);
if (brw->batch.last_bo)
- drm_bacon_bo_wait_rendering(brw->batch.last_bo);
+ brw_bo_wait_rendering(brw->batch.last_bo);
}
static void
brw_destroy_state(brw);
brw_draw_destroy(brw);
- drm_bacon_bo_unreference(brw->curbe.curbe_bo);
+ brw_bo_unreference(brw->curbe.curbe_bo);
if (brw->vs.base.scratch_bo)
- drm_bacon_bo_unreference(brw->vs.base.scratch_bo);
+ brw_bo_unreference(brw->vs.base.scratch_bo);
if (brw->tcs.base.scratch_bo)
- drm_bacon_bo_unreference(brw->tcs.base.scratch_bo);
+ brw_bo_unreference(brw->tcs.base.scratch_bo);
if (brw->tes.base.scratch_bo)
- drm_bacon_bo_unreference(brw->tes.base.scratch_bo);
+ brw_bo_unreference(brw->tes.base.scratch_bo);
if (brw->gs.base.scratch_bo)
- drm_bacon_bo_unreference(brw->gs.base.scratch_bo);
+ brw_bo_unreference(brw->gs.base.scratch_bo);
if (brw->wm.base.scratch_bo)
- drm_bacon_bo_unreference(brw->wm.base.scratch_bo);
+ brw_bo_unreference(brw->wm.base.scratch_bo);
brw_destroy_hw_context(brw->bufmgr, brw->hw_ctx);
brw_fini_pipe_control(brw);
intel_batchbuffer_free(&brw->batch);
- drm_bacon_bo_unreference(brw->throttle_batch[1]);
- drm_bacon_bo_unreference(brw->throttle_batch[0]);
+ brw_bo_unreference(brw->throttle_batch[1]);
+ brw_bo_unreference(brw->throttle_batch[0]);
brw->throttle_batch[1] = NULL;
brw->throttle_batch[0] = NULL;
* DRI2BufferDepthStencil are handled as special cases.
*
* \param buffer_name is a human readable name, such as "dri2 front buffer",
- * that is passed to drm_bacon_bo_gem_create_from_name().
+ * that is passed to brw_bo_gem_create_from_name().
*
* \see intel_update_renderbuffers()
*/
const char *buffer_name)
{
struct gl_framebuffer *fb = drawable->driverPrivate;
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
if (!rb)
return;
if (last_mt) {
/* The bo already has a name because the miptree was created by a
* previous call to intel_process_dri2_buffer(). If a bo already has a
- * name, then drm_bacon_bo_flink() is a low-cost getter. It does not
+ * name, then brw_bo_flink() is a low-cost getter. It does not
* create a new name.
*/
- drm_bacon_bo_flink(last_mt->bo, &old_name);
+ brw_bo_flink(last_mt->bo, &old_name);
}
if (old_name == buffer->name)
buffer->cpp, buffer->pitch);
}
- bo = drm_bacon_bo_gem_create_from_name(brw->bufmgr, buffer_name,
+ bo = brw_bo_gem_create_from_name(brw->bufmgr, buffer_name,
buffer->name);
if (!bo) {
fprintf(stderr,
assert(rb->mt);
- drm_bacon_bo_unreference(bo);
+ brw_bo_unreference(bo);
}
/**
struct brw_context *brw;
struct brw_cache_item **items;
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
GLuint size, n_items;
uint32_t next_offset;
struct brw_vertex_buffer {
/** Buffer object containing the uploaded vertex data */
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
uint32_t offset;
uint32_t size;
/** Byte stride between elements in the uploaded array */
struct gl_query_object Base;
/** Last query BO associated with this query. */
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
/** Last index in bo with query data for this object. */
int last_index;
struct intel_batchbuffer {
/** Current batchbuffer being queued up. */
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
/** Last BO submitted to the hardware. Used for glFinish(). */
- drm_bacon_bo *last_bo;
+ struct brw_bo *last_bo;
#ifdef DEBUG
uint16_t emit, total;
int reloc_array_size;
/** The validation list */
struct drm_i915_gem_exec_object2 *exec_objects;
- drm_bacon_bo **exec_bos;
+ struct brw_bo **exec_bos;
int exec_count;
int exec_array_size;
/** The amount of aperture space (in bytes) used by all exec_bos */
struct gl_transform_feedback_object base;
/** A buffer to hold SO_WRITE_OFFSET(n) values while paused. */
- drm_bacon_bo *offset_bo;
+ struct brw_bo *offset_bo;
/** If true, SO_WRITE_OFFSET(n) should be reset to zero at next use. */
bool zero_offsets;
* @{
*/
uint64_t prims_generated[BRW_MAX_XFB_STREAMS];
- drm_bacon_bo *prim_count_bo;
+ struct brw_bo *prim_count_bo;
unsigned prim_count_buffer_index; /**< in number of uint64_t units */
/** @} */
* unless you're taking additional measures to synchronize thread execution
* across slot size changes.
*/
- drm_bacon_bo *scratch_bo;
+ struct brw_bo *scratch_bo;
/**
* Scratch slot size allocated for each thread in the buffer object given
uint32_t hw_ctx;
/** BO for post-sync nonzero writes for gen6 workaround. */
- drm_bacon_bo *workaround_bo;
+ struct brw_bo *workaround_bo;
uint8_t pipe_controls_since_last_cs_stall;
/**
- * Set of drm_bacon_bo * that have been rendered to within this batchbuffer
+ * Set of struct brw_bo * that have been rendered to within this batchbuffer
* and would need flushing before being used from another cache domain that
* isn't coherent with it (i.e. the sampler).
*/
bool no_batch_wrap;
struct {
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
uint32_t next_offset;
} upload;
bool front_buffer_dirty;
/** Framerate throttling: @{ */
- drm_bacon_bo *throttle_batch[2];
+ struct brw_bo *throttle_batch[2];
/* Limit the number of outstanding SwapBuffers by waiting for an earlier
* frame of rendering to complete. This gives a very precise cap to the
* Buffer and offset used for GL_ARB_shader_draw_parameters
* (for now, only gl_BaseVertex).
*/
- drm_bacon_bo *draw_params_bo;
+ struct brw_bo *draw_params_bo;
uint32_t draw_params_offset;
/**
* draw parameters.
*/
int gl_drawid;
- drm_bacon_bo *draw_id_bo;
+ struct brw_bo *draw_id_bo;
uint32_t draw_id_offset;
} draw;
* an indirect call, and num_work_groups_offset is valid. Otherwise,
* num_work_groups is set based on glDispatchCompute.
*/
- drm_bacon_bo *num_work_groups_bo;
+ struct brw_bo *num_work_groups_bo;
GLintptr num_work_groups_offset;
const GLuint *num_work_groups;
} compute;
const struct _mesa_index_buffer *ib;
/* Updates are signaled by BRW_NEW_INDEX_BUFFER. */
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
uint32_t size;
GLuint type;
* Pointer to the (intel_upload.c-generated) BO containing the uniforms
* for upload to the CURBE.
*/
- drm_bacon_bo *curbe_bo;
+ struct brw_bo *curbe_bo;
/** Offset within curbe_bo of space for current curbe entry */
GLuint curbe_offset;
} curbe;
* Buffer object used in place of multisampled null render targets on
* Gen6. See brw_emit_null_surface_state().
*/
- drm_bacon_bo *multisampled_null_render_target_bo;
+ struct brw_bo *multisampled_null_render_target_bo;
uint32_t fast_clear_op;
float offset_clamp;
} l3;
struct {
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
const char **names;
int *ids;
enum shader_time_shader_type *types;
/** gen6_queryobj.c */
void gen6_init_queryobj_functions(struct dd_function_table *functions);
-void brw_write_timestamp(struct brw_context *brw, drm_bacon_bo *bo, int idx);
-void brw_write_depth_count(struct brw_context *brw, drm_bacon_bo *bo, int idx);
+void brw_write_timestamp(struct brw_context *brw, struct brw_bo *bo, int idx);
+void brw_write_depth_count(struct brw_context *brw, struct brw_bo *bo, int idx);
/** hsw_queryobj.c */
void hsw_overflow_result_to_gpr0(struct brw_context *brw,
/** intel_batchbuffer.c */
void brw_load_register_mem(struct brw_context *brw,
uint32_t reg,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset);
void brw_load_register_mem64(struct brw_context *brw,
uint32_t reg,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset);
void brw_store_register_mem32(struct brw_context *brw,
- drm_bacon_bo *bo, uint32_t reg, uint32_t offset);
+ struct brw_bo *bo, uint32_t reg, uint32_t offset);
void brw_store_register_mem64(struct brw_context *brw,
- drm_bacon_bo *bo, uint32_t reg, uint32_t offset);
+ struct brw_bo *bo, uint32_t reg, uint32_t offset);
void brw_load_register_imm32(struct brw_context *brw,
uint32_t reg, uint32_t imm);
void brw_load_register_imm64(struct brw_context *brw,
uint32_t dest);
void brw_load_register_reg64(struct brw_context *brw, uint32_t src,
uint32_t dest);
-void brw_store_data_imm32(struct brw_context *brw, drm_bacon_bo *bo,
+void brw_store_data_imm32(struct brw_context *brw, struct brw_bo *bo,
uint32_t offset, uint32_t imm);
-void brw_store_data_imm64(struct brw_context *brw, drm_bacon_bo *bo,
+void brw_store_data_imm64(struct brw_context *brw, struct brw_bo *bo,
uint32_t offset, uint64_t imm);
/*======================================================================
void brwInitFragProgFuncs( struct dd_function_table *functions );
void brw_get_scratch_bo(struct brw_context *brw,
- drm_bacon_bo **scratch_bo, int size);
+ struct brw_bo **scratch_bo, int size);
void brw_alloc_stage_scratch(struct brw_context *brw,
struct brw_stage_state *stage_state,
unsigned per_thread_size,
/* brw_wm_surface_state.c */
void brw_init_surface_formats(struct brw_context *brw);
void brw_create_constant_surface(struct brw_context *brw,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t offset,
uint32_t size,
uint32_t *out_offset);
void brw_create_buffer_surface(struct brw_context *brw,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t offset,
uint32_t size,
uint32_t *out_offset);
void brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags);
void brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
- drm_bacon_bo *bo, uint32_t offset,
+ struct brw_bo *bo, uint32_t offset,
uint32_t imm_lower, uint32_t imm_upper);
void brw_emit_mi_flush(struct brw_context *brw);
void brw_emit_post_sync_nonzero_flush(struct brw_context *brw);
if (unlikely(brw->perf_debug)) {
start_busy = (brw->batch.last_bo &&
- drm_bacon_bo_busy(brw->batch.last_bo));
+ brw_bo_busy(brw->batch.last_bo));
start_time = get_time();
}
}
cp->compiled_once = true;
- if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
+ if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
perf_debug("CS compile took %.03f ms and stalled the GPU\n",
(get_time() - start_time) * 1000);
}
ADVANCE_BATCH();
} else if (prim->is_indirect) {
struct gl_buffer_object *indirect_buffer = brw->ctx.DrawIndirectBuffer;
- drm_bacon_bo *bo = intel_bufferobj_buffer(brw,
+ struct brw_bo *bo = intel_bufferobj_buffer(brw,
intel_buffer_object(indirect_buffer),
prim->indirect_offset, 5 * sizeof(GLuint));
GLuint i;
for (i = 0; i < brw->vb.nr_buffers; i++) {
- drm_bacon_bo_unreference(brw->vb.buffers[i].bo);
+ brw_bo_unreference(brw->vb.buffers[i].bo);
brw->vb.buffers[i].bo = NULL;
}
brw->vb.nr_buffers = 0;
brw->draw.params.gl_basevertex = new_basevertex;
brw->draw.params.gl_baseinstance = new_baseinstance;
- drm_bacon_bo_unreference(brw->draw.draw_params_bo);
+ brw_bo_unreference(brw->draw.draw_params_bo);
if (prims[i].is_indirect) {
/* Point draw_params_bo at the indirect buffer. */
brw->draw.draw_params_bo =
intel_buffer_object(ctx->DrawIndirectBuffer)->buffer;
- drm_bacon_bo_reference(brw->draw.draw_params_bo);
+ brw_bo_reference(brw->draw.draw_params_bo);
brw->draw.draw_params_offset =
prims[i].indirect_offset + (prims[i].indexed ? 12 : 8);
} else {
* the loop.
*/
brw->draw.gl_drawid = prims[i].draw_id;
- drm_bacon_bo_unreference(brw->draw.draw_id_bo);
+ brw_bo_unreference(brw->draw.draw_id_bo);
brw->draw.draw_id_bo = NULL;
if (i > 0 && vs_prog_data->uses_drawid)
brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
unsigned i;
for (i = 0; i < brw->vb.nr_buffers; i++) {
- drm_bacon_bo_unreference(brw->vb.buffers[i].bo);
+ brw_bo_unreference(brw->vb.buffers[i].bo);
brw->vb.buffers[i].bo = NULL;
}
brw->vb.nr_buffers = 0;
}
brw->vb.nr_enabled = 0;
- drm_bacon_bo_unreference(brw->ib.bo);
+ brw_bo_unreference(brw->ib.bo);
brw->ib.bo = NULL;
}
uint32_t *
brw_emit_vertex_buffer_state(struct brw_context *brw,
unsigned buffer_nr,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
unsigned start_offset,
unsigned end_offset,
unsigned stride,
const uint32_t range = buffer_range_end[i] - buffer_range_start[i];
buffer->bo = intel_bufferobj_buffer(brw, enabled_buffer[i], start, range);
- drm_bacon_bo_reference(buffer->bo);
+ brw_bo_reference(buffer->bo);
}
/* If we need to upload all the arrays, then we can trim those arrays to
uint32_t *
brw_emit_vertex_buffer_state(struct brw_context *brw,
unsigned buffer_nr,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
unsigned start_offset,
unsigned end_offset,
unsigned stride,
struct gl_context *ctx = &brw->ctx;
const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
GLuint ib_size;
- drm_bacon_bo *old_bo = brw->ib.bo;
+ struct brw_bo *old_bo = brw->ib.bo;
struct gl_buffer_object *bufferobj;
GLuint offset;
GLuint ib_type_size;
ctx->Driver.UnmapBuffer(ctx, bufferobj, MAP_INTERNAL);
} else {
- drm_bacon_bo *bo =
+ struct brw_bo *bo =
intel_bufferobj_buffer(brw, intel_buffer_object(bufferobj),
offset, ib_size);
if (bo != brw->ib.bo) {
- drm_bacon_bo_unreference(brw->ib.bo);
+ brw_bo_unreference(brw->ib.bo);
brw->ib.bo = bo;
brw->ib.size = bufferobj->Size;
- drm_bacon_bo_reference(bo);
+ brw_bo_reference(bo);
}
}
}
st_index = brw_get_shader_time_index(brw, &gp->program, ST_GS, true);
if (unlikely(brw->perf_debug)) {
- start_busy = brw->batch.last_bo && drm_bacon_bo_busy(brw->batch.last_bo);
+ start_busy = brw->batch.last_bo && brw_bo_busy(brw->batch.last_bo);
start_time = get_time();
}
if (gp->compiled_once) {
brw_gs_debug_recompile(brw, &gp->program, key);
}
- if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
+ if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
perf_debug("GS compile took %.03f ms and stalled the GPU\n",
(get_time() - start_time) * 1000);
}
#include "intel_mipmap_tree.h"
static GLenum
-intel_buffer_purgeable(drm_bacon_bo *buffer)
+intel_buffer_purgeable(struct brw_bo *buffer)
{
int retained = 0;
if (buffer != NULL)
- retained = drm_bacon_bo_madvise(buffer, I915_MADV_DONTNEED);
+ retained = brw_bo_madvise(buffer, I915_MADV_DONTNEED);
return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
}
}
static int
-intel_bo_unpurgeable(drm_bacon_bo *buffer)
+intel_bo_unpurgeable(struct brw_bo *buffer)
{
int retained;
retained = 0;
if (buffer != NULL)
- retained = drm_bacon_bo_madvise(buffer, I915_MADV_WILLNEED);
+ retained = brw_bo_madvise(buffer, I915_MADV_WILLNEED);
return retained;
}
return GL_UNDEFINED_APPLE;
if (option == GL_UNDEFINED_APPLE || !intel_bo_unpurgeable(intel->buffer)) {
- drm_bacon_bo_unreference(intel->buffer);
+ brw_bo_unreference(intel->buffer);
intel->buffer = NULL;
return GL_UNDEFINED_APPLE;
}
/**
* BO containing OA counter snapshots at query Begin/End time.
*/
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
/**
* The MI_REPORT_PERF_COUNT command lets us specify a unique
* BO containing starting and ending snapshots for the
* statistics counters.
*/
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
} pipeline_stats;
};
};
*/
static void
emit_mi_report_perf_count(struct brw_context *brw,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t offset_in_bytes,
uint32_t report_id)
{
if (!read_oa_samples(brw))
goto error;
- drm_bacon_bo_map(obj->oa.bo, false);
+ brw_bo_map(obj->oa.bo, false);
query_buffer = obj->oa.bo->virtual;
start = last = query_buffer;
DBG("Marking %d accumulated - results gathered\n", o->Id);
- drm_bacon_bo_unmap(obj->oa.bo);
+ brw_bo_unmap(obj->oa.bo);
obj->oa.results_accumulated = true;
drop_from_unaccumulated_query_list(brw, obj);
dec_n_oa_users(brw);
error:
- drm_bacon_bo_unmap(obj->oa.bo);
+ brw_bo_unmap(obj->oa.bo);
discard_all_queries(brw);
}
}
if (obj->oa.bo) {
- drm_bacon_bo_unreference(obj->oa.bo);
+ brw_bo_unreference(obj->oa.bo);
obj->oa.bo = NULL;
}
obj->oa.bo =
- drm_bacon_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo",
- MI_RPC_BO_SIZE, 64);
+ brw_bo_alloc(brw->bufmgr, "perf. query OA MI_RPC bo",
+ MI_RPC_BO_SIZE, 64);
#ifdef DEBUG
/* Pre-filling the BO helps debug whether writes landed. */
- drm_bacon_bo_map(obj->oa.bo, true);
+ brw_bo_map(obj->oa.bo, true);
memset((char *) obj->oa.bo->virtual, 0x80, MI_RPC_BO_SIZE);
- drm_bacon_bo_unmap(obj->oa.bo);
+ brw_bo_unmap(obj->oa.bo);
#endif
obj->oa.begin_report_id = brw->perfquery.next_query_start_report_id;
case PIPELINE_STATS:
if (obj->pipeline_stats.bo) {
- drm_bacon_bo_unreference(obj->pipeline_stats.bo);
+ brw_bo_unreference(obj->pipeline_stats.bo);
obj->pipeline_stats.bo = NULL;
}
obj->pipeline_stats.bo =
- drm_bacon_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
+ brw_bo_alloc(brw->bufmgr, "perf. query pipeline stats bo",
STATS_BO_SIZE, 64);
/* Take starting snapshots. */
{
struct brw_context *brw = brw_context(ctx);
struct brw_perf_query_object *obj = brw_perf_query(o);
- drm_bacon_bo *bo = NULL;
+ struct brw_bo *bo = NULL;
assert(!o->Ready);
intel_batchbuffer_flush(brw);
if (unlikely(brw->perf_debug)) {
- if (drm_bacon_bo_busy(bo))
+ if (brw_bo_busy(bo))
perf_debug("Stalling GPU waiting for a performance query object.\n");
}
- drm_bacon_bo_wait_rendering(bo);
+ brw_bo_wait_rendering(bo);
}
static bool
return (obj->oa.results_accumulated ||
(obj->oa.bo &&
!brw_batch_references(&brw->batch, obj->oa.bo) &&
- !drm_bacon_bo_busy(obj->oa.bo)));
+ !brw_bo_busy(obj->oa.bo)));
case PIPELINE_STATS:
return (obj->pipeline_stats.bo &&
!brw_batch_references(&brw->batch, obj->pipeline_stats.bo) &&
- !drm_bacon_bo_busy(obj->pipeline_stats.bo));
+ !brw_bo_busy(obj->pipeline_stats.bo));
}
unreachable("missing ready check for unknown query kind");
int n_counters = obj->query->n_counters;
uint8_t *p = data;
- drm_bacon_bo_map(obj->pipeline_stats.bo, false);
+ brw_bo_map(obj->pipeline_stats.bo, false);
uint64_t *start = obj->pipeline_stats.bo->virtual;
uint64_t *end = start + (STATS_BO_END_OFFSET_BYTES / sizeof(uint64_t));
p += 8;
}
- drm_bacon_bo_unmap(obj->pipeline_stats.bo);
+ brw_bo_unmap(obj->pipeline_stats.bo);
return p - data;
}
dec_n_oa_users(brw);
}
- drm_bacon_bo_unreference(obj->oa.bo);
+ brw_bo_unreference(obj->oa.bo);
obj->oa.bo = NULL;
}
case PIPELINE_STATS:
if (obj->pipeline_stats.bo) {
- drm_bacon_bo_unreference(obj->pipeline_stats.bo);
+ brw_bo_unreference(obj->pipeline_stats.bo);
obj->pipeline_stats.bo = NULL;
}
break;
*/
void
brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
- drm_bacon_bo *bo, uint32_t offset,
+ struct brw_bo *bo, uint32_t offset,
uint32_t imm_lower, uint32_t imm_upper)
{
if (brw->gen >= 8) {
* the gen6 workaround because it involves actually writing to
* the buffer, and the kernel doesn't let us write to the batch.
*/
- brw->workaround_bo = drm_bacon_bo_alloc(brw->bufmgr,
- "pipe_control workaround",
- 4096, 4096);
+ brw->workaround_bo = brw_bo_alloc(brw->bufmgr,
+ "pipe_control workaround",
+ 4096, 4096);
if (brw->workaround_bo == NULL)
return -ENOMEM;
void
brw_fini_pipe_control(struct brw_context *brw)
{
- drm_bacon_bo_unreference(brw->workaround_bo);
+ brw_bo_unreference(brw->workaround_bo);
}
void
brw_get_scratch_bo(struct brw_context *brw,
- drm_bacon_bo **scratch_bo, int size)
+ struct brw_bo **scratch_bo, int size)
{
- drm_bacon_bo *old_bo = *scratch_bo;
+ struct brw_bo *old_bo = *scratch_bo;
if (old_bo && old_bo->size < size) {
- drm_bacon_bo_unreference(old_bo);
+ brw_bo_unreference(old_bo);
old_bo = NULL;
}
if (!old_bo) {
- *scratch_bo = drm_bacon_bo_alloc(brw->bufmgr, "scratch bo", size, 4096);
+ *scratch_bo = brw_bo_alloc(brw->bufmgr, "scratch bo", size, 4096);
}
}
stage_state->per_thread_scratch = per_thread_size;
if (stage_state->scratch_bo)
- drm_bacon_bo_unreference(stage_state->scratch_bo);
+ brw_bo_unreference(stage_state->scratch_bo);
stage_state->scratch_bo =
- drm_bacon_bo_alloc(brw->bufmgr, "shader scratch space",
- per_thread_size * thread_count, 4096);
+ brw_bo_alloc(brw->bufmgr, "shader scratch space",
+ per_thread_size * thread_count, 4096);
}
}
{
const int max_entries = 2048;
brw->shader_time.bo =
- drm_bacon_bo_alloc(brw->bufmgr, "shader time",
- max_entries * BRW_SHADER_TIME_STRIDE * 3, 4096);
+ brw_bo_alloc(brw->bufmgr, "shader time",
+ max_entries * BRW_SHADER_TIME_STRIDE * 3, 4096);
brw->shader_time.names = rzalloc_array(brw, const char *, max_entries);
brw->shader_time.ids = rzalloc_array(brw, int, max_entries);
brw->shader_time.types = rzalloc_array(brw, enum shader_time_shader_type,
* delaying reading the reports, but it doesn't look like it's a big
* overhead compared to the cost of tracking the time in the first place.
*/
- drm_bacon_bo_map(brw->shader_time.bo, true);
+ brw_bo_map(brw->shader_time.bo, true);
void *bo_map = brw->shader_time.bo->virtual;
for (int i = 0; i < brw->shader_time.num_entries; i++) {
/* Zero the BO out to clear it out for our next collection.
*/
memset(bo_map, 0, brw->shader_time.bo->size);
- drm_bacon_bo_unmap(brw->shader_time.bo);
+ brw_bo_unmap(brw->shader_time.bo);
}
void
void
brw_destroy_shader_time(struct brw_context *brw)
{
- drm_bacon_bo_unreference(brw->shader_time.bo);
+ brw_bo_unreference(brw->shader_time.bo);
brw->shader_time.bo = NULL;
}
brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
{
struct brw_context *brw = cache->brw;
- drm_bacon_bo *new_bo;
+ struct brw_bo *new_bo;
- new_bo = drm_bacon_bo_alloc(brw->bufmgr, "program cache", new_size, 64);
+ new_bo = brw_bo_alloc(brw->bufmgr, "program cache", new_size, 64);
if (brw->has_llc)
- drm_bacon_gem_bo_map_unsynchronized(new_bo);
+ brw_bo_map_unsynchronized(new_bo);
/* Copy any existing data that needs to be saved. */
if (cache->next_offset != 0) {
if (brw->has_llc) {
memcpy(new_bo->virtual, cache->bo->virtual, cache->next_offset);
} else {
- drm_bacon_bo_map(cache->bo, false);
- drm_bacon_bo_subdata(new_bo, 0, cache->next_offset,
+ brw_bo_map(cache->bo, false);
+ brw_bo_subdata(new_bo, 0, cache->next_offset,
cache->bo->virtual);
- drm_bacon_bo_unmap(cache->bo);
+ brw_bo_unmap(cache->bo);
}
}
if (brw->has_llc)
- drm_bacon_bo_unmap(cache->bo);
- drm_bacon_bo_unreference(cache->bo);
+ brw_bo_unmap(cache->bo);
+ brw_bo_unreference(cache->bo);
cache->bo = new_bo;
cache->bo_used_by_gpu = false;
continue;
if (!brw->has_llc)
- drm_bacon_bo_map(cache->bo, false);
+ brw_bo_map(cache->bo, false);
ret = memcmp(cache->bo->virtual + item->offset, data, item->size);
if (!brw->has_llc)
- drm_bacon_bo_unmap(cache->bo);
+ brw_bo_unmap(cache->bo);
if (ret)
continue;
if (brw->has_llc) {
memcpy((char *)cache->bo->virtual + item->offset, data, data_size);
} else {
- drm_bacon_bo_subdata(cache->bo, item->offset, data_size, data);
+ brw_bo_subdata(cache->bo, item->offset, data_size, data);
}
}
cache->items =
calloc(cache->size, sizeof(struct brw_cache_item *));
- cache->bo = drm_bacon_bo_alloc(brw->bufmgr, "program cache", 4096, 64);
+ cache->bo = brw_bo_alloc(brw->bufmgr, "program cache", 4096, 64);
if (brw->has_llc)
- drm_bacon_gem_bo_map_unsynchronized(cache->bo);
+ brw_bo_map_unsynchronized(cache->bo);
}
static void
DBG("%s\n", __func__);
if (brw->has_llc)
- drm_bacon_bo_unmap(cache->bo);
- drm_bacon_bo_unreference(cache->bo);
+ brw_bo_unmap(cache->bo);
+ brw_bo_unreference(cache->bo);
cache->bo = NULL;
brw_clear_cache(brw, cache);
free(cache->items);
struct brw_cache_item *item;
if (!brw->has_llc)
- drm_bacon_bo_map(cache->bo, false);
+ brw_bo_map(cache->bo, false);
for (unsigned i = 0; i < cache->size; i++) {
for (item = cache->items[i]; item; item = item->next) {
}
if (!brw->has_llc)
- drm_bacon_bo_unmap(cache->bo);
+ brw_bo_unmap(cache->bo);
}
* Emit PIPE_CONTROLs to write the current GPU timestamp into a buffer.
*/
void
-brw_write_timestamp(struct brw_context *brw, drm_bacon_bo *query_bo, int idx)
+brw_write_timestamp(struct brw_context *brw, struct brw_bo *query_bo, int idx)
{
if (brw->gen == 6) {
/* Emit Sandybridge workaround flush: */
* Emit PIPE_CONTROLs to write the PS_DEPTH_COUNT register into a buffer.
*/
void
-brw_write_depth_count(struct brw_context *brw, drm_bacon_bo *query_bo, int idx)
+brw_write_depth_count(struct brw_context *brw, struct brw_bo *query_bo, int idx)
{
uint32_t flags = PIPE_CONTROL_WRITE_DEPTH_COUNT | PIPE_CONTROL_DEPTH_STALL;
intel_batchbuffer_flush(brw);
if (unlikely(brw->perf_debug)) {
- if (drm_bacon_bo_busy(query->bo)) {
+ if (brw_bo_busy(query->bo)) {
perf_debug("Stalling on the GPU waiting for a query object.\n");
}
}
- drm_bacon_bo_map(query->bo, false);
+ brw_bo_map(query->bo, false);
results = query->bo->virtual;
switch (query->Base.Target) {
case GL_TIME_ELAPSED_EXT:
default:
unreachable("Unrecognized query target in brw_queryobj_get_results()");
}
- drm_bacon_bo_unmap(query->bo);
+ brw_bo_unmap(query->bo);
/* Now that we've processed the data stored in the query's buffer object,
* we can release it.
*/
- drm_bacon_bo_unreference(query->bo);
+ brw_bo_unreference(query->bo);
query->bo = NULL;
}
{
struct brw_query_object *query = (struct brw_query_object *)q;
- drm_bacon_bo_unreference(query->bo);
+ brw_bo_unreference(query->bo);
free(query);
}
* obtain the time elapsed. Notably, this includes time elapsed while
* the system was doing other work, such as running other applications.
*/
- drm_bacon_bo_unreference(query->bo);
- query->bo = drm_bacon_bo_alloc(brw->bufmgr, "timer query", 4096, 4096);
+ brw_bo_unreference(query->bo);
+ query->bo = brw_bo_alloc(brw->bufmgr, "timer query", 4096, 4096);
brw_write_timestamp(brw, query->bo, 0);
break;
* Since we're starting a new query, we need to be sure to throw away
* any previous occlusion query results.
*/
- drm_bacon_bo_unreference(query->bo);
+ brw_bo_unreference(query->bo);
query->bo = NULL;
query->last_index = -1;
if (query->bo && brw_batch_references(&brw->batch, query->bo))
intel_batchbuffer_flush(brw);
- if (query->bo == NULL || !drm_bacon_bo_busy(query->bo)) {
+ if (query->bo == NULL || !brw_bo_busy(query->bo)) {
brw_queryobj_get_results(ctx, query);
query->Base.Ready = true;
}
brw_queryobj_get_results(ctx, query);
}
- query->bo = drm_bacon_bo_alloc(brw->bufmgr, "query", 4096, 1);
+ query->bo = brw_bo_alloc(brw->bufmgr, "query", 4096, 1);
query->last_index = 0;
}
}
assert(q->Target == GL_TIMESTAMP);
- drm_bacon_bo_unreference(query->bo);
- query->bo = drm_bacon_bo_alloc(brw->bufmgr, "timestamp query", 4096, 4096);
+ brw_bo_unreference(query->bo);
+ query->bo = brw_bo_alloc(brw->bufmgr, "timestamp query", 4096, 4096);
brw_write_timestamp(brw, query->bo, 0);
query->flushed = false;
void brw_emit_buffer_surface_state(struct brw_context *brw,
uint32_t *out_offset,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
unsigned buffer_offset,
unsigned surface_format,
unsigned buffer_size,
} type;
union {
- drm_bacon_bo *batch_bo;
+ struct brw_bo *batch_bo;
/* This struct owns the fd. */
int sync_fd;
switch (fence->type) {
case BRW_FENCE_TYPE_BO_WAIT:
if (fence->batch_bo)
- drm_bacon_bo_unreference(fence->batch_bo);
+ brw_bo_unreference(fence->batch_bo);
break;
case BRW_FENCE_TYPE_SYNC_FD:
if (fence->sync_fd != -1)
assert(!fence->signalled);
fence->batch_bo = brw->batch.bo;
- drm_bacon_bo_reference(fence->batch_bo);
+ brw_bo_reference(fence->batch_bo);
if (intel_batchbuffer_flush(brw) < 0) {
- drm_bacon_bo_unreference(fence->batch_bo);
+ brw_bo_unreference(fence->batch_bo);
fence->batch_bo = NULL;
return false;
}
return false;
}
- if (drm_bacon_bo_busy(fence->batch_bo))
+ if (brw_bo_busy(fence->batch_bo))
return false;
- drm_bacon_bo_unreference(fence->batch_bo);
+ brw_bo_unreference(fence->batch_bo);
fence->batch_bo = NULL;
fence->signalled = true;
if (timeout > INT64_MAX)
timeout = INT64_MAX;
- if (drm_bacon_gem_bo_wait(fence->batch_bo, timeout) != 0)
+ if (brw_bo_wait(fence->batch_bo, timeout) != 0)
return false;
fence->signalled = true;
- drm_bacon_bo_unreference(fence->batch_bo);
+ brw_bo_unreference(fence->batch_bo);
fence->batch_bo = NULL;
return true;
st_index = brw_get_shader_time_index(brw, &tep->program, ST_TCS, true);
if (unlikely(brw->perf_debug)) {
- start_busy = brw->batch.last_bo && drm_bacon_bo_busy(brw->batch.last_bo);
+ start_busy = brw->batch.last_bo && brw_bo_busy(brw->batch.last_bo);
start_time = get_time();
}
tcp->compiled_once = true;
}
- if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
+ if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
perf_debug("TCS compile took %.03f ms and stalled the GPU\n",
(get_time() - start_time) * 1000);
}
st_index = brw_get_shader_time_index(brw, &tep->program, ST_TES, true);
if (unlikely(brw->perf_debug)) {
- start_busy = brw->batch.last_bo && drm_bacon_bo_busy(brw->batch.last_bo);
+ start_busy = brw->batch.last_bo && brw_bo_busy(brw->batch.last_bo);
start_time = get_time();
}
if (tep->compiled_once) {
brw_tes_debug_recompile(brw, &tep->program, key);
}
- if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
+ if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
perf_debug("TES compile took %.03f ms and stalled the GPU\n",
(get_time() - start_time) * 1000);
}
if (unlikely(brw->perf_debug)) {
start_busy = (brw->batch.last_bo &&
- drm_bacon_bo_busy(brw->batch.last_bo));
+ brw_bo_busy(brw->batch.last_bo));
start_time = get_time();
}
if (vp->compiled_once) {
brw_vs_debug_recompile(brw, &vp->program, key);
}
- if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
+ if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
perf_debug("VS compile took %.03f ms and stalled the GPU\n",
(get_time() - start_time) * 1000);
}
/* BRW_NEW_*_PROG_DATA | _NEW_PROGRAM_CONSTANTS */
uint32_t size = prog_data->nr_pull_params * 4;
- drm_bacon_bo *const_bo = NULL;
+ struct brw_bo *const_bo = NULL;
uint32_t const_offset;
gl_constant_value *constants = intel_upload_space(brw, size, 64,
&const_bo, &const_offset);
brw_create_constant_surface(brw, const_bo, const_offset, size,
&stage_state->surf_offset[surf_index]);
- drm_bacon_bo_unreference(const_bo);
+ brw_bo_unreference(const_bo);
brw->ctx.NewDriverState |= brw_new_constbuf;
}
if (unlikely(brw->perf_debug)) {
start_busy = (brw->batch.last_bo &&
- drm_bacon_bo_busy(brw->batch.last_bo));
+ brw_bo_busy(brw->batch.last_bo));
start_time = get_time();
}
brw_wm_debug_recompile(brw, &fp->program, key);
fp->compiled_once = true;
- if (start_busy && !drm_bacon_bo_busy(brw->batch.last_bo)) {
+ if (start_busy && !brw_bo_busy(brw->batch.last_bo)) {
perf_debug("FS compile took %.03f ms and stalled the GPU\n",
(get_time() - start_time) * 1000);
}
union isl_color_value clear_color = { .u32 = { 0, 0, 0, 0 } };
- drm_bacon_bo *aux_bo;
+ struct brw_bo *aux_bo;
struct isl_surf *aux_surf = NULL, aux_surf_s;
uint64_t aux_offset = 0;
enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE;
void
brw_emit_buffer_surface_state(struct brw_context *brw,
uint32_t *out_offset,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
unsigned buffer_offset,
unsigned surface_format,
unsigned buffer_size,
struct intel_buffer_object *intel_obj =
intel_buffer_object(tObj->BufferObject);
uint32_t size = tObj->BufferSize;
- drm_bacon_bo *bo = NULL;
+ struct brw_bo *bo = NULL;
mesa_format format = tObj->_BufferObjectFormat;
uint32_t brw_format = brw_isl_format_for_mesa_format(format);
int texel_size = _mesa_get_format_bytes(format);
*/
void
brw_create_constant_surface(struct brw_context *brw,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t offset,
uint32_t size,
uint32_t *out_offset)
*/
void
brw_create_buffer_surface(struct brw_context *brw,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t offset,
uint32_t size,
uint32_t *out_offset)
{
struct intel_buffer_object *intel_bo = intel_buffer_object(buffer_obj);
uint32_t offset_bytes = 4 * offset_dwords;
- drm_bacon_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
+ struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_bo,
offset_bytes,
buffer_obj->Size - offset_bytes);
uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
* - Surface Format must be R8G8B8A8_UNORM.
*/
unsigned surface_type = BRW_SURFACE_NULL;
- drm_bacon_bo *bo = NULL;
+ struct brw_bo *bo = NULL;
unsigned pitch_minus_1 = 0;
uint32_t multisampling_state = 0;
uint32_t *surf = brw_state_batch(brw, 6 * 4, 32, out_offset);
GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
if (!binding->AutomaticSize)
size = MIN2(size, binding->Size);
- drm_bacon_bo *bo =
+ struct brw_bo *bo =
intel_bufferobj_buffer(brw, intel_bo,
binding->Offset,
size);
GLsizeiptr size = binding->BufferObject->Size - binding->Offset;
if (!binding->AutomaticSize)
size = MIN2(size, binding->Size);
- drm_bacon_bo *bo =
+ struct brw_bo *bo =
intel_bufferobj_buffer(brw, intel_bo,
binding->Offset,
size);
&ctx->AtomicBufferBindings[prog->sh.AtomicBuffers[i]->Binding];
struct intel_buffer_object *intel_bo =
intel_buffer_object(binding->BufferObject);
- drm_bacon_bo *bo = intel_bufferobj_buffer(
+ struct brw_bo *bo = intel_bufferobj_buffer(
brw, intel_bo, binding->Offset, intel_bo->Base.Size - binding->Offset);
brw_emit_buffer_surface_state(brw, &surf_offsets[i], bo,
const unsigned surf_idx =
cs_prog_data->binding_table.work_groups_start;
uint32_t *surf_offset = &brw->cs.base.surf_offset[surf_idx];
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
uint32_t bo_offset;
if (brw->compute.num_work_groups_bo == NULL) {
static void
write_primitives_generated(struct brw_context *brw,
- drm_bacon_bo *query_bo, int stream, int idx)
+ struct brw_bo *query_bo, int stream, int idx)
{
brw_emit_mi_flush(brw);
static void
write_xfb_primitives_written(struct brw_context *brw,
- drm_bacon_bo *bo, int stream, int idx)
+ struct brw_bo *bo, int stream, int idx)
{
brw_emit_mi_flush(brw);
static void
write_xfb_overflow_streams(struct gl_context *ctx,
- drm_bacon_bo *bo, int stream, int count,
+ struct brw_bo *bo, int stream, int count,
int idx)
{
struct brw_context *brw = brw_context(ctx);
}
static void
-emit_pipeline_stat(struct brw_context *brw, drm_bacon_bo *bo,
+emit_pipeline_stat(struct brw_context *brw, struct brw_bo *bo,
int stream, int target, int idx)
{
/* One source of confusion is the tessellation shader statistics. The
if (query->bo == NULL)
return;
- drm_bacon_bo_map(query->bo, false);
+ brw_bo_map(query->bo, false);
uint64_t *results = query->bo->virtual;
switch (query->Base.Target) {
case GL_TIME_ELAPSED:
default:
unreachable("Unrecognized query target in brw_queryobj_get_results()");
}
- drm_bacon_bo_unmap(query->bo);
+ brw_bo_unmap(query->bo);
/* Now that we've processed the data stored in the query's buffer object,
* we can release it.
*/
- drm_bacon_bo_unreference(query->bo);
+ brw_bo_unreference(query->bo);
query->bo = NULL;
query->Base.Ready = true;
struct brw_query_object *query = (struct brw_query_object *)q;
/* Since we're starting a new query, we need to throw away old results. */
- drm_bacon_bo_unreference(query->bo);
- query->bo = drm_bacon_bo_alloc(brw->bufmgr, "query results", 4096, 4096);
+ brw_bo_unreference(query->bo);
+ query->bo = brw_bo_alloc(brw->bufmgr, "query results", 4096, 4096);
/* For ARB_query_buffer_object: The result is not available */
set_query_availability(brw, query, false);
*/
flush_batch_if_needed(brw, query);
- if (!drm_bacon_bo_busy(query->bo)) {
+ if (!brw_bo_busy(query->bo)) {
gen6_queryobj_get_results(ctx, query);
}
}
_mesa_init_transform_feedback_object(&brw_obj->base, name);
brw_obj->offset_bo =
- drm_bacon_bo_alloc(brw->bufmgr, "transform feedback offsets", 16, 64);
+ brw_bo_alloc(brw->bufmgr, "transform feedback offsets", 16, 64);
brw_obj->prim_count_bo =
- drm_bacon_bo_alloc(brw->bufmgr, "xfb primitive counts", 4096, 64);
+ brw_bo_alloc(brw->bufmgr, "xfb primitive counts", 4096, 64);
return &brw_obj->base;
}
_mesa_reference_buffer_object(ctx, &obj->Buffers[i], NULL);
}
- drm_bacon_bo_unreference(brw_obj->offset_bo);
- drm_bacon_bo_unreference(brw_obj->prim_count_bo);
+ brw_bo_unreference(brw_obj->offset_bo);
+ brw_bo_unreference(brw_obj->prim_count_bo);
free(brw_obj);
}
if (brw_batch_references(&brw->batch, obj->prim_count_bo))
intel_batchbuffer_flush(brw);
- if (unlikely(brw->perf_debug && drm_bacon_bo_busy(obj->prim_count_bo)))
+ if (unlikely(brw->perf_debug && brw_bo_busy(obj->prim_count_bo)))
perf_debug("Stalling for # of transform feedback primitives written.\n");
- drm_bacon_bo_map(obj->prim_count_bo, false);
+ brw_bo_map(obj->prim_count_bo, false);
uint64_t *prim_counts = obj->prim_count_bo->virtual;
assert(obj->prim_count_buffer_index % (2 * streams) == 0);
prim_counts += 2 * streams; /* move to the next pair */
}
- drm_bacon_bo_unmap(obj->prim_count_bo);
+ brw_bo_unmap(obj->prim_count_bo);
/* We've already gathered up the old data; we can safely overwrite it now. */
obj->prim_count_buffer_index = 0;
for (i = 0; i < 4; i++) {
struct intel_buffer_object *bufferobj =
intel_buffer_object(xfb_obj->Buffers[i]);
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
uint32_t start, end;
uint32_t stride;
uint32_t start = xfb_obj->Offset[i];
assert(start % 4 == 0);
uint32_t end = ALIGN(start + xfb_obj->Size[i], 4);
- drm_bacon_bo *bo =
+ struct brw_bo *bo =
intel_bufferobj_buffer(brw, bufferobj, start, end - start);
assert(end <= bo->size);
{
assert(batch->blorp->driver_ctx == batch->driver_batch);
struct brw_context *brw = batch->driver_batch;
- drm_bacon_bo *bo = address.buffer;
+ struct brw_bo *bo = address.buffer;
brw_emit_reloc(&brw->batch, ss_offset, bo, address.offset + delta,
address.read_domains, address.write_domain);
retry:
intel_batchbuffer_require_space(brw, estimated_max_batch_usage, RENDER_RING);
intel_batchbuffer_save_state(brw);
- drm_bacon_bo *saved_bo = brw->batch.bo;
+ struct brw_bo *saved_bo = brw->batch.bo;
uint32_t saved_used = USED_BATCH(brw->batch);
uint32_t saved_state_batch_offset = brw->batch.state_batch_offset;
* Store immediate data into the user buffer using the requested size.
*/
static void
-store_query_result_imm(struct brw_context *brw, drm_bacon_bo *bo,
+store_query_result_imm(struct brw_context *brw, struct brw_bo *bo,
uint32_t offset, GLenum ptype, uint64_t imm)
{
switch (ptype) {
}
static void
-set_predicate(struct brw_context *brw, drm_bacon_bo *query_bo)
+set_predicate(struct brw_context *brw, struct brw_bo *query_bo)
{
brw_load_register_imm64(brw, MI_PREDICATE_SRC1, 0ull);
* query has not finished yet.
*/
static void
-store_query_result_reg(struct brw_context *brw, drm_bacon_bo *bo,
+store_query_result_reg(struct brw_context *brw, struct brw_bo *bo,
uint32_t offset, GLenum ptype, uint32_t reg,
const bool pipelined)
{
bool has_llc)
{
if (batch->last_bo != NULL) {
- drm_bacon_bo_unreference(batch->last_bo);
+ brw_bo_unreference(batch->last_bo);
batch->last_bo = NULL;
}
batch->last_bo = batch->bo;
- batch->bo = drm_bacon_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
+ batch->bo = brw_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
if (has_llc) {
- drm_bacon_bo_map(batch->bo, true);
+ brw_bo_map(batch->bo, true);
batch->map = batch->bo->virtual;
}
batch->map_next = batch->map;
for (int i = brw->batch.saved.exec_count;
i < brw->batch.exec_count; i++) {
if (brw->batch.exec_bos[i] != brw->batch.bo) {
- drm_bacon_bo_unreference(brw->batch.exec_bos[i]);
+ brw_bo_unreference(brw->batch.exec_bos[i]);
}
}
brw->batch.reloc_count = brw->batch.saved.reloc_count;
for (int i = 0; i < batch->exec_count; i++) {
if (batch->exec_bos[i] != batch->bo) {
- drm_bacon_bo_unreference(batch->exec_bos[i]);
+ brw_bo_unreference(batch->exec_bos[i]);
}
}
free(batch->relocs);
free(batch->exec_bos);
free(batch->exec_objects);
- drm_bacon_bo_unreference(batch->last_bo);
- drm_bacon_bo_unreference(batch->bo);
+ brw_bo_unreference(batch->last_bo);
+ brw_bo_unreference(batch->bo);
if (batch->state_batch_sizes)
_mesa_hash_table_destroy(batch->state_batch_sizes, NULL);
}
if (batch->ring != RENDER_RING)
return;
- int ret = drm_bacon_bo_map(batch->bo, false);
+ int ret = brw_bo_map(batch->bo, false);
if (ret != 0) {
fprintf(stderr,
"WARNING: failed to map batchbuffer (%s), "
}
if (ret == 0) {
- drm_bacon_bo_unmap(batch->bo);
+ brw_bo_unmap(batch->bo);
}
}
#else
/* Unreference any BOs held by the previous batch, and reset counts. */
for (int i = 0; i < brw->batch.exec_count; i++) {
if (brw->batch.exec_bos[i] != brw->batch.bo) {
- drm_bacon_bo_unreference(brw->batch.exec_bos[i]);
+ brw_bo_unreference(brw->batch.exec_bos[i]);
}
brw->batch.exec_bos[i] = NULL;
}
if (brw->need_swap_throttle && brw->throttle_batch[0]) {
if (brw->throttle_batch[1]) {
if (!brw->disable_throttling)
- drm_bacon_bo_wait_rendering(brw->throttle_batch[1]);
- drm_bacon_bo_unreference(brw->throttle_batch[1]);
+ brw_bo_wait_rendering(brw->throttle_batch[1]);
+ brw_bo_unreference(brw->throttle_batch[1]);
}
brw->throttle_batch[1] = brw->throttle_batch[0];
brw->throttle_batch[0] = NULL;
}
static void
-add_exec_bo(struct intel_batchbuffer *batch, drm_bacon_bo *bo)
+add_exec_bo(struct intel_batchbuffer *batch, struct brw_bo *bo)
{
if (bo != batch->bo) {
for (int i = 0; i < batch->exec_count; i++) {
return;
}
- drm_bacon_bo_reference(bo);
+ brw_bo_reference(bo);
}
if (batch->exec_count == batch->exec_array_size) {
ret = -errno;
for (int i = 0; i < batch->exec_count; i++) {
- drm_bacon_bo *bo = batch->exec_bos[i];
+ struct brw_bo *bo = batch->exec_bos[i];
bo->idle = false;
- /* Update drm_bacon_bo::offset64 */
+ /* Update brw_bo::offset64 */
if (batch->exec_objects[i].offset != bo->offset64) {
DBG("BO %d migrated: 0x%" PRIx64 " -> 0x%llx\n",
bo->gem_handle, bo->offset64, batch->exec_objects[i].offset);
int ret = 0;
if (brw->has_llc) {
- drm_bacon_bo_unmap(batch->bo);
+ brw_bo_unmap(batch->bo);
} else {
- ret = drm_bacon_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
+ ret = brw_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
- ret = drm_bacon_bo_subdata(batch->bo,
+ ret = brw_bo_subdata(batch->bo,
batch->state_batch_offset,
batch->bo->size - batch->state_batch_offset,
(char *)batch->map + batch->state_batch_offset);
if (brw->throttle_batch[0] == NULL) {
brw->throttle_batch[0] = brw->batch.bo;
- drm_bacon_bo_reference(brw->throttle_batch[0]);
+ brw_bo_reference(brw->throttle_batch[0]);
}
if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
fprintf(stderr, "waiting for idle\n");
- drm_bacon_bo_wait_rendering(brw->batch.bo);
+ brw_bo_wait_rendering(brw->batch.bo);
}
/* Start a new batch buffer. */
}
bool
-brw_batch_references(struct intel_batchbuffer *batch, drm_bacon_bo *bo)
+brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo)
{
for (int i = 0; i < batch->exec_count; i++) {
if (batch->exec_bos[i] == bo)
*/
uint64_t
brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
- drm_bacon_bo *target, uint32_t target_offset,
+ struct brw_bo *target, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
{
if (batch->reloc_count == batch->reloc_array_size) {
static void
load_sized_register_mem(struct brw_context *brw,
uint32_t reg,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset,
int size)
void
brw_load_register_mem(struct brw_context *brw,
uint32_t reg,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset)
{
void
brw_load_register_mem64(struct brw_context *brw,
uint32_t reg,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t read_domains, uint32_t write_domain,
uint32_t offset)
{
*/
void
brw_store_register_mem32(struct brw_context *brw,
- drm_bacon_bo *bo, uint32_t reg, uint32_t offset)
+ struct brw_bo *bo, uint32_t reg, uint32_t offset)
{
assert(brw->gen >= 6);
*/
void
brw_store_register_mem64(struct brw_context *brw,
- drm_bacon_bo *bo, uint32_t reg, uint32_t offset)
+ struct brw_bo *bo, uint32_t reg, uint32_t offset)
{
assert(brw->gen >= 6);
* Write 32-bits of immediate data to a GPU memory buffer.
*/
void
-brw_store_data_imm32(struct brw_context *brw, drm_bacon_bo *bo,
+brw_store_data_imm32(struct brw_context *brw, struct brw_bo *bo,
uint32_t offset, uint32_t imm)
{
assert(brw->gen >= 6);
* Write 64-bits of immediate data to a GPU memory buffer.
*/
void
-brw_store_data_imm64(struct brw_context *brw, drm_bacon_bo *bo,
+brw_store_data_imm64(struct brw_context *brw, struct brw_bo *bo,
uint32_t offset, uint64_t imm)
{
assert(brw->gen >= 6);
bool brw_batch_has_aperture_space(struct brw_context *brw,
unsigned extra_space_in_bytes);
-bool brw_batch_references(struct intel_batchbuffer *batch, drm_bacon_bo *bo);
+bool brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo);
uint64_t brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
- drm_bacon_bo *target, uint32_t target_offset,
+ struct brw_bo *target, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
static inline uint32_t
intelEmitCopyBlit(struct brw_context *brw,
GLuint cpp,
int32_t src_pitch,
- drm_bacon_bo *src_buffer,
+ struct brw_bo *src_buffer,
GLuint src_offset,
uint32_t src_tiling,
int32_t dst_pitch,
- drm_bacon_bo *dst_buffer,
+ struct brw_bo *dst_buffer,
GLuint dst_offset,
uint32_t dst_tiling,
GLshort src_x, GLshort src_y,
GLubyte *src_bits, GLuint src_size,
GLuint fg_color,
GLshort dst_pitch,
- drm_bacon_bo *dst_buffer,
+ struct brw_bo *dst_buffer,
GLuint dst_offset,
uint32_t dst_tiling,
GLshort x, GLshort y,
*/
void
intel_emit_linear_blit(struct brw_context *brw,
- drm_bacon_bo *dst_bo,
+ struct brw_bo *dst_bo,
unsigned int dst_offset,
- drm_bacon_bo *src_bo,
+ struct brw_bo *src_bo,
unsigned int src_offset,
unsigned int size)
{
intelEmitCopyBlit(struct brw_context *brw,
GLuint cpp,
int32_t src_pitch,
- drm_bacon_bo *src_buffer,
+ struct brw_bo *src_buffer,
GLuint src_offset,
uint32_t src_tiling,
int32_t dst_pitch,
- drm_bacon_bo *dst_buffer,
+ struct brw_bo *dst_buffer,
GLuint dst_offset,
uint32_t dst_tiling,
GLshort srcx, GLshort srcy,
GLubyte *src_bits, GLuint src_size,
GLuint fg_color,
GLshort dst_pitch,
- drm_bacon_bo *dst_buffer,
+ struct brw_bo *dst_buffer,
GLuint dst_offset,
uint32_t dst_tiling,
GLshort x, GLshort y,
GLshort w, GLshort h,
GLenum logic_op);
void intel_emit_linear_blit(struct brw_context *brw,
- drm_bacon_bo *dst_bo,
+ struct brw_bo *dst_bo,
unsigned int dst_offset,
- drm_bacon_bo *src_bo,
+ struct brw_bo *src_bo,
unsigned int src_offset,
unsigned int size);
intel_obj->gpu_active_end = 0;
}
-/** Allocates a new drm_bacon_bo to store the data for the buffer object. */
+/** Allocates a new brw_bo to store the data for the buffer object. */
static void
alloc_buffer_object(struct brw_context *brw,
struct intel_buffer_object *intel_obj)
{
- intel_obj->buffer = drm_bacon_bo_alloc(brw->bufmgr, "bufferobj",
+ intel_obj->buffer = brw_bo_alloc(brw->bufmgr, "bufferobj",
intel_obj->Base.Size, 64);
/* the buffer might be bound as a uniform buffer, need to update it
static void
release_buffer(struct intel_buffer_object *intel_obj)
{
- drm_bacon_bo_unreference(intel_obj->buffer);
+ brw_bo_unreference(intel_obj->buffer);
intel_obj->buffer = NULL;
}
*/
_mesa_buffer_unmap_all_mappings(ctx, obj);
- drm_bacon_bo_unreference(intel_obj->buffer);
+ brw_bo_unreference(intel_obj->buffer);
_mesa_delete_buffer_object(ctx, obj);
}
return false;
if (data != NULL)
- drm_bacon_bo_subdata(intel_obj->buffer, 0, size, data);
+ brw_bo_subdata(intel_obj->buffer, 0, size, data);
}
return true;
if (offset + size <= intel_obj->gpu_active_start ||
intel_obj->gpu_active_end <= offset) {
if (brw->has_llc) {
- drm_bacon_gem_bo_map_unsynchronized(intel_obj->buffer);
+ brw_bo_map_unsynchronized(intel_obj->buffer);
memcpy(intel_obj->buffer->virtual + offset, data, size);
- drm_bacon_bo_unmap(intel_obj->buffer);
+ brw_bo_unmap(intel_obj->buffer);
if (intel_obj->gpu_active_end > intel_obj->gpu_active_start)
intel_obj->prefer_stall_to_blit = true;
}
busy =
- drm_bacon_bo_busy(intel_obj->buffer) ||
+ brw_bo_busy(intel_obj->buffer) ||
brw_batch_references(&brw->batch, intel_obj->buffer);
if (busy) {
if (size == intel_obj->Base.Size) {
/* Replace the current busy bo so the subdata doesn't stall. */
- drm_bacon_bo_unreference(intel_obj->buffer);
+ brw_bo_unreference(intel_obj->buffer);
alloc_buffer_object(brw, intel_obj);
} else if (!intel_obj->prefer_stall_to_blit) {
perf_debug("Using a blit copy to avoid stalling on "
(long)offset, (long)offset + size, (long)(size/1024),
intel_obj->gpu_active_start,
intel_obj->gpu_active_end);
- drm_bacon_bo *temp_bo =
- drm_bacon_bo_alloc(brw->bufmgr, "subdata temp", size, 64);
+ struct brw_bo *temp_bo =
+ brw_bo_alloc(brw->bufmgr, "subdata temp", size, 64);
- drm_bacon_bo_subdata(temp_bo, 0, size, data);
+ brw_bo_subdata(temp_bo, 0, size, data);
intel_emit_linear_blit(brw,
intel_obj->buffer, offset,
temp_bo, 0,
size);
- drm_bacon_bo_unreference(temp_bo);
+ brw_bo_unreference(temp_bo);
return;
} else {
perf_debug("Stalling on glBufferSubData(%ld, %ld) (%ldkb) to a busy "
}
}
- drm_bacon_bo_subdata(intel_obj->buffer, offset, size, data);
+ brw_bo_subdata(intel_obj->buffer, offset, size, data);
mark_buffer_inactive(intel_obj);
}
if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
intel_batchbuffer_flush(brw);
}
- drm_bacon_bo_get_subdata(intel_obj->buffer, offset, size, data);
+ brw_bo_get_subdata(intel_obj->buffer, offset, size, data);
mark_buffer_inactive(intel_obj);
}
if (!(access & GL_MAP_UNSYNCHRONIZED_BIT)) {
if (brw_batch_references(&brw->batch, intel_obj->buffer)) {
if (access & GL_MAP_INVALIDATE_BUFFER_BIT) {
- drm_bacon_bo_unreference(intel_obj->buffer);
+ brw_bo_unreference(intel_obj->buffer);
alloc_buffer_object(brw, intel_obj);
} else {
perf_debug("Stalling on the GPU for mapping a busy buffer "
"object\n");
intel_batchbuffer_flush(brw);
}
- } else if (drm_bacon_bo_busy(intel_obj->buffer) &&
+ } else if (brw_bo_busy(intel_obj->buffer) &&
(access & GL_MAP_INVALIDATE_BUFFER_BIT)) {
- drm_bacon_bo_unreference(intel_obj->buffer);
+ brw_bo_unreference(intel_obj->buffer);
alloc_buffer_object(brw, intel_obj);
}
}
*/
if (!(access & (GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_PERSISTENT_BIT)) &&
(access & GL_MAP_INVALIDATE_RANGE_BIT) &&
- drm_bacon_bo_busy(intel_obj->buffer)) {
+ brw_bo_busy(intel_obj->buffer)) {
/* Ensure that the base alignment of the allocation meets the alignment
* guarantees the driver has advertised to the application.
*/
const unsigned alignment = ctx->Const.MinMapBufferAlignment;
intel_obj->map_extra[index] = (uintptr_t) offset % alignment;
- intel_obj->range_map_bo[index] = drm_bacon_bo_alloc(brw->bufmgr,
+ intel_obj->range_map_bo[index] = brw_bo_alloc(brw->bufmgr,
"BO blit temp",
length +
intel_obj->map_extra[index],
alignment);
if (brw->has_llc) {
- drm_bacon_bo_map(intel_obj->range_map_bo[index],
+ brw_bo_map(intel_obj->range_map_bo[index],
(access & GL_MAP_WRITE_BIT) != 0);
} else {
- drm_bacon_gem_bo_map_gtt(intel_obj->range_map_bo[index]);
+ brw_bo_map_gtt(intel_obj->range_map_bo[index]);
}
obj->Mappings[index].Pointer =
intel_obj->range_map_bo[index]->virtual + intel_obj->map_extra[index];
if (access & GL_MAP_UNSYNCHRONIZED_BIT) {
if (!brw->has_llc && brw->perf_debug &&
- drm_bacon_bo_busy(intel_obj->buffer)) {
+ brw_bo_busy(intel_obj->buffer)) {
perf_debug("MapBufferRange with GL_MAP_UNSYNCHRONIZED_BIT stalling (it's actually synchronized on non-LLC platforms)\n");
}
- drm_bacon_gem_bo_map_unsynchronized(intel_obj->buffer);
+ brw_bo_map_unsynchronized(intel_obj->buffer);
} else if (!brw->has_llc && (!(access & GL_MAP_READ_BIT) ||
(access & GL_MAP_PERSISTENT_BIT))) {
- drm_bacon_gem_bo_map_gtt(intel_obj->buffer);
+ brw_bo_map_gtt(intel_obj->buffer);
mark_buffer_inactive(intel_obj);
} else {
- drm_bacon_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
+ brw_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
mark_buffer_inactive(intel_obj);
}
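
The mapping-mode selection above condenses to one decision per access pattern; a hedged sketch using the same renamed entry points (helper name illustrative):

   static int
   map_for_access(struct brw_context *brw, struct brw_bo *bo, GLbitfield access)
   {
      /* Caller opted out of synchronization (a stall may still occur on
       * non-LLC parts, as the perf_debug above warns). */
      if (access & GL_MAP_UNSYNCHRONIZED_BIT)
         return brw_bo_map_unsynchronized(bo);

      /* Non-LLC: write-only or persistent maps go through the GTT. */
      if (!brw->has_llc && (!(access & GL_MAP_READ_BIT) ||
                            (access & GL_MAP_PERSISTENT_BIT)))
         return brw_bo_map_gtt(bo);

      /* Otherwise a plain CPU map is coherent enough. */
      return brw_bo_map(bo, (access & GL_MAP_WRITE_BIT) != 0);
   }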
assert(intel_obj);
assert(obj->Mappings[index].Pointer);
if (intel_obj->range_map_bo[index] != NULL) {
- drm_bacon_bo_unmap(intel_obj->range_map_bo[index]);
+ brw_bo_unmap(intel_obj->range_map_bo[index]);
if (!(obj->Mappings[index].AccessFlags & GL_MAP_FLUSH_EXPLICIT_BIT)) {
intel_emit_linear_blit(brw,
*/
brw_emit_mi_flush(brw);
- drm_bacon_bo_unreference(intel_obj->range_map_bo[index]);
+ brw_bo_unreference(intel_obj->range_map_bo[index]);
intel_obj->range_map_bo[index] = NULL;
} else if (intel_obj->buffer != NULL) {
- drm_bacon_bo_unmap(intel_obj->buffer);
+ brw_bo_unmap(intel_obj->buffer);
}
obj->Mappings[index].Pointer = NULL;
obj->Mappings[index].Offset = 0;
* Anywhere that uses buffer objects in the pipeline should be using this to
* mark the range of the buffer that is being accessed by the pipeline.
*/
-drm_bacon_bo *
+struct brw_bo *
intel_bufferobj_buffer(struct brw_context *brw,
struct intel_buffer_object *intel_obj,
uint32_t offset, uint32_t size)
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *intel_src = intel_buffer_object(src);
struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
- drm_bacon_bo *src_bo, *dst_bo;
+ struct brw_bo *src_bo, *dst_bo;
if (size == 0)
return;
struct intel_buffer_object
{
struct gl_buffer_object Base;
- drm_bacon_bo *buffer; /* the low-level buffer manager's buffer handle */
+ struct brw_bo *buffer; /* the low-level buffer manager's buffer handle */
- drm_bacon_bo *range_map_bo[MAP_COUNT];
+ struct brw_bo *range_map_bo[MAP_COUNT];
/**
* Alignment offset from the range_map_bo temporary mapping to the returned
/* Get the bm buffer associated with a GL bufferobject:
*/
-drm_bacon_bo *intel_bufferobj_buffer(struct brw_context *brw,
+struct brw_bo *intel_bufferobj_buffer(struct brw_context *brw,
struct intel_buffer_object *obj,
uint32_t offset,
uint32_t size);
const void *data,
uint32_t size,
uint32_t alignment,
- drm_bacon_bo **out_bo,
+ struct brw_bo **out_bo,
uint32_t *out_offset);
void *intel_upload_space(struct brw_context *brw,
uint32_t size,
uint32_t alignment,
- drm_bacon_bo **out_bo,
+ struct brw_bo **out_bo,
uint32_t *out_offset);
void intel_upload_finish(struct brw_context *brw);
};
static int
-bo_set_tiling_internal(drm_bacon_bo *bo, uint32_t tiling_mode, uint32_t stride);
+bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode, uint32_t stride);
-static void bo_free(drm_bacon_bo *bo);
+static void bo_free(struct brw_bo *bo);
static uint32_t
key_hash_uint(const void *key)
return *((unsigned *) a) == *((unsigned *) b);
}
-static drm_bacon_bo *
+static struct brw_bo *
hash_find_bo(struct hash_table *ht, unsigned int key)
{
struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
- return entry ? (drm_bacon_bo *) entry->data : NULL;
+ return entry ? (struct brw_bo *) entry->data : NULL;
}
static unsigned long
}
inline void
-drm_bacon_bo_reference(drm_bacon_bo *bo)
+brw_bo_reference(struct brw_bo *bo)
{
p_atomic_inc(&bo->refcount);
}
int
-drm_bacon_bo_busy(drm_bacon_bo *bo)
+brw_bo_busy(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_busy busy;
}
int
-drm_bacon_bo_madvise(drm_bacon_bo *bo, int state)
+brw_bo_madvise(struct brw_bo *bo, int state)
{
struct drm_i915_gem_madvise madv;
/* drop the oldest entries that have been purged by the kernel */
static void
-drm_bacon_gem_bo_cache_purge_bucket(struct brw_bufmgr *bufmgr,
+brw_bo_cache_purge_bucket(struct brw_bufmgr *bufmgr,
struct bo_cache_bucket *bucket)
{
while (!list_empty(&bucket->head)) {
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
- bo = LIST_ENTRY(drm_bacon_bo, bucket->head.next, head);
- if (drm_bacon_bo_madvise(bo, I915_MADV_DONTNEED))
+ bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
+ if (brw_bo_madvise(bo, I915_MADV_DONTNEED))
break;
list_del(&bo->head);
}
}
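
The bucket purge works hand in hand with the madvise protocol: bos parked in the cache are marked purgeable, and a reuse attempt must first confirm the kernel kept the pages. Sketch of the reuse side (illustrative helper; needs <stdbool.h>):

   static bool
   cached_bo_still_valid(struct brw_bufmgr *bufmgr,
                         struct bo_cache_bucket *bucket, struct brw_bo *bo)
   {
      /* Cached bos were marked I915_MADV_DONTNEED on release, so the
       * kernel may have reclaimed their pages under memory pressure. */
      if (brw_bo_madvise(bo, I915_MADV_WILLNEED))
         return true;            /* kernel kept the backing pages */

      /* Pages were purged: free this bo and sweep the stale bucket. */
      bo_free(bo);
      brw_bo_cache_purge_bucket(bufmgr, bucket);
      return false;
   }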
-static drm_bacon_bo *
+static struct brw_bo *
bo_alloc_internal(struct brw_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned long stride,
unsigned int alignment)
{
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
unsigned int page_size = getpagesize();
int ret;
struct bo_cache_bucket *bucket;
* of the list, as it will likely be hot in the GPU
* cache and in the aperture for us.
*/
- bo = LIST_ENTRY(drm_bacon_bo, bucket->head.prev, head);
+ bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);
list_del(&bo->head);
alloc_from_cache = true;
bo->align = alignment;
* allocating a new buffer is probably faster than
* waiting for the GPU to finish.
*/
- bo = LIST_ENTRY(drm_bacon_bo, bucket->head.next, head);
- if (!drm_bacon_bo_busy(bo)) {
+ bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
+ if (!brw_bo_busy(bo)) {
alloc_from_cache = true;
list_del(&bo->head);
}
}
if (alloc_from_cache) {
- if (!drm_bacon_bo_madvise(bo, I915_MADV_WILLNEED)) {
+ if (!brw_bo_madvise(bo, I915_MADV_WILLNEED)) {
bo_free(bo);
- drm_bacon_gem_bo_cache_purge_bucket(bufmgr,
+ brw_bo_cache_purge_bucket(bufmgr,
bucket);
goto retry;
}
return NULL;
}
-drm_bacon_bo *
-drm_bacon_bo_alloc_for_render(struct brw_bufmgr *bufmgr,
+struct brw_bo *
+brw_bo_alloc_for_render(struct brw_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment)
I915_TILING_NONE, 0, alignment);
}
-drm_bacon_bo *
-drm_bacon_bo_alloc(struct brw_bufmgr *bufmgr,
+struct brw_bo *
+brw_bo_alloc(struct brw_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment)
return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0, 0);
}
-drm_bacon_bo *
-drm_bacon_bo_alloc_tiled(struct brw_bufmgr *bufmgr, const char *name,
+struct brw_bo *
+brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr, const char *name,
int x, int y, int cpp, uint32_t *tiling_mode,
unsigned long *pitch, unsigned long flags)
{
}
/**
- * Returns a drm_bacon_bo wrapping the given buffer object handle.
+ * Returns a brw_bo wrapping the given buffer object handle.
*
* This can be used when one application needs to pass a buffer object
* to another.
*/
-drm_bacon_bo *
-drm_bacon_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
+struct brw_bo *
+brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
const char *name,
unsigned int handle)
{
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
int ret;
struct drm_gem_open open_arg;
struct drm_i915_gem_get_tiling get_tiling;
pthread_mutex_lock(&bufmgr->lock);
bo = hash_find_bo(bufmgr->name_table, handle);
if (bo) {
- drm_bacon_bo_reference(bo);
+ brw_bo_reference(bo);
goto out;
}
*/
bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
if (bo) {
- drm_bacon_bo_reference(bo);
+ brw_bo_reference(bo);
goto out;
}
}
static void
-bo_free(drm_bacon_bo *bo)
+bo_free(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_gem_close close;
}
static void
-bo_mark_mmaps_incoherent(drm_bacon_bo *bo)
+bo_mark_mmaps_incoherent(struct brw_bo *bo)
{
#if HAVE_VALGRIND
if (bo->mem_virtual)
&bufmgr->cache_bucket[i];
while (!list_empty(&bucket->head)) {
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
- bo = LIST_ENTRY(drm_bacon_bo, bucket->head.next, head);
+ bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
if (time - bo->free_time <= 1)
break;
limit = 0;
while (bufmgr->vma_count > limit) {
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
- bo = LIST_ENTRY(drm_bacon_bo, bufmgr->vma_cache.next, vma_list);
+ bo = LIST_ENTRY(struct brw_bo, bufmgr->vma_cache.next, vma_list);
assert(bo->map_count == 0);
list_delinit(&bo->vma_list);
}
static void
-bo_close_vma(struct brw_bufmgr *bufmgr, drm_bacon_bo *bo)
+bo_close_vma(struct brw_bufmgr *bufmgr, struct brw_bo *bo)
{
bufmgr->vma_open--;
list_addtail(&bo->vma_list, &bufmgr->vma_cache);
}
static void
-bo_open_vma(struct brw_bufmgr *bufmgr, drm_bacon_bo *bo)
+bo_open_vma(struct brw_bufmgr *bufmgr, struct brw_bo *bo)
{
bufmgr->vma_open++;
list_del(&bo->vma_list);
}
static void
-bo_unreference_final(drm_bacon_bo *bo, time_t time)
+bo_unreference_final(struct brw_bo *bo, time_t time)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
struct bo_cache_bucket *bucket;
bucket = bucket_for_size(bufmgr, bo->size);
/* Put the buffer into our internal cache for reuse if we can. */
if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
- drm_bacon_bo_madvise(bo, I915_MADV_DONTNEED)) {
+ brw_bo_madvise(bo, I915_MADV_DONTNEED)) {
bo->free_time = time;
bo->name = NULL;
}
void
-drm_bacon_bo_unreference(drm_bacon_bo *bo)
+brw_bo_unreference(struct brw_bo *bo)
{
if (bo == NULL)
return;
}
int
-drm_bacon_bo_map(drm_bacon_bo *bo, int write_enable)
+brw_bo_map(struct brw_bo *bo, int write_enable)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_set_domain set_domain;
}
static int
-map_gtt(drm_bacon_bo *bo)
+map_gtt(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
int ret;
}
int
-drm_bacon_gem_bo_map_gtt(drm_bacon_bo *bo)
+brw_bo_map_gtt(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_set_domain set_domain;
*/
int
-drm_bacon_gem_bo_map_unsynchronized(drm_bacon_bo *bo)
+brw_bo_map_unsynchronized(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
int ret;
/* If the CPU cache isn't coherent with the GTT, then use a
* regular synchronized mapping. The problem is that we don't
* track where the buffer was last used on the CPU side in
- * terms of drm_bacon_bo_map vs drm_bacon_gem_bo_map_gtt, so
+ * terms of brw_bo_map vs brw_bo_map_gtt, so
* we would potentially corrupt the buffer even when the user
* does reasonable things.
*/
if (!bufmgr->has_llc)
- return drm_bacon_gem_bo_map_gtt(bo);
+ return brw_bo_map_gtt(bo);
pthread_mutex_lock(&bufmgr->lock);
}
int
-drm_bacon_bo_unmap(drm_bacon_bo *bo)
+brw_bo_unmap(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
int ret = 0;
}
int
-drm_bacon_bo_subdata(drm_bacon_bo *bo, unsigned long offset,
+brw_bo_subdata(struct brw_bo *bo, unsigned long offset,
unsigned long size, const void *data)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
}
int
-drm_bacon_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
+brw_bo_get_subdata(struct brw_bo *bo, unsigned long offset,
unsigned long size, void *data)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
/** Waits for all GPU rendering with the object to have completed. */
void
-drm_bacon_bo_wait_rendering(drm_bacon_bo *bo)
+brw_bo_wait_rendering(struct brw_bo *bo)
{
- drm_bacon_gem_bo_start_gtt_access(bo, 1);
+ brw_bo_start_gtt_access(bo, 1);
}
/**
* value describes the error. Of particular interest is -ETIME when the wait has
* failed to yield the desired result.
*
- * Similar to drm_bacon_gem_bo_wait_rendering except a timeout parameter allows
+ * Similar to brw_bo_wait_rendering except a timeout parameter allows
* the operation to give up after a certain amount of time. Another subtle
* difference is the internal locking semantics are different (this variant does
* not hold the lock for the duration of the wait). This makes the wait subject
* promise, upgrade to latest stable kernels if this is the case.
*/
int
-drm_bacon_gem_bo_wait(drm_bacon_bo *bo, int64_t timeout_ns)
+brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_wait wait;
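
A caller-side sketch of the timeout behaviour described above (needs <errno.h>; the helper name is illustrative):

   static bool
   bo_idle_within(struct brw_bo *bo, int64_t timeout_ns)
   {
      int ret = brw_bo_wait(bo, timeout_ns);
      if (ret == -ETIME)
         return false;      /* wait expired with the bo still busy */
      return ret == 0;      /* 0 on success; other negatives are errors */
   }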
/**
* Sets the object to the GTT read and possibly write domain, used by the X
- * 2D driver in the absence of kernel support to do drm_bacon_gem_bo_map_gtt().
+ * 2D driver in the absence of kernel support to do brw_bo_map_gtt().
*
- * In combination with drm_bacon_gem_bo_pin() and manual fence management, we
+ * In combination with brw_bo_pin() and manual fence management, we
* can do tiled pixmaps this way.
*/
void
-drm_bacon_gem_bo_start_gtt_access(drm_bacon_bo *bo, int write_enable)
+brw_bo_start_gtt_access(struct brw_bo *bo, int write_enable)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_set_domain set_domain;
for (int i = 0; i < bufmgr->num_buckets; i++) {
struct bo_cache_bucket *bucket =
&bufmgr->cache_bucket[i];
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
while (!list_empty(&bucket->head)) {
- bo = LIST_ENTRY(drm_bacon_bo, bucket->head.next, head);
+ bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
list_del(&bo->head);
bo_free(bo);
}
static int
-bo_set_tiling_internal(drm_bacon_bo *bo, uint32_t tiling_mode, uint32_t stride)
+bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode, uint32_t stride)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
struct drm_i915_gem_set_tiling set_tiling;
}
int
-drm_bacon_bo_set_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
+brw_bo_set_tiling(struct brw_bo *bo, uint32_t * tiling_mode,
uint32_t stride)
{
int ret;
}
int
-drm_bacon_bo_get_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
+brw_bo_get_tiling(struct brw_bo *bo, uint32_t * tiling_mode,
uint32_t *swizzle_mode)
{
*tiling_mode = bo->tiling_mode;
return 0;
}
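
Import-by-name plus a tiling query is the usual pairing for these entry points; a sketch using the functions as renamed here (helper name illustrative):

   static struct brw_bo *
   import_named_image(struct brw_bufmgr *bufmgr, unsigned int flink_name,
                      uint32_t *tiling, uint32_t *swizzle)
   {
      struct brw_bo *bo =
         brw_bo_gem_create_from_name(bufmgr, "image", flink_name);
      if (bo != NULL)
         brw_bo_get_tiling(bo, tiling, swizzle);
      return bo;
   }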
-drm_bacon_bo *
-drm_bacon_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd, int size)
+struct brw_bo *
+brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd, int size)
{
int ret;
uint32_t handle;
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
struct drm_i915_gem_get_tiling get_tiling;
pthread_mutex_lock(&bufmgr->lock);
*/
bo = hash_find_bo(bufmgr->handle_table, handle);
if (bo) {
- drm_bacon_bo_reference(bo);
+ brw_bo_reference(bo);
goto out;
}
}
int
-drm_bacon_bo_gem_export_to_prime(drm_bacon_bo *bo, int *prime_fd)
+brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
}
int
-drm_bacon_bo_flink(drm_bacon_bo *bo, uint32_t *name)
+brw_bo_flink(struct brw_bo *bo, uint32_t *name)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
* as scanout buffers
*/
int
-drm_bacon_bo_disable_reuse(drm_bacon_bo *bo)
+brw_bo_disable_reuse(struct brw_bo *bo)
{
bo->reusable = false;
return 0;
}
int
-drm_bacon_bo_is_reusable(drm_bacon_bo *bo)
+brw_bo_is_reusable(struct brw_bo *bo)
{
return bo->reusable;
}
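
Reuse must be disabled for any bo whose storage escapes the process, such as the scanout case noted above. Sketch (illustrative helper; brw_bo_flink() returns 0 on success, per its use in intel_query_image below):

   static int
   export_bo_for_scanout(struct brw_bo *bo, uint32_t *flink_name)
   {
      int ret = brw_bo_flink(bo, flink_name);
      if (ret == 0) {
         /* The global name may outlive us; never hand this bo's storage
          * back out of the allocation cache to an unrelated allocation. */
         brw_bo_disable_reuse(bo);
      }
      return ret;
   }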
return ret;
}
-void *drm_bacon_gem_bo_map__gtt(drm_bacon_bo *bo)
+void *brw_bo_map__gtt(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
return bo->gtt_virtual;
}
-void *drm_bacon_gem_bo_map__cpu(drm_bacon_bo *bo)
+void *brw_bo_map__cpu(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
return bo->mem_virtual;
}
-void *drm_bacon_gem_bo_map__wc(drm_bacon_bo *bo)
+void *brw_bo_map__wc(struct brw_bo *bo)
{
struct brw_bufmgr *bufmgr = bo->bufmgr;
}
void
-brw_render_cache_set_add_bo(struct brw_context *brw, drm_bacon_bo *bo)
+brw_render_cache_set_add_bo(struct brw_context *brw, struct brw_bo *bo)
{
_mesa_set_add(brw->render_cache, bo);
}
* different caches within a batchbuffer, it's all our responsibility.
*/
void
-brw_render_cache_set_check_flush(struct brw_context *brw, drm_bacon_bo *bo)
+brw_render_cache_set_check_flush(struct brw_context *brw, struct brw_bo *bo)
{
if (!_mesa_set_search(brw->render_cache, bo))
return;
struct intel_renderbuffer *irb);
void brw_render_cache_set_clear(struct brw_context *brw);
-void brw_render_cache_set_add_bo(struct brw_context *brw, drm_bacon_bo *bo);
-void brw_render_cache_set_check_flush(struct brw_context *brw, drm_bacon_bo *bo);
+void brw_render_cache_set_add_bo(struct brw_context *brw, struct brw_bo *bo);
+void brw_render_cache_set_check_flush(struct brw_context *brw, struct brw_bo *bo);
unsigned
intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples);
struct __DRIimageRec {
struct intel_screen *screen;
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
uint32_t pitch; /**< in bytes */
GLenum internal_format;
uint32_t dri_format;
if (format == MESA_FORMAT_S_UINT8) {
/* Align to size of W tile, 64x64. */
- mt->bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "miptree",
- ALIGN(mt->total_width, 64),
- ALIGN(mt->total_height, 64),
- mt->cpp, &mt->tiling, &pitch,
- alloc_flags);
+ mt->bo = brw_bo_alloc_tiled(brw->bufmgr, "miptree",
+ ALIGN(mt->total_width, 64),
+ ALIGN(mt->total_height, 64),
+ mt->cpp, &mt->tiling, &pitch,
+ alloc_flags);
} else {
- mt->bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "miptree",
- mt->total_width, mt->total_height,
- mt->cpp, &mt->tiling, &pitch,
- alloc_flags);
+ mt->bo = brw_bo_alloc_tiled(brw->bufmgr, "miptree",
+ mt->total_width, mt->total_height,
+ mt->cpp, &mt->tiling, &pitch,
+ alloc_flags);
}
mt->pitch = pitch;
mt->total_width, mt->total_height);
mt->tiling = I915_TILING_X;
- drm_bacon_bo_unreference(mt->bo);
- mt->bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "miptree",
+ brw_bo_unreference(mt->bo);
+ mt->bo = brw_bo_alloc_tiled(brw->bufmgr, "miptree",
mt->total_width, mt->total_height, mt->cpp,
&mt->tiling, &pitch, alloc_flags);
mt->pitch = pitch;
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct brw_context *brw,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
mesa_format format,
uint32_t offset,
uint32_t width,
uint32_t tiling, swizzle;
GLenum target;
- drm_bacon_bo_get_tiling(bo, &tiling, &swizzle);
+ brw_bo_get_tiling(bo, &tiling, &swizzle);
/* Nothing will be able to use this miptree with the BO if the offset isn't
* aligned.
if (!mt)
return NULL;
- drm_bacon_bo_reference(bo);
+ brw_bo_reference(bo);
mt->bo = bo;
mt->pitch = pitch;
mt->offset = offset;
void
intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
struct intel_renderbuffer *irb,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t width, uint32_t height,
uint32_t pitch)
{
if (hiz_buf->mt)
intel_miptree_release(&hiz_buf->mt);
else
- drm_bacon_bo_unreference(hiz_buf->aux_base.bo);
+ brw_bo_unreference(hiz_buf->aux_base.bo);
free(hiz_buf);
}
DBG("%s deleting %p\n", __func__, *mt);
- drm_bacon_bo_unreference((*mt)->bo);
+ brw_bo_unreference((*mt)->bo);
intel_miptree_release(&(*mt)->stencil_mt);
intel_miptree_release(&(*mt)->r8stencil_mt);
intel_miptree_hiz_buffer_free((*mt)->hiz_buf);
if ((*mt)->mcs_buf) {
- drm_bacon_bo_unreference((*mt)->mcs_buf->bo);
+ brw_bo_unreference((*mt)->mcs_buf->bo);
free((*mt)->mcs_buf);
}
intel_resolve_map_clear(&(*mt)->hiz_map);
*
* Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
*/
- const int ret = drm_bacon_gem_bo_map_gtt(mt->mcs_buf->bo);
+ const int ret = brw_bo_map_gtt(mt->mcs_buf->bo);
if (unlikely(ret)) {
fprintf(stderr, "Failed to map mcs buffer into GTT\n");
- drm_bacon_bo_unreference(mt->mcs_buf->bo);
+ brw_bo_unreference(mt->mcs_buf->bo);
free(mt->mcs_buf);
return;
}
void *data = mt->mcs_buf->bo->virtual;
memset(data, init_value, mt->mcs_buf->size);
- drm_bacon_bo_unmap(mt->mcs_buf->bo);
+ brw_bo_unmap(mt->mcs_buf->bo);
}
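
The MCS initialisation above is the generic map-fill-unmap shape; as a standalone sketch (illustrative helper; needs <string.h>):

   static int
   fill_bo_gtt(struct brw_bo *bo, size_t size, uint8_t value)
   {
      int ret = brw_bo_map_gtt(bo);
      if (ret != 0)
         return ret;                     /* caller decides how to recover */

      memset(bo->virtual, value, size);  /* bo->virtual is set by the map */
      return brw_bo_unmap(bo);
   }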
static struct intel_miptree_aux_buffer *
* structure should go away. We use miptree create simply as a means to make
* sure all the constraints for the buffer are satisfied.
*/
- drm_bacon_bo_reference(temp_mt->bo);
+ brw_bo_reference(temp_mt->bo);
intel_miptree_release(&temp_mt);
return buf;
* Therefore one can pass the ISL dimensions in terms of bytes instead of
* trying to recalculate based on different format block sizes.
*/
- buf->bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "ccs-miptree",
+ buf->bo = brw_bo_alloc_tiled(brw->bufmgr, "ccs-miptree",
buf->pitch, buf->size / buf->pitch,
1, &tiling, &pitch, alloc_flags);
if (buf->bo) {
unsigned long pitch;
uint32_t tiling = I915_TILING_Y;
- buf->aux_base.bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "hiz",
+ buf->aux_base.bo = brw_bo_alloc_tiled(brw->bufmgr, "hiz",
hz_width, hz_height, 1,
&tiling, &pitch,
BO_ALLOC_FOR_RENDER);
free(buf);
return NULL;
} else if (tiling != I915_TILING_Y) {
- drm_bacon_bo_unreference(buf->aux_base.bo);
+ brw_bo_unreference(buf->aux_base.bo);
free(buf);
return NULL;
}
unsigned long pitch;
uint32_t tiling = I915_TILING_Y;
- buf->aux_base.bo = drm_bacon_bo_alloc_tiled(brw->bufmgr, "hiz",
+ buf->aux_base.bo = brw_bo_alloc_tiled(brw->bufmgr, "hiz",
hz_width, hz_height, 1,
&tiling, &pitch,
BO_ALLOC_FOR_RENDER);
free(buf);
return NULL;
} else if (tiling != I915_TILING_Y) {
- drm_bacon_bo_unreference(buf->aux_base.bo);
+ brw_bo_unreference(buf->aux_base.bo);
free(buf);
return NULL;
}
if (mt->mcs_buf) {
intel_miptree_all_slices_resolve_color(brw, mt, 0);
mt->aux_disable |= (INTEL_AUX_DISABLE_CCS | INTEL_AUX_DISABLE_MCS);
- drm_bacon_bo_unreference(mt->mcs_buf->bo);
+ brw_bo_unreference(mt->mcs_buf->bo);
free(mt->mcs_buf);
mt->mcs_buf = NULL;
*/
intel_miptree_all_slices_resolve_color(brw, mt, 0);
- drm_bacon_bo *bo = mt->bo;
+ struct brw_bo *bo = mt->bo;
if (brw_batch_references(&brw->batch, bo))
intel_batchbuffer_flush(brw);
* long as cache consistency is maintained).
*/
if (mt->tiling != I915_TILING_NONE || mt->is_scanout)
- drm_bacon_gem_bo_map_gtt(bo);
+ brw_bo_map_gtt(bo);
else
- drm_bacon_bo_map(bo, true);
+ brw_bo_map(bo, true);
return bo->virtual;
}
static void
intel_miptree_unmap_raw(struct intel_mipmap_tree *mt)
{
- drm_bacon_bo_unmap(mt->bo);
+ brw_bo_unmap(mt->bo);
}
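
Together with intel_miptree_map_raw() above, the rule of thumb is: tiled or scanout surfaces map through the GTT (so the fence detiles accesses), linear ones take a CPU map, and every map is paired with brw_bo_unmap(). A round-trip sketch (illustrative helper; needs <string.h>):

   static void
   clear_first_row(struct brw_context *brw, struct intel_mipmap_tree *mt)
   {
      /* Tiled or scanout surfaces need the fenced GTT path; linear
       * surfaces can take a plain CPU map. */
      if (mt->tiling != I915_TILING_NONE || mt->is_scanout)
         brw_bo_map_gtt(mt->bo);
      else
         brw_bo_map(mt->bo, true /* write enable */);

      memset(mt->bo->virtual, 0, mt->pitch);   /* one row of bytes */

      brw_bo_unmap(mt->bo);                    /* always pair the map */
   }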
static void
* The hardware has a fixed layout of a texture depending on parameters such
* as the target/type (2D, 3D, CUBE), width, height, pitch, and number of
* mipmap levels. The individual level/layer slices are each 2D rectangles of
- * pixels at some x/y offset from the start of the drm_bacon_bo.
+ * pixels at some x/y offset from the start of the brw_bo.
*
* Original OpenGL allowed texture miplevels to be specified in arbitrary
* order, and a texture may change size over time. Thus, each
* @see RENDER_SURFACE_STATE.AuxiliarySurfaceBaseAddress
* @see 3DSTATE_HIER_DEPTH_BUFFER.AuxiliarySurfaceBaseAddress
*/
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
/**
* Offset into bo where the surface starts.
* @see 3DSTATE_HIER_DEPTH_BUFFER.SurfaceBaseAddress
* @see 3DSTATE_STENCIL_BUFFER.SurfaceBaseAddress
*/
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
/**
* Pitch in bytes.
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct brw_context *brw,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
mesa_format format,
uint32_t offset,
uint32_t width,
void
intel_update_winsys_renderbuffer_miptree(struct brw_context *intel,
struct intel_renderbuffer *irb,
- drm_bacon_bo *bo,
+ struct brw_bo *bo,
uint32_t width, uint32_t height,
uint32_t pitch);
struct brw_context *brw = brw_context(ctx);
struct intel_buffer_object *src = intel_buffer_object(unpack->BufferObj);
GLuint src_offset;
- drm_bacon_bo *src_buffer;
+ struct brw_bo *src_buffer;
DBG("%s\n", __func__);
int dst_pitch;
/* The miptree's buffer. */
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
int error = 0;
intel_batchbuffer_flush(brw);
}
- error = drm_bacon_bo_map(bo, false /* write enable */);
+ error = brw_bo_map(bo, false /* write enable */);
if (error) {
DBG("%s: failed to map bo\n", __func__);
return false;
mem_copy
);
- drm_bacon_bo_unmap(bo);
+ brw_bo_unmap(bo);
return true;
}
intel_image_warn_if_unaligned(__DRIimage *image, const char *func)
{
uint32_t tiling, swizzle;
- drm_bacon_bo_get_tiling(image->bo, &tiling, &swizzle);
+ brw_bo_get_tiling(image->bo, &tiling, &swizzle);
if (tiling != I915_TILING_NONE && (image->offset & 0xfff)) {
_mesa_warning(NULL, "%s: offset 0x%08x not on tile boundary",
&image->tile_x,
&image->tile_y);
- drm_bacon_bo_unreference(image->bo);
+ brw_bo_unreference(image->bo);
image->bo = mt->bo;
- drm_bacon_bo_reference(mt->bo);
+ brw_bo_reference(mt->bo);
}
static __DRIimage *
image->width = width;
image->height = height;
image->pitch = pitch * cpp;
- image->bo = drm_bacon_bo_gem_create_from_name(screen->bufmgr, "image",
+ image->bo = brw_bo_gem_create_from_name(screen->bufmgr, "image",
name);
if (!image->bo) {
free(image);
image->format = rb->Format;
image->offset = 0;
image->data = loaderPrivate;
- drm_bacon_bo_unreference(image->bo);
+ brw_bo_unreference(image->bo);
image->bo = irb->mt->bo;
- drm_bacon_bo_reference(irb->mt->bo);
+ brw_bo_reference(irb->mt->bo);
image->width = rb->Width;
image->height = rb->Height;
image->pitch = irb->mt->pitch;
static void
intel_destroy_image(__DRIimage *image)
{
- drm_bacon_bo_unreference(image->bo);
+ brw_bo_unreference(image->bo);
free(image);
}
return NULL;
cpp = _mesa_get_format_bytes(image->format);
- image->bo = drm_bacon_bo_alloc_tiled(screen->bufmgr, "image",
+ image->bo = brw_bo_alloc_tiled(screen->bufmgr, "image",
width, height, cpp, &tiling,
&pitch, 0);
if (image->bo == NULL) {
*value = image->bo->gem_handle;
return true;
case __DRI_IMAGE_ATTRIB_NAME:
- return !drm_bacon_bo_flink(image->bo, (uint32_t *) value);
+ return !brw_bo_flink(image->bo, (uint32_t *) value);
case __DRI_IMAGE_ATTRIB_FORMAT:
*value = image->dri_format;
return true;
*value = image->planar_format->components;
return true;
case __DRI_IMAGE_ATTRIB_FD:
- return !drm_bacon_bo_gem_export_to_prime(image->bo, value);
+ return !brw_bo_gem_export_to_prime(image->bo, value);
case __DRI_IMAGE_ATTRIB_FOURCC:
return intel_lookup_fourcc(image->dri_format, value);
case __DRI_IMAGE_ATTRIB_NUM_PLANES:
if (image == NULL)
return NULL;
- drm_bacon_bo_reference(orig_image->bo);
+ brw_bo_reference(orig_image->bo);
image->bo = orig_image->bo;
image->internal_format = orig_image->internal_format;
image->planar_format = orig_image->planar_format;
size = end;
}
- image->bo = drm_bacon_bo_gem_create_from_prime(screen->bufmgr,
+ image->bo = brw_bo_gem_create_from_prime(screen->bufmgr,
fds[0], size);
if (image->bo == NULL) {
free(image);
}
image->bo = parent->bo;
- drm_bacon_bo_reference(parent->bo);
+ brw_bo_reference(parent->bo);
image->width = width;
image->height = height;
static bool
intel_detect_swizzling(struct intel_screen *screen)
{
- drm_bacon_bo *buffer;
+ struct brw_bo *buffer;
unsigned long flags = 0;
unsigned long aligned_pitch;
uint32_t tiling = I915_TILING_X;
uint32_t swizzle_mode = 0;
- buffer = drm_bacon_bo_alloc_tiled(screen->bufmgr, "swizzle test",
+ buffer = brw_bo_alloc_tiled(screen->bufmgr, "swizzle test",
64, 64, 4,
&tiling, &aligned_pitch, flags);
if (buffer == NULL)
return false;
- drm_bacon_bo_get_tiling(buffer, &tiling, &swizzle_mode);
- drm_bacon_bo_unreference(buffer);
+ brw_bo_get_tiling(buffer, &tiling, &swizzle_mode);
+ brw_bo_unreference(buffer);
if (swizzle_mode == I915_BIT_6_SWIZZLE_NONE)
return false;
if (screen->no_hw)
return false;
- drm_bacon_bo *results, *bo;
+ struct brw_bo *results, *bo;
uint32_t *batch;
uint32_t offset = 0;
bool success = false;
/* Create a zero'ed temporary buffer for reading our results */
- results = drm_bacon_bo_alloc(screen->bufmgr, "registers", 4096, 0);
+ results = brw_bo_alloc(screen->bufmgr, "registers", 4096, 0);
if (results == NULL)
goto err;
- bo = drm_bacon_bo_alloc(screen->bufmgr, "batchbuffer", 4096, 0);
+ bo = brw_bo_alloc(screen->bufmgr, "batchbuffer", 4096, 0);
if (bo == NULL)
goto err_results;
- if (drm_bacon_bo_map(bo, 1))
+ if (brw_bo_map(bo, 1))
goto err_batch;
batch = bo->virtual;
drmIoctl(dri_screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
/* Check whether the value got written. */
- if (drm_bacon_bo_map(results, false) == 0) {
+ if (brw_bo_map(results, false) == 0) {
success = *((uint32_t *)results->virtual + offset) == expected_value;
- drm_bacon_bo_unmap(results);
+ brw_bo_unmap(results);
}
err_batch:
- drm_bacon_bo_unreference(bo);
+ brw_bo_unreference(bo);
err_results:
- drm_bacon_bo_unreference(results);
+ brw_bo_unreference(results);
err:
return success;
}
* Currently the entire (global) address space for all GTT maps is
* limited to 64bits. That is all objects on the system that are
* setup for GTT mmapping must fit within 64bits. An attempt to use
- * one that exceeds the limit with fail in drm_bacon_bo_map_gtt().
+ * one that exceeds the limit will fail in brw_bo_map_gtt().

*
* Long before we hit that limit, we will be practically limited by
* that any single object must fit in physical memory (RAM). The upper
struct intel_buffer {
__DRIbuffer base;
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
};
static __DRIbuffer *
uint32_t tiling = I915_TILING_X;
unsigned long pitch;
int cpp = format / 8;
- intelBuffer->bo = drm_bacon_bo_alloc_tiled(screen->bufmgr,
+ intelBuffer->bo = brw_bo_alloc_tiled(screen->bufmgr,
"intelAllocateBuffer",
width,
height,
return NULL;
}
- drm_bacon_bo_flink(intelBuffer->bo, &intelBuffer->base.name);
+ brw_bo_flink(intelBuffer->bo, &intelBuffer->base.name);
intelBuffer->base.attachment = attachment;
intelBuffer->base.cpp = cpp;
{
struct intel_buffer *intelBuffer = (struct intel_buffer *) buffer;
- drm_bacon_bo_unreference(intelBuffer->bo);
+ brw_bo_unreference(intelBuffer->bo);
free(intelBuffer);
}
assert(intel_texobj->mt == NULL);
- drm_bacon_bo *bo = intel_bufferobj_buffer(brw, intel_buffer_obj,
+ struct brw_bo *bo = intel_bufferobj_buffer(brw, intel_buffer_obj,
buffer_offset,
row_stride * image->Height);
intel_texobj->mt =
struct intel_texture_image *intelImage = intel_texture_image(texImage);
bool ok;
- bool tex_busy = intelImage->mt && drm_bacon_bo_busy(intelImage->mt->bo);
+ bool tex_busy = intelImage->mt && brw_bo_busy(intelImage->mt->bo);
DBG("%s mesa_format %s target %s format %s type %s level %d %dx%dx%d\n",
__func__, _mesa_get_format_name(texImage->TexFormat),
int dst_pitch;
/* The miptree's buffer. */
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
int error = 0;
intel_batchbuffer_flush(brw);
}
- error = drm_bacon_bo_map(bo, false /* write enable */);
+ error = brw_bo_map(bo, false /* write enable */);
if (error) {
DBG("%s: failed to map bo\n", __func__);
return false;
mem_copy
);
- drm_bacon_bo_unmap(bo);
+ brw_bo_unmap(bo);
return true;
}
int src_pitch;
/* The miptree's buffer. */
- drm_bacon_bo *bo;
+ struct brw_bo *bo;
int error = 0;
intel_batchbuffer_flush(brw);
}
- error = drm_bacon_bo_map(bo, true /* write enable */);
+ error = brw_bo_map(bo, true /* write enable */);
if (error || bo->virtual == NULL) {
DBG("%s: failed to map bo\n", __func__);
return false;
mem_copy
);
- drm_bacon_bo_unmap(bo);
+ brw_bo_unmap(bo);
return true;
}
struct intel_mipmap_tree *mt = intel_texture_image(texImage)->mt;
bool ok;
- bool tex_busy = mt && drm_bacon_bo_busy(mt->bo);
+ bool tex_busy = mt && brw_bo_busy(mt->bo);
if (mt && mt->format == MESA_FORMAT_S_UINT8)
mt->r8stencil_needs_update = true;
if (!brw->upload.bo)
return;
- drm_bacon_bo_unmap(brw->upload.bo);
- drm_bacon_bo_unreference(brw->upload.bo);
+ brw_bo_unmap(brw->upload.bo);
+ brw_bo_unreference(brw->upload.bo);
brw->upload.bo = NULL;
brw->upload.next_offset = 0;
}
intel_upload_space(struct brw_context *brw,
uint32_t size,
uint32_t alignment,
- drm_bacon_bo **out_bo,
+ struct brw_bo **out_bo,
uint32_t *out_offset)
{
uint32_t offset;
}
if (!brw->upload.bo) {
- brw->upload.bo = drm_bacon_bo_alloc(brw->bufmgr, "streamed data",
- MAX2(INTEL_UPLOAD_SIZE, size), 4096);
+ brw->upload.bo = brw_bo_alloc(brw->bufmgr, "streamed data",
+ MAX2(INTEL_UPLOAD_SIZE, size), 4096);
if (brw->has_llc)
- drm_bacon_bo_map(brw->upload.bo, true);
+ brw_bo_map(brw->upload.bo, true);
else
- drm_bacon_gem_bo_map_gtt(brw->upload.bo);
+ brw_bo_map_gtt(brw->upload.bo);
}
brw->upload.next_offset = offset + size;
*out_offset = offset;
if (*out_bo != brw->upload.bo) {
- drm_bacon_bo_unreference(*out_bo);
+ brw_bo_unreference(*out_bo);
*out_bo = brw->upload.bo;
- drm_bacon_bo_reference(brw->upload.bo);
+ brw_bo_reference(brw->upload.bo);
}
return brw->upload.bo->virtual + offset;
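
A typical caller of the streaming uploader, under the signatures declared earlier in this patch; this is, in effect, what intel_upload_data() in the hunk below does (helper name and the 64-byte alignment are illustrative):

   static void
   upload_and_track(struct brw_context *brw, const void *data, uint32_t size,
                    struct brw_bo **out_bo, uint32_t *out_offset)
   {
      /* Reserve space in the shared streaming bo; *out_bo gains a
       * reference and *out_offset locates the reservation inside it. */
      void *dst = intel_upload_space(brw, size, 64 /* alignment */,
                                     out_bo, out_offset);
      memcpy(dst, data, size);
      /* Caller later drops the reference with brw_bo_unreference(). */
   }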
const void *data,
uint32_t size,
uint32_t alignment,
- drm_bacon_bo **out_bo,
+ struct brw_bo **out_bo,
uint32_t *out_offset)
{
void *dst = intel_upload_space(brw, size, alignment, out_bo, out_offset);