struct bo_cache_bucket *bucket;
bool alloc_from_cache;
uint64_t bo_size;
- bool for_render = false;
+ bool busy = false;
bool zeroed = false;
- if (flags & BO_ALLOC_FOR_RENDER)
- for_render = true;
+ if (flags & BO_ALLOC_BUSY)
+ busy = true;
if (flags & BO_ALLOC_ZEROED)
zeroed = true;
- /* FOR_RENDER really means "I'm ok with a busy BO". This doesn't really
- * jive with ZEROED as we have to wait for it to be idle before we can
- * memset. Just disallow that combination.
+ /* BUSY doesn't really jive with ZEROED as we have to wait for it to
+ * be idle before we can memset. Just disallow that combination.
*/
- assert(!(for_render && zeroed));
+ assert(!(busy && zeroed));
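/* A minimal editorial sketch (not part of the patch) of the flag semantics
 * being asserted above: BO_ALLOC_BUSY tells the cache a still-busy BO is
 * acceptable, while BO_ALLOC_ZEROED requires waiting for idleness before
 * the memset, so the two must never be combined.
 *
 *    unsigned ok  = BO_ALLOC_BUSY;                    // hot-but-busy reuse
 *    unsigned ok2 = BO_ALLOC_ZEROED;                  // wait, then memset
 *    unsigned bad = BO_ALLOC_BUSY | BO_ALLOC_ZEROED;  // trips the assert
 */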
/* Round the allocated size up to a power of two number of pages. */
bucket = bucket_for_size(bufmgr, size);
retry:
alloc_from_cache = false;
if (bucket != NULL && !list_empty(&bucket->head)) {
- if (for_render && !zeroed) {
+ if (busy && !zeroed) {
/* Allocate new render-target BOs from the tail (MRU)
* of the list, as it will likely be hot in the GPU
* cache and in the aperture for us. If the caller
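For context, a minimal sketch of how this branch continues, assuming Mesa's util/list.h LIST_ENTRY/list_del helpers and the brw_bo_busy() query, all of which live outside this excerpt:

struct brw_bo *bo;
if (busy && !zeroed) {
   /* A busy BO is acceptable: take the most-recently-used entry from the
    * tail, as it is likeliest to still be hot in the GPU cache/aperture. */
   bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);
   list_del(&bo->head);
   alloc_from_cache = true;
} else {
   /* The caller needs an idle BO (e.g. to memset for ZEROED): take the
    * least-recently-used entry from the head and reuse it only if idle. */
   bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
   if (!brw_bo_busy(bo)) {
      alloc_from_cache = true;
      list_del(&bo->head);
   }
}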
mt->surf.samples, ISL_TILING_W_BIT,
ISL_SURF_USAGE_STENCIL_BIT |
ISL_SURF_USAGE_TEXTURE_BIT,
- BO_ALLOC_FOR_RENDER, 0, NULL);
+ BO_ALLOC_BUSY, 0, NULL);
if (!mt->stencil_mt)
return false;
ISL_TILING_W_BIT,
ISL_SURF_USAGE_STENCIL_BIT |
ISL_SURF_USAGE_TEXTURE_BIT,
- BO_ALLOC_FOR_RENDER,
+ BO_ALLOC_BUSY,
0,
NULL);
first_level, last_level,
width0, height0, depth0, num_samples, ISL_TILING_Y0_BIT,
ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_TEXTURE_BIT,
- BO_ALLOC_FOR_RENDER, 0, NULL);
+ BO_ALLOC_BUSY, 0, NULL);
if (needs_separate_stencil(brw, mt, format) &&
!make_separate_stencil_surface(brw, mt)) {
etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE;
if (flags & MIPTREE_CREATE_BUSY)
- alloc_flags |= BO_ALLOC_FOR_RENDER;
+ alloc_flags |= BO_ALLOC_BUSY;
isl_tiling_flags_t tiling_flags = (flags & MIPTREE_CREATE_LINEAR) ?
ISL_TILING_LINEAR_BIT : ISL_TILING_ANY_MASK;
brw->gen >= 6 ? depth_only_format : format,
0, 0, width, height, depth, 1, ISL_TILING_Y0_BIT,
ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_TEXTURE_BIT,
- BO_ALLOC_FOR_RENDER, pitch, bo);
+ BO_ALLOC_BUSY, pitch, bo);
if (!mt)
return NULL;
ISL_TILING_W_BIT,
ISL_SURF_USAGE_STENCIL_BIT |
ISL_SURF_USAGE_TEXTURE_BIT,
- BO_ALLOC_FOR_RENDER, pitch, bo);
+ BO_ALLOC_BUSY, pitch, bo);
if (!mt)
return NULL;
* fast-clear operation. In that case, being hot in caches is more useful.
*/
const uint32_t alloc_flags = mt->aux_usage == ISL_AUX_USAGE_CCS_E ?
- BO_ALLOC_ZEROED : BO_ALLOC_FOR_RENDER;
+ BO_ALLOC_ZEROED : BO_ALLOC_BUSY;
mt->mcs_buf = intel_alloc_aux_buffer(brw, "ccs-miptree",
&temp_ccs_surf, alloc_flags, mt);
if (!mt->mcs_buf) {
isl_surf_get_hiz_surf(&brw->isl_dev, &mt->surf, &temp_hiz_surf);
assert(ok);
- const uint32_t alloc_flags = BO_ALLOC_FOR_RENDER;
+ const uint32_t alloc_flags = BO_ALLOC_BUSY;
mt->hiz_buf = intel_alloc_aux_buffer(brw, "hiz-miptree",
&temp_hiz_surf, alloc_flags, mt);
src->surf.samples,
ISL_TILING_Y0_BIT,
ISL_SURF_USAGE_TEXTURE_BIT,
- BO_ALLOC_FOR_RENDER, 0, NULL);
+ BO_ALLOC_BUSY, 0, NULL);
assert(mt->r8stencil_mt);
}