* cached translations.
*/
uint64_t base_addr = gen_aux_map_get_base(aux_map_ctx);
- assert(base_addr != 0 && ALIGN(base_addr, 32 * 1024) == base_addr);
+ assert(base_addr != 0 && align64(base_addr, 32 * 1024) == base_addr);
iris_load_register_imm64(batch, GENX(GFX_AUX_TABLE_BASE_ADDR_num),
base_addr);
batch->last_aux_map_state = aux_map_state_num;
struct aux_map_buffer *tail =
list_last_entry(&ctx->buffers, struct aux_map_buffer, link);
uint64_t gpu = tail->buffer->gpu + ctx->tail_offset;
- uint64_t aligned = ALIGN(gpu, align);
+ uint64_t aligned = align64(gpu, align);
if ((aligned - gpu) + size > ctx->tail_remaining) {
return false;
pthread_mutex_lock(&ctx->mutex);
uint64_t map_addr = address;
uint64_t dest_aux_addr = aux_address;
- assert(ALIGN(address, 64 * 1024) == address);
- assert(ALIGN(aux_address, 4 * 64) == aux_address);
+ assert(align64(address, 64 * 1024) == address);
+ assert(align64(aux_address, 4 * 64) == aux_address);
while (map_addr - address < isl_surf->size_B) {
add_mapping(ctx, map_addr, dest_aux_addr, isl_surf, &state_changed);
map_addr += 64 * 1024;
address + size);
uint64_t map_addr = address;
- assert(ALIGN(address, 64 * 1024) == address);
+ assert(align64(address, 64 * 1024) == address);
while (map_addr - address < size) {
remove_mapping(ctx, map_addr, &state_changed);
map_addr += 64 * 1024;
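/*
 * For reference, a minimal sketch of the 64-bit align-up helper these hunks
 * switch to (illustrative only; the real align64 lives in Mesa's util
 * headers). The name align64_sketch and the standalone includes are
 * assumptions for this example, not part of the patch. The move away from
 * ALIGN presumably keeps the arithmetic in uint64_t so GPU addresses are not
 * truncated on 32-bit builds.
 */
#include <assert.h>
#include <stdint.h>

static inline uint64_t
align64_sketch(uint64_t value, uint64_t alignment)
{
   /* Only power-of-two alignments, matching the 32 KiB / 64 KiB uses above. */
   assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
   /* Round up by adding (alignment - 1), then clear the low bits. */
   return (value + alignment - 1) & ~(alignment - 1);
}

/* Example: align64_sketch(0x10010, 64 * 1024) == 0x20000, and an already
 * aligned value is returned unchanged, which is what the asserts in these
 * hunks rely on (align64(addr, ...) == addr).
 */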