#define GEN_AUX_MAP_ENTRY_Y_TILED_BIT (0x1ull << 52)
#define GEN_AUX_MAP_ENTRY_VALID_BIT 0x1ull
-static void
-add_mapping(struct gen_aux_map_context *ctx, uint64_t address,
- uint64_t aux_address, const struct isl_surf *isl_surf,
- bool *state_changed)
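+/* Compute the format/tiling bits of an AUX-MAP L1 entry for a surface.
+ * The result depends only on the surface's format, bpp, and tiling, so
+ * it can be computed once and reused for every page of the surface.
+ */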
+uint64_t
+gen_aux_map_format_bits_for_isl_surf(const struct isl_surf *isl_surf)
{
+ const struct isl_format_layout *fmtl =
+ isl_format_get_layout(isl_surf->format);
+
+ uint16_t bpp = fmtl->bpb;
+ assert(fmtl->bw == 1 && fmtl->bh == 1 && fmtl->bd == 1);
if (aux_map_debug)
- fprintf(stderr, "AUX-MAP 0x%"PRIx64" => 0x%"PRIx64"\n", address,
- aux_address);
+ fprintf(stderr, "AUX-MAP entry %s, bpp=%d\n",
+ isl_format_get_name(isl_surf->format), bpp);
+
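+   /* Only Y-tiled surfaces can be mapped. Pack the format encoding into
+    * bits 63:58, the bpp encoding into bits 57:54, and flag Y-tiling in
+    * bit 52.
+    */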
+ assert(isl_tiling_is_any_y(isl_surf->tiling));
+ uint64_t format_bits =
+ ((uint64_t)get_format_encoding(isl_surf) << 58) |
+ ((uint64_t)get_bpp_encoding(bpp) << 54) |
+ GEN_AUX_MAP_ENTRY_Y_TILED_BIT;
+
+ assert((format_bits & GEN_AUX_MAP_FORMAT_BITS_MASK) == format_bits);
+
+ return format_bits;
+}
+
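+/* Walk the three-level AUX-MAP translation for @address, allocating an
+ * L1 table on demand. Address bits 47:36 index the L3 table, bits 35:24
+ * the L2 table, and bits 23:16 the L1 table, so one L1 entry translates
+ * a 64KB page of the main surface.
+ */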
+static void
+get_aux_entry(struct gen_aux_map_context *ctx, uint64_t address,
+ uint32_t *l1_index_out, uint64_t *l1_entry_addr_out,
+ uint64_t **l1_entry_map_out)
+{
uint32_t l3_index = (address >> 36) & 0xfff;
   uint64_t *l3_entry = &ctx->level3_map[l3_index];

   uint64_t *l2_map;
   if ((*l3_entry & GEN_AUX_MAP_ENTRY_VALID_BIT) == 0) {
      uint64_t l2_gpu;
      if (add_sub_table(ctx, 32 * 1024, 32 * 1024, &l2_gpu, &l2_map)) {
         if (aux_map_debug)
            fprintf(stderr, "AUX-MAP L3[0x%x]: 0x%"PRIx64", map=%p\n",
                    l3_index, l2_gpu, l2_map);
      } else {
         unreachable("Failed to add L2 Aux-Map Page Table!");
      }
      *l3_entry = (l2_gpu & 0xffffffff8000ULL) | 1;
   } else {
      uint64_t l2_addr = gen_canonical_address(*l3_entry & ~0x7fffULL);
      l2_map = get_u64_entry_ptr(ctx, l2_addr);
   }
   uint32_t l2_index = (address >> 24) & 0xfff;
   uint64_t *l2_entry = &l2_map[l2_index];
- uint64_t *l1_map;
+ uint64_t l1_addr, *l1_map;
if ((*l2_entry & GEN_AUX_MAP_ENTRY_VALID_BIT) == 0) {
- uint64_t l1_gpu;
- if (add_sub_table(ctx, 8 * 1024, 8 * 1024, &l1_gpu, &l1_map)) {
+ if (add_sub_table(ctx, 8 * 1024, 8 * 1024, &l1_addr, &l1_map)) {
if (aux_map_debug)
fprintf(stderr, "AUX-MAP L2[0x%x]: 0x%"PRIx64", map=%p\n",
- l2_index, l1_gpu, l1_map);
+ l2_index, l1_addr, l1_map);
} else {
unreachable("Failed to add L1 Aux-Map Page Table!");
}
- *l2_entry = (l1_gpu & 0xffffffffe000ULL) | 1;
+ *l2_entry = (l1_addr & 0xffffffffe000ULL) | 1;
} else {
- uint64_t l1_addr = gen_canonical_address(*l2_entry & ~0x1fffULL);
+ l1_addr = gen_canonical_address(*l2_entry & ~0x1fffULL);
l1_map = get_u64_entry_ptr(ctx, l1_addr);
}
uint32_t l1_index = (address >> 16) & 0xff;
- uint64_t *l1_entry = &l1_map[l1_index];
+ if (l1_index_out)
+ *l1_index_out = l1_index;
+ if (l1_entry_addr_out)
+ *l1_entry_addr_out = l1_addr + l1_index * sizeof(*l1_map);
+ if (l1_entry_map_out)
+ *l1_entry_map_out = &l1_map[l1_index];
+}
- const struct isl_format_layout *fmtl =
- isl_format_get_layout(isl_surf->format);
- uint16_t bpp = fmtl->bpb;
- assert(fmtl->bw == 1 && fmtl->bh == 1 && fmtl->bd == 1);
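+/* Program the L1 entry that maps the 64KB main-surface page at @address
+ * to the aux data at @aux_address, tagged with the caller-provided
+ * format bits. *state_changed is set when an entry that had been
+ * programmed before is changed, since that requires the aux-map tables
+ * to be invalidated before reuse.
+ */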
+static void
+add_mapping(struct gen_aux_map_context *ctx, uint64_t address,
+ uint64_t aux_address, uint64_t format_bits,
+ bool *state_changed)
+{
if (aux_map_debug)
- fprintf(stderr, "AUX-MAP entry %s, bpp=%d\n",
- isl_format_get_name(isl_surf->format), bpp);
+ fprintf(stderr, "AUX-MAP 0x%"PRIx64" => 0x%"PRIx64"\n", address,
+ aux_address);
+
+ uint32_t l1_index;
+ uint64_t *l1_entry;
+ get_aux_entry(ctx, address, &l1_index, NULL, &l1_entry);
const uint64_t l1_data =
- (aux_address & 0xffffffffff00ULL) |
- ((uint64_t)get_format_encoding(isl_surf) << 58) |
- ((uint64_t)get_bpp_encoding(bpp) << 54) |
- GEN_AUX_MAP_ENTRY_Y_TILED_BIT |
+ (aux_address & GEN_AUX_MAP_ADDRESS_MASK) |
+ format_bits |
GEN_AUX_MAP_ENTRY_VALID_BIT;
   const uint64_t current_l1_data = *l1_entry;
   if ((current_l1_data & GEN_AUX_MAP_ENTRY_VALID_BIT) == 0) {
      if (aux_map_debug)
         fprintf(stderr, "AUX-MAP L1[0x%x] 0x%"PRIx64" -> 0x%"PRIx64"\n",
                 l1_index, current_l1_data, l1_data);
      /* Non-zero bits in 63:1 indicate the entry had been filled before.
       * If those bits don't exactly match the new contents, the aux-map
       * tables must be flushed.
       */
      if (current_l1_data != 0 &&
          (current_l1_data | GEN_AUX_MAP_ENTRY_VALID_BIT) != l1_data)
         *state_changed = true;
      *l1_entry = l1_data;
   } else {
      if (aux_map_debug)
         fprintf(stderr, "AUX-MAP L1[0x%x] is already marked valid!\n",
                 l1_index);
      assert(*l1_entry == l1_data);
   }
}
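+
+/* Return a CPU pointer to the L1 entry covering @address, creating any
+ * missing table levels, and store the entry's GPU address in
+ * *entry_address. The walk itself is protected by the context mutex;
+ * the caller must synchronize any subsequent write to the entry.
+ */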
+uint64_t *
+gen_aux_map_get_entry(struct gen_aux_map_context *ctx,
+ uint64_t address,
+ uint64_t *entry_address)
+{
+ pthread_mutex_lock(&ctx->mutex);
+ uint64_t *l1_entry_map;
+ get_aux_entry(ctx, address, NULL, entry_address, &l1_entry_map);
+ pthread_mutex_unlock(&ctx->mutex);
+
+ return l1_entry_map;
+}
+
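+/* Map @main_size_B bytes of the main surface starting at @address to the
+ * aux data starting at @aux_address, advancing one main/aux page pair
+ * per L1 entry. Both addresses must be page-aligned.
+ */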
void
-gen_aux_map_add_image(struct gen_aux_map_context *ctx,
- const struct isl_surf *isl_surf, uint64_t address,
- uint64_t aux_address)
+gen_aux_map_add_mapping(struct gen_aux_map_context *ctx, uint64_t address,
+ uint64_t aux_address, uint64_t main_size_B,
+ uint64_t format_bits)
{
bool state_changed = false;
pthread_mutex_lock(&ctx->mutex);
   uint64_t map_addr = address;
   uint64_t dest_aux_addr = aux_address;
assert(align64(address, GEN_AUX_MAP_MAIN_PAGE_SIZE) == address);
assert(align64(aux_address, GEN_AUX_MAP_AUX_PAGE_SIZE) == aux_address);
- while (map_addr - address < isl_surf->size_B) {
- add_mapping(ctx, map_addr, dest_aux_addr, isl_surf, &state_changed);
+ while (map_addr - address < main_size_B) {
+ add_mapping(ctx, map_addr, dest_aux_addr, format_bits, &state_changed);
map_addr += GEN_AUX_MAP_MAIN_PAGE_SIZE;
dest_aux_addr += GEN_AUX_MAP_AUX_PAGE_SIZE;
}
   pthread_mutex_unlock(&ctx->mutex);

   if (state_changed)
      p_atomic_inc(&ctx->state_num);
}
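+
+/* Convenience wrapper that derives the format bits from @isl_surf and
+ * maps the surface's entire size_B. A hypothetical caller might do:
+ *
+ *    gen_aux_map_add_image(ctx, &surf, main_addr, aux_addr);
+ *    uint64_t entry_gpu_addr;
+ *    uint64_t *entry =
+ *       gen_aux_map_get_entry(ctx, main_addr, &entry_gpu_addr);
+ */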
+void
+gen_aux_map_add_image(struct gen_aux_map_context *ctx,
+ const struct isl_surf *isl_surf, uint64_t address,
+ uint64_t aux_address)
+{
+ gen_aux_map_add_mapping(ctx, address, aux_address, isl_surf->size_B,
+ gen_aux_map_format_bits_for_isl_surf(isl_surf));
+}
+
/**
 * We mark the leaf entry as invalid, but we don't attempt to clean up the
* other levels of translation mappings. Since we attempt to re-use VMA