(We don't need DELTA2, because the vtable is always the first thing
in the object.) If the function is virtual, then PFN is one plus
twice the index into the vtable; otherwise, it is just a pointer to
- the function. */
+ the function.
+
+ Unfortunately, using the lowest bit of PFN doesn't work in
+ architectures that don't impose alignment requirements on function
+ addresses, or that use the lowest bit to tell one ISA from another,
+ for example. For such architectures, we use the lowest bit of
+ DELTA instead of the lowest bit of the PFN, and DELTA will be
+ multiplied by 2. */
+enum ptrmemfunc_vbit_where_t
+{
+ ptrmemfunc_vbit_in_pfn,
+ ptrmemfunc_vbit_in_delta
+};
/* Get the POINTER_TYPE to the METHOD_TYPE associated with this
pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true,
e3 = PFN_FROM_PTRMEMFUNC (function);
+ vtbl = convert_pointer_to (ptr_type_node, instance);
+ delta = cp_convert (ptrdiff_type_node,
+ build_component_ref (function, delta_identifier,
+ NULL_TREE, 0));
+
/* This used to avoid checking for virtual functions if basetype
has no virtual functions, according to an earlier ANSI draft.
With the final ISO C++ rules, such an optimization is
load-with-sign-extend, while the second used normal load then
shift to sign-extend. An optimizer flaw, perhaps, but it's
easier to make this change. */
- idx = cp_build_binary_op (TRUNC_DIV_EXPR,
- build1 (NOP_EXPR, vtable_index_type, e3),
- TYPE_SIZE_UNIT (vtable_entry_type));
- e1 = cp_build_binary_op (BIT_AND_EXPR,
- build1 (NOP_EXPR, vtable_index_type, e3),
- integer_one_node);
+ switch (TARGET_PTRMEMFUNC_VBIT_LOCATION)
+ {
+ case ptrmemfunc_vbit_in_pfn:
+ idx = cp_build_binary_op (TRUNC_DIV_EXPR,
+ build1 (NOP_EXPR, vtable_index_type, e3),
+ TYPE_SIZE_UNIT (vtable_entry_type));
+ e1 = cp_build_binary_op (BIT_AND_EXPR,
+ build1 (NOP_EXPR, vtable_index_type, e3),
+ integer_one_node);
+ break;
+
+ case ptrmemfunc_vbit_in_delta:
+ idx = build1 (NOP_EXPR, vtable_index_type, e3);
+ e1 = cp_build_binary_op (BIT_AND_EXPR,
+ delta, integer_one_node);
+ delta = cp_build_binary_op (RSHIFT_EXPR,
+ build1 (NOP_EXPR, vtable_index_type,
+ delta),
+ integer_one_node);
+ break;
+
+ default:
+ abort ();
+ }
- vtbl = convert_pointer_to (ptr_type_node, instance);
- delta = cp_convert (ptrdiff_type_node,
- build_component_ref (function, delta_identifier,
- NULL_TREE, 0));
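
As a rough model of what the switch above computes when a call through the pointer is expanded: a plain-C sketch, assuming a two-word representation with PFN and DELTA fields and pointer-sized vtable entries (the struct, the helper name, and the entry size are assumptions for illustration, not taken from the patch).

#include <stddef.h>

struct pmf { ptrdiff_t pfn; ptrdiff_t delta; };  /* assumed layout */

static void
decode_pmf (struct pmf p, int vbit_in_delta,
            ptrdiff_t *idx, ptrdiff_t *is_virtual, ptrdiff_t *delta)
{
  const ptrdiff_t entry_size = sizeof (void *);  /* assumed entry size */

  if (!vbit_in_delta)
    {
      /* ptrmemfunc_vbit_in_pfn: PFN is the byte offset of the vtable
         slot plus one; the division discards the flag bit.  */
      *idx = p.pfn / entry_size;
      *is_virtual = p.pfn & 1;
      *delta = p.delta;
    }
  else
    {
      /* ptrmemfunc_vbit_in_delta: PFN is used as-is, the flag is
         DELTA's low bit, and the real adjustment is DELTA >> 1.  */
      *idx = p.pfn;
      *is_virtual = p.delta & 1;
      *delta = p.delta >> 1;
    }
}

In both cases the flag value (E1 in the patch) then chooses between an indirect call through the selected vtable slot and a direct call through the stored function pointer, per the comment at the top of this patch.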
/* Under the new ABI, the conversion is easy. Just adjust
the DELTA field. */
delta = cp_convert (ptrdiff_type_node, delta);
+ if (TARGET_PTRMEMFUNC_VBIT_LOCATION == ptrmemfunc_vbit_in_delta)
+ n = cp_build_binary_op (LSHIFT_EXPR, n, integer_one_node);
delta = cp_build_binary_op (PLUS_EXPR, delta, n);
return build_ptrmemfunc1 (to_type, delta, npfn);
}
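
The LSHIFT_EXPR above is needed because, when the flag lives in DELTA, the field holds twice the real this-adjustment (plus the flag bit), so a further adjustment N must be doubled before it is added. A tiny sketch with hypothetical names:

#include <stddef.h>

static ptrdiff_t
adjust_pmf_delta (ptrdiff_t delta, ptrdiff_t n, int vbit_in_delta)
{
  if (vbit_in_delta)
    n <<= 1;            /* keep the flag in bit 0 undisturbed */
  return delta + n;     /* the PLUS_EXPR from the patch */
}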
*delta = fold (build (PLUS_EXPR, TREE_TYPE (*delta),
*delta, BINFO_OFFSET (binfo)));
- /* Under the new ABI, we set PFN to the vtable offset, plus
- one, at which the function can be found. */
- *pfn = fold (build (MULT_EXPR, integer_type_node,
- DECL_VINDEX (fn),
- TYPE_SIZE_UNIT (vtable_entry_type)));
- *pfn = fold (build (PLUS_EXPR, integer_type_node, *pfn,
- integer_one_node));
+ /* Under the new ABI, we set PFN to the vtable offset at
+ which the function can be found, plus one (unless
+ ptrmemfunc_vbit_in_delta, in which case delta is shifted
+ left, and then incremented). */
+ *pfn = DECL_VINDEX (fn);
+
+ switch (TARGET_PTRMEMFUNC_VBIT_LOCATION)
+ {
+ case ptrmemfunc_vbit_in_pfn:
+ *pfn = fold (build (MULT_EXPR, integer_type_node, *pfn,
+ TYPE_SIZE_UNIT (vtable_entry_type)));
+ *pfn = fold (build (PLUS_EXPR, integer_type_node, *pfn,
+ integer_one_node));
+ break;
+
+ case ptrmemfunc_vbit_in_delta:
+ *delta = fold (build (LSHIFT_EXPR, TREE_TYPE (*delta),
+ *delta, integer_one_node));
+ *delta = fold (build (PLUS_EXPR, TREE_TYPE (*delta),
+ *delta, integer_one_node));
+ break;
+
+ default:
+ abort ();
+ }
+
*pfn = fold (build1 (NOP_EXPR, TYPE_PTRMEMFUNC_FN_TYPE (type),
*pfn));
}
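
For the encoding side shown in this last hunk, the same arithmetic in plain C, assuming VINDEX is the virtual function's vtable index and vtable entries are pointer-sized; the helper and the entry size are illustrative assumptions.

#include <stddef.h>

static void
encode_virtual_pmf (ptrdiff_t vindex, int vbit_in_delta,
                    ptrdiff_t *pfn, ptrdiff_t *delta)
{
  const ptrdiff_t entry_size = sizeof (void *);  /* assumed */

  if (!vbit_in_delta)
    /* ptrmemfunc_vbit_in_pfn: byte offset of the slot, plus one.  */
    *pfn = vindex * entry_size + 1;
  else
    {
      /* ptrmemfunc_vbit_in_delta: PFN stays the raw index; DELTA is
         doubled and its low bit set to mark the function virtual.  */
      *pfn = vindex;
      *delta = (*delta << 1) + 1;
    }
}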