This ports https://golang.org/cl/155918 from the master repo.
runtime: panic on uncomparable map key, even if map is empty
Reorg map flags a bit so we don't need any extra space for the extra flag.
This is a pre-req for updating libgo to the Go 1.12beta2 release.
Reviewed-on: https://go-review.googlesource.com/c/157858
From-SVN: r267950
-0d64279c01a37b2579c0c62ca4f2c3e3f81de07c
+87005025fcd0d7e7908b3aae7062b52cb80eb0f3
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
return false;
}
+// Return whether computing the hash value of an instance of this
+// struct type might panic.  A struct's hash might panic if the hash
+// of any of its fields' types might panic.
+
+bool
+Struct_type::do_hash_might_panic()
+{
+ const Struct_field_list* fields = this->fields_;
+ if (fields == NULL)
+ return false;
+ for (Struct_field_list::const_iterator pf = fields->begin();
+ pf != fields->end();
+ ++pf)
+ {
+ // A single field whose hash might panic is sufficient.
+ if (pf->type()->hash_might_panic())
+ return true;
+ }
+ return false;
+}
+
// Return whether this struct type is permitted to be in the heap.
bool
Type* ptdt = Type::make_type_descriptor_ptr_type();
Type* uint8_type = Type::lookup_integer_type("uint8");
Type* uint16_type = Type::lookup_integer_type("uint16");
- Type* bool_type = Type::lookup_bool_type();
+ Type* uint32_type = Type::lookup_integer_type("uint32");
Struct_type* sf =
- Type::make_builtin_struct_type(11,
+ Type::make_builtin_struct_type(8,
"", tdt,
"key", ptdt,
"elem", ptdt,
"bucket", ptdt,
"keysize", uint8_type,
- "indirectkey", bool_type,
"valuesize", uint8_type,
- "indirectvalue", bool_type,
"bucketsize", uint16_type,
- "reflexivekey", bool_type,
- "needkeyupdate", bool_type);
+ "flags", uint32_type);
ret = Type::make_builtin_named_type("MapType", sf);
}
Type* mtdt = Map_type::make_map_type_descriptor_type();
Type* uint8_type = Type::lookup_integer_type("uint8");
Type* uint16_type = Type::lookup_integer_type("uint16");
+ Type* uint32_type = Type::lookup_integer_type("uint32");
int64_t keysize;
if (!this->key_type_->backend_type_size(gogo, &keysize))
else
vals->push_back(Expression::make_integer_int64(keysize, uint8_type, bloc));
- ++p;
- go_assert(p->is_field_name("indirectkey"));
- vals->push_back(Expression::make_boolean(keysize > Map_type::max_key_size,
- bloc));
-
++p;
go_assert(p->is_field_name("valuesize"));
if (valsize > Map_type::max_val_size)
else
vals->push_back(Expression::make_integer_int64(valsize, uint8_type, bloc));
- ++p;
- go_assert(p->is_field_name("indirectvalue"));
- vals->push_back(Expression::make_boolean(valsize > Map_type::max_val_size,
- bloc));
-
++p;
go_assert(p->is_field_name("bucketsize"));
vals->push_back(Expression::make_integer_int64(bucketsize, uint16_type,
bloc));
++p;
- go_assert(p->is_field_name("reflexivekey"));
- vals->push_back(Expression::make_boolean(this->key_type_->is_reflexive(),
- bloc));
-
- ++p;
- go_assert(p->is_field_name("needkeyupdate"));
- vals->push_back(Expression::make_boolean(this->key_type_->needs_key_update(),
- bloc));
+ go_assert(p->is_field_name("flags"));
+ // As with the other fields, the flag bits must match the reflect
+ // and runtime packages.
+ unsigned long flags = 0;
+ if (keysize > Map_type::max_key_size)
+ flags |= 1;
+ if (valsize > Map_type::max_val_size)
+ flags |= 2;
+ if (this->key_type_->is_reflexive())
+ flags |= 4;
+ if (this->key_type_->needs_key_update())
+ flags |= 8;
+ if (this->key_type_->hash_might_panic())
+ flags |= 16;
+ vals->push_back(Expression::make_integer_ul(flags, uint32_type, bloc));
++p;
go_assert(p == fields->end());
needs_key_update()
{ return this->do_needs_key_update(); }
+ // Return whether the hash function of this type might panic. This
+ // is only called for types used as a key in a map type.
+ bool
+ hash_might_panic()
+ { return this->do_hash_might_panic(); }
+
// Whether the type is permitted in the heap.
bool
in_heap()
do_needs_key_update()
{ return false; }
+ virtual bool
+ do_hash_might_panic()
+ { return false; }
+
virtual bool
do_in_heap()
{ return true; }
bool
do_needs_key_update();
+ bool
+ do_hash_might_panic();
+
bool
do_in_heap();
do_needs_key_update()
{ return this->element_type_->needs_key_update(); }
+ bool
+ do_hash_might_panic()
+ { return this->length_ != NULL && this->element_type_->hash_might_panic(); }
+
bool
do_in_heap()
{ return this->length_ == NULL || this->element_type_->in_heap(); }
do_needs_key_update()
{ return true; }
+ // Hashing an unhashable type stored in an interface might panic.
+ bool
+ do_hash_might_panic()
+ { return true; }
+
unsigned int
do_hash_for_method(Gogo*, int) const;
// mapType represents a map type.
type mapType struct {
	rtype
- key *rtype // map key type
- elem *rtype // map element (value) type
- bucket *rtype // internal bucket structure
- keysize uint8 // size of key slot
- indirectkey uint8 // store ptr to key instead of key itself
- valuesize uint8 // size of value slot
- indirectvalue uint8 // store ptr to value instead of value itself
- bucketsize uint16 // size of bucket
- reflexivekey bool // true if k==k for all keys
- needkeyupdate bool // true if we need to update key on an overwrite
+ key *rtype // map key type
+ elem *rtype // map element (value) type
+ bucket *rtype // internal bucket structure
+ keysize uint8 // size of key slot
+ valuesize uint8 // size of value slot
+ bucketsize uint16 // size of bucket
+ flags uint32 // bit 0: indirect key; bit 1: indirect value; bit 2: reflexive key; bit 3: need key update; bit 4: hash might panic
}
// ptrType represents a pointer type.
s := "map[" + *ktyp.string + "]" + *etyp.string
// Make a map type.
+ // Note: flag values must match those used in the TMAP case
+ // in ../cmd/compile/internal/gc/reflect.go:dtypesym.
var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
mt := **(**mapType)(unsafe.Pointer(&imap))
mt.string = &s
mt.ptrToThis = nil
mt.bucket = bucketOf(ktyp, etyp)
+ mt.flags = 0
if ktyp.size > maxKeySize {
mt.keysize = uint8(ptrSize)
- mt.indirectkey = 1
+ mt.flags |= 1 // indirect key
} else {
mt.keysize = uint8(ktyp.size)
- mt.indirectkey = 0
}
if etyp.size > maxValSize {
mt.valuesize = uint8(ptrSize)
- mt.indirectvalue = 1
+ mt.flags |= 2 // indirect value
} else {
mt.valuesize = uint8(etyp.size)
- mt.indirectvalue = 0
}
mt.bucketsize = uint16(mt.bucket.size)
- mt.reflexivekey = isReflexive(ktyp)
- mt.needkeyupdate = needKeyUpdate(ktyp)
+ if isReflexive(ktyp) {
+ mt.flags |= 4
+ }
+ if needKeyUpdate(ktyp) {
+ mt.flags |= 8
+ }
+ if hashMightPanic(ktyp) {
+ mt.flags |= 16
+ }
// Canonicalize before storing in lookupCache
ti := toType(&mt.rtype)
}
}
+// hashMightPanic reports whether the hash of a map key of type t might panic.
+func hashMightPanic(t *rtype) bool {
+ switch t.Kind() {
+ case Interface:
+ // An interface's dynamic type may be uncomparable, so
+ // hashing it might panic.
+ return true
+ case Array:
+ // An array's hash panics iff its element hash might panic.
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return hashMightPanic(tt.elem)
+ case Struct:
+ // A struct's hash panics iff any field's hash might panic.
+ tt := (*structType)(unsafe.Pointer(t))
+ for _, f := range tt.fields {
+ if hashMightPanic(f.typ) {
+ return true
+ }
+ }
+ return false
+ default:
+ return false
+ }
+}
+
// Make sure these routines stay in sync with ../../runtime/map.go!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
if msanenabled && h != nil {
msanread(key, t.key.size)
}
+ hashfn := t.key.hashfn
+ equalfn := t.key.equalfn
if h == nil || h.count == 0 {
+ if t.hashMightPanic() {
+ hashfn(key, 0) // see issue 23734
+ }
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
- hashfn := t.key.hashfn
- equalfn := t.key.equalfn
hash := hashfn(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
+ if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
if equalfn(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
+ if t.indirectvalue() {
v = *((*unsafe.Pointer)(v))
}
return v
if msanenabled && h != nil {
msanread(key, t.key.size)
}
+ hashfn := t.key.hashfn
+ equalfn := t.key.equalfn
if h == nil || h.count == 0 {
+ if t.hashMightPanic() {
+ hashfn(key, 0) // see issue 23734
+ }
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
- hashfn := t.key.hashfn
- equalfn := t.key.equalfn
hash := hashfn(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
+ if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
if equalfn(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
+ if t.indirectvalue() {
v = *((*unsafe.Pointer)(v))
}
return v, true
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
+ if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
if equalfn(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
+ if t.indirectvalue() {
v = *((*unsafe.Pointer)(v))
}
return k, v
continue
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
+ if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
if !equalfn(key, k) {
continue
}
// already have a mapping for key. Update it.
- if t.needkeyupdate {
+ if t.needkeyupdate() {
typedmemmove(t.key, k, key)
}
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
}
// store new key/value at insert position
- if t.indirectkey {
+ if t.indirectkey() {
kmem := newobject(t.key)
*(*unsafe.Pointer)(insertk) = kmem
insertk = kmem
}
- if t.indirectvalue {
+ if t.indirectvalue() {
vmem := newobject(t.elem)
*(*unsafe.Pointer)(val) = vmem
}
throw("concurrent map writes")
}
h.flags &^= hashWriting
- if t.indirectvalue {
+ if t.indirectvalue() {
val = *((*unsafe.Pointer)(val))
}
return val
if msanenabled && h != nil {
msanread(key, t.key.size)
}
+ hashfn := t.key.hashfn
+ equalfn := t.key.equalfn
if h == nil || h.count == 0 {
+ if t.hashMightPanic() {
+ hashfn(key, 0) // see issue 23734
+ }
return
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
- hashfn := t.key.hashfn
- equalfn := t.key.equalfn
hash := hashfn(key, uintptr(h.hash0))
// Set hashWriting after calling alg.hash, since alg.hash may panic,
}
k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
k2 := k
- if t.indirectkey {
+ if t.indirectkey() {
k2 = *((*unsafe.Pointer)(k2))
}
if !equalfn(key, k2) {
continue
}
// Only clear key if there are pointers in it.
- if t.indirectkey {
+ if t.indirectkey() {
*(*unsafe.Pointer)(k) = nil
} else if t.key.kind&kindNoPointers == 0 {
memclrHasPointers(k, t.key.size)
}
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
+ if t.indirectvalue() {
*(*unsafe.Pointer)(v) = nil
} else if t.elem.kind&kindNoPointers == 0 {
memclrHasPointers(v, t.elem.size)
continue
}
k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
- if t.indirectkey {
+ if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
// through the oldbucket, skipping any keys that will go
// to the other new bucket (each oldbucket expands to two
// buckets during a grow).
- if t.reflexivekey || equalfn(k, k) {
+ if t.reflexivekey() || equalfn(k, k) {
// If the item in the oldbucket is not destined for
// the current new bucket in the iteration, skip it.
hash := hashfn(k, uintptr(h.hash0))
}
}
if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
- !(t.reflexivekey || equalfn(k, k)) {
+ !(t.reflexivekey() || equalfn(k, k)) {
// This is the golden data, we can return it.
// OR
// key!=key, so the entry can't be deleted or updated, so we can just return it.
// That's lucky for us because when key!=key we can't look it up successfully.
it.key = k
- if t.indirectvalue {
+ if t.indirectvalue() {
v = *((*unsafe.Pointer)(v))
}
it.value = v
throw("bad map state")
}
k2 := k
- if t.indirectkey {
+ if t.indirectkey() {
k2 = *((*unsafe.Pointer)(k2))
}
var useY uint8
// Compute hash to make our evacuation decision (whether we need
// to send this key/value to bucket x or bucket y).
hash := t.key.hashfn(k2, uintptr(h.hash0))
- if h.flags&iterator != 0 && !t.reflexivekey && !t.key.equalfn(k2, k2) {
+ if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equalfn(k2, k2) {
// If key != key (NaNs), then the hash could be (and probably
// will be) entirely different from the old hash. Moreover,
// it isn't reproducible. Reproducibility is required in the
dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
}
dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
- if t.indirectkey {
+ if t.indirectkey() {
*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
} else {
typedmemmove(t.key, dst.k, k) // copy value
}
- if t.indirectvalue {
+ if t.indirectvalue() {
*(*unsafe.Pointer)(dst.v) = *(*unsafe.Pointer)(v)
} else {
typedmemmove(t.elem, dst.v, v)
if !ismapkey(t.key) {
throw("runtime.reflect_makemap: unsupported map key type")
}
- if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
- t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
+ if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
+ t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
throw("key size wrong")
}
- if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
- t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
+ if t.elem.size > maxValueSize && (!t.indirectvalue() || t.valuesize != uint8(sys.PtrSize)) ||
+ t.elem.size <= maxValueSize && (t.indirectvalue() || t.valuesize != uint8(t.elem.size)) {
throw("value size wrong")
}
if t.key.align > bucketCnt {
}
type maptype struct {
- typ _type
- key *_type
- elem *_type
- bucket *_type // internal type representing a hash bucket
- keysize uint8 // size of key slot
- indirectkey bool // store ptr to key instead of key itself
- valuesize uint8 // size of value slot
- indirectvalue bool // store ptr to value instead of value itself
- bucketsize uint16 // size of bucket
- reflexivekey bool // true if k==k for all keys
- needkeyupdate bool // true if we need to update key on an overwrite
+ typ _type
+ key *_type
+ elem *_type
+ bucket *_type // internal type representing a hash bucket
+ keysize uint8 // size of key slot
+ valuesize uint8 // size of value slot
+ bucketsize uint16 // size of bucket
+ flags uint32 // flag bits, decoded by the accessor methods below
+}
+
+// Note: flag values must match those used in the TMAP case
+// in ../cmd/compile/internal/gc/reflect.go:dtypesym.
+func (mt *maptype) indirectkey() bool { // store ptr to key instead of key itself
+ return mt.flags&1 != 0
+}
+func (mt *maptype) indirectvalue() bool { // store ptr to value instead of value itself
+ return mt.flags&2 != 0
+}
+func (mt *maptype) reflexivekey() bool { // true if k==k for all keys
+ return mt.flags&4 != 0
+}
+func (mt *maptype) needkeyupdate() bool { // true if we need to update key on an overwrite
+ return mt.flags&8 != 0
+}
+func (mt *maptype) hashMightPanic() bool { // true if hash function might panic
+ return mt.flags&16 != 0
+}
type arraytype struct {