compiler, runtime: replace hashmap code with Go 1.7 hashmap
authorIan Lance Taylor <iant@golang.org>
Wed, 21 Sep 2016 20:58:51 +0000 (20:58 +0000)
committerIan Lance Taylor <ian@gcc.gnu.org>
Wed, 21 Sep 2016 20:58:51 +0000 (20:58 +0000)
    This change removes the gccgo-specific hashmap code and replaces it with
    the hashmap code from the Go 1.7 runtime.  The Go 1.7 hashmap code is
    more efficient, does a better job on details like when to update a key,
    and provides some support against denial-of-service attacks.

    The compiler is changed to call the new hashmap functions instead of the
    old ones.

    The compiler now tracks which types are reflexive and which require
    updating when used as a map key, and records the information in map type
    descriptors.

    Map_index_expression is simplified.  The special case for a map index on
    the right hand side of a tuple expression has been unnecessary for some
    time, and is removed.  The support for specially marking a map index as
    an lvalue is removed, in favor of lowering an assignment to a map index
    into a function call.  The long-obsolete support for a map index of a
    pointer to a map is removed.

    The __go_new_map_big function (known to the compiler as
    Runtime::MAKEMAPBIG) is no longer needed, as the new runtime.makemap
    function takes an int64 hint argument.

    The old map descriptor type and supporting expression is removed.

    The compiler was still supporting the long-obsolete syntax `m[k] = 0,
    false` to delete a value from a map.  That is now removed, requiring a
    change to one of the gccgo-specific tests.

    The builtin len function applied to a map or channel p is now compiled
    as `p == nil ? 0 : *(*int)(p)`.  The __go_chan_len function (known to
    the compiler as Runtime::CHAN_LEN) is removed.

    Support for a shared zero value for maps to large value types is
    introduced, along the lines of the gc compiler.  The zero value is
    handled as a common variable.

    The hash function is changed to take a seed argument, changing the
    runtime hash functions and the compiler-generated hash functions.
    Unlike the gc compiler, both the hash and equal functions continue to
    take the type length.

    Types that cannot be compared now store nil for the hash and equal
    functions, rather than pointing to functions that throw.  Interface hash
    and comparison functions now check explicitly for nil.  This matches the
    gc compiler and permits a simple implementation for ismapkey.

    The compiler is changed to permit marking struct and array types as
    incomparable, meaning that they have no hash or equal function.  We use
    this for thunk types, removing the existing special code to avoid
    generating hash/equal functions for them.

    The C runtime code adds memclr, memequal, and memmove functions.

    The hashmap code uses go:linkname comments to make the functions
    visible, as otherwise the compiler would discard them.

    The hashmap code comments out the unused reference to the address of the
    first parameter in the race code, as otherwise the compiler thinks that
    the parameter escapes and copies it onto the heap.  This is probably not
    needed when we enable escape analysis.

    Several runtime map tests that were previously skipped for gccgo are now
    run.

    The Go runtime picks up type kind information and stubs.  The type kind
    information causes the generated runtime header file to define some
    constants, including `empty`, and the C code is adjusted accordingly.

    A Go-callable version of runtime.throw, that takes a Go string, is
    added to be called from the hashmap code.

    Reviewed-on: https://go-review.googlesource.com/29447

* go.go-torture/execute/map-1.go: Replace old map deletion syntax
with call to builtin delete function.

From-SVN: r240334

62 files changed:
gcc/go/gofrontend/MERGE
gcc/go/gofrontend/escape.cc
gcc/go/gofrontend/expressions.cc
gcc/go/gofrontend/expressions.h
gcc/go/gofrontend/gogo.cc
gcc/go/gofrontend/parse.cc
gcc/go/gofrontend/runtime.cc
gcc/go/gofrontend/runtime.def
gcc/go/gofrontend/runtime.h
gcc/go/gofrontend/statements.cc
gcc/go/gofrontend/statements.h
gcc/go/gofrontend/types.cc
gcc/go/gofrontend/types.h
gcc/testsuite/ChangeLog
gcc/testsuite/go.go-torture/execute/map-1.go
libgo/Makefile.am
libgo/Makefile.in
libgo/go/reflect/type.go
libgo/go/runtime/export_test.go
libgo/go/runtime/hashmap.go [new file with mode: 0644]
libgo/go/runtime/hashmap_fast.go [new file with mode: 0644]
libgo/go/runtime/map_test.go
libgo/go/runtime/msan0.go
libgo/go/runtime/race0.go [new file with mode: 0644]
libgo/go/runtime/stubs.go [new file with mode: 0644]
libgo/go/runtime/type.go
libgo/go/runtime/typekind.go [new file with mode: 0644]
libgo/runtime/chan.goc
libgo/runtime/go-construct-map.c
libgo/runtime/go-eface-compare.c
libgo/runtime/go-eface-val-compare.c
libgo/runtime/go-fieldtrack.c
libgo/runtime/go-interface-compare.c
libgo/runtime/go-interface-eface-compare.c
libgo/runtime/go-interface-val-compare.c
libgo/runtime/go-map-delete.c [deleted file]
libgo/runtime/go-map-index.c [deleted file]
libgo/runtime/go-map-len.c [deleted file]
libgo/runtime/go-map-range.c [deleted file]
libgo/runtime/go-memclr.c [new file with mode: 0644]
libgo/runtime/go-memequal.c [new file with mode: 0644]
libgo/runtime/go-memmove.c [new file with mode: 0644]
libgo/runtime/go-new-map.c [deleted file]
libgo/runtime/go-reflect-map.c [deleted file]
libgo/runtime/go-type-complex.c
libgo/runtime/go-type-eface.c
libgo/runtime/go-type-error.c [deleted file]
libgo/runtime/go-type-float.c
libgo/runtime/go-type-identity.c
libgo/runtime/go-type-interface.c
libgo/runtime/go-type-string.c
libgo/runtime/go-type.h
libgo/runtime/malloc.goc
libgo/runtime/malloc.h
libgo/runtime/map.goc [deleted file]
libgo/runtime/map.h [deleted file]
libgo/runtime/mcentral.c
libgo/runtime/mgc0.c
libgo/runtime/mheap.c
libgo/runtime/panic.c
libgo/runtime/proc.c
libgo/runtime/runtime.h

index 1123e622c825b3a57b045839f3397db9705826d6..73645de171fc8a0d1211b483e151dd35df8a2603 100644 (file)
@@ -1,4 +1,4 @@
-259b4fe81436a3836308f6c96e058edad07be338
+69668416034247ac6c7228c9dcbf6719af05b6ca
 
 The first line of this file holds the git revision number of the last
 merge done from the gofrontend repository.
index ee7fa0933447d8ac46488418bb95e6fdc81cf04c..ea4978bb5550b53963f9dfe76d4279abc28df76a 100644 (file)
@@ -294,7 +294,6 @@ Node::op_format() const
                case Runtime::MAKECHAN:
                case Runtime::MAKECHANBIG:
                case Runtime::MAKEMAP:
-               case Runtime::MAKEMAPBIG:
                case Runtime::MAKESLICE1:
                case Runtime::MAKESLICE2:
                case Runtime::MAKESLICE1BIG:
@@ -1231,7 +1230,6 @@ Escape_analysis_assign::expression(Expression** pexpr)
              case Runtime::MAKECHAN:
              case Runtime::MAKECHANBIG:
              case Runtime::MAKEMAP:
-             case Runtime::MAKEMAPBIG:
              case Runtime::MAKESLICE1:
              case Runtime::MAKESLICE2:
              case Runtime::MAKESLICE1BIG:
@@ -1839,7 +1837,6 @@ Escape_analysis_assign::assign(Node* dst, Node* src)
                  case Runtime::MAKECHAN:
                  case Runtime::MAKECHANBIG:
                  case Runtime::MAKEMAP:
-                 case Runtime::MAKEMAPBIG:
                  case Runtime::MAKESLICE1:
                  case Runtime::MAKESLICE2:
                  case Runtime::MAKESLICE1BIG:
@@ -2582,7 +2579,6 @@ Escape_analysis_flood::flood(Level level, Node* dst, Node* src,
                    case Runtime::MAKECHAN:
                    case Runtime::MAKECHANBIG:
                    case Runtime::MAKEMAP:
-                   case Runtime::MAKEMAPBIG:
                    case Runtime::MAKESLICE1:
                    case Runtime::MAKESLICE2:
                    case Runtime::MAKESLICE1BIG:
index aabb35391acd70a25e04fc4f05b45d7dfd9ddbe0..70853abcd787a85a726590026dce8f7c8648fcb5 100644 (file)
@@ -6998,13 +6998,14 @@ Builtin_call_expression::do_lower(Gogo* gogo, Named_object* function,
              Statement::make_temporary(mt->key_type(), args->back(), loc);
            inserter->insert(key_temp);
 
-           Expression* e1 = Expression::make_temporary_reference(map_temp,
+           Expression* e1 = Expression::make_type_descriptor(mt, loc);
+           Expression* e2 = Expression::make_temporary_reference(map_temp,
                                                                  loc);
-           Expression* e2 = Expression::make_temporary_reference(key_temp,
+           Expression* e3 = Expression::make_temporary_reference(key_temp,
                                                                  loc);
-           e2 = Expression::make_unary(OPERATOR_AND, e2, loc);
+           e3 = Expression::make_unary(OPERATOR_AND, e3, loc);
            return Runtime::make_call(Runtime::MAPDELETE, this->location(),
-                                     2, e1, e2);
+                                     3, e1, e2, e3);
          }
       }
       break;
@@ -7065,6 +7066,18 @@ Builtin_call_expression::do_flatten(Gogo*, Named_object*,
              *pa = Expression::make_temporary_reference(temp, loc);
            }
        }
+
+    case BUILTIN_LEN:
+      Expression_list::iterator pa = this->args()->begin();
+      if (!(*pa)->is_variable()
+         && ((*pa)->type()->map_type() != NULL
+             || (*pa)->type()->channel_type() != NULL))
+       {
+         Temporary_statement* temp =
+           Statement::make_temporary(NULL, *pa, loc);
+         inserter->insert(temp);
+         *pa = Expression::make_temporary_reference(temp, loc);
+       }
     }
 
   return this;
@@ -7174,13 +7187,7 @@ Builtin_call_expression::lower_make()
     }
 
   Location type_loc = first_arg->location();
-  Expression* type_arg;
-  if (is_slice || is_chan)
-    type_arg = Expression::make_type_descriptor(type, type_loc);
-  else if (is_map)
-    type_arg = Expression::make_map_descriptor(type->map_type(), type_loc);
-  else
-    go_unreachable();
+  Expression* type_arg = Expression::make_type_descriptor(type, type_loc);
 
   Expression* call;
   if (is_slice)
@@ -7197,10 +7204,9 @@ Builtin_call_expression::lower_make()
                                  loc, 3, type_arg, len_arg, cap_arg);
     }
   else if (is_map)
-    call = Runtime::make_call((have_big_args
-                              ? Runtime::MAKEMAPBIG
-                              : Runtime::MAKEMAP),
-                             loc, 2, type_arg, len_arg);
+    call = Runtime::make_call(Runtime::MAKEMAP, loc, 4, type_arg, len_arg,
+                             Expression::make_nil(loc),
+                             Expression::make_nil(loc));
   else if (is_chan)
     call = Runtime::make_call((have_big_args
                               ? Runtime::MAKECHANBIG
@@ -8250,10 +8256,23 @@ Builtin_call_expression::do_get_backend(Translate_context* context)
                val = arg_type->array_type()->get_length(gogo, arg);
                this->seen_ = false;
              }
-           else if (arg_type->map_type() != NULL)
-              val = Runtime::make_call(Runtime::MAP_LEN, location, 1, arg);
-           else if (arg_type->channel_type() != NULL)
-              val = Runtime::make_call(Runtime::CHAN_LEN, location, 1, arg);
+           else if (arg_type->map_type() != NULL
+                    || arg_type->channel_type() != NULL)
+             {
+               // The first field is the length.  If the pointer is
+               // nil, the length is zero.
+               Type* pint_type = Type::make_pointer_type(int_type);
+               arg = Expression::make_unsafe_cast(pint_type, arg, location);
+               Expression* nil = Expression::make_nil(location);
+               nil = Expression::make_cast(pint_type, nil, location);
+               Expression* cmp = Expression::make_binary(OPERATOR_EQEQ,
+                                                         arg, nil, location);
+               Expression* zero = Expression::make_integer_ul(0, int_type,
+                                                              location);
+               Expression* indir = Expression::make_unary(OPERATOR_MULT,
+                                                          arg, location);
+               val = Expression::make_conditional(cmp, zero, indir, location);
+             }
            else
              go_unreachable();
          }
@@ -9866,11 +9885,7 @@ Index_expression::do_lower(Gogo*, Named_object*, Statement_inserter*, int)
          error_at(location, "invalid slice of map");
          return Expression::make_error(location);
        }
-      Map_index_expression* ret = Expression::make_map_index(left, start,
-                                                            location);
-      if (this->is_lvalue_)
-       ret->set_is_lvalue();
-      return ret;
+      return Expression::make_map_index(left, start, location);
     }
   else
     {
@@ -10666,7 +10681,7 @@ Expression::make_string_index(Expression* string, Expression* start,
 Map_type*
 Map_index_expression::get_map_type() const
 {
-  Map_type* mt = this->map_->type()->deref()->map_type();
+  Map_type* mt = this->map_->type()->map_type();
   if (mt == NULL)
     go_assert(saw_errors());
   return mt;
@@ -10724,7 +10739,7 @@ Map_index_expression::do_flatten(Gogo* gogo, Named_object*,
     }
 
   if (this->value_pointer_ == NULL)
-    this->get_value_pointer(this->is_lvalue_);
+    this->get_value_pointer(gogo);
   if (this->value_pointer_->is_error_expression()
       || this->value_pointer_->type()->is_error_type())
     return Expression::make_error(loc);
@@ -10747,14 +10762,7 @@ Map_index_expression::do_type()
   Map_type* mt = this->get_map_type();
   if (mt == NULL)
     return Type::make_error_type();
-  Type* type = mt->val_type();
-  // If this map index is in a tuple assignment, we actually return a
-  // pointer to the value type.  Tuple_map_assignment_statement is
-  // responsible for handling this correctly.  We need to get the type
-  // right in case this gets assigned to a temporary variable.
-  if (this->is_in_tuple_assignment_)
-    type = Type::make_pointer_type(type);
-  return type;
+  return mt->val_type();
 }
 
 // Fix the type of a map index.
@@ -10806,47 +10814,17 @@ Map_index_expression::do_get_backend(Translate_context* context)
   go_assert(this->value_pointer_ != NULL
             && this->value_pointer_->is_variable());
 
-  Bexpression* ret;
-  if (this->is_lvalue_)
-    {
-      Expression* val =
-          Expression::make_unary(OPERATOR_MULT, this->value_pointer_,
-                                 this->location());
-      ret = val->get_backend(context);
-    }
-  else if (this->is_in_tuple_assignment_)
-    {
-      // Tuple_map_assignment_statement is responsible for using this
-      // appropriately.
-      ret = this->value_pointer_->get_backend(context);
-    }
-  else
-    {
-      Location loc = this->location();
-
-      Expression* nil_check =
-          Expression::make_binary(OPERATOR_EQEQ, this->value_pointer_,
-                                  Expression::make_nil(loc), loc);
-      Bexpression* bnil_check = nil_check->get_backend(context);
-      Expression* val =
-          Expression::make_unary(OPERATOR_MULT, this->value_pointer_, loc);
-      Bexpression* bval = val->get_backend(context);
-
-      Gogo* gogo = context->gogo();
-      Btype* val_btype = type->val_type()->get_backend(gogo);
-      Bexpression* val_zero = gogo->backend()->zero_expression(val_btype);
-      ret = gogo->backend()->conditional_expression(val_btype, bnil_check,
-                                                    val_zero, bval, loc);
-    }
-  return ret;
+  Expression* val = Expression::make_unary(OPERATOR_MULT, this->value_pointer_,
+                                          this->location());
+  return val->get_backend(context);
 }
 
-// Get an expression for the map index.  This returns an expression which
-// evaluates to a pointer to a value.  The pointer will be NULL if the key is
-// not in the map.
+// Get an expression for the map index.  This returns an expression
+// that evaluates to a pointer to a value.  If the key is not in the
+// map, the pointer will point to a zero value.
 
 Expression*
-Map_index_expression::get_value_pointer(bool insert)
+Map_index_expression::get_value_pointer(Gogo* gogo)
 {
   if (this->value_pointer_ == NULL)
     {
@@ -10859,21 +10837,32 @@ Map_index_expression::get_value_pointer(bool insert)
 
       Location loc = this->location();
       Expression* map_ref = this->map_;
-      if (this->map_->type()->points_to() != NULL)
-        map_ref = Expression::make_unary(OPERATOR_MULT, map_ref, loc);
 
-      Expression* index_ptr = Expression::make_unary(OPERATOR_AND, this->index_,
+      Expression* index_ptr = Expression::make_unary(OPERATOR_AND,
+                                                    this->index_,
                                                      loc);
-      Expression* map_index =
-          Runtime::make_call(Runtime::MAP_INDEX, loc, 3,
-                             map_ref, index_ptr,
-                             Expression::make_boolean(insert, loc));
+
+      Expression* zero = type->fat_zero_value(gogo);
+
+      Expression* map_index;
+
+      if (zero == NULL)
+       map_index =
+          Runtime::make_call(Runtime::MAPACCESS1, loc, 3,
+                            Expression::make_type_descriptor(type, loc),
+                             map_ref, index_ptr);
+      else
+       map_index =
+         Runtime::make_call(Runtime::MAPACCESS1_FAT, loc, 4,
+                            Expression::make_type_descriptor(type, loc),
+                            map_ref, index_ptr, zero);
 
       Type* val_type = type->val_type();
       this->value_pointer_ =
           Expression::make_unsafe_cast(Type::make_pointer_type(val_type),
                                        map_index, this->location());
     }
+
   return this->value_pointer_;
 }
 
@@ -12583,7 +12572,7 @@ Map_construction_expression::do_get_backend(Translate_context* context)
           Type::make_builtin_struct_type(2,
                                          "__key", mt->key_type(),
                                          "__val", mt->val_type());
-  Expression* descriptor = Expression::make_map_descriptor(mt, loc);
+  Expression* descriptor = Expression::make_type_descriptor(mt, loc);
 
   Type* uintptr_t = Type::lookup_integer_type("uintptr");
   Expression* count = Expression::make_integer_ul(i, uintptr_t, loc);
@@ -12596,12 +12585,10 @@ Map_construction_expression::do_get_backend(Translate_context* context)
       this->element_type_->find_local_field("__val", &field_index);
   Expression* val_offset =
       Expression::make_struct_field_offset(this->element_type_, valfield);
-  Expression* val_size =
-      Expression::make_type_info(mt->val_type(), TYPE_INFO_SIZE);
 
   Expression* map_ctor =
-      Runtime::make_call(Runtime::CONSTRUCT_MAP, loc, 6, descriptor, count,
-                         entry_size, val_offset, val_size, ventries);
+      Runtime::make_call(Runtime::CONSTRUCT_MAP, loc, 5, descriptor, count,
+                         entry_size, val_offset, ventries);
   return map_ctor->get_backend(context);
 }
 
@@ -14608,64 +14595,6 @@ Expression::make_struct_field_offset(Struct_type* type,
   return new Struct_field_offset_expression(type, field);
 }
 
-// An expression which evaluates to a pointer to the map descriptor of
-// a map type.
-
-class Map_descriptor_expression : public Expression
-{
- public:
-  Map_descriptor_expression(Map_type* type, Location location)
-    : Expression(EXPRESSION_MAP_DESCRIPTOR, location),
-      type_(type)
-  { }
-
- protected:
-  Type*
-  do_type()
-  { return Type::make_pointer_type(Map_type::make_map_descriptor_type()); }
-
-  void
-  do_determine_type(const Type_context*)
-  { }
-
-  Expression*
-  do_copy()
-  { return this; }
-
-  Bexpression*
-  do_get_backend(Translate_context* context)
-  {
-    return this->type_->map_descriptor_pointer(context->gogo(),
-                                              this->location());
-  }
-
-  void
-  do_dump_expression(Ast_dump_context*) const;
- private:
-  // The type for which this is the descriptor.
-  Map_type* type_;
-};
-
-// Dump ast representation for a map descriptor expression.
-
-void
-Map_descriptor_expression::do_dump_expression(
-    Ast_dump_context* ast_dump_context) const
-{
-  ast_dump_context->ostream() << "map_descriptor(";
-  ast_dump_context->dump_type(this->type_);
-  ast_dump_context->ostream() << ")";
-}
-
-// Make a map descriptor expression.
-
-Expression*
-Expression::make_map_descriptor(Map_type* type, Location location)
-{
-  return new Map_descriptor_expression(type, location);
-}
-
 // An expression which evaluates to the address of an unnamed label.
 
 class Label_addr_expression : public Expression
index 339cb5d288bf864aeabda7acab0fb48d85dd7efb..8ecc11afd7ffd1f66d774d3c689786a6081eb545 100644 (file)
@@ -133,7 +133,6 @@ class Expression
     EXPRESSION_INTERFACE_VALUE,
     EXPRESSION_INTERFACE_MTABLE,
     EXPRESSION_STRUCT_FIELD_OFFSET,
-    EXPRESSION_MAP_DESCRIPTOR,
     EXPRESSION_LABEL_ADDR,
     EXPRESSION_CONDITIONAL,
     EXPRESSION_COMPOUND
@@ -467,11 +466,6 @@ class Expression
   static Expression*
   make_struct_field_offset(Struct_type*, const Struct_field*);
 
-  // Make an expression which evaluates to the address of the map
-  // descriptor for TYPE.
-  static Expression*
-  make_map_descriptor(Map_type* type, Location);
-
   // Make an expression which evaluates to the address of an unnamed
   // label.
   static Expression*
@@ -2449,14 +2443,9 @@ class Index_expression : public Parser_expression
   Index_expression(Expression* left, Expression* start, Expression* end,
                    Expression* cap, Location location)
     : Parser_expression(EXPRESSION_INDEX, location),
-      left_(left), start_(start), end_(end), cap_(cap), is_lvalue_(false)
+      left_(left), start_(start), end_(end), cap_(cap)
   { }
 
-  // Record that this expression is an lvalue.
-  void
-  set_is_lvalue()
-  { this->is_lvalue_ = true; }
-
   // Dump an index expression, i.e. an expression of the form
   // expr[expr], expr[expr:expr], or expr[expr:expr:expr] to a dump context.
   static void
@@ -2509,9 +2498,6 @@ class Index_expression : public Parser_expression
   // default capacity, non-NULL for indices and slices that specify the
   // capacity.
   Expression* cap_;
-  // Whether this is being used as an l-value.  We set this during the
-  // parse because map index expressions need to know.
-  bool is_lvalue_;
 };
 
 // An array index.  This is used for both indexing and slicing.
@@ -2677,8 +2663,7 @@ class Map_index_expression : public Expression
   Map_index_expression(Expression* map, Expression* index,
                       Location location)
     : Expression(EXPRESSION_MAP_INDEX, location),
-      map_(map), index_(index), is_lvalue_(false),
-      is_in_tuple_assignment_(false), value_pointer_(NULL)
+      map_(map), index_(index), value_pointer_(NULL)
   { }
 
   // Return the map.
@@ -2703,31 +2688,12 @@ class Map_index_expression : public Expression
   Map_type*
   get_map_type() const;
 
-  // Record that this map expression is an lvalue.  The difference is
-  // that an lvalue always inserts the key.
-  void
-  set_is_lvalue()
-  { this->is_lvalue_ = true; }
-
-  // Return whether this map expression occurs in an assignment to a
-  // pair of values.
-  bool
-  is_in_tuple_assignment() const
-  { return this->is_in_tuple_assignment_; }
-
-  // Record that this map expression occurs in an assignment to a pair
-  // of values.
-  void
-  set_is_in_tuple_assignment()
-  { this->is_in_tuple_assignment_ = true; }
-
-  // Return an expression for the map index.  This returns an expression which
-  // evaluates to a pointer to a value in the map.  If INSERT is true,
-  // the key will be inserted if not present, and the value pointer
-  // will be zero initialized.  If INSERT is false, and the key is not
-  // present in the map, the pointer will be NULL.
+  // Return an expression for the map index.  This returns an
+  // expression that evaluates to a pointer to a value in the map.  If
+  // the key is not present in the map, this will return a pointer to
+  // the zero value.
   Expression*
-  get_value_pointer(bool insert);
+  get_value_pointer(Gogo*);
 
  protected:
   int
@@ -2773,10 +2739,6 @@ class Map_index_expression : public Expression
   Expression* map_;
   // The index.
   Expression* index_;
-  // Whether this is an lvalue.
-  bool is_lvalue_;
-  // Whether this is in a tuple assignment to a pair of values.
-  bool is_in_tuple_assignment_;
   // A pointer to the value at this index.
   Expression* value_pointer_;
 };
index 3b7ecd3491fbe335c8ff05b45aab64c5557335c8..a3afdcb7b9ad3eaf184dd47441b00a6e4a3a1bc7 100644 (file)
@@ -4614,7 +4614,6 @@ Gogo::convert_named_types()
   Array_type::make_array_type_descriptor_type();
   Array_type::make_slice_type_descriptor_type();
   Map_type::make_map_type_descriptor_type();
-  Map_type::make_map_descriptor_type();
   Channel_type::make_chan_type_descriptor_type();
   Interface_type::make_interface_type_descriptor_type();
   Expression::make_func_descriptor_type();
@@ -6547,7 +6546,9 @@ Variable::get_backend_variable(Gogo* gogo, Named_object* function,
          Btype* btype = type->get_backend(gogo);
 
          Bvariable* bvar;
-         if (this->is_global_)
+         if (Map_type::is_zero_value(this))
+           bvar = Map_type::backend_zero_value(gogo);
+         else if (this->is_global_)
            bvar = backend->global_variable((package == NULL
                                             ? gogo->package_name()
                                             : package->package_name()),
index cb7f9664aaf547fff59f59d2a07f794448683f79..6b45ebf4fd07b32e178a074936b4eb9dfa3a6d65 100644 (file)
@@ -4039,11 +4039,6 @@ void
 Parse::inc_dec_stat(Expression* exp)
 {
   const Token* token = this->peek_token();
-
-  // Lvalue maps require special handling.
-  if (exp->index_expression() != NULL)
-    exp->index_expression()->set_is_lvalue();
-
   if (token->is_op(OPERATOR_PLUSPLUS))
     this->gogo_->add_statement(Statement::make_inc_statement(exp));
   else if (token->is_op(OPERATOR_MINUSMINUS))
@@ -4120,13 +4115,6 @@ Parse::tuple_assignment(Expression_list* lhs, bool may_be_composite_lit,
   if (lhs == NULL)
     return;
 
-  // Map expressions act differently when they are lvalues.
-  for (Expression_list::iterator plv = lhs->begin();
-       plv != lhs->end();
-       ++plv)
-    if ((*plv)->index_expression() != NULL)
-      (*plv)->index_expression()->set_is_lvalue();
-
   if (p_range_clause != NULL && token->is_keyword(KEYWORD_RANGE))
     {
       if (op != OPERATOR_EQ)
@@ -4209,18 +4197,6 @@ Parse::tuple_assignment(Expression_list* lhs, bool may_be_composite_lit,
                                                          map_index, location);
       this->gogo_->add_statement(s);
     }
-  else if (lhs->size() == 1
-          && vals->size() == 2
-          && (map_index = lhs->front()->index_expression()) != NULL)
-    {
-      if (op != OPERATOR_EQ)
-       error_at(location, "assigning tuple to map index requires %<=%>");
-      Expression* val = vals->front();
-      Expression* should_set = vals->back();
-      Statement* s = Statement::make_map_assignment(map_index, val, should_set,
-                                                   location);
-      this->gogo_->add_statement(s);
-    }
   else if (lhs->size() == 2
           && vals->size() == 1
           && (receive = (*vals->begin())->receive_expression()) != NULL)
@@ -4952,13 +4928,6 @@ Parse::comm_clause(Select_clauses* clauses, bool* saw_default)
   bool got_case = this->comm_case(&is_send, &channel, &val, &closed,
                                  &varname, &closedname, &is_default);
 
-  if (!is_send
-      && varname.empty()
-      && closedname.empty()
-      && val != NULL
-      && val->index_expression() != NULL)
-    val->index_expression()->set_is_lvalue();
-
   if (this->peek_token()->is_op(OPERATOR_COLON))
     this->advance_token();
   else
index 64920250e4347f968e7be22fd240ccf1957aa60b..98678f4adea47ef6d7f0a84da089937481a2b7d7 100644 (file)
@@ -54,8 +54,6 @@ enum Runtime_function_type
   RFT_SLICE,
   // Go type map[any]any, C type struct __go_map *.
   RFT_MAP,
-  // Pointer to map iteration type.
-  RFT_MAPITER,
   // Go type chan any, C type struct __go_channel *.
   RFT_CHAN,
   // Go type non-empty interface, C type struct __go_interface.
@@ -66,8 +64,6 @@ enum Runtime_function_type
   RFT_FUNC_PTR,
   // Pointer to Go type descriptor.
   RFT_TYPE,
-  // Pointer to map descriptor.
-  RFT_MAPDESCRIPTOR,
 
   NUMBER_OF_RUNTIME_FUNCTION_TYPES
 };
@@ -153,10 +149,6 @@ runtime_function_type(Runtime_function_type bft)
          t = Type::make_map_type(any, any, bloc);
          break;
 
-       case RFT_MAPITER:
-         t = Type::make_pointer_type(Runtime::map_iteration_type());
-         break;
-
        case RFT_CHAN:
          t = Type::make_channel_type(true, true, any);
          break;
@@ -188,10 +180,6 @@ runtime_function_type(Runtime_function_type bft)
        case RFT_TYPE:
          t = Type::make_type_descriptor_ptr_type();
          break;
-
-       case RFT_MAPDESCRIPTOR:
-         t = Type::make_pointer_type(Map_type::make_map_descriptor_type());
-         break;
        }
 
       runtime_function_types[bft] = t;
@@ -225,7 +213,6 @@ convert_to_runtime_function_type(Runtime_function_type bft, Expression* e,
     case RFT_COMPLEX128:
     case RFT_STRING:
     case RFT_POINTER:
-    case RFT_MAPITER:
     case RFT_FUNC_PTR:
       {
        Type* t = runtime_function_type(bft);
@@ -244,11 +231,6 @@ convert_to_runtime_function_type(Runtime_function_type bft, Expression* e,
     case RFT_TYPE:
       go_assert(e->type() == Type::make_type_descriptor_ptr_type());
       return e;
-
-    case RFT_MAPDESCRIPTOR:
-      go_assert(e->type()->points_to()
-               == Map_type::make_map_descriptor_type());
-      return e;
     }
 }
 
@@ -389,21 +371,6 @@ Runtime::make_call(Runtime::Function code, Location loc,
   return Expression::make_call(func, args, false, loc);
 }
 
-// The type we use for a map iteration.  This is really a struct which
-// is four pointers long.  This must match the runtime struct
-// __go_hash_iter.
-
-Type*
-Runtime::map_iteration_type()
-{
-  const unsigned long map_iteration_size = 4;
-  Expression* iexpr =
-    Expression::make_integer_ul(map_iteration_size, NULL,
-                               Linemap::predeclared_location());
-  return Type::make_array_type(runtime_function_type(RFT_POINTER), iexpr);
-}
-
-
 // Get the runtime code for a named builtin function.  This is used as a helper
 // when creating function references for call expressions.  Every reference to
 // a builtin runtime function should have the associated runtime code.  If the
index 2e79263a632da45ec62c5f1e79328d7233f4c317..2be772bc9506f3a027d797a2b60ba0a656e499a4 100644 (file)
@@ -85,54 +85,51 @@ DEF_GO_RUNTIME(MAKESLICE2BIG, "__go_make_slice2_big", P3(TYPE, UINT64, UINT64),
 
 
 // Make a map.
-DEF_GO_RUNTIME(MAKEMAP, "__go_new_map", P2(MAPDESCRIPTOR, UINTPTR), R1(MAP))
-DEF_GO_RUNTIME(MAKEMAPBIG, "__go_new_map_big", P2(MAPDESCRIPTOR, UINT64),
+DEF_GO_RUNTIME(MAKEMAP, "runtime.makemap", P4(TYPE, INT64, POINTER, POINTER),
               R1(MAP))
 
 // Build a map from a composite literal.
 DEF_GO_RUNTIME(CONSTRUCT_MAP, "__go_construct_map",
-              P6(POINTER, UINTPTR, UINTPTR, UINTPTR, UINTPTR, POINTER),
+              P5(POINTER, UINTPTR, UINTPTR, UINTPTR, POINTER),
               R1(MAP))
 
-// Get the length of a map (the number of entries).
-DEF_GO_RUNTIME(MAP_LEN, "__go_map_len", P1(MAP), R1(INT))
-
 // Look up a key in a map.
-DEF_GO_RUNTIME(MAP_INDEX, "__go_map_index", P3(MAP, POINTER, BOOL),
+DEF_GO_RUNTIME(MAPACCESS1, "runtime.mapaccess1", P3(TYPE, MAP, POINTER),
               R1(POINTER))
 
-// Look up a key in a map returning whether it is present.
-DEF_GO_RUNTIME(MAPACCESS2, "runtime.mapaccess2",
-              P4(TYPE, MAP, POINTER, POINTER), R1(BOOL))
+// Look up a key in a map when the value is large.
+DEF_GO_RUNTIME(MAPACCESS1_FAT, "runtime.mapaccess1_fat",
+              P4(TYPE, MAP, POINTER, POINTER), R1(POINTER))
 
-// Tuple assignment to a map element.
-DEF_GO_RUNTIME(MAPASSIGN2, "runtime.mapassign2",
-              P4(MAP, POINTER, POINTER, BOOL), R0())
+// Look up a key in a map returning the value and whether it is
+// present.
+DEF_GO_RUNTIME(MAPACCESS2, "runtime.mapaccess2", P3(TYPE, MAP, POINTER),
+              R2(POINTER, BOOL))
 
-// Delete a key from a map.
-DEF_GO_RUNTIME(MAPDELETE, "runtime.mapdelete", P2(MAP, POINTER), R0())
+// Look up a key in a map, returning the value and whether it is
+// present, when the value is large.
+DEF_GO_RUNTIME(MAPACCESS2_FAT, "runtime.mapaccess2_fat",
+              P4(TYPE, MAP, POINTER, POINTER), R2(POINTER, BOOL))
 
-// Begin a range over a map.
-DEF_GO_RUNTIME(MAPITERINIT, "runtime.mapiterinit", P2(MAP, MAPITER), R0())
+// Assignment to a key in a map.
+DEF_GO_RUNTIME(MAPASSIGN, "runtime.mapassign1",
+              P4(TYPE, MAP, POINTER, POINTER), R0())
 
-// Range over a map, returning the next key.
-DEF_GO_RUNTIME(MAPITER1, "runtime.mapiter1", P2(MAPITER, POINTER), R0())
+// Delete a key from a map.
+DEF_GO_RUNTIME(MAPDELETE, "runtime.mapdelete", P3(TYPE, MAP, POINTER), R0())
 
-// Range over a map, returning the next key and value.
-DEF_GO_RUNTIME(MAPITER2, "runtime.mapiter2", P3(MAPITER, POINTER, POINTER),
+// Begin a range over a map.
+DEF_GO_RUNTIME(MAPITERINIT, "runtime.mapiterinit", P3(TYPE, MAP, POINTER),
               R0())
 
 // Range over a map, moving to the next map entry.
-DEF_GO_RUNTIME(MAPITERNEXT, "runtime.mapiternext", P1(MAPITER), R0())
+DEF_GO_RUNTIME(MAPITERNEXT, "runtime.mapiternext", P1(POINTER), R0())
 
 
 // Make a channel.
 DEF_GO_RUNTIME(MAKECHAN, "__go_new_channel", P2(TYPE, UINTPTR), R1(CHAN))
 DEF_GO_RUNTIME(MAKECHANBIG, "__go_new_channel_big", P2(TYPE, UINT64), R1(CHAN))
 
-// Get the length of a channel (the number of unread values).
-DEF_GO_RUNTIME(CHAN_LEN, "__go_chan_len", P1(CHAN), R1(INT))
-
 // Get the capacity of a channel (the size of the buffer).
 DEF_GO_RUNTIME(CHAN_CAP, "__go_chan_cap", P1(CHAN), R1(INT))
 
index 636e1965006fe466c1d9fd6aa18f66018902a4a0..e92510b33db0b42e8e59c9b663c04f95bcc459ca 100644 (file)
@@ -39,10 +39,6 @@ class Runtime
   static void
   convert_types(Gogo*);
 
-  // Return the type used for iterations over maps.
-  static Type*
-  map_iteration_type();
-
   // Return the runtime code for a named builtin function.
   static Function
   name_to_code(const std::string&);
index 9066c016730ab35ed734522fc32521d07674ef5b..9e481741ad6152427f36850dd176cd9a311ecc99 100644 (file)
@@ -544,6 +544,106 @@ Statement::make_temporary(Type* type, Expression* init,
   return new Temporary_statement(type, init, location);
 }
 
+// The Move_subexpressions class is used to move all top-level
+// subexpressions of an expression.  This is used for things like
+// index expressions in which we must evaluate the index value before
+// it can be changed by a multiple assignment.
+
+class Move_subexpressions : public Traverse
+{
+ public:
+  Move_subexpressions(int skip, Block* block)
+    : Traverse(traverse_expressions),
+      skip_(skip), block_(block)
+  { }
+
+ protected:
+  int
+  expression(Expression**);
+
+ private:
+  // The number of subexpressions to skip moving.  This is used to
+  // avoid moving the array itself, as we only need to move the index.
+  int skip_;
+  // The block where new temporary variables should be added.
+  Block* block_;
+};
+
+int
+Move_subexpressions::expression(Expression** pexpr)
+{
+  if (this->skip_ > 0)
+    --this->skip_;
+  else if ((*pexpr)->temporary_reference_expression() == NULL
+          && !(*pexpr)->is_nil_expression()
+           && !(*pexpr)->is_constant())
+    {
+      Location loc = (*pexpr)->location();
+      Temporary_statement* temp = Statement::make_temporary(NULL, *pexpr, loc);
+      this->block_->add_statement(temp);
+      *pexpr = Expression::make_temporary_reference(temp, loc);
+    }
+  // We only need to move top-level subexpressions.
+  return TRAVERSE_SKIP_COMPONENTS;
+}
+
+// The Move_ordered_evals class is used to find any subexpressions of
+// an expression that have an evaluation order dependency.  It creates
+// temporary variables to hold them.
+
+class Move_ordered_evals : public Traverse
+{
+ public:
+  Move_ordered_evals(Block* block)
+    : Traverse(traverse_expressions),
+      block_(block)
+  { }
+
+ protected:
+  int
+  expression(Expression**);
+
+ private:
+  // The block where new temporary variables should be added.
+  Block* block_;
+};
+
+int
+Move_ordered_evals::expression(Expression** pexpr)
+{
+  // We have to look at subexpressions first.
+  if ((*pexpr)->traverse_subexpressions(this) == TRAVERSE_EXIT)
+    return TRAVERSE_EXIT;
+
+  int i;
+  if ((*pexpr)->must_eval_subexpressions_in_order(&i))
+    {
+      Move_subexpressions ms(i, this->block_);
+      if ((*pexpr)->traverse_subexpressions(&ms) == TRAVERSE_EXIT)
+       return TRAVERSE_EXIT;
+    }
+
+  if ((*pexpr)->must_eval_in_order())
+    {
+      Call_expression* call = (*pexpr)->call_expression();
+      if (call != NULL && call->is_multi_value_arg())
+       {
+         // A call expression which returns multiple results as an argument
+         // to another call must be handled specially.  We can't create a
+         // temporary because there is no type to give it.  Instead, group
+         // the caller and this multi-valued call argument and use a temporary
+         // variable to hold them.
+         return TRAVERSE_SKIP_COMPONENTS;
+       }
+
+      Location loc = (*pexpr)->location();
+      Temporary_statement* temp = Statement::make_temporary(NULL, *pexpr, loc);
+      this->block_->add_statement(temp);
+      *pexpr = Expression::make_temporary_reference(temp, loc);
+    }
+  return TRAVERSE_SKIP_COMPONENTS;
+}
+
 // Class Assignment_statement.
 
 // Traversal.
@@ -563,6 +663,66 @@ Assignment_statement::do_traverse_assignments(Traverse_assignments* tassign)
   return true;
 }
 
+// Lower an assignment to a map index expression to a runtime function
+// call.
+
+Statement*
+Assignment_statement::do_lower(Gogo*, Named_object*, Block* enclosing,
+                              Statement_inserter*)
+{
+  Map_index_expression* mie = this->lhs_->map_index_expression();
+  if (mie != NULL)
+    {
+      Location loc = this->location();
+
+      Expression* map = mie->map();
+      Map_type* mt = map->type()->map_type();
+      if (mt == NULL)
+       {
+         go_assert(saw_errors());
+         return Statement::make_error_statement(loc);
+       }
+
+      Block* b = new Block(enclosing, loc);
+
+      // Move out any subexpressions on the left hand side to make
+      // sure that functions are called in the required order.
+      Move_ordered_evals moe(b);
+      mie->traverse_subexpressions(&moe);
+
+      // Copy key and value into temporaries so that we can take their
+      // address without pushing the value onto the heap.
+
+      // var key_temp KEY_TYPE = MAP_INDEX
+      Temporary_statement* key_temp = Statement::make_temporary(mt->key_type(),
+                                                               mie->index(),
+                                                               loc);
+      b->add_statement(key_temp);
+
+      // var val_temp VAL_TYPE = RHS
+      Temporary_statement* val_temp = Statement::make_temporary(mt->val_type(),
+                                                               this->rhs_,
+                                                               loc);
+      b->add_statement(val_temp);
+
+      // mapassign1(TYPE, MAP, &key_temp, &val_temp)
+      Expression* a1 = Expression::make_type_descriptor(mt, loc);
+      Expression* a2 = mie->map();
+      Temporary_reference_expression* ref =
+       Expression::make_temporary_reference(key_temp, loc);
+      Expression* a3 = Expression::make_unary(OPERATOR_AND, ref, loc);
+      ref = Expression::make_temporary_reference(val_temp, loc);
+      Expression* a4 = Expression::make_unary(OPERATOR_AND, ref, loc);
+      Expression* call = Runtime::make_call(Runtime::MAPASSIGN, loc, 4,
+                                           a1, a2, a3, a4);
+      b->add_statement(Statement::make_statement(call, false));
+
+      return Statement::make_block_statement(b, loc);
+    }
+
+  return this;
+}
+
 // Set types for the assignment.
 
 void
@@ -690,106 +850,6 @@ Statement::make_assignment(Expression* lhs, Expression* rhs,
   return new Assignment_statement(lhs, rhs, location);
 }
 
-// The Move_subexpressions class is used to move all top-level
-// subexpressions of an expression.  This is used for things like
-// index expressions in which we must evaluate the index value before
-// it can be changed by a multiple assignment.
-
-class Move_subexpressions : public Traverse
-{
- public:
-  Move_subexpressions(int skip, Block* block)
-    : Traverse(traverse_expressions),
-      skip_(skip), block_(block)
-  { }
-
- protected:
-  int
-  expression(Expression**);
-
- private:
-  // The number of subexpressions to skip moving.  This is used to
-  // avoid moving the array itself, as we only need to move the index.
-  int skip_;
-  // The block where new temporary variables should be added.
-  Block* block_;
-};
-
-int
-Move_subexpressions::expression(Expression** pexpr)
-{
-  if (this->skip_ > 0)
-    --this->skip_;
-  else if ((*pexpr)->temporary_reference_expression() == NULL
-          && !(*pexpr)->is_nil_expression()
-           && !(*pexpr)->is_constant())
-    {
-      Location loc = (*pexpr)->location();
-      Temporary_statement* temp = Statement::make_temporary(NULL, *pexpr, loc);
-      this->block_->add_statement(temp);
-      *pexpr = Expression::make_temporary_reference(temp, loc);
-    }
-  // We only need to move top-level subexpressions.
-  return TRAVERSE_SKIP_COMPONENTS;
-}
-
-// The Move_ordered_evals class is used to find any subexpressions of
-// an expression that have an evaluation order dependency.  It creates
-// temporary variables to hold them.
-
-class Move_ordered_evals : public Traverse
-{
- public:
-  Move_ordered_evals(Block* block)
-    : Traverse(traverse_expressions),
-      block_(block)
-  { }
-
- protected:
-  int
-  expression(Expression**);
-
- private:
-  // The block where new temporary variables should be added.
-  Block* block_;
-};
-
-int
-Move_ordered_evals::expression(Expression** pexpr)
-{
-  // We have to look at subexpressions first.
-  if ((*pexpr)->traverse_subexpressions(this) == TRAVERSE_EXIT)
-    return TRAVERSE_EXIT;
-
-  int i;
-  if ((*pexpr)->must_eval_subexpressions_in_order(&i))
-    {
-      Move_subexpressions ms(i, this->block_);
-      if ((*pexpr)->traverse_subexpressions(&ms) == TRAVERSE_EXIT)
-       return TRAVERSE_EXIT;
-    }
-
-  if ((*pexpr)->must_eval_in_order())
-    {
-      Call_expression* call = (*pexpr)->call_expression();
-      if (call != NULL && call->is_multi_value_arg())
-       {
-         // A call expression which returns multiple results as an argument
-         // to another call must be handled specially.  We can't create a
-         // temporary because there is no type to give it.  Instead, group
-         // the caller and this multi-valued call argument and use a temporary
-         // variable to hold them.
-         return TRAVERSE_SKIP_COMPONENTS;
-       }
-
-      Location loc = (*pexpr)->location();
-      Temporary_statement* temp = Statement::make_temporary(NULL, *pexpr, loc);
-      this->block_->add_statement(temp);
-      *pexpr = Expression::make_temporary_reference(temp, loc);
-    }
-  return TRAVERSE_SKIP_COMPONENTS;
-}
-
 // An assignment operation statement.
 
 class Assignment_operation_statement : public Statement
@@ -1131,7 +1191,7 @@ Tuple_map_assignment_statement::do_traverse(Traverse* traverse)
 // Lower a tuple map assignment.
 
 Statement*
-Tuple_map_assignment_statement::do_lower(Gogo*, Named_object*,
+Tuple_map_assignment_statement::do_lower(Gogo* gogo, Named_object*,
                                         Block* enclosing, Statement_inserter*)
 {
   Location loc = this->location();
@@ -1162,10 +1222,11 @@ Tuple_map_assignment_statement::do_lower(Gogo*, Named_object*,
     Statement::make_temporary(map_type->key_type(), map_index->index(), loc);
   b->add_statement(key_temp);
 
-  // var val_temp VAL_TYPE
-  Temporary_statement* val_temp =
-    Statement::make_temporary(map_type->val_type(), NULL, loc);
-  b->add_statement(val_temp);
+  // var val_ptr_temp *VAL_TYPE
+  Type* val_ptr_type = Type::make_pointer_type(map_type->val_type());
+  Temporary_statement* val_ptr_temp = Statement::make_temporary(val_ptr_type,
+                                                               NULL, loc);
+  b->add_statement(val_ptr_temp);
 
   // var present_temp bool
   Temporary_statement* present_temp =
@@ -1175,24 +1236,34 @@ Tuple_map_assignment_statement::do_lower(Gogo*, Named_object*,
                              NULL, loc);
   b->add_statement(present_temp);
 
-  // present_temp = mapaccess2(DESCRIPTOR, MAP, &key_temp, &val_temp)
+  // val_ptr_temp, present_temp = mapaccess2(DESCRIPTOR, MAP, &key_temp)
   Expression* a1 = Expression::make_type_descriptor(map_type, loc);
   Expression* a2 = map_index->map();
   Temporary_reference_expression* ref =
     Expression::make_temporary_reference(key_temp, loc);
   Expression* a3 = Expression::make_unary(OPERATOR_AND, ref, loc);
-  ref = Expression::make_temporary_reference(val_temp, loc);
-  Expression* a4 = Expression::make_unary(OPERATOR_AND, ref, loc);
-  Expression* call = Runtime::make_call(Runtime::MAPACCESS2, loc, 4,
-                                       a1, a2, a3, a4);
+  Expression* a4 = map_type->fat_zero_value(gogo);
+  Call_expression* call;
+  if (a4 == NULL)
+    call = Runtime::make_call(Runtime::MAPACCESS2, loc, 3, a1, a2, a3);
+  else
+    call = Runtime::make_call(Runtime::MAPACCESS2_FAT, loc, 4, a1, a2, a3, a4);
+  ref = Expression::make_temporary_reference(val_ptr_temp, loc);
+  ref->set_is_lvalue();
+  Expression* res = Expression::make_call_result(call, 0);
+  res = Expression::make_unsafe_cast(val_ptr_type, res, loc);
+  Statement* s = Statement::make_assignment(ref, res, loc);
+  b->add_statement(s);
   ref = Expression::make_temporary_reference(present_temp, loc);
   ref->set_is_lvalue();
-  Statement* s = Statement::make_assignment(ref, call, loc);
+  res = Expression::make_call_result(call, 1);
+  s = Statement::make_assignment(ref, res, loc);
   b->add_statement(s);
 
-  // val = val_temp
-  ref = Expression::make_temporary_reference(val_temp, loc);
-  s = Statement::make_assignment(this->val_, ref, loc);
+  // val = *val_ptr_temp
+  ref = Expression::make_temporary_reference(val_ptr_temp, loc);
+  Expression* ind = Expression::make_unary(OPERATOR_MULT, ref, loc);
+  s = Statement::make_assignment(this->val_, ind, loc);
   b->add_statement(s);
 
   // present = present_temp
@@ -1228,140 +1299,6 @@ Statement::make_tuple_map_assignment(Expression* val, Expression* present,
   return new Tuple_map_assignment_statement(val, present, map_index, location);
 }
 
-// Assign a pair of entries to a map.
-//   m[k] = v, p
-
-class Map_assignment_statement : public Statement
-{
- public:
-  Map_assignment_statement(Expression* map_index,
-                          Expression* val, Expression* should_set,
-                          Location location)
-    : Statement(STATEMENT_MAP_ASSIGNMENT, location),
-      map_index_(map_index), val_(val), should_set_(should_set)
-  { }
-
- protected:
-  int
-  do_traverse(Traverse* traverse);
-
-  bool
-  do_traverse_assignments(Traverse_assignments*)
-  { go_unreachable(); }
-
-  Statement*
-  do_lower(Gogo*, Named_object*, Block*, Statement_inserter*);
-
-  Bstatement*
-  do_get_backend(Translate_context*)
-  { go_unreachable(); }
-
-  void
-  do_dump_statement(Ast_dump_context*) const;
-
- private:
-  // A reference to the map index which should be set or deleted.
-  Expression* map_index_;
-  // The value to add to the map.
-  Expression* val_;
-  // Whether or not to add the value.
-  Expression* should_set_;
-};
-
-// Traverse a map assignment.
-
-int
-Map_assignment_statement::do_traverse(Traverse* traverse)
-{
-  if (this->traverse_expression(traverse, &this->map_index_) == TRAVERSE_EXIT
-      || this->traverse_expression(traverse, &this->val_) == TRAVERSE_EXIT)
-    return TRAVERSE_EXIT;
-  return this->traverse_expression(traverse, &this->should_set_);
-}
-
-// Lower a map assignment to a function call.
-
-Statement*
-Map_assignment_statement::do_lower(Gogo*, Named_object*, Block* enclosing,
-                                  Statement_inserter*)
-{
-  Location loc = this->location();
-
-  Map_index_expression* map_index = this->map_index_->map_index_expression();
-  if (map_index == NULL)
-    {
-      this->report_error(_("expected map index on left hand side"));
-      return Statement::make_error_statement(loc);
-    }
-  Map_type* map_type = map_index->get_map_type();
-  if (map_type == NULL)
-    return Statement::make_error_statement(loc);
-
-  Block* b = new Block(enclosing, loc);
-
-  // Evaluate the map first to get order of evaluation right.
-  // map_temp := m // we are evaluating m[k] = v, p
-  Temporary_statement* map_temp = Statement::make_temporary(map_type,
-                                                           map_index->map(),
-                                                           loc);
-  b->add_statement(map_temp);
-
-  // var key_temp MAP_KEY_TYPE = k
-  Temporary_statement* key_temp =
-    Statement::make_temporary(map_type->key_type(), map_index->index(), loc);
-  b->add_statement(key_temp);
-
-  // var val_temp MAP_VAL_TYPE = v
-  Temporary_statement* val_temp =
-    Statement::make_temporary(map_type->val_type(), this->val_, loc);
-  b->add_statement(val_temp);
-
-  // var insert_temp bool = p
-  Temporary_statement* insert_temp =
-    Statement::make_temporary(Type::lookup_bool_type(), this->should_set_,
-                             loc);
-  b->add_statement(insert_temp);
-
-  // mapassign2(map_temp, &key_temp, &val_temp, p)
-  Expression* p1 = Expression::make_temporary_reference(map_temp, loc);
-  Expression* ref = Expression::make_temporary_reference(key_temp, loc);
-  Expression* p2 = Expression::make_unary(OPERATOR_AND, ref, loc);
-  ref = Expression::make_temporary_reference(val_temp, loc);
-  Expression* p3 = Expression::make_unary(OPERATOR_AND, ref, loc);
-  Expression* p4 = Expression::make_temporary_reference(insert_temp, loc);
-  Expression* call = Runtime::make_call(Runtime::MAPASSIGN2, loc, 4,
-                                       p1, p2, p3, p4);
-  Statement* s = Statement::make_statement(call, true);
-  b->add_statement(s);
-
-  return Statement::make_block_statement(b, loc);
-}
-
-// Dump the AST representation for a map assignment statement.
-
-void
-Map_assignment_statement::do_dump_statement(
-    Ast_dump_context* ast_dump_context) const
-{
-  ast_dump_context->print_indent();
-  ast_dump_context->dump_expression(this->map_index_);
-  ast_dump_context->ostream() << " = ";
-  ast_dump_context->dump_expression(this->val_);
-  ast_dump_context->ostream() << ", ";
-  ast_dump_context->dump_expression(this->should_set_);
-  ast_dump_context->ostream() << std::endl;
-}
-
-// Make a statement which assigns a pair of entries to a map.
-
-Statement*
-Statement::make_map_assignment(Expression* map_index,
-                              Expression* val, Expression* should_set,
-                              Location location)
-{
-  return new Map_assignment_statement(map_index, val, should_set, location);
-}
-
 // A tuple assignment from a receive statement.
 
 class Tuple_receive_assignment_statement : public Statement
@@ -1894,8 +1831,6 @@ Statement::make_dec_statement(Expression* expr)
 // Class Thunk_statement.  This is the base class for go and defer
 // statements.
 
-Unordered_set(const Struct_type*) Thunk_statement::thunk_types;
-
 // Constructor.
 
 Thunk_statement::Thunk_statement(Statement_classification classification,
@@ -2278,21 +2213,10 @@ Thunk_statement::build_struct(Function_type* fntype)
     }
 
   Struct_type *st = Type::make_struct_type(fields, location);
-
-  Thunk_statement::thunk_types.insert(st);
-
+  st->set_is_struct_incomparable();
   return st;
 }
 
-// Return whether ST is a type created to hold thunk parameters.
-
-bool
-Thunk_statement::is_thunk_struct(const Struct_type* st)
-{
-  return (Thunk_statement::thunk_types.find(st)
-         != Thunk_statement::thunk_types.end());
-}
-
 // Build the thunk we are going to call.  This is a brand new, albeit
 // artificial, function.
 
@@ -5356,9 +5280,9 @@ For_range_statement::do_lower(Gogo* gogo, Named_object*, Block* enclosing,
                             index_temp, value_temp, &init, &cond, &iter_init,
                             &post);
   else if (range_type->map_type() != NULL)
-    this->lower_range_map(gogo, temp_block, body, range_object, range_temp,
-                         index_temp, value_temp, &init, &cond, &iter_init,
-                         &post);
+    this->lower_range_map(gogo, range_type->map_type(), temp_block, body,
+                         range_object, range_temp, index_temp, value_temp,
+                         &init, &cond, &iter_init, &post);
   else if (range_type->channel_type() != NULL)
     this->lower_range_channel(gogo, temp_block, body, range_object, range_temp,
                              index_temp, value_temp, &init, &cond, &iter_init,
@@ -5753,7 +5677,8 @@ For_range_statement::lower_range_string(Gogo*,
 // Lower a for range over a map.
 
 void
-For_range_statement::lower_range_map(Gogo*,
+For_range_statement::lower_range_map(Gogo* gogo,
+                                    Map_type* map_type,
                                     Block* enclosing,
                                     Block* body_block,
                                     Named_object* range_object,
@@ -5768,13 +5693,13 @@ For_range_statement::lower_range_map(Gogo*,
   Location loc = this->location();
 
   // The runtime uses a struct to handle ranges over a map.  The
-  // struct is four pointers long.  The first pointer is NULL when we
-  // have completed the iteration.
+  // struct is built by Map_type::hiter_type for a specific map type.
 
   // The loop we generate:
   //   var hiter map_iteration_struct
-  //   for mapiterinit(range, &hiter); hiter[0] != nil; mapiternext(&hiter) {
-  //           mapiter2(hiter, &index_temp, &value_temp)
+  //   for mapiterinit(type, range, &hiter); hiter.key != nil; mapiternext(&hiter) {
+  //           index_temp = *hiter.key
+  //           value_temp = *hiter.val
   //           index = index_temp
   //           value = value_temp
   //           original body
@@ -5782,54 +5707,57 @@ For_range_statement::lower_range_map(Gogo*,
 
   // Set *PINIT to
   //   var hiter map_iteration_struct
-  //   runtime.mapiterinit(range, &hiter)
+  //   runtime.mapiterinit(type, range, &hiter)
 
   Block* init = new Block(enclosing, loc);
 
-  Type* map_iteration_type = Runtime::map_iteration_type();
+  Type* map_iteration_type = map_type->hiter_type(gogo);
   Temporary_statement* hiter = Statement::make_temporary(map_iteration_type,
                                                         NULL, loc);
   init->add_statement(hiter);
 
-  Expression* p1 = this->make_range_ref(range_object, range_temp, loc);
+  Expression* p1 = Expression::make_type_descriptor(map_type, loc);
+  Expression* p2 = this->make_range_ref(range_object, range_temp, loc);
   Expression* ref = Expression::make_temporary_reference(hiter, loc);
-  Expression* p2 = Expression::make_unary(OPERATOR_AND, ref, loc);
-  Expression* call = Runtime::make_call(Runtime::MAPITERINIT, loc, 2, p1, p2);
+  Expression* p3 = Expression::make_unary(OPERATOR_AND, ref, loc);
+  Expression* call = Runtime::make_call(Runtime::MAPITERINIT, loc, 3,
+                                       p1, p2, p3);
   init->add_statement(Statement::make_statement(call, true));
 
   *pinit = init;
 
   // Set *PCOND to
-  //   hiter[0] != nil
+  //   hiter.key != nil
 
   ref = Expression::make_temporary_reference(hiter, loc);
-  Expression* zexpr = Expression::make_integer_ul(0, NULL, loc);
-  Expression* index = Expression::make_index(ref, zexpr, NULL, NULL, loc);
-  Expression* ne = Expression::make_binary(OPERATOR_NOTEQ, index,
+  ref = Expression::make_field_reference(ref, 0, loc);
+  Expression* ne = Expression::make_binary(OPERATOR_NOTEQ, ref,
                                           Expression::make_nil(loc),
                                           loc);
   *pcond = ne;
 
   // Set *PITER_INIT to
-  //   mapiter1(hiter, &index_temp)
-  // or
-  //   mapiter2(hiter, &index_temp, &value_temp)
+  //   index_temp = *hiter.key
+  //   value_temp = *hiter.val
 
   Block* iter_init = new Block(body_block, loc);
 
-  ref = Expression::make_temporary_reference(hiter, loc);
-  p1 = Expression::make_unary(OPERATOR_AND, ref, loc);
-  ref = Expression::make_temporary_reference(index_temp, loc);
-  p2 = Expression::make_unary(OPERATOR_AND, ref, loc);
-  if (value_temp == NULL)
-    call = Runtime::make_call(Runtime::MAPITER1, loc, 2, p1, p2);
-  else
+  // index_temp = *hiter.key
+  // Build the chain from rhs itself: ref still holds the hiter.key
+  // field reference consumed by the loop condition above, so reusing
+  // it here would share an expression node and leave rhs dead.
+  Expression* lhs = Expression::make_temporary_reference(index_temp, loc);
+  Expression* rhs = Expression::make_temporary_reference(hiter, loc);
+  rhs = Expression::make_field_reference(rhs, 0, loc);
+  rhs = Expression::make_unary(OPERATOR_MULT, rhs, loc);
+  Statement* set = Statement::make_assignment(lhs, rhs, loc);
+  iter_init->add_statement(set);
+
+  if (value_temp != NULL)
     {
-      ref = Expression::make_temporary_reference(value_temp, loc);
-      Expression* p3 = Expression::make_unary(OPERATOR_AND, ref, loc);
-      call = Runtime::make_call(Runtime::MAPITER2, loc, 3, p1, p2, p3);
+      lhs = Expression::make_temporary_reference(value_temp, loc);
+      rhs = Expression::make_temporary_reference(hiter, loc);
+      rhs = Expression::make_field_reference(rhs, 1, loc);
+      rhs = Expression::make_unary(OPERATOR_MULT, rhs, loc);
+      set = Statement::make_assignment(lhs, rhs, loc);
+      iter_init->add_statement(set);
     }
-  iter_init->add_statement(Statement::make_statement(call, true));
 
   *piter_init = iter_init;
 
index c949dda599f11c819a4808f2facbf2376a0d4554..9ed8d7272ecccc8fbfdcd9e6ef601aec2765dc84 100644 (file)
@@ -120,7 +120,6 @@ class Statement
     STATEMENT_ASSIGNMENT_OPERATION,
     STATEMENT_TUPLE_ASSIGNMENT,
     STATEMENT_TUPLE_MAP_ASSIGNMENT,
-    STATEMENT_MAP_ASSIGNMENT,
     STATEMENT_TUPLE_RECEIVE_ASSIGNMENT,
     STATEMENT_TUPLE_TYPE_GUARD_ASSIGNMENT,
     STATEMENT_INCDEC,
@@ -166,11 +165,6 @@ class Statement
   make_tuple_map_assignment(Expression* val, Expression* present,
                            Expression*, Location);
 
-  // Make a statement which assigns a pair of values to a map.
-  static Statement*
-  make_map_assignment(Expression*, Expression* val,
-                     Expression* should_set, Location);
-
   // Make an assignment from a nonblocking receive to a pair of
   // variables.
   static Statement*
@@ -586,6 +580,9 @@ class Assignment_statement : public Statement
   bool
   do_traverse_assignments(Traverse_assignments*);
 
+  virtual Statement*
+  do_lower(Gogo*, Named_object*, Block*, Statement_inserter*);
+
   void
   do_determine_types();
 
@@ -1144,10 +1141,6 @@ class Thunk_statement : public Statement
   bool
   simplify_statement(Gogo*, Named_object*, Block*);
 
-  // Return whether ST is a type created to hold thunk parameters.
-  static bool
-  is_thunk_struct(const Struct_type *st);
-
  protected:
   int
   do_traverse(Traverse* traverse);
@@ -1186,9 +1179,6 @@ class Thunk_statement : public Statement
   void
   thunk_field_param(int n, char* buf, size_t buflen);
 
-  // A list of all the struct types created for thunk statements.
-  static Unordered_set(const Struct_type*) thunk_types;
-
   // The function call to be executed in a separate thread (go) or
   // later (defer).
   Expression* call_;
@@ -1529,9 +1519,10 @@ class For_range_statement : public Statement
                     Block**, Expression**, Block**, Block**);
 
   void
-  lower_range_map(Gogo*, Block*, Block*, Named_object*, Temporary_statement*,
+  lower_range_map(Gogo*, Map_type*, Block*, Block*, Named_object*,
                  Temporary_statement*, Temporary_statement*,
-                 Block**, Expression**, Block**, Block**);
+                 Temporary_statement*, Block**, Expression**, Block**,
+                 Block**);
 
   void
   lower_range_channel(Gogo*, Block*, Block*, Named_object*,
index d91180a04971947bc9957e56fd050ce50aad73a2..db5655ba06a0f410ac929973faa5364d655c0900 100644 (file)
@@ -567,6 +567,12 @@ Type::are_compatible_for_comparison(bool is_equality_op, const Type *t1,
        return t2->named_type()->named_type_is_comparable(reason);
       else if (t1->struct_type() != NULL)
        {
+         if (t1->struct_type()->is_struct_incomparable())
+           {
+             if (reason != NULL)
+               *reason = _("invalid comparison of generated struct");
+             return false;
+           }
          const Struct_field_list* fields = t1->struct_type()->fields();
          for (Struct_field_list::const_iterator p = fields->begin();
               p != fields->end();
@@ -582,6 +588,12 @@ Type::are_compatible_for_comparison(bool is_equality_op, const Type *t1,
        }
       else if (t1->array_type() != NULL)
        {
+         if (t1->array_type()->is_array_incomparable())
+           {
+             if (reason != NULL)
+               *reason = _("invalid comparison of generated array");
+             return false;
+           }
          if (t1->array_type()->length()->is_nil_expression()
              || !t1->array_type()->element_type()->is_comparable())
            {
@@ -1478,6 +1490,7 @@ Type::make_type_descriptor_type()
 
       Typed_identifier_list *params = new Typed_identifier_list();
       params->push_back(Typed_identifier("key", unsafe_pointer_type, bloc));
+      params->push_back(Typed_identifier("seed", uintptr_type, bloc));
       params->push_back(Typed_identifier("key_size", uintptr_type, bloc));
 
       Typed_identifier_list* results = new Typed_identifier_list();
@@ -1579,6 +1592,13 @@ Type::type_functions(Gogo* gogo, Named_type* name, Function_type* hash_fntype,
                     Function_type* equal_fntype, Named_object** hash_fn,
                     Named_object** equal_fn)
 {
+  if (!this->is_comparable())
+    {
+      *hash_fn = NULL;
+      *equal_fn = NULL;
+      return;
+    }
+
   if (hash_fntype == NULL || equal_fntype == NULL)
     {
       Location bloc = Linemap::predeclared_location();
@@ -1592,6 +1612,7 @@ Type::type_functions(Gogo* gogo, Named_type* name, Function_type* hash_fntype,
          Typed_identifier_list* params = new Typed_identifier_list();
          params->push_back(Typed_identifier("key", unsafe_pointer_type,
                                             bloc));
+         params->push_back(Typed_identifier("seed", uintptr_type, bloc));
          params->push_back(Typed_identifier("key_size", uintptr_type, bloc));
 
          Typed_identifier_list* results = new Typed_identifier_list();
@@ -1623,13 +1644,6 @@ Type::type_functions(Gogo* gogo, Named_type* name, Function_type* hash_fntype,
       hash_fnname = "__go_type_hash_identity";
       equal_fnname = "__go_type_equal_identity";
     }
-  else if (!this->is_comparable() ||
-          (this->struct_type() != NULL
-           && Thunk_statement::is_thunk_struct(this->struct_type())))
-    {
-      hash_fnname = "__go_type_hash_error";
-      equal_fnname = "__go_type_equal_error";
-    }
   else
     {
       switch (this->base()->classification())
@@ -1844,6 +1858,8 @@ Type::write_specific_type_functions(Gogo* gogo, Named_type* name,
       return;
     }
 
+  go_assert(this->is_comparable());
+
   Named_object* hash_fn = gogo->start_function(hash_name, hash_fntype, false,
                                               bloc);
   hash_fn->func_value()->set_is_type_specific_function();
@@ -1909,6 +1925,10 @@ Type::write_named_hash(Gogo* gogo, Named_type* name,
   Named_object* key_arg = gogo->lookup("key", NULL);
   go_assert(key_arg != NULL);
 
+  // The seed argument to the hash function.
+  Named_object* seed_arg = gogo->lookup("seed", NULL);
+  go_assert(seed_arg != NULL);
+
   // The size of the type we are going to hash.
   Named_object* keysz_arg = gogo->lookup("key_size", NULL);
   go_assert(keysz_arg != NULL);
@@ -1920,9 +1940,11 @@ Type::write_named_hash(Gogo* gogo, Named_type* name,
 
   // Call the hash function for the base type.
   Expression* key_ref = Expression::make_var_reference(key_arg, bloc);
+  Expression* seed_ref = Expression::make_var_reference(seed_arg, bloc);
   Expression* keysz_ref = Expression::make_var_reference(keysz_arg, bloc);
   Expression_list* args = new Expression_list();
   args->push_back(key_ref);
+  args->push_back(seed_ref);
   args->push_back(keysz_ref);
   Expression* func = Expression::make_func_reference(hash_fn, NULL, bloc);
   Expression* call = Expression::make_call(func, args, false, bloc);
@@ -2044,8 +2066,18 @@ Type::type_descriptor_constructor(Gogo* gogo, int runtime_type_kind,
   Named_object* equal_fn;
   this->type_functions(gogo, name, hash_fntype, equal_fntype, &hash_fn,
                       &equal_fn);
-  vals->push_back(Expression::make_func_reference(hash_fn, NULL, bloc));
-  vals->push_back(Expression::make_func_reference(equal_fn, NULL, bloc));
+  if (hash_fn == NULL)
+    vals->push_back(Expression::make_cast(hash_fntype,
+                                         Expression::make_nil(bloc),
+                                         bloc));
+  else
+    vals->push_back(Expression::make_func_reference(hash_fn, NULL, bloc));
+  if (equal_fn == NULL)
+    vals->push_back(Expression::make_cast(equal_fntype,
+                                         Expression::make_nil(bloc),
+                                         bloc));
+  else
+    vals->push_back(Expression::make_func_reference(equal_fn, NULL, bloc));
 
   ++p;
   go_assert(p->is_field_name("gc"));
@@ -4842,6 +4874,44 @@ Struct_type::do_compare_is_identity(Gogo* gogo)
   return true;
 }
 
+// Return whether this struct type is reflexive--whether a value of
+// this type is always equal to itself.
+
+bool
+Struct_type::do_is_reflexive()
+{
+  const Struct_field_list* fields = this->fields_;
+  if (fields == NULL)
+    return true;
+  for (Struct_field_list::const_iterator pf = fields->begin();
+       pf != fields->end();
+       ++pf)
+    {
+      if (!pf->type()->is_reflexive())
+       return false;
+    }
+  return true;
+}
+
+// Return whether this struct type needs a key update when used as a
+// map key.
+
+bool
+Struct_type::do_needs_key_update()
+{
+  const Struct_field_list* fields = this->fields_;
+  if (fields == NULL)
+    return false;
+  for (Struct_field_list::const_iterator pf = fields->begin();
+       pf != fields->end();
+       ++pf)
+    {
+      if (pf->type()->needs_key_update())
+       return true;
+    }
+  return false;
+}
+
 // Build identity and hash functions for this struct.
 
 // Hash code.
@@ -5310,18 +5380,20 @@ Struct_type::write_hash_function(Gogo* gogo, Named_type*,
   go_assert(key_arg != NULL);
   Type* key_arg_type = key_arg->var_value()->type();
 
-  Type* uintptr_type = Type::lookup_integer_type("uintptr");
+  // The seed argument to the hash function.
+  Named_object* seed_arg = gogo->lookup("seed", NULL);
+  go_assert(seed_arg != NULL);
 
-  // Get a 0.
-  Expression* zero = Expression::make_integer_ul(0, uintptr_type, bloc);
+  Type* uintptr_type = Type::lookup_integer_type("uintptr");
 
-  // Make a temporary to hold the return value, initialized to 0.
-  Temporary_statement* retval = Statement::make_temporary(uintptr_type, zero,
+  // Make a temporary to hold the return value, initialized to the seed.
+  Expression* ref = Expression::make_var_reference(seed_arg, bloc);
+  Temporary_statement* retval = Statement::make_temporary(uintptr_type, ref,
                                                          bloc);
   gogo->add_statement(retval);
 
   // Make a temporary to hold the key as a uintptr.
-  Expression* ref = Expression::make_var_reference(key_arg, bloc);
+  ref = Expression::make_var_reference(key_arg, bloc);
   ref = Expression::make_cast(uintptr_type, ref, bloc);
   Temporary_statement* key = Statement::make_temporary(uintptr_type, ref,
                                                       bloc);
@@ -5367,19 +5439,20 @@ Struct_type::write_hash_function(Gogo* gogo, Named_type*,
       pf->type()->type_functions(gogo, pf->type()->named_type(), hash_fntype,
                                 equal_fntype, &hash_fn, &equal_fn);
 
-      // Call the hash function for the field.
+      // Call the hash function for the field, passing retval as the seed.
+      ref = Expression::make_temporary_reference(retval, bloc);
       Expression_list* args = new Expression_list();
       args->push_back(subkey);
+      args->push_back(ref);
       args->push_back(size);
       Expression* func = Expression::make_func_reference(hash_fn, NULL, bloc);
       Expression* call = Expression::make_call(func, args, false, bloc);
 
-      // Add the field's hash value to retval.
+      // Set retval to the result.
       Temporary_reference_expression* tref =
        Expression::make_temporary_reference(retval, bloc);
       tref->set_is_lvalue();
-      Statement* s = Statement::make_assignment_operation(OPERATOR_PLUSEQ,
-                                                         tref, call, bloc);
+      Statement* s = Statement::make_assignment(tref, call, bloc);
       gogo->add_statement(s);
     }
 
@@ -5733,6 +5806,11 @@ Struct_type::can_write_type_to_c_header(
       return true;
 
     case TYPE_POINTER:
+      // Don't try to handle a pointer to an array.
+      if (t->points_to()->array_type() != NULL
+         && !t->points_to()->is_slice_type())
+       return false;
+
       if (t->points_to()->named_type() != NULL
          && t->points_to()->struct_type() != NULL)
        declare->push_back(t->points_to()->named_type()->named_object());
@@ -6157,18 +6235,20 @@ Array_type::write_hash_function(Gogo* gogo, Named_type* name,
   go_assert(key_arg != NULL);
   Type* key_arg_type = key_arg->var_value()->type();
 
-  Type* uintptr_type = Type::lookup_integer_type("uintptr");
+  // The seed argument to the hash function.
+  Named_object* seed_arg = gogo->lookup("seed", NULL);
+  go_assert(seed_arg != NULL);
 
-  // Get a 0.
-  Expression* zero = Expression::make_integer_ul(0, uintptr_type, bloc);
+  Type* uintptr_type = Type::lookup_integer_type("uintptr");
 
-  // Make a temporary to hold the return value, initialized to 0.
-  Temporary_statement* retval = Statement::make_temporary(uintptr_type, zero,
+  // Make a temporary to hold the return value, initialized to the seed.
+  Expression* ref = Expression::make_var_reference(seed_arg, bloc);
+  Temporary_statement* retval = Statement::make_temporary(uintptr_type, ref,
                                                          bloc);
   gogo->add_statement(retval);
 
   // Make a temporary to hold the key as a uintptr.
-  Expression* ref = Expression::make_var_reference(key_arg, bloc);
+  ref = Expression::make_var_reference(key_arg, bloc);
   ref = Expression::make_cast(uintptr_type, ref, bloc);
   Temporary_statement* key = Statement::make_temporary(uintptr_type, ref,
                                                       bloc);
@@ -6216,18 +6296,20 @@ Array_type::write_hash_function(Gogo* gogo, Named_type* name,
   Expression* ele_size = Expression::make_type_info(this->element_type_,
                                                    Expression::TYPE_INFO_SIZE);
 
-  // Get the hash of this element.
+  // Get the hash of this element, passing retval as the seed.
+  ref = Expression::make_temporary_reference(retval, bloc);
   Expression_list* args = new Expression_list();
   args->push_back(subkey);
+  args->push_back(ref);
   args->push_back(ele_size);
   Expression* func = Expression::make_func_reference(hash_fn, NULL, bloc);
   Expression* call = Expression::make_call(func, args, false, bloc);
 
-  // Add the element's hash value to retval.
+  // Set retval to the result.
   Temporary_reference_expression* tref =
     Expression::make_temporary_reference(retval, bloc);
   tref->set_is_lvalue();
-  s = Statement::make_assignment_operation(OPERATOR_PLUSEQ, tref, call, bloc);
+  s = Statement::make_assignment(tref, call, bloc);
   gogo->add_statement(s);
 
   // Increase the element pointer.
@@ -6846,6 +6928,100 @@ Type::make_array_type(Type* element_type, Expression* length)
 
 // Class Map_type.
 
+Named_object* Map_type::zero_value;
+int64_t Map_type::zero_value_size;
+int64_t Map_type::zero_value_align;
+
+// If this map requires the "fat" functions, return the pointer to
+// pass as the zero value to those functions.  Otherwise, in the
+// normal case, return NULL.  The map requires the "fat" functions if
+// the value size is larger than max_zero_size bytes.  max_zero_size
+// must match maxZero in libgo/go/runtime/hashmap.go.
+
+Expression*
+Map_type::fat_zero_value(Gogo* gogo)
+{
+  int64_t valsize;
+  if (!this->val_type_->backend_type_size(gogo, &valsize))
+    {
+      go_assert(saw_errors());
+      return NULL;
+    }
+  if (valsize <= Map_type::max_zero_size)
+    return NULL;
+
+  if (Map_type::zero_value_size < valsize)
+    Map_type::zero_value_size = valsize;
+
+  int64_t valalign;
+  if (!this->val_type_->backend_type_align(gogo, &valalign))
+    {
+      go_assert(saw_errors());
+      return NULL;
+    }
+
+  if (Map_type::zero_value_align < valalign)
+    Map_type::zero_value_align = valalign;
+
+  Location bloc = Linemap::predeclared_location();
+
+  if (Map_type::zero_value == NULL)
+    {
+      // The final type will be set in backend_zero_value.
+      Type* uint8_type = Type::lookup_integer_type("uint8");
+      Expression* size = Expression::make_integer_ul(0, NULL, bloc);
+      Type* array_type = Type::make_array_type(uint8_type, size);
+      Variable* var = new Variable(array_type, NULL, true, false, false, bloc);
+      Map_type::zero_value = Named_object::make_variable("go$zerovalue", NULL,
+                                                        var);
+    }
+
+  Expression* z = Expression::make_var_reference(Map_type::zero_value, bloc);
+  z = Expression::make_unary(OPERATOR_AND, z, bloc);
+  Type* unsafe_ptr_type = Type::make_pointer_type(Type::make_void_type());
+  z = Expression::make_cast(unsafe_ptr_type, z, bloc);
+  return z;
+}
+
+// Return whether VAR is the map zero value.
+
+bool
+Map_type::is_zero_value(Variable* var)
+{
+  return (Map_type::zero_value != NULL
+         && Map_type::zero_value->var_value() == var);
+}
+
+// Return the backend representation for the zero value.
+
+Bvariable*
+Map_type::backend_zero_value(Gogo* gogo)
+{
+  Location bloc = Linemap::predeclared_location();
+
+  go_assert(Map_type::zero_value != NULL);
+
+  Type* uint8_type = Type::lookup_integer_type("uint8");
+  Btype* buint8_type = uint8_type->get_backend(gogo);
+
+  Type* int_type = Type::lookup_integer_type("int");
+
+  Expression* e = Expression::make_integer_int64(Map_type::zero_value_size,
+                                                int_type, bloc);
+  Translate_context context(gogo, NULL, NULL, NULL);
+  Bexpression* blength = e->get_backend(&context);
+
+  Btype* barray_type = gogo->backend()->array_type(buint8_type, blength);
+
+  std::string zname = Map_type::zero_value->name();
+  Bvariable* zvar =
+    gogo->backend()->implicit_variable(zname, barray_type, false, true, true,
+                                      Map_type::zero_value_align);
+  gogo->backend()->implicit_variable_set_init(zvar, zname, barray_type,
+                                             false, true, true, NULL);
+  return zvar;
+}
+
 // Traversal.
 
 int
@@ -6890,8 +7066,8 @@ Map_type::do_hash_for_method(Gogo* gogo) const
 }
 
 // Get the backend representation for a map type.  A map type is
-// represented as a pointer to a struct.  The struct is __go_map in
-// libgo/map.h.
+// represented as a pointer to a struct.  The struct is hmap in
+// runtime/hashmap.go.
 
 Btype*
 Map_type::do_get_backend(Gogo* gogo)
@@ -6899,33 +7075,50 @@ Map_type::do_get_backend(Gogo* gogo)
   static Btype* backend_map_type;
   if (backend_map_type == NULL)
     {
-      std::vector<Backend::Btyped_identifier> bfields(4);
+      std::vector<Backend::Btyped_identifier> bfields(8);
 
       Location bloc = Linemap::predeclared_location();
 
-      Type* pdt = Type::make_type_descriptor_ptr_type();
-      bfields[0].name = "__descriptor";
-      bfields[0].btype = pdt->get_backend(gogo);
+      Type* int_type = Type::lookup_integer_type("int");
+      bfields[0].name = "count";
+      bfields[0].btype = int_type->get_backend(gogo);
       bfields[0].location = bloc;
 
-      Type* uintptr_type = Type::lookup_integer_type("uintptr");
-      bfields[1].name = "__element_count";
-      bfields[1].btype = uintptr_type->get_backend(gogo);
+      Type* uint8_type = Type::lookup_integer_type("uint8");
+      bfields[1].name = "flags";
+      bfields[1].btype = uint8_type->get_backend(gogo);
       bfields[1].location = bloc;
 
-      bfields[2].name = "__bucket_count";
+      bfields[2].name = "B";
       bfields[2].btype = bfields[1].btype;
       bfields[2].location = bloc;
 
+      Type* uint32_type = Type::lookup_integer_type("uint32");
+      bfields[3].name = "hash0";
+      bfields[3].btype = uint32_type->get_backend(gogo);
+      bfields[3].location = bloc;
+
       Btype* bvt = gogo->backend()->void_type();
       Btype* bpvt = gogo->backend()->pointer_type(bvt);
-      Btype* bppvt = gogo->backend()->pointer_type(bpvt);
-      bfields[3].name = "__buckets";
-      bfields[3].btype = bppvt;
-      bfields[3].location = bloc;
+      bfields[4].name = "buckets";
+      bfields[4].btype = bpvt;
+      bfields[4].location = bloc;
+
+      bfields[5].name = "oldbuckets";
+      bfields[5].btype = bpvt;
+      bfields[5].location = bloc;
+
+      Type* uintptr_type = Type::lookup_integer_type("uintptr");
+      bfields[6].name = "nevacuate";
+      bfields[6].btype = uintptr_type->get_backend(gogo);
+      bfields[6].location = bloc;
+
+      bfields[7].name = "overflow";
+      bfields[7].btype = bpvt;
+      bfields[7].location = bloc;
 
       Btype *bt = gogo->backend()->struct_type(bfields);
-      bt = gogo->backend()->named_type("__go_map", bt, bloc);
+      bt = gogo->backend()->named_type("runtime.hmap", bt, bloc);
       backend_map_type = gogo->backend()->pointer_type(bt);
     }
   return backend_map_type;
@@ -6941,12 +7134,24 @@ Map_type::make_map_type_descriptor_type()
     {
       Type* tdt = Type::make_type_descriptor_type();
       Type* ptdt = Type::make_type_descriptor_ptr_type();
+      Type* uint8_type = Type::lookup_integer_type("uint8");
+      Type* uint16_type = Type::lookup_integer_type("uint16");
+      Type* bool_type = Type::lookup_bool_type();
 
       Struct_type* sf =
-       Type::make_builtin_struct_type(3,
+       Type::make_builtin_struct_type(12,
                                       "", tdt,
                                       "key", ptdt,
-                                      "elem", ptdt);
+                                      "elem", ptdt,
+                                      "bucket", ptdt,
+                                      "hmap", ptdt,
+                                      "keysize", uint8_type,
+                                      "indirectkey", bool_type,
+                                      "valuesize", uint8_type,
+                                      "indirectvalue", bool_type,
+                                      "bucketsize", uint16_type,
+                                      "reflexivekey", bool_type,
+                                      "needkeyupdate", bool_type);
 
       ret = Type::make_builtin_named_type("MapType", sf);
     }
@@ -6962,11 +7167,48 @@ Map_type::do_type_descriptor(Gogo* gogo, Named_type* name)
   Location bloc = Linemap::predeclared_location();
 
   Type* mtdt = Map_type::make_map_type_descriptor_type();
+  Type* uint8_type = Type::lookup_integer_type("uint8");
+  Type* uint16_type = Type::lookup_integer_type("uint16");
+
+  int64_t keysize;
+  if (!this->key_type_->backend_type_size(gogo, &keysize))
+    {
+      error_at(this->location_, "error determining map key type size");
+      return Expression::make_error(this->location_);
+    }
+
+  int64_t valsize;
+  if (!this->val_type_->backend_type_size(gogo, &valsize))
+    {
+      error_at(this->location_, "error determining map value type size");
+      return Expression::make_error(this->location_);
+    }
+
+  int64_t ptrsize;
+  if (!Type::make_pointer_type(uint8_type)->backend_type_size(gogo, &ptrsize))
+    {
+      go_assert(saw_errors());
+      return Expression::make_error(this->location_);
+    }
+
+  Type* bucket_type = this->bucket_type(gogo, keysize, valsize);
+  if (bucket_type == NULL)
+    {
+      go_assert(saw_errors());
+      return Expression::make_error(this->location_);
+    }
+
+  int64_t bucketsize;
+  if (!bucket_type->backend_type_size(gogo, &bucketsize))
+    {
+      go_assert(saw_errors());
+      return Expression::make_error(this->location_);
+    }
 
   const Struct_field_list* fields = mtdt->struct_type()->fields();
 
   Expression_list* vals = new Expression_list();
-  vals->reserve(3);
+  vals->reserve(12);
 
   Struct_field_list::const_iterator p = fields->begin();
   go_assert(p->is_field_name("commonType"));
@@ -6983,130 +7225,270 @@ Map_type::do_type_descriptor(Gogo* gogo, Named_type* name)
   vals->push_back(Expression::make_type_descriptor(this->val_type_, bloc));
 
   ++p;
-  go_assert(p == fields->end());
-
-  return Expression::make_struct_composite_literal(mtdt, vals, bloc);
-}
-
-// A mapping from map types to map descriptors.
-
-Map_type::Map_descriptors Map_type::map_descriptors;
-
-// Build a map descriptor for this type.  Return a pointer to it.
+  go_assert(p->is_field_name("bucket"));
+  vals->push_back(Expression::make_type_descriptor(bucket_type, bloc));
 
-Bexpression*
-Map_type::map_descriptor_pointer(Gogo* gogo, Location location)
-{
-  Bvariable* bvar = this->map_descriptor(gogo);
-  Bexpression* var_expr = gogo->backend()->var_expression(bvar, location);
-  return gogo->backend()->address_expression(var_expr, location);
-}
+  ++p;
+  go_assert(p->is_field_name("hmap"));
+  Type* hmap_type = this->hmap_type(bucket_type);
+  vals->push_back(Expression::make_type_descriptor(hmap_type, bloc));
 
-// Build a map descriptor for this type.
+  ++p;
+  go_assert(p->is_field_name("keysize"));
+  if (keysize > Map_type::max_key_size)
+    vals->push_back(Expression::make_integer_int64(ptrsize, uint8_type, bloc));
+  else
+    vals->push_back(Expression::make_integer_int64(keysize, uint8_type, bloc));
 
-Bvariable*
-Map_type::map_descriptor(Gogo* gogo)
-{
-  std::pair<Map_type*, Bvariable*> val(this, NULL);
-  std::pair<Map_type::Map_descriptors::iterator, bool> ins =
-    Map_type::map_descriptors.insert(val);
-  if (!ins.second)
-    return ins.first->second;
+  ++p;
+  go_assert(p->is_field_name("indirectkey"));
+  vals->push_back(Expression::make_boolean(keysize > Map_type::max_key_size,
+                                          bloc));
 
-  Type* key_type = this->key_type_;
-  Type* val_type = this->val_type_;
+  ++p;
+  go_assert(p->is_field_name("valuesize"));
+  if (valsize > Map_type::max_val_size)
+    vals->push_back(Expression::make_integer_int64(ptrsize, uint8_type, bloc));
+  else
+    vals->push_back(Expression::make_integer_int64(valsize, uint8_type, bloc));
 
-  // The map entry type is a struct with three fields.  Build that
-  // struct so that we can get the offsets of the key and value within
-  // a map entry.  The first field should technically be a pointer to
-  // this type itself, but since we only care about field offsets we
-  // just use pointer to bool.
-  Type* pbool = Type::make_pointer_type(Type::make_boolean_type());
-  Struct_type* map_entry_type =
-    Type::make_builtin_struct_type(3,
-                                  "__next", pbool,
-                                  "__key", key_type,
-                                  "__val", val_type);
+  ++p;
+  go_assert(p->is_field_name("indirectvalue"));
+  vals->push_back(Expression::make_boolean(valsize > Map_type::max_val_size,
+                                          bloc));
 
-  Type* map_descriptor_type = Map_type::make_map_descriptor_type();
+  ++p;
+  go_assert(p->is_field_name("bucketsize"));
+  vals->push_back(Expression::make_integer_int64(bucketsize, uint16_type,
+                                                bloc));
 
-  const Struct_field_list* fields =
-    map_descriptor_type->struct_type()->fields();
+  ++p;
+  go_assert(p->is_field_name("reflexivekey"));
+  vals->push_back(Expression::make_boolean(this->key_type_->is_reflexive(),
+                                          bloc));
 
-  Expression_list* vals = new Expression_list();
-  vals->reserve(4);
+  ++p;
+  go_assert(p->is_field_name("needkeyupdate"));
+  vals->push_back(Expression::make_boolean(this->key_type_->needs_key_update(),
+                                          bloc));
 
-  Location bloc = Linemap::predeclared_location();
+  ++p;
+  go_assert(p == fields->end());
 
-  Struct_field_list::const_iterator p = fields->begin();
+  return Expression::make_struct_composite_literal(mtdt, vals, bloc);
+}
 
-  go_assert(p->is_field_name("__map_descriptor"));
-  vals->push_back(Expression::make_type_descriptor(this, bloc));
+// Return the bucket type to use for a map type.  This must correspond
+// to libgo/go/runtime/hashmap.go.
 
-  ++p;
-  go_assert(p->is_field_name("__entry_size"));
-  Expression::Type_info type_info = Expression::TYPE_INFO_SIZE;
-  vals->push_back(Expression::make_type_info(map_entry_type, type_info));
+Type*
+Map_type::bucket_type(Gogo* gogo, int64_t keysize, int64_t valsize)
+{
+  if (this->bucket_type_ != NULL)
+    return this->bucket_type_;
 
-  Struct_field_list::const_iterator pf = map_entry_type->fields()->begin();
-  ++pf;
-  go_assert(pf->is_field_name("__key"));
+  Type* key_type = this->key_type_;
+  if (keysize > Map_type::max_key_size)
+    key_type = Type::make_pointer_type(key_type);
 
-  ++p;
-  go_assert(p->is_field_name("__key_offset"));
-  vals->push_back(Expression::make_struct_field_offset(map_entry_type, &*pf));
+  Type* val_type = this->val_type_;
+  if (valsize > Map_type::max_val_size)
+    val_type = Type::make_pointer_type(val_type);
+
+  Expression* bucket_size = Expression::make_integer_ul(Map_type::bucket_size,
+                                                       NULL, this->location_);
+
+  Type* uint8_type = Type::lookup_integer_type("uint8");
+  Array_type* topbits_type = Type::make_array_type(uint8_type, bucket_size);
+  topbits_type->set_is_array_incomparable();
+  Array_type* keys_type = Type::make_array_type(key_type, bucket_size);
+  keys_type->set_is_array_incomparable();
+  Array_type* values_type = Type::make_array_type(val_type, bucket_size);
+  values_type->set_is_array_incomparable();
+
+  // If keys and values have no pointers, the map implementation can
+  // keep a list of overflow pointers on the side so that buckets can
+  // be marked as having no pointers.  Arrange for the bucket to have
+  // no pointers by changing the type of the overflow field to uintptr
+  // in this case.  See comment on the hmap.overflow field in
+  // libgo/go/runtime/hashmap.go.
+  Type* overflow_type;
+  if (!key_type->has_pointer() && !val_type->has_pointer())
+    overflow_type = Type::lookup_integer_type("uintptr");
+  else
+    {
+      // This should really be a pointer to the bucket type itself,
+      // but that would require us to construct a Named_type for it to
+      // give it a way to refer to itself.  Since nothing really cares
+      // (except perhaps for someone using a debugger) just use an
+      // unsafe pointer.
+      overflow_type = Type::make_pointer_type(Type::make_void_type());
+    }
+
+  // Make sure the overflow pointer is the last memory in the struct,
+  // because the runtime assumes it can use size-ptrSize as the offset
+  // of the overflow pointer.  We double-check that property below
+  // once the offsets and size are computed.
+
+  int64_t topbits_field_size, topbits_field_align;
+  int64_t keys_field_size, keys_field_align;
+  int64_t values_field_size, values_field_align;
+  int64_t overflow_field_size, overflow_field_align;
+  if (!topbits_type->backend_type_size(gogo, &topbits_field_size)
+      || !topbits_type->backend_type_field_align(gogo, &topbits_field_align)
+      || !keys_type->backend_type_size(gogo, &keys_field_size)
+      || !keys_type->backend_type_field_align(gogo, &keys_field_align)
+      || !values_type->backend_type_size(gogo, &values_field_size)
+      || !values_type->backend_type_field_align(gogo, &values_field_align)
+      || !overflow_type->backend_type_size(gogo, &overflow_field_size)
+      || !overflow_type->backend_type_field_align(gogo, &overflow_field_align))
+    {
+      go_assert(saw_errors());
+      return NULL;
+    }
 
-  ++pf;
-  go_assert(pf->is_field_name("__val"));
+  Struct_type* ret;
+  int64_t max_align = std::max(std::max(topbits_field_align, keys_field_align),
+                              values_field_align);
+  if (max_align <= overflow_field_align)
+    ret =  make_builtin_struct_type(4,
+                                   "topbits", topbits_type,
+                                   "keys", keys_type,
+                                   "values", values_type,
+                                   "overflow", overflow_type);
+  else
+    {
+      size_t off = topbits_field_size;
+      off = ((off + keys_field_align - 1)
+            &~ static_cast<size_t>(keys_field_align - 1));
+      off += keys_field_size;
+      off = ((off + values_field_align - 1)
+            &~ static_cast<size_t>(values_field_align - 1));
+      off += values_field_size;
+
+      int64_t padded_overflow_field_size =
+       ((overflow_field_size + max_align - 1)
+        &~ static_cast<size_t>(max_align - 1));
+
+      size_t ovoff = off;
+      ovoff = ((ovoff + max_align - 1)
+              &~ static_cast<size_t>(max_align - 1));
+      size_t pad = (ovoff - off
+                   + padded_overflow_field_size - overflow_field_size);
+
+      Expression* pad_expr = Expression::make_integer_ul(pad, NULL,
+                                                        this->location_);
+      Type* pad_type = Type::make_array_type(uint8_type, pad_expr);
+
+      ret = make_builtin_struct_type(5,
+                                    "topbits", topbits_type,
+                                    "keys", keys_type,
+                                    "values", values_type,
+                                    "pad", pad_type,
+                                    "overflow", overflow_type);
+    }
+
+  // Verify that the overflow field is just before the end of the
+  // bucket type.
+
+  Btype* btype = ret->get_backend(gogo);
+  int64_t offset = gogo->backend()->type_field_offset(btype,
+                                                     ret->field_count() - 1);
+  int64_t size;
+  if (!ret->backend_type_size(gogo, &size))
+    {
+      go_assert(saw_errors());
+      return NULL;
+    }
 
-  ++p;
-  go_assert(p->is_field_name("__val_offset"));
-  vals->push_back(Expression::make_struct_field_offset(map_entry_type, &*pf));
+  int64_t ptr_size;
+  if (!Type::make_pointer_type(uint8_type)->backend_type_size(gogo, &ptr_size))
+    {
+      go_assert(saw_errors());
+      return NULL;
+    }
 
-  ++p;
-  go_assert(p == fields->end());
+  go_assert(offset + ptr_size == size);
 
-  Expression* initializer =
-    Expression::make_struct_composite_literal(map_descriptor_type, vals, bloc);
+  ret->set_is_struct_incomparable();
 
-  std::string mangled_name = "__go_map_" + this->mangled_name(gogo);
-  Btype* map_descriptor_btype = map_descriptor_type->get_backend(gogo);
-  Bvariable* bvar = gogo->backend()->immutable_struct(mangled_name, false,
-                                                     true,
-                                                     map_descriptor_btype,
-                                                     bloc);
+  this->bucket_type_ = ret;
+  return ret;
+}
 
-  Translate_context context(gogo, NULL, NULL, NULL);
-  context.set_is_const();
-  Bexpression* binitializer = initializer->get_backend(&context);
+// Return the hashmap type for a map type.
 
-  gogo->backend()->immutable_struct_set_init(bvar, mangled_name, false, true,
-                                            map_descriptor_btype, bloc,
-                                            binitializer);
+Type*
+Map_type::hmap_type(Type* bucket_type)
+{
+  if (this->hmap_type_ != NULL)
+    return this->hmap_type_;
 
-  ins.first->second = bvar;
-  return bvar;
+  Type* int_type = Type::lookup_integer_type("int");
+  Type* uint8_type = Type::lookup_integer_type("uint8");
+  Type* uint32_type = Type::lookup_integer_type("uint32");
+  Type* uintptr_type = Type::lookup_integer_type("uintptr");
+  Type* void_ptr_type = Type::make_pointer_type(Type::make_void_type());
+
+  Type* ptr_bucket_type = Type::make_pointer_type(bucket_type);
+
+  Struct_type* ret = make_builtin_struct_type(8,
+                                             "count", int_type,
+                                             "flags", uint8_type,
+                                             "B", uint8_type,
+                                             "hash0", uint32_type,
+                                             "buckets", ptr_bucket_type,
+                                             "oldbuckets", ptr_bucket_type,
+                                             "nevacuate", uintptr_type,
+                                             "overflow", void_ptr_type);
+  ret->set_is_struct_incomparable();
+  this->hmap_type_ = ret;
+  return ret;
 }
 
-// Build the type of a map descriptor.  This must match the struct
-// __go_map_descriptor in libgo/runtime/map.h.
+// Return the iterator type for a map type.  This is the type of the
+// value used when doing a range over a map.
 
 Type*
-Map_type::make_map_descriptor_type()
+Map_type::hiter_type(Gogo* gogo)
 {
-  static Type* ret;
-  if (ret == NULL)
+  if (this->hiter_type_ != NULL)
+    return this->hiter_type_;
+
+  int64_t keysize, valsize;
+  if (!this->key_type_->backend_type_size(gogo, &keysize)
+      || !this->val_type_->backend_type_size(gogo, &valsize))
     {
-      Type* ptdt = Type::make_type_descriptor_ptr_type();
-      Type* uintptr_type = Type::lookup_integer_type("uintptr");
-      Struct_type* sf =
-       Type::make_builtin_struct_type(4,
-                                      "__map_descriptor", ptdt,
-                                      "__entry_size", uintptr_type,
-                                      "__key_offset", uintptr_type,
-                                      "__val_offset", uintptr_type);
-      ret = Type::make_builtin_named_type("__go_map_descriptor", sf);
+      go_assert(saw_errors());
+      return NULL;
     }
+
+  Type* key_ptr_type = Type::make_pointer_type(this->key_type_);
+  Type* val_ptr_type = Type::make_pointer_type(this->val_type_);
+  Type* uint8_type = Type::lookup_integer_type("uint8");
+  Type* uint8_ptr_type = Type::make_pointer_type(uint8_type);
+  Type* uintptr_type = Type::lookup_integer_type("uintptr");
+  Type* bucket_type = this->bucket_type(gogo, keysize, valsize);
+  Type* bucket_ptr_type = Type::make_pointer_type(bucket_type);
+  Type* hmap_type = this->hmap_type(bucket_type);
+  Type* hmap_ptr_type = Type::make_pointer_type(hmap_type);
+  Type* void_ptr_type = Type::make_pointer_type(Type::make_void_type());
+
+  Struct_type* ret = make_builtin_struct_type(12,
+                                             "key", key_ptr_type,
+                                             "val", val_ptr_type,
+                                             "t", uint8_ptr_type,
+                                             "h", hmap_ptr_type,
+                                             "buckets", bucket_ptr_type,
+                                             "bptr", bucket_ptr_type,
+                                             "overflow0", void_ptr_type,
+                                             "overflow1", void_ptr_type,
+                                             "startBucket", uintptr_type,
+                                             "stuff", uintptr_type,
+                                             "bucket", uintptr_type,
+                                             "checkBucket", uintptr_type);
+  ret->set_is_struct_incomparable();
+  this->hiter_type_ = ret;
   return ret;
 }
 
@@ -9016,6 +9398,33 @@ Named_type::do_compare_is_identity(Gogo* gogo)
   return ret;
 }
 
+// Return whether this type is reflexive--whether it is always equal
+// to itself.
+
+bool
+Named_type::do_is_reflexive()
+{
+  if (this->seen_in_compare_is_identity_)
+    return false;
+  this->seen_in_compare_is_identity_ = true;
+  bool ret = this->type_->is_reflexive();
+  this->seen_in_compare_is_identity_ = false;
+  return ret;
+}
+
+// Return whether this type needs a key update when used as a map key.
+
+bool
+Named_type::do_needs_key_update()
+{
+  if (this->seen_in_compare_is_identity_)
+    return true;
+  this->seen_in_compare_is_identity_ = true;
+  bool ret = this->type_->needs_key_update();
+  this->seen_in_compare_is_identity_ = false;
+  return ret;
+}
+
 // Return a hash code.  This is used for method lookup.  We simply
 // hash on the name itself.
 
index 5de49ae3534e5388b3cef2e4d1d01bf9845b125a..3d9a3c47fae1500595876ce4d77b3fb9d25d99e7 100644 (file)
@@ -14,6 +14,7 @@
 
 class Gogo;
 class Package;
+class Variable;
 class Traverse;
 class Typed_identifier;
 class Typed_identifier_list;
@@ -629,6 +630,18 @@ class Type
   compare_is_identity(Gogo* gogo)
   { return this->do_compare_is_identity(gogo); }
 
+  // Return whether values of this type are reflexive: that is,
+  // whether a comparison of a value with itself always returns true.
+  bool
+  is_reflexive()
+  { return this->do_is_reflexive(); }
+
+  // Return whether values of this type, when used as a key in a map,
+  // require the key to be updated when an assignment is made.
+  bool
+  needs_key_update()
+  { return this->do_needs_key_update(); }
+
   // Return a hash code for this type for the method hash table.
   // Types which are equivalent according to are_identical will have
   // the same hash code.
@@ -1006,6 +1019,14 @@ class Type
   virtual bool
   do_compare_is_identity(Gogo*) = 0;
 
+  virtual bool
+  do_is_reflexive()
+  { return true; }
+
+  virtual bool
+  do_needs_key_update()
+  { return false; }
+
   virtual unsigned int
   do_hash_for_method(Gogo*) const;
 
@@ -1639,6 +1660,15 @@ class Float_type : public Type
   do_compare_is_identity(Gogo*)
   { return false; }
 
+  bool
+  do_is_reflexive()
+  { return false; }
+
+  // Distinction between +0 and -0 requires a key update.
+  bool
+  do_needs_key_update()
+  { return true; }
+
   unsigned int
   do_hash_for_method(Gogo*) const;
 
@@ -1712,6 +1742,15 @@ class Complex_type : public Type
   do_compare_is_identity(Gogo*)
   { return false; }
 
+  bool
+  do_is_reflexive()
+  { return false; }
+
+  // Distinction between +0 and -0 requires a key update.
+  bool
+  do_needs_key_update()
+  { return true; }
+
   unsigned int
   do_hash_for_method(Gogo*) const;
 
@@ -1768,6 +1807,11 @@ class String_type : public Type
   do_compare_is_identity(Gogo*)
   { return false; }
 
+  // New string might have a smaller backing store.
+  bool
+  do_needs_key_update()
+  { return true; }
+
   Btype*
   do_get_backend(Gogo*);
 
@@ -2218,7 +2262,8 @@ class Struct_type : public Type
  public:
   Struct_type(Struct_field_list* fields, Location location)
     : Type(TYPE_STRUCT),
-      fields_(fields), location_(location), all_methods_(NULL)
+      fields_(fields), location_(location), all_methods_(NULL),
+      is_struct_incomparable_(false)
   { }
 
   // Return the field NAME.  This only looks at local fields, not at
@@ -2323,6 +2368,16 @@ class Struct_type : public Type
   static Type*
   make_struct_type_descriptor_type();
 
+  // Return whether this is a generated struct that is not comparable.
+  bool
+  is_struct_incomparable() const
+  { return this->is_struct_incomparable_; }
+
+  // Record that this is a generated struct that is not comparable.
+  void
+  set_is_struct_incomparable()
+  { this->is_struct_incomparable_ = true; }
+
   // Write the hash function for this type.
   void
   write_hash_function(Gogo*, Named_type*, Function_type*, Function_type*);
@@ -2354,6 +2409,12 @@ class Struct_type : public Type
   bool
   do_compare_is_identity(Gogo*);
 
+  bool
+  do_is_reflexive();
+
+  bool
+  do_needs_key_update();
+
   unsigned int
   do_hash_for_method(Gogo*) const;
 
@@ -2418,6 +2479,9 @@ class Struct_type : public Type
   Location location_;
   // If this struct is unnamed, a list of methods.
   Methods* all_methods_;
+  // True if this is a generated struct that is not considered to be
+  // comparable.
+  bool is_struct_incomparable_;
 };
 
 // The type of an array.
@@ -2428,7 +2492,7 @@ class Array_type : public Type
   Array_type(Type* element_type, Expression* length)
     : Type(TYPE_ARRAY),
       element_type_(element_type), length_(length), blength_(NULL),
-      issued_length_error_(false)
+      issued_length_error_(false), is_array_incomparable_(false)
   { }
 
   // Return the element type.
@@ -2479,6 +2543,16 @@ class Array_type : public Type
   static Type*
   make_slice_type_descriptor_type();
 
+  // Return whether this is a generated array that is not comparable.
+  bool
+  is_array_incomparable() const
+  { return this->is_array_incomparable_; }
+
+  // Record that this is a generated array that is not comparable.
+  void
+  set_is_array_incomparable()
+  { this->is_array_incomparable_ = true; }
+
   // Write the hash function for this type.
   void
   write_hash_function(Gogo*, Named_type*, Function_type*, Function_type*);
@@ -2503,6 +2577,16 @@ class Array_type : public Type
   bool
   do_compare_is_identity(Gogo*);
 
+  bool
+  do_is_reflexive()
+  {
+    return this->length_ != NULL && this->element_type_->is_reflexive();
+  }
+
+  bool
+  do_needs_key_update()
+  { return this->element_type_->needs_key_update(); }
+
   unsigned int
   do_hash_for_method(Gogo*) const;
 
@@ -2550,6 +2634,9 @@ class Array_type : public Type
   // Whether or not an invalid length error has been issued for this type,
   // to avoid knock-on errors.
   mutable bool issued_length_error_;
+  // True if this is a generated array that is not considered to be
+  // comparable.
+  bool is_array_incomparable_;
 };
 
 // The type of a map.
@@ -2559,7 +2646,8 @@ class Map_type : public Type
  public:
   Map_type(Type* key_type, Type* val_type, Location location)
     : Type(TYPE_MAP),
-      key_type_(key_type), val_type_(val_type), location_(location)
+      key_type_(key_type), val_type_(val_type), hmap_type_(NULL),
+      bucket_type_(NULL), hiter_type_(NULL), location_(location)
   { }
 
   // Return the key type.
@@ -2572,6 +2660,24 @@ class Map_type : public Type
   val_type() const
   { return this->val_type_; }
 
+  // Return the type used for an iteration over this map.
+  Type*
+  hiter_type(Gogo*);
+
+  // If this map requires the "fat" functions, returns the pointer to
+  // pass as the zero value to those functions.  Otherwise, in the
+  // normal case, returns NULL.
+  Expression*
+  fat_zero_value(Gogo*);
+
+  // Return whether VAR is the map zero value.
+  static bool
+  is_zero_value(Variable* var);
+
+  // Return the backend representation of the map zero value.
+  static Bvariable*
+  backend_zero_value(Gogo*);
+
   // Whether this type is identical with T.
   bool
   is_identical(const Map_type* t, bool errors_are_identical) const;
@@ -2583,15 +2689,6 @@ class Map_type : public Type
   static Type*
   make_map_type_descriptor_type();
 
-  static Type*
-  make_map_descriptor_type();
-
-  // Build a map descriptor for this type.  Return a pointer to it.
-  // The location is the location which causes us to need the
-  // descriptor.
-  Bexpression*
-  map_descriptor_pointer(Gogo* gogo, Location);
-
  protected:
   int
   do_traverse(Traverse*);
@@ -2607,6 +2704,12 @@ class Map_type : public Type
   do_compare_is_identity(Gogo*)
   { return false; }
 
+  bool
+  do_is_reflexive()
+  {
+    return this->key_type_->is_reflexive() && this->val_type_->is_reflexive();
+  }
+
   unsigned int
   do_hash_for_method(Gogo*) const;
 
@@ -2629,18 +2732,41 @@ class Map_type : public Type
   do_export(Export*) const;
 
  private:
-  // Mapping from map types to map descriptors.
-  typedef Unordered_map_hash(const Map_type*, Bvariable*, Type_hash_identical,
-                            Type_identical) Map_descriptors;
-  static Map_descriptors map_descriptors;
+  // These must be in sync with libgo/go/runtime/hashmap.go.
+  static const int bucket_size = 8;
+  static const int max_key_size = 128;
+  static const int max_val_size = 128;
+  static const int max_zero_size = 1024;
+
+  // Maps with value types larger than max_zero_size require passing a
+  // zero value pointer to the map functions.
+
+  // The zero value variable.
+  static Named_object* zero_value;
 
-  Bvariable*
-  map_descriptor(Gogo*);
+  // The current size of the zero value.
+  static int64_t zero_value_size;
+
+  // The current alignment of the zero value.
+  static int64_t zero_value_align;
+
+  Type*
+  bucket_type(Gogo*, int64_t, int64_t);
+
+  Type*
+  hmap_type(Type*);
 
   // The key type.
   Type* key_type_;
   // The value type.
   Type* val_type_;
+  // The hashmap type.  At run time a map is represented as a pointer
+  // to this type.
+  Type* hmap_type_;
+  // The bucket type, the type used to hold keys and values at run time.
+  Type* bucket_type_;
+  // The iterator type.
+  Type* hiter_type_;
   // Where the type was defined.
   Location location_;
 };
@@ -2832,6 +2958,17 @@ class Interface_type : public Type
   do_compare_is_identity(Gogo*)
   { return false; }
 
+  // Not reflexive: the dynamic value might contain a float.
+  bool
+  do_is_reflexive()
+  { return false; }
+
+  // Distinction between +0 and -0 requires a key update if it
+  // contains a float.
+  bool
+  do_needs_key_update()
+  { return true; }
+
   unsigned int
   do_hash_for_method(Gogo*) const;
 
@@ -3121,6 +3258,12 @@ class Named_type : public Type
   bool
   do_compare_is_identity(Gogo*);
 
+  bool
+  do_is_reflexive();
+
+  bool
+  do_needs_key_update();
+
   unsigned int
   do_hash_for_method(Gogo*) const;
 
@@ -3268,6 +3411,14 @@ class Forward_declaration_type : public Type
   do_compare_is_identity(Gogo* gogo)
   { return this->real_type()->compare_is_identity(gogo); }
 
+  bool
+  do_is_reflexive()
+  { return this->real_type()->is_reflexive(); }
+
+  bool
+  do_needs_key_update()
+  { return this->real_type()->needs_key_update(); }
+
   unsigned int
   do_hash_for_method(Gogo* gogo) const
   { return this->real_type()->hash_for_method(gogo); }
index 28e974b272c7d3cc81aca3f75e10af0d32a595b0..6a1dae9cf3cf4ae5ad2530edd12e86f918867b8a 100644 (file)
@@ -1,3 +1,8 @@
+2016-09-21  Ian Lance Taylor  <iant@golang.org>
+
+       * go.go-torture/execute/map-1.go: Replace old map deletion syntax
+       with call to builtin delete function.
+
 2016-09-21  Joseph Myers  <joseph@codesourcery.com>
 
        * gcc.dg/torture/float128-tg-3.c, gcc.dg/torture/float128x-tg-3.c,
index 8307c6c98cd43abc5ae5e3142b62d0e6cf46018e..2054c6c413b6315390e53d89c8b9c183bfc6c08e 100644 (file)
@@ -26,7 +26,7 @@ func main() {
   if len(v) != 2 {
     panic(6)
   }
-  v[0] = 0, false;
+  delete(v, 0)
   if len(v) != 1 {
     panic(7)
   }
index 4ac6a4a7bd5f31e0ad04d8d76f4d645a6abe5edf..bd75dd3e02ffb56a304dab74d730e74d9a3b260b 100644 (file)
@@ -464,22 +464,19 @@ runtime_files = \
        runtime/go-interface-eface-compare.c \
        runtime/go-interface-val-compare.c \
        runtime/go-make-slice.c \
-       runtime/go-map-delete.c \
-       runtime/go-map-index.c \
-       runtime/go-map-len.c \
-       runtime/go-map-range.c \
        runtime/go-matherr.c \
+       runtime/go-memclr.c \
        runtime/go-memcmp.c \
+       runtime/go-memequal.c \
+       runtime/go-memmove.c \
        runtime/go-nanotime.c \
        runtime/go-now.c \
-       runtime/go-new-map.c \
        runtime/go-new.c \
        runtime/go-nosys.c \
        runtime/go-panic.c \
        runtime/go-print.c \
        runtime/go-recover.c \
        runtime/go-reflect-call.c \
-       runtime/go-reflect-map.c \
        runtime/go-rune.c \
        runtime/go-runtime-error.c \
        runtime/go-setenv.c \
@@ -492,7 +489,6 @@ runtime_files = \
        runtime/go-traceback.c \
        runtime/go-type-complex.c \
        runtime/go-type-eface.c \
-       runtime/go-type-error.c \
        runtime/go-type-float.c \
        runtime/go-type-identity.c \
        runtime/go-type-interface.c \
@@ -529,7 +525,6 @@ runtime_files = \
        go-iface.c \
        lfstack.c \
        malloc.c \
-       map.c \
        mprof.c \
        netpoll.c \
        rdebug.c \
index 2daa83ee443eb73b8d0db3bbe154e6c6b7aa2db6..78771c6f258e1a1273ec40230e7156a7ff6980b7 100644 (file)
@@ -248,26 +248,24 @@ am__objects_6 = go-append.lo go-assert.lo go-assert-interface.lo \
        go-eface-val-compare.lo go-ffi.lo go-fieldtrack.lo \
        go-int-array-to-string.lo go-int-to-string.lo \
        go-interface-compare.lo go-interface-eface-compare.lo \
-       go-interface-val-compare.lo go-make-slice.lo go-map-delete.lo \
-       go-map-index.lo go-map-len.lo go-map-range.lo go-matherr.lo \
-       go-memcmp.lo go-nanotime.lo go-now.lo go-new-map.lo go-new.lo \
-       go-nosys.lo go-panic.lo go-print.lo go-recover.lo \
-       go-reflect-call.lo go-reflect-map.lo go-rune.lo \
+       go-interface-val-compare.lo go-make-slice.lo go-matherr.lo \
+       go-memclr.lo go-memcmp.lo go-memequal.lo go-memmove.lo \
+       go-nanotime.lo go-now.lo go-new.lo go-nosys.lo go-panic.lo \
+       go-print.lo go-recover.lo go-reflect-call.lo go-rune.lo \
        go-runtime-error.lo go-setenv.lo go-signal.lo go-strcmp.lo \
        go-string-to-byte-array.lo go-string-to-int-array.lo \
        go-strplus.lo go-strslice.lo go-traceback.lo \
-       go-type-complex.lo go-type-eface.lo go-type-error.lo \
-       go-type-float.lo go-type-identity.lo go-type-interface.lo \
-       go-type-string.lo go-typedesc-equal.lo go-unsafe-new.lo \
-       go-unsafe-newarray.lo go-unsafe-pointer.lo go-unsetenv.lo \
-       go-unwind.lo go-varargs.lo env_posix.lo heapdump.lo \
-       $(am__objects_1) mcache.lo mcentral.lo $(am__objects_2) \
-       mfixalloc.lo mgc0.lo mheap.lo msize.lo $(am__objects_3) \
-       panic.lo parfor.lo print.lo proc.lo runtime.lo signal_unix.lo \
-       thread.lo yield.lo $(am__objects_4) chan.lo cpuprof.lo \
-       go-iface.lo lfstack.lo malloc.lo map.lo mprof.lo netpoll.lo \
-       rdebug.lo reflect.lo runtime1.lo sema.lo sigqueue.lo string.lo \
-       time.lo $(am__objects_5)
+       go-type-complex.lo go-type-eface.lo go-type-float.lo \
+       go-type-identity.lo go-type-interface.lo go-type-string.lo \
+       go-typedesc-equal.lo go-unsafe-new.lo go-unsafe-newarray.lo \
+       go-unsafe-pointer.lo go-unsetenv.lo go-unwind.lo go-varargs.lo \
+       env_posix.lo heapdump.lo $(am__objects_1) mcache.lo \
+       mcentral.lo $(am__objects_2) mfixalloc.lo mgc0.lo mheap.lo \
+       msize.lo $(am__objects_3) panic.lo parfor.lo print.lo proc.lo \
+       runtime.lo signal_unix.lo thread.lo yield.lo $(am__objects_4) \
+       chan.lo cpuprof.lo go-iface.lo lfstack.lo malloc.lo mprof.lo \
+       netpoll.lo rdebug.lo reflect.lo runtime1.lo sema.lo \
+       sigqueue.lo string.lo time.lo $(am__objects_5)
 am_libgo_llgo_la_OBJECTS = $(am__objects_6)
 libgo_llgo_la_OBJECTS = $(am_libgo_llgo_la_OBJECTS)
 libgo_llgo_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
@@ -867,22 +865,19 @@ runtime_files = \
        runtime/go-interface-eface-compare.c \
        runtime/go-interface-val-compare.c \
        runtime/go-make-slice.c \
-       runtime/go-map-delete.c \
-       runtime/go-map-index.c \
-       runtime/go-map-len.c \
-       runtime/go-map-range.c \
        runtime/go-matherr.c \
+       runtime/go-memclr.c \
        runtime/go-memcmp.c \
+       runtime/go-memequal.c \
+       runtime/go-memmove.c \
        runtime/go-nanotime.c \
        runtime/go-now.c \
-       runtime/go-new-map.c \
        runtime/go-new.c \
        runtime/go-nosys.c \
        runtime/go-panic.c \
        runtime/go-print.c \
        runtime/go-recover.c \
        runtime/go-reflect-call.c \
-       runtime/go-reflect-map.c \
        runtime/go-rune.c \
        runtime/go-runtime-error.c \
        runtime/go-setenv.c \
@@ -895,7 +890,6 @@ runtime_files = \
        runtime/go-traceback.c \
        runtime/go-type-complex.c \
        runtime/go-type-eface.c \
-       runtime/go-type-error.c \
        runtime/go-type-float.c \
        runtime/go-type-identity.c \
        runtime/go-type-interface.c \
@@ -932,7 +926,6 @@ runtime_files = \
        go-iface.c \
        lfstack.c \
        malloc.c \
-       map.c \
        mprof.c \
        netpoll.c \
        rdebug.c \
@@ -1594,14 +1587,12 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-interface-eface-compare.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-interface-val-compare.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-make-slice.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-map-delete.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-map-index.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-map-len.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-map-range.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-matherr.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-memclr.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-memcmp.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-memequal.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-memmove.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-nanotime.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-new-map.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-new.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-nosys.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-now.Plo@am__quote@
@@ -1609,7 +1600,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-print.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-recover.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-reflect-call.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-reflect-map.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-rune.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-runtime-error.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-setenv.Plo@am__quote@
@@ -1622,7 +1612,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-traceback.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-complex.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-eface.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-error.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-float.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-identity.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/go-type-interface.Plo@am__quote@
@@ -1642,7 +1631,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lock_futex.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lock_sema.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/malloc.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/map.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mcache.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mcentral.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mem.Plo@am__quote@
@@ -1920,34 +1908,6 @@ go-make-slice.lo: runtime/go-make-slice.c
 @AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-make-slice.lo `test -f 'runtime/go-make-slice.c' || echo '$(srcdir)/'`runtime/go-make-slice.c
 
-go-map-delete.lo: runtime/go-map-delete.c
-@am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-map-delete.lo -MD -MP -MF $(DEPDIR)/go-map-delete.Tpo -c -o go-map-delete.lo `test -f 'runtime/go-map-delete.c' || echo '$(srcdir)/'`runtime/go-map-delete.c
-@am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/go-map-delete.Tpo $(DEPDIR)/go-map-delete.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      source='runtime/go-map-delete.c' object='go-map-delete.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-map-delete.lo `test -f 'runtime/go-map-delete.c' || echo '$(srcdir)/'`runtime/go-map-delete.c
-
-go-map-index.lo: runtime/go-map-index.c
-@am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-map-index.lo -MD -MP -MF $(DEPDIR)/go-map-index.Tpo -c -o go-map-index.lo `test -f 'runtime/go-map-index.c' || echo '$(srcdir)/'`runtime/go-map-index.c
-@am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/go-map-index.Tpo $(DEPDIR)/go-map-index.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      source='runtime/go-map-index.c' object='go-map-index.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-map-index.lo `test -f 'runtime/go-map-index.c' || echo '$(srcdir)/'`runtime/go-map-index.c
-
-go-map-len.lo: runtime/go-map-len.c
-@am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-map-len.lo -MD -MP -MF $(DEPDIR)/go-map-len.Tpo -c -o go-map-len.lo `test -f 'runtime/go-map-len.c' || echo '$(srcdir)/'`runtime/go-map-len.c
-@am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/go-map-len.Tpo $(DEPDIR)/go-map-len.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      source='runtime/go-map-len.c' object='go-map-len.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-map-len.lo `test -f 'runtime/go-map-len.c' || echo '$(srcdir)/'`runtime/go-map-len.c
-
-go-map-range.lo: runtime/go-map-range.c
-@am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-map-range.lo -MD -MP -MF $(DEPDIR)/go-map-range.Tpo -c -o go-map-range.lo `test -f 'runtime/go-map-range.c' || echo '$(srcdir)/'`runtime/go-map-range.c
-@am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/go-map-range.Tpo $(DEPDIR)/go-map-range.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      source='runtime/go-map-range.c' object='go-map-range.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-map-range.lo `test -f 'runtime/go-map-range.c' || echo '$(srcdir)/'`runtime/go-map-range.c
-
 go-matherr.lo: runtime/go-matherr.c
 @am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-matherr.lo -MD -MP -MF $(DEPDIR)/go-matherr.Tpo -c -o go-matherr.lo `test -f 'runtime/go-matherr.c' || echo '$(srcdir)/'`runtime/go-matherr.c
 @am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/go-matherr.Tpo $(DEPDIR)/go-matherr.Plo
@@ -1955,6 +1915,13 @@ go-matherr.lo: runtime/go-matherr.c
 @AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-matherr.lo `test -f 'runtime/go-matherr.c' || echo '$(srcdir)/'`runtime/go-matherr.c
 
+go-memclr.lo: runtime/go-memclr.c
+@am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-memclr.lo -MD -MP -MF $(DEPDIR)/go-memclr.Tpo -c -o go-memclr.lo `test -f 'runtime/go-memclr.c' || echo '$(srcdir)/'`runtime/go-memclr.c
+@am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/go-memclr.Tpo $(DEPDIR)/go-memclr.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@      source='runtime/go-memclr.c' object='go-memclr.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-memclr.lo `test -f 'runtime/go-memclr.c' || echo '$(srcdir)/'`runtime/go-memclr.c
+
 go-memcmp.lo: runtime/go-memcmp.c
 @am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-memcmp.lo -MD -MP -MF $(DEPDIR)/go-memcmp.Tpo -c -o go-memcmp.lo `test -f 'runtime/go-memcmp.c' || echo '$(srcdir)/'`runtime/go-memcmp.c
 @am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/go-memcmp.Tpo $(DEPDIR)/go-memcmp.Plo
@@ -1962,6 +1929,20 @@ go-memcmp.lo: runtime/go-memcmp.c
 @AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-memcmp.lo `test -f 'runtime/go-memcmp.c' || echo '$(srcdir)/'`runtime/go-memcmp.c
 
+go-memequal.lo: runtime/go-memequal.c
+@am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-memequal.lo -MD -MP -MF $(DEPDIR)/go-memequal.Tpo -c -o go-memequal.lo `test -f 'runtime/go-memequal.c' || echo '$(srcdir)/'`runtime/go-memequal.c
+@am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/go-memequal.Tpo $(DEPDIR)/go-memequal.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@      source='runtime/go-memequal.c' object='go-memequal.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-memequal.lo `test -f 'runtime/go-memequal.c' || echo '$(srcdir)/'`runtime/go-memequal.c
+
+go-memmove.lo: runtime/go-memmove.c
+@am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-memmove.lo -MD -MP -MF $(DEPDIR)/go-memmove.Tpo -c -o go-memmove.lo `test -f 'runtime/go-memmove.c' || echo '$(srcdir)/'`runtime/go-memmove.c
+@am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/go-memmove.Tpo $(DEPDIR)/go-memmove.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@      source='runtime/go-memmove.c' object='go-memmove.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-memmove.lo `test -f 'runtime/go-memmove.c' || echo '$(srcdir)/'`runtime/go-memmove.c
+
 go-nanotime.lo: runtime/go-nanotime.c
 @am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-nanotime.lo -MD -MP -MF $(DEPDIR)/go-nanotime.Tpo -c -o go-nanotime.lo `test -f 'runtime/go-nanotime.c' || echo '$(srcdir)/'`runtime/go-nanotime.c
 @am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/go-nanotime.Tpo $(DEPDIR)/go-nanotime.Plo
@@ -1976,13 +1957,6 @@ go-now.lo: runtime/go-now.c
 @AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-now.lo `test -f 'runtime/go-now.c' || echo '$(srcdir)/'`runtime/go-now.c
 
-go-new-map.lo: runtime/go-new-map.c
-@am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-new-map.lo -MD -MP -MF $(DEPDIR)/go-new-map.Tpo -c -o go-new-map.lo `test -f 'runtime/go-new-map.c' || echo '$(srcdir)/'`runtime/go-new-map.c
-@am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/go-new-map.Tpo $(DEPDIR)/go-new-map.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      source='runtime/go-new-map.c' object='go-new-map.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-new-map.lo `test -f 'runtime/go-new-map.c' || echo '$(srcdir)/'`runtime/go-new-map.c
-
 go-new.lo: runtime/go-new.c
 @am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-new.lo -MD -MP -MF $(DEPDIR)/go-new.Tpo -c -o go-new.lo `test -f 'runtime/go-new.c' || echo '$(srcdir)/'`runtime/go-new.c
 @am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/go-new.Tpo $(DEPDIR)/go-new.Plo
@@ -2025,13 +1999,6 @@ go-reflect-call.lo: runtime/go-reflect-call.c
 @AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-reflect-call.lo `test -f 'runtime/go-reflect-call.c' || echo '$(srcdir)/'`runtime/go-reflect-call.c
 
-go-reflect-map.lo: runtime/go-reflect-map.c
-@am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-reflect-map.lo -MD -MP -MF $(DEPDIR)/go-reflect-map.Tpo -c -o go-reflect-map.lo `test -f 'runtime/go-reflect-map.c' || echo '$(srcdir)/'`runtime/go-reflect-map.c
-@am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/go-reflect-map.Tpo $(DEPDIR)/go-reflect-map.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      source='runtime/go-reflect-map.c' object='go-reflect-map.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-reflect-map.lo `test -f 'runtime/go-reflect-map.c' || echo '$(srcdir)/'`runtime/go-reflect-map.c
-
 go-rune.lo: runtime/go-rune.c
 @am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-rune.lo -MD -MP -MF $(DEPDIR)/go-rune.Tpo -c -o go-rune.lo `test -f 'runtime/go-rune.c' || echo '$(srcdir)/'`runtime/go-rune.c
 @am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/go-rune.Tpo $(DEPDIR)/go-rune.Plo
@@ -2116,13 +2083,6 @@ go-type-eface.lo: runtime/go-type-eface.c
 @AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-type-eface.lo `test -f 'runtime/go-type-eface.c' || echo '$(srcdir)/'`runtime/go-type-eface.c
 
-go-type-error.lo: runtime/go-type-error.c
-@am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-type-error.lo -MD -MP -MF $(DEPDIR)/go-type-error.Tpo -c -o go-type-error.lo `test -f 'runtime/go-type-error.c' || echo '$(srcdir)/'`runtime/go-type-error.c
-@am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/go-type-error.Tpo $(DEPDIR)/go-type-error.Plo
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      source='runtime/go-type-error.c' object='go-type-error.lo' libtool=yes @AMDEPBACKSLASH@
-@AMDEP_TRUE@@am__fastdepCC_FALSE@      DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
-@am__fastdepCC_FALSE@  $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o go-type-error.lo `test -f 'runtime/go-type-error.c' || echo '$(srcdir)/'`runtime/go-type-error.c
-
 go-type-float.lo: runtime/go-type-float.c
 @am__fastdepCC_TRUE@   $(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT go-type-float.lo -MD -MP -MF $(DEPDIR)/go-type-float.Tpo -c -o go-type-float.lo `test -f 'runtime/go-type-float.c' || echo '$(srcdir)/'`runtime/go-type-float.c
 @am__fastdepCC_TRUE@   $(am__mv) $(DEPDIR)/go-type-float.Tpo $(DEPDIR)/go-type-float.Plo
index d89f15631acf21a49f44ef13d622eb5c3effeff0..13b326f5a8de3c907732a7806973db8ca8ae3447 100644 (file)
@@ -16,7 +16,6 @@
 package reflect
 
 import (
-       "runtime"
        "strconv"
        "sync"
        "unsafe"
@@ -255,7 +254,7 @@ type rtype struct {
        size       uintptr
        hash       uint32 // hash of type; avoids computation in hash tables
 
-       hashfn  func(unsafe.Pointer, uintptr) uintptr              // hash function
+       hashfn  func(unsafe.Pointer, uintptr, uintptr) uintptr     // hash function
        equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) bool // equality function
 
        gc            unsafe.Pointer // garbage collection data
@@ -330,9 +329,18 @@ type interfaceType struct {
 
 // mapType represents a map type.
 type mapType struct {
-       rtype `reflect:"map"`
-       key   *rtype // map key type
-       elem  *rtype // map element (value) type
+       rtype         `reflect:"map"`
+       key           *rtype // map key type
+       elem          *rtype // map element (value) type
+       bucket        *rtype // internal bucket structure
+       hmap          *rtype // internal map header
+       keysize       uint8  // size of key slot
+       indirectkey   uint8  // store ptr to key instead of key itself
+       valuesize     uint8  // size of value slot
+       indirectvalue uint8  // store ptr to value instead of value itself
+       bucketsize    uint16 // size of bucket
+       reflexivekey  bool   // true if k==k for all keys
+       needkeyupdate bool   // true if we need to update key on an overwrite
 }
 
 // ptrType represents a pointer type.
@@ -1606,20 +1614,25 @@ func MapOf(key, elem Type) Type {
        mt.elem = etyp
        mt.uncommonType = nil
        mt.ptrToThis = nil
-       // mt.gc = unsafe.Pointer(&ptrGC{
-       //      width:  unsafe.Sizeof(uintptr(0)),
-       //      op:     _GC_PTR,
-       //      off:    0,
-       //      elemgc: nil,
-       //      end:    _GC_END,
-       // })
 
-       // TODO(cmang): Generate GC data for Map elements.
-       mt.gc = unsafe.Pointer(&ptrDataGCProg)
-
-       // INCORRECT. Uncomment to check that TestMapOfGC and TestMapOfGCValues
-       // fail when mt.gc is wrong.
-       //mt.gc = unsafe.Pointer(&badGC{width: mt.size, end: _GC_END})
+       mt.bucket = bucketOf(ktyp, etyp)
+       if ktyp.size > maxKeySize {
+               mt.keysize = uint8(ptrSize)
+               mt.indirectkey = 1
+       } else {
+               mt.keysize = uint8(ktyp.size)
+               mt.indirectkey = 0
+       }
+       if etyp.size > maxValSize {
+               mt.valuesize = uint8(ptrSize)
+               mt.indirectvalue = 1
+       } else {
+               mt.valuesize = uint8(etyp.size)
+               mt.indirectvalue = 0
+       }
+       mt.bucketsize = uint16(mt.bucket.size)
+       mt.reflexivekey = isReflexive(ktyp)
+       mt.needkeyupdate = needKeyUpdate(ktyp)
 
        return cachePut(ckey, &mt.rtype)
 }
@@ -1824,72 +1837,60 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
        // Note that since the key and value are known to be <= 128 bytes,
        // they're guaranteed to have bitmaps instead of GC programs.
        // var gcdata *byte
-       var ptrdata uintptr
-       var overflowPad uintptr
+       // var ptrdata uintptr
 
-       // On NaCl, pad if needed to make overflow end at the proper struct alignment.
-       // On other systems, align > ptrSize is not possible.
-       if runtime.GOARCH == "amd64p32" && (ktyp.align > ptrSize || etyp.align > ptrSize) {
-               overflowPad = ptrSize
+       size := bucketSize
+       size = align(size, uintptr(ktyp.fieldAlign))
+       size += bucketSize * ktyp.size
+       size = align(size, uintptr(etyp.fieldAlign))
+       size += bucketSize * etyp.size
+
+       maxAlign := uintptr(ktyp.fieldAlign)
+       if maxAlign < uintptr(etyp.fieldAlign) {
+               maxAlign = uintptr(etyp.fieldAlign)
        }
-       size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize
-       if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
-               panic("reflect: bad size computation in MapOf")
+       if maxAlign > ptrSize {
+               size = align(size, maxAlign)
+               size += align(ptrSize, maxAlign) - ptrSize
        }
 
-       if kind != kindNoPointers {
-               nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize
-               mask := make([]byte, (nptr+7)/8)
-               base := bucketSize / ptrSize
+       ovoff := size
+       size += ptrSize
+       if maxAlign < ptrSize {
+               maxAlign = ptrSize
+       }
 
+       var gcPtr unsafe.Pointer
+       if kind != kindNoPointers {
+               gc := []uintptr{size}
+               base := bucketSize
+               base = align(base, uintptr(ktyp.fieldAlign))
                if ktyp.kind&kindNoPointers == 0 {
-                       if ktyp.kind&kindGCProg != 0 {
-                               panic("reflect: unexpected GC program in MapOf")
-                       }
-                       kmask := (*[16]byte)(unsafe.Pointer( /*ktyp.gcdata*/ nil))
-                       for i := uintptr(0); i < ktyp.size/ptrSize; i++ {
-                               if (kmask[i/8]>>(i%8))&1 != 0 {
-                                       for j := uintptr(0); j < bucketSize; j++ {
-                                               word := base + j*ktyp.size/ptrSize + i
-                                               mask[word/8] |= 1 << (word % 8)
-                                       }
-                               }
-                       }
+                       gc = append(gc, _GC_ARRAY_START, base, bucketSize, ktyp.size)
+                       gc = appendGCProgram(gc, ktyp, 0)
+                       gc = append(gc, _GC_ARRAY_NEXT)
                }
-               base += bucketSize * ktyp.size / ptrSize
-
+               base += ktyp.size * bucketSize
+               base = align(base, uintptr(etyp.fieldAlign))
                if etyp.kind&kindNoPointers == 0 {
-                       if etyp.kind&kindGCProg != 0 {
-                               panic("reflect: unexpected GC program in MapOf")
-                       }
-                       emask := (*[16]byte)(unsafe.Pointer( /*etyp.gcdata*/ nil))
-                       for i := uintptr(0); i < etyp.size/ptrSize; i++ {
-                               if (emask[i/8]>>(i%8))&1 != 0 {
-                                       for j := uintptr(0); j < bucketSize; j++ {
-                                               word := base + j*etyp.size/ptrSize + i
-                                               mask[word/8] |= 1 << (word % 8)
-                                       }
-                               }
-                       }
-               }
-               base += bucketSize * etyp.size / ptrSize
-               base += overflowPad / ptrSize
-
-               word := base
-               mask[word/8] |= 1 << (word % 8)
-               // gcdata = &mask[0]
-               ptrdata = (word + 1) * ptrSize
-
-               // overflow word must be last
-               if ptrdata != size {
-                       panic("reflect: bad layout computation in MapOf")
+                       gc = append(gc, _GC_ARRAY_START, base, bucketSize, etyp.size)
+                       gc = appendGCProgram(gc, etyp, 0)
+                       gc = append(gc, _GC_ARRAY_NEXT)
                }
+               gc = append(gc, _GC_APTR, ovoff, _GC_END)
+               gcPtr = unsafe.Pointer(&gc[0])
+       } else {
+               // No pointers in bucket.
+               gc := [...]uintptr{size, _GC_END}
+               gcPtr = unsafe.Pointer(&gc[0])
        }
 
        b := new(rtype)
-       // b.size = gc.size
-       // b.gc[0], _ = gc.finalize()
-       b.kind |= kindGCProg
+       b.align = int8(maxAlign)
+       b.fieldAlign = uint8(maxAlign)
+       b.size = size
+       b.kind = kind
+       b.gc = gcPtr
        s := "bucket(" + *ktyp.string + "," + *etyp.string + ")"
        b.string = &s
        return b
@@ -2202,14 +2203,14 @@ func StructOf(fields []StructField) Type {
                typ.gc = unsafe.Pointer(&gc[0])
        }
 
-       typ.hashfn = func(p unsafe.Pointer, size uintptr) uintptr {
-               ret := uintptr(0)
+       typ.hashfn = func(p unsafe.Pointer, seed, size uintptr) uintptr {
+               ret := seed
                for i, ft := range typ.fields {
                        if i > 0 {
                                ret *= 33
                        }
                        o := unsafe.Pointer(uintptr(p) + ft.offset)
-                       ret += ft.typ.hashfn(o, ft.typ.size)
+                       ret = ft.typ.hashfn(o, ret, ft.typ.size)
                }
                return ret
        }
@@ -2347,11 +2348,11 @@ func ArrayOf(count int, elem Type) Type {
 
        array.kind &^= kindDirectIface
 
-       array.hashfn = func(p unsafe.Pointer, size uintptr) uintptr {
-               ret := uintptr(0)
+       array.hashfn = func(p unsafe.Pointer, seed, size uintptr) uintptr {
+               ret := seed
                for i := 0; i < count; i++ {
                        ret *= 33
-                       ret += typ.hashfn(p, typ.size)
+                       ret = typ.hashfn(p, ret, typ.size)
                        p = unsafe.Pointer(uintptr(p) + typ.size)
                }
                return ret
index 7ba217eb782294cc6b1e44304cd98560f6cf1a32..2b1a9b72211d61143b519db33847134270468dcb 100644 (file)
@@ -90,7 +90,7 @@ func GCMask(x interface{}) (ret []byte) {
 //var IfaceHash = ifaceHash
 //var MemclrBytes = memclrBytes
 
-// var HashLoad = &hashLoad
+var HashLoad = &hashLoad
 
 // entry point for testing
 //func GostringW(w []uint16) (s string) {
diff --git a/libgo/go/runtime/hashmap.go b/libgo/go/runtime/hashmap.go
new file mode 100644 (file)
index 0000000..aaf4fb4
--- /dev/null
@@ -0,0 +1,1081 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+// This file contains the implementation of Go's map type.
+//
+// A map is just a hash table. The data is arranged
+// into an array of buckets. Each bucket contains up to
+// 8 key/value pairs. The low-order bits of the hash are
+// used to select a bucket. Each bucket contains a few
+// high-order bits of each hash to distinguish the entries
+// within a single bucket.
+//
+// If more than 8 keys hash to a bucket, we chain on
+// extra buckets.
+//
+// When the hashtable grows, we allocate a new array
+// of buckets twice as big. Buckets are incrementally
+// copied from the old bucket array to the new bucket array.
+//
+// Map iterators walk through the array of buckets and
+// return the keys in walk order (bucket #, then overflow
+// chain order, then bucket index).  To maintain iteration
+// semantics, we never move keys within their bucket (if
+// we did, keys might be returned 0 or 2 times).  When
+// growing the table, iterators remain iterating through the
+// old table and must check the new table if the bucket
+// they are iterating through has been moved ("evacuated")
+// to the new table.
+
+// Picking loadFactor: too large and we have lots of overflow
+// buckets, too small and we waste a lot of space. I wrote
+// a simple program to check some stats for different loads:
+// (64-bit, 8 byte keys and values)
+//  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
+//        4.00         2.13        20.77         3.00         4.00
+//        4.50         4.05        17.30         3.25         4.50
+//        5.00         6.85        14.77         3.50         5.00
+//        5.50        10.55        12.94         3.75         5.50
+//        6.00        15.27        11.67         4.00         6.00
+//        6.50        20.90        10.79         4.25         6.50
+//        7.00        27.14        10.15         4.50         7.00
+//        7.50        34.03         9.73         4.75         7.50
+//        8.00        41.10         9.40         5.00         8.00
+//
+// %overflow   = percentage of buckets which have an overflow bucket
+// bytes/entry = overhead bytes used per key/value pair
+// hitprobe    = # of entries to check when looking up a present key
+// missprobe   = # of entries to check when looking up an absent key
+//
+// Keep in mind this data is for maximally loaded tables, i.e. just
+// before the table grows. Typical tables will be somewhat less loaded.
+
+import (
+       "runtime/internal/atomic"
+       "runtime/internal/sys"
+       "unsafe"
+)
+
+// For gccgo, use go:linkname to rename compiler-called functions to
+// themselves, so that the compiler will export them.
+//
+//go:linkname makemap runtime.makemap
+//go:linkname mapaccess1 runtime.mapaccess1
+//go:linkname mapaccess2 runtime.mapaccess2
+//go:linkname mapaccess1_fat runtime.mapaccess1_fat
+//go:linkname mapaccess2_fat runtime.mapaccess2_fat
+//go:linkname mapassign1 runtime.mapassign1
+//go:linkname mapdelete runtime.mapdelete
+//go:linkname mapiterinit runtime.mapiterinit
+//go:linkname mapiternext runtime.mapiternext
+
+const (
+       // Maximum number of key/value pairs a bucket can hold.
+       bucketCntBits = 3
+       bucketCnt     = 1 << bucketCntBits
+
+       // Maximum average load of a bucket that triggers growth.
+       loadFactor = 6.5
+
+       // Maximum key or value size to keep inline (instead of mallocing per element).
+       // Must fit in a uint8.
+       // Fast versions cannot handle big values - the cutoff size for
+       // fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
+       maxKeySize   = 128
+       maxValueSize = 128
+
+       // data offset should be the size of the bmap struct, but needs to be
+       // aligned correctly. For amd64p32 this means 64-bit alignment
+       // even though pointers are 32 bit.
+       dataOffset = unsafe.Offsetof(struct {
+               b bmap
+               v int64
+       }{}.v)
+
+       // Possible tophash values. We reserve a few possibilities for special marks.
+       // Each bucket (including its overflow buckets, if any) will have either all or none of its
+       // entries in the evacuated* states (except during the evacuate() method, which only happens
+       // during map writes and thus no one else can observe the map during that time).
+       empty          = 0 // cell is empty
+       evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
+       evacuatedX     = 2 // key/value is valid.  Entry has been evacuated to first half of larger table.
+       evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
+       minTopHash     = 4 // minimum tophash for a normal filled cell.
+
+       // flags
+       iterator    = 1 // there may be an iterator using buckets
+       oldIterator = 2 // there may be an iterator using oldbuckets
+       hashWriting = 4 // a goroutine is writing to the map
+
+       // sentinel bucket ID for iterator checks
+       noCheck = 1<<(8*sys.PtrSize) - 1
+)
+
+// A header for a Go map.
+type hmap struct {
+       // Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
+       // ../reflect/type.go. Don't change this structure without also changing that code!
+       count int // # live cells == size of map.  Must be first (used by len() builtin)
+       flags uint8
+       B     uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
+       hash0 uint32 // hash seed
+
+       buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
+       oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
+       nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
+
+       // If both key and value do not contain pointers and are inline, then we mark bucket
+       // type as containing no pointers. This avoids scanning such maps.
+       // However, bmap.overflow is a pointer. In order to keep overflow buckets
+       // alive, we store pointers to all overflow buckets in hmap.overflow.
+       // Overflow is used only if key and value do not contain pointers.
+       // overflow[0] contains overflow buckets for hmap.buckets.
+       // overflow[1] contains overflow buckets for hmap.oldbuckets.
+       // The first indirection allows us to reduce static size of hmap.
+       // The second indirection allows to store a pointer to the slice in hiter.
+       overflow *[2]*[]*bmap
+}
+
+// A bucket for a Go map.
+type bmap struct {
+       tophash [bucketCnt]uint8
+       // Followed by bucketCnt keys and then bucketCnt values.
+       // NOTE: packing all the keys together and then all the values together makes the
+       // code a bit more complicated than alternating key/value/key/value/... but it allows
+       // us to eliminate padding which would be needed for, e.g., map[int64]int8.
+       // Followed by an overflow pointer.
+}
+
+// A hash iteration structure.
+// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
+// the layout of this structure.
+type hiter struct {
+       key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/internal/gc/range.go).
+       value       unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
+       t           *maptype
+       h           *hmap
+       buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
+       bptr        *bmap          // current bucket
+       overflow    [2]*[]*bmap    // keeps overflow buckets alive
+       startBucket uintptr        // bucket iteration started at
+       offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
+       wrapped     bool           // already wrapped around from end of bucket array to beginning
+       B           uint8
+       i           uint8
+       bucket      uintptr
+       checkBucket uintptr
+}
+
+func evacuated(b *bmap) bool {
+       h := b.tophash[0]
+       return h > empty && h < minTopHash
+}
+
+func (b *bmap) overflow(t *maptype) *bmap {
+       return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
+}
+
+func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
+       if t.bucket.kind&kindNoPointers != 0 {
+               h.createOverflow()
+               *h.overflow[0] = append(*h.overflow[0], ovf)
+       }
+       *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
+}
+
+func (h *hmap) createOverflow() {
+       if h.overflow == nil {
+               h.overflow = new([2]*[]*bmap)
+       }
+       if h.overflow[0] == nil {
+               h.overflow[0] = new([]*bmap)
+       }
+}
+
+// makemap implements a Go map creation make(map[k]v, hint)
+// If the compiler has determined that the map or the first bucket
+// can be created on the stack, h and/or bucket may be non-nil.
+// If h != nil, the map can be created directly in h.
+// If bucket != nil, bucket can be used as the first bucket.
+func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
+       if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != t.hmap.size {
+               println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
+               throw("bad hmap size")
+       }
+
+       if hint < 0 || int64(int32(hint)) != hint {
+               panic(plainError("makemap: size out of range"))
+               // TODO: make hint an int, then none of this nonsense
+       }
+
+       if !ismapkey(t.key) {
+               throw("runtime.makemap: unsupported map key type")
+       }
+
+       // check compiler's and reflect's math
+       if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
+               t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
+               throw("key size wrong")
+       }
+       if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
+               t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
+               throw("value size wrong")
+       }
+
+       // invariants we depend on. We should probably check these at compile time
+       // somewhere, but for now we'll do it here.
+       if t.key.align > bucketCnt {
+               throw("key align too big")
+       }
+       if t.elem.align > bucketCnt {
+               throw("value align too big")
+       }
+       if t.key.size%uintptr(t.key.align) != 0 {
+               throw("key size not a multiple of key align")
+       }
+       if t.elem.size%uintptr(t.elem.align) != 0 {
+               throw("value size not a multiple of value align")
+       }
+       if bucketCnt < 8 {
+               throw("bucketsize too small for proper alignment")
+       }
+       if dataOffset%uintptr(t.key.align) != 0 {
+               throw("need padding in bucket (key)")
+       }
+       if dataOffset%uintptr(t.elem.align) != 0 {
+               throw("need padding in bucket (value)")
+       }
+
+       // find size parameter which will hold the requested # of elements
+       B := uint8(0)
+       for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
+       }
+
+       // allocate initial hash table
+       // if B == 0, the buckets field is allocated lazily later (in mapassign)
+       // If hint is large zeroing this memory could take a while.
+       buckets := bucket
+       if B != 0 {
+               buckets = newarray(t.bucket, 1<<B)
+       }
+
+       // initialize Hmap
+       if h == nil {
+               h = (*hmap)(newobject(t.hmap))
+       }
+       h.count = 0
+       h.B = B
+       h.flags = 0
+       h.hash0 = fastrand1()
+       h.buckets = buckets
+       h.oldbuckets = nil
+       h.nevacuate = 0
+
+       return h
+}
+
+// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
+// it will return a reference to the zero object for the value type if
+// the key is not in the map.
+// NOTE: The returned pointer may keep the whole map live, so don't
+// hold onto it for very long.
+func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+               pc := funcPC(mapaccess1)
+               racereadpc(unsafe.Pointer(h), callerpc, pc)
+               raceReadObjectPC(t.key, key, callerpc, pc)
+       }
+       if msanenabled && h != nil {
+               msanread(key, t.key.size)
+       }
+       if h == nil || h.count == 0 {
+               return unsafe.Pointer(&zeroVal[0])
+       }
+       if h.flags&hashWriting != 0 {
+               throw("concurrent map read and map write")
+       }
+       hashfn := t.key.hashfn
+       equalfn := t.key.equalfn
+       hash := hashfn(key, uintptr(h.hash0), uintptr(t.keysize))
+       m := uintptr(1)<<h.B - 1
+       b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+       if c := h.oldbuckets; c != nil {
+               oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+               if !evacuated(oldb) {
+                       b = oldb
+               }
+       }
+       top := uint8(hash >> (sys.PtrSize*8 - 8))
+       if top < minTopHash {
+               top += minTopHash
+       }
+       for {
+               for i := uintptr(0); i < bucketCnt; i++ {
+                       if b.tophash[i] != top {
+                               continue
+                       }
+                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+                       if t.indirectkey {
+                               k = *((*unsafe.Pointer)(k))
+                       }
+                       if equalfn(key, k, uintptr(t.keysize)) {
+                               v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
+                               if t.indirectvalue {
+                                       v = *((*unsafe.Pointer)(v))
+                               }
+                               return v
+                       }
+               }
+               b = b.overflow(t)
+               if b == nil {
+                       return unsafe.Pointer(&zeroVal[0])
+               }
+       }
+}
+
+func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+               pc := funcPC(mapaccess2)
+               racereadpc(unsafe.Pointer(h), callerpc, pc)
+               raceReadObjectPC(t.key, key, callerpc, pc)
+       }
+       if msanenabled && h != nil {
+               msanread(key, t.key.size)
+       }
+       if h == nil || h.count == 0 {
+               return unsafe.Pointer(&zeroVal[0]), false
+       }
+       if h.flags&hashWriting != 0 {
+               throw("concurrent map read and map write")
+       }
+       hashfn := t.key.hashfn
+       equalfn := t.key.equalfn
+       hash := hashfn(key, uintptr(h.hash0), uintptr(t.keysize))
+       m := uintptr(1)<<h.B - 1
+       b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
+       if c := h.oldbuckets; c != nil {
+               oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
+               if !evacuated(oldb) {
+                       b = oldb
+               }
+       }
+       top := uint8(hash >> (sys.PtrSize*8 - 8))
+       if top < minTopHash {
+               top += minTopHash
+       }
+       for {
+               for i := uintptr(0); i < bucketCnt; i++ {
+                       if b.tophash[i] != top {
+                               continue
+                       }
+                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+                       if t.indirectkey {
+                               k = *((*unsafe.Pointer)(k))
+                       }
+                       if equalfn(key, k, uintptr(t.keysize)) {
+                               v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
+                               if t.indirectvalue {
+                                       v = *((*unsafe.Pointer)(v))
+                               }
+                               return v, true
+                       }
+               }
+               b = b.overflow(t)
+               if b == nil {
+                       return unsafe.Pointer(&zeroVal[0]), false
+               }
+       }
+}
+
+// mapaccessK returns pointers to both the key and the value stored for key,
+// or (nil, nil) when the key is absent. It is used by the map iterator
+// (mapiternext) to re-locate an entry after the table may have grown.
+// NOTE(review): unlike mapaccess1/mapaccess2 it performs no race/msan
+// instrumentation; presumably only runtime-internal callers use it.
+func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
+       if h == nil || h.count == 0 {
+               return nil, nil
+       }
+       if h.flags&hashWriting != 0 {
+               throw("concurrent map read and map write")
+       }
+       hashfn := t.key.hashfn
+       equalfn := t.key.equalfn
+       hash := hashfn(key, uintptr(h.hash0), uintptr(t.keysize))
+       // Bucket mask for the current table: 2**B buckets.
+       m := uintptr(1)<<h.B - 1
+       b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
+       if c := h.oldbuckets; c != nil {
+               // Mid-grow: if the corresponding old bucket (mask m>>1, half the
+               // size) has not been evacuated yet, the entry is still there.
+               oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
+               if !evacuated(oldb) {
+                       b = oldb
+               }
+       }
+       // Top byte of the hash, biased up to at least minTopHash so it can
+       // never collide with the empty/evacuated sentinel tophash values.
+       top := uint8(hash >> (sys.PtrSize*8 - 8))
+       if top < minTopHash {
+               top += minTopHash
+       }
+       for {
+               for i := uintptr(0); i < bucketCnt; i++ {
+                       if b.tophash[i] != top {
+                               continue
+                       }
+                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+                       if t.indirectkey {
+                               // Key slot holds a pointer to the key, not the key itself.
+                               k = *((*unsafe.Pointer)(k))
+                       }
+                       if equalfn(key, k, uintptr(t.keysize)) {
+                               v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
+                               if t.indirectvalue {
+                                       v = *((*unsafe.Pointer)(v))
+                               }
+                               return k, v
+                       }
+               }
+               // Keep searching the overflow chain of this bucket.
+               b = b.overflow(t)
+               if b == nil {
+                       return nil, nil
+               }
+       }
+}
+
+func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
+       v := mapaccess1(t, h, key)
+       if v == unsafe.Pointer(&zeroVal[0]) {
+               return zero
+       }
+       return v
+}
+
+func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
+       v := mapaccess1(t, h, key)
+       if v == unsafe.Pointer(&zeroVal[0]) {
+               return zero, false
+       }
+       return v, true
+}
+
+// mapassign1 implements h[key] = val, growing the table when the load
+// factor is exceeded. Assignment to a nil map panics; concurrent writes
+// are detected via the hashWriting flag and throw.
+func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
+       if h == nil {
+               panic(plainError("assignment to entry in nil map"))
+       }
+       if raceenabled {
+               callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+               pc := funcPC(mapassign1)
+               racewritepc(unsafe.Pointer(h), callerpc, pc)
+               raceReadObjectPC(t.key, key, callerpc, pc)
+               raceReadObjectPC(t.elem, val, callerpc, pc)
+       }
+       if msanenabled {
+               msanread(key, t.key.size)
+               msanread(val, t.elem.size)
+       }
+       if h.flags&hashWriting != 0 {
+               throw("concurrent map writes")
+       }
+       // Mark the map as being written; cleared again at done.
+       h.flags |= hashWriting
+
+       hashfn := t.key.hashfn
+       equalfn := t.key.equalfn
+       hash := hashfn(key, uintptr(h.hash0), uintptr(t.keysize))
+
+       // Lazily allocate the initial (single) bucket array.
+       if h.buckets == nil {
+               h.buckets = newarray(t.bucket, 1)
+       }
+
+       // Restart point: growing the table invalidates bucket addresses.
+again:
+       bucket := hash & (uintptr(1)<<h.B - 1)
+       if h.oldbuckets != nil {
+               growWork(t, h, bucket)
+       }
+       b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+       top := uint8(hash >> (sys.PtrSize*8 - 8))
+       if top < minTopHash {
+               top += minTopHash
+       }
+
+       // First empty slot seen while scanning, used if the key is not found.
+       var inserti *uint8
+       var insertk unsafe.Pointer
+       var insertv unsafe.Pointer
+       for {
+               for i := uintptr(0); i < bucketCnt; i++ {
+                       if b.tophash[i] != top {
+                               if b.tophash[i] == empty && inserti == nil {
+                                       inserti = &b.tophash[i]
+                                       insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+                                       insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
+                               }
+                               continue
+                       }
+                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+                       k2 := k
+                       if t.indirectkey {
+                               k2 = *((*unsafe.Pointer)(k2))
+                       }
+                       if !equalfn(key, k2, uintptr(t.keysize)) {
+                               continue
+                       }
+                       // already have a mapping for key. Update it.
+                       // needkeyupdate covers keys that compare equal but are not
+                       // identical (e.g. +0.0 vs -0.0); see the type descriptor.
+                       if t.needkeyupdate {
+                               typedmemmove(t.key, k2, key)
+                       }
+                       v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
+                       v2 := v
+                       if t.indirectvalue {
+                               v2 = *((*unsafe.Pointer)(v2))
+                       }
+                       typedmemmove(t.elem, v2, val)
+                       goto done
+               }
+               ovf := b.overflow(t)
+               if ovf == nil {
+                       break
+               }
+               b = ovf
+       }
+
+       // did not find mapping for key. Allocate new cell & add entry.
+       // Grow when average load exceeds loadFactor (and the map is non-tiny).
+       if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
+               hashGrow(t, h)
+               goto again // Growing the table invalidates everything, so try again
+       }
+
+       if inserti == nil {
+               // all current buckets are full, allocate a new one.
+               newb := (*bmap)(newobject(t.bucket))
+               h.setoverflow(t, b, newb)
+               inserti = &newb.tophash[0]
+               insertk = add(unsafe.Pointer(newb), dataOffset)
+               insertv = add(insertk, bucketCnt*uintptr(t.keysize))
+       }
+
+       // store new key/value at insert position
+       if t.indirectkey {
+               kmem := newobject(t.key)
+               *(*unsafe.Pointer)(insertk) = kmem
+               insertk = kmem
+       }
+       if t.indirectvalue {
+               vmem := newobject(t.elem)
+               *(*unsafe.Pointer)(insertv) = vmem
+               insertv = vmem
+       }
+       typedmemmove(t.key, insertk, key)
+       typedmemmove(t.elem, insertv, val)
+       // Publish the slot by writing its tophash last.
+       *inserti = top
+       h.count++
+
+done:
+       if h.flags&hashWriting == 0 {
+               throw("concurrent map writes")
+       }
+       h.flags &^= hashWriting
+}
+
+// mapdelete removes the entry for key from h, if present. Deleting from a
+// nil or empty map is a no-op; concurrent writes throw via hashWriting.
+func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+               pc := funcPC(mapdelete)
+               racewritepc(unsafe.Pointer(h), callerpc, pc)
+               raceReadObjectPC(t.key, key, callerpc, pc)
+       }
+       if msanenabled && h != nil {
+               msanread(key, t.key.size)
+       }
+       if h == nil || h.count == 0 {
+               return
+       }
+       if h.flags&hashWriting != 0 {
+               throw("concurrent map writes")
+       }
+       h.flags |= hashWriting
+
+       hashfn := t.key.hashfn
+       equalfn := t.key.equalfn
+       hash := hashfn(key, uintptr(h.hash0), uintptr(t.keysize))
+       bucket := hash & (uintptr(1)<<h.B - 1)
+       if h.oldbuckets != nil {
+               // Help an in-progress grow before touching the bucket.
+               growWork(t, h, bucket)
+       }
+       b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+       top := uint8(hash >> (sys.PtrSize*8 - 8))
+       if top < minTopHash {
+               top += minTopHash
+       }
+       for {
+               for i := uintptr(0); i < bucketCnt; i++ {
+                       if b.tophash[i] != top {
+                               continue
+                       }
+                       k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
+                       k2 := k
+                       if t.indirectkey {
+                               k2 = *((*unsafe.Pointer)(k2))
+                       }
+                       if !equalfn(key, k2, uintptr(t.keysize)) {
+                               continue
+                       }
+                       // Clear the key/value slots (for indirect entries this
+                       // clears the stored pointer) so the GC can reclaim them.
+                       memclr(k, uintptr(t.keysize))
+                       v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
+                       memclr(v, uintptr(t.valuesize))
+                       b.tophash[i] = empty
+                       h.count--
+                       goto done
+               }
+               b = b.overflow(t)
+               if b == nil {
+                       goto done
+               }
+       }
+
+done:
+       if h.flags&hashWriting == 0 {
+               throw("concurrent map writes")
+       }
+       h.flags &^= hashWriting
+}
+
+// mapiterinit initializes it for ranging over h and positions it on the
+// first entry by calling mapiternext. The starting bucket and in-bucket
+// offset are randomized (fastrand1) so iteration order is unspecified.
+func mapiterinit(t *maptype, h *hmap, it *hiter) {
+       // Clear pointer fields so garbage collector does not complain.
+       it.key = nil
+       it.value = nil
+       it.t = nil
+       it.h = nil
+       it.buckets = nil
+       it.bptr = nil
+       it.overflow[0] = nil
+       it.overflow[1] = nil
+
+       if raceenabled && h != nil {
+               callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+               racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
+       }
+
+       if h == nil || h.count == 0 {
+               it.key = nil
+               it.value = nil
+               return
+       }
+
+       // The compiler also knows the layout of hiter; keep them in sync.
+       if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
+               throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
+       }
+       it.t = t
+       it.h = h
+
+       // grab snapshot of bucket state
+       it.B = h.B
+       it.buckets = h.buckets
+       if t.bucket.kind&kindNoPointers != 0 {
+               // Allocate the current slice and remember pointers to both current and old.
+               // This preserves all relevant overflow buckets alive even if
+               // the table grows and/or overflow buckets are added to the table
+               // while we are iterating.
+               h.createOverflow()
+               it.overflow = *h.overflow
+       }
+
+       // decide where to start
+       r := uintptr(fastrand1())
+       if h.B > 31-bucketCntBits {
+               // Need more than 31 random bits; take a second sample.
+               r += uintptr(fastrand1()) << 31
+       }
+       it.startBucket = r & (uintptr(1)<<h.B - 1)
+       it.offset = uint8(r >> h.B & (bucketCnt - 1))
+
+       // iterator state
+       it.bucket = it.startBucket
+       it.wrapped = false
+       it.bptr = nil
+
+       // Remember we have an iterator.
+       // Can run concurrently with another hash_iter_init().
+       if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
+               atomic.Or8(&h.flags, iterator|oldIterator)
+       }
+
+       mapiternext(it)
+}
+
+// mapiternext advances it to the next map entry, filling in it.key and
+// it.value, or setting both to nil at the end of iteration. It tolerates
+// the table growing while the iterator is live: when iterating an
+// un-evacuated old bucket it filters entries by which new bucket they will
+// evacuate to (checkBucket), and re-looks-up entries whose golden copy has
+// moved.
+func mapiternext(it *hiter) {
+       h := it.h
+       if raceenabled {
+               callerpc := getcallerpc(unsafe.Pointer( /* &it */ nil))
+               racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
+       }
+       t := it.t
+       bucket := it.bucket
+       b := it.bptr
+       i := it.i
+       checkBucket := it.checkBucket
+       hashfn := t.key.hashfn
+       equalfn := t.key.equalfn
+
+next:
+       if b == nil {
+               if bucket == it.startBucket && it.wrapped {
+                       // end of iteration
+                       it.key = nil
+                       it.value = nil
+                       return
+               }
+               if h.oldbuckets != nil && it.B == h.B {
+                       // Iterator was started in the middle of a grow, and the grow isn't done yet.
+                       // If the bucket we're looking at hasn't been filled in yet (i.e. the old
+                       // bucket hasn't been evacuated) then we need to iterate through the old
+                       // bucket and only return the ones that will be migrated to this bucket.
+                       oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
+                       b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
+                       if !evacuated(b) {
+                               checkBucket = bucket
+                       } else {
+                               b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
+                               checkBucket = noCheck
+                       }
+               } else {
+                       b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
+                       checkBucket = noCheck
+               }
+               bucket++
+               if bucket == uintptr(1)<<it.B {
+                       bucket = 0
+                       it.wrapped = true
+               }
+               i = 0
+       }
+       for ; i < bucketCnt; i++ {
+               // Start each bucket at the random per-iterator offset.
+               offi := (i + it.offset) & (bucketCnt - 1)
+               k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
+               v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
+               if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
+                       if checkBucket != noCheck {
+                               // Special case: iterator was started during a grow and the
+                               // grow is not done yet. We're working on a bucket whose
+                               // oldbucket has not been evacuated yet. Or at least, it wasn't
+                               // evacuated when we started the bucket. So we're iterating
+                               // through the oldbucket, skipping any keys that will go
+                               // to the other new bucket (each oldbucket expands to two
+                               // buckets during a grow).
+                               k2 := k
+                               if t.indirectkey {
+                                       k2 = *((*unsafe.Pointer)(k2))
+                               }
+                               if t.reflexivekey || equalfn(k2, k2, uintptr(t.keysize)) {
+                                       // If the item in the oldbucket is not destined for
+                                       // the current new bucket in the iteration, skip it.
+                                       hash := hashfn(k2, uintptr(h.hash0), uintptr(t.keysize))
+                                       if hash&(uintptr(1)<<it.B-1) != checkBucket {
+                                               continue
+                                       }
+                               } else {
+                                       // Hash isn't repeatable if k != k (NaNs).  We need a
+                                       // repeatable and randomish choice of which direction
+                                       // to send NaNs during evacuation. We'll use the low
+                                       // bit of tophash to decide which way NaNs go.
+                                       // NOTE: this case is why we need two evacuate tophash
+                                       // values, evacuatedX and evacuatedY, that differ in
+                                       // their low bit.
+                                       if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
+                                               continue
+                                       }
+                               }
+                       }
+                       if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
+                               // this is the golden data, we can return it.
+                               if t.indirectkey {
+                                       k = *((*unsafe.Pointer)(k))
+                               }
+                               it.key = k
+                               if t.indirectvalue {
+                                       v = *((*unsafe.Pointer)(v))
+                               }
+                               it.value = v
+                       } else {
+                               // The hash table has grown since the iterator was started.
+                               // The golden data for this key is now somewhere else.
+                               k2 := k
+                               if t.indirectkey {
+                                       k2 = *((*unsafe.Pointer)(k2))
+                               }
+                               if t.reflexivekey || equalfn(k2, k2, uintptr(t.keysize)) {
+                                       // Check the current hash table for the data.
+                                       // This code handles the case where the key
+                                       // has been deleted, updated, or deleted and reinserted.
+                                       // NOTE: we need to regrab the key as it has potentially been
+                                       // updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
+                                       rk, rv := mapaccessK(t, h, k2)
+                                       if rk == nil {
+                                               continue // key has been deleted
+                                       }
+                                       it.key = rk
+                                       it.value = rv
+                               } else {
+                                       // if key!=key then the entry can't be deleted or
+                                       // updated, so we can just return it. That's lucky for
+                                       // us because when key!=key we can't look it up
+                                       // successfully in the current table.
+                                       it.key = k2
+                                       if t.indirectvalue {
+                                               v = *((*unsafe.Pointer)(v))
+                                       }
+                                       it.value = v
+                               }
+                       }
+                       // Save resume state before returning this entry.
+                       it.bucket = bucket
+                       if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
+                               it.bptr = b
+                       }
+                       it.i = i + 1
+                       it.checkBucket = checkBucket
+                       return
+               }
+       }
+       b = b.overflow(t)
+       i = 0
+       goto next
+}
+
+// hashGrow doubles the bucket array (B+1) and installs it, moving the old
+// array to h.oldbuckets. The entries themselves are copied incrementally by
+// growWork/evacuate during subsequent map writes, not here.
+func hashGrow(t *maptype, h *hmap) {
+       if h.oldbuckets != nil {
+               throw("evacuation not done in time")
+       }
+       oldbuckets := h.buckets
+       newbuckets := newarray(t.bucket, 1<<(h.B+1))
+       // An existing iterator flag is demoted to oldIterator: it now refers
+       // to the old generation of buckets.
+       flags := h.flags &^ (iterator | oldIterator)
+       if h.flags&iterator != 0 {
+               flags |= oldIterator
+       }
+       // commit the grow (atomic wrt gc)
+       h.B++
+       h.flags = flags
+       h.oldbuckets = oldbuckets
+       h.buckets = newbuckets
+       h.nevacuate = 0
+
+       if h.overflow != nil {
+               // Promote current overflow buckets to the old generation.
+               if h.overflow[1] != nil {
+                       throw("overflow is not nil")
+               }
+               h.overflow[1] = h.overflow[0]
+               h.overflow[0] = nil
+       }
+
+       // the actual copying of the hash table data is done incrementally
+       // by growWork() and evacuate().
+}
+
+// growWork performs incremental evacuation on behalf of a write to bucket:
+// it evacuates the old bucket that maps to bucket, plus one more (at
+// h.nevacuate) so the grow is guaranteed to make forward progress.
+func growWork(t *maptype, h *hmap, bucket uintptr) {
+       noldbuckets := uintptr(1) << (h.B - 1)
+
+       // make sure we evacuate the oldbucket corresponding
+       // to the bucket we're about to use
+       evacuate(t, h, bucket&(noldbuckets-1))
+
+       // evacuate one more oldbucket to make progress on growing
+       if h.oldbuckets != nil {
+               evacuate(t, h, h.nevacuate)
+       }
+}
+
+// evacuate moves the contents of old bucket oldbucket (including its
+// overflow chain) into the doubled bucket array: each entry goes either to
+// bucket x (same index) or bucket y (index + newbit), decided by the new
+// bit of the entry's hash. Old slots are marked with evacuatedX/Y/Empty so
+// live iterators can tell where the data went. When the last old bucket is
+// evacuated, the old array and old overflow list are released.
+func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
+       b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
+       newbit := uintptr(1) << (h.B - 1)
+       hashfn := t.key.hashfn
+       equalfn := t.key.equalfn
+       if !evacuated(b) {
+               // TODO: reuse overflow buckets instead of using new ones, if there
+               // is no iterator using the old buckets.  (If !oldIterator.)
+
+               // x/y are the two destination buckets; xi/yi the next free slot
+               // in each; xk/xv and yk/yv the corresponding key/value cursors.
+               x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
+               y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
+               xi := 0
+               yi := 0
+               xk := add(unsafe.Pointer(x), dataOffset)
+               yk := add(unsafe.Pointer(y), dataOffset)
+               xv := add(xk, bucketCnt*uintptr(t.keysize))
+               yv := add(yk, bucketCnt*uintptr(t.keysize))
+               for ; b != nil; b = b.overflow(t) {
+                       k := add(unsafe.Pointer(b), dataOffset)
+                       v := add(k, bucketCnt*uintptr(t.keysize))
+                       for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
+                               top := b.tophash[i]
+                               if top == empty {
+                                       b.tophash[i] = evacuatedEmpty
+                                       continue
+                               }
+                               if top < minTopHash {
+                                       throw("bad map state")
+                               }
+                               k2 := k
+                               if t.indirectkey {
+                                       k2 = *((*unsafe.Pointer)(k2))
+                               }
+                               // Compute hash to make our evacuation decision (whether we need
+                               // to send this key/value to bucket x or bucket y).
+                               hash := hashfn(k2, uintptr(h.hash0), uintptr(t.keysize))
+                               if h.flags&iterator != 0 {
+                                       if !t.reflexivekey && !equalfn(k2, k2, uintptr(t.keysize)) {
+                                               // If key != key (NaNs), then the hash could be (and probably
+                                               // will be) entirely different from the old hash. Moreover,
+                                               // it isn't reproducible. Reproducibility is required in the
+                                               // presence of iterators, as our evacuation decision must
+                                               // match whatever decision the iterator made.
+                                               // Fortunately, we have the freedom to send these keys either
+                                               // way. Also, tophash is meaningless for these kinds of keys.
+                                               // We let the low bit of tophash drive the evacuation decision.
+                                               // We recompute a new random tophash for the next level so
+                                               // these keys will get evenly distributed across all buckets
+                                               // after multiple grows.
+                                               if (top & 1) != 0 {
+                                                       hash |= newbit
+                                               } else {
+                                                       hash &^= newbit
+                                               }
+                                               top = uint8(hash >> (sys.PtrSize*8 - 8))
+                                               if top < minTopHash {
+                                                       top += minTopHash
+                                               }
+                                       }
+                               }
+                               if (hash & newbit) == 0 {
+                                       b.tophash[i] = evacuatedX
+                                       if xi == bucketCnt {
+                                               // Destination bucket full: chain a new overflow bucket.
+                                               newx := (*bmap)(newobject(t.bucket))
+                                               h.setoverflow(t, x, newx)
+                                               x = newx
+                                               xi = 0
+                                               xk = add(unsafe.Pointer(x), dataOffset)
+                                               xv = add(xk, bucketCnt*uintptr(t.keysize))
+                                       }
+                                       x.tophash[xi] = top
+                                       if t.indirectkey {
+                                               *(*unsafe.Pointer)(xk) = k2 // copy pointer
+                                       } else {
+                                               typedmemmove(t.key, xk, k) // copy value
+                                       }
+                                       if t.indirectvalue {
+                                               *(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
+                                       } else {
+                                               typedmemmove(t.elem, xv, v)
+                                       }
+                                       xi++
+                                       xk = add(xk, uintptr(t.keysize))
+                                       xv = add(xv, uintptr(t.valuesize))
+                               } else {
+                                       b.tophash[i] = evacuatedY
+                                       if yi == bucketCnt {
+                                               newy := (*bmap)(newobject(t.bucket))
+                                               h.setoverflow(t, y, newy)
+                                               y = newy
+                                               yi = 0
+                                               yk = add(unsafe.Pointer(y), dataOffset)
+                                               yv = add(yk, bucketCnt*uintptr(t.keysize))
+                                       }
+                                       y.tophash[yi] = top
+                                       if t.indirectkey {
+                                               *(*unsafe.Pointer)(yk) = k2
+                                       } else {
+                                               typedmemmove(t.key, yk, k)
+                                       }
+                                       if t.indirectvalue {
+                                               *(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
+                                       } else {
+                                               typedmemmove(t.elem, yv, v)
+                                       }
+                                       yi++
+                                       yk = add(yk, uintptr(t.keysize))
+                                       yv = add(yv, uintptr(t.valuesize))
+                               }
+                       }
+               }
+               // Unlink the overflow buckets & clear key/value to help GC.
+               if h.flags&oldIterator == 0 {
+                       b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
+                       memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
+               }
+       }
+
+       // Advance evacuation mark
+       if oldbucket == h.nevacuate {
+               h.nevacuate = oldbucket + 1
+               if oldbucket+1 == newbit { // newbit == # of oldbuckets
+                       // Growing is all done. Free old main bucket array.
+                       h.oldbuckets = nil
+                       // Can discard old overflow buckets as well.
+                       // If they are still referenced by an iterator,
+                       // then the iterator holds a pointers to the slice.
+                       if h.overflow != nil {
+                               h.overflow[1] = nil
+                       }
+               }
+       }
+}
+
+// ismapkey reports whether values of type t may be used as map keys,
+// i.e. whether the type descriptor provides a hash function.
+func ismapkey(t *_type) bool {
+       return t.hashfn != nil
+}
+
+// Reflect stubs. Called from ../reflect/asm_*.s
+
+//go:linkname reflect_makemap reflect.makemap
+func reflect_makemap(t *maptype) *hmap {
+       // Size hint 0, no preallocated hmap or bucket storage.
+       return makemap(t, 0, nil, nil)
+}
+
+//go:linkname reflect_mapaccess reflect.mapaccess
+func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+       val, ok := mapaccess2(t, h, key)
+       if !ok {
+               // reflect wants nil for a missing element
+               val = nil
+       }
+       return val
+}
+
+//go:linkname reflect_mapassign reflect.mapassign
+// Thin wrapper so package reflect can assign into a map.
+func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
+       mapassign1(t, h, key, val)
+}
+
+//go:linkname reflect_mapdelete reflect.mapdelete
+// Thin wrapper so package reflect can delete from a map.
+func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
+       mapdelete(t, h, key)
+}
+
+//go:linkname reflect_mapiterinit reflect.mapiterinit
+// Allocates a fresh iterator for reflect; ownership passes to the caller.
+func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
+       it := new(hiter)
+       mapiterinit(t, h, it)
+       return it
+}
+
+//go:linkname reflect_mapiternext reflect.mapiternext
+// Thin wrapper so package reflect can advance a map iterator.
+func reflect_mapiternext(it *hiter) {
+       mapiternext(it)
+}
+
+//go:linkname reflect_mapiterkey reflect.mapiterkey
+// Returns the current key of the iterator (nil when iteration is done).
+func reflect_mapiterkey(it *hiter) unsafe.Pointer {
+       return it.key
+}
+
+//go:linkname reflect_maplen reflect.maplen
+// Returns the number of entries in h; a nil map has length 0.
+func reflect_maplen(h *hmap) int {
+       if h == nil {
+               return 0
+       }
+       if raceenabled {
+               callerpc := getcallerpc(unsafe.Pointer( /* &h */ nil))
+               racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
+       }
+       return h.count
+}
+
+//go:linkname reflect_ismapkey reflect.ismapkey
+// Thin wrapper so package reflect can test map-key eligibility.
+func reflect_ismapkey(t *_type) bool {
+       return ismapkey(t)
+}
+
+const maxZero = 1024 // must match value in ../cmd/compile/internal/gc/walk.go
+// zeroVal is the shared all-zero area returned by map reads that miss;
+// value types larger than maxZero use the *_fat variants with a
+// type-specific zero value instead.
+var zeroVal [maxZero]byte
diff --git a/libgo/go/runtime/hashmap_fast.go b/libgo/go/runtime/hashmap_fast.go
new file mode 100644 (file)
index 0000000..4850b16
--- /dev/null
@@ -0,0 +1,398 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
+
+func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+               racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
+       }
+       if h == nil || h.count == 0 {
+               return unsafe.Pointer(&zeroVal[0])
+       }
+       if h.flags&hashWriting != 0 {
+               throw("concurrent map read and map write")
+       }
+       var b *bmap
+       if h.B == 0 {
+               // One-bucket table. No need to hash.
+               b = (*bmap)(h.buckets)
+       } else {
+               hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0), uintptr(t.keysize))
+               m := uintptr(1)<<h.B - 1
+               b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+               if c := h.oldbuckets; c != nil {
+                       oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+                       if !evacuated(oldb) {
+                               b = oldb
+                       }
+               }
+       }
+       for {
+               for i := uintptr(0); i < bucketCnt; i++ {
+                       k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
+                       if k != key {
+                               continue
+                       }
+                       x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+                       if x == empty {
+                               continue
+                       }
+                       return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
+               }
+               b = b.overflow(t)
+               if b == nil {
+                       return unsafe.Pointer(&zeroVal[0])
+               }
+       }
+}
+
+func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+               racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
+       }
+       if h == nil || h.count == 0 {
+               return unsafe.Pointer(&zeroVal[0]), false
+       }
+       if h.flags&hashWriting != 0 {
+               throw("concurrent map read and map write")
+       }
+       var b *bmap
+       if h.B == 0 {
+               // One-bucket table. No need to hash.
+               b = (*bmap)(h.buckets)
+       } else {
+               hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0), uintptr(t.keysize))
+               m := uintptr(1)<<h.B - 1
+               b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+               if c := h.oldbuckets; c != nil {
+                       oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+                       if !evacuated(oldb) {
+                               b = oldb
+                       }
+               }
+       }
+       for {
+               for i := uintptr(0); i < bucketCnt; i++ {
+                       k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
+                       if k != key {
+                               continue
+                       }
+                       x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+                       if x == empty {
+                               continue
+                       }
+                       return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
+               }
+               b = b.overflow(t)
+               if b == nil {
+                       return unsafe.Pointer(&zeroVal[0]), false
+               }
+       }
+}
+
+func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+               racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
+       }
+       if h == nil || h.count == 0 {
+               return unsafe.Pointer(&zeroVal[0])
+       }
+       if h.flags&hashWriting != 0 {
+               throw("concurrent map read and map write")
+       }
+       var b *bmap
+       if h.B == 0 {
+               // One-bucket table. No need to hash.
+               b = (*bmap)(h.buckets)
+       } else {
+               hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0), uintptr(t.keysize))
+               m := uintptr(1)<<h.B - 1
+               b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+               if c := h.oldbuckets; c != nil {
+                       oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+                       if !evacuated(oldb) {
+                               b = oldb
+                       }
+               }
+       }
+       for {
+               for i := uintptr(0); i < bucketCnt; i++ {
+                       k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
+                       if k != key {
+                               continue
+                       }
+                       x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+                       if x == empty {
+                               continue
+                       }
+                       return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
+               }
+               b = b.overflow(t)
+               if b == nil {
+                       return unsafe.Pointer(&zeroVal[0])
+               }
+       }
+}
+
+func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+               racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
+       }
+       if h == nil || h.count == 0 {
+               return unsafe.Pointer(&zeroVal[0]), false
+       }
+       if h.flags&hashWriting != 0 {
+               throw("concurrent map read and map write")
+       }
+       var b *bmap
+       if h.B == 0 {
+               // One-bucket table. No need to hash.
+               b = (*bmap)(h.buckets)
+       } else {
+               hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0), uintptr(t.keysize))
+               m := uintptr(1)<<h.B - 1
+               b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+               if c := h.oldbuckets; c != nil {
+                       oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+                       if !evacuated(oldb) {
+                               b = oldb
+                       }
+               }
+       }
+       for {
+               for i := uintptr(0); i < bucketCnt; i++ {
+                       k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
+                       if k != key {
+                               continue
+                       }
+                       x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+                       if x == empty {
+                               continue
+                       }
+                       return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
+               }
+               b = b.overflow(t)
+               if b == nil {
+                       return unsafe.Pointer(&zeroVal[0]), false
+               }
+       }
+}
+
+func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+               racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
+       }
+       if h == nil || h.count == 0 {
+               return unsafe.Pointer(&zeroVal[0])
+       }
+       if h.flags&hashWriting != 0 {
+               throw("concurrent map read and map write")
+       }
+       key := stringStructOf(&ky)
+       if h.B == 0 {
+               // One-bucket table.
+               b := (*bmap)(h.buckets)
+               if key.len < 32 {
+                       // short key, doing lots of comparisons is ok
+                       for i := uintptr(0); i < bucketCnt; i++ {
+                               x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+                               if x == empty {
+                                       continue
+                               }
+                               k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+                               if k.len != key.len {
+                                       continue
+                               }
+                               if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+                                       return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
+                               }
+                       }
+                       return unsafe.Pointer(&zeroVal[0])
+               }
+               // long key, try not to do more comparisons than necessary
+               keymaybe := uintptr(bucketCnt)
+               for i := uintptr(0); i < bucketCnt; i++ {
+                       x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+                       if x == empty {
+                               continue
+                       }
+                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+                       if k.len != key.len {
+                               continue
+                       }
+                       if k.str == key.str {
+                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
+                       }
+                       // check first 4 bytes
+                       // TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
+                       // four 1-byte comparisons.
+                       if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
+                               continue
+                       }
+                       // check last 4 bytes
+                       if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
+                               continue
+                       }
+                       if keymaybe != bucketCnt {
+                               // Two keys are potential matches. Use hash to distinguish them.
+                               goto dohash
+                       }
+                       keymaybe = i
+               }
+               if keymaybe != bucketCnt {
+                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
+                       if memequal(k.str, key.str, uintptr(key.len)) {
+                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
+                       }
+               }
+               return unsafe.Pointer(&zeroVal[0])
+       }
+dohash:
+       hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0), uintptr(t.keysize))
+       m := uintptr(1)<<h.B - 1
+       b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+       if c := h.oldbuckets; c != nil {
+               oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+               if !evacuated(oldb) {
+                       b = oldb
+               }
+       }
+       top := uint8(hash >> (sys.PtrSize*8 - 8))
+       if top < minTopHash {
+               top += minTopHash
+       }
+       for {
+               for i := uintptr(0); i < bucketCnt; i++ {
+                       x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+                       if x != top {
+                               continue
+                       }
+                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+                       if k.len != key.len {
+                               continue
+                       }
+                       if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
+                       }
+               }
+               b = b.overflow(t)
+               if b == nil {
+                       return unsafe.Pointer(&zeroVal[0])
+               }
+       }
+}
+
+func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
+       if raceenabled && h != nil {
+               callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
+               racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
+       }
+       if h == nil || h.count == 0 {
+               return unsafe.Pointer(&zeroVal[0]), false
+       }
+       if h.flags&hashWriting != 0 {
+               throw("concurrent map read and map write")
+       }
+       key := stringStructOf(&ky)
+       if h.B == 0 {
+               // One-bucket table.
+               b := (*bmap)(h.buckets)
+               if key.len < 32 {
+                       // short key, doing lots of comparisons is ok
+                       for i := uintptr(0); i < bucketCnt; i++ {
+                               x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+                               if x == empty {
+                                       continue
+                               }
+                               k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+                               if k.len != key.len {
+                                       continue
+                               }
+                               if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+                                       return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
+                               }
+                       }
+                       return unsafe.Pointer(&zeroVal[0]), false
+               }
+               // long key, try not to do more comparisons than necessary
+               keymaybe := uintptr(bucketCnt)
+               for i := uintptr(0); i < bucketCnt; i++ {
+                       x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+                       if x == empty {
+                               continue
+                       }
+                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+                       if k.len != key.len {
+                               continue
+                       }
+                       if k.str == key.str {
+                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
+                       }
+                       // check first 4 bytes
+                       if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
+                               continue
+                       }
+                       // check last 4 bytes
+                       if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
+                               continue
+                       }
+                       if keymaybe != bucketCnt {
+                               // Two keys are potential matches. Use hash to distinguish them.
+                               goto dohash
+                       }
+                       keymaybe = i
+               }
+               if keymaybe != bucketCnt {
+                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
+                       if memequal(k.str, key.str, uintptr(key.len)) {
+                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
+                       }
+               }
+               return unsafe.Pointer(&zeroVal[0]), false
+       }
+dohash:
+       hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0), uintptr(t.keysize))
+       m := uintptr(1)<<h.B - 1
+       b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
+       if c := h.oldbuckets; c != nil {
+               oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
+               if !evacuated(oldb) {
+                       b = oldb
+               }
+       }
+       top := uint8(hash >> (sys.PtrSize*8 - 8))
+       if top < minTopHash {
+               top += minTopHash
+       }
+       for {
+               for i := uintptr(0); i < bucketCnt; i++ {
+                       x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
+                       if x != top {
+                               continue
+                       }
+                       k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
+                       if k.len != key.len {
+                               continue
+                       }
+                       if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
+                               return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
+                       }
+               }
+               b = b.overflow(t)
+               if b == nil {
+                       return unsafe.Pointer(&zeroVal[0]), false
+               }
+       }
+}
index 95200a47481117f09acf0bae8765c0e5b2a4aa53..77affdfda12010e9a2cc8f37eeed3ffebbb74c0f 100644 (file)
@@ -30,13 +30,11 @@ func TestNegativeZero(t *testing.T) {
                t.Error("length wrong")
        }
 
-       /* gccgo fails this test; this is not required by the spec.
        for k := range m {
                if math.Copysign(1.0, k) > 0 {
                        t.Error("wrong sign")
                }
        }
-       */
 
        m = make(map[float64]bool, 0)
        m[math.Copysign(0.0, -1.0)] = true
@@ -46,13 +44,11 @@ func TestNegativeZero(t *testing.T) {
                t.Error("length wrong")
        }
 
-       /* gccgo fails this test; this is not required by the spec.
        for k := range m {
                if math.Copysign(1.0, k) < 0 {
                        t.Error("wrong sign")
                }
        }
-       */
 }
 
 // nan is a good test because nan != nan, and nan has
@@ -93,7 +89,6 @@ func TestAlias(t *testing.T) {
 }
 
 func TestGrowWithNaN(t *testing.T) {
-       t.Skip("fails with gccgo")
        m := make(map[float64]int, 4)
        nan := math.NaN()
        m[nan] = 1
@@ -115,7 +110,6 @@ func TestGrowWithNaN(t *testing.T) {
                        s |= v
                }
        }
-       t.Log("cnt:", cnt, "s:", s)
        if cnt != 3 {
                t.Error("NaN keys lost during grow")
        }
@@ -130,7 +124,6 @@ type FloatInt struct {
 }
 
 func TestGrowWithNegativeZero(t *testing.T) {
-       t.Skip("fails with gccgo")
        negzero := math.Copysign(0.0, -1.0)
        m := make(map[FloatInt]int, 4)
        m[FloatInt{0.0, 0}] = 1
@@ -407,7 +400,7 @@ func TestMapNanGrowIterator(t *testing.T) {
        nan := math.NaN()
        const nBuckets = 16
        // To fill nBuckets buckets takes LOAD * nBuckets keys.
-       nKeys := int(nBuckets * /* *runtime.HashLoad */ 6.5)
+       nKeys := int(nBuckets * *runtime.HashLoad)
 
        // Get map to full point with nan keys.
        for i := 0; i < nKeys; i++ {
@@ -439,10 +432,6 @@ func TestMapNanGrowIterator(t *testing.T) {
 }
 
 func TestMapIterOrder(t *testing.T) {
-       if runtime.Compiler == "gccgo" {
-               t.Skip("skipping for gccgo")
-       }
-
        for _, n := range [...]int{3, 7, 9, 15} {
                for i := 0; i < 1000; i++ {
                        // Make m be {0: true, 1: true, ..., n-1: true}.
@@ -478,9 +467,6 @@ func TestMapIterOrder(t *testing.T) {
 func TestMapSparseIterOrder(t *testing.T) {
        // Run several rounds to increase the probability
        // of failure. One is not enough.
-       if runtime.Compiler == "gccgo" {
-               t.Skip("skipping for gccgo")
-       }
 NextRound:
        for round := 0; round < 10; round++ {
                m := make(map[int]bool)
@@ -514,9 +500,6 @@ NextRound:
 }
 
 func TestMapStringBytesLookup(t *testing.T) {
-       if runtime.Compiler == "gccgo" {
-               t.Skip("skipping for gccgo")
-       }
        // Use large string keys to avoid small-allocation coalescing,
        // which can cause AllocsPerRun to report lower counts than it should.
        m := map[string]int{
@@ -532,6 +515,8 @@ func TestMapStringBytesLookup(t *testing.T) {
                t.Errorf(`m[string([]byte("2"))] = %d, want 2`, x)
        }
 
+       t.Skip("does not work on gccgo without better escape analysis")
+
        var x int
        n := testing.AllocsPerRun(100, func() {
                x += m[string(buf)]
index 48ae3e4ffdc9127eca23609d3500a29e84cc4ff8..117c5e5789cb51e9224a58ec14bebbebb86bbe15 100644 (file)
@@ -2,7 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build ignore
 // +build !msan
 
 // Dummy MSan support API, used when not built with -msan.
diff --git a/libgo/go/runtime/race0.go b/libgo/go/runtime/race0.go
new file mode 100644 (file)
index 0000000..f1d3706
--- /dev/null
@@ -0,0 +1,40 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !race
+
+// Dummy race detection API, used when not built with -race.
+
+package runtime
+
+import (
+       "unsafe"
+)
+
+const raceenabled = false
+
+// Because raceenabled is false, none of these functions should be called.
+
+func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr)  { throw("race") }
+func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func raceinit() (uintptr, uintptr)                                          { throw("race"); return 0, 0 }
+func racefini()                                                             { throw("race") }
+func raceproccreate() uintptr                                               { throw("race"); return 0 }
+func raceprocdestroy(ctx uintptr)                                           { throw("race") }
+func racemapshadow(addr unsafe.Pointer, size uintptr)                       { throw("race") }
+func racewritepc(addr unsafe.Pointer, callerpc, pc uintptr)                 { throw("race") }
+func racereadpc(addr unsafe.Pointer, callerpc, pc uintptr)                  { throw("race") }
+func racereadrangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr)         { throw("race") }
+func racewriterangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr)        { throw("race") }
+func raceacquire(addr unsafe.Pointer)                                       { throw("race") }
+func raceacquireg(gp *g, addr unsafe.Pointer)                               { throw("race") }
+func racerelease(addr unsafe.Pointer)                                       { throw("race") }
+func racereleaseg(gp *g, addr unsafe.Pointer)                               { throw("race") }
+func racereleasemerge(addr unsafe.Pointer)                                  { throw("race") }
+func racereleasemergeg(gp *g, addr unsafe.Pointer)                          { throw("race") }
+func racefingo()                                                            { throw("race") }
+func racemalloc(p unsafe.Pointer, sz uintptr)                               { throw("race") }
+func racefree(p unsafe.Pointer, sz uintptr)                                 { throw("race") }
+func racegostart(pc uintptr) uintptr                                        { throw("race"); return 0 }
+func racegoend()                                                            { throw("race") }
diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go
new file mode 100644 (file)
index 0000000..48abbfa
--- /dev/null
@@ -0,0 +1,253 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+       "runtime/internal/sys"
+       "unsafe"
+)
+
+// Should be a built-in for unsafe.Pointer?
+//go:nosplit
+func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
+       return unsafe.Pointer(uintptr(p) + x)
+}
+
+// getg returns the pointer to the current g.
+// The compiler rewrites calls to this function into instructions
+// that fetch the g directly (from TLS or from the dedicated register).
+func getg() *g
+
+// mcall switches from the g to the g0 stack and invokes fn(g),
+// where g is the goroutine that made the call.
+// mcall saves g's current PC/SP in g->sched so that it can be restored later.
+// It is up to fn to arrange for that later execution, typically by recording
+// g in a data structure, causing something to call ready(g) later.
+// mcall returns to the original goroutine g later, when g has been rescheduled.
+// fn must not return at all; typically it ends by calling schedule, to let the m
+// run other goroutines.
+//
+// mcall can only be called from g stacks (not g0, not gsignal).
+//
+// This must NOT be go:noescape: if fn is a stack-allocated closure,
+// fn puts g on a run queue, and g executes before fn returns, the
+// closure will be invalidated while it is still executing.
+func mcall(fn func(*g))
+
+// systemstack runs fn on a system stack.
+// If systemstack is called from the per-OS-thread (g0) stack, or
+// if systemstack is called from the signal handling (gsignal) stack,
+// systemstack calls fn directly and returns.
+// Otherwise, systemstack is being called from the limited stack
+// of an ordinary goroutine. In this case, systemstack switches
+// to the per-OS-thread stack, calls fn, and switches back.
+// It is common to use a func literal as the argument, in order
+// to share inputs and outputs with the code around the call
+// to system stack:
+//
+//     ... set up y ...
+//     systemstack(func() {
+//             x = bigcall(y)
+//     })
+//     ... use x ...
+//
+//go:noescape
+func systemstack(fn func())
+
+func badsystemstack() {
+       throw("systemstack called from unexpected goroutine")
+}
+
+// memclr clears n bytes starting at ptr.
+// in memclr_*.s
+//go:noescape
+func memclr(ptr unsafe.Pointer, n uintptr)
+
+//go:linkname reflect_memclr reflect.memclr
+func reflect_memclr(ptr unsafe.Pointer, n uintptr) {
+       memclr(ptr, n)
+}
+
+// memmove copies n bytes from "from" to "to".
+// in memmove_*.s
+//go:noescape
+func memmove(to, from unsafe.Pointer, n uintptr)
+
+//go:linkname reflect_memmove reflect.memmove
+func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
+       memmove(to, from, n)
+}
+
+// exported value for testing
+var hashLoad = loadFactor
+
+// in asm_*.s
+func fastrand1() uint32
+
+// in asm_*.s
+//go:noescape
+func memequal(a, b unsafe.Pointer, size uintptr) bool
+
+// noescape hides a pointer from escape analysis.  noescape is
+// the identity function but escape analysis doesn't think the
+// output depends on the input.  noescape is inlined and currently
+// compiles down to a single xor instruction.
+// USE CAREFULLY!
+//go:nosplit
+func noescape(p unsafe.Pointer) unsafe.Pointer {
+       x := uintptr(p)
+       return unsafe.Pointer(x ^ 0)
+}
+
+func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32
+
+//go:noescape
+func jmpdefer(fv *funcval, argp uintptr)
+func exit1(code int32)
+func asminit()
+func setg(gg *g)
+func breakpoint()
+
+// reflectcall calls fn with a copy of the n argument bytes pointed at by arg.
+// After fn returns, reflectcall copies n-retoffset result bytes
+// back into arg+retoffset before returning. If copying result bytes back,
+// the caller should pass the argument frame type as argtype, so that
+// call can execute appropriate write barriers during the copy.
+// Package reflect passes a frame type. In package runtime, there is only
+// one call that copies results back, in cgocallbackg1, and it does NOT pass a
+// frame type, meaning there are no write barriers invoked. See that call
+// site for justification.
+func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32)
+
+func procyield(cycles uint32)
+
+type neverCallThisFunction struct{}
+
+// goexit is the return stub at the top of every goroutine call stack.
+// Each goroutine stack is constructed as if goexit called the
+// goroutine's entry point function, so that when the entry point
+// function returns, it will return to goexit, which will call goexit1
+// to perform the actual exit.
+//
+// This function must never be called directly. Call goexit1 instead.
+// gentraceback assumes that goexit terminates the stack. A direct
+// call on the stack will cause gentraceback to stop walking the stack
+// prematurely and if there are leftover stack barriers it may panic.
+func goexit(neverCallThisFunction)
+
+// publicationBarrier performs a store/store barrier (a "publication"
+// or "export" barrier). Some form of synchronization is required
+// between initializing an object and making that object accessible to
+// another processor. Without synchronization, the initialization
+// writes and the "publication" write may be reordered, allowing the
+// other processor to follow the pointer and observe an uninitialized
+// object. In general, higher-level synchronization should be used,
+// such as locking or an atomic pointer write. publicationBarrier is
+// for when those aren't an option, such as in the implementation of
+// the memory manager.
+//
+// There's no corresponding barrier for the read side because the read
+// side naturally has a data dependency order. All architectures that
+// Go supports or seems likely to ever support automatically enforce
+// data dependency ordering.
+func publicationBarrier()
+
+//go:noescape
+func setcallerpc(argp unsafe.Pointer, pc uintptr)
+
+// getcallerpc returns the program counter (PC) of its caller's caller.
+// getcallersp returns the stack pointer (SP) of its caller's caller.
+// For both, the argp must be a pointer to the caller's first function argument.
+// The implementation may or may not use argp, depending on
+// the architecture.
+//
+// For example:
+//
+//     func f(arg1, arg2, arg3 int) {
+//             pc := getcallerpc(unsafe.Pointer(&arg1))
+//             sp := getcallersp(unsafe.Pointer(&arg1))
+//     }
+//
+// These two lines find the PC and SP immediately following
+// the call to f (where f will return).
+//
+// The call to getcallerpc and getcallersp must be done in the
+// frame being asked about. It would not be correct for f to pass &arg1
+// to another function g and let g call getcallerpc/getcallersp.
+// The call inside g might return information about g's caller or
+// information about f's caller or complete garbage.
+//
+// The result of getcallersp is correct at the time of the return,
+// but it may be invalidated by any subsequent call to a function
+// that might relocate the stack in order to grow or shrink it.
+// A general rule is that the result of getcallersp should be used
+// immediately and can only be passed to nosplit functions.
+
+//go:noescape
+func getcallerpc(argp unsafe.Pointer) uintptr
+
+//go:noescape
+func getcallersp(argp unsafe.Pointer) uintptr
+
+// argp used in Defer structs when there is no argp.
+const _NoArgs = ^uintptr(0)
+
+// //go:linkname time_now time.now
+// func time_now() (sec int64, nsec int32)
+
+/*
+func unixnanotime() int64 {
+       sec, nsec := time_now()
+       return sec*1e9 + int64(nsec)
+}
+*/
+
+// round n up to a multiple of a.  a must be a power of 2.
+func round(n, a uintptr) uintptr {
+       return (n + a - 1) &^ (a - 1)
+}
+
+/*
+// checkASM returns whether assembly runtime checks have passed.
+func checkASM() bool
+*/
+
+// throw crashes the program.
+// For gccgo unless and until we port panic.go.
+func throw(string)
+
+// newobject allocates a new object.
+// For gccgo unless and until we port malloc.go.
+func newobject(*_type) unsafe.Pointer
+
+// newarray allocates a new array of objects.
+// For gccgo unless and until we port malloc.go.
+func newarray(*_type, int) unsafe.Pointer
+
+// funcPC returns the entry PC of the function f.
+// It assumes that f is a func value. Otherwise the behavior is undefined.
+// For gccgo here unless and until we port proc.go.
+//go:nosplit
+func funcPC(f interface{}) uintptr {
+       return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
+}
+
+// typedmemmove copies a typed value.
+// For gccgo for now.
+//go:nosplit
+func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
+       memmove(dst, src, typ.size)
+}
+
+// Here for gccgo unless and until we port string.go.
+type stringStruct struct {
+       str unsafe.Pointer
+       len int
+}
+
+// Here for gccgo unless and until we port string.go.
+func stringStructOf(sp *string) *stringStruct {
+       return (*stringStruct)(unsafe.Pointer(sp))
+}
index fb5f034dd68d49e7a8e4e301f9d97705b86a3432..d9b0b5590d2bf579a63fb2037a8d5724b305314b 100644 (file)
@@ -16,12 +16,12 @@ type _type struct {
        size       uintptr
        hash       uint32
 
-       hashfn  func(unsafe.Pointer, uintptr) uintptr
+       hashfn  func(unsafe.Pointer, uintptr, uintptr) uintptr
        equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) bool
 
        gc     unsafe.Pointer
        string *string
-       *uncommonType
+       *uncommontype
        ptrToThis *_type
 }
 
@@ -33,7 +33,7 @@ type method struct {
        tfn     unsafe.Pointer
 }
 
-type uncommonType struct {
+type uncommontype struct {
        name    *string
        pkgPath *string
        methods []method
@@ -45,25 +45,34 @@ type imethod struct {
        typ     *_type
 }
 
-type interfaceType struct {
+type interfacetype struct {
        typ     _type
        methods []imethod
 }
 
-type mapType struct {
-       typ  _type
-       key  *_type
-       elem *_type
+type maptype struct {
+       typ           _type
+       key           *_type
+       elem          *_type
+       bucket        *_type // internal type representing a hash bucket
+       hmap          *_type // internal type representing a hmap
+       keysize       uint8  // size of key slot
+       indirectkey   bool   // store ptr to key instead of key itself
+       valuesize     uint8  // size of value slot
+       indirectvalue bool   // store ptr to value instead of value itself
+       bucketsize    uint16 // size of bucket
+       reflexivekey  bool   // true if k==k for all keys
+       needkeyupdate bool   // true if we need to update key on an overwrite
 }
 
-type arrayType struct {
+type arraytype struct {
        typ   _type
        elem  *_type
        slice *_type
        len   uintptr
 }
 
-type chanType struct {
+type chantype struct {
        typ  _type
        elem *_type
        dir  uintptr
diff --git a/libgo/go/runtime/typekind.go b/libgo/go/runtime/typekind.go
new file mode 100644 (file)
index 0000000..abb2777
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+       kindBool = 1 + iota
+       kindInt
+       kindInt8
+       kindInt16
+       kindInt32
+       kindInt64
+       kindUint
+       kindUint8
+       kindUint16
+       kindUint32
+       kindUint64
+       kindUintptr
+       kindFloat32
+       kindFloat64
+       kindComplex64
+       kindComplex128
+       kindArray
+       kindChan
+       kindFunc
+       kindInterface
+       kindMap
+       kindPtr
+       kindSlice
+       kindString
+       kindStruct
+       kindUnsafePointer
+
+       kindDirectIface = 1 << 5
+       kindGCProg      = 1 << 6
+       kindNoPointers  = 1 << 7
+       kindMask        = (1 << 5) - 1
+)
+
+// isDirectIface reports whether t is stored directly in an interface value.
+func isDirectIface(t *_type) bool {
+       return t.kind&kindDirectIface != 0
+}
index 44402d4481efc66ccf8edc6b117ed77c58d4fcbb..6e4c8fd89203fe85b2a11c5c8211de99e0ab48b7 100644 (file)
@@ -1064,12 +1064,6 @@ func reflect.chanlen(c *Hchan) (len int) {
                len = c->qcount;
 }
 
-intgo
-__go_chan_len(Hchan *c)
-{
-       return reflect_chanlen(c);
-}
-
 func reflect.chancap(c *Hchan) (cap int) {
        if(c == nil)
                cap = 0;
index 4bd79d2005887b1c9032adfc305e215a11e398d9..c1a8bb72efac157a2c0095be18c57ac717d27a9c 100644 (file)
@@ -9,25 +9,30 @@
 #include <stdlib.h>
 
 #include "runtime.h"
-#include "map.h"
 
-struct __go_map *
-__go_construct_map (const struct __go_map_descriptor *descriptor,
+extern void *makemap (const struct __go_map_type *, int64_t hint,
+                     void *, void *)
+  __asm__ (GOSYM_PREFIX "runtime.makemap");
+
+extern void mapassign1 (const struct __go_map_type *, void *hmap,
+                       const void *key, const void *val)
+  __asm__ (GOSYM_PREFIX "runtime.mapassign1");
+
+void *
+__go_construct_map (const struct __go_map_type *type,
                    uintptr_t count, uintptr_t entry_size,
-                   uintptr_t val_offset, uintptr_t val_size,
-                   const void *ventries)
+                   uintptr_t val_offset, const void *ventries)
 {
-  struct __go_map *ret;
+  void *ret;
   const unsigned char *entries;
   uintptr_t i;
 
-  ret = __go_new_map (descriptor, count);
+  ret = makemap(type, (int64_t) count, NULL, NULL);
 
   entries = (const unsigned char *) ventries;
   for (i = 0; i < count; ++i)
     {
-      void *val = __go_map_index (ret, entries, 1);
-      __builtin_memcpy (val, entries + val_offset, val_size);
+      mapassign1 (type, ret, entries, entries + val_offset);
       entries += entry_size;
     }
 
index 40b716eb4af0d0e5e10abeb7b405e71fe7dda515..62302b5ebe2c45400e0da6dfe28976dabf9e047e 100644 (file)
@@ -26,6 +26,8 @@ __go_empty_interface_compare (struct __go_empty_interface left,
   if (!__go_type_descriptors_equal (left_descriptor,
                                    right.__type_descriptor))
     return 1;
+  if (left_descriptor->__equalfn == NULL)
+    runtime_panicstring ("comparing uncomparable types");
   if (__go_is_pointer_type (left_descriptor))
     return left.__object == right.__object ? 0 : 1;
   if (!__go_call_equalfn (left_descriptor->__equalfn, left.__object,
index e810750d5db5b80d1aae5db7546b79ecd731042e..839d18916231441baddafb8dddb956d60d2a995a 100644 (file)
@@ -24,6 +24,8 @@ __go_empty_interface_value_compare (
     return 1;
   if (!__go_type_descriptors_equal (left_descriptor, right_descriptor))
     return 1;
+  if (left_descriptor->__equalfn == NULL)
+    runtime_panicstring ("comparing uncomparable types");
   if (__go_is_pointer_type (left_descriptor))
     return left.__object == val ? 0 : 1;
   if (!__go_call_equalfn (left_descriptor->__equalfn, left.__object, val,
index a7e2c133440e17685ad4144802c93f4a8e35628d..2b3ac325c10e4264ae465ab181ef9fa05754bea6 100644 (file)
@@ -6,7 +6,6 @@
 
 #include "runtime.h"
 #include "go-type.h"
-#include "map.h"
 
 /* The compiler will track fields that have the tag go:"track".  Any
    function that refers to such a field will call this function with a
@@ -34,16 +33,26 @@ extern const char _edata[] __attribute__ ((weak));
 extern const char __edata[] __attribute__ ((weak));
 extern const char __bss_start[] __attribute__ ((weak));
 
-void runtime_Fieldtrack (struct __go_map *) __asm__ (GOSYM_PREFIX "runtime.Fieldtrack");
+extern void mapassign1 (const struct __go_map_type *, void *hmap,
+                       const void *key, const void *val)
+  __asm__ (GOSYM_PREFIX "runtime.mapassign1");
+
+/* The type descriptor for map[string]bool.  */
+extern const char __go_td_MN6_string__N4_bool[] __attribute__ ((weak));
+
+void runtime_Fieldtrack (void *) __asm__ (GOSYM_PREFIX "runtime.Fieldtrack");
 
 void
-runtime_Fieldtrack (struct __go_map *m)
+runtime_Fieldtrack (void *m)
 {
   const char *p;
   const char *pend;
   const char *prefix;
   size_t prefix_len;
 
+  if (__go_td_MN6_string__N4_bool == NULL)
+    return;
+
   p = __data_start;
   if (p == NULL)
     p = __etext;
@@ -86,14 +95,12 @@ runtime_Fieldtrack (struct __go_map *m)
       if (__builtin_memchr (q1, '\0', q2 - q1) == NULL)
        {
          String s;
-         void *v;
-         _Bool *pb;
+         _Bool b;
 
          s.str = (const byte *) q1;
          s.len = q2 - q1;
-         v = __go_map_index (m, &s, 1);
-         pb = (_Bool *) v;
-         *pb = 1;
+         b = 1;
+         mapassign1((const void*) __go_td_MN6_string__N4_bool, m, &s, &b);
        }
 
       p = q2;
index 1d367753a1e4bcd3d6a306f09fbed5f15310ef28..14999df1dd1d097ecf4493c155f3c26e29d848d4 100644 (file)
@@ -26,6 +26,8 @@ __go_interface_compare (struct __go_interface left,
   left_descriptor = left.__methods[0];
   if (!__go_type_descriptors_equal (left_descriptor, right.__methods[0]))
     return 1;
+  if (left_descriptor->__equalfn == NULL)
+    runtime_panicstring ("comparing uncomparable types");
   if (__go_is_pointer_type (left_descriptor))
     return left.__object == right.__object ? 0 : 1;
   if (!__go_call_equalfn (left_descriptor->__equalfn, left.__object,
index d1e6fd084d2e714d630f844d16258c192b52b83f..4c47b7cf04d5789e1b6792fb0406efab1e7f2971 100644 (file)
@@ -25,6 +25,8 @@ __go_interface_empty_compare (struct __go_interface left,
   left_descriptor = left.__methods[0];
   if (!__go_type_descriptors_equal (left_descriptor, right.__type_descriptor))
     return 1;
+  if (left_descriptor->__equalfn == NULL)
+    runtime_panicstring ("comparing uncomparable types");
   if (__go_is_pointer_type (left_descriptor))
     return left.__object == right.__object ? 0 : 1;
   if (!__go_call_equalfn (left_descriptor->__equalfn, left.__object,
index 36b6efdc9f1068060a2885c0ad89722e0412d0a3..5dc91d0330f1cfa278c5d963784b49253650ea75 100644 (file)
@@ -24,6 +24,8 @@ __go_interface_value_compare (
   left_descriptor = left.__methods[0];
   if (!__go_type_descriptors_equal (left_descriptor, right_descriptor))
     return 1;
+  if (left_descriptor->__equalfn == NULL)
+    runtime_panicstring ("comparing uncomparable types");
   if (__go_is_pointer_type (left_descriptor))
     return left.__object == val ? 0 : 1;
   if (!__go_call_equalfn (left_descriptor->__equalfn, left.__object, val,
diff --git a/libgo/runtime/go-map-delete.c b/libgo/runtime/go-map-delete.c
deleted file mode 100644 (file)
index fb7c331..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-/* go-map-delete.c -- delete an entry from a map.
-
-   Copyright 2009 The Go Authors. All rights reserved.
-   Use of this source code is governed by a BSD-style
-   license that can be found in the LICENSE file.  */
-
-#include <stddef.h>
-#include <stdlib.h>
-
-#include "runtime.h"
-#include "malloc.h"
-#include "go-alloc.h"
-#include "go-assert.h"
-#include "map.h"
-
-/* Delete the entry matching KEY from MAP.  */
-
-void
-__go_map_delete (struct __go_map *map, const void *key)
-{
-  const struct __go_map_descriptor *descriptor;
-  const struct __go_type_descriptor *key_descriptor;
-  uintptr_t key_offset;
-  const FuncVal *equalfn;
-  size_t key_hash;
-  size_t key_size;
-  size_t bucket_index;
-  void **pentry;
-
-  if (map == NULL)
-    return;
-
-  descriptor = map->__descriptor;
-
-  key_descriptor = descriptor->__map_descriptor->__key_type;
-  key_offset = descriptor->__key_offset;
-  key_size = key_descriptor->__size;
-  if (key_size == 0)
-    return;
-
-  __go_assert (key_size != -1UL);
-  equalfn = key_descriptor->__equalfn;
-
-  key_hash = __go_call_hashfn (key_descriptor->__hashfn, key, key_size);
-  bucket_index = key_hash % map->__bucket_count;
-
-  pentry = map->__buckets + bucket_index;
-  while (*pentry != NULL)
-    {
-      char *entry = (char *) *pentry;
-      if (__go_call_equalfn (equalfn, key, entry + key_offset, key_size))
-       {
-         *pentry = *(void **) entry;
-         if (descriptor->__entry_size >= TinySize)
-           __go_free (entry);
-         map->__element_count -= 1;
-         break;
-       }
-      pentry = (void **) entry;
-    }
-}
diff --git a/libgo/runtime/go-map-index.c b/libgo/runtime/go-map-index.c
deleted file mode 100644 (file)
index 353041d..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-/* go-map-index.c -- find or insert an entry in a map.
-
-   Copyright 2009 The Go Authors. All rights reserved.
-   Use of this source code is governed by a BSD-style
-   license that can be found in the LICENSE file.  */
-
-#include <stddef.h>
-#include <stdlib.h>
-
-#include "runtime.h"
-#include "malloc.h"
-#include "go-alloc.h"
-#include "go-assert.h"
-#include "map.h"
-
-/* Rehash MAP to a larger size.  */
-
-static void
-__go_map_rehash (struct __go_map *map)
-{
-  const struct __go_map_descriptor *descriptor;
-  const struct __go_type_descriptor *key_descriptor;
-  uintptr_t key_offset;
-  size_t key_size;
-  const FuncVal *hashfn;
-  uintptr_t old_bucket_count;
-  void **old_buckets;
-  uintptr_t new_bucket_count;
-  void **new_buckets;
-  uintptr_t i;
-
-  descriptor = map->__descriptor;
-
-  key_descriptor = descriptor->__map_descriptor->__key_type;
-  key_offset = descriptor->__key_offset;
-  key_size = key_descriptor->__size;
-  hashfn = key_descriptor->__hashfn;
-
-  old_bucket_count = map->__bucket_count;
-  old_buckets = map->__buckets;
-
-  new_bucket_count = __go_map_next_prime (old_bucket_count * 2);
-  new_buckets = (void **) __go_alloc (new_bucket_count * sizeof (void *));
-  __builtin_memset (new_buckets, 0, new_bucket_count * sizeof (void *));
-
-  for (i = 0; i < old_bucket_count; ++i)
-    {
-      char* entry;
-      char* next;
-
-      for (entry = old_buckets[i]; entry != NULL; entry = next)
-       {
-         size_t key_hash;
-         size_t new_bucket_index;
-
-         /* We could speed up rehashing at the cost of memory space
-            by caching the hash code.  */
-         key_hash = __go_call_hashfn (hashfn, entry + key_offset, key_size);
-         new_bucket_index = key_hash % new_bucket_count;
-
-         next = *(char **) entry;
-         *(char **) entry = new_buckets[new_bucket_index];
-         new_buckets[new_bucket_index] = entry;
-       }
-    }
-
-  if (old_bucket_count * sizeof (void *) >= TinySize)
-    __go_free (old_buckets);
-
-  map->__bucket_count = new_bucket_count;
-  map->__buckets = new_buckets;
-}
-
-/* Find KEY in MAP, return a pointer to the value.  If KEY is not
-   present, then if INSERT is false, return NULL, and if INSERT is
-   true, insert a new value and zero-initialize it before returning a
-   pointer to it.  */
-
-void *
-__go_map_index (struct __go_map *map, const void *key, _Bool insert)
-{
-  const struct __go_map_descriptor *descriptor;
-  const struct __go_type_descriptor *key_descriptor;
-  uintptr_t key_offset;
-  const FuncVal *equalfn;
-  size_t key_hash;
-  size_t key_size;
-  size_t bucket_index;
-  char *entry;
-
-  if (map == NULL)
-    {
-      if (insert)
-       runtime_panicstring ("assignment to entry in nil map");
-      return NULL;
-    }
-
-  descriptor = map->__descriptor;
-
-  key_descriptor = descriptor->__map_descriptor->__key_type;
-  key_offset = descriptor->__key_offset;
-  key_size = key_descriptor->__size;
-  __go_assert (key_size != -1UL);
-  equalfn = key_descriptor->__equalfn;
-
-  key_hash = __go_call_hashfn (key_descriptor->__hashfn, key, key_size);
-  bucket_index = key_hash % map->__bucket_count;
-
-  entry = (char *) map->__buckets[bucket_index];
-  while (entry != NULL)
-    {
-      if (__go_call_equalfn (equalfn, key, entry + key_offset, key_size))
-       return entry + descriptor->__val_offset;
-      entry = *(char **) entry;
-    }
-
-  if (!insert)
-    return NULL;
-
-  if (map->__element_count >= map->__bucket_count)
-    {
-      __go_map_rehash (map);
-      bucket_index = key_hash % map->__bucket_count;
-    }
-
-  entry = (char *) __go_alloc (descriptor->__entry_size);
-  __builtin_memset (entry, 0, descriptor->__entry_size);
-
-  __builtin_memcpy (entry + key_offset, key, key_size);
-
-  *(char **) entry = map->__buckets[bucket_index];
-  map->__buckets[bucket_index] = entry;
-
-  map->__element_count += 1;
-
-  return entry + descriptor->__val_offset;
-}
diff --git a/libgo/runtime/go-map-len.c b/libgo/runtime/go-map-len.c
deleted file mode 100644 (file)
index 7da10c2..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/* go-map-len.c -- return the length of a map.
-
-   Copyright 2009 The Go Authors. All rights reserved.
-   Use of this source code is governed by a BSD-style
-   license that can be found in the LICENSE file.  */
-
-#include <stddef.h>
-
-#include "runtime.h"
-#include "go-assert.h"
-#include "map.h"
-
-/* Return the length of a map.  This could be done inline, of course,
-   but I'm doing it as a function for now to make it easy to change
-   the map structure.  */
-
-intgo
-__go_map_len (struct __go_map *map)
-{
-  if (map == NULL)
-    return 0;
-  __go_assert (map->__element_count
-              == (uintptr_t) (intgo) map->__element_count);
-  return map->__element_count;
-}
diff --git a/libgo/runtime/go-map-range.c b/libgo/runtime/go-map-range.c
deleted file mode 100644 (file)
index 5dbb92c..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-/* go-map-range.c -- implement a range clause over a map.
-
-   Copyright 2009, 2010 The Go Authors. All rights reserved.
-   Use of this source code is governed by a BSD-style
-   license that can be found in the LICENSE file.  */
-
-#include "runtime.h"
-#include "go-assert.h"
-#include "map.h"
-
-/* Initialize a range over a map.  */
-
-void
-__go_mapiterinit (const struct __go_map *h, struct __go_hash_iter *it)
-{
-  it->entry = NULL;
-  if (h != NULL)
-    {
-      it->map = h;
-      it->next_entry = NULL;
-      it->bucket = 0;
-      --it->bucket;
-      __go_mapiternext(it);
-    }
-}
-
-/* Move to the next iteration, updating *HITER.  */
-
-void
-__go_mapiternext (struct __go_hash_iter *it)
-{
-  const void *entry;
-
-  entry = it->next_entry;
-  if (entry == NULL)
-    {
-      const struct __go_map *map;
-      uintptr_t bucket;
-
-      map = it->map;
-      bucket = it->bucket;
-      while (1)
-       {
-         ++bucket;
-         if (bucket >= map->__bucket_count)
-           {
-             /* Map iteration is complete.  */
-             it->entry = NULL;
-             return;
-           }
-         entry = map->__buckets[bucket];
-         if (entry != NULL)
-           break;
-       }
-      it->bucket = bucket;
-    }
-  it->entry = entry;
-  it->next_entry = *(const void * const *) entry;
-}
-
-/* Get the key of the current iteration.  */
-
-void
-__go_mapiter1 (struct __go_hash_iter *it, unsigned char *key)
-{
-  const struct __go_map *map;
-  const struct __go_map_descriptor *descriptor;
-  const struct __go_type_descriptor *key_descriptor;
-  const char *p;
-
-  map = it->map;
-  descriptor = map->__descriptor;
-  key_descriptor = descriptor->__map_descriptor->__key_type;
-  p = it->entry;
-  __go_assert (p != NULL);
-  __builtin_memcpy (key, p + descriptor->__key_offset, key_descriptor->__size);
-}
-
-/* Get the key and value of the current iteration.  */
-
-void
-__go_mapiter2 (struct __go_hash_iter *it, unsigned char *key,
-              unsigned char *val)
-{
-  const struct __go_map *map;
-  const struct __go_map_descriptor *descriptor;
-  const struct __go_map_type *map_descriptor;
-  const struct __go_type_descriptor *key_descriptor;
-  const struct __go_type_descriptor *val_descriptor;
-  const char *p;
-
-  map = it->map;
-  descriptor = map->__descriptor;
-  map_descriptor = descriptor->__map_descriptor;
-  key_descriptor = map_descriptor->__key_type;
-  val_descriptor = map_descriptor->__val_type;
-  p = it->entry;
-  __go_assert (p != NULL);
-  __builtin_memcpy (key, p + descriptor->__key_offset,
-                   key_descriptor->__size);
-  __builtin_memcpy (val, p + descriptor->__val_offset,
-                   val_descriptor->__size);
-}
diff --git a/libgo/runtime/go-memclr.c b/libgo/runtime/go-memclr.c
new file mode 100644 (file)
index 0000000..de6f39a
--- /dev/null
@@ -0,0 +1,16 @@
+/* go-memclr.c -- clear a memory buffer
+
+   Copyright 2016 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "runtime.h"
+
+void memclr(void *, uintptr)
+  __asm__ (GOSYM_PREFIX "runtime.memclr");
+
+void
+memclr (void *p1, uintptr len)
+{
+  __builtin_memset (p1, 0, len);
+}
diff --git a/libgo/runtime/go-memequal.c b/libgo/runtime/go-memequal.c
new file mode 100644 (file)
index 0000000..5f514aa
--- /dev/null
@@ -0,0 +1,16 @@
+/* go-memequal.c -- compare memory buffers for equality
+
+   Copyright 2016 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "runtime.h"
+
+_Bool memequal (void *, void *, uintptr)
+  __asm__ (GOSYM_PREFIX "runtime.memequal");
+
+_Bool
+memequal (void *p1, void *p2, uintptr len)
+{
+  return __builtin_memcmp (p1, p2, len) == 0;
+}
diff --git a/libgo/runtime/go-memmove.c b/libgo/runtime/go-memmove.c
new file mode 100644 (file)
index 0000000..a6fda08
--- /dev/null
@@ -0,0 +1,16 @@
+/* go-memmove.c -- move one memory buffer to another
+
+   Copyright 2016 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.  */
+
+#include "runtime.h"
+
+void move(void *, void *, uintptr)
+  __asm__ (GOSYM_PREFIX "runtime.memmove");
+
+void
+move (void *p1, void *p2, uintptr len)
+{
+  __builtin_memmove (p1, p2, len);
+}
diff --git a/libgo/runtime/go-new-map.c b/libgo/runtime/go-new-map.c
deleted file mode 100644 (file)
index c289bc0..0000000
+++ /dev/null
@@ -1,142 +0,0 @@
-/* go-new-map.c -- allocate a new map.
-
-   Copyright 2009 The Go Authors. All rights reserved.
-   Use of this source code is governed by a BSD-style
-   license that can be found in the LICENSE file.  */
-
-#include "runtime.h"
-#include "go-alloc.h"
-#include "map.h"
-
-/* List of prime numbers, copied from libstdc++/src/hashtable.c.  */
-
-static const unsigned long prime_list[] = /* 256 + 1 or 256 + 48 + 1 */
-{
-  2ul, 3ul, 5ul, 7ul, 11ul, 13ul, 17ul, 19ul, 23ul, 29ul, 31ul,
-  37ul, 41ul, 43ul, 47ul, 53ul, 59ul, 61ul, 67ul, 71ul, 73ul, 79ul,
-  83ul, 89ul, 97ul, 103ul, 109ul, 113ul, 127ul, 137ul, 139ul, 149ul,
-  157ul, 167ul, 179ul, 193ul, 199ul, 211ul, 227ul, 241ul, 257ul,
-  277ul, 293ul, 313ul, 337ul, 359ul, 383ul, 409ul, 439ul, 467ul,
-  503ul, 541ul, 577ul, 619ul, 661ul, 709ul, 761ul, 823ul, 887ul,
-  953ul, 1031ul, 1109ul, 1193ul, 1289ul, 1381ul, 1493ul, 1613ul,
-  1741ul, 1879ul, 2029ul, 2179ul, 2357ul, 2549ul, 2753ul, 2971ul,
-  3209ul, 3469ul, 3739ul, 4027ul, 4349ul, 4703ul, 5087ul, 5503ul,
-  5953ul, 6427ul, 6949ul, 7517ul, 8123ul, 8783ul, 9497ul, 10273ul,
-  11113ul, 12011ul, 12983ul, 14033ul, 15173ul, 16411ul, 17749ul,
-  19183ul, 20753ul, 22447ul, 24281ul, 26267ul, 28411ul, 30727ul,
-  33223ul, 35933ul, 38873ul, 42043ul, 45481ul, 49201ul, 53201ul,
-  57557ul, 62233ul, 67307ul, 72817ul, 78779ul, 85229ul, 92203ul,
-  99733ul, 107897ul, 116731ul, 126271ul, 136607ul, 147793ul,
-  159871ul, 172933ul, 187091ul, 202409ul, 218971ul, 236897ul,
-  256279ul, 277261ul, 299951ul, 324503ul, 351061ul, 379787ul,
-  410857ul, 444487ul, 480881ul, 520241ul, 562841ul, 608903ul,
-  658753ul, 712697ul, 771049ul, 834181ul, 902483ul, 976369ul,
-  1056323ul, 1142821ul, 1236397ul, 1337629ul, 1447153ul, 1565659ul,
-  1693859ul, 1832561ul, 1982627ul, 2144977ul, 2320627ul, 2510653ul,
-  2716249ul, 2938679ul, 3179303ul, 3439651ul, 3721303ul, 4026031ul,
-  4355707ul, 4712381ul, 5098259ul, 5515729ul, 5967347ul, 6456007ul,
-  6984629ul, 7556579ul, 8175383ul, 8844859ul, 9569143ul, 10352717ul,
-  11200489ul, 12117689ul, 13109983ul, 14183539ul, 15345007ul,
-  16601593ul, 17961079ul, 19431899ul, 21023161ul, 22744717ul,
-  24607243ul, 26622317ul, 28802401ul, 31160981ul, 33712729ul,
-  36473443ul, 39460231ul, 42691603ul, 46187573ul, 49969847ul,
-  54061849ul, 58488943ul, 63278561ul, 68460391ul, 74066549ul,
-  80131819ul, 86693767ul, 93793069ul, 101473717ul, 109783337ul,
-  118773397ul, 128499677ul, 139022417ul, 150406843ul, 162723577ul,
-  176048909ul, 190465427ul, 206062531ul, 222936881ul, 241193053ul,
-  260944219ul, 282312799ul, 305431229ul, 330442829ul, 357502601ul,
-  386778277ul, 418451333ul, 452718089ul, 489790921ul, 529899637ul,
-  573292817ul, 620239453ul, 671030513ul, 725980837ul, 785430967ul,
-  849749479ul, 919334987ul, 994618837ul, 1076067617ul, 1164186217ul,
-  1259520799ul, 1362662261ul, 1474249943ul, 1594975441ul, 1725587117ul,
-  1866894511ul, 2019773507ul, 2185171673ul, 2364114217ul, 2557710269ul,
-  2767159799ul, 2993761039ul, 3238918481ul, 3504151727ul, 3791104843ul,
-  4101556399ul, 4294967291ul,
-#if __SIZEOF_LONG__ >= 8
-  6442450933ul, 8589934583ul, 12884901857ul, 17179869143ul,
-  25769803693ul, 34359738337ul, 51539607367ul, 68719476731ul,
-  103079215087ul, 137438953447ul, 206158430123ul, 274877906899ul,
-  412316860387ul, 549755813881ul, 824633720731ul, 1099511627689ul,
-  1649267441579ul, 2199023255531ul, 3298534883309ul, 4398046511093ul,
-  6597069766607ul, 8796093022151ul, 13194139533241ul, 17592186044399ul,
-  26388279066581ul, 35184372088777ul, 52776558133177ul, 70368744177643ul,
-  105553116266399ul, 140737488355213ul, 211106232532861ul, 281474976710597ul,
-  562949953421231ul, 1125899906842597ul, 2251799813685119ul,
-  4503599627370449ul, 9007199254740881ul, 18014398509481951ul,
-  36028797018963913ul, 72057594037927931ul, 144115188075855859ul,
-  288230376151711717ul, 576460752303423433ul,
-  1152921504606846883ul, 2305843009213693951ul,
-  4611686018427387847ul, 9223372036854775783ul,
-  18446744073709551557ul
-#endif
-};
-
-/* Return the next number from PRIME_LIST >= N.  */
-
-uintptr_t
-__go_map_next_prime (uintptr_t n)
-{
-  size_t low;
-  size_t high;
-
-  low = 0;
-  high = sizeof prime_list / sizeof prime_list[0];
-  while (low < high)
-    {
-      size_t mid;
-
-      mid = (low + high) / 2;
-
-      /* Here LOW <= MID < HIGH.  */
-
-      if (prime_list[mid] < n)
-       low = mid + 1;
-      else if (prime_list[mid] > n)
-       high = mid;
-      else
-       return n;
-    }
-  if (low >= sizeof prime_list / sizeof prime_list[0])
-    return n;
-  return prime_list[low];
-}
-
-/* Allocate a new map.  */
-
-struct __go_map *
-__go_new_map (const struct __go_map_descriptor *descriptor, uintptr_t entries)
-{
-  int32 ientries;
-  struct __go_map *ret;
-
-  /* The master library limits map entries to int32, so we do too.  */
-  ientries = (int32) entries;
-  if (ientries < 0 || (uintptr_t) ientries != entries)
-    runtime_panicstring ("map size out of range");
-
-  if (entries == 0)
-    entries = 5;
-  else
-    entries = __go_map_next_prime (entries);
-  ret = (struct __go_map *) __go_alloc (sizeof (struct __go_map));
-  ret->__descriptor = descriptor;
-  ret->__element_count = 0;
-  ret->__bucket_count = entries;
-  ret->__buckets = (void **) __go_alloc (entries * sizeof (void *));
-  __builtin_memset (ret->__buckets, 0, entries * sizeof (void *));
-  return ret;
-}
-
-/* Allocate a new map when the argument to make is a large type.  */
-
-struct __go_map *
-__go_new_map_big (const struct __go_map_descriptor *descriptor,
-                 uint64_t entries)
-{
-  uintptr_t sentries;
-
-  sentries = (uintptr_t) entries;
-  if ((uint64_t) sentries != entries)
-    runtime_panicstring ("map size out of range");
-  return __go_new_map (descriptor, sentries);
-}
diff --git a/libgo/runtime/go-reflect-map.c b/libgo/runtime/go-reflect-map.c
deleted file mode 100644 (file)
index 36f3102..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-/* go-reflect-map.c -- map reflection support for Go.
-
-   Copyright 2009, 2010 The Go Authors. All rights reserved.
-   Use of this source code is governed by a BSD-style
-   license that can be found in the LICENSE file.  */
-
-#include <stdlib.h>
-#include <stdint.h>
-
-#include "runtime.h"
-#include "go-alloc.h"
-#include "go-assert.h"
-#include "go-type.h"
-#include "map.h"
-
-/* This file implements support for reflection on maps.  These
-   functions are called from reflect/value.go.  */
-
-extern void *mapaccess (struct __go_map_type *, void *, void *)
-  __asm__ (GOSYM_PREFIX "reflect.mapaccess");
-
-void *
-mapaccess (struct __go_map_type *mt, void *m, void *key)
-{
-  struct __go_map *map = (struct __go_map *) m;
-
-  __go_assert ((mt->__common.__code & GO_CODE_MASK) == GO_MAP);
-  if (map == NULL)
-    return NULL;
-  else
-    return __go_map_index (map, key, 0);
-}
-
-extern void mapassign (struct __go_map_type *, void *, void *, void *)
-  __asm__ (GOSYM_PREFIX "reflect.mapassign");
-
-void
-mapassign (struct __go_map_type *mt, void *m, void *key, void *val)
-{
-  struct __go_map *map = (struct __go_map *) m;
-  void *p;
-
-  __go_assert ((mt->__common.__code & GO_CODE_MASK) == GO_MAP);
-  if (map == NULL)
-    runtime_panicstring ("assignment to entry in nil map");
-  p = __go_map_index (map, key, 1);
-  __builtin_memcpy (p, val, mt->__val_type->__size);
-}
-
-extern void mapdelete (struct __go_map_type *, void *, void *)
-  __asm__ (GOSYM_PREFIX "reflect.mapdelete");
-
-void
-mapdelete (struct __go_map_type *mt, void *m, void *key)
-{
-  struct __go_map *map = (struct __go_map *) m;
-
-  __go_assert ((mt->__common.__code & GO_CODE_MASK) == GO_MAP);
-  if (map == NULL)
-    return;
-  __go_map_delete (map, key);
-}
-
-extern int32_t maplen (void *) __asm__ (GOSYM_PREFIX "reflect.maplen");
-
-int32_t
-maplen (void *m)
-{
-  struct __go_map *map = (struct __go_map *) m;
-
-  if (map == NULL)
-    return 0;
-  return (int32_t) map->__element_count;
-}
-
-extern unsigned char *mapiterinit (struct __go_map_type *, void *)
-  __asm__ (GOSYM_PREFIX "reflect.mapiterinit");
-
-unsigned char *
-mapiterinit (struct __go_map_type *mt, void *m)
-{
-  struct __go_hash_iter *it;
-
-  __go_assert ((mt->__common.__code & GO_CODE_MASK) == GO_MAP);
-  it = __go_alloc (sizeof (struct __go_hash_iter));
-  __go_mapiterinit ((struct __go_map *) m, it);
-  return (unsigned char *) it;
-}
-
-extern void mapiternext (void *) __asm__ (GOSYM_PREFIX "reflect.mapiternext");
-
-void
-mapiternext (void *it)
-{
-  __go_mapiternext ((struct __go_hash_iter *) it);
-}
-
-extern void *mapiterkey (void *) __asm__ (GOSYM_PREFIX "reflect.mapiterkey");
-
-void *
-mapiterkey (void *ita)
-{
-  struct __go_hash_iter *it = (struct __go_hash_iter *) ita;
-  const struct __go_type_descriptor *key_descriptor;
-  void *key;
-
-  if (it->entry == NULL)
-    return NULL;
-
-  key_descriptor = it->map->__descriptor->__map_descriptor->__key_type;
-  key = __go_alloc (key_descriptor->__size);
-  __go_mapiter1 (it, key);
-  return key;
-}
-
-/* Make a new map.  We have to build our own map descriptor.  */
-
-extern struct __go_map *makemap (const struct __go_map_type *)
-  __asm__ (GOSYM_PREFIX "reflect.makemap");
-
-struct __go_map *
-makemap (const struct __go_map_type *t)
-{
-  struct __go_map_descriptor *md;
-  unsigned int o;
-  const struct __go_type_descriptor *kt;
-  const struct __go_type_descriptor *vt;
-
-  md = (struct __go_map_descriptor *) __go_alloc (sizeof (*md));
-  md->__map_descriptor = t;
-  o = sizeof (void *);
-  kt = t->__key_type;
-  o = (o + kt->__field_align - 1) & ~ (kt->__field_align - 1);
-  md->__key_offset = o;
-  o += kt->__size;
-  vt = t->__val_type;
-  o = (o + vt->__field_align - 1) & ~ (vt->__field_align - 1);
-  md->__val_offset = o;
-  o += vt->__size;
-  o = (o + sizeof (void *) - 1) & ~ (sizeof (void *) - 1);
-  o = (o + kt->__field_align - 1) & ~ (kt->__field_align - 1);
-  o = (o + vt->__field_align - 1) & ~ (vt->__field_align - 1);
-  md->__entry_size = o;
-
-  return __go_new_map (md, 0);
-}
-
-extern _Bool ismapkey (const struct __go_type_descriptor *)
-  __asm__ (GOSYM_PREFIX "reflect.ismapkey");
-
-_Bool
-ismapkey (const struct __go_type_descriptor *typ)
-{
-  return (typ != NULL
-         && (void *) typ->__hashfn->fn != (void *) __go_type_hash_error);
-}
index 585984e9fef220c1c58521ef6a3149530f88ef5b..829572b7bee921134c5446f3c1993d74cae4ea4d 100644 (file)
@@ -14,7 +14,7 @@
 /* Hash function for float types.  */
 
 uintptr_t
-__go_type_hash_complex (const void *vkey, uintptr_t key_size)
+__go_type_hash_complex (const void *vkey, uintptr_t seed, uintptr_t key_size)
 {
   if (key_size == 8)
     {
@@ -31,7 +31,7 @@ __go_type_hash_complex (const void *vkey, uintptr_t key_size)
       cfi = cimagf (cf);
 
       if (isinf (cfr) || isinf (cfi))
-       return 0;
+       return seed;
 
       /* NaN != NaN, so the hash code of a NaN is irrelevant.  Make it
         random so that not all NaNs wind up in the same place.  */
@@ -40,14 +40,14 @@ __go_type_hash_complex (const void *vkey, uintptr_t key_size)
 
       /* Avoid negative zero.  */
       if (cfr == 0 && cfi == 0)
-       return 0;
+       return seed;
       else if (cfr == 0)
        cf = cfi * I;
       else if (cfi == 0)
        cf = cfr;
 
       memcpy (&fi, &cf, 8);
-      return (uintptr_t) cfi;
+      return (uintptr_t) fi ^ seed;
     }
   else if (key_size == 16)
     {
@@ -64,21 +64,21 @@ __go_type_hash_complex (const void *vkey, uintptr_t key_size)
       cdi = cimag (cd);
 
       if (isinf (cdr) || isinf (cdi))
-       return 0;
+       return seed;
 
       if (isnan (cdr) || isnan (cdi))
        return runtime_fastrand1 ();
 
       /* Avoid negative zero.  */
       if (cdr == 0 && cdi == 0)
-       return 0;
+       return seed;
       else if (cdr == 0)
        cd = cdi * I;
       else if (cdi == 0)
        cd = cdr;
 
       memcpy (&di, &cd, 16);
-      return di[0] ^ di[1];
+      return di[0] ^ di[1] ^ seed;
     }
   else
     runtime_throw ("__go_type_hash_complex: invalid complex size");
index 315c30efb7f609166f2ad217ddba6560aeb1937b..a98bceaac84686f6537aa76f3579d64f267b3d08 100644 (file)
@@ -11,7 +11,7 @@
 /* A hash function for an empty interface.  */
 
 uintptr_t
-__go_type_hash_empty_interface (const void *vval,
+__go_type_hash_empty_interface (const void *vval, uintptr_t seed,
                                uintptr_t key_size __attribute__ ((unused)))
 {
   const struct __go_empty_interface *val;
@@ -22,11 +22,13 @@ __go_type_hash_empty_interface (const void *vval,
   descriptor = val->__type_descriptor;
   if (descriptor == NULL)
     return 0;
+  if (descriptor->__hashfn == NULL)
+    runtime_panicstring ("hash of unhashable type");
   size = descriptor->__size;
   if (__go_is_pointer_type (descriptor))
-    return __go_call_hashfn (descriptor->__hashfn, &val->__object, size);
+    return __go_call_hashfn (descriptor->__hashfn, &val->__object, seed, size);
   else
-    return __go_call_hashfn (descriptor->__hashfn, val->__object, size);
+    return __go_call_hashfn (descriptor->__hashfn, val->__object, seed, size);
 }
 
 const FuncVal __go_type_hash_empty_interface_descriptor =
@@ -51,6 +53,8 @@ __go_type_equal_empty_interface (const void *vv1, const void *vv2,
     return v1_descriptor == v2_descriptor;
   if (!__go_type_descriptors_equal (v1_descriptor, v2_descriptor))
     return 0;
+  if (v1_descriptor->__equalfn == NULL)
+    runtime_panicstring ("comparing uncomparable types");
   if (__go_is_pointer_type (v1_descriptor))
     return v1->__object == v2->__object;
   else
diff --git a/libgo/runtime/go-type-error.c b/libgo/runtime/go-type-error.c
deleted file mode 100644 (file)
index 8881a86..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-/* go-type-error.c -- invalid hash and equality functions.
-
-   Copyright 2009 The Go Authors. All rights reserved.
-   Use of this source code is governed by a BSD-style
-   license that can be found in the LICENSE file.  */
-
-#include "runtime.h"
-#include "go-type.h"
-
-/* A hash function used for a type which does not support hash
-   functions.  */
-
-uintptr_t
-__go_type_hash_error (const void *val __attribute__ ((unused)),
-                     uintptr_t key_size __attribute__ ((unused)))
-{
-  runtime_panicstring ("hash of unhashable type");
-}
-
-const FuncVal __go_type_hash_error_descriptor =
-  { (void *) __go_type_hash_error };
-
-/* An equality function for an interface.  */
-
-_Bool
-__go_type_equal_error (const void *v1 __attribute__ ((unused)),
-                      const void *v2 __attribute__ ((unused)),
-                      uintptr_t key_size __attribute__ ((unused)))
-{
-  runtime_panicstring ("comparing uncomparable types");
-}
-
-const FuncVal __go_type_equal_error_descriptor =
-  { (void *) __go_type_equal_error };
index 39f9b29ae7dd095567df76a4823c65df57e3d454..ae0e3367c2122cb5c8ae86acf5c102892e47663a 100644 (file)
@@ -12,7 +12,7 @@
 /* Hash function for float types.  */
 
 uintptr_t
-__go_type_hash_float (const void *vkey, uintptr_t key_size)
+__go_type_hash_float (const void *vkey, uintptr_t seed, uintptr_t key_size)
 {
   if (key_size == 4)
     {
@@ -24,7 +24,7 @@ __go_type_hash_float (const void *vkey, uintptr_t key_size)
       f = *fp;
 
       if (isinf (f) || f == 0)
-       return 0;
+       return seed;
 
       /* NaN != NaN, so the hash code of a NaN is irrelevant.  Make it
         random so that not all NaNs wind up in the same place.  */
@@ -32,7 +32,7 @@ __go_type_hash_float (const void *vkey, uintptr_t key_size)
        return runtime_fastrand1 ();
 
       memcpy (&si, vkey, 4);
-      return (uintptr_t) si;
+      return (uintptr_t) si ^ seed;
     }
   else if (key_size == 8)
     {
@@ -44,13 +44,13 @@ __go_type_hash_float (const void *vkey, uintptr_t key_size)
       d = *dp;
 
       if (isinf (d) || d == 0)
-       return 0;
+       return seed;
 
       if (isnan (d))
        return runtime_fastrand1 ();
 
       memcpy (&di, vkey, 8);
-      return (uintptr_t) di;
+      return (uintptr_t) di ^ seed;
     }
   else
     runtime_throw ("__go_type_hash_float: invalid float size");
index a334d56cbe406201761dabf47e6605847b194893..d58aa75e5ed2cfb39f0feb635b2266c52d3d3c11 100644 (file)
@@ -14,7 +14,7 @@
    true of, e.g., integers and pointers.  */
 
 uintptr_t
-__go_type_hash_identity (const void *key, uintptr_t key_size)
+__go_type_hash_identity (const void *key, uintptr_t seed, uintptr_t key_size)
 {
   uintptr_t ret;
   uintptr_t i;
@@ -34,12 +34,12 @@ __go_type_hash_identity (const void *key, uintptr_t key_size)
       __builtin_memcpy (&u.a[0], key, key_size);
 #endif
       if (sizeof (uintptr_t) >= 8)
-       return (uintptr_t) u.v;
+       return (uintptr_t) u.v ^ seed;
       else
-       return (uintptr_t) ((u.v >> 32) ^ (u.v & 0xffffffff));
+       return (uintptr_t) ((u.v >> 32) ^ (u.v & 0xffffffff)) ^ seed;
     }
 
-  ret = 5381;
+  ret = seed;
   for (i = 0, p = (const unsigned char *) key; i < key_size; i++, p++)
     ret = ret * 33 + *p;
   return ret;
index e9e577956ebba7ee66b24281d5126f2fe9111a34..ffba7b28a3566c76e5e19d9a566e34f11174c46d 100644 (file)
@@ -11,7 +11,7 @@
 /* A hash function for an interface.  */
 
 uintptr_t
-__go_type_hash_interface (const void *vval,
+__go_type_hash_interface (const void *vval, uintptr_t seed,
                          uintptr_t key_size __attribute__ ((unused)))
 {
   const struct __go_interface *val;
@@ -22,11 +22,13 @@ __go_type_hash_interface (const void *vval,
   if (val->__methods == NULL)
     return 0;
   descriptor = (const struct __go_type_descriptor *) val->__methods[0];
+  if (descriptor->__hashfn == NULL)
+    runtime_panicstring ("hash of unhashable type");
   size = descriptor->__size;
   if (__go_is_pointer_type (descriptor))
-    return __go_call_hashfn (descriptor->__hashfn, &val->__object, size);
+    return __go_call_hashfn (descriptor->__hashfn, &val->__object, seed, size);
   else
-    return __go_call_hashfn (descriptor->__hashfn, val->__object, size);
+    return __go_call_hashfn (descriptor->__hashfn, val->__object, seed, size);
 }
 
 const FuncVal __go_type_hash_interface_descriptor =
@@ -51,6 +53,8 @@ __go_type_equal_interface (const void *vv1, const void *vv2,
   v2_descriptor = (const struct __go_type_descriptor *) v2->__methods[0];
   if (!__go_type_descriptors_equal (v1_descriptor, v2_descriptor))
     return 0;
+  if (v1_descriptor->__equalfn == NULL)
+    runtime_panicstring ("comparing uncomparable types");
   if (__go_is_pointer_type (v1_descriptor))
     return v1->__object == v2->__object;
   else
index 3d33d6ee5103fba921e141c74fa243ec60bed3df..c7277ddb646e59e993ad9cb71a937a3acd8b8ce7 100644 (file)
@@ -11,7 +11,7 @@
 /* A string hash function for a map.  */
 
 uintptr_t
-__go_type_hash_string (const void *vkey,
+__go_type_hash_string (const void *vkey, uintptr_t seed,
                       uintptr_t key_size __attribute__ ((unused)))
 {
   uintptr_t ret;
@@ -20,7 +20,7 @@ __go_type_hash_string (const void *vkey,
   intgo i;
   const byte *p;
 
-  ret = 5381;
+  ret = seed;
   key = (const String *) vkey;
   len = key->len;
   for (i = 0, p = key->str; i < len; i++, p++)
index eb063ec67894ba100d1a732fc69369678ada2712..7c3149badc741575a778ed342321e446a588c3a3 100644 (file)
@@ -257,6 +257,33 @@ struct __go_map_type
 
   /* The map value type.  */
   const struct __go_type_descriptor *__val_type;
+
+  /* The map bucket type.  */
+  const struct __go_type_descriptor *__bucket_type;
+
+  /* The map header type.  */
+  const struct __go_type_descriptor *__hmap_type;
+
+  /* The size of the key slot.  */
+  uint8_t __key_size;
+
+  /* Whether to store a pointer to key rather than the key itself.  */
+  uint8_t __indirect_key;
+
+  /* The size of the value slot.  */
+  uint8_t __value_size;
+
+  /* Whether to store a pointer to value rather than the value itself.  */
+  uint8_t __indirect_value;
+
+  /* The size of a bucket.  */
+  uint16_t __bucket_size;
+
+  /* Whether the key type is reflexive--whether k==k for all keys.  */
+  _Bool __reflexive_key;
+
+  /* Whether we should update the key when overwriting an entry.  */
+  _Bool __need_key_update;
 };
 
 /* A pointer type.  */
@@ -314,10 +341,11 @@ __go_is_pointer_type (const struct __go_type_descriptor *td)
 /* Call a type hash function, given the __hashfn value.  */
 
 static inline uintptr_t
-__go_call_hashfn (const FuncVal *hashfn, const void *p, uintptr_t size)
+__go_call_hashfn (const FuncVal *hashfn, const void *p, uintptr_t seed,
+                 uintptr_t size)
 {
-  uintptr_t (*h) (const void *, uintptr_t) = (void *) hashfn->fn;
-  return __builtin_call_with_static_chain (h (p, size), hashfn);
+  uintptr_t (*h) (const void *, uintptr_t, uintptr_t) = (void *) hashfn->fn;
+  return __builtin_call_with_static_chain (h (p, seed, size), hashfn);
 }
 
 /* Call a type equality function, given the __equalfn value.  */
@@ -334,29 +362,25 @@ extern _Bool
 __go_type_descriptors_equal(const struct __go_type_descriptor*,
                            const struct __go_type_descriptor*);
 
-extern uintptr_t __go_type_hash_identity (const void *, uintptr_t);
+extern uintptr_t __go_type_hash_identity (const void *, uintptr_t, uintptr_t);
 extern const FuncVal __go_type_hash_identity_descriptor;
 extern _Bool __go_type_equal_identity (const void *, const void *, uintptr_t);
 extern const FuncVal __go_type_equal_identity_descriptor;
-extern uintptr_t __go_type_hash_string (const void *, uintptr_t);
+extern uintptr_t __go_type_hash_string (const void *, uintptr_t, uintptr_t);
 extern const FuncVal __go_type_hash_string_descriptor;
 extern _Bool __go_type_equal_string (const void *, const void *, uintptr_t);
 extern const FuncVal __go_type_equal_string_descriptor;
-extern uintptr_t __go_type_hash_float (const void *, uintptr_t);
+extern uintptr_t __go_type_hash_float (const void *, uintptr_t, uintptr_t);
 extern const FuncVal __go_type_hash_float_descriptor;
 extern _Bool __go_type_equal_float (const void *, const void *, uintptr_t);
 extern const FuncVal __go_type_equal_float_descriptor;
-extern uintptr_t __go_type_hash_complex (const void *, uintptr_t);
+extern uintptr_t __go_type_hash_complex (const void *, uintptr_t, uintptr_t);
 extern const FuncVal __go_type_hash_complex_descriptor;
 extern _Bool __go_type_equal_complex (const void *, const void *, uintptr_t);
 extern const FuncVal __go_type_equal_complex_descriptor;
-extern uintptr_t __go_type_hash_interface (const void *, uintptr_t);
+extern uintptr_t __go_type_hash_interface (const void *, uintptr_t, uintptr_t);
 extern const FuncVal __go_type_hash_interface_descriptor;
 extern _Bool __go_type_equal_interface (const void *, const void *, uintptr_t);
 extern const FuncVal __go_type_equal_interface_descriptor;
-extern uintptr_t __go_type_hash_error (const void *, uintptr_t);
-extern const FuncVal __go_type_hash_error_descriptor;
-extern _Bool __go_type_equal_error (const void *, const void *, uintptr_t);
-extern const FuncVal __go_type_equal_error_descriptor;
 
 #endif /* !defined(LIBGO_GO_TYPE_H) */
index fbb7b744eeb18bef1e4596072f72bd60f0032dcc..591d06a7f59fcc453ceb2cc53c674e9678d853ad 100644 (file)
@@ -23,9 +23,6 @@ package runtime
 // Type aka __go_type_descriptor
 #define kind __code
 #define string __reflection
-#define KindPtr GO_PTR
-#define KindNoPointers GO_NO_POINTERS
-#define kindMask GO_CODE_MASK
 
 // GCCGO SPECIFIC CHANGE
 //
@@ -893,7 +890,7 @@ runtime_mal(uintptr n)
 }
 
 func new(typ *Type) (ret *uint8) {
-       ret = runtime_mallocgc(typ->__size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&KindNoPointers ? FlagNoScan : 0);
+       ret = runtime_mallocgc(typ->__size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&kindNoPointers ? FlagNoScan : 0);
 }
 
 static void*
@@ -903,7 +900,7 @@ cnew(const Type *typ, intgo n, int32 objtyp)
                runtime_throw("runtime: invalid objtyp");
        if(n < 0 || (typ->__size > 0 && (uintptr)n > (MaxMem/typ->__size)))
                runtime_panicstring("runtime: allocation size out of range");
-       return runtime_mallocgc(typ->__size*n, (uintptr)typ | objtyp, typ->kind&KindNoPointers ? FlagNoScan : 0);
+       return runtime_mallocgc(typ->__size*n, (uintptr)typ | objtyp, typ->kind&kindNoPointers ? FlagNoScan : 0);
 }
 
 // same as runtime_new, but callable from C
@@ -955,7 +952,7 @@ func SetFinalizer(obj Eface, finalizer Eface) {
        if(!runtime_mlookup(obj.__object, &base, &size, nil) || obj.__object != base) {
                // As an implementation detail we allow to set finalizers for an inner byte
                // of an object if it could come from tiny alloc (see mallocgc for details).
-               if(ot->__element_type == nil || (ot->__element_type->kind&KindNoPointers) == 0 || ot->__element_type->__size >= TinySize) {
+               if(ot->__element_type == nil || (ot->__element_type->kind&kindNoPointers) == 0 || ot->__element_type->__size >= TinySize) {
                        runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block (%p)\n", obj.__object);
                        goto throw;
                }
index acd919f7abefd1aa1ad2cea1f9c2d52b5b4d21d7..1efbbbeb1021e493c962840d7bf1fdd4d3756462 100644 (file)
@@ -391,7 +391,7 @@ struct MCentral
        Lock;
        int32 sizeclass;
        MSpan nonempty; // list of spans with a free object
-       MSpan empty;    // list of spans with no free objects (or cached in an MCache)
+       MSpan mempty;   // list of spans with no free objects (or cached in an MCache)
        int32 nfree;    // # of objects available in nonempty spans
 };
 
@@ -478,8 +478,10 @@ extern     int32   runtime_checking;
 void   runtime_markspan(void *v, uintptr size, uintptr n, bool leftover);
 void   runtime_unmarkspan(void *v, uintptr size);
 void   runtime_purgecachedstats(MCache*);
-void*  runtime_cnew(const Type*);
-void*  runtime_cnewarray(const Type*, intgo);
+void*  runtime_cnew(const Type*)
+         __asm__(GOSYM_PREFIX "runtime.newobject");
+void*  runtime_cnewarray(const Type*, intgo)
+         __asm__(GOSYM_PREFIX "runtime.newarray");
 void   runtime_tracealloc(void*, uintptr, uintptr);
 void   runtime_tracefree(void*, uintptr);
 void   runtime_tracegc(void);
diff --git a/libgo/runtime/map.goc b/libgo/runtime/map.goc
deleted file mode 100644 (file)
index e4b8456..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-#include "runtime.h"
-#include "map.h"
-
-typedef struct __go_map Hmap;
-typedef struct __go_hash_iter hiter;
-
-/* Access a value in a map, returning a value and a presence indicator.  */
-
-func mapaccess2(t *MapType, h *Hmap, key *byte, val *byte) (present bool) {
-       byte *mapval;
-       size_t valsize;
-
-       mapval = __go_map_index(h, key, 0);
-       valsize = t->__val_type->__size;
-       if (mapval == nil) {
-               __builtin_memset(val, 0, valsize);
-               present = 0;
-       } else {
-               __builtin_memcpy(val, mapval, valsize);
-               present = 1;
-       }
-}
-
-/* Optionally assign a value to a map (m[k] = v, p).  */
-
-func mapassign2(h *Hmap, key *byte, val *byte, p bool) {
-       if (!p) {
-               __go_map_delete(h, key);
-       } else {
-               byte *mapval;
-               size_t valsize;
-
-               mapval = __go_map_index(h, key, 1);
-               valsize = h->__descriptor->__map_descriptor->__val_type->__size;
-               __builtin_memcpy(mapval, val, valsize);
-       }
-}
-
-/* Delete a key from a map.  */
-
-func mapdelete(h *Hmap, key *byte) {
-       __go_map_delete(h, key);
-}
-
-/* Initialize a range over a map.  */
-
-func mapiterinit(h *Hmap, it *hiter) {
-       __go_mapiterinit(h, it);
-}
-
-/* Move to the next iteration, updating *HITER.  */
-
-func mapiternext(it *hiter) {
-       __go_mapiternext(it);
-}
-
-/* Get the key of the current iteration.  */
-
-func mapiter1(it *hiter, key *byte) {
-       __go_mapiter1(it, key);
-}
-
-/* Get the key and value of the current iteration.  */
-
-func mapiter2(it *hiter, key *byte, val *byte) {
-       __go_mapiter2(it, key, val);
-}
diff --git a/libgo/runtime/map.h b/libgo/runtime/map.h
deleted file mode 100644 (file)
index 0c587bb..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/* map.h -- the map type for Go.
-
-   Copyright 2009 The Go Authors. All rights reserved.
-   Use of this source code is governed by a BSD-style
-   license that can be found in the LICENSE file.  */
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include "go-type.h"
-
-/* A map descriptor is what we need to manipulate the map.  This is
-   constant for a given map type.  */
-
-struct __go_map_descriptor
-{
-  /* A pointer to the type descriptor for the type of the map itself.  */
-  const struct __go_map_type *__map_descriptor;
-
-  /* A map entry is a struct with three fields:
-       map_entry_type *next_entry;
-       key_type key;
-       value_type value;
-     This is the size of that struct.  */
-  uintptr_t __entry_size;
-
-  /* The offset of the key field in a map entry struct.  */
-  uintptr_t __key_offset;
-
-  /* The offset of the value field in a map entry struct (the value
-     field immediately follows the key field, but there may be some
-     bytes inserted for alignment).  */
-  uintptr_t __val_offset;
-};
-
-struct __go_map
-{
-  /* The constant descriptor for this map.  */
-  const struct __go_map_descriptor *__descriptor;
-
-  /* The number of elements in the hash table.  */
-  uintptr_t __element_count;
-
-  /* The number of entries in the __buckets array.  */
-  uintptr_t __bucket_count;
-
-  /* Each bucket is a pointer to a linked list of map entries.  */
-  void **__buckets;
-};
-
-/* For a map iteration the compiled code will use a pointer to an
-   iteration structure.  The iteration structure will be allocated on
-   the stack.  The Go code must allocate at least enough space.  */
-
-struct __go_hash_iter
-{
-  /* A pointer to the current entry.  This will be set to NULL when
-     the range has completed.  The Go will test this field, so it must
-     be the first one in the structure.  */
-  const void *entry;
-  /* The map we are iterating over.  */
-  const struct __go_map *map;
-  /* A pointer to the next entry in the current bucket.  This permits
-     deleting the current entry.  This will be NULL when we have seen
-     all the entries in the current bucket.  */
-  const void *next_entry;
-  /* The bucket index of the current and next entry.  */
-  uintptr_t bucket;
-};
-
-extern struct __go_map *__go_new_map (const struct __go_map_descriptor *,
-                                     uintptr_t);
-
-extern uintptr_t __go_map_next_prime (uintptr_t);
-
-extern void *__go_map_index (struct __go_map *, const void *, _Bool);
-
-extern void __go_map_delete (struct __go_map *, const void *);
-
-extern void __go_mapiterinit (const struct __go_map *, struct __go_hash_iter *);
-
-extern void __go_mapiternext (struct __go_hash_iter *);
-
-extern void __go_mapiter1 (struct __go_hash_iter *it, unsigned char *key);
-
-extern void __go_mapiter2 (struct __go_hash_iter *it, unsigned char *key,
-                          unsigned char *val);
index 62e2c2d7dfb5d57f022c6f41eddc62bf392eb9db..491cac5330f10375314211a8f6458e799e150619 100644 (file)
@@ -8,7 +8,7 @@
 //
 // The MCentral doesn't actually contain the list of free objects; the MSpan does.
 // Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
-// and those that are completely allocated (c->empty).
+// and those that are completely allocated (c->mempty).
 //
 // TODO(rsc): tcmalloc uses a "transfer cache" to split the list
 // into sections of class_to_transfercount[sizeclass] objects
@@ -28,7 +28,7 @@ runtime_MCentral_Init(MCentral *c, int32 sizeclass)
 {
        c->sizeclass = sizeclass;
        runtime_MSpanList_Init(&c->nonempty);
-       runtime_MSpanList_Init(&c->empty);
+       runtime_MSpanList_Init(&c->mempty);
 }
 
 // Allocate a span to use in an MCache.
@@ -58,13 +58,13 @@ retry:
                goto havespan;
        }
 
-       for(s = c->empty.next; s != &c->empty; s = s->next) {
+       for(s = c->mempty.next; s != &c->mempty; s = s->next) {
                if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
                        // we have an empty span that requires sweeping,
                        // sweep it and see if we can free some space in it
                        runtime_MSpanList_Remove(s);
                        // swept spans are at the end of the list
-                       runtime_MSpanList_InsertBack(&c->empty, s);
+                       runtime_MSpanList_InsertBack(&c->mempty, s);
                        runtime_unlock(c);
                        runtime_MSpan_Sweep(s);
                        runtime_lock(c);
@@ -96,7 +96,7 @@ havespan:
                runtime_throw("freelist empty");
        c->nfree -= n;
        runtime_MSpanList_Remove(s);
-       runtime_MSpanList_InsertBack(&c->empty, s);
+       runtime_MSpanList_InsertBack(&c->mempty, s);
        s->incache = true;
        runtime_unlock(c);
        return s;
index 1f6a40cd630724404717438df4c91cf242eb6789..341544cb97064a3c3046ed43ce41a0275aaeabe5 100644 (file)
@@ -69,9 +69,6 @@
 typedef struct __go_map Hmap;
 // Type aka __go_type_descriptor
 #define string __reflection
-#define KindPtr GO_PTR
-#define KindNoPointers GO_NO_POINTERS
-#define kindMask GO_CODE_MASK
 // PtrType aka __go_ptr_type
 #define elem __element_type
 
@@ -216,7 +213,7 @@ static void addstackroots(G *gp, Workbuf **wbufp);
 
 static struct {
        uint64  full;  // lock-free list of full blocks
-       uint64  empty; // lock-free list of empty blocks
+       uint64  wempty; // lock-free list of empty blocks
        byte    pad0[CacheLineSize]; // prevents false-sharing between full/empty and nproc/nwait
        uint32  nproc;
        int64   tstart;
@@ -943,16 +940,16 @@ scanblock(Workbuf *wbuf, bool keepworking)
                        // eface->__object
                        if((byte*)eface->__object >= arena_start && (byte*)eface->__object < arena_used) {
                                if(__go_is_pointer_type(t)) {
-                                       if((t->__code & KindNoPointers))
+                                       if((t->__code & kindNoPointers))
                                                continue;
 
                                        obj = eface->__object;
-                                       if((t->__code & kindMask) == KindPtr) {
+                                       if((t->__code & kindMask) == kindPtr) {
                                                // Only use type information if it is a pointer-containing type.
                                                // This matches the GC programs written by cmd/gc/reflect.c's
                                                // dgcsym1 in case TPTR32/case TPTR64. See rationale there.
                                                et = ((const PtrType*)t)->elem;
-                                               if(!(et->__code & KindNoPointers))
+                                               if(!(et->__code & kindNoPointers))
                                                        objti = (uintptr)((const PtrType*)t)->elem->__gc;
                                        }
                                } else {
@@ -981,16 +978,16 @@ scanblock(Workbuf *wbuf, bool keepworking)
                        if((byte*)iface->__object >= arena_start && (byte*)iface->__object < arena_used) {
                                t = (const Type*)iface->tab[0];
                                if(__go_is_pointer_type(t)) {
-                                       if((t->__code & KindNoPointers))
+                                       if((t->__code & kindNoPointers))
                                                continue;
 
                                        obj = iface->__object;
-                                       if((t->__code & kindMask) == KindPtr) {
+                                       if((t->__code & kindMask) == kindPtr) {
                                                // Only use type information if it is a pointer-containing type.
                                                // This matches the GC programs written by cmd/gc/reflect.c's
                                                // dgcsym1 in case TPTR32/case TPTR64. See rationale there.
                                                et = ((const PtrType*)t)->elem;
-                                               if(!(et->__code & KindNoPointers))
+                                               if(!(et->__code & kindNoPointers))
                                                        objti = (uintptr)((const PtrType*)t)->elem->__gc;
                                        }
                                } else {
@@ -1101,7 +1098,7 @@ scanblock(Workbuf *wbuf, bool keepworking)
                        }
                        if(markonly(chan)) {
                                chantype = (ChanType*)pc[2];
-                               if(!(chantype->elem->__code & KindNoPointers)) {
+                               if(!(chantype->elem->__code & kindNoPointers)) {
                                        // Start chanProg.
                                        chan_ret = pc+3;
                                        pc = chanProg+1;
@@ -1114,7 +1111,7 @@ scanblock(Workbuf *wbuf, bool keepworking)
                case GC_CHAN:
                        // There are no heap pointers in struct Hchan,
                        // so we can ignore the leading sizeof(Hchan) bytes.
-                       if(!(chantype->elem->__code & KindNoPointers)) {
+                       if(!(chantype->elem->__code & kindNoPointers)) {
                                // Channel's buffer follows Hchan immediately in memory.
                                // Size of buffer (cap(c)) is second int in the chan struct.
                                chancap = ((uintgo*)chan)[1];
@@ -1377,7 +1374,7 @@ getempty(Workbuf *b)
 {
        if(b != nil)
                runtime_lfstackpush(&work.full, &b->node);
-       b = (Workbuf*)runtime_lfstackpop(&work.empty);
+       b = (Workbuf*)runtime_lfstackpop(&work.wempty);
        if(b == nil) {
                // Need to allocate.
                runtime_lock(&work);
@@ -1402,7 +1399,7 @@ putempty(Workbuf *b)
        if(CollectStats)
                runtime_xadd64(&gcstats.putempty, 1);
 
-       runtime_lfstackpush(&work.empty, &b->node);
+       runtime_lfstackpush(&work.wempty, &b->node);
 }
 
 // Get a full work buffer off the work.full list, or return nil.
@@ -1416,7 +1413,7 @@ getfull(Workbuf *b)
                runtime_xadd64(&gcstats.getfull, 1);
 
        if(b != nil)
-               runtime_lfstackpush(&work.empty, &b->node);
+               runtime_lfstackpush(&work.wempty, &b->node);
        b = (Workbuf*)runtime_lfstackpop(&work.full);
        if(b != nil || work.nproc == 1)
                return b;
@@ -2129,7 +2126,7 @@ runtime_gc(int32 force)
        // The atomic operations are not atomic if the uint64s
        // are not aligned on uint64 boundaries. This has been
        // a problem in the past.
-       if((((uintptr)&work.empty) & 7) != 0)
+       if((((uintptr)&work.wempty) & 7) != 0)
                runtime_throw("runtime: gc work buffer is misaligned");
        if((((uintptr)&work.full) & 7) != 0)
                runtime_throw("runtime: gc work buffer is misaligned");
@@ -2522,7 +2519,7 @@ runfinq(void* dummy __attribute__ ((unused)))
 
                                f = &fb->fin[i];
                                fint = ((const Type**)f->ft->__in.array)[0];
-                               if((fint->__code & kindMask) == KindPtr) {
+                               if((fint->__code & kindMask) == kindPtr) {
                                        // direct use of pointer
                                        param = &f->arg;
                                } else if(((const InterfaceType*)fint)->__methods.__count == 0) {
index 04dc971d688489a7cdcaf58847a215905e9b3595..04a5b98772c543f65314791b647536f24837e507 100644 (file)
@@ -878,7 +878,7 @@ runtime_MHeap_SplitSpan(MHeap *h, MSpan *s)
 
        // remove the span from whatever list it is in now
        if(s->sizeclass > 0) {
-               // must be in h->central[x].empty
+               // must be in h->central[x].mempty
                c = &h->central[s->sizeclass];
                runtime_lock(c);
                runtime_MSpanList_Remove(s);
@@ -937,7 +937,7 @@ runtime_MHeap_SplitSpan(MHeap *h, MSpan *s)
                c = &h->central[s->sizeclass];
                runtime_lock(c);
                // swept spans are at the end of the list
-               runtime_MSpanList_InsertBack(&c->empty, s);
+               runtime_MSpanList_InsertBack(&c->mempty, s);
                runtime_unlock(c);
        } else {
                // Swept spans are at the end of lists.
index 3fb3bde32232408b1e1a8d5c8983a10db93189fd..d493b54a5093d3314fe02eeb4d3fa571475c5623 100644 (file)
@@ -194,6 +194,22 @@ runtime_throw(const char *s)
        runtime_exit(1);        // even more not reached
 }
 
+void throw(String) __asm__ (GOSYM_PREFIX "runtime.throw");
+void
+throw(String s)
+{
+       M *mp;
+
+       mp = runtime_m();
+       if(mp->throwing == 0)
+               mp->throwing = 1;
+       runtime_startpanic();
+       runtime_printf("fatal error: %S\n", s);
+       runtime_dopanic(0);
+       *(int32*)0 = 0; // not reached
+       runtime_exit(1);        // even more not reached
+}
+
 void
 runtime_panicstring(const char *s)
 {
index 20db789ddb62d3834549fa0e2a3b7febe295801b..32d0fb2a7be7c2c8a5f17490be290ba90c552b76 100644 (file)
@@ -546,9 +546,9 @@ static struct __go_channel_type chan_bool_type_descriptor =
       /* __hash */
       0, /* This value doesn't matter.  */
       /* __hashfn */
-      &__go_type_hash_error_descriptor,
+      NULL,
       /* __equalfn */
-      &__go_type_equal_error_descriptor,
+      NULL,
       /* __gc */
       NULL, /* This value doesn't matter */
       /* __reflection */
@@ -2753,7 +2753,7 @@ static void
 procresize(int32 new)
 {
        int32 i, old;
-       bool empty;
+       bool pempty;
        G *gp;
        P *p;
 
@@ -2781,14 +2781,14 @@ procresize(int32 new)
        // collect all runnable goroutines in global queue preserving FIFO order
        // FIFO order is required to ensure fairness even during frequent GCs
        // see http://golang.org/issue/7126
-       empty = false;
-       while(!empty) {
-               empty = true;
+       pempty = false;
+       while(!pempty) {
+               pempty = true;
                for(i = 0; i < old; i++) {
                        p = runtime_allp[i];
                        if(p->runqhead == p->runqtail)
                                continue;
-                       empty = false;
+                       pempty = false;
                        // pop from tail of local queue
                        p->runqtail--;
                        gp = (G*)p->runq[p->runqtail%nelem(p->runq)];
index 617766b8a990f749147172f729cbfca743852332..dc00b421f996c4976793ee7f794e12a56b6debe6 100644 (file)
@@ -376,7 +376,7 @@ void        runtime_mprofinit(void);
 int32  runtime_mcount(void);
 int32  runtime_gcount(void);
 void   runtime_mcall(void(*)(G*));
-uint32 runtime_fastrand1(void);
+uint32 runtime_fastrand1(void) __asm__ (GOSYM_PREFIX "runtime.fastrand1");
 int32  runtime_timediv(int64, int32, int32*);
 int32  runtime_round2(int32 x); // round x up to a power of 2.