/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/dependencies.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generation.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "memory/universe.inline.hpp"
#include "oops/constantPool.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#endif // INCLUDE_ALL_GCS

// Known objects
Klass* Universe::_boolArrayKlassObj                 = NULL;
Klass* Universe::_byteArrayKlassObj                 = NULL;
Klass* Universe::_charArrayKlassObj                 = NULL;
Klass* Universe::_intArrayKlassObj                  = NULL;
Klass* Universe::_shortArrayKlassObj                = NULL;
Klass* Universe::_longArrayKlassObj                 = NULL;
Klass* Universe::_singleArrayKlassObj               = NULL;
Klass* Universe::_doubleArrayKlassObj               = NULL;
Klass* Universe::_typeArrayKlassObjs[T_VOID+1]      = { NULL /*, NULL...*/ };
Klass* Universe::_objectArrayKlassObj               = NULL;
oop Universe::_int_mirror                             = NULL;
oop Universe::_float_mirror                           = NULL;
oop Universe::_double_mirror                          = NULL;
oop Universe::_byte_mirror                            = NULL;
oop Universe::_bool_mirror                            = NULL;
oop Universe::_char_mirror                            = NULL;
oop Universe::_long_mirror                            = NULL;
oop Universe::_short_mirror                           = NULL;
oop Universe::_void_mirror                            = NULL;
oop Universe::_mirrors[T_VOID+1]                      = { NULL /*, NULL...*/ };
oop Universe::_main_thread_group                      = NULL;
oop Universe::_system_thread_group                    = NULL;
objArrayOop Universe::_the_empty_class_klass_array    = NULL;
Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
oop Universe::_the_null_string                        = NULL;
oop Universe::_the_min_jint_string                    = NULL;
LatestMethodCache* Universe::_finalizer_register_cache = NULL;
LatestMethodCache* Universe::_loader_addClass_cache    = NULL;
LatestMethodCache* Universe::_pd_implies_cache         = NULL;
oop Universe::_out_of_memory_error_java_heap          = NULL;
oop Universe::_out_of_memory_error_metaspace          = NULL;
oop Universe::_out_of_memory_error_class_metaspace    = NULL;
oop Universe::_out_of_memory_error_array_size         = NULL;
oop Universe::_out_of_memory_error_gc_overhead_limit  = NULL;
objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
bool Universe::_verify_in_progress                    = false;
oop Universe::_null_ptr_exception_instance            = NULL;
oop Universe::_arithmetic_exception_instance          = NULL;
oop Universe::_virtual_machine_error_instance         = NULL;
oop Universe::_vm_exception                           = NULL;
Array<int>* Universe::_the_empty_int_array            = NULL;
Array<u2>* Universe::_the_empty_short_array           = NULL;
Array<Klass*>* Universe::_the_empty_klass_array     = NULL;
Array<Method*>* Universe::_the_empty_method_array   = NULL;

// These variables are guarded by FullGCALot_lock.
debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
debug_only(int Universe::_fullgc_alot_dummy_next      = 0;)

// Heap
int             Universe::_verify_count = 0;

int             Universe::_base_vtable_size = 0;
bool            Universe::_bootstrapping = false;
bool            Universe::_fully_initialized = false;

size_t          Universe::_heap_capacity_at_last_gc;
size_t          Universe::_heap_used_at_last_gc = 0;

CollectedHeap*  Universe::_collectedHeap = NULL;

NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
address Universe::_narrow_ptrs_base;

void Universe::basic_type_classes_do(void f(Klass*)) {
  f(boolArrayKlassObj());
  f(byteArrayKlassObj());
  f(charArrayKlassObj());
  f(intArrayKlassObj());
  f(shortArrayKlassObj());
  f(longArrayKlassObj());
  f(singleArrayKlassObj());
  f(doubleArrayKlassObj());
}

void Universe::oops_do(OopClosure* f, bool do_all) {

  f->do_oop((oop*) &_int_mirror);
  f->do_oop((oop*) &_float_mirror);
  f->do_oop((oop*) &_double_mirror);
  f->do_oop((oop*) &_byte_mirror);
  f->do_oop((oop*) &_bool_mirror);
  f->do_oop((oop*) &_char_mirror);
  f->do_oop((oop*) &_long_mirror);
  f->do_oop((oop*) &_short_mirror);
  f->do_oop((oop*) &_void_mirror);

  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    f->do_oop((oop*) &_mirrors[i]);
  }
  assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");

  f->do_oop((oop*)&_the_empty_class_klass_array);
  f->do_oop((oop*)&_the_null_string);
  f->do_oop((oop*)&_the_min_jint_string);
  f->do_oop((oop*)&_out_of_memory_error_java_heap);
  f->do_oop((oop*)&_out_of_memory_error_metaspace);
  f->do_oop((oop*)&_out_of_memory_error_class_metaspace);
  f->do_oop((oop*)&_out_of_memory_error_array_size);
  f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
  f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
  f->do_oop((oop*)&_null_ptr_exception_instance);
  f->do_oop((oop*)&_arithmetic_exception_instance);
  f->do_oop((oop*)&_virtual_machine_error_instance);
  f->do_oop((oop*)&_main_thread_group);
  f->do_oop((oop*)&_system_thread_group);
  f->do_oop((oop*)&_vm_exception);
  debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
}

// Serialize metadata in and out of CDS archive, not oops.
void Universe::serialize(SerializeClosure* f, bool do_all) {

  f->do_ptr((void**)&_boolArrayKlassObj);
  f->do_ptr((void**)&_byteArrayKlassObj);
  f->do_ptr((void**)&_charArrayKlassObj);
  f->do_ptr((void**)&_intArrayKlassObj);
  f->do_ptr((void**)&_shortArrayKlassObj);
  f->do_ptr((void**)&_longArrayKlassObj);
  f->do_ptr((void**)&_singleArrayKlassObj);
  f->do_ptr((void**)&_doubleArrayKlassObj);
  f->do_ptr((void**)&_objectArrayKlassObj);

  {
    for (int i = 0; i < T_VOID+1; i++) {
      if (_typeArrayKlassObjs[i] != NULL) {
        assert(i >= T_BOOLEAN, "checking");
        f->do_ptr((void**)&_typeArrayKlassObjs[i]);
      } else if (do_all) {
        f->do_ptr((void**)&_typeArrayKlassObjs[i]);
      }
    }
  }

  f->do_ptr((void**)&_the_array_interfaces_array);
  f->do_ptr((void**)&_the_empty_int_array);
  f->do_ptr((void**)&_the_empty_short_array);
  f->do_ptr((void**)&_the_empty_method_array);
  f->do_ptr((void**)&_the_empty_klass_array);
  _finalizer_register_cache->serialize(f);
  _loader_addClass_cache->serialize(f);
  _pd_implies_cache->serialize(f);
}

void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
  if (size < alignment || size % alignment != 0) {
    vm_exit_during_initialization(
      err_msg("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment));
  }
}

void initialize_basic_type_klass(Klass* k, TRAPS) {
  Klass* ok = SystemDictionary::Object_klass();
  if (UseSharedSpaces) {
    assert(k->super() == ok, "u3");
    k->restore_unshareable_info(CHECK);
  } else {
    k->initialize_supers(ok, CHECK);
  }
  k->append_to_sibling_list();
}

void Universe::genesis(TRAPS) {
  ResourceMark rm;

  { FlagSetting fs(_bootstrapping, true);

    { MutexLocker mc(Compile_lock);

      // determine base vtable size; without that we cannot create the array klasses
      compute_base_vtable_size();

      if (!UseSharedSpaces) {
        _boolArrayKlassObj      = TypeArrayKlass::create_klass(T_BOOLEAN, sizeof(jboolean), CHECK);
        _charArrayKlassObj      = TypeArrayKlass::create_klass(T_CHAR,    sizeof(jchar),    CHECK);
        _singleArrayKlassObj    = TypeArrayKlass::create_klass(T_FLOAT,   sizeof(jfloat),   CHECK);
        _doubleArrayKlassObj    = TypeArrayKlass::create_klass(T_DOUBLE,  sizeof(jdouble),  CHECK);
        _byteArrayKlassObj      = TypeArrayKlass::create_klass(T_BYTE,    sizeof(jbyte),    CHECK);
        _shortArrayKlassObj     = TypeArrayKlass::create_klass(T_SHORT,   sizeof(jshort),   CHECK);
        _intArrayKlassObj       = TypeArrayKlass::create_klass(T_INT,     sizeof(jint),     CHECK);
        _longArrayKlassObj      = TypeArrayKlass::create_klass(T_LONG,    sizeof(jlong),    CHECK);

        _typeArrayKlassObjs[T_BOOLEAN] = _boolArrayKlassObj;
        _typeArrayKlassObjs[T_CHAR]    = _charArrayKlassObj;
        _typeArrayKlassObjs[T_FLOAT]   = _singleArrayKlassObj;
        _typeArrayKlassObjs[T_DOUBLE]  = _doubleArrayKlassObj;
        _typeArrayKlassObjs[T_BYTE]    = _byteArrayKlassObj;
        _typeArrayKlassObjs[T_SHORT]   = _shortArrayKlassObj;
        _typeArrayKlassObjs[T_INT]     = _intArrayKlassObj;
        _typeArrayKlassObjs[T_LONG]    = _longArrayKlassObj;

        ClassLoaderData* null_cld = ClassLoaderData::the_null_class_loader_data();

        _the_array_interfaces_array = MetadataFactory::new_array<Klass*>(null_cld, 2, NULL, CHECK);
        _the_empty_int_array        = MetadataFactory::new_array<int>(null_cld, 0, CHECK);
        _the_empty_short_array      = MetadataFactory::new_array<u2>(null_cld, 0, CHECK);
        _the_empty_method_array     = MetadataFactory::new_array<Method*>(null_cld, 0, CHECK);
        _the_empty_klass_array      = MetadataFactory::new_array<Klass*>(null_cld, 0, CHECK);
      }
    }

    vmSymbols::initialize(CHECK);

    SystemDictionary::initialize(CHECK);

    Klass* ok = SystemDictionary::Object_klass();

    _the_null_string            = StringTable::intern("null", CHECK);
    _the_min_jint_string        = StringTable::intern("-2147483648", CHECK);

    if (UseSharedSpaces) {
      // Verify shared interfaces array.
      assert(_the_array_interfaces_array->at(0) ==
             SystemDictionary::Cloneable_klass(), "u3");
      assert(_the_array_interfaces_array->at(1) ==
             SystemDictionary::Serializable_klass(), "u3");
    } else {
      // Set up shared interfaces array.  (Do this before supers are set up.)
      _the_array_interfaces_array->at_put(0, SystemDictionary::Cloneable_klass());
      _the_array_interfaces_array->at_put(1, SystemDictionary::Serializable_klass());
    }

    initialize_basic_type_klass(boolArrayKlassObj(), CHECK);
    initialize_basic_type_klass(charArrayKlassObj(), CHECK);
    initialize_basic_type_klass(singleArrayKlassObj(), CHECK);
    initialize_basic_type_klass(doubleArrayKlassObj(), CHECK);
    initialize_basic_type_klass(byteArrayKlassObj(), CHECK);
    initialize_basic_type_klass(shortArrayKlassObj(), CHECK);
    initialize_basic_type_klass(intArrayKlassObj(), CHECK);
    initialize_basic_type_klass(longArrayKlassObj(), CHECK);
  } // end of core bootstrapping

  // Maybe this could be lifted up now that object array can be initialized
  // during the bootstrapping.

  // OLD
  // Initialize _objectArrayKlass after core bootstrapping to make
  // sure the super class is set up properly for _objectArrayKlass.
  // ---
  // NEW
  // Since some of the old system object arrays have been converted to
  // ordinary object arrays, _objectArrayKlass will be loaded when
  // SystemDictionary::initialize(CHECK) is run. See the extra check
  // for Object_klass_loaded in objArrayKlassKlass::allocate_objArray_klass_impl.
  _objectArrayKlassObj = InstanceKlass::
    cast(SystemDictionary::Object_klass())->array_klass(1, CHECK);
  // OLD
  // Add the class to the class hierarchy manually to make sure that
  // its vtable is initialized after core bootstrapping is completed.
  // ---
  // NEW
  // It has already been initialized during bootstrapping.
  _objectArrayKlassObj->append_to_sibling_list();

  // Compute is_jdk version flags.
  // Only 1.3 or later has the java.lang.Shutdown class.
  // Only 1.4 or later has the java.lang.CharSequence interface.
  // Only 1.5 or later has the java.lang.management.MemoryUsage class.
  if (JDK_Version::is_partially_initialized()) {
    uint8_t jdk_version;
    Klass* k = SystemDictionary::resolve_or_null(
        vmSymbols::java_lang_management_MemoryUsage(), THREAD);
    CLEAR_PENDING_EXCEPTION; // ignore exceptions
    if (k == NULL) {
      k = SystemDictionary::resolve_or_null(
          vmSymbols::java_lang_CharSequence(), THREAD);
      CLEAR_PENDING_EXCEPTION; // ignore exceptions
      if (k == NULL) {
        k = SystemDictionary::resolve_or_null(
            vmSymbols::java_lang_Shutdown(), THREAD);
        CLEAR_PENDING_EXCEPTION; // ignore exceptions
        if (k == NULL) {
          jdk_version = 2;
        } else {
          jdk_version = 3;
        }
      } else {
        jdk_version = 4;
      }
    } else {
      jdk_version = 5;
    }
    JDK_Version::fully_initialize(jdk_version);
  }

  #ifdef ASSERT
  if (FullGCALot) {
    // Allocate an array of dummy objects.
    // We'd like these to be at the bottom of the old generation,
    // so that when we free one and then collect,
    // (almost) the whole heap moves
    // and we find out if we actually update all the oops correctly.
    // But we can't allocate directly in the old generation,
    // so we allocate wherever, and hope that the first collection
    // moves these objects to the bottom of the old generation.
    // We can allocate directly in the permanent generation, so we do.
    int size;
    if (UseConcMarkSweepGC) {
      warning("Using +FullGCALot with concurrent mark sweep gc "
              "will not force all objects to relocate");
      size = FullGCALotDummies;
    } else {
      size = FullGCALotDummies * 2;
    }
    objArrayOop    naked_array = oopFactory::new_objArray(SystemDictionary::Object_klass(), size, CHECK);
    objArrayHandle dummy_array(THREAD, naked_array);
    int i = 0;
    while (i < size) {
      // Allocate dummy in old generation
      oop dummy = InstanceKlass::cast(SystemDictionary::Object_klass())->allocate_instance(CHECK);
      dummy_array->obj_at_put(i++, dummy);
    }
    {
      // Only modify the global variable inside the mutex.
      // If we had a race to here, the other dummy_array instances
      // and their elements just get dropped on the floor, which is fine.
      MutexLocker ml(FullGCALot_lock);
      if (_fullgc_alot_dummy_array == NULL) {
        _fullgc_alot_dummy_array = dummy_array();
      }
    }
    assert(i == _fullgc_alot_dummy_array->length(), "just checking");
  }
  #endif

  // Initialize dependency array for null class loader
  ClassLoaderData::the_null_class_loader_data()->init_dependencies(CHECK);

}

// CDS support for patching vtables in metadata in the shared archive.
// All types inherited from Metadata have vtables, but not types inherited
// from MetaspaceObj, because the latter does not have virtual functions.
// If the metadata type has a vtable, it cannot be shared in the read-only
// section of the CDS archive, because the vtable pointer is patched.
static inline void add_vtable(void** list, int* n, void* o, int count) {
  guarantee((*n) < count, "vtable list too small");
  void* vtable = dereference_vptr(o);
  assert(*(void**)(vtable) != NULL, "invalid vtable");
  list[(*n)++] = vtable;
}

void Universe::init_self_patching_vtbl_list(void** list, int count) {
  int n = 0;
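  // Each block below constructs a throwaway instance on the stack purely so
  // that its vptr can be captured; the dummy object itself is never used.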
  { InstanceKlass o;            add_vtable(list, &n, &o, count); }
  { InstanceClassLoaderKlass o; add_vtable(list, &n, &o, count); }
  { InstanceMirrorKlass o;      add_vtable(list, &n, &o, count); }
  { InstanceRefKlass o;         add_vtable(list, &n, &o, count); }
  { TypeArrayKlass o;           add_vtable(list, &n, &o, count); }
  { ObjArrayKlass o;            add_vtable(list, &n, &o, count); }
  { Method o;                   add_vtable(list, &n, &o, count); }
  { ConstantPool o;             add_vtable(list, &n, &o, count); }
}

void Universe::initialize_basic_type_mirrors(TRAPS) {
  assert(_int_mirror == NULL, "basic type mirrors already initialized");
  _int_mirror     =
    java_lang_Class::create_basic_type_mirror("int",     T_INT,     CHECK);
  _float_mirror   =
    java_lang_Class::create_basic_type_mirror("float",   T_FLOAT,   CHECK);
  _double_mirror  =
    java_lang_Class::create_basic_type_mirror("double",  T_DOUBLE,  CHECK);
  _byte_mirror    =
    java_lang_Class::create_basic_type_mirror("byte",    T_BYTE,    CHECK);
  _bool_mirror    =
    java_lang_Class::create_basic_type_mirror("boolean", T_BOOLEAN, CHECK);
  _char_mirror    =
    java_lang_Class::create_basic_type_mirror("char",    T_CHAR,    CHECK);
  _long_mirror    =
    java_lang_Class::create_basic_type_mirror("long",    T_LONG,    CHECK);
  _short_mirror   =
    java_lang_Class::create_basic_type_mirror("short",   T_SHORT,   CHECK);
  _void_mirror    =
    java_lang_Class::create_basic_type_mirror("void",    T_VOID,    CHECK);

  _mirrors[T_INT]     = _int_mirror;
  _mirrors[T_FLOAT]   = _float_mirror;
  _mirrors[T_DOUBLE]  = _double_mirror;
  _mirrors[T_BYTE]    = _byte_mirror;
  _mirrors[T_BOOLEAN] = _bool_mirror;
  _mirrors[T_CHAR]    = _char_mirror;
  _mirrors[T_LONG]    = _long_mirror;
  _mirrors[T_SHORT]   = _short_mirror;
  _mirrors[T_VOID]    = _void_mirror;
  //_mirrors[T_OBJECT]  = InstanceKlass::cast(_object_klass)->java_mirror();
  //_mirrors[T_ARRAY]   = InstanceKlass::cast(_object_klass)->java_mirror();
}

void Universe::fixup_mirrors(TRAPS) {
  // Bootstrap problem: all classes get a mirror (java.lang.Class instance) assigned eagerly,
  // but we cannot do that for classes created before java.lang.Class is loaded. Here we simply
  // walk over permanent objects created so far (mostly classes) and fix up their mirrors. Note
  // that the number of objects allocated at this point is very small.
  assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded");
  HandleMark hm(THREAD);
  // Cache the start of the static fields
  InstanceMirrorKlass::init_offset_of_static_fields();

  GrowableArray <Klass*>* list = java_lang_Class::fixup_mirror_list();
  int list_length = list->length();
  for (int i = 0; i < list_length; i++) {
    Klass* k = list->at(i);
    assert(k->is_klass(), "List should only hold classes");
    EXCEPTION_MARK;
    KlassHandle kh(THREAD, k);
    java_lang_Class::fixup_mirror(kh, CATCH);
  }
  delete java_lang_Class::fixup_mirror_list();
  java_lang_Class::set_fixup_mirror_list(NULL);
}

static bool has_run_finalizers_on_exit = false;

void Universe::run_finalizers_on_exit() {
  if (has_run_finalizers_on_exit) return;
  has_run_finalizers_on_exit = true;

  // Called on VM exit. This ought to be run in a separate thread.
  if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
  {
    PRESERVE_EXCEPTION_MARK;
    KlassHandle finalizer_klass(THREAD, SystemDictionary::Finalizer_klass());
    JavaValue result(T_VOID);
    JavaCalls::call_static(
      &result,
      finalizer_klass,
      vmSymbols::run_finalizers_on_exit_name(),
      vmSymbols::void_method_signature(),
      THREAD
    );
    // Ignore any pending exceptions
    CLEAR_PENDING_EXCEPTION;
  }
}


// initialize_vtable could cause gc if
// 1) we specified true to initialize_vtable, and
// 2) this ran after gc was enabled.
// In case those ever change, we use handles for oops.
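// Note (descriptive): the recursion below walks k's subtree via the
// subklass()/next_sibling() links maintained by append_to_sibling_list(),
// re-initializing the vtable of k and of every subclass.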
void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
  // init vtable of k and all subclasses
  Klass* ko = k_h();
  klassVtable* vt = ko->vtable();
  if (vt) vt->initialize_vtable(false, CHECK);
  if (ko->oop_is_instance()) {
    InstanceKlass* ik = (InstanceKlass*)ko;
    for (KlassHandle s_h(THREAD, ik->subklass());
         s_h() != NULL;
         s_h = KlassHandle(THREAD, s_h()->next_sibling())) {
      reinitialize_vtable_of(s_h, CHECK);
    }
  }
}


void initialize_itable_for_klass(Klass* k, TRAPS) {
  InstanceKlass::cast(k)->itable()->initialize_itable(false, CHECK);
}


void Universe::reinitialize_itables(TRAPS) {
  SystemDictionary::classes_do(initialize_itable_for_klass, CHECK);
}


bool Universe::on_page_boundary(void* addr) {
  return ((uintptr_t) addr) % os::vm_page_size() == 0;
}


bool Universe::should_fill_in_stack_trace(Handle throwable) {
  // Never attempt to fill in the stack trace of preallocated errors that do not have
  // a backtrace. These errors are kept alive forever and may be "re-used" when all
  // preallocated errors with a backtrace have been consumed. We also need to avoid
  // a potential loop which could happen if an out of memory error occurs when attempting
  // to allocate the backtrace.
  return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
          (throwable() != Universe::_out_of_memory_error_metaspace)  &&
          (throwable() != Universe::_out_of_memory_error_class_metaspace)  &&
          (throwable() != Universe::_out_of_memory_error_array_size) &&
          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
}


oop Universe::gen_out_of_memory_error(oop default_err) {
  // generate an out of memory error:
  // - if there is a preallocated error with backtrace available then return it with
  //   a filled in stack trace.
  // - if there are no preallocated errors with backtrace available then return
  //   an error without backtrace.
  int next;
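  // Note (descriptive): Atomic::add() returns the updated value, so 'next'
  // below is the index of the slot this thread just claimed; under contention
  // the counter can briefly go negative, which falls through to the default
  // error path.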
  if (_preallocated_out_of_memory_error_avail_count > 0) {
    next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
    assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
  } else {
    next = -1;
  }
  if (next < 0) {
    // all preallocated errors have been used.
    // return default
    return default_err;
  } else {
    // get the error object at the slot and set it to NULL so that the
    // array isn't keeping it alive anymore.
    oop exc = preallocated_out_of_memory_errors()->obj_at(next);
    assert(exc != NULL, "slot has been used already");
    preallocated_out_of_memory_errors()->obj_at_put(next, NULL);

    // use the message from the default error
    oop msg = java_lang_Throwable::message(default_err);
    assert(msg != NULL, "no message");
    java_lang_Throwable::set_message(exc, msg);

    // populate the stack trace and return it.
    java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(exc);
    return exc;
  }
}

intptr_t Universe::_non_oop_bits = 0;

void* Universe::non_oop_word() {
  // Neither the high bits nor the low bits of this value is allowed
  // to look like (respectively) the high or low bits of a real oop.
  //
  // High and low are CPU-specific notions, but low always includes
  // the low-order bit.  Since oops are always aligned at least mod 4,
  // setting the low-order bit will ensure that the low half of the
  // word will never look like that of a real oop.
  //
  // Using the OS-supplied non-memory-address word (usually 0 or -1)
  // will take care of the high bits, however many there are.
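  //
  // Illustrative example of the computation below: if
  // os::non_memory_address_word() returns 0, the stored value is simply 1;
  // if it returns all ones (-1), the value stays -1. Either way the low bit
  // is set, and since oops are at least 4-byte aligned, no real oop can
  // compare equal to this word.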

  if (_non_oop_bits == 0) {
    _non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
  }

  return (void*)_non_oop_bits;
}

jint universe_init() {
  assert(!Universe::_fully_initialized, "called after initialize_vtables");
  guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
            "LogHeapWordSize is incorrect.");
  guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
  guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
            "oop size is not a multiple of HeapWord size");
  TraceTime timer("Genesis", TraceStartupTime);
  GC_locker::lock();  // do not allow gc during bootstrapping
  JavaClasses::compute_hard_coded_offsets();

  jint status = Universe::initialize_heap();
  if (status != JNI_OK) {
    return status;
  }

  Metaspace::global_initialize();

  // Create memory for metadata.  Must be after initializing heap for
  // DumpSharedSpaces.
  ClassLoaderData::init_null_class_loader_data();

  // We have a heap so create the Method* caches before
  // Metaspace::initialize_shared_spaces() tries to populate them.
  Universe::_finalizer_register_cache = new LatestMethodCache();
  Universe::_loader_addClass_cache    = new LatestMethodCache();
  Universe::_pd_implies_cache         = new LatestMethodCache();

  if (UseSharedSpaces) {
    // Read the data structures supporting the shared spaces (shared
    // system dictionary, symbol table, etc.).  After that, access to
    // the file (other than the mapped regions) is no longer needed, and
    // the file is closed. Closing the file does not affect the
    // currently mapped regions.
    MetaspaceShared::initialize_shared_spaces();
    StringTable::create_table();
  } else {
    SymbolTable::create_table();
    StringTable::create_table();
    ClassLoader::create_package_info_table();
  }

  return JNI_OK;
}

// Choose the heap base address and oop encoding mode
// when compressed oops are used:
// Unscaled  - Use 32-bit oops without encoding when
//     NarrowOopHeapBaseMin + heap_size < 4Gb
// ZeroBased - Use zero based compressed oops with encoding when
//     NarrowOopHeapBaseMin + heap_size < 32Gb
// HeapBased - Use compressed oops with heap base + encoding.

// 4Gb
static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
// 32Gb
// OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
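//
// Worked example (illustrative, assuming the default 8-byte object alignment,
// i.e. LogMinObjAlignmentInBytes == 3, so OopEncodingHeapMax == 4Gb << 3 == 32Gb):
// - a 2Gb heap placed just above HeapBaseMinAddress fits below 4Gb, so a
//   narrow oop can be the raw address (Unscaled: shift 0, base 0);
// - a 20Gb heap no longer fits below 4Gb but fits below 32Gb, so a narrow
//   oop becomes (address >> 3) with base 0 (ZeroBased);
// - a heap that cannot be reserved below 32Gb needs the full decoding
//   base + (narrow_oop << 3) (HeapBased).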

char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
  assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
  assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be");
  assert(is_size_aligned(heap_size, alignment), "Must be");

  uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);

  size_t base = 0;
#ifdef _LP64
  if (UseCompressedOops) {
    assert(mode == UnscaledNarrowOop  ||
           mode == ZeroBasedNarrowOop ||
           mode == HeapBasedNarrowOop, "mode is invalid");
    const size_t total_size = heap_size + heap_base_min_address_aligned;
    // Return specified base for the first request.
    if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
      base = heap_base_min_address_aligned;

    // If the total size is small enough to allow UnscaledNarrowOop then
    // just use UnscaledNarrowOop.
    } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
      if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
          (Universe::narrow_oop_shift() == 0)) {
        // Use 32-bit oops without encoding and
        // place heap's top on the 4Gb boundary
        base = (NarrowOopHeapMax - heap_size);
      } else {
        // Can't reserve with NarrowOopShift == 0
        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
        if (mode == UnscaledNarrowOop ||
            (mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax)) {
          // Use zero based compressed oops with encoding and
          // place heap's top on the 32Gb boundary in case
          // total_size > 4Gb or failed to reserve below 4Gb.
          base = (OopEncodingHeapMax - heap_size);
        }
      }
    } else {
      // UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or
      // HeapBasedNarrowOop encoding was requested.  So, can't reserve below 32Gb.
      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
    }

    // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
    // used in ReservedHeapSpace() constructors.
    // The final values will be set in initialize_heap() below.
    if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {
      // Use zero based compressed oops
      Universe::set_narrow_oop_base(NULL);
      // Don't need guard page for implicit checks in indexed
      // addressing mode with zero based Compressed Oops.
      Universe::set_narrow_oop_use_implicit_null_checks(true);
    } else {
      // Set to a non-NULL value so the ReservedSpace ctor computes
      // the correct no-access prefix.
      // The final value will be set in initialize_heap() below.
      Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
#ifdef _WIN64
      if (UseLargePages) {
        // Cannot allocate guard pages for implicit checks in indexed
        // addressing mode when large pages are specified on windows.
        Universe::set_narrow_oop_use_implicit_null_checks(false);
      }
#endif //  _WIN64
    }
  }
#endif

  assert(is_ptr_aligned((char*)base, alignment), "Must be");
  return (char*)base; // also return NULL (don't care) for 32-bit VM
}

jint Universe::initialize_heap() {

  if (UseParallelGC) {
#if INCLUDE_ALL_GCS
    Universe::_collectedHeap = new ParallelScavengeHeap();
#else  // INCLUDE_ALL_GCS
    fatal("UseParallelGC not supported in this VM.");
#endif // INCLUDE_ALL_GCS

  } else if (UseG1GC) {
#if INCLUDE_ALL_GCS
    G1CollectorPolicy* g1p = new G1CollectorPolicy();
    g1p->initialize_all();
    G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
    Universe::_collectedHeap = g1h;
#else  // INCLUDE_ALL_GCS
 772     fatal("UseG1GC not supported in java kernel vm.");
#endif // INCLUDE_ALL_GCS

  } else {
    GenCollectorPolicy *gc_policy;

    if (UseSerialGC) {
      gc_policy = new MarkSweepPolicy();
    } else if (UseConcMarkSweepGC) {
#if INCLUDE_ALL_GCS
      if (UseAdaptiveSizePolicy) {
        gc_policy = new ASConcurrentMarkSweepPolicy();
      } else {
        gc_policy = new ConcurrentMarkSweepPolicy();
      }
#else  // INCLUDE_ALL_GCS
      fatal("UseConcMarkSweepGC not supported in this VM.");
#endif // INCLUDE_ALL_GCS
    } else { // default old generation
      gc_policy = new MarkSweepPolicy();
    }
    gc_policy->initialize_all();

    Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
  }

  jint status = Universe::heap()->initialize();
  if (status != JNI_OK) {
    return status;
  }

#ifdef _LP64
  if (UseCompressedOops) {
    // Subtract a page because something can get allocated at heap base.
    // This also makes implicit null checking work, because the
    // memory+1 page below heap_base needs to cause a signal.
    // See needs_explicit_null_check.
    // Only set the heap base for compressed oops because it indicates
    // compressed oops for pstack code.
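    //
    // Illustrative example (assumed code shape): a compressed-oop field load
    // decodes as base + (narrow_oop << shift) + field_offset. When narrow_oop
    // is 0 (null), accesses with field offsets smaller than a page land in
    // the unmapped page just below the heap, so the resulting signal can be
    // translated into a NullPointerException instead of an explicit check.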
    bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
    if (verbose) {
      tty->cr();
      tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
                 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
    }
    if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {
      // Can't reserve heap below 32Gb.
      // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
      if (verbose) {
        tty->print(", %s: " PTR_FORMAT,
            narrow_oop_mode_to_string(HeapBasedNarrowOop),
            Universe::narrow_oop_base());
      }
    } else {
      Universe::set_narrow_oop_base(0);
      if (verbose) {
        tty->print(", %s", narrow_oop_mode_to_string(ZeroBasedNarrowOop));
      }
#ifdef _WIN64
      if (!Universe::narrow_oop_use_implicit_null_checks()) {
        // Don't need guard page for implicit checks in indexed addressing
        // mode with zero based Compressed Oops.
        Universe::set_narrow_oop_use_implicit_null_checks(true);
      }
#endif //  _WIN64
      if ((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) {
        // Can't reserve heap below 4Gb.
        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
      } else {
        Universe::set_narrow_oop_shift(0);
        if (verbose) {
          tty->print(", %s", narrow_oop_mode_to_string(UnscaledNarrowOop));
        }
      }
    }

    if (verbose) {
      tty->cr();
      tty->cr();
    }
    Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
  }
  // Universe::narrow_oop_base() is one page below the heap.
  assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
         os::vm_page_size()) ||
         Universe::narrow_oop_base() == NULL, "invalid value");
  assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
         Universe::narrow_oop_shift() == 0, "invalid value");
#endif

  // We will never reach the CATCH below since Exceptions::_throw will cause
  // the VM to exit if an exception is thrown during initialization

  if (UseTLAB) {
    assert(Universe::heap()->supports_tlab_allocation(),
           "Should support thread-local allocation buffers");
    ThreadLocalAllocBuffer::startup_initialization();
  }
  return JNI_OK;
}


// Reserve the Java heap, which is now the same for all GCs.
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
  assert(alignment <= Arguments::conservative_max_heap_alignment(),
      err_msg("actual alignment " SIZE_FORMAT " must be within maximum heap alignment " SIZE_FORMAT,
          alignment, Arguments::conservative_max_heap_alignment()));
  size_t total_reserved = align_size_up(heap_size, alignment);
  assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
      "heap size is too big for compressed oops");

  bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
  assert(!UseLargePages
      || UseParallelGC
      || use_large_pages, "Wrong alignment to use large pages");

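  // Note (descriptive): the reservation below is attempted at up to three
  // preferred addresses, in order of decreasing oop-encoding efficiency:
  // first a base suited to UnscaledNarrowOop, then one for ZeroBasedNarrowOop,
  // and finally NULL (HeapBasedNarrowOop), which lets the OS pick the address.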
  char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);

  ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !total_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by 'java' launcher.
      // Try again to reserve the heap higher.
      addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);

      ReservedHeapSpace total_rs0(total_reserved, alignment,
          use_large_pages, addr);

      if (addr != NULL && !total_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");

        ReservedHeapSpace total_rs1(total_reserved, alignment,
            use_large_pages, addr);
        total_rs = total_rs1;
      } else {
        total_rs = total_rs0;
      }
    }
  }

  if (!total_rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
    return total_rs;
  }

  if (UseCompressedOops) {
    // Universe::initialize_heap() will reset this to NULL if unscaled
    // or zero-based narrow oops are actually used.
    address base = (address)(total_rs.base() - os::vm_page_size());
    Universe::set_narrow_oop_base(base);
  }
  return total_rs;
}


// It's the caller's responsibility to ensure glitch-freedom
// (if required).
void Universe::update_heap_info_at_gc() {
  _heap_capacity_at_last_gc = heap()->capacity();
  _heap_used_at_last_gc     = heap()->used();
}


const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
  switch (mode) {
    case UnscaledNarrowOop:
      return "32-bits Oops";
    case ZeroBasedNarrowOop:
      return "zero based Compressed Oops";
    case HeapBasedNarrowOop:
      return "Compressed Oops with base";
  }

  ShouldNotReachHere();
  return "";
}


Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() {
  if (narrow_oop_base() != 0) {
    return HeapBasedNarrowOop;
  }

  if (narrow_oop_shift() != 0) {
    return ZeroBasedNarrowOop;
  }

  return UnscaledNarrowOop;
}


void universe2_init() {
  EXCEPTION_MARK;
  Universe::genesis(CATCH);
}


// This function is defined in JVM.cpp
extern void initialize_converter_functions();

bool universe_post_init() {
  assert(!is_init_completed(), "Error: initialization not yet completed!");
  Universe::_fully_initialized = true;
  EXCEPTION_MARK;
  { ResourceMark rm;
    Interpreter::initialize();      // needed for interpreter entry points
    if (!UseSharedSpaces) {
      HandleMark hm(THREAD);
      KlassHandle ok_h(THREAD, SystemDictionary::Object_klass());
      Universe::reinitialize_vtable_of(ok_h, CHECK_false);
      Universe::reinitialize_itables(CHECK_false);
    }
  }

  HandleMark hm(THREAD);
  Klass* k;
  instanceKlassHandle k_h;
  // Setup preallocated empty java.lang.Class array
  Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false);

  // Setup preallocated OutOfMemoryError errors
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false);
  k_h = instanceKlassHandle(THREAD, k);
  Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_metaspace = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_class_metaspace = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_gc_overhead_limit =
    k_h->allocate_instance(CHECK_false);

  // Setup preallocated NullPointerException
  // (this is currently used for a cheap & dirty solution in compiler exception handling)
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false);
  Universe::_null_ptr_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
  // Setup preallocated ArithmeticException
  // (this is currently used for a cheap & dirty solution in compiler exception handling)
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ArithmeticException(), true, CHECK_false);
  Universe::_arithmetic_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
  // Virtual Machine Error for when we get into a situation we can't resolve
  k = SystemDictionary::resolve_or_fail(
    vmSymbols::java_lang_VirtualMachineError(), true, CHECK_false);
  bool linked = InstanceKlass::cast(k)->link_class_or_fail(CHECK_false);
  if (!linked) {
    tty->print_cr("Unable to link/verify VirtualMachineError class");
    return false; // initialization failed
  }
  Universe::_virtual_machine_error_instance =
    InstanceKlass::cast(k)->allocate_instance(CHECK_false);

  Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false);

  if (!DumpSharedSpaces) {
    // These are the only Java fields that are currently set during shared space dumping.
    // We prefer to not handle this generally, so we always reinitialize these detail messages.
    Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());

    msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
    msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());

    msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());

    msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());

    msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
    java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());

    // Setup the array of errors that have preallocated backtrace
    k = Universe::_out_of_memory_error_java_heap->klass();
    assert(k->name() == vmSymbols::java_lang_OutOfMemoryError(), "should be out of memory error");
    k_h = instanceKlassHandle(THREAD, k);

    int len = (StackTraceInThrowable) ? (int)PreallocatedOutOfMemoryErrorCount : 0;
    Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(k_h(), len, CHECK_false);
    for (int i = 0; i < len; i++) {
      oop err = k_h->allocate_instance(CHECK_false);
      Handle err_h = Handle(THREAD, err);
      java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
      Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
    }
    Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
  }

  // Setup static method for registering finalizers
  // The finalizer klass must be linked before looking up the method, in
  // case it needs to get rewritten.
  InstanceKlass::cast(SystemDictionary::Finalizer_klass())->link_class(CHECK_false);
  Method* m = InstanceKlass::cast(SystemDictionary::Finalizer_klass())->find_method(
                                  vmSymbols::register_method_name(),
                                  vmSymbols::register_method_signature());
  if (m == NULL || !m->is_static()) {
    tty->print_cr("Unable to link/verify Finalizer.register method");
    return false; // initialization failed (cannot throw exception yet)
  }
  Universe::_finalizer_register_cache->init(
    SystemDictionary::Finalizer_klass(), m);

  // Setup method for registering loaded classes in class loader vector
  InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
  m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
  if (m == NULL || m->is_static()) {
    tty->print_cr("Unable to link/verify ClassLoader.addClass method");
    return false; // initialization failed (cannot throw exception yet)
  }
  Universe::_loader_addClass_cache->init(
    SystemDictionary::ClassLoader_klass(), m);

  // Setup method for checking protection domain
  InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false);
  m = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->
            find_method(vmSymbols::impliesCreateAccessControlContext_name(),
                        vmSymbols::void_boolean_signature());
  // Allow NULL which should only happen with bootstrapping.
  if (m != NULL) {
    if (m->is_static()) {
      // NoSuchMethodException doesn't actually work because it tries to run the
      // <init> function before java_lang_Class is linked. Print error and exit.
      tty->print_cr("ProtectionDomain.impliesCreateAccessControlContext() has the wrong linkage");
      return false; // initialization failed
    }
    Universe::_pd_implies_cache->init(
      SystemDictionary::ProtectionDomain_klass(), m);
  }

  // The following initializes converter functions for serialization in
  // JVM.cpp. If we clean up the StrictMath code above we may want to find
  // a better solution for this as well.
  initialize_converter_functions();

  // This needs to be done before the first scavenge/gc, since
  // it's an input to soft ref clearing policy.
  {
    MutexLocker x(Heap_lock);
    Universe::update_heap_info_at_gc();
  }

  // ("weak") refs processing infrastructure initialization
  Universe::heap()->post_initialize();

  // Initialize performance counters for metaspaces
  MetaspaceCounters::initialize_performance_counters();
  CompressedClassSpaceCounters::initialize_performance_counters();

  MemoryService::add_metaspace_memory_pools();

  GC_locker::unlock();  // allow gc after bootstrapping

  MemoryService::set_universe_heap(Universe::_collectedHeap);
  return true;
}


void Universe::compute_base_vtable_size() {
  _base_vtable_size = ClassLoader::compute_Object_vtable();
}


// %%% The Universe::flush_foo methods belong in CodeCache.

// Flushes compiled methods dependent on dependee.
void Universe::flush_dependents_on(instanceKlassHandle dependee) {
  assert_lock_strong(Compile_lock);

  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (CodeCache::mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

// Flushes compiled methods dependent on a particular CallSite
// instance when its target is different than the given MethodHandle.
void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
  assert_lock_strong(Compile_lock);

  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  CallSiteDepChange changes(call_site(), method_handle());

  // Compute the dependent nmethods that have a reference to a
  // CallSite object.  We use InstanceKlass::mark_dependent_nmethod
  // directly instead of CodeCache::mark_for_deoptimization because we
  // want dependents on the call site class only, not all classes in
  // the ContextStream.
  int marked = 0;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
    marked = call_site_klass->mark_dependent_nmethods(changes);
  }
  if (marked > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (CodeCache::mark_for_evol_deoptimization(ev_k_h) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    CodeCache::make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void Universe::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
1231   // holding the CodeCache_lock.
1232 
1233   // Compute the dependent nmethods
1234   if (CodeCache::mark_for_deoptimization(m_h()) > 0) {
1235     // At least one nmethod has been marked for deoptimization
1236 
1237     // All this already happens inside a VM_Operation, so we'll do all the work here.
1238     // Stuff copied from VM_Deoptimize and modified slightly.
1239 
1240     // We do not want any GCs to happen while we are in the middle of this VM operation
1241     ResourceMark rm;
1242     DeoptimizationMarker dm;
1243 
1244     // Deoptimize all activations depending on marked nmethods
1245     Deoptimization::deoptimize_dependents();
1246 
1247     // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
1248     CodeCache::make_marked_nmethods_not_entrant();
1249   }
1250 }

void Universe::print() {
  print_on(gclog_or_tty);
}

void Universe::print_on(outputStream* st, bool extended) {
  st->print_cr("Heap");
  if (!extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
}

void Universe::print_heap_at_SIGBREAK() {
  if (PrintHeapAtSIGBREAK) {
    MutexLocker hl(Heap_lock);
    print_on(tty);
    tty->cr();
    tty->flush();
  }
}

void Universe::print_heap_before_gc(outputStream* st, bool ignore_extended) {
  st->print_cr("{Heap before GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  if (!PrintHeapAtGCExtended || ignore_extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
}

void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
  st->print_cr("Heap after GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  if (!PrintHeapAtGCExtended || ignore_extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
  st->print_cr("}");
}
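
// Taken together, the two hooks above bracket each collection in the GC log.
// A sketch of the resulting shape, with illustrative counts and the per-space
// detail lines from CollectedHeap::print_on elided:
//
//   {Heap before GC invocations=5 (full 1):
//    ...
//   Heap after GC invocations=6 (full 1):
//    ...
//   }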

void Universe::verify(VerifyOption option, const char* prefix, bool silent) {
  // The use of _verify_in_progress is a temporary workaround for
  // 6320749.  Don't bother with creating a class to set and clear
  // it since it is only used in this method and the control flow is
  // straightforward.
  _verify_in_progress = true;

  COMPILER2_PRESENT(
    assert(!DerivedPointerTable::is_active(),
         "DPT should not be active during verification "
         "(of thread stacks below)");
  )

  ResourceMark rm;
  HandleMark hm;  // Handles created during verification can be zapped
  _verify_count++;

  if (!silent) gclog_or_tty->print(prefix);
  if (!silent) gclog_or_tty->print("[Verifying ");
  if (!silent) gclog_or_tty->print("threads ");
  Threads::verify();
  if (!silent) gclog_or_tty->print("heap ");
  heap()->verify(silent, option);
  if (!silent) gclog_or_tty->print("syms ");
  SymbolTable::verify();
  if (!silent) gclog_or_tty->print("strs ");
  StringTable::verify();
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    if (!silent) gclog_or_tty->print("zone ");
    CodeCache::verify();
  }
  if (!silent) gclog_or_tty->print("dict ");
  SystemDictionary::verify();
#ifndef PRODUCT
  if (!silent) gclog_or_tty->print("cldg ");
  ClassLoaderDataGraph::verify();
#endif
  if (!silent) gclog_or_tty->print("metaspace chunks ");
  MetaspaceAux::verify_free_chunks();
  if (!silent) gclog_or_tty->print("hand ");
  JNIHandles::verify();
  if (!silent) gclog_or_tty->print("C-heap ");
  os::check_heap();
  if (!silent) gclog_or_tty->print("code cache ");
  CodeCache::verify_oops();
  if (!silent) gclog_or_tty->print_cr("]");

  _verify_in_progress = false;
}
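
// With silent == false, the prints above assemble one progress line after the
// caller-supplied prefix; the "cldg " entry appears only in non-product builds:
//
//   [Verifying threads heap syms strs zone dict cldg metaspace chunks hand C-heap code cache ]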

// Oop verification (see MacroAssembler::verify_oop)

static uintptr_t _verify_oop_data[2]   = {0, (uintptr_t)-1};
static uintptr_t _verify_klass_data[2] = {0, (uintptr_t)-1};


#ifndef PRODUCT

static void calculate_verify_data(uintptr_t verify_data[2],
                                  HeapWord* low_boundary,
                                  HeapWord* high_boundary) {
  assert(low_boundary < high_boundary, "bad interval");

  // decide which low-order bits we require to be clear:
  size_t alignSize = MinObjAlignmentInBytes;
  size_t min_object_size = CollectedHeap::min_fill_size();

  // make an inclusive limit:
  uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
  uintptr_t min = (uintptr_t)low_boundary;
  assert(min < max, "bad interval");
  uintptr_t diff = max ^ min;

  // throw away enough low-order bits to make the diff vanish
  uintptr_t mask = (uintptr_t)(-1);
  while ((mask & diff) != 0)
    mask <<= 1;
  uintptr_t bits = (min & mask);
  assert(bits == (max & mask), "correct mask");
  // check an intermediate value between min and max, just to make sure:
  assert(bits == ((min + (max-min)/2) & mask), "correct mask");

  // require address alignment, too:
  mask |= (alignSize - 1);

  if (!(verify_data[0] == 0 && verify_data[1] == (uintptr_t)-1)) {
    assert(verify_data[0] == mask && verify_data[1] == bits, "mask stability");
  }
  verify_data[0] = mask;
  verify_data[1] = bits;
}
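
// A worked example of the computation above, with hypothetical 32-bit numbers.
// For low_boundary 0x20000000, high_boundary 0x30000000, and a 16-byte
// minimum fill:
//   min  = 0x20000000,  max = 0x2ffffff0,  diff = min ^ max = 0x0ffffff0
// Shifting the all-ones mask left until (mask & diff) == 0 leaves
//   mask = 0xf0000000,  bits = (min & mask) = 0x20000000
// and folding in 8-byte alignment gives mask = 0xf0000007.  Any properly
// aligned address inside the interval then satisfies (addr & mask) == bits,
// a single and-compare.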

// Oop verification (see MacroAssembler::verify_oop)

uintptr_t Universe::verify_oop_mask() {
  MemRegion m = heap()->reserved_region();
  calculate_verify_data(_verify_oop_data,
                        m.start(),
                        m.end());
  return _verify_oop_data[0];
}


uintptr_t Universe::verify_oop_bits() {
  verify_oop_mask();
  return _verify_oop_data[1];
}

uintptr_t Universe::verify_mark_mask() {
  return markOopDesc::lock_mask_in_place;
}

uintptr_t Universe::verify_mark_bits() {
  intptr_t mask = verify_mark_mask();
  intptr_t bits = (intptr_t)markOopDesc::prototype();
  assert((bits & ~mask) == 0, "no stray header bits");
  return bits;
}
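
// A minimal sketch of how these mask/bits pairs are consumed (assumed to
// mirror the checks MacroAssembler::verify_oop emits; the local names are
// illustrative only):
//
//   uintptr_t addr = (uintptr_t)o;
//   bool plausible_oop  = (addr & Universe::verify_oop_mask())
//                             == Universe::verify_oop_bits();
//   bool plausible_mark = ((uintptr_t)o->mark() & Universe::verify_mark_mask())
//                             == Universe::verify_mark_bits();
//
// Each test is a single and-compare, cheap enough to generate inline wherever
// oop verification is requested.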
#endif // PRODUCT


void Universe::compute_verify_oop_data() {
  verify_oop_mask();
  verify_oop_bits();
  verify_mark_mask();
  verify_mark_bits();
}


void LatestMethodCache::init(Klass* k, Method* m) {
  if (!UseSharedSpaces) {
    _klass = k;
  }
#ifndef PRODUCT
  else {
    // sharing initialization should have already set up _klass
    assert(_klass != NULL, "just checking");
  }
#endif

  _method_idnum = m->method_idnum();
  assert(_method_idnum >= 0, "sanity check");
}


Method* LatestMethodCache::get_method() {
  if (klass() == NULL) return NULL;
  InstanceKlass* ik = InstanceKlass::cast(klass());
  Method* m = ik->method_with_idnum(method_idnum());
  assert(m != NULL, "sanity check");
  return m;
}
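
// A minimal usage sketch (illustrative; callers normally reach a cache
// through a Universe accessor such as finalizer_register_method()):
//
//   LatestMethodCache cache;
//   cache.init(ik, m);                    // record the klass and method idnum
//   Method* latest = cache.get_method();  // re-resolve by idnum on each use
//
// Caching the idnum instead of the Method* means get_method() keeps returning
// the current version even after RedefineClasses has replaced the underlying
// Method*.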


#ifdef ASSERT
// Release dummy object(s) at bottom of heap
bool Universe::release_fullgc_alot_dummy() {
  MutexLocker ml(FullGCALot_lock);
  if (_fullgc_alot_dummy_array != NULL) {
    if (_fullgc_alot_dummy_next >= _fullgc_alot_dummy_array->length()) {
      // No more dummies to release; release the entire array instead
      _fullgc_alot_dummy_array = NULL;
      return false;
    }
    if (!UseConcMarkSweepGC) {
      // Release dummy at bottom of old generation
      _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
    }
    // Release another dummy (historically the one at the bottom of the
    // permanent generation)
    _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
  }
  return true;
}

#endif // ASSERT