1 /*
   2  * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoader.hpp"
  27 #include "classfile/classLoaderData.hpp"
  28 #include "classfile/javaClasses.hpp"
  29 #include "classfile/symbolTable.hpp"
  30 #include "classfile/systemDictionary.hpp"
  31 #include "classfile/vmSymbols.hpp"
  32 #include "code/codeCache.hpp"
  33 #include "code/dependencies.hpp"
  34 #include "gc_interface/collectedHeap.inline.hpp"
  35 #include "interpreter/interpreter.hpp"
  36 #include "memory/cardTableModRefBS.hpp"
  37 #include "memory/gcLocker.inline.hpp"
  38 #include "memory/genCollectedHeap.hpp"
  39 #include "memory/genRemSet.hpp"
  40 #include "memory/generation.hpp"
  41 #include "memory/metadataFactory.hpp"
  42 #include "memory/metaspaceShared.hpp"
  43 #include "memory/oopFactory.hpp"
  44 #include "memory/space.hpp"
  45 #include "memory/universe.hpp"
  46 #include "memory/universe.inline.hpp"
  47 #include "oops/constantPool.hpp"
  48 #include "oops/instanceClassLoaderKlass.hpp"
  49 #include "oops/instanceKlass.hpp"
  50 #include "oops/instanceMirrorKlass.hpp"
  51 #include "oops/instanceRefKlass.hpp"
  52 #include "oops/oop.inline.hpp"
  53 #include "oops/typeArrayKlass.hpp"
  54 #include "prims/jvmtiRedefineClassesTrace.hpp"
  55 #include "runtime/aprofiler.hpp"
  56 #include "runtime/arguments.hpp"
  57 #include "runtime/deoptimization.hpp"
  58 #include "runtime/fprofiler.hpp"
  59 #include "runtime/handles.inline.hpp"
  60 #include "runtime/init.hpp"
  61 #include "runtime/java.hpp"
  62 #include "runtime/javaCalls.hpp"
  63 #include "runtime/sharedRuntime.hpp"
  64 #include "runtime/synchronizer.hpp"
  65 #include "runtime/timer.hpp"
  66 #include "runtime/vm_operations.hpp"
  67 #include "services/memoryService.hpp"
  68 #include "utilities/copy.hpp"
  69 #include "utilities/events.hpp"
  70 #include "utilities/hashtable.inline.hpp"
  71 #include "utilities/preserveException.hpp"
  72 #ifdef TARGET_OS_FAMILY_linux
  73 # include "thread_linux.inline.hpp"
  74 #endif
  75 #ifdef TARGET_OS_FAMILY_solaris
  76 # include "thread_solaris.inline.hpp"
  77 #endif
  78 #ifdef TARGET_OS_FAMILY_windows
  79 # include "thread_windows.inline.hpp"
  80 #endif
  81 #ifdef TARGET_OS_FAMILY_bsd
  82 # include "thread_bsd.inline.hpp"
  83 #endif
  84 #ifndef SERIALGC
  85 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
  86 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
  87 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  88 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  89 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
  90 #endif
  91 
  92 // Known objects
  93 Klass* Universe::_boolArrayKlassObj                 = NULL;
  94 Klass* Universe::_byteArrayKlassObj                 = NULL;
  95 Klass* Universe::_charArrayKlassObj                 = NULL;
  96 Klass* Universe::_intArrayKlassObj                  = NULL;
  97 Klass* Universe::_shortArrayKlassObj                = NULL;
  98 Klass* Universe::_longArrayKlassObj                 = NULL;
  99 Klass* Universe::_singleArrayKlassObj               = NULL;
 100 Klass* Universe::_doubleArrayKlassObj               = NULL;
 101 Klass* Universe::_typeArrayKlassObjs[T_VOID+1]      = { NULL /*, NULL...*/ };
 102 Klass* Universe::_objectArrayKlassObj               = NULL;
 103 oop Universe::_int_mirror                             = NULL;
 104 oop Universe::_float_mirror                           = NULL;
 105 oop Universe::_double_mirror                          = NULL;
 106 oop Universe::_byte_mirror                            = NULL;
 107 oop Universe::_bool_mirror                            = NULL;
 108 oop Universe::_char_mirror                            = NULL;
 109 oop Universe::_long_mirror                            = NULL;
 110 oop Universe::_short_mirror                           = NULL;
 111 oop Universe::_void_mirror                            = NULL;
 112 oop Universe::_mirrors[T_VOID+1]                      = { NULL /*, NULL...*/ };
 113 oop Universe::_main_thread_group                      = NULL;
 114 oop Universe::_system_thread_group                    = NULL;
 115 objArrayOop Universe::_the_empty_class_klass_array    = NULL;
 116 Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
 117 oop Universe::_the_null_string                        = NULL;
 118 oop Universe::_the_min_jint_string                   = NULL;
 119 LatestMethodOopCache* Universe::_finalizer_register_cache = NULL;
 120 LatestMethodOopCache* Universe::_loader_addClass_cache    = NULL;
 121 ActiveMethodOopsCache* Universe::_reflect_invoke_cache    = NULL;
 122 oop Universe::_out_of_memory_error_java_heap          = NULL;
 123 oop Universe::_out_of_memory_error_perm_gen           = NULL;
 124 oop Universe::_out_of_memory_error_array_size         = NULL;
 125 oop Universe::_out_of_memory_error_gc_overhead_limit  = NULL;
 126 objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
 127 volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
 128 bool Universe::_verify_in_progress                    = false;
 129 oop Universe::_null_ptr_exception_instance            = NULL;
 130 oop Universe::_arithmetic_exception_instance          = NULL;
 131 oop Universe::_virtual_machine_error_instance         = NULL;
 132 oop Universe::_vm_exception                           = NULL;
 133 Array<int>* Universe::_the_empty_int_array            = NULL;
 134 Array<u2>* Universe::_the_empty_short_array           = NULL;
 135 Array<Klass*>* Universe::_the_empty_klass_array     = NULL;
 136 Array<Method*>* Universe::_the_empty_method_array   = NULL;
 137 
 138 // These variables are guarded by FullGCALot_lock.
 139 debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
 140 debug_only(int Universe::_fullgc_alot_dummy_next      = 0;)
 141 
 142 // Heap
 143 int             Universe::_verify_count = 0;
 144 
 145 int             Universe::_base_vtable_size = 0;
 146 bool            Universe::_bootstrapping = false;
 147 bool            Universe::_fully_initialized = false;
 148 
 149 size_t          Universe::_heap_capacity_at_last_gc;
 150 size_t          Universe::_heap_used_at_last_gc = 0;
 151 
 152 CollectedHeap*  Universe::_collectedHeap = NULL;
 153 
 154 NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
 155 NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
 156 address Universe::_narrow_ptrs_base;
 157 
 158 
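      // Iterate f over the klasses of the primitive (basic type) arrays.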
 159 void Universe::basic_type_classes_do(void f(Klass*)) {
 160   f(boolArrayKlassObj());
 161   f(byteArrayKlassObj());
 162   f(charArrayKlassObj());
 163   f(intArrayKlassObj());
 164   f(shortArrayKlassObj());
 165   f(longArrayKlassObj());
 166   f(singleArrayKlassObj());
 167   f(doubleArrayKlassObj());
 168 }
 169 
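      // Apply the closure f to the Universe's strong oop roots: the basic type
      // mirrors, preallocated exception instances, thread groups, and similar
      // well-known objects.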
 170 void Universe::oops_do(OopClosure* f, bool do_all) {
 171 
 172   f->do_oop((oop*) &_int_mirror);
 173   f->do_oop((oop*) &_float_mirror);
 174   f->do_oop((oop*) &_double_mirror);
 175   f->do_oop((oop*) &_byte_mirror);
 176   f->do_oop((oop*) &_bool_mirror);
 177   f->do_oop((oop*) &_char_mirror);
 178   f->do_oop((oop*) &_long_mirror);
 179   f->do_oop((oop*) &_short_mirror);
 180   f->do_oop((oop*) &_void_mirror);
 181 
 182   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
 183     f->do_oop((oop*) &_mirrors[i]);
 184   }
 185   assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");
 186 
 187   f->do_oop((oop*)&_the_empty_class_klass_array);
 188   f->do_oop((oop*)&_the_null_string);
 189   f->do_oop((oop*)&_the_min_jint_string);
 190   f->do_oop((oop*)&_out_of_memory_error_java_heap);
 191   f->do_oop((oop*)&_out_of_memory_error_perm_gen);
 192   f->do_oop((oop*)&_out_of_memory_error_array_size);
 193   f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
  194   f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
 195   f->do_oop((oop*)&_null_ptr_exception_instance);
 196   f->do_oop((oop*)&_arithmetic_exception_instance);
 197   f->do_oop((oop*)&_virtual_machine_error_instance);
 198   f->do_oop((oop*)&_main_thread_group);
 199   f->do_oop((oop*)&_system_thread_group);
 200   f->do_oop((oop*)&_vm_exception);
 201   debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
 202 }
 203 
 204 // Serialize metadata in and out of CDS archive, not oops.
 205 void Universe::serialize(SerializeClosure* f, bool do_all) {
 206 
 207   f->do_ptr((void**)&_boolArrayKlassObj);
 208   f->do_ptr((void**)&_byteArrayKlassObj);
 209   f->do_ptr((void**)&_charArrayKlassObj);
 210   f->do_ptr((void**)&_intArrayKlassObj);
 211   f->do_ptr((void**)&_shortArrayKlassObj);
 212   f->do_ptr((void**)&_longArrayKlassObj);
 213   f->do_ptr((void**)&_singleArrayKlassObj);
 214   f->do_ptr((void**)&_doubleArrayKlassObj);
 215   f->do_ptr((void**)&_objectArrayKlassObj);
 216 
 217   {
 218     for (int i = 0; i < T_VOID+1; i++) {
 219       if (_typeArrayKlassObjs[i] != NULL) {
 220         assert(i >= T_BOOLEAN, "checking");
 221         f->do_ptr((void**)&_typeArrayKlassObjs[i]);
 222       } else if (do_all) {
 223         f->do_ptr((void**)&_typeArrayKlassObjs[i]);
 224       }
 225     }
 226   }
 227 
 228   f->do_ptr((void**)&_the_array_interfaces_array);
 229   f->do_ptr((void**)&_the_empty_int_array);
 230   f->do_ptr((void**)&_the_empty_short_array);
 231   f->do_ptr((void**)&_the_empty_method_array);
 232   f->do_ptr((void**)&_the_empty_klass_array);
 233   _finalizer_register_cache->serialize(f);
 234   _loader_addClass_cache->serialize(f);
 235   _reflect_invoke_cache->serialize(f);
 236 }
 237 
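      // Exit during initialization if size is smaller than, or not a multiple of,
      // the required alignment.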
 238 void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
 239   if (size < alignment || size % alignment != 0) {
 240     ResourceMark rm;
 241     stringStream st;
  242     st.print("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment);
 243     char* error = st.as_string();
 244     vm_exit_during_initialization(error);
 245   }
 246 }
 247 
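      // Finish a basic type array klass: wire up its super (java.lang.Object),
      // either by restoring it from the shared archive or by computing it, and
      // add the klass to the sibling list.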
 248 void initialize_basic_type_klass(Klass* k, TRAPS) {
 249   Klass* ok = SystemDictionary::Object_klass();
 250   if (UseSharedSpaces) {
 251     assert(k->super() == ok, "u3");
 252     k->restore_unshareable_info(CHECK);
 253   } else {
 254     k->initialize_supers(ok, CHECK);
 255   }
 256   k->append_to_sibling_list();
 257 }
 258 
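      // Bootstrap the VM's type system: create (or restore from the CDS archive)
      // the basic type array klasses and the shared metadata arrays, initialize
      // the system dictionary, and set up the array interfaces and the object
      // array klass.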
 259 void Universe::genesis(TRAPS) {
 260   ResourceMark rm;
 261 
 262   { FlagSetting fs(_bootstrapping, true);
 263 
 264     { MutexLocker mc(Compile_lock);
 265 
 266       // determine base vtable size; without that we cannot create the array klasses
 267       compute_base_vtable_size();
 268 
 269       if (!UseSharedSpaces) {
 270         _boolArrayKlassObj      = TypeArrayKlass::create_klass(T_BOOLEAN, sizeof(jboolean), CHECK);
 271         _charArrayKlassObj      = TypeArrayKlass::create_klass(T_CHAR,    sizeof(jchar),    CHECK);
 272         _singleArrayKlassObj    = TypeArrayKlass::create_klass(T_FLOAT,   sizeof(jfloat),   CHECK);
 273         _doubleArrayKlassObj    = TypeArrayKlass::create_klass(T_DOUBLE,  sizeof(jdouble),  CHECK);
 274         _byteArrayKlassObj      = TypeArrayKlass::create_klass(T_BYTE,    sizeof(jbyte),    CHECK);
 275         _shortArrayKlassObj     = TypeArrayKlass::create_klass(T_SHORT,   sizeof(jshort),   CHECK);
 276         _intArrayKlassObj       = TypeArrayKlass::create_klass(T_INT,     sizeof(jint),     CHECK);
 277         _longArrayKlassObj      = TypeArrayKlass::create_klass(T_LONG,    sizeof(jlong),    CHECK);
 278 
 279         _typeArrayKlassObjs[T_BOOLEAN] = _boolArrayKlassObj;
 280         _typeArrayKlassObjs[T_CHAR]    = _charArrayKlassObj;
 281         _typeArrayKlassObjs[T_FLOAT]   = _singleArrayKlassObj;
 282         _typeArrayKlassObjs[T_DOUBLE]  = _doubleArrayKlassObj;
 283         _typeArrayKlassObjs[T_BYTE]    = _byteArrayKlassObj;
 284         _typeArrayKlassObjs[T_SHORT]   = _shortArrayKlassObj;
 285         _typeArrayKlassObjs[T_INT]     = _intArrayKlassObj;
 286         _typeArrayKlassObjs[T_LONG]    = _longArrayKlassObj;
 287 
 288         ClassLoaderData* null_cld = ClassLoaderData::the_null_class_loader_data();
 289 
 290         _the_array_interfaces_array = MetadataFactory::new_array<Klass*>(null_cld, 2, NULL, CHECK);
 291         _the_empty_int_array        = MetadataFactory::new_array<int>(null_cld, 0, CHECK);
 292         _the_empty_short_array      = MetadataFactory::new_array<u2>(null_cld, 0, CHECK);
 293         _the_empty_method_array     = MetadataFactory::new_array<Method*>(null_cld, 0, CHECK);
 294         _the_empty_klass_array      = MetadataFactory::new_array<Klass*>(null_cld, 0, CHECK);
 295       }
 296     }
 297 
 298     vmSymbols::initialize(CHECK);
 299 
 300     SystemDictionary::initialize(CHECK);
 301 
 302     Klass* ok = SystemDictionary::Object_klass();
 303 
 304     _the_null_string            = StringTable::intern("null", CHECK);
 305     _the_min_jint_string       = StringTable::intern("-2147483648", CHECK);
 306 
 307     if (UseSharedSpaces) {
 308       // Verify shared interfaces array.
 309       assert(_the_array_interfaces_array->at(0) ==
 310              SystemDictionary::Cloneable_klass(), "u3");
 311       assert(_the_array_interfaces_array->at(1) ==
 312              SystemDictionary::Serializable_klass(), "u3");
 313     } else {
 314       // Set up shared interfaces array.  (Do this before supers are set up.)
 315       _the_array_interfaces_array->at_put(0, SystemDictionary::Cloneable_klass());
 316       _the_array_interfaces_array->at_put(1, SystemDictionary::Serializable_klass());
 317     }
 318 
 319     initialize_basic_type_klass(boolArrayKlassObj(), CHECK);
 320     initialize_basic_type_klass(charArrayKlassObj(), CHECK);
 321     initialize_basic_type_klass(singleArrayKlassObj(), CHECK);
 322     initialize_basic_type_klass(doubleArrayKlassObj(), CHECK);
 323     initialize_basic_type_klass(byteArrayKlassObj(), CHECK);
 324     initialize_basic_type_klass(shortArrayKlassObj(), CHECK);
 325     initialize_basic_type_klass(intArrayKlassObj(), CHECK);
 326     initialize_basic_type_klass(longArrayKlassObj(), CHECK);
 327   } // end of core bootstrapping
 328 
  329 // Maybe this could be lifted up now that the object array can be initialized
  330 // during bootstrapping.
 331 
 332   // OLD
  333 // Initialize _objectArrayKlass after core bootstrapping to make
 334   // sure the super class is set up properly for _objectArrayKlass.
 335   // ---
 336   // NEW
 337   // Since some of the old system object arrays have been converted to
 338   // ordinary object arrays, _objectArrayKlass will be loaded when
 339   // SystemDictionary::initialize(CHECK); is run. See the extra check
 340   // for Object_klass_loaded in objArrayKlassKlass::allocate_objArray_klass_impl.
 341   _objectArrayKlassObj = InstanceKlass::
 342     cast(SystemDictionary::Object_klass())->array_klass(1, CHECK);
 343   // OLD
 344   // Add the class to the class hierarchy manually to make sure that
 345   // its vtable is initialized after core bootstrapping is completed.
 346   // ---
  347 // NEW
  348 // It has already been initialized.
 349   Klass::cast(_objectArrayKlassObj)->append_to_sibling_list();
 350 
 351   // Compute is_jdk version flags.
 352   // Only 1.3 or later has the java.lang.Shutdown class.
 353   // Only 1.4 or later has the java.lang.CharSequence interface.
 354   // Only 1.5 or later has the java.lang.management.MemoryUsage class.
 355   if (JDK_Version::is_partially_initialized()) {
 356     uint8_t jdk_version;
 357     Klass* k = SystemDictionary::resolve_or_null(
 358         vmSymbols::java_lang_management_MemoryUsage(), THREAD);
 359     CLEAR_PENDING_EXCEPTION; // ignore exceptions
 360     if (k == NULL) {
 361       k = SystemDictionary::resolve_or_null(
 362           vmSymbols::java_lang_CharSequence(), THREAD);
 363       CLEAR_PENDING_EXCEPTION; // ignore exceptions
 364       if (k == NULL) {
 365         k = SystemDictionary::resolve_or_null(
 366             vmSymbols::java_lang_Shutdown(), THREAD);
 367         CLEAR_PENDING_EXCEPTION; // ignore exceptions
 368         if (k == NULL) {
 369           jdk_version = 2;
 370         } else {
 371           jdk_version = 3;
 372         }
 373       } else {
 374         jdk_version = 4;
 375       }
 376     } else {
 377       jdk_version = 5;
 378     }
 379     JDK_Version::fully_initialize(jdk_version);
 380   }
 381 
 382   #ifdef ASSERT
 383   if (FullGCALot) {
 384     // Allocate an array of dummy objects.
 385     // We'd like these to be at the bottom of the old generation,
 386     // so that when we free one and then collect,
 387     // (almost) the whole heap moves
 388     // and we find out if we actually update all the oops correctly.
 389     // But we can't allocate directly in the old generation,
 390     // so we allocate wherever, and hope that the first collection
 391     // moves these objects to the bottom of the old generation.
 392     // We can allocate directly in the permanent generation, so we do.
 393     int size;
 394     if (UseConcMarkSweepGC) {
 395       warning("Using +FullGCALot with concurrent mark sweep gc "
 396               "will not force all objects to relocate");
 397       size = FullGCALotDummies;
 398     } else {
 399       size = FullGCALotDummies * 2;
 400     }
 401     objArrayOop    naked_array = oopFactory::new_objArray(SystemDictionary::Object_klass(), size, CHECK);
 402     objArrayHandle dummy_array(THREAD, naked_array);
 403     int i = 0;
 404     while (i < size) {
  405       // Allocate dummy in old generation
 406       oop dummy = InstanceKlass::cast(SystemDictionary::Object_klass())->allocate_instance(CHECK);
 407       dummy_array->obj_at_put(i++, dummy);
 408     }
 409     {
 410       // Only modify the global variable inside the mutex.
 411       // If we had a race to here, the other dummy_array instances
 412       // and their elements just get dropped on the floor, which is fine.
 413       MutexLocker ml(FullGCALot_lock);
 414       if (_fullgc_alot_dummy_array == NULL) {
 415         _fullgc_alot_dummy_array = dummy_array();
 416       }
 417     }
 418     assert(i == _fullgc_alot_dummy_array->length(), "just checking");
 419   }
 420   #endif
 421 }
 422 
 423 // CDS support for patching vtables in metadata in the shared archive.
 424 // All types inherited from Metadata have vtables, but not types inherited
 425 // from MetaspaceObj, because the latter does not have virtual functions.
 426 // If the metadata type has a vtable, it cannot be shared in the read-only
 427 // section of the CDS archive, because the vtable pointer is patched.
 428 static inline void* dereference(void* addr) {
 429   return *(void**)addr;
 430 }
 431 
 432 static inline void add_vtable(void** list, int* n, void* o, int count) {
 433   guarantee((*n) < count, "vtable list too small");
 434   void* vtable = dereference(o);
 435   assert(dereference(vtable) != NULL, "invalid vtable");
 436   list[(*n)++] = vtable;
 437 }
 438 
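      // Collect one vtable pointer for each metadata type whose vtable must be
      // patched when the CDS archive is mapped in (see the comment above).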
 439 void Universe::init_self_patching_vtbl_list(void** list, int count) {
 440   int n = 0;
 441   { InstanceKlass o;          add_vtable(list, &n, &o, count); }
 442   { InstanceClassLoaderKlass o; add_vtable(list, &n, &o, count); }
 443   { InstanceMirrorKlass o;    add_vtable(list, &n, &o, count); }
 444   { InstanceRefKlass o;       add_vtable(list, &n, &o, count); }
 445   { TypeArrayKlass o;         add_vtable(list, &n, &o, count); }
 446   { ObjArrayKlass o;          add_vtable(list, &n, &o, count); }
 447   { Method o;                 add_vtable(list, &n, &o, count); }
 448   { ConstantPool o;           add_vtable(list, &n, &o, count); }
 449 }
 450 
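      // Create the java.lang.Class mirrors for the primitive types and record
      // them in the _mirrors table, indexed by BasicType.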
 451 void Universe::initialize_basic_type_mirrors(TRAPS) {
  452     assert(_int_mirror == NULL, "basic type mirrors already initialized");
 453     _int_mirror     =
 454       java_lang_Class::create_basic_type_mirror("int",    T_INT, CHECK);
 455     _float_mirror   =
 456       java_lang_Class::create_basic_type_mirror("float",  T_FLOAT,   CHECK);
 457     _double_mirror  =
 458       java_lang_Class::create_basic_type_mirror("double", T_DOUBLE,  CHECK);
 459     _byte_mirror    =
 460       java_lang_Class::create_basic_type_mirror("byte",   T_BYTE, CHECK);
 461     _bool_mirror    =
 462       java_lang_Class::create_basic_type_mirror("boolean",T_BOOLEAN, CHECK);
 463     _char_mirror    =
 464       java_lang_Class::create_basic_type_mirror("char",   T_CHAR, CHECK);
 465     _long_mirror    =
 466       java_lang_Class::create_basic_type_mirror("long",   T_LONG, CHECK);
 467     _short_mirror   =
 468       java_lang_Class::create_basic_type_mirror("short",  T_SHORT,   CHECK);
 469     _void_mirror    =
 470       java_lang_Class::create_basic_type_mirror("void",   T_VOID, CHECK);
 471 
 472     _mirrors[T_INT]     = _int_mirror;
 473     _mirrors[T_FLOAT]   = _float_mirror;
 474     _mirrors[T_DOUBLE]  = _double_mirror;
 475     _mirrors[T_BYTE]    = _byte_mirror;
 476     _mirrors[T_BOOLEAN] = _bool_mirror;
 477     _mirrors[T_CHAR]    = _char_mirror;
 478     _mirrors[T_LONG]    = _long_mirror;
 479     _mirrors[T_SHORT]   = _short_mirror;
 480     _mirrors[T_VOID]    = _void_mirror;
 481   //_mirrors[T_OBJECT]  = InstanceKlass::cast(_object_klass)->java_mirror();
 482   //_mirrors[T_ARRAY]   = InstanceKlass::cast(_object_klass)->java_mirror();
 483 }
 484 
 485 void Universe::fixup_mirrors(TRAPS) {
  486   // Bootstrap problem: all classes get a mirror (java.lang.Class instance) assigned eagerly,
  487   // but we cannot do that for classes created before java.lang.Class is loaded. Here we simply
  488   // walk over permanent objects created so far (mostly classes) and fix up their mirrors. Note
 489   // that the number of objects allocated at this point is very small.
 490   assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded");
 491   HandleMark hm(THREAD);
 492   // Cache the start of the static fields
 493   InstanceMirrorKlass::init_offset_of_static_fields();
 494 
 495   GrowableArray <Klass*>* list = java_lang_Class::fixup_mirror_list();
 496   int list_length = list->length();
 497   for (int i = 0; i < list_length; i++) {
 498     Klass* k = list->at(i);
 499     assert(k->is_klass(), "List should only hold classes");
 500     EXCEPTION_MARK;
 501     KlassHandle kh(THREAD, k);
 502     java_lang_Class::fixup_mirror(kh, CATCH);
  503   }
 504   delete java_lang_Class::fixup_mirror_list();
 505   java_lang_Class::set_fixup_mirror_list(NULL);
 506 }
 507 
 508 static bool has_run_finalizers_on_exit = false;
 509 
 510 void Universe::run_finalizers_on_exit() {
 511   if (has_run_finalizers_on_exit) return;
 512   has_run_finalizers_on_exit = true;
 513 
 514   // Called on VM exit. This ought to be run in a separate thread.
 515   if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
 516   {
 517     PRESERVE_EXCEPTION_MARK;
 518     KlassHandle finalizer_klass(THREAD, SystemDictionary::Finalizer_klass());
 519     JavaValue result(T_VOID);
 520     JavaCalls::call_static(
 521       &result,
 522       finalizer_klass,
 523       vmSymbols::run_finalizers_on_exit_name(),
 524       vmSymbols::void_method_signature(),
 525       THREAD
 526     );
 527     // Ignore any pending exceptions
 528     CLEAR_PENDING_EXCEPTION;
 529   }
 530 }
 531 
 532 
 533 // initialize_vtable could cause gc if
 534 // 1) we specified true to initialize_vtable and
 535 // 2) this ran after gc was enabled
 536 // In case those ever change we use handles for oops
 537 void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
 538   // init vtable of k and all subclasses
 539   Klass* ko = k_h();
 540   klassVtable* vt = ko->vtable();
 541   if (vt) vt->initialize_vtable(false, CHECK);
 542   if (ko->oop_is_instance()) {
 543     InstanceKlass* ik = (InstanceKlass*)ko;
  544     for (KlassHandle s_h(THREAD, ik->subklass()); s_h() != NULL; s_h = KlassHandle(THREAD, s_h()->next_sibling())) {
 545       reinitialize_vtable_of(s_h, CHECK);
 546     }
 547   }
 548 }
 549 
 550 
 551 void initialize_itable_for_klass(Klass* k, TRAPS) {
 552   InstanceKlass::cast(k)->itable()->initialize_itable(false, CHECK);
 553 }
 554 
 555 
 556 void Universe::reinitialize_itables(TRAPS) {
 557   SystemDictionary::classes_do(initialize_itable_for_klass, CHECK);
 558 
 559 }
 560 
 561 
 562 bool Universe::on_page_boundary(void* addr) {
 563   return ((uintptr_t) addr) % os::vm_page_size() == 0;
 564 }
 565 
 566 
 567 bool Universe::should_fill_in_stack_trace(Handle throwable) {
  568   // Never attempt to fill in the stack trace of preallocated errors that do not have
  569   // a backtrace. These errors are kept alive forever and may be "re-used" when all
  570   // preallocated errors with backtrace have been consumed. Also need to avoid
  571   // a potential loop which could happen if an out-of-memory error occurs when
  572   // attempting to allocate the backtrace.
 573   return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
 574           (throwable() != Universe::_out_of_memory_error_perm_gen)  &&
 575           (throwable() != Universe::_out_of_memory_error_array_size) &&
 576           (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
 577 }
 578 
 579 
 580 oop Universe::gen_out_of_memory_error(oop default_err) {
  581   // Generate an out-of-memory error:
  582   // - if there is a preallocated error with a backtrace available, return it with
  583   //   a filled-in stack trace.
  584   // - if there are no preallocated errors with a backtrace available, return
  585   //   an error without a backtrace.
 586   int next;
 587   if (_preallocated_out_of_memory_error_avail_count > 0) {
 588     next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
 589     assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
 590   } else {
 591     next = -1;
 592   }
 593   if (next < 0) {
 594     // all preallocated errors have been used.
 595     // return default
 596     return default_err;
 597   } else {
  598     // get the error object at the slot and set it to NULL so that the
 599     // array isn't keeping it alive anymore.
 600     oop exc = preallocated_out_of_memory_errors()->obj_at(next);
 601     assert(exc != NULL, "slot has been used already");
 602     preallocated_out_of_memory_errors()->obj_at_put(next, NULL);
 603 
 604     // use the message from the default error
 605     oop msg = java_lang_Throwable::message(default_err);
 606     assert(msg != NULL, "no message");
 607     java_lang_Throwable::set_message(exc, msg);
 608 
 609     // populate the stack trace and return it.
 610     java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(exc);
 611     return exc;
 612   }
 613 }
 614 
 615 static intptr_t non_oop_bits = 0;
 616 
 617 void* Universe::non_oop_word() {
  618   // Neither the high bits nor the low bits of this value are allowed
 619   // to look like (respectively) the high or low bits of a real oop.
 620   //
 621   // High and low are CPU-specific notions, but low always includes
 622   // the low-order bit.  Since oops are always aligned at least mod 4,
 623   // setting the low-order bit will ensure that the low half of the
 624   // word will never look like that of a real oop.
 625   //
 626   // Using the OS-supplied non-memory-address word (usually 0 or -1)
 627   // will take care of the high bits, however many there are.
 628 
 629   if (non_oop_bits == 0) {
 630     non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
 631   }
 632 
 633   return (void*)non_oop_bits;
 634 }
 635 
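      // First phase of VM universe initialization: check basic invariants, create
      // the Java heap, the null class loader data and the Method* caches, and set
      // up the symbol and string tables (or map the shared archive).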
 636 jint universe_init() {
 637   assert(!Universe::_fully_initialized, "called after initialize_vtables");
 638   guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
 639          "LogHeapWordSize is incorrect.");
 640   guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
 641   guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
  642             "oop size is not a multiple of HeapWord size");
 643   TraceTime timer("Genesis", TraceStartupTime);
 644   GC_locker::lock();  // do not allow gc during bootstrapping
 645   JavaClasses::compute_hard_coded_offsets();
 646 
 647   jint status = Universe::initialize_heap();
 648   if (status != JNI_OK) {
 649     return status;
 650   }
 651 
 652   // Create memory for metadata.  Must be after initializing heap for
 653   // DumpSharedSpaces.
 654   ClassLoaderData::init_null_class_loader_data();
 655 
 656   // We have a heap so create the Method* caches before
 657   // Metaspace::initialize_shared_spaces() tries to populate them.
 658   Universe::_finalizer_register_cache = new LatestMethodOopCache();
 659   Universe::_loader_addClass_cache    = new LatestMethodOopCache();
 660   Universe::_reflect_invoke_cache     = new ActiveMethodOopsCache();
 661 
 662   if (UseSharedSpaces) {
 663     // Read the data structures supporting the shared spaces (shared
 664     // system dictionary, symbol table, etc.).  After that, access to
 665     // the file (other than the mapped regions) is no longer needed, and
 666     // the file is closed. Closing the file does not affect the
 667     // currently mapped regions.
 668     MetaspaceShared::initialize_shared_spaces();
 669     StringTable::create_table();
 670   } else {
 671     SymbolTable::create_table();
 672     StringTable::create_table();
 673     ClassLoader::create_package_info_table();
 674   }
 675 
 676   return JNI_OK;
 677 }
 678 
 679 // Choose the heap base address and oop encoding mode
 680 // when compressed oops are used:
  681 // Unscaled  - Use 32-bit oops without encoding when
 682 //     NarrowOopHeapBaseMin + heap_size < 4Gb
 683 // ZeroBased - Use zero based compressed oops with encoding when
 684 //     NarrowOopHeapBaseMin + heap_size < 32Gb
 685 // HeapBased - Use compressed oops with heap base + encoding.
 686 
 687 // 4Gb
 688 static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
 689 // 32Gb
 690 // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
 691 
 692 char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
 693   size_t base = 0;
 694 #ifdef _LP64
 695   if (UseCompressedOops) {
 696     assert(mode == UnscaledNarrowOop  ||
 697            mode == ZeroBasedNarrowOop ||
 698            mode == HeapBasedNarrowOop, "mode is invalid");
 699     const size_t total_size = heap_size + HeapBaseMinAddress;
 700     // Return specified base for the first request.
 701     if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
 702       base = HeapBaseMinAddress;
 703     } else if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
 704       if (total_size <= NarrowOopHeapMax && (mode == UnscaledNarrowOop) &&
 705           (Universe::narrow_oop_shift() == 0)) {
  706         // Use 32-bit oops without encoding and
 707         // place heap's top on the 4Gb boundary
 708         base = (NarrowOopHeapMax - heap_size);
 709       } else {
 710         // Can't reserve with NarrowOopShift == 0
 711         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 712         if (mode == UnscaledNarrowOop ||
  713             (mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax)) {
 714           // Use zero based compressed oops with encoding and
 715           // place heap's top on the 32Gb boundary in case
 716           // total_size > 4Gb or failed to reserve below 4Gb.
 717           base = (OopEncodingHeapMax - heap_size);
 718         }
 719       }
 720     } else {
 721       // Can't reserve below 32Gb.
 722       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 723     }
 724     // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
 725     // used in ReservedHeapSpace() constructors.
 726     // The final values will be set in initialize_heap() below.
 727     if (base != 0 && (base + heap_size) <= OopEncodingHeapMax) {
 728       // Use zero based compressed oops
 729       Universe::set_narrow_oop_base(NULL);
 730       // Don't need guard page for implicit checks in indexed
 731       // addressing mode with zero based Compressed Oops.
 732       Universe::set_narrow_oop_use_implicit_null_checks(true);
 733     } else {
 734       // Set to a non-NULL value so the ReservedSpace ctor computes
 735       // the correct no-access prefix.
 736       // The final value will be set in initialize_heap() below.
 737       Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
 738 #ifdef _WIN64
 739       if (UseLargePages) {
 740         // Cannot allocate guard pages for implicit checks in indexed
 741         // addressing mode when large pages are specified on windows.
 742         Universe::set_narrow_oop_use_implicit_null_checks(false);
 743       }
 744 #endif //  _WIN64
 745     }
 746   }
 747 #endif
 748   return (char*)base; // also return NULL (don't care) for 32-bit VM
 749 }
 750 
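      // Create the CollectedHeap selected by the GC flags, initialize it, and
      // choose the compressed oop/klass encoding (base and shift) for 64-bit VMs.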
 751 jint Universe::initialize_heap() {
 752 
 753   if (UseParallelGC) {
 754 #ifndef SERIALGC
 755     Universe::_collectedHeap = new ParallelScavengeHeap();
 756 #else  // SERIALGC
 757     fatal("UseParallelGC not supported in this VM.");
 758 #endif // SERIALGC
 759 
 760   } else if (UseG1GC) {
 761 #ifndef SERIALGC
 762     G1CollectorPolicy* g1p = new G1CollectorPolicy();
 763     G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
 764     Universe::_collectedHeap = g1h;
 765 #else  // SERIALGC
  766     fatal("UseG1GC not supported in this VM.");
 767 #endif // SERIALGC
 768 
 769   } else {
 770     GenCollectorPolicy *gc_policy;
 771 
 772     if (UseSerialGC) {
 773       gc_policy = new MarkSweepPolicy();
 774     } else if (UseConcMarkSweepGC) {
 775 #ifndef SERIALGC
 776       if (UseAdaptiveSizePolicy) {
 777         gc_policy = new ASConcurrentMarkSweepPolicy();
 778       } else {
 779         gc_policy = new ConcurrentMarkSweepPolicy();
 780       }
 781 #else   // SERIALGC
 782     fatal("UseConcMarkSweepGC not supported in this VM.");
 783 #endif // SERIALGC
 784     } else { // default old generation
 785       gc_policy = new MarkSweepPolicy();
 786     }
 787 
 788     Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
 789   }
 790 
 791   jint status = Universe::heap()->initialize();
 792   if (status != JNI_OK) {
 793     return status;
 794   }
 795 
 796 #ifdef _LP64
 797   if (UseCompressedOops) {
 798     // Subtract a page because something can get allocated at heap base.
 799     // This also makes implicit null checking work, because the
 800     // memory+1 page below heap_base needs to cause a signal.
 801     // See needs_explicit_null_check.
 802     // Only set the heap base for compressed oops because it indicates
 803     // compressed oops for pstack code.
 804     bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
 805     if (verbose) {
 806       tty->cr();
 807       tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
 808                  Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
 809     }
 810     if ((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) {
 811       // Can't reserve heap below 32Gb.
 812       // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
 813       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 814       if (verbose) {
  815         tty->print(", Compressed Oops with base: " PTR_FORMAT, Universe::narrow_oop_base());
 816       }
 817     } else {
 818       Universe::set_narrow_oop_base(0);
 819       if (verbose) {
 820         tty->print(", zero based Compressed Oops");
 821       }
 822 #ifdef _WIN64
 823       if (!Universe::narrow_oop_use_implicit_null_checks()) {
 824         // Don't need guard page for implicit checks in indexed addressing
 825         // mode with zero based Compressed Oops.
 826         Universe::set_narrow_oop_use_implicit_null_checks(true);
 827       }
 828 #endif //  _WIN64
  829       if ((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) {
 830         // Can't reserve heap below 4Gb.
 831         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
 832       } else {
 833         Universe::set_narrow_oop_shift(0);
 834         if (verbose) {
 835           tty->print(", 32-bits Oops");
 836         }
 837       }
 838     }
 839     if (verbose) {
 840       tty->cr();
 841       tty->cr();
 842     }
 843     if (UseCompressedKlassPointers) {
 844       Universe::set_narrow_klass_base(Universe::narrow_oop_base());
 845       Universe::set_narrow_klass_shift(MIN2(Universe::narrow_oop_shift(), LogKlassAlignmentInBytes));
 846     }
 847     Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
 848   }
 849   // Universe::narrow_oop_base() is one page below the metaspace
 850   // base. The actual metaspace base depends on alignment constraints
 851   // so we don't know its exact location here.
 852   assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size() - ClassMetaspaceSize) ||
 853          Universe::narrow_oop_base() == NULL, "invalid value");
 854   assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
 855          Universe::narrow_oop_shift() == 0, "invalid value");
 856 #endif
 857 
 858   // We will never reach the CATCH below since Exceptions::_throw will cause
 859   // the VM to exit if an exception is thrown during initialization
 860 
 861   if (UseTLAB) {
 862     assert(Universe::heap()->supports_tlab_allocation(),
 863            "Should support thread-local allocation buffers");
 864     ThreadLocalAllocBuffer::startup_initialization();
 865   }
 866   return JNI_OK;
 867 }
 868 
 869 
 870 // Reserve the Java heap, which is now the same for all GCs.
 871 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
 872   // Add in the class metaspace area so the classes in the headers can
 873   // be compressed the same as instances.
 874   // Need to round class space size up because it's below the heap and
 875   // the actual alignment depends on its size.
 876   size_t metaspace_size = align_size_up(ClassMetaspaceSize, alignment);
 877   size_t total_reserved = align_size_up(heap_size + metaspace_size, alignment);
 878   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
 879 
 880   ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
 881 
 882   if (UseCompressedOops) {
 883     if (addr != NULL && !total_rs.is_reserved()) {
 884       // Failed to reserve at specified address - the requested memory
  885       // region is taken already, for example, by the 'java' launcher.
  886       // Try again to reserve the heap higher.
 887       addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
 888 
 889       ReservedHeapSpace total_rs0(total_reserved, alignment,
 890                                   UseLargePages, addr);
 891 
 892       if (addr != NULL && !total_rs0.is_reserved()) {
 893         // Failed to reserve at specified address again - give up.
 894         addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
 895         assert(addr == NULL, "");
 896 
 897         ReservedHeapSpace total_rs1(total_reserved, alignment,
 898                                     UseLargePages, addr);
 899         total_rs = total_rs1;
 900       } else {
 901         total_rs = total_rs0;
 902       }
 903     }
 904   }
 905 
 906   if (!total_rs.is_reserved()) {
  907     vm_exit_during_initialization(err_msg("Could not reserve enough space for object heap " SIZE_FORMAT " bytes", total_reserved));
 908     return total_rs;
 909   }
 910 
 911   // Split the reserved space into main Java heap and a space for
 912   // classes so that they can be compressed using the same algorithm
  913 // as compressed oops. If compressed oops and compressed klass ptrs are
  914 // used, we need the metaspace first: if the alignment used for
 915   // compressed oops is greater than the one used for compressed klass
 916   // ptrs, a metadata space on top of the heap could become
 917   // unreachable.
 918   ReservedSpace class_rs = total_rs.first_part(metaspace_size);
 919   ReservedSpace heap_rs = total_rs.last_part(metaspace_size, alignment);
 920   Metaspace::initialize_class_space(class_rs);
 921 
 922   if (UseCompressedOops) {
 923     // Universe::initialize_heap() will reset this to NULL if unscaled
 924     // or zero-based narrow oops are actually used.
 925     address base = (address)(total_rs.base() - os::vm_page_size());
 926     Universe::set_narrow_oop_base(base);
 927   }
 928   return heap_rs;
 929 }
 930 
 931 
  932 // It's the caller's responsibility to ensure glitch-freedom
 933 // (if required).
 934 void Universe::update_heap_info_at_gc() {
 935   _heap_capacity_at_last_gc = heap()->capacity();
 936   _heap_used_at_last_gc     = heap()->used();
 937 }
 938 
 939 
 940 
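      // Second phase: run Universe::genesis() to bootstrap the type system, and
      // optionally verify the heap before any collections have happened.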
 941 void universe2_init() {
 942   EXCEPTION_MARK;
 943   Universe::genesis(CATCH);
 944   // Although we'd like to verify here that the state of the heap
 945   // is good, we can't because the main thread has not yet added
 946   // itself to the threads list (so, using current interfaces
 947   // we can't "fill" its TLAB), unless TLABs are disabled.
 948   if (VerifyBeforeGC && !UseTLAB &&
 949       Universe::heap()->total_collections() >= VerifyGCStartAt) {
 950      Universe::heap()->prepare_for_verify();
 951      Universe::verify();   // make sure we're starting with a clean slate
 952   }
 953 }
 954 
 955 
 956 // This function is defined in JVM.cpp
 957 extern void initialize_converter_functions();
 958 
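      // Final phase: reinitialize vtables/itables if needed, preallocate the
      // OutOfMemoryError and other exception instances, cache the finalizer,
      // reflection and class loader methods, and re-enable GC.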
 959 bool universe_post_init() {
 960   assert(!is_init_completed(), "Error: initialization not yet completed!");
 961   Universe::_fully_initialized = true;
 962   EXCEPTION_MARK;
 963   { ResourceMark rm;
 964     Interpreter::initialize();      // needed for interpreter entry points
 965     if (!UseSharedSpaces) {
 966       HandleMark hm(THREAD);
 967       KlassHandle ok_h(THREAD, SystemDictionary::Object_klass());
 968       Universe::reinitialize_vtable_of(ok_h, CHECK_false);
 969       Universe::reinitialize_itables(CHECK_false);
 970     }
 971   }
 972 
 973   HandleMark hm(THREAD);
 974   Klass* k;
 975   instanceKlassHandle k_h;
 976     // Setup preallocated empty java.lang.Class array
 977     Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false);
 978 
 979     // Setup preallocated OutOfMemoryError errors
 980     k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false);
 981     k_h = instanceKlassHandle(THREAD, k);
 982     Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false);
 983     Universe::_out_of_memory_error_perm_gen = k_h->allocate_instance(CHECK_false);
 984     Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
 985     Universe::_out_of_memory_error_gc_overhead_limit =
 986       k_h->allocate_instance(CHECK_false);
 987 
 988     // Setup preallocated NullPointerException
 989     // (this is currently used for a cheap & dirty solution in compiler exception handling)
 990     k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false);
 991     Universe::_null_ptr_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
 992     // Setup preallocated ArithmeticException
 993     // (this is currently used for a cheap & dirty solution in compiler exception handling)
 994     k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ArithmeticException(), true, CHECK_false);
 995     Universe::_arithmetic_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
 996     // Virtual Machine Error for when we get into a situation we can't resolve
 997     k = SystemDictionary::resolve_or_fail(
 998       vmSymbols::java_lang_VirtualMachineError(), true, CHECK_false);
 999     bool linked = InstanceKlass::cast(k)->link_class_or_fail(CHECK_false);
1000     if (!linked) {
1001       tty->print_cr("Unable to link/verify VirtualMachineError class");
1002       return false; // initialization failed
1003     }
1004     Universe::_virtual_machine_error_instance =
1005       InstanceKlass::cast(k)->allocate_instance(CHECK_false);
1006 
1007     Universe::_vm_exception               = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
1008 
1009   if (!DumpSharedSpaces) {
1010     // These are the only Java fields that are currently set during shared space dumping.
 1011     // We prefer not to handle this generally, so we always reinitialize these detail messages.
1012     Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
1013     java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());
1014 
1015     msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
1016     java_lang_Throwable::set_message(Universe::_out_of_memory_error_perm_gen, msg());
1017 
1018     msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
1019     java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());
1020 
1021     msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
1022     java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());
1023 
1024     msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
1025     java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());
1026 
1027     // Setup the array of errors that have preallocated backtrace
1028     k = Universe::_out_of_memory_error_java_heap->klass();
1029     assert(k->name() == vmSymbols::java_lang_OutOfMemoryError(), "should be out of memory error");
1030     k_h = instanceKlassHandle(THREAD, k);
1031 
1032     int len = (StackTraceInThrowable) ? (int)PreallocatedOutOfMemoryErrorCount : 0;
1033     Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(k_h(), len, CHECK_false);
1034     for (int i=0; i<len; i++) {
1035       oop err = k_h->allocate_instance(CHECK_false);
1036       Handle err_h = Handle(THREAD, err);
1037       java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
1038       Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
1039     }
1040     Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
1041   }
1042 
1043 
1044   // Setup static method for registering finalizers
1045   // The finalizer klass must be linked before looking up the method, in
1046   // case it needs to get rewritten.
1047   InstanceKlass::cast(SystemDictionary::Finalizer_klass())->link_class(CHECK_false);
1048   Method* m = InstanceKlass::cast(SystemDictionary::Finalizer_klass())->find_method(
1049                                   vmSymbols::register_method_name(),
1050                                   vmSymbols::register_method_signature());
1051   if (m == NULL || !m->is_static()) {
1052     THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
1053       "java.lang.ref.Finalizer.register", false);
1054   }
1055   Universe::_finalizer_register_cache->init(
1056     SystemDictionary::Finalizer_klass(), m, CHECK_false);
1057 
1058   // Resolve on first use and initialize class.
1059   // Note: No race-condition here, since a resolve will always return the same result
1060 
1061   // Setup method for security checks
1062   k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_reflect_Method(), true, CHECK_false);
1063   k_h = instanceKlassHandle(THREAD, k);
1064   k_h->link_class(CHECK_false);
1065   m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_object_array_object_signature());
1066   if (m == NULL || m->is_static()) {
1067     THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
1068       "java.lang.reflect.Method.invoke", false);
1069   }
1070   Universe::_reflect_invoke_cache->init(k_h(), m, CHECK_false);
1071 
1072   // Setup method for registering loaded classes in class loader vector
1073   InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
1074   m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
1075   if (m == NULL || m->is_static()) {
1076     THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
1077       "java.lang.ClassLoader.addClass", false);
1078   }
1079   Universe::_loader_addClass_cache->init(
1080     SystemDictionary::ClassLoader_klass(), m, CHECK_false);
1081 
 1082   // The following initializes converter functions for serialization in
1083   // JVM.cpp. If we clean up the StrictMath code above we may want to find
1084   // a better solution for this as well.
1085   initialize_converter_functions();
1086 
1087   // This needs to be done before the first scavenge/gc, since
1088   // it's an input to soft ref clearing policy.
1089   {
1090     MutexLocker x(Heap_lock);
1091     Universe::update_heap_info_at_gc();
1092   }
1093 
1094   // ("weak") refs processing infrastructure initialization
1095   Universe::heap()->post_initialize();
1096 
1097   // Initialize performance counters for metaspaces
1098   MetaspaceCounters::initialize_performance_counters();
1099 
1100   GC_locker::unlock();  // allow gc after bootstrapping
1101 
1102   MemoryService::set_universe_heap(Universe::_collectedHeap);
1103   return true;
1104 }
1105 
1106 
1107 void Universe::compute_base_vtable_size() {
1108   _base_vtable_size = ClassLoader::compute_Object_vtable();
1109 }
1110 
1111 
1112 // %%% The Universe::flush_foo methods belong in CodeCache.
1113 
1114 // Flushes compiled methods dependent on dependee.
1115 void Universe::flush_dependents_on(instanceKlassHandle dependee) {
1116   assert_lock_strong(Compile_lock);
1117 
1118   if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
1119 
1120   // CodeCache can only be updated by a thread_in_VM and they will all be
 1121   // stopped during the safepoint so CodeCache will be safe to update without
1122   // holding the CodeCache_lock.
1123 
1124   KlassDepChange changes(dependee);
1125 
1126   // Compute the dependent nmethods
1127   if (CodeCache::mark_for_deoptimization(changes) > 0) {
1128     // At least one nmethod has been marked for deoptimization
1129     VM_Deoptimize op;
1130     VMThread::execute(&op);
1131   }
1132 }
1133 
1134 // Flushes compiled methods dependent on a particular CallSite
1135 // instance when its target is different than the given MethodHandle.
1136 void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
1137   assert_lock_strong(Compile_lock);
1138 
1139   if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
1140 
1141   // CodeCache can only be updated by a thread_in_VM and they will all be
 1142   // stopped during the safepoint so CodeCache will be safe to update without
1143   // holding the CodeCache_lock.
1144 
1145   CallSiteDepChange changes(call_site(), method_handle());
1146 
1147   // Compute the dependent nmethods that have a reference to a
 1148   // CallSite object.  We use InstanceKlass::mark_dependent_nmethods
1149   // directly instead of CodeCache::mark_for_deoptimization because we
1150   // want dependents on the call site class only not all classes in
1151   // the ContextStream.
1152   int marked = 0;
1153   {
1154     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1155     InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
1156     marked = call_site_klass->mark_dependent_nmethods(changes);
1157   }
1158   if (marked > 0) {
1159     // At least one nmethod has been marked for deoptimization
1160     VM_Deoptimize op;
1161     VMThread::execute(&op);
1162   }
1163 }
1164 
1165 #ifdef HOTSWAP
1166 // Flushes compiled methods dependent on dependee in the evolutionary sense
1167 void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
1168   // --- Compile_lock is not held. However we are at a safepoint.
1169   assert_locked_or_safepoint(Compile_lock);
1170   if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
1171 
1172   // CodeCache can only be updated by a thread_in_VM and they will all be
 1173   // stopped during the safepoint so CodeCache will be safe to update without
1174   // holding the CodeCache_lock.
1175 
1176   // Compute the dependent nmethods
1177   if (CodeCache::mark_for_evol_deoptimization(ev_k_h) > 0) {
1178     // At least one nmethod has been marked for deoptimization
1179 
1180     // All this already happens inside a VM_Operation, so we'll do all the work here.
1181     // Stuff copied from VM_Deoptimize and modified slightly.
1182 
1183     // We do not want any GCs to happen while we are in the middle of this VM operation
1184     ResourceMark rm;
1185     DeoptimizationMarker dm;
1186 
1187     // Deoptimize all activations depending on marked nmethods
1188     Deoptimization::deoptimize_dependents();
1189 
1190     // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
1191     CodeCache::make_marked_nmethods_not_entrant();
1192   }
1193 }
1194 #endif // HOTSWAP
1195 
1196 
1197 // Flushes compiled methods dependent on dependee
1198 void Universe::flush_dependents_on_method(methodHandle m_h) {
1199   // --- Compile_lock is not held. However we are at a safepoint.
1200   assert_locked_or_safepoint(Compile_lock);
1201 
1202   // CodeCache can only be updated by a thread_in_VM and they will all be
 1203   // stopped during the safepoint so CodeCache will be safe to update without
1204   // holding the CodeCache_lock.
1205 
1206   // Compute the dependent nmethods
1207   if (CodeCache::mark_for_deoptimization(m_h()) > 0) {
1208     // At least one nmethod has been marked for deoptimization
1209 
1210     // All this already happens inside a VM_Operation, so we'll do all the work here.
1211     // Stuff copied from VM_Deoptimize and modified slightly.
1212 
1213     // We do not want any GCs to happen while we are in the middle of this VM operation
1214     ResourceMark rm;
1215     DeoptimizationMarker dm;
1216 
1217     // Deoptimize all activations depending on marked nmethods
1218     Deoptimization::deoptimize_dependents();
1219 
1220     // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
1221     CodeCache::make_marked_nmethods_not_entrant();
1222   }
1223 }
1224 
1225 void Universe::print() {
1226   print_on(gclog_or_tty);
1227 }
1228 
1229 void Universe::print_on(outputStream* st, bool extended) {
1230   st->print_cr("Heap");
1231   if (!extended) {
1232     heap()->print_on(st);
1233   } else {
1234     heap()->print_extended_on(st);
1235   }
1236 }
1237 
1238 void Universe::print_heap_at_SIGBREAK() {
1239   if (PrintHeapAtSIGBREAK) {
1240     MutexLocker hl(Heap_lock);
1241     print_on(tty);
1242     tty->cr();
1243     tty->flush();
1244   }
1245 }
1246 
1247 void Universe::print_heap_before_gc(outputStream* st, bool ignore_extended) {
1248   st->print_cr("{Heap before GC invocations=%u (full %u):",
1249                heap()->total_collections(),
1250                heap()->total_full_collections());
1251   if (!PrintHeapAtGCExtended || ignore_extended) {
1252     heap()->print_on(st);
1253   } else {
1254     heap()->print_extended_on(st);
1255   }
1256 }
1257 
1258 void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
1259   st->print_cr("Heap after GC invocations=%u (full %u):",
1260                heap()->total_collections(),
1261                heap()->total_full_collections());
1262   if (!PrintHeapAtGCExtended || ignore_extended) {
1263     heap()->print_on(st);
1264   } else {
1265     heap()->print_extended_on(st);
1266   }
1267   st->print_cr("}");
1268 }
1269 
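     // Verifies the major VM data structures: thread stacks, the heap, the symbol
     // and string tables, the code cache, the system dictionary, the class loader
     // data graph (non-product builds only), JNI handles, the C heap, and the oops
     // embedded in the code cache. 'silent' suppresses the progress trace written
     // to gclog_or_tty; 'option' is passed through to the heap verifier.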
1270 void Universe::verify(bool silent, VerifyOption option) {
1271   // The use of _verify_in_progress is a temporary workaround for
1272   // 6320749.  Don't bother creating a class to set and clear it
1273   // since it is only used in this method and the control flow is
1274   // straightforward.
1275   _verify_in_progress = true;
1276 
1277   COMPILER2_PRESENT(
1278     assert(!DerivedPointerTable::is_active(),
1279          "DPT should not be active during verification "
1280          "(of thread stacks below)");
1281   )
1282 
1283   ResourceMark rm;
1284   HandleMark hm;  // Handles created during verification can be zapped
1285   _verify_count++;
1286 
1287   if (!silent) gclog_or_tty->print("[Verifying ");
1288   if (!silent) gclog_or_tty->print("threads ");
1289   Threads::verify();
1290   heap()->verify(silent, option);
1291 
1292   if (!silent) gclog_or_tty->print("syms ");
1293   SymbolTable::verify();
1294   if (!silent) gclog_or_tty->print("strs ");
1295   StringTable::verify();
1296   {
1297     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1298     if (!silent) gclog_or_tty->print("zone ");
1299     CodeCache::verify();
1300   }
1301   if (!silent) gclog_or_tty->print("dict ");
1302   SystemDictionary::verify();
1303 #ifndef PRODUCT
1304   if (!silent) gclog_or_tty->print("cldg ");
1305   ClassLoaderDataGraph::verify();
1306 #endif
1307   if (!silent) gclog_or_tty->print("hand ");
1308   JNIHandles::verify();
1309   if (!silent) gclog_or_tty->print("C-heap ");
1310   os::check_heap();
1311   if (!silent) gclog_or_tty->print("code cache ");
1312   CodeCache::verify_oops();
1313   if (!silent) gclog_or_tty->print_cr("]");
1314 
1315   _verify_in_progress = false;
1316 }
1317 
1318 // Oop verification (see MacroAssembler::verify_oop)
1319 
1320 static uintptr_t _verify_oop_data[2]   = {0, (uintptr_t)-1};
1321 static uintptr_t _verify_klass_data[2] = {0, (uintptr_t)-1};
1322 
1323 
1324 #ifndef PRODUCT
1325 
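     // Computes a (mask, bits) pair such that every properly aligned address in
     // [low_boundary, high_boundary) satisfies (addr & mask) == bits: the mask keeps
     // the high-order bits that min and max have in common plus the low-order
     // alignment bits, and 'bits' holds that common prefix.  MacroAssembler::verify_oop
     // can then sanity-check a candidate oop with a single and-and-compare.
     //
     // Worked example (hypothetical numbers, assuming 8-byte object alignment): for a
     // heap reserved at [0x700000000, 0x780000000) the two boundaries agree above bit 30,
     // so the result is roughly mask == 0xFFFFFFFF80000007 and bits == 0x700000000,
     // i.e. only 8-byte-aligned addresses inside that 2 GB window pass the check.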
1326 static void calculate_verify_data(uintptr_t verify_data[2],
1327                                   HeapWord* low_boundary,
1328                                   HeapWord* high_boundary) {
1329   assert(low_boundary < high_boundary, "bad interval");
1330 
1331   // decide which low-order bits we require to be clear:
1332   size_t alignSize = MinObjAlignmentInBytes;
1333   size_t min_object_size = CollectedHeap::min_fill_size();
1334 
1335   // make an inclusive limit:
1336   uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
1337   uintptr_t min = (uintptr_t)low_boundary;
1338   assert(min < max, "bad interval");
1339   uintptr_t diff = max ^ min;
1340 
1341   // throw away enough low-order bits to make the diff vanish
1342   uintptr_t mask = (uintptr_t)(-1);
1343   while ((mask & diff) != 0)
1344     mask <<= 1;
1345   uintptr_t bits = (min & mask);
1346   assert(bits == (max & mask), "correct mask");
1347   // check an intermediate value between min and max, just to make sure:
1348   assert(bits == ((min + (max-min)/2) & mask), "correct mask");
1349 
1350   // require address alignment, too:
1351   mask |= (alignSize - 1);
1352 
1353   if (!(verify_data[0] == 0 && verify_data[1] == (uintptr_t)-1)) {
1354     assert(verify_data[0] == mask && verify_data[1] == bits, "mask stability");
1355   }
1356   verify_data[0] = mask;
1357   verify_data[1] = bits;
1358 }
1359 
1360 // Oop verification (see MacroAssembler::verify_oop)
1361 
1362 uintptr_t Universe::verify_oop_mask() {
1363   MemRegion m = heap()->reserved_region();
1364   calculate_verify_data(_verify_oop_data,
1365                         m.start(),
1366                         m.end());
1367   return _verify_oop_data[0];
1368 }
1369 
1370 
1371 
1372 uintptr_t Universe::verify_oop_bits() {
1373   verify_oop_mask();
1374   return _verify_oop_data[1];
1375 }
1376 
1377 uintptr_t Universe::verify_mark_mask() {
1378   return markOopDesc::lock_mask_in_place;
1379 }
1380 
1381 uintptr_t Universe::verify_mark_bits() {
1382   intptr_t mask = verify_mark_mask();
1383   intptr_t bits = (intptr_t)markOopDesc::prototype();
1384   assert((bits & ~mask) == 0, "no stray header bits");
1385   return bits;
1386 }
1387 #endif // PRODUCT
1388 
1389 
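     // Eagerly computes the cached oop verification data (and the mark mask/bits),
     // presumably so the values are ready before MacroAssembler::verify_oop first
     // needs them.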
1390 void Universe::compute_verify_oop_data() {
1391   verify_oop_mask();
1392   verify_oop_bits();
1393   verify_mark_mask();
1394   verify_mark_bits();
1395 }
1396 
1397 
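     // Records the holder klass (unless CDS shared spaces have already installed it)
     // and the method's idnum.  Caching the idnum rather than a raw Method* lets the
     // cache look up the current Method* later (see LatestMethodOopCache::get_Method
     // and ActiveMethodOopsCache::is_same_method), even after the class has been
     // redefined.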
1398 void CommonMethodOopCache::init(Klass* k, Method* m, TRAPS) {
1399   if (!UseSharedSpaces) {
1400     _klass = k;
1401   }
1402 #ifndef PRODUCT
1403   else {
1404     // sharing initialization should have already set up _klass
1405     assert(_klass != NULL, "just checking");
1406   }
1407 #endif
1408 
1409   _method_idnum = m->method_idnum();
1410   assert(_method_idnum >= 0, "sanity check");
1411 }
1412 
1413 
1414 ActiveMethodOopsCache::~ActiveMethodOopsCache() {
1415   if (_prev_methods != NULL) {
1416     delete _prev_methods;
1417     _prev_methods = NULL;
1418   }
1419 }
1420 
1421 
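     // Remembers an about-to-be-replaced cached method while frames executing it are
     // still on some stack (see the RC_TRACE output used by RedefineClasses tracing).
     // The method is appended to _prev_methods only if it is still on a stack; since
     // the caller is the VMThread at a safepoint, the same call also prunes previously
     // saved versions that are no longer running and frees their metadata.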
1422 void ActiveMethodOopsCache::add_previous_version(Method* const method) {
1423   assert(Thread::current()->is_VM_thread(),
1424     "only VMThread can add previous versions");
1425 
1426   // Only append the previous method if it is executing on the stack.
1427   if (method->on_stack()) {
1428 
1429     if (_prev_methods == NULL) {
1430       // This is the first previous version so make some space.
1431       // Start with 2 elements under the assumption that the class
1432       // won't be redefined much.
1433       _prev_methods = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Method*>(2, true);
1434     }
1435 
1436     // RC_TRACE macro has an embedded ResourceMark
1437     RC_TRACE(0x00000100,
1438       ("add: %s(%s): adding prev version ref for cached method @%d",
1439       method->name()->as_C_string(), method->signature()->as_C_string(),
1440       _prev_methods->length()));
1441 
1442     _prev_methods->append(method);
1443   }
1444 
1445 
1446   // Since the caller is the VMThread and we are at a safepoint, this is a good
1447   // time to clear out unused method references.
1448 
1449   if (_prev_methods == NULL) return;
1450 
1451   for (int i = _prev_methods->length() - 1; i >= 0; i--) {
1452     Method* method = _prev_methods->at(i);
1453     assert(method != NULL, "weak method ref was unexpectedly cleared");
1454 
1455     if (!method->on_stack()) {
1456       // This method isn't running anymore so remove it
1457       _prev_methods->remove_at(i);
1458       MetadataFactory::free_metadata(method->method_holder()->class_loader_data(), method);
1459     } else {
1460       // RC_TRACE macro has an embedded ResourceMark
1461       RC_TRACE(0x00000400, ("add: %s(%s): previous cached method @%d is alive",
1462         method->name()->as_C_string(), method->signature()->as_C_string(), i));
1463     }
1464   }
1465 } // end add_previous_version()
1466 
1467 
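     // Returns true if 'method' is either the Method* currently installed for this
     // cache's (klass, idnum) pair or one of the previous versions saved by
     // add_previous_version(); returns false otherwise.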
1468 bool ActiveMethodOopsCache::is_same_method(Method* const method) const {
1469   InstanceKlass* ik = InstanceKlass::cast(klass());
1470   Method* check_method = ik->method_with_idnum(method_idnum());
1471   assert(check_method != NULL, "sanity check");
1472   if (check_method == method) {
1473     // done with the easy case
1474     return true;
1475   }
1476 
1477   if (_prev_methods != NULL) {
1478     // The cached method has been redefined at least once so search
1479     // the previous versions for a match.
1480     for (int i = 0; i < _prev_methods->length(); i++) {
1481       check_method = _prev_methods->at(i);
1482       if (check_method == method) {
1483         // a previous version matches
1484         return true;
1485       }
1486     }
1487   }
1488 
1489   // either no previous versions or no previous version matched
1490   return false;
1491 }
1492 
1493 
1494 Method* LatestMethodOopCache::get_Method() {
1495   InstanceKlass* ik = InstanceKlass::cast(klass());
1496   Method* m = ik->method_with_idnum(method_idnum());
1497   assert(m != NULL, "sanity check");
1498   return m;
1499 }
1500 
1501 
1502 #ifdef ASSERT
1503 // Release dummy object(s) at bottom of heap
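     // Each call clears the next entry (or entries) of _fullgc_alot_dummy_array so
     // that a subsequent full GC has a newly unreachable object to reclaim; once all
     // dummies have been handed out, the array itself is released and false is
     // returned.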
1504 bool Universe::release_fullgc_alot_dummy() {
1505   MutexLocker ml(FullGCALot_lock);
1506   if (_fullgc_alot_dummy_array != NULL) {
1507     if (_fullgc_alot_dummy_next >= _fullgc_alot_dummy_array->length()) {
1508       // No more dummies to release, release entire array instead
1509       _fullgc_alot_dummy_array = NULL;
1510       return false;
1511     }
1512     if (!UseConcMarkSweepGC) {
1513       // Release dummy at bottom of old generation
1514       _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
1515     }
1516     // Release dummy at bottom of permanent generation
1517     _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
1518   }
1519   return true;
1520 }
1521 
1522 #endif // ASSERT