/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.hpp"
#if INCLUDE_CDS
#include "classfile/sharedClassUtil.hpp"
#endif
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/dependencies.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generation.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "memory/universe.inline.hpp"
#include "oops/constantPool.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#endif // INCLUDE_ALL_GCS

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
// Known objects
Klass* Universe::_boolArrayKlassObj                 = NULL;
Klass* Universe::_byteArrayKlassObj                 = NULL;
Klass* Universe::_charArrayKlassObj                 = NULL;
Klass* Universe::_intArrayKlassObj                  = NULL;
Klass* Universe::_shortArrayKlassObj                = NULL;
Klass* Universe::_longArrayKlassObj                 = NULL;
Klass* Universe::_singleArrayKlassObj               = NULL;
Klass* Universe::_doubleArrayKlassObj               = NULL;
Klass* Universe::_typeArrayKlassObjs[T_VOID+1]      = { NULL /*, NULL...*/ };
Klass* Universe::_objectArrayKlassObj               = NULL;
oop Universe::_int_mirror                             = NULL;
oop Universe::_float_mirror                           = NULL;
oop Universe::_double_mirror                          = NULL;
oop Universe::_byte_mirror                            = NULL;
oop Universe::_bool_mirror                            = NULL;
oop Universe::_char_mirror                            = NULL;
oop Universe::_long_mirror                            = NULL;
oop Universe::_short_mirror                           = NULL;
oop Universe::_void_mirror                            = NULL;
oop Universe::_mirrors[T_VOID+1]                      = { NULL /*, NULL...*/ };
oop Universe::_main_thread_group                      = NULL;
oop Universe::_system_thread_group                    = NULL;
objArrayOop Universe::_the_empty_class_klass_array    = NULL;
Array<Klass*>* Universe::_the_array_interfaces_array  = NULL;
oop Universe::_the_null_string                        = NULL;
oop Universe::_the_min_jint_string                    = NULL;
LatestMethodCache* Universe::_finalizer_register_cache = NULL;
LatestMethodCache* Universe::_loader_addClass_cache    = NULL;
LatestMethodCache* Universe::_pd_implies_cache         = NULL;
oop Universe::_out_of_memory_error_java_heap          = NULL;
oop Universe::_out_of_memory_error_metaspace          = NULL;
oop Universe::_out_of_memory_error_class_metaspace    = NULL;
oop Universe::_out_of_memory_error_array_size         = NULL;
oop Universe::_out_of_memory_error_gc_overhead_limit  = NULL;
objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
bool Universe::_verify_in_progress                    = false;
oop Universe::_null_ptr_exception_instance            = NULL;
oop Universe::_arithmetic_exception_instance          = NULL;
oop Universe::_virtual_machine_error_instance         = NULL;
oop Universe::_vm_exception                           = NULL;
oop Universe::_allocation_context_notification_obj    = NULL;

Method* Universe::_throw_illegal_access_error         = NULL;
Array<int>* Universe::_the_empty_int_array            = NULL;
Array<u2>* Universe::_the_empty_short_array           = NULL;
Array<Klass*>* Universe::_the_empty_klass_array       = NULL;
Array<Method*>* Universe::_the_empty_method_array     = NULL;
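// The _the_empty_* arrays above serve as canonical, shared empty arrays:
// klasses with no methods, no local interfaces, etc. can all point at these
// singletons rather than allocating their own zero-length arrays.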

// These variables are guarded by FullGCALot_lock.
debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
debug_only(int Universe::_fullgc_alot_dummy_next      = 0;)

// Heap
int             Universe::_verify_count = 0;

int             Universe::_base_vtable_size = 0;
bool            Universe::_bootstrapping = false;
bool            Universe::_fully_initialized = false;

size_t          Universe::_heap_capacity_at_last_gc;
size_t          Universe::_heap_used_at_last_gc = 0;

CollectedHeap*  Universe::_collectedHeap = NULL;

NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
address Universe::_narrow_ptrs_base;

void Universe::basic_type_classes_do(void f(Klass*)) {
  f(boolArrayKlassObj());
  f(byteArrayKlassObj());
  f(charArrayKlassObj());
  f(intArrayKlassObj());
  f(shortArrayKlassObj());
  f(longArrayKlassObj());
  f(singleArrayKlassObj());
  f(doubleArrayKlassObj());
}
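// Universe::oops_do below enumerates the heap objects rooted in this class;
// it is invoked during GC strong-root processing, so everything registered
// here stays live and gets its location updated if the collector moves it.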
void Universe::oops_do(OopClosure* f, bool do_all) {

  f->do_oop((oop*) &_int_mirror);
  f->do_oop((oop*) &_float_mirror);
  f->do_oop((oop*) &_double_mirror);
  f->do_oop((oop*) &_byte_mirror);
  f->do_oop((oop*) &_bool_mirror);
  f->do_oop((oop*) &_char_mirror);
  f->do_oop((oop*) &_long_mirror);
  f->do_oop((oop*) &_short_mirror);
  f->do_oop((oop*) &_void_mirror);

  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    f->do_oop((oop*) &_mirrors[i]);
  }
  assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");

  f->do_oop((oop*)&_the_empty_class_klass_array);
  f->do_oop((oop*)&_the_null_string);
  f->do_oop((oop*)&_the_min_jint_string);
  f->do_oop((oop*)&_out_of_memory_error_java_heap);
  f->do_oop((oop*)&_out_of_memory_error_metaspace);
  f->do_oop((oop*)&_out_of_memory_error_class_metaspace);
  f->do_oop((oop*)&_out_of_memory_error_array_size);
  f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
  f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
  f->do_oop((oop*)&_null_ptr_exception_instance);
  f->do_oop((oop*)&_arithmetic_exception_instance);
  f->do_oop((oop*)&_virtual_machine_error_instance);
  f->do_oop((oop*)&_main_thread_group);
  f->do_oop((oop*)&_system_thread_group);
  f->do_oop((oop*)&_vm_exception);
  f->do_oop((oop*)&_allocation_context_notification_obj);
  debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
}
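// A usage sketch for serialize() below (based on the SerializeClosure
// contract): the same function is driven with a writing closure when the CDS
// archive is dumped and with a reading closure when the archive is mapped
// back in, so one traversal order serves both directions.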
// Serialize metadata in and out of CDS archive, not oops.
void Universe::serialize(SerializeClosure* f, bool do_all) {

  f->do_ptr((void**)&_boolArrayKlassObj);
  f->do_ptr((void**)&_byteArrayKlassObj);
  f->do_ptr((void**)&_charArrayKlassObj);
  f->do_ptr((void**)&_intArrayKlassObj);
  f->do_ptr((void**)&_shortArrayKlassObj);
  f->do_ptr((void**)&_longArrayKlassObj);
  f->do_ptr((void**)&_singleArrayKlassObj);
  f->do_ptr((void**)&_doubleArrayKlassObj);
  f->do_ptr((void**)&_objectArrayKlassObj);

  {
    for (int i = 0; i < T_VOID+1; i++) {
      if (_typeArrayKlassObjs[i] != NULL) {
        assert(i >= T_BOOLEAN, "checking");
        f->do_ptr((void**)&_typeArrayKlassObjs[i]);
      } else if (do_all) {
        f->do_ptr((void**)&_typeArrayKlassObjs[i]);
      }
    }
  }

  f->do_ptr((void**)&_the_array_interfaces_array);
  f->do_ptr((void**)&_the_empty_int_array);
  f->do_ptr((void**)&_the_empty_short_array);
  f->do_ptr((void**)&_the_empty_method_array);
  f->do_ptr((void**)&_the_empty_klass_array);
  _finalizer_register_cache->serialize(f);
  _loader_addClass_cache->serialize(f);
  _pd_implies_cache->serialize(f);
}

void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
  if (size < alignment || size % alignment != 0) {
    vm_exit_during_initialization(
      err_msg("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment));
  }
}

void initialize_basic_type_klass(Klass* k, TRAPS) {
  Klass* ok = SystemDictionary::Object_klass();
  if (UseSharedSpaces) {
    ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
    assert(k->super() == ok, "u3");
    k->restore_unshareable_info(loader_data, Handle(), CHECK);
  } else {
    k->initialize_supers(ok, CHECK);
  }
  k->append_to_sibling_list();
}

void Universe::genesis(TRAPS) {
  ResourceMark rm;

  { FlagSetting fs(_bootstrapping, true);

    { MutexLocker mc(Compile_lock);

      // determine base vtable size; without that we cannot create the array klasses
      compute_base_vtable_size();

      if (!UseSharedSpaces) {
        _boolArrayKlassObj      = TypeArrayKlass::create_klass(T_BOOLEAN, sizeof(jboolean), CHECK);
        _charArrayKlassObj      = TypeArrayKlass::create_klass(T_CHAR,    sizeof(jchar),    CHECK);
        _singleArrayKlassObj    = TypeArrayKlass::create_klass(T_FLOAT,   sizeof(jfloat),   CHECK);
        _doubleArrayKlassObj    = TypeArrayKlass::create_klass(T_DOUBLE,  sizeof(jdouble),  CHECK);
        _byteArrayKlassObj      = TypeArrayKlass::create_klass(T_BYTE,    sizeof(jbyte),    CHECK);
        _shortArrayKlassObj     = TypeArrayKlass::create_klass(T_SHORT,   sizeof(jshort),   CHECK);
        _intArrayKlassObj       = TypeArrayKlass::create_klass(T_INT,     sizeof(jint),     CHECK);
        _longArrayKlassObj      = TypeArrayKlass::create_klass(T_LONG,    sizeof(jlong),    CHECK);

        _typeArrayKlassObjs[T_BOOLEAN] = _boolArrayKlassObj;
        _typeArrayKlassObjs[T_CHAR]    = _charArrayKlassObj;
        _typeArrayKlassObjs[T_FLOAT]   = _singleArrayKlassObj;
        _typeArrayKlassObjs[T_DOUBLE]  = _doubleArrayKlassObj;
        _typeArrayKlassObjs[T_BYTE]    = _byteArrayKlassObj;
        _typeArrayKlassObjs[T_SHORT]   = _shortArrayKlassObj;
        _typeArrayKlassObjs[T_INT]     = _intArrayKlassObj;
        _typeArrayKlassObjs[T_LONG]    = _longArrayKlassObj;

        ClassLoaderData* null_cld = ClassLoaderData::the_null_class_loader_data();

        _the_array_interfaces_array = MetadataFactory::new_array<Klass*>(null_cld, 2, NULL, CHECK);
        _the_empty_int_array        = MetadataFactory::new_array<int>(null_cld, 0, CHECK);
        _the_empty_short_array      = MetadataFactory::new_array<u2>(null_cld, 0, CHECK);
        _the_empty_method_array     = MetadataFactory::new_array<Method*>(null_cld, 0, CHECK);
        _the_empty_klass_array      = MetadataFactory::new_array<Klass*>(null_cld, 0, CHECK);
      }
    }
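    // Note on ordering: the type-array klasses created above were built before
    // java.lang.Object, java.lang.Cloneable and java.io.Serializable existed.
    // Their super class and interfaces can therefore only be filled in below,
    // via initialize_basic_type_klass(), once SystemDictionary::initialize()
    // has loaded those classes.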
    vmSymbols::initialize(CHECK);

    SystemDictionary::initialize(CHECK);

    Klass* ok = SystemDictionary::Object_klass();

    _the_null_string     = StringTable::intern("null", CHECK);
    _the_min_jint_string = StringTable::intern("-2147483648", CHECK);

    if (UseSharedSpaces) {
      // Verify shared interfaces array.
      assert(_the_array_interfaces_array->at(0) ==
             SystemDictionary::Cloneable_klass(), "u3");
      assert(_the_array_interfaces_array->at(1) ==
             SystemDictionary::Serializable_klass(), "u3");
    } else {
      // Set up shared interfaces array.  (Do this before supers are set up.)
      _the_array_interfaces_array->at_put(0, SystemDictionary::Cloneable_klass());
      _the_array_interfaces_array->at_put(1, SystemDictionary::Serializable_klass());
    }

    initialize_basic_type_klass(boolArrayKlassObj(), CHECK);
    initialize_basic_type_klass(charArrayKlassObj(), CHECK);
    initialize_basic_type_klass(singleArrayKlassObj(), CHECK);
    initialize_basic_type_klass(doubleArrayKlassObj(), CHECK);
    initialize_basic_type_klass(byteArrayKlassObj(), CHECK);
    initialize_basic_type_klass(shortArrayKlassObj(), CHECK);
    initialize_basic_type_klass(intArrayKlassObj(), CHECK);
    initialize_basic_type_klass(longArrayKlassObj(), CHECK);
  } // end of core bootstrapping

  // Maybe this could be lifted up now that object array can be initialized
  // during the bootstrapping.

  // OLD
  // Initialize _objectArrayKlass after core bootstrapping to make
  // sure the super class is set up properly for _objectArrayKlass.
  // ---
  // NEW
  // Since some of the old system object arrays have been converted to
  // ordinary object arrays, _objectArrayKlass will be loaded when
  // SystemDictionary::initialize(CHECK); is run. See the extra check
  // for Object_klass_loaded in objArrayKlassKlass::allocate_objArray_klass_impl.
  _objectArrayKlassObj = InstanceKlass::
    cast(SystemDictionary::Object_klass())->array_klass(1, CHECK);
  // OLD
  // Add the class to the class hierarchy manually to make sure that
  // its vtable is initialized after core bootstrapping is completed.
  // ---
  // New
  // Have already been initialized.
  _objectArrayKlassObj->append_to_sibling_list();

  // Compute is_jdk version flags.
  // Only 1.3 or later has the java.lang.Shutdown class.
  // Only 1.4 or later has the java.lang.CharSequence interface.
  // Only 1.5 or later has the java.lang.management.MemoryUsage class.
  if (JDK_Version::is_partially_initialized()) {
    uint8_t jdk_version;
    Klass* k = SystemDictionary::resolve_or_null(
        vmSymbols::java_lang_management_MemoryUsage(), THREAD);
    CLEAR_PENDING_EXCEPTION; // ignore exceptions
    if (k == NULL) {
      k = SystemDictionary::resolve_or_null(
          vmSymbols::java_lang_CharSequence(), THREAD);
      CLEAR_PENDING_EXCEPTION; // ignore exceptions
      if (k == NULL) {
        k = SystemDictionary::resolve_or_null(
            vmSymbols::java_lang_Shutdown(), THREAD);
        CLEAR_PENDING_EXCEPTION; // ignore exceptions
        if (k == NULL) {
          jdk_version = 2;
        } else {
          jdk_version = 3;
        }
      } else {
        jdk_version = 4;
      }
    } else {
      jdk_version = 5;
    }
    JDK_Version::fully_initialize(jdk_version);
  }
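  // Worked example of the chain above: on a JDK where java.lang.CharSequence
  // resolves but java.lang.management.MemoryUsage does not, jdk_version ends
  // up as 4, i.e. the running JDK is treated as 1.4.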
#ifdef ASSERT
  if (FullGCALot) {
    // Allocate an array of dummy objects.
    // We'd like these to be at the bottom of the old generation,
    // so that when we free one and then collect,
    // (almost) the whole heap moves
    // and we find out if we actually update all the oops correctly.
    // But we can't allocate directly in the old generation,
    // so we allocate wherever, and hope that the first collection
    // moves these objects to the bottom of the old generation.
    // We can allocate directly in the permanent generation, so we do.
    int size;
    if (UseConcMarkSweepGC) {
      warning("Using +FullGCALot with concurrent mark sweep gc "
              "will not force all objects to relocate");
      size = FullGCALotDummies;
    } else {
      size = FullGCALotDummies * 2;
    }
    objArrayOop naked_array = oopFactory::new_objArray(SystemDictionary::Object_klass(), size, CHECK);
    objArrayHandle dummy_array(THREAD, naked_array);
    int i = 0;
    while (i < size) {
      // Allocate dummy in old generation
      oop dummy = InstanceKlass::cast(SystemDictionary::Object_klass())->allocate_instance(CHECK);
      dummy_array->obj_at_put(i++, dummy);
    }
    {
      // Only modify the global variable inside the mutex.
      // If we had a race to here, the other dummy_array instances
      // and their elements just get dropped on the floor, which is fine.
      MutexLocker ml(FullGCALot_lock);
      if (_fullgc_alot_dummy_array == NULL) {
        _fullgc_alot_dummy_array = dummy_array();
      }
    }
    assert(i == _fullgc_alot_dummy_array->length(), "just checking");
  }
#endif

  // Initialize dependency array for null class loader
  ClassLoaderData::the_null_class_loader_data()->init_dependencies(CHECK);

}

// CDS support for patching vtables in metadata in the shared archive.
// All types inherited from Metadata have vtables, but not types inherited
// from MetaspaceObj, because the latter does not have virtual functions.
// If the metadata type has a vtable, it cannot be shared in the read-only
// section of the CDS archive, because the vtable pointer is patched.
static inline void add_vtable(void** list, int* n, void* o, int count) {
  guarantee((*n) < count, "vtable list too small");
  void* vtable = dereference_vptr(o);
  assert(*(void**)(vtable) != NULL, "invalid vtable");
  list[(*n)++] = vtable;
}

void Universe::init_self_patching_vtbl_list(void** list, int count) {
  int n = 0;
  { InstanceKlass o;            add_vtable(list, &n, &o, count); }
  { InstanceClassLoaderKlass o; add_vtable(list, &n, &o, count); }
  { InstanceMirrorKlass o;      add_vtable(list, &n, &o, count); }
  { InstanceRefKlass o;         add_vtable(list, &n, &o, count); }
  { TypeArrayKlass o;           add_vtable(list, &n, &o, count); }
  { ObjArrayKlass o;            add_vtable(list, &n, &o, count); }
  { Method o;                   add_vtable(list, &n, &o, count); }
  { ConstantPool o;             add_vtable(list, &n, &o, count); }
}
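// How the list above is used (sketch): each block creates a temporary stack
// instance of a metadata type, and dereference_vptr() reads its compiler
// generated vptr slot. CDS records these vtable pointers so that, on restore,
// the vptrs of archived metadata objects can be patched to the values that
// are valid in the current process.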
void Universe::initialize_basic_type_mirrors(TRAPS) {
  assert(_int_mirror==NULL, "basic type mirrors already initialized");
  _int_mirror     =
    java_lang_Class::create_basic_type_mirror("int",    T_INT, CHECK);
  _float_mirror   =
    java_lang_Class::create_basic_type_mirror("float",  T_FLOAT, CHECK);
  _double_mirror  =
    java_lang_Class::create_basic_type_mirror("double", T_DOUBLE, CHECK);
  _byte_mirror    =
    java_lang_Class::create_basic_type_mirror("byte",   T_BYTE, CHECK);
  _bool_mirror    =
    java_lang_Class::create_basic_type_mirror("boolean",T_BOOLEAN, CHECK);
  _char_mirror    =
    java_lang_Class::create_basic_type_mirror("char",   T_CHAR, CHECK);
  _long_mirror    =
    java_lang_Class::create_basic_type_mirror("long",   T_LONG, CHECK);
  _short_mirror   =
    java_lang_Class::create_basic_type_mirror("short",  T_SHORT, CHECK);
  _void_mirror    =
    java_lang_Class::create_basic_type_mirror("void",   T_VOID, CHECK);

  _mirrors[T_INT]     = _int_mirror;
  _mirrors[T_FLOAT]   = _float_mirror;
  _mirrors[T_DOUBLE]  = _double_mirror;
  _mirrors[T_BYTE]    = _byte_mirror;
  _mirrors[T_BOOLEAN] = _bool_mirror;
  _mirrors[T_CHAR]    = _char_mirror;
  _mirrors[T_LONG]    = _long_mirror;
  _mirrors[T_SHORT]   = _short_mirror;
  _mirrors[T_VOID]    = _void_mirror;
  //_mirrors[T_OBJECT] = InstanceKlass::cast(_object_klass)->java_mirror();
  //_mirrors[T_ARRAY]  = InstanceKlass::cast(_object_klass)->java_mirror();
}

void Universe::fixup_mirrors(TRAPS) {
  // Bootstrap problem: all classes get a mirror (java.lang.Class instance) assigned eagerly,
  // but we cannot do that for classes created before java.lang.Class is loaded. Here we simply
  // walk over permanent objects created so far (mostly classes) and fixup their mirrors. Note
  // that the number of objects allocated at this point is very small.
  assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded");
  HandleMark hm(THREAD);
  // Cache the start of the static fields
  InstanceMirrorKlass::init_offset_of_static_fields();

  GrowableArray <Klass*>* list = java_lang_Class::fixup_mirror_list();
  int list_length = list->length();
  for (int i = 0; i < list_length; i++) {
    Klass* k = list->at(i);
    assert(k->is_klass(), "List should only hold classes");
    EXCEPTION_MARK;
    KlassHandle kh(THREAD, k);
    java_lang_Class::fixup_mirror(kh, CATCH);
  }
  delete java_lang_Class::fixup_mirror_list();
  java_lang_Class::set_fixup_mirror_list(NULL);
}

static bool has_run_finalizers_on_exit = false;

void Universe::run_finalizers_on_exit() {
  if (has_run_finalizers_on_exit) return;
  has_run_finalizers_on_exit = true;

  // Called on VM exit. This ought to be run in a separate thread.
  if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
  {
    PRESERVE_EXCEPTION_MARK;
    KlassHandle finalizer_klass(THREAD, SystemDictionary::Finalizer_klass());
    JavaValue result(T_VOID);
    JavaCalls::call_static(
      &result,
      finalizer_klass,
      vmSymbols::run_finalizers_on_exit_name(),
      vmSymbols::void_method_signature(),
      THREAD
    );
    // Ignore any pending exceptions
    CLEAR_PENDING_EXCEPTION;
  }
}


// initialize_vtable could cause gc if
// 1) we specified true to initialize_vtable and
// 2) this ran after gc was enabled
// In case those ever change we use handles for oops
void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
  // init vtable of k and all subclasses
  Klass* ko = k_h();
  klassVtable* vt = ko->vtable();
  if (vt) vt->initialize_vtable(false, CHECK);
  if (ko->oop_is_instance()) {
    InstanceKlass* ik = (InstanceKlass*)ko;
    for (KlassHandle s_h(THREAD, ik->subklass());
         s_h() != NULL;
         s_h = KlassHandle(THREAD, s_h()->next_sibling())) {
      reinitialize_vtable_of(s_h, CHECK);
    }
  }
}


void initialize_itable_for_klass(Klass* k, TRAPS) {
  InstanceKlass::cast(k)->itable()->initialize_itable(false, CHECK);
}


void Universe::reinitialize_itables(TRAPS) {
  SystemDictionary::classes_do(initialize_itable_for_klass, CHECK);
}


bool Universe::on_page_boundary(void* addr) {
  return ((uintptr_t) addr) % os::vm_page_size() == 0;
}


bool Universe::should_fill_in_stack_trace(Handle throwable) {
  // never attempt to fill in the stack trace of preallocated errors that do not have
  // backtrace. These errors are kept alive forever and may be "re-used" when all
  // preallocated errors with backtrace have been consumed. Also need to avoid
  // a potential loop which could happen if an out of memory occurs when attempting
  // to allocate the backtrace.
  return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
          (throwable() != Universe::_out_of_memory_error_metaspace) &&
          (throwable() != Universe::_out_of_memory_error_class_metaspace) &&
          (throwable() != Universe::_out_of_memory_error_array_size) &&
          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
}

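// Note on the counter protocol used below: Atomic::add(-1, &avail_count)
// returns the decremented value, which doubles as the array index of the
// preallocated error to hand out; once it goes negative, every preallocated
// instance has been consumed and the caller falls back to default_err.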
oop Universe::gen_out_of_memory_error(oop default_err) {
  // generate an out of memory error:
  // - if there is a preallocated error with backtrace available then return it with
  //   a filled in stack trace.
  // - if there are no preallocated errors with backtrace available then return
  //   an error without backtrace.
  int next;
  if (_preallocated_out_of_memory_error_avail_count > 0) {
    next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
    assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
  } else {
    next = -1;
  }
  if (next < 0) {
    // all preallocated errors have been used.
    // return default
    return default_err;
  } else {
    // get the error object at the slot and set it to NULL so that the
    // array isn't keeping it alive anymore.
    oop exc = preallocated_out_of_memory_errors()->obj_at(next);
    assert(exc != NULL, "slot has been used already");
    preallocated_out_of_memory_errors()->obj_at_put(next, NULL);

    // use the message from the default error
    oop msg = java_lang_Throwable::message(default_err);
    assert(msg != NULL, "no message");
    java_lang_Throwable::set_message(exc, msg);

    // populate the stack trace and return it.
    java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(exc);
    return exc;
  }
}

intptr_t Universe::_non_oop_bits = 0;

void* Universe::non_oop_word() {
  // Neither the high bits nor the low bits of this value is allowed
  // to look like (respectively) the high or low bits of a real oop.
  //
  // High and low are CPU-specific notions, but low always includes
  // the low-order bit.  Since oops are always aligned at least mod 4,
  // setting the low-order bit will ensure that the low half of the
  // word will never look like that of a real oop.
  //
  // Using the OS-supplied non-memory-address word (usually 0 or -1)
  // will take care of the high bits, however many there are.

  if (_non_oop_bits == 0) {
    _non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
  }

  return (void*)_non_oop_bits;
}

jint universe_init() {
  assert(!Universe::_fully_initialized, "called after initialize_vtables");
  guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
            "LogHeapWordSize is incorrect.");
  guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
  guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
            "oop size is not a multiple of HeapWord size");
  TraceTime timer("Genesis", TraceStartupTime);
  JavaClasses::compute_hard_coded_offsets();

  jint status = Universe::initialize_heap();
  if (status != JNI_OK) {
    return status;
  }

  Metaspace::global_initialize();

  // Create memory for metadata.  Must be after initializing heap for
  // DumpSharedSpaces.
  ClassLoaderData::init_null_class_loader_data();

  // We have a heap so create the Method* caches before
  // Metaspace::initialize_shared_spaces() tries to populate them.
  Universe::_finalizer_register_cache = new LatestMethodCache();
  Universe::_loader_addClass_cache    = new LatestMethodCache();
  Universe::_pd_implies_cache         = new LatestMethodCache();

  if (UseSharedSpaces) {
    // Read the data structures supporting the shared spaces (shared
    // system dictionary, symbol table, etc.).  After that, access to
    // the file (other than the mapped regions) is no longer needed, and
    // the file is closed. Closing the file does not affect the
    // currently mapped regions.
    MetaspaceShared::initialize_shared_spaces();
    StringTable::create_table();
  } else {
    SymbolTable::create_table();
    StringTable::create_table();
    ClassLoader::create_package_info_table();

    if (DumpSharedSpaces) {
      MetaspaceShared::prepare_for_dumping();
    }
  }

  return JNI_OK;
}

// Choose the heap base address and oop encoding mode
// when compressed oops are used:
// Unscaled  - Use 32-bits oops without encoding when
//             NarrowOopHeapBaseMin + heap_size < 4Gb
// ZeroBased - Use zero based compressed oops with encoding when
//             NarrowOopHeapBaseMin + heap_size < 32Gb
// HeapBased - Use compressed oops with heap base + encoding.

// 4Gb
static const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1);
// 32Gb
// OopEncodingHeapMax == UnscaledOopHeapMax << LogMinObjAlignmentInBytes;
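// Decoding sketch for the three modes (the real implementation lives in
// oop.inline.hpp):
//   Unscaled:  oop = (address)(uintptr_t)narrow
//   ZeroBased: oop = (address)((uintptr_t)narrow << LogMinObjAlignmentInBytes)
//   HeapBased: oop = narrow_oop_base + ((uintptr_t)narrow << LogMinObjAlignmentInBytes)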
char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
  assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
  assert(is_size_aligned((size_t)UnscaledOopHeapMax, alignment), "Must be");
  assert(is_size_aligned(heap_size, alignment), "Must be");

  uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);

  size_t base = 0;
#ifdef _LP64
  if (UseCompressedOops) {
    assert(mode == UnscaledNarrowOop ||
           mode == ZeroBasedNarrowOop ||
           mode == HeapBasedNarrowOop, "mode is invalid");
    const size_t total_size = heap_size + heap_base_min_address_aligned;
    // Return specified base for the first request.
    if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
      base = heap_base_min_address_aligned;

      // If the total size is small enough to allow UnscaledNarrowOop then
      // just use UnscaledNarrowOop.
    } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
      if ((total_size <= UnscaledOopHeapMax) && (mode == UnscaledNarrowOop) &&
          (Universe::narrow_oop_shift() == 0)) {
        // Use 32-bits oops without encoding and
        // place heap's top on the 4Gb boundary
        base = (UnscaledOopHeapMax - heap_size);
      } else {
        // Can't reserve with NarrowOopShift == 0
        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);

        if (mode == UnscaledNarrowOop ||
            mode == ZeroBasedNarrowOop && total_size <= UnscaledOopHeapMax) {

          // Use zero based compressed oops with encoding and
          // place heap's top on the 32Gb boundary in case
          // total_size > 4Gb or failed to reserve below 4Gb.
          uint64_t heap_top = OopEncodingHeapMax;

          // For small heaps, save some space for compressed class pointer
          // space so it can be decoded with no base.
          if (UseCompressedClassPointers && !UseSharedSpaces &&
              OopEncodingHeapMax <= 32*G) {

            uint64_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
            assert(is_size_aligned((size_t)OopEncodingHeapMax-class_space,
                   alignment), "difference must be aligned too");
            uint64_t new_top = OopEncodingHeapMax-class_space;

            if (total_size <= new_top) {
              heap_top = new_top;
            }
          }

          // Align base to the adjusted top of the heap
          base = heap_top - heap_size;
        }
      }
    } else {
      // UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or
      // HeapBasedNarrowOop encoding was requested.  So, can't reserve below 32Gb.
      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
    }

    // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
    // used in ReservedHeapSpace() constructors.
    // The final values will be set in initialize_heap() below.
    if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {
      // Use zero based compressed oops
      Universe::set_narrow_oop_base(NULL);
      // Don't need guard page for implicit checks in indexed
      // addressing mode with zero based Compressed Oops.
      Universe::set_narrow_oop_use_implicit_null_checks(true);
    } else {
      // Set to a non-NULL value so the ReservedSpace ctor computes
      // the correct no-access prefix.
      // The final value will be set in initialize_heap() below.
      Universe::set_narrow_oop_base((address)UnscaledOopHeapMax);
#if defined(_WIN64) || defined(AIX)
      if (UseLargePages) {
        // Cannot allocate guard pages for implicit checks in indexed
        // addressing mode when large pages are specified on windows.
        Universe::set_narrow_oop_use_implicit_null_checks(false);
      }
#endif //  _WIN64
    }
  }
#endif

  assert(is_ptr_aligned((char*)base, alignment), "Must be");
  return (char*)base; // also return NULL (don't care) for 32-bit VM
}
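// With the default 8-byte object alignment, initialize_heap() below settles
// on one of three outcomes for the compressed-oop encoding:
//   heap end <= 4Gb  : base == NULL, shift == 0                          (Unscaled)
//   heap end <= 32Gb : base == NULL, shift == LogMinObjAlignmentInBytes  (ZeroBased)
//   otherwise        : base != NULL, shift == LogMinObjAlignmentInBytes  (HeapBased)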
jint Universe::initialize_heap() {

  if (UseParallelGC) {
#if INCLUDE_ALL_GCS
    Universe::_collectedHeap = new ParallelScavengeHeap();
#else  // INCLUDE_ALL_GCS
    fatal("UseParallelGC not supported in this VM.");
#endif // INCLUDE_ALL_GCS

  } else if (UseG1GC) {
#if INCLUDE_ALL_GCS
    G1CollectorPolicy* g1p = new G1CollectorPolicy();
    g1p->initialize_all();
    G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
    Universe::_collectedHeap = g1h;
#else  // INCLUDE_ALL_GCS
    fatal("UseG1GC not supported in java kernel vm.");
#endif // INCLUDE_ALL_GCS

  } else {
    GenCollectorPolicy* gc_policy;

    if (UseSerialGC) {
      gc_policy = new MarkSweepPolicy();
    } else if (UseConcMarkSweepGC) {
#if INCLUDE_ALL_GCS
      gc_policy = new ConcurrentMarkSweepPolicy();
#else  // INCLUDE_ALL_GCS
      fatal("UseConcMarkSweepGC not supported in this VM.");
#endif // INCLUDE_ALL_GCS
    } else { // default old generation
      gc_policy = new MarkSweepPolicy();
    }
    gc_policy->initialize_all();

    Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
  }

  ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());

  jint status = Universe::heap()->initialize();
  if (status != JNI_OK) {
    return status;
  }

#ifdef _LP64
  if (UseCompressedOops) {
    // Subtract a page because something can get allocated at heap base.
    // This also makes implicit null checking work, because the
    // memory+1 page below heap_base needs to cause a signal.
    // See needs_explicit_null_check.
    // Only set the heap base for compressed oops because it indicates
    // compressed oops for pstack code.
    if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {
      // Can't reserve heap below 32Gb.
      // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
#ifdef AIX
      // There is no protected page before the heap. This assures all oops
      // are decoded so that NULL is preserved, so this page will not be accessed.
      Universe::set_narrow_oop_use_implicit_null_checks(false);
#endif
    } else {
      Universe::set_narrow_oop_base(0);
#ifdef _WIN64
      if (!Universe::narrow_oop_use_implicit_null_checks()) {
        // Don't need guard page for implicit checks in indexed addressing
        // mode with zero based Compressed Oops.
        Universe::set_narrow_oop_use_implicit_null_checks(true);
      }
#endif //  _WIN64
      if ((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
        // Can't reserve heap below 4Gb.
        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
      } else {
        Universe::set_narrow_oop_shift(0);
      }
    }

    Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());

    if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
      Universe::print_compressed_oops_mode();
    }
  }
  // Universe::narrow_oop_base() is one page below the heap.
  assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
         os::vm_page_size()) ||
         Universe::narrow_oop_base() == NULL, "invalid value");
  assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
         Universe::narrow_oop_shift() == 0, "invalid value");
#endif

  // We will never reach the CATCH below since Exceptions::_throw will cause
  // the VM to exit if an exception is thrown during initialization

  if (UseTLAB) {
    assert(Universe::heap()->supports_tlab_allocation(),
           "Should support thread-local allocation buffers");
    ThreadLocalAllocBuffer::startup_initialization();
  }
  return JNI_OK;
}
void Universe::print_compressed_oops_mode() {
  tty->cr();
  tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
             Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);

  tty->print(", Compressed Oops mode: %s", narrow_oop_mode_to_string(narrow_oop_mode()));

  if (Universe::narrow_oop_base() != 0) {
    tty->print(":" PTR_FORMAT, Universe::narrow_oop_base());
  }

  if (Universe::narrow_oop_shift() != 0) {
    tty->print(", Oop shift amount: %d", Universe::narrow_oop_shift());
  }

  tty->cr();
  tty->cr();
}
// Reserve the Java heap, which is now the same for all GCs.
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
  assert(alignment <= Arguments::conservative_max_heap_alignment(),
      err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
          alignment, Arguments::conservative_max_heap_alignment()));
  size_t total_reserved = align_size_up(heap_size, alignment);
  assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
      "heap size is too big for compressed oops");

  bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
  assert(!UseLargePages
      || UseParallelGC
      || use_large_pages, "Wrong alignment to use large pages");

  char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);

  ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !total_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by 'java' launcher.
      // Try again to reserve heap higher.
      addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);

      ReservedHeapSpace total_rs0(total_reserved, alignment,
                                  use_large_pages, addr);

      if (addr != NULL && !total_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");

        ReservedHeapSpace total_rs1(total_reserved, alignment,
                                    use_large_pages, addr);
        total_rs = total_rs1;
      } else {
        total_rs = total_rs0;
      }
    }
  }

  if (!total_rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
    return total_rs;
  }

  if (UseCompressedOops) {
    // Universe::initialize_heap() will reset this to NULL if unscaled
    // or zero-based narrow oops are actually used.
    address base = (address)(total_rs.base() - os::vm_page_size());
    Universe::set_narrow_oop_base(base);
  }
  return total_rs;
}


// It's the caller's responsibility to ensure glitch-freedom
// (if required).
void Universe::update_heap_info_at_gc() {
  _heap_capacity_at_last_gc = heap()->capacity();
  _heap_used_at_last_gc     = heap()->used();
}


const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
  switch (mode) {
    case UnscaledNarrowOop:
      return "32-bit";
    case ZeroBasedNarrowOop:
      return "Zero based";
    case HeapBasedNarrowOop:
      return "Non-zero based";
  }

  ShouldNotReachHere();
  return "";
}


Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() {
  if (narrow_oop_base() != 0) {
    return HeapBasedNarrowOop;
  }

  if (narrow_oop_shift() != 0) {
    return ZeroBasedNarrowOop;
  }

  return UnscaledNarrowOop;
}


void universe2_init() {
  EXCEPTION_MARK;
  Universe::genesis(CATCH);
}


bool universe_post_init() {
  assert(!is_init_completed(), "Error: initialization not yet completed!");
  Universe::_fully_initialized = true;
  EXCEPTION_MARK;
  { ResourceMark rm;
    Interpreter::initialize();      // needed for interpreter entry points
    if (!UseSharedSpaces) {
      HandleMark hm(THREAD);
      KlassHandle ok_h(THREAD, SystemDictionary::Object_klass());
      Universe::reinitialize_vtable_of(ok_h, CHECK_false);
      Universe::reinitialize_itables(CHECK_false);
    }
  }

  HandleMark hm(THREAD);
  Klass* k;
  instanceKlassHandle k_h;
  // Setup preallocated empty java.lang.Class array
  Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false);

  // Setup preallocated OutOfMemoryError errors
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false);
  k_h = instanceKlassHandle(THREAD, k);
  Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_metaspace = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_class_metaspace = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_gc_overhead_limit =
    k_h->allocate_instance(CHECK_false);

  // Setup preallocated NullPointerException
  // (this is currently used for a cheap & dirty solution in compiler exception handling)
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false);
  Universe::_null_ptr_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
  // Setup preallocated ArithmeticException
  // (this is currently used for a cheap & dirty solution in compiler exception handling)
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ArithmeticException(), true, CHECK_false);
  Universe::_arithmetic_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
  // Virtual Machine Error for when we get into a situation we can't resolve
  k = SystemDictionary::resolve_or_fail(
      vmSymbols::java_lang_VirtualMachineError(), true, CHECK_false);
  bool linked = InstanceKlass::cast(k)->link_class_or_fail(CHECK_false);
  if (!linked) {
    tty->print_cr("Unable to link/verify VirtualMachineError class");
    return false; // initialization failed
  }
  Universe::_virtual_machine_error_instance =
    InstanceKlass::cast(k)->allocate_instance(CHECK_false);

  Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false);

  if (!DumpSharedSpaces) {
    // These are the only Java fields that are currently set during shared space dumping.
    // We prefer to not handle this generally, so we always reinitialize these detail messages.
    Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());

    msg = java_lang_String::create_from_str("Metaspace", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
    msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());

    msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());

    msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());

    msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
    java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());

    // Setup the array of errors that have preallocated backtrace
    k = Universe::_out_of_memory_error_java_heap->klass();
    assert(k->name() == vmSymbols::java_lang_OutOfMemoryError(), "should be out of memory error");
    k_h = instanceKlassHandle(THREAD, k);

    int len = (StackTraceInThrowable) ? (int)PreallocatedOutOfMemoryErrorCount : 0;
    Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(k_h(), len, CHECK_false);
    for (int i = 0; i < len; i++) {
      oop err = k_h->allocate_instance(CHECK_false);
      Handle err_h = Handle(THREAD, err);
      java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
      Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
    }
    Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
  }

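  // Note: a LatestMethodCache stores the holder klass and the method's idnum
  // rather than a raw Method*, so the cached lookups initialized below keep
  // resolving to the latest version of the method even after class
  // redefinition (see LatestMethodCache::get_method()).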
  // Setup static method for registering finalizers
  // The finalizer klass must be linked before looking up the method, in
  // case it needs to get rewritten.
  InstanceKlass::cast(SystemDictionary::Finalizer_klass())->link_class(CHECK_false);
  Method* m = InstanceKlass::cast(SystemDictionary::Finalizer_klass())->find_method(
                                  vmSymbols::register_method_name(),
                                  vmSymbols::register_method_signature());
  if (m == NULL || !m->is_static()) {
    tty->print_cr("Unable to link/verify Finalizer.register method");
    return false; // initialization failed (cannot throw exception yet)
  }
  Universe::_finalizer_register_cache->init(
    SystemDictionary::Finalizer_klass(), m);

  InstanceKlass::cast(SystemDictionary::misc_Unsafe_klass())->link_class(CHECK_false);
  m = InstanceKlass::cast(SystemDictionary::misc_Unsafe_klass())->find_method(
                                  vmSymbols::throwIllegalAccessError_name(),
                                  vmSymbols::void_method_signature());
  if (m != NULL && !m->is_static()) {
    // Note null is okay; this method is used in itables, and if it is null,
    // then AbstractMethodError is thrown instead.
    tty->print_cr("Unable to link/verify Unsafe.throwIllegalAccessError method");
    return false; // initialization failed (cannot throw exception yet)
  }
  Universe::_throw_illegal_access_error = m;

  // Setup method for registering loaded classes in class loader vector
  InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
  m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
  if (m == NULL || m->is_static()) {
    tty->print_cr("Unable to link/verify ClassLoader.addClass method");
    return false; // initialization failed (cannot throw exception yet)
  }
  Universe::_loader_addClass_cache->init(
    SystemDictionary::ClassLoader_klass(), m);

  // Setup method for checking protection domain
  InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false);
  m = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->
            find_method(vmSymbols::impliesCreateAccessControlContext_name(),
                        vmSymbols::void_boolean_signature());
  // Allow NULL which should only happen with bootstrapping.
  if (m != NULL) {
    if (m->is_static()) {
      // NoSuchMethodException doesn't actually work because it tries to run the
      // <init> function before java_lang_Class is linked. Print error and exit.
      tty->print_cr("ProtectionDomain.impliesCreateAccessControlContext() has the wrong linkage");
      return false; // initialization failed
    }
    Universe::_pd_implies_cache->init(
      SystemDictionary::ProtectionDomain_klass(), m);
  }

  // This needs to be done before the first scavenge/gc, since
  // it's an input to soft ref clearing policy.
  {
    MutexLocker x(Heap_lock);
    Universe::update_heap_info_at_gc();
  }

  // ("weak") refs processing infrastructure initialization
  Universe::heap()->post_initialize();

  // Initialize performance counters for metaspaces
  MetaspaceCounters::initialize_performance_counters();
  CompressedClassSpaceCounters::initialize_performance_counters();

  MemoryService::add_metaspace_memory_pools();

  MemoryService::set_universe_heap(Universe::_collectedHeap);
#if INCLUDE_CDS
  SharedClassUtil::initialize(CHECK_false);
#endif
  return true;
}


void Universe::compute_base_vtable_size() {
  _base_vtable_size = ClassLoader::compute_Object_vtable();
}


// %%% The Universe::flush_foo methods belong in CodeCache.
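// Common shape of the flush_* methods below: first mark the affected
// nmethods (cheaply, while compilation is blocked or at a safepoint), then
// deoptimize -- either by submitting a VM_Deoptimize operation or, when
// already inside one, by running the equivalent steps inline.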
// Flushes compiled methods dependent on dependee.
void Universe::flush_dependents_on(instanceKlassHandle dependee) {
  assert_lock_strong(Compile_lock);

  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (CodeCache::mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

// Flushes compiled methods dependent on a particular CallSite
// instance when its target is different than the given MethodHandle.
void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
  assert_lock_strong(Compile_lock);

  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  CallSiteDepChange changes(call_site(), method_handle());

  // Compute the dependent nmethods that have a reference to a
  // CallSite object.  We use InstanceKlass::mark_dependent_nmethod
  // directly instead of CodeCache::mark_for_deoptimization because we
  // want dependents on the call site class only not all classes in
  // the ContextStream.
  int marked = 0;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
    marked = call_site_klass->mark_dependent_nmethods(changes);
  }
  if (marked > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (CodeCache::mark_for_evol_deoptimization(ev_k_h) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    CodeCache::make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void Universe::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (CodeCache::mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    CodeCache::make_marked_nmethods_not_entrant();
  }
}

void Universe::print() {
  print_on(gclog_or_tty);
}

void Universe::print_on(outputStream* st, bool extended) {
  st->print_cr("Heap");
  if (!extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
}

void Universe::print_heap_at_SIGBREAK() {
  if (PrintHeapAtSIGBREAK) {
    MutexLocker hl(Heap_lock);
    print_on(tty);
    tty->cr();
    tty->flush();
  }
}

void Universe::print_heap_before_gc(outputStream* st, bool ignore_extended) {
  st->print_cr("{Heap before GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  if (!PrintHeapAtGCExtended || ignore_extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
}

void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
  st->print_cr("Heap after GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  if (!PrintHeapAtGCExtended || ignore_extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
  st->print_cr("}");
}
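// When not silent, a full pass of verify() below prints the verified
// components in order, e.g.:
//   [Verifying threads heap syms strs zone dict cldg metaspace chunks hand C-heap code cache ]
// ("cldg" appears only in non-product builds).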
void Universe::verify(VerifyOption option, const char* prefix, bool silent) {
  // The use of _verify_in_progress is a temporary work around for
  // 6320749.  Don't bother with creating a class to set and clear
  // it since it is only used in this method and the control flow is
  // straightforward.
  _verify_in_progress = true;

  COMPILER2_PRESENT(
    assert(!DerivedPointerTable::is_active(),
           "DPT should not be active during verification "
           "(of thread stacks below)");
  )

  ResourceMark rm;
  HandleMark hm;  // Handles created during verification can be zapped
  _verify_count++;

  if (!silent) gclog_or_tty->print("%s", prefix);
  if (!silent) gclog_or_tty->print("[Verifying ");
  if (!silent) gclog_or_tty->print("threads ");
  Threads::verify();
  if (!silent) gclog_or_tty->print("heap ");
  heap()->verify(silent, option);
  if (!silent) gclog_or_tty->print("syms ");
  SymbolTable::verify();
  if (!silent) gclog_or_tty->print("strs ");
  StringTable::verify();
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    if (!silent) gclog_or_tty->print("zone ");
    CodeCache::verify();
  }
  if (!silent) gclog_or_tty->print("dict ");
  SystemDictionary::verify();
#ifndef PRODUCT
  if (!silent) gclog_or_tty->print("cldg ");
  ClassLoaderDataGraph::verify();
#endif
  if (!silent) gclog_or_tty->print("metaspace chunks ");
  MetaspaceAux::verify_free_chunks();
  if (!silent) gclog_or_tty->print("hand ");
  JNIHandles::verify();
  if (!silent) gclog_or_tty->print("C-heap ");
  os::check_heap();
  if (!silent) gclog_or_tty->print("code cache ");
  CodeCache::verify_oops();
  if (!silent) gclog_or_tty->print_cr("]");

  _verify_in_progress = false;
}

// Oop verification (see MacroAssembler::verify_oop)

static uintptr_t _verify_oop_data[2]   = {0, (uintptr_t)-1};
static uintptr_t _verify_klass_data[2] = {0, (uintptr_t)-1};


#ifndef PRODUCT

static void calculate_verify_data(uintptr_t verify_data[2],
                                  HeapWord* low_boundary,
                                  HeapWord* high_boundary) {
  assert(low_boundary < high_boundary, "bad interval");

  // decide which low-order bits we require to be clear:
  size_t alignSize = MinObjAlignmentInBytes;
  size_t min_object_size = CollectedHeap::min_fill_size();

  // make an inclusive limit:
  uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
  uintptr_t min = (uintptr_t)low_boundary;
  assert(min < max, "bad interval");
  uintptr_t diff = max ^ min;

  // throw away enough low-order bits to make the diff vanish
  uintptr_t mask = (uintptr_t)(-1);
  while ((mask & diff) != 0)
    mask <<= 1;
  uintptr_t bits = (min & mask);
  assert(bits == (max & mask), "correct mask");
  // check an intermediate value between min and max, just to make sure:
  assert(bits == ((min + (max-min)/2) & mask), "correct mask");

  // require address alignment, too:
  mask |= (alignSize - 1);

  if (!(verify_data[0] == 0 && verify_data[1] == (uintptr_t)-1)) {
    assert(verify_data[0] == mask && verify_data[1] == bits, "mask stability");
  }
  verify_data[0] = mask;
  verify_data[1] = bits;
}
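// Given the (mask, bits) pair computed above, an address p can only be a
// plausible object address if ((uintptr_t)p & mask) == bits; the accessors
// below cache these values for the assembler-level oop sanity checks.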
// Oop verification (see MacroAssembler::verify_oop)

uintptr_t Universe::verify_oop_mask() {
  MemRegion m = heap()->reserved_region();
  calculate_verify_data(_verify_oop_data,
                        m.start(),
                        m.end());
  return _verify_oop_data[0];
}


uintptr_t Universe::verify_oop_bits() {
  verify_oop_mask();
  return _verify_oop_data[1];
}

uintptr_t Universe::verify_mark_mask() {
  return markOopDesc::lock_mask_in_place;
}

uintptr_t Universe::verify_mark_bits() {
  intptr_t mask = verify_mark_mask();
  intptr_t bits = (intptr_t)markOopDesc::prototype();
  assert((bits & ~mask) == 0, "no stray header bits");
  return bits;
}
#endif // PRODUCT


void Universe::compute_verify_oop_data() {
  verify_oop_mask();
  verify_oop_bits();
  verify_mark_mask();
  verify_mark_bits();
}


void LatestMethodCache::init(Klass* k, Method* m) {
  if (!UseSharedSpaces) {
    _klass = k;
  }
#ifndef PRODUCT
  else {
    // sharing initialization should have already set up _klass
    assert(_klass != NULL, "just checking");
  }
#endif

  _method_idnum = m->method_idnum();
  assert(_method_idnum >= 0, "sanity check");
}


Method* LatestMethodCache::get_method() {
  if (klass() == NULL) return NULL;
  InstanceKlass* ik = InstanceKlass::cast(klass());
  Method* m = ik->method_with_idnum(method_idnum());
  assert(m != NULL, "sanity check");
  return m;
}


#ifdef ASSERT
// Release dummy object(s) at bottom of heap
bool Universe::release_fullgc_alot_dummy() {
  MutexLocker ml(FullGCALot_lock);
  if (_fullgc_alot_dummy_array != NULL) {
    if (_fullgc_alot_dummy_next >= _fullgc_alot_dummy_array->length()) {
      // No more dummies to release, release entire array instead
      _fullgc_alot_dummy_array = NULL;
      return false;
    }
    if (!UseConcMarkSweepGC) {
      // Release dummy at bottom of old generation
      _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
    }
    // Release dummy at bottom of permanent generation
    _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
  }
  return true;
}

#endif // ASSERT