/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/dependencies.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generation.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "memory/universe.inline.hpp"
#include "oops/constantPool.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#endif // INCLUDE_ALL_GCS

// Known objects
Klass* Universe::_boolArrayKlassObj = NULL;
Klass* Universe::_byteArrayKlassObj = NULL;
Klass* Universe::_charArrayKlassObj = NULL;
Klass* Universe::_intArrayKlassObj = NULL;
Klass* Universe::_shortArrayKlassObj = NULL;
Klass* Universe::_longArrayKlassObj = NULL;
Klass* Universe::_singleArrayKlassObj = NULL;
Klass* Universe::_doubleArrayKlassObj = NULL;
Klass* Universe::_typeArrayKlassObjs[T_VOID+1] = { NULL /*, NULL...*/ };
Klass* Universe::_objectArrayKlassObj = NULL;
oop Universe::_int_mirror = NULL;
oop Universe::_float_mirror = NULL;
oop Universe::_double_mirror = NULL;
oop Universe::_byte_mirror = NULL;
oop Universe::_bool_mirror = NULL;
oop Universe::_char_mirror = NULL;
oop Universe::_long_mirror = NULL;
oop Universe::_short_mirror = NULL;
oop Universe::_void_mirror = NULL;
oop Universe::_mirrors[T_VOID+1] = { NULL /*, NULL...*/ };
oop Universe::_main_thread_group = NULL;
oop Universe::_system_thread_group = NULL;
objArrayOop Universe::_the_empty_class_klass_array = NULL;
Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
oop Universe::_the_null_string = NULL;
oop Universe::_the_min_jint_string = NULL;
LatestMethodOopCache* Universe::_finalizer_register_cache = NULL;
LatestMethodOopCache* Universe::_loader_addClass_cache = NULL;
LatestMethodOopCache* Universe::_pd_implies_cache = NULL;
ActiveMethodOopsCache* Universe::_reflect_invoke_cache = NULL;
oop Universe::_out_of_memory_error_java_heap = NULL;
oop Universe::_out_of_memory_error_metaspace = NULL;
oop Universe::_out_of_memory_error_class_metaspace = NULL;
oop Universe::_out_of_memory_error_array_size = NULL;
oop Universe::_out_of_memory_error_gc_overhead_limit = NULL;
objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
bool Universe::_verify_in_progress = false;
oop Universe::_null_ptr_exception_instance = NULL;
oop Universe::_arithmetic_exception_instance = NULL;
oop Universe::_virtual_machine_error_instance = NULL;
oop Universe::_vm_exception = NULL;
Array<int>* Universe::_the_empty_int_array = NULL;
Array<u2>* Universe::_the_empty_short_array = NULL;
Array<Klass*>* Universe::_the_empty_klass_array = NULL;
Array<Method*>* Universe::_the_empty_method_array = NULL;

// These variables are guarded by FullGCALot_lock.
debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
debug_only(int Universe::_fullgc_alot_dummy_next = 0;)

// Heap
int Universe::_verify_count = 0;

int Universe::_base_vtable_size = 0;
bool Universe::_bootstrapping = false;
bool Universe::_fully_initialized = false;

size_t Universe::_heap_capacity_at_last_gc;
size_t Universe::_heap_used_at_last_gc = 0;

CollectedHeap* Universe::_collectedHeap = NULL;

NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
address Universe::_narrow_ptrs_base;

size_t Universe::_class_metaspace_size;

void Universe::basic_type_classes_do(void f(Klass*)) {
  f(boolArrayKlassObj());
  f(byteArrayKlassObj());
  f(charArrayKlassObj());
  f(intArrayKlassObj());
  f(shortArrayKlassObj());
  f(longArrayKlassObj());
  f(singleArrayKlassObj());
  f(doubleArrayKlassObj());
}

void Universe::oops_do(OopClosure* f, bool do_all) {

  f->do_oop((oop*) &_int_mirror);
  f->do_oop((oop*) &_float_mirror);
  f->do_oop((oop*) &_double_mirror);
  f->do_oop((oop*) &_byte_mirror);
  f->do_oop((oop*) &_bool_mirror);
  f->do_oop((oop*) &_char_mirror);
  f->do_oop((oop*) &_long_mirror);
  f->do_oop((oop*) &_short_mirror);
  f->do_oop((oop*) &_void_mirror);

  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    f->do_oop((oop*) &_mirrors[i]);
  }
  assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");

  f->do_oop((oop*)&_the_empty_class_klass_array);
  f->do_oop((oop*)&_the_null_string);
  f->do_oop((oop*)&_the_min_jint_string);
  f->do_oop((oop*)&_out_of_memory_error_java_heap);
  f->do_oop((oop*)&_out_of_memory_error_metaspace);
  f->do_oop((oop*)&_out_of_memory_error_class_metaspace);
  f->do_oop((oop*)&_out_of_memory_error_array_size);
  f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
  f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
  f->do_oop((oop*)&_null_ptr_exception_instance);
  f->do_oop((oop*)&_arithmetic_exception_instance);
  f->do_oop((oop*)&_virtual_machine_error_instance);
  f->do_oop((oop*)&_main_thread_group);
  f->do_oop((oop*)&_system_thread_group);
  f->do_oop((oop*)&_vm_exception);
  debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
}

// Serialize metadata in and out of CDS archive, not oops.
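// A single SerializeClosure walks the same fields in the same order on
// both sides of CDS: at dump time the closure writes each pointer into the
// archive, and at restore time it reads the value back into the static.
// (This is how MetaspaceShared drives it, via its read/write closures.)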
void Universe::serialize(SerializeClosure* f, bool do_all) {

  f->do_ptr((void**)&_boolArrayKlassObj);
  f->do_ptr((void**)&_byteArrayKlassObj);
  f->do_ptr((void**)&_charArrayKlassObj);
  f->do_ptr((void**)&_intArrayKlassObj);
  f->do_ptr((void**)&_shortArrayKlassObj);
  f->do_ptr((void**)&_longArrayKlassObj);
  f->do_ptr((void**)&_singleArrayKlassObj);
  f->do_ptr((void**)&_doubleArrayKlassObj);
  f->do_ptr((void**)&_objectArrayKlassObj);

  {
    for (int i = 0; i < T_VOID+1; i++) {
      if (_typeArrayKlassObjs[i] != NULL) {
        assert(i >= T_BOOLEAN, "checking");
        f->do_ptr((void**)&_typeArrayKlassObjs[i]);
      } else if (do_all) {
        f->do_ptr((void**)&_typeArrayKlassObjs[i]);
      }
    }
  }

  f->do_ptr((void**)&_the_array_interfaces_array);
  f->do_ptr((void**)&_the_empty_int_array);
  f->do_ptr((void**)&_the_empty_short_array);
  f->do_ptr((void**)&_the_empty_method_array);
  f->do_ptr((void**)&_the_empty_klass_array);
  _finalizer_register_cache->serialize(f);
  _loader_addClass_cache->serialize(f);
  _reflect_invoke_cache->serialize(f);
  _pd_implies_cache->serialize(f);
}

void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
  if (size < alignment || size % alignment != 0) {
    vm_exit_during_initialization(
      err_msg("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment));
  }
}

void initialize_basic_type_klass(Klass* k, TRAPS) {
  Klass* ok = SystemDictionary::Object_klass();
  if (UseSharedSpaces) {
    assert(k->super() == ok, "u3");
    k->restore_unshareable_info(CHECK);
  } else {
    k->initialize_supers(ok, CHECK);
  }
  k->append_to_sibling_list();
}

void Universe::genesis(TRAPS) {
  ResourceMark rm;

  { FlagSetting fs(_bootstrapping, true);

    { MutexLocker mc(Compile_lock);

      // determine base vtable size; without that we cannot create the array klasses
      compute_base_vtable_size();

      if (!UseSharedSpaces) {
        _boolArrayKlassObj   = TypeArrayKlass::create_klass(T_BOOLEAN, sizeof(jboolean), CHECK);
        _charArrayKlassObj   = TypeArrayKlass::create_klass(T_CHAR,    sizeof(jchar),    CHECK);
        _singleArrayKlassObj = TypeArrayKlass::create_klass(T_FLOAT,   sizeof(jfloat),   CHECK);
        _doubleArrayKlassObj = TypeArrayKlass::create_klass(T_DOUBLE,  sizeof(jdouble),  CHECK);
        _byteArrayKlassObj   = TypeArrayKlass::create_klass(T_BYTE,    sizeof(jbyte),    CHECK);
        _shortArrayKlassObj  = TypeArrayKlass::create_klass(T_SHORT,   sizeof(jshort),   CHECK);
        _intArrayKlassObj    = TypeArrayKlass::create_klass(T_INT,     sizeof(jint),     CHECK);
        _longArrayKlassObj   = TypeArrayKlass::create_klass(T_LONG,    sizeof(jlong),    CHECK);

        _typeArrayKlassObjs[T_BOOLEAN] = _boolArrayKlassObj;
        _typeArrayKlassObjs[T_CHAR]    = _charArrayKlassObj;
        _typeArrayKlassObjs[T_FLOAT]   = _singleArrayKlassObj;
        _typeArrayKlassObjs[T_DOUBLE]  = _doubleArrayKlassObj;
        _typeArrayKlassObjs[T_BYTE]    = _byteArrayKlassObj;
        _typeArrayKlassObjs[T_SHORT]   = _shortArrayKlassObj;
        _typeArrayKlassObjs[T_INT]     = _intArrayKlassObj;
        _typeArrayKlassObjs[T_LONG]    = _longArrayKlassObj;

        ClassLoaderData* null_cld = ClassLoaderData::the_null_class_loader_data();

        _the_array_interfaces_array = MetadataFactory::new_array<Klass*>(null_cld, 2, NULL, CHECK);
        _the_empty_int_array        = MetadataFactory::new_array<int>(null_cld, 0, CHECK);
        _the_empty_short_array      = MetadataFactory::new_array<u2>(null_cld, 0, CHECK);
        _the_empty_method_array     = MetadataFactory::new_array<Method*>(null_cld, 0, CHECK);
        _the_empty_klass_array      = MetadataFactory::new_array<Klass*>(null_cld, 0, CHECK);
      }
    }

    vmSymbols::initialize(CHECK);

    SystemDictionary::initialize(CHECK);

    Klass* ok = SystemDictionary::Object_klass();

    _the_null_string     = StringTable::intern("null", CHECK);
    _the_min_jint_string = StringTable::intern("-2147483648", CHECK);

    if (UseSharedSpaces) {
      // Verify shared interfaces array.
      assert(_the_array_interfaces_array->at(0) ==
             SystemDictionary::Cloneable_klass(), "u3");
      assert(_the_array_interfaces_array->at(1) ==
             SystemDictionary::Serializable_klass(), "u3");
    } else {
      // Set up shared interfaces array. (Do this before supers are set up.)
      _the_array_interfaces_array->at_put(0, SystemDictionary::Cloneable_klass());
      _the_array_interfaces_array->at_put(1, SystemDictionary::Serializable_klass());
    }

    initialize_basic_type_klass(boolArrayKlassObj(), CHECK);
    initialize_basic_type_klass(charArrayKlassObj(), CHECK);
    initialize_basic_type_klass(singleArrayKlassObj(), CHECK);
    initialize_basic_type_klass(doubleArrayKlassObj(), CHECK);
    initialize_basic_type_klass(byteArrayKlassObj(), CHECK);
    initialize_basic_type_klass(shortArrayKlassObj(), CHECK);
    initialize_basic_type_klass(intArrayKlassObj(), CHECK);
    initialize_basic_type_klass(longArrayKlassObj(), CHECK);
  } // end of core bootstrapping

  // Maybe this could be lifted up now that object arrays can be initialized
  // during bootstrapping.

  // OLD
  // Initialize _objectArrayKlass after core bootstrapping to make
  // sure the super class is set up properly for _objectArrayKlass.
  // ---
  // NEW
  // Since some of the old system object arrays have been converted to
  // ordinary object arrays, _objectArrayKlass will be loaded when
  // SystemDictionary::initialize(CHECK); is run. See the extra check
  // for Object_klass_loaded in objArrayKlassKlass::allocate_objArray_klass_impl.
  _objectArrayKlassObj = InstanceKlass::
    cast(SystemDictionary::Object_klass())->array_klass(1, CHECK);
  // OLD
  // Add the class to the class hierarchy manually to make sure that
  // its vtable is initialized after core bootstrapping is completed.
  // ---
  // NEW
  // Has already been initialized.
  _objectArrayKlassObj->append_to_sibling_list();

  // Compute is_jdk version flags.
  // Only 1.3 or later has the java.lang.Shutdown class.
  // Only 1.4 or later has the java.lang.CharSequence interface.
  // Only 1.5 or later has the java.lang.management.MemoryUsage class.
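  // The probes below walk that list newest-first, so the resulting
  // jdk_version is 5 if MemoryUsage resolves, 4 if only CharSequence
  // does, 3 if only Shutdown does, and 2 if none of them do.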
  if (JDK_Version::is_partially_initialized()) {
    uint8_t jdk_version;
    Klass* k = SystemDictionary::resolve_or_null(
        vmSymbols::java_lang_management_MemoryUsage(), THREAD);
    CLEAR_PENDING_EXCEPTION; // ignore exceptions
    if (k == NULL) {
      k = SystemDictionary::resolve_or_null(
          vmSymbols::java_lang_CharSequence(), THREAD);
      CLEAR_PENDING_EXCEPTION; // ignore exceptions
      if (k == NULL) {
        k = SystemDictionary::resolve_or_null(
            vmSymbols::java_lang_Shutdown(), THREAD);
        CLEAR_PENDING_EXCEPTION; // ignore exceptions
        if (k == NULL) {
          jdk_version = 2;
        } else {
          jdk_version = 3;
        }
      } else {
        jdk_version = 4;
      }
    } else {
      jdk_version = 5;
    }
    JDK_Version::fully_initialize(jdk_version);
  }

#ifdef ASSERT
  if (FullGCALot) {
    // Allocate an array of dummy objects.
    // We'd like these to be at the bottom of the old generation,
    // so that when we free one and then collect,
    // (almost) the whole heap moves
    // and we find out if we actually update all the oops correctly.
    // But we can't allocate directly in the old generation,
    // so we allocate wherever, and hope that the first collection
    // moves these objects to the bottom of the old generation.
    // We can allocate directly in the permanent generation, so we do.
    int size;
    if (UseConcMarkSweepGC) {
      warning("Using +FullGCALot with concurrent mark sweep gc "
              "will not force all objects to relocate");
      size = FullGCALotDummies;
    } else {
      size = FullGCALotDummies * 2;
    }
    objArrayOop naked_array = oopFactory::new_objArray(SystemDictionary::Object_klass(), size, CHECK);
    objArrayHandle dummy_array(THREAD, naked_array);
    int i = 0;
    while (i < size) {
      // Allocate dummy in old generation
      oop dummy = InstanceKlass::cast(SystemDictionary::Object_klass())->allocate_instance(CHECK);
      dummy_array->obj_at_put(i++, dummy);
    }
    {
      // Only modify the global variable inside the mutex.
      // If we had a race to here, the other dummy_array instances
      // and their elements just get dropped on the floor, which is fine.
      MutexLocker ml(FullGCALot_lock);
      if (_fullgc_alot_dummy_array == NULL) {
        _fullgc_alot_dummy_array = dummy_array();
      }
    }
    assert(i == _fullgc_alot_dummy_array->length(), "just checking");
  }
#endif

  // Initialize dependency array for null class loader
  ClassLoaderData::the_null_class_loader_data()->init_dependencies(CHECK);

}

// CDS support for patching vtables in metadata in the shared archive.
// All types inherited from Metadata have vtables, but not types inherited
// from MetaspaceObj, because the latter does not have virtual functions.
// If the metadata type has a vtable, it cannot be shared in the read-only
// section of the CDS archive, because the vtable pointer is patched.
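//
// For reference, the helper below relies on the usual (compiler-specific)
// C++ object layout in which the vtable pointer is the first word of a
// polymorphic object: a throwaway stack instance of each type is created
// just so its vptr can be read and recorded. dereference_vptr encapsulates
// that read, which is why this stays CDS-internal.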
static inline void add_vtable(void** list, int* n, void* o, int count) {
  guarantee((*n) < count, "vtable list too small");
  void* vtable = dereference_vptr(o);
  assert(*(void**)(vtable) != NULL, "invalid vtable");
  list[(*n)++] = vtable;
}

void Universe::init_self_patching_vtbl_list(void** list, int count) {
  int n = 0;
  { InstanceKlass o;            add_vtable(list, &n, &o, count); }
  { InstanceClassLoaderKlass o; add_vtable(list, &n, &o, count); }
  { InstanceMirrorKlass o;      add_vtable(list, &n, &o, count); }
  { InstanceRefKlass o;         add_vtable(list, &n, &o, count); }
  { TypeArrayKlass o;           add_vtable(list, &n, &o, count); }
  { ObjArrayKlass o;            add_vtable(list, &n, &o, count); }
  { Method o;                   add_vtable(list, &n, &o, count); }
  { ConstantPool o;             add_vtable(list, &n, &o, count); }
}

void Universe::initialize_basic_type_mirrors(TRAPS) {
  assert(_int_mirror == NULL, "basic type mirrors already initialized");
  _int_mirror    = java_lang_Class::create_basic_type_mirror("int",     T_INT,     CHECK);
  _float_mirror  = java_lang_Class::create_basic_type_mirror("float",   T_FLOAT,   CHECK);
  _double_mirror = java_lang_Class::create_basic_type_mirror("double",  T_DOUBLE,  CHECK);
  _byte_mirror   = java_lang_Class::create_basic_type_mirror("byte",    T_BYTE,    CHECK);
  _bool_mirror   = java_lang_Class::create_basic_type_mirror("boolean", T_BOOLEAN, CHECK);
  _char_mirror   = java_lang_Class::create_basic_type_mirror("char",    T_CHAR,    CHECK);
  _long_mirror   = java_lang_Class::create_basic_type_mirror("long",    T_LONG,    CHECK);
  _short_mirror  = java_lang_Class::create_basic_type_mirror("short",   T_SHORT,   CHECK);
  _void_mirror   = java_lang_Class::create_basic_type_mirror("void",    T_VOID,    CHECK);

  _mirrors[T_INT]     = _int_mirror;
  _mirrors[T_FLOAT]   = _float_mirror;
  _mirrors[T_DOUBLE]  = _double_mirror;
  _mirrors[T_BYTE]    = _byte_mirror;
  _mirrors[T_BOOLEAN] = _bool_mirror;
  _mirrors[T_CHAR]    = _char_mirror;
  _mirrors[T_LONG]    = _long_mirror;
  _mirrors[T_SHORT]   = _short_mirror;
  _mirrors[T_VOID]    = _void_mirror;
  //_mirrors[T_OBJECT] = InstanceKlass::cast(_object_klass)->java_mirror();
  //_mirrors[T_ARRAY]  = InstanceKlass::cast(_object_klass)->java_mirror();
}

void Universe::fixup_mirrors(TRAPS) {
  // Bootstrap problem: all classes get a mirror (java.lang.Class instance) assigned eagerly,
  // but we cannot do that for classes created before java.lang.Class is loaded. Here we simply
  // walk over permanent objects created so far (mostly classes) and fix up their mirrors. Note
  // that the number of objects allocated at this point is very small.
  assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded");
  HandleMark hm(THREAD);
  // Cache the start of the static fields
  InstanceMirrorKlass::init_offset_of_static_fields();

  GrowableArray<Klass*>* list = java_lang_Class::fixup_mirror_list();
  int list_length = list->length();
  for (int i = 0; i < list_length; i++) {
    Klass* k = list->at(i);
    assert(k->is_klass(), "List should only hold classes");
    EXCEPTION_MARK;
    KlassHandle kh(THREAD, k);
    java_lang_Class::fixup_mirror(kh, CATCH);
  }
  delete java_lang_Class::fixup_mirror_list();
  java_lang_Class::set_fixup_mirror_list(NULL);
}

static bool has_run_finalizers_on_exit = false;

void Universe::run_finalizers_on_exit() {
  if (has_run_finalizers_on_exit) return;
  has_run_finalizers_on_exit = true;

  // Called on VM exit. This ought to be run in a separate thread.
  if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
  {
    PRESERVE_EXCEPTION_MARK;
    KlassHandle finalizer_klass(THREAD, SystemDictionary::Finalizer_klass());
    JavaValue result(T_VOID);
    JavaCalls::call_static(
      &result,
      finalizer_klass,
      vmSymbols::run_finalizers_on_exit_name(),
      vmSymbols::void_method_signature(),
      THREAD
    );
    // Ignore any pending exceptions
    CLEAR_PENDING_EXCEPTION;
  }
}


// initialize_vtable could cause gc if
// 1) we specified true to initialize_vtable and
// 2) this ran after gc was enabled
// In case those ever change, we use handles for oops.
void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
  // init vtable of k and all subclasses
  Klass* ko = k_h();
  klassVtable* vt = ko->vtable();
  if (vt) vt->initialize_vtable(false, CHECK);
  if (ko->oop_is_instance()) {
    InstanceKlass* ik = (InstanceKlass*)ko;
    for (KlassHandle s_h(THREAD, ik->subklass());
         s_h() != NULL;
         s_h = KlassHandle(THREAD, s_h()->next_sibling())) {
      reinitialize_vtable_of(s_h, CHECK);
    }
  }
}


void initialize_itable_for_klass(Klass* k, TRAPS) {
  InstanceKlass::cast(k)->itable()->initialize_itable(false, CHECK);
}


void Universe::reinitialize_itables(TRAPS) {
  SystemDictionary::classes_do(initialize_itable_for_klass, CHECK);
}


bool Universe::on_page_boundary(void* addr) {
  return ((uintptr_t) addr) % os::vm_page_size() == 0;
}


bool Universe::should_fill_in_stack_trace(Handle throwable) {
  // Never attempt to fill in the stack trace of preallocated errors that do not have a
  // backtrace. These errors are kept alive forever and may be "re-used" when all
  // preallocated errors with a backtrace have been consumed. We also need to avoid
  // a potential loop which could happen if an out of memory occurs when attempting
  // to allocate the backtrace.
  return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
          (throwable() != Universe::_out_of_memory_error_metaspace) &&
          (throwable() != Universe::_out_of_memory_error_class_metaspace) &&
          (throwable() != Universe::_out_of_memory_error_array_size) &&
          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
}


oop Universe::gen_out_of_memory_error(oop default_err) {
  // Generate an out-of-memory error:
  // - if there is a preallocated error with backtrace available, then return it with
  //   a filled-in stack trace.
  // - if there are no preallocated errors with backtrace available, then return
  //   an error without a backtrace.
  int next;
  if (_preallocated_out_of_memory_error_avail_count > 0) {
    next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
    assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
  } else {
    next = -1;
  }
  if (next < 0) {
    // all preallocated errors have been used.
    // return default
    return default_err;
  } else {
    // get the error object at the slot and set it to NULL so that the
    // array isn't keeping it alive anymore.
    oop exc = preallocated_out_of_memory_errors()->obj_at(next);
    assert(exc != NULL, "slot has been used already");
    preallocated_out_of_memory_errors()->obj_at_put(next, NULL);

    // use the message from the default error
    oop msg = java_lang_Throwable::message(default_err);
    assert(msg != NULL, "no message");
    java_lang_Throwable::set_message(exc, msg);

    // populate the stack trace and return it.
    java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(exc);
    return exc;
  }
}

static intptr_t non_oop_bits = 0;

void* Universe::non_oop_word() {
  // Neither the high bits nor the low bits of this value is allowed
  // to look like (respectively) the high or low bits of a real oop.
  //
  // High and low are CPU-specific notions, but low always includes
  // the low-order bit. Since oops are always aligned at least mod 4,
  // setting the low-order bit will ensure that the low half of the
  // word will never look like that of a real oop.
  //
  // Using the OS-supplied non-memory-address word (usually 0 or -1)
  // will take care of the high bits, however many there are.

  if (non_oop_bits == 0) {
    non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
  }

  return (void*)non_oop_bits;
}

jint universe_init() {
  assert(!Universe::_fully_initialized, "called after initialize_vtables");
  guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
            "LogHeapWordSize is incorrect.");
  guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
  guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
            "oop size is not a multiple of HeapWord size");
  TraceTime timer("Genesis", TraceStartupTime);
  GC_locker::lock(); // do not allow gc during bootstrapping
  JavaClasses::compute_hard_coded_offsets();

  jint status = Universe::initialize_heap();
  if (status != JNI_OK) {
    return status;
  }

  // Create memory for metadata. Must be after initializing heap for
  // DumpSharedSpaces.
  ClassLoaderData::init_null_class_loader_data();

  // We have a heap so create the Method* caches before
  // Metaspace::initialize_shared_spaces() tries to populate them.
  Universe::_finalizer_register_cache = new LatestMethodOopCache();
  Universe::_loader_addClass_cache    = new LatestMethodOopCache();
  Universe::_pd_implies_cache         = new LatestMethodOopCache();
  Universe::_reflect_invoke_cache     = new ActiveMethodOopsCache();

  if (UseSharedSpaces) {
    // Read the data structures supporting the shared spaces (shared
    // system dictionary, symbol table, etc.). After that, access to
    // the file (other than the mapped regions) is no longer needed, and
    // the file is closed. Closing the file does not affect the
    // currently mapped regions.
    MetaspaceShared::initialize_shared_spaces();
    StringTable::create_table();
  } else {
    SymbolTable::create_table();
    StringTable::create_table();
    ClassLoader::create_package_info_table();
  }

  return JNI_OK;
}

// Choose the heap base address and oop encoding mode
// when compressed oops are used:
// Unscaled  - Use 32-bit oops without encoding when
//             NarrowOopHeapBaseMin + heap_size < 4Gb
// ZeroBased - Use zero based compressed oops with encoding when
//             NarrowOopHeapBaseMin + heap_size < 32Gb
// HeapBased - Use compressed oops with heap base + encoding.

// 4Gb
static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
// 32Gb
// OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;

char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
  size_t base = 0;
#ifdef _LP64
  if (UseCompressedOops) {
    assert(mode == UnscaledNarrowOop ||
           mode == ZeroBasedNarrowOop ||
           mode == HeapBasedNarrowOop, "mode is invalid");
    const size_t total_size = heap_size + HeapBaseMinAddress;
    // Return specified base for the first request.
    if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
      base = HeapBaseMinAddress;

    // If the total size and the metaspace size are small enough to allow
    // UnscaledNarrowOop then just use UnscaledNarrowOop.
    } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop) &&
        (!UseCompressedKlassPointers ||
          (((OopEncodingHeapMax - heap_size) + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax))) {
      // We don't need to check the metaspace size here because it is always smaller
      // than total_size.
      if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
          (Universe::narrow_oop_shift() == 0)) {
        // Use 32-bit oops without encoding and
        // place heap's top on the 4Gb boundary
        base = (NarrowOopHeapMax - heap_size);
      } else {
        // Can't reserve with NarrowOopShift == 0
        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
        if (mode == UnscaledNarrowOop ||
            (mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax)) {
          // Use zero based compressed oops with encoding and
          // place heap's top on the 32Gb boundary in case
          // total_size > 4Gb or failed to reserve below 4Gb.
          base = (OopEncodingHeapMax - heap_size);
        }
      }

    // See if ZeroBasedNarrowOop encoding will work for a heap based at
    // (KlassEncodingMetaspaceMax - class_metaspace_size()).
    } else if (UseCompressedKlassPointers && (mode != HeapBasedNarrowOop) &&
        (Universe::class_metaspace_size() + HeapBaseMinAddress <= KlassEncodingMetaspaceMax) &&
        (KlassEncodingMetaspaceMax + heap_size - Universe::class_metaspace_size() <= OopEncodingHeapMax)) {
      base = (KlassEncodingMetaspaceMax - Universe::class_metaspace_size());
    } else {
      // UnscaledNarrowOop encoding didn't work, no base was found for ZeroBasedNarrowOop,
      // or HeapBasedNarrowOop encoding was requested. So, can't reserve below 32Gb.
      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
    }

    // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
    // used in ReservedHeapSpace() constructors.
    // The final values will be set in initialize_heap() below.
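    //
    // For orientation (not part of this branch's logic): with a chosen
    // base and shift, the compressed-oop translation performed elsewhere is
    //   narrow = (narrowOop)((uintptr_t(p) - base) >> shift)
    //   p      = (oop)(base + ((uintptr_t)narrow << shift))
    // so a zero base lets compiled code skip the add, and a zero shift
    // lets it skip the scaling; the checks below try to arrange for both.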
    if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax) &&
        (!UseCompressedKlassPointers ||
          (base + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax)) {
      // Use zero based compressed oops
      Universe::set_narrow_oop_base(NULL);
      // Don't need guard page for implicit checks in indexed
      // addressing mode with zero based Compressed Oops.
      Universe::set_narrow_oop_use_implicit_null_checks(true);
    } else {
      // Set to a non-NULL value so the ReservedSpace ctor computes
      // the correct no-access prefix.
      // The final value will be set in initialize_heap() below.
      Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
#ifdef _WIN64
      if (UseLargePages) {
        // Cannot allocate guard pages for implicit checks in indexed
        // addressing mode when large pages are specified on windows.
        Universe::set_narrow_oop_use_implicit_null_checks(false);
      }
#endif // _WIN64
    }
  }
#endif
  return (char*)base; // also return NULL (don't care) for 32-bit VM
}

jint Universe::initialize_heap() {

  if (UseParallelGC) {
#if INCLUDE_ALL_GCS
    Universe::_collectedHeap = new ParallelScavengeHeap();
#else  // INCLUDE_ALL_GCS
    fatal("UseParallelGC not supported in this VM.");
#endif // INCLUDE_ALL_GCS

  } else if (UseG1GC) {
#if INCLUDE_ALL_GCS
    G1CollectorPolicy* g1p = new G1CollectorPolicy();
    G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
    Universe::_collectedHeap = g1h;
#else  // INCLUDE_ALL_GCS
    fatal("UseG1GC not supported in java kernel vm.");
#endif // INCLUDE_ALL_GCS

  } else {
    GenCollectorPolicy* gc_policy;

    if (UseSerialGC) {
      gc_policy = new MarkSweepPolicy();
    } else if (UseConcMarkSweepGC) {
#if INCLUDE_ALL_GCS
      if (UseAdaptiveSizePolicy) {
        gc_policy = new ASConcurrentMarkSweepPolicy();
      } else {
        gc_policy = new ConcurrentMarkSweepPolicy();
      }
#else  // INCLUDE_ALL_GCS
      fatal("UseConcMarkSweepGC not supported in this VM.");
#endif // INCLUDE_ALL_GCS
    } else { // default old generation
      gc_policy = new MarkSweepPolicy();
    }

    Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
  }

  jint status = Universe::heap()->initialize();
  if (status != JNI_OK) {
    return status;
  }

#ifdef _LP64
  if (UseCompressedOops) {
    // Subtract a page because something can get allocated at heap base.
    // This also makes implicit null checking work, because the
    // memory+1 page below heap_base needs to cause a signal.
    // See needs_explicit_null_check.
    // Only set the heap base for compressed oops because it indicates
    // compressed oops for pstack code.
    bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
    if (verbose) {
      tty->cr();
      tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
                 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
    }
    if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) ||
        (UseCompressedKlassPointers &&
         ((uint64_t)Universe::heap()->base() + Universe::class_metaspace_size() > KlassEncodingMetaspaceMax))) {
      // Can't reserve heap below 32Gb.
      // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
      if (verbose) {
        tty->print(", %s: " PTR_FORMAT,
                   narrow_oop_mode_to_string(HeapBasedNarrowOop),
                   Universe::narrow_oop_base());
      }
    } else {
      Universe::set_narrow_oop_base(0);
      if (verbose) {
        tty->print(", %s", narrow_oop_mode_to_string(ZeroBasedNarrowOop));
      }
#ifdef _WIN64
      if (!Universe::narrow_oop_use_implicit_null_checks()) {
        // Don't need guard page for implicit checks in indexed addressing
        // mode with zero based Compressed Oops.
        Universe::set_narrow_oop_use_implicit_null_checks(true);
      }
#endif // _WIN64
      if ((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) {
        // Can't reserve heap below 4Gb.
        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
      } else {
        Universe::set_narrow_oop_shift(0);
        if (verbose) {
          tty->print(", %s", narrow_oop_mode_to_string(UnscaledNarrowOop));
        }
      }
    }
    if (verbose) {
      tty->cr();
      tty->cr();
    }
    if (UseCompressedKlassPointers) {
      Universe::set_narrow_klass_base(Universe::narrow_oop_base());
      Universe::set_narrow_klass_shift(MIN2(Universe::narrow_oop_shift(), LogKlassAlignmentInBytes));
    }
    Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
  }
  // Universe::narrow_oop_base() is one page below the metaspace
  // base. The actual metaspace base depends on alignment constraints
  // so we don't know its exact location here.
  assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size() - ClassMetaspaceSize) ||
         Universe::narrow_oop_base() == NULL, "invalid value");
  assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
         Universe::narrow_oop_shift() == 0, "invalid value");
#endif

  // We will never reach the CATCH below since Exceptions::_throw will cause
  // the VM to exit if an exception is thrown during initialization

  if (UseTLAB) {
    assert(Universe::heap()->supports_tlab_allocation(),
           "Should support thread-local allocation buffers");
    ThreadLocalAllocBuffer::startup_initialization();
  }
  return JNI_OK;
}


// Reserve the Java heap, which is now the same for all GCs.
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
  // Add in the class metaspace area so the classes in the headers can
  // be compressed the same as instances.
  // Need to round class space size up because it's below the heap and
  // the actual alignment depends on its size.
  Universe::set_class_metaspace_size(align_size_up(ClassMetaspaceSize, alignment));
  size_t total_reserved = align_size_up(heap_size + Universe::class_metaspace_size(), alignment);
  assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
         "heap size is too big for compressed oops");
  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

  ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !total_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by 'java' launcher.
      // Try again to reserve heap higher.
      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);

      ReservedHeapSpace total_rs0(total_reserved, alignment,
                                  UseLargePages, addr);

      if (addr != NULL && !total_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");

        ReservedHeapSpace total_rs1(total_reserved, alignment,
                                    UseLargePages, addr);
        total_rs = total_rs1;
      } else {
        total_rs = total_rs0;
      }
    }
  }

  if (!total_rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
    return total_rs;
  }

  // Split the reserved space into main Java heap and a space for
  // classes so that they can be compressed using the same algorithm
  // as compressed oops. If compressed oops and compressed klass pointers
  // are used, we need the metaspace first: if the alignment used for
  // compressed oops is greater than the one used for compressed klass
  // pointers, a metadata space on top of the heap could become
  // unreachable.
  ReservedSpace class_rs = total_rs.first_part(Universe::class_metaspace_size());
  ReservedSpace heap_rs = total_rs.last_part(Universe::class_metaspace_size(), alignment);
  Metaspace::initialize_class_space(class_rs);

  if (UseCompressedOops) {
    // Universe::initialize_heap() will reset this to NULL if unscaled
    // or zero-based narrow oops are actually used.
    address base = (address)(total_rs.base() - os::vm_page_size());
    Universe::set_narrow_oop_base(base);
  }
  return heap_rs;
}


// It's the caller's responsibility to ensure glitch-freedom
// (if required).
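// (For instance, a caller that needs capacity and used to form a consistent
// pair can take Heap_lock around the call, as universe_post_init does.)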
void Universe::update_heap_info_at_gc() {
  _heap_capacity_at_last_gc = heap()->capacity();
  _heap_used_at_last_gc     = heap()->used();
}


const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
  switch (mode) {
    case UnscaledNarrowOop:
      return "32-bits Oops";
    case ZeroBasedNarrowOop:
      return "zero based Compressed Oops";
    case HeapBasedNarrowOop:
      return "Compressed Oops with base";
  }

  ShouldNotReachHere();
  return "";
}


Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() {
  if (narrow_oop_base() != 0) {
    return HeapBasedNarrowOop;
  }

  if (narrow_oop_shift() != 0) {
    return ZeroBasedNarrowOop;
  }

  return UnscaledNarrowOop;
}


void universe2_init() {
  EXCEPTION_MARK;
  Universe::genesis(CATCH);
}


// This function is defined in JVM.cpp
extern void initialize_converter_functions();

bool universe_post_init() {
  assert(!is_init_completed(), "Error: initialization not yet completed!");
  Universe::_fully_initialized = true;
  EXCEPTION_MARK;
  { ResourceMark rm;
    Interpreter::initialize();      // needed for interpreter entry points
    if (!UseSharedSpaces) {
      HandleMark hm(THREAD);
      KlassHandle ok_h(THREAD, SystemDictionary::Object_klass());
      Universe::reinitialize_vtable_of(ok_h, CHECK_false);
      Universe::reinitialize_itables(CHECK_false);
    }
  }

  HandleMark hm(THREAD);
  Klass* k;
  instanceKlassHandle k_h;
  // Setup preallocated empty java.lang.Class array
  Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false);

  // Setup preallocated OutOfMemoryError errors
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false);
  k_h = instanceKlassHandle(THREAD, k);
  Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_metaspace = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_class_metaspace = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_gc_overhead_limit =
    k_h->allocate_instance(CHECK_false);

  // Setup preallocated NullPointerException
  // (this is currently used for a cheap & dirty solution in compiler exception handling)
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false);
  Universe::_null_ptr_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
  // Setup preallocated ArithmeticException
  // (this is currently used for a cheap & dirty solution in compiler exception handling)
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ArithmeticException(), true, CHECK_false);
  Universe::_arithmetic_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
  // Virtual Machine Error for when we get into a situation we can't resolve
  k = SystemDictionary::resolve_or_fail(
    vmSymbols::java_lang_VirtualMachineError(), true, CHECK_false);
  bool linked = InstanceKlass::cast(k)->link_class_or_fail(CHECK_false);
  if (!linked) {
    tty->print_cr("Unable to link/verify VirtualMachineError class");
    return false; // initialization failed
  }
  Universe::_virtual_machine_error_instance =
    InstanceKlass::cast(k)->allocate_instance(CHECK_false);

  Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false);

  if (!DumpSharedSpaces) {
    // These are the only Java fields that are currently set during shared space dumping.
    // We prefer to not handle this generally, so we always reinitialize these detail messages.
    Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());

    msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
    msg = java_lang_String::create_from_str("Class Metadata space", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());

    msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());

    msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());

    msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
    java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());

    // Setup the array of errors that have preallocated backtrace
    k = Universe::_out_of_memory_error_java_heap->klass();
    assert(k->name() == vmSymbols::java_lang_OutOfMemoryError(), "should be out of memory error");
    k_h = instanceKlassHandle(THREAD, k);

    int len = (StackTraceInThrowable) ? (int)PreallocatedOutOfMemoryErrorCount : 0;
    Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(k_h(), len, CHECK_false);
    for (int i = 0; i < len; i++) {
      oop err = k_h->allocate_instance(CHECK_false);
      Handle err_h = Handle(THREAD, err);
      java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
      Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
    }
    Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
  }


  // Setup static method for registering finalizers
  // The finalizer klass must be linked before looking up the method, in
  // case it needs to get rewritten.
  InstanceKlass::cast(SystemDictionary::Finalizer_klass())->link_class(CHECK_false);
  Method* m = InstanceKlass::cast(SystemDictionary::Finalizer_klass())->find_method(
                                  vmSymbols::register_method_name(),
                                  vmSymbols::register_method_signature());
  if (m == NULL || !m->is_static()) {
    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
      "java.lang.ref.Finalizer.register", false);
  }
  Universe::_finalizer_register_cache->init(
    SystemDictionary::Finalizer_klass(), m, CHECK_false);

  // Resolve on first use and initialize class.
  // Note: No race-condition here, since a resolve will always return the same result

  // Setup method for security checks
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_reflect_Method(), true, CHECK_false);
  k_h = instanceKlassHandle(THREAD, k);
  k_h->link_class(CHECK_false);
  m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_object_array_object_signature());
  if (m == NULL || m->is_static()) {
    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
      "java.lang.reflect.Method.invoke", false);
  }
  Universe::_reflect_invoke_cache->init(k_h(), m, CHECK_false);

  // Setup method for registering loaded classes in class loader vector
  InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
  m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
  if (m == NULL || m->is_static()) {
    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
      "java.lang.ClassLoader.addClass", false);
  }
  Universe::_loader_addClass_cache->init(
    SystemDictionary::ClassLoader_klass(), m, CHECK_false);

  // Setup method for checking protection domain
  InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false);
  m = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->
        find_method(vmSymbols::impliesCreateAccessControlContext_name(),
                    vmSymbols::void_boolean_signature());
  // Allow NULL which should only happen with bootstrapping.
  if (m != NULL) {
    if (m->is_static()) {
      // NoSuchMethodException doesn't actually work because it tries to run the
      // <init> function before java_lang_Class is linked. Print error and exit.
      tty->print_cr("ProtectionDomain.impliesCreateAccessControlContext() has the wrong linkage");
      return false; // initialization failed
    }
    Universe::_pd_implies_cache->init(
      SystemDictionary::ProtectionDomain_klass(), m, CHECK_false);
  }

  // The following initializes converter functions for serialization in
  // JVM.cpp. If we clean up the StrictMath code above we may want to find
  // a better solution for this as well.
  initialize_converter_functions();

  // This needs to be done before the first scavenge/gc, since
  // it's an input to soft ref clearing policy.
  {
    MutexLocker x(Heap_lock);
    Universe::update_heap_info_at_gc();
  }

  // ("weak") refs processing infrastructure initialization
  Universe::heap()->post_initialize();

  // Initialize performance counters for metaspaces
  MetaspaceCounters::initialize_performance_counters();
  MemoryService::add_metaspace_memory_pools();

  GC_locker::unlock(); // allow gc after bootstrapping

  MemoryService::set_universe_heap(Universe::_collectedHeap);
  return true;
}


void Universe::compute_base_vtable_size() {
  _base_vtable_size = ClassLoader::compute_Object_vtable();
}


// %%% The Universe::flush_foo methods belong in CodeCache.

// Flushes compiled methods dependent on dependee.
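// ("Flushing" here means marking the affected nmethods for deoptimization
// and running a VM_Deoptimize operation, not freeing the code outright.)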
void Universe::flush_dependents_on(instanceKlassHandle dependee) {
  assert_lock_strong(Compile_lock);

  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (CodeCache::mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

// Flushes compiled methods dependent on a particular CallSite
// instance when its target is different than the given MethodHandle.
void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
  assert_lock_strong(Compile_lock);

  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  CallSiteDepChange changes(call_site(), method_handle());

  // Compute the dependent nmethods that have a reference to a
  // CallSite object. We use InstanceKlass::mark_dependent_nmethods
  // directly instead of CodeCache::mark_for_deoptimization because we
  // want dependents on the call site class only, not all classes in
  // the ContextStream.
  int marked = 0;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
    marked = call_site_klass->mark_dependent_nmethods(changes);
  }
  if (marked > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (CodeCache::mark_for_evol_deoptimization(ev_k_h) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    CodeCache::make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void Universe::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (CodeCache::mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    CodeCache::make_marked_nmethods_not_entrant();
  }
}

void Universe::print() {
  print_on(gclog_or_tty);
}

void Universe::print_on(outputStream* st, bool extended) {
  st->print_cr("Heap");
  if (!extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
}

void Universe::print_heap_at_SIGBREAK() {
  if (PrintHeapAtSIGBREAK) {
    MutexLocker hl(Heap_lock);
    print_on(tty);
    tty->cr();
    tty->flush();
  }
}

void Universe::print_heap_before_gc(outputStream* st, bool ignore_extended) {
  st->print_cr("{Heap before GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  if (!PrintHeapAtGCExtended || ignore_extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
}

void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
  st->print_cr("Heap after GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  if (!PrintHeapAtGCExtended || ignore_extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
  st->print_cr("}");
}

void Universe::verify(VerifyOption option, const char* prefix, bool silent) {
  // The use of _verify_in_progress is a temporary workaround for
  // 6320749. Don't bother with creating a class to set and clear
  // it since it is only used in this method and the control flow is
  // straightforward.
  _verify_in_progress = true;

  COMPILER2_PRESENT(
    assert(!DerivedPointerTable::is_active(),
           "DPT should not be active during verification "
           "(of thread stacks below)");
  )

  ResourceMark rm;
  HandleMark hm;  // Handles created during verification can be zapped

  _verify_count++;

  if (!silent) gclog_or_tty->print(prefix);
  if (!silent) gclog_or_tty->print("[Verifying ");
  if (!silent) gclog_or_tty->print("threads ");
  Threads::verify();
  if (!silent) gclog_or_tty->print("heap ");
  heap()->verify(silent, option);
  if (!silent) gclog_or_tty->print("syms ");
  SymbolTable::verify();
  if (!silent) gclog_or_tty->print("strs ");
  StringTable::verify();
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    if (!silent) gclog_or_tty->print("zone ");
    CodeCache::verify();
  }
  if (!silent) gclog_or_tty->print("dict ");
  SystemDictionary::verify();
#ifndef PRODUCT
  if (!silent) gclog_or_tty->print("cldg ");
  ClassLoaderDataGraph::verify();
#endif
  if (!silent) gclog_or_tty->print("metaspace chunks ");
  MetaspaceAux::verify_free_chunks();
  if (!silent) gclog_or_tty->print("hand ");
  JNIHandles::verify();
  if (!silent) gclog_or_tty->print("C-heap ");
  os::check_heap();
  if (!silent) gclog_or_tty->print("code cache ");
  CodeCache::verify_oops();
  if (!silent) gclog_or_tty->print_cr("]");

  _verify_in_progress = false;
}

// Oop verification (see MacroAssembler::verify_oop)

static uintptr_t _verify_oop_data[2]   = {0, (uintptr_t)-1};
static uintptr_t _verify_klass_data[2] = {0, (uintptr_t)-1};


#ifndef PRODUCT

static void calculate_verify_data(uintptr_t verify_data[2],
                                  HeapWord* low_boundary,
                                  HeapWord* high_boundary) {
  assert(low_boundary < high_boundary, "bad interval");

  // decide which low-order bits we require to be clear:
  size_t alignSize = MinObjAlignmentInBytes;
  size_t min_object_size = CollectedHeap::min_fill_size();

  // make an inclusive limit:
  uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
  uintptr_t min = (uintptr_t)low_boundary;
  assert(min < max, "bad interval");
  uintptr_t diff = max ^ min;

  // throw away enough low-order bits to make the diff vanish
  uintptr_t mask = (uintptr_t)(-1);
  while ((mask & diff) != 0)
    mask <<= 1;
  uintptr_t bits = (min & mask);
  assert(bits == (max & mask), "correct mask");
  // check an intermediate value between min and max, just to make sure:
  assert(bits == ((min + (max-min)/2) & mask), "correct mask");

  // require address alignment, too:
  mask |= (alignSize - 1);

  if (!(verify_data[0] == 0 && verify_data[1] == (uintptr_t)-1)) {
    assert(verify_data[0] == mask && verify_data[1] == bits, "mask stability");
  }
  verify_data[0] = mask;
  verify_data[1] = bits;
}

// Oop verification (see MacroAssembler::verify_oop)

uintptr_t Universe::verify_oop_mask() {
  MemRegion m = heap()->reserved_region();
  calculate_verify_data(_verify_oop_data,
                        m.start(),
                        m.end());
  return _verify_oop_data[0];
}



uintptr_t Universe::verify_oop_bits() {
  verify_oop_mask();
  return _verify_oop_data[1];
}

uintptr_t Universe::verify_mark_mask() {
  return markOopDesc::lock_mask_in_place;
}

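// Illustrative example (made-up numbers): if the heap spans
// [0x00000007c0000000, 0x00000007e0000000), all addresses in it share a
// common high-order prefix, so calculate_verify_data() above computes a
// mask keeping exactly those shared high bits (plus the low alignment
// bits) and a bits value equal to that shared prefix; verify_oop can then
// test (candidate & mask) == bits as a cheap "plausibly a heap oop" filter.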
uintptr_t Universe::verify_mark_bits() {
  intptr_t mask = verify_mark_mask();
  intptr_t bits = (intptr_t)markOopDesc::prototype();
  assert((bits & ~mask) == 0, "no stray header bits");
  return bits;
}
#endif // PRODUCT


void Universe::compute_verify_oop_data() {
  verify_oop_mask();
  verify_oop_bits();
  verify_mark_mask();
  verify_mark_bits();
}


void CommonMethodOopCache::init(Klass* k, Method* m, TRAPS) {
  if (!UseSharedSpaces) {
    _klass = k;
  }
#ifndef PRODUCT
  else {
    // sharing initialization should have already set up _klass
    assert(_klass != NULL, "just checking");
  }
#endif

  _method_idnum = m->method_idnum();
  assert(_method_idnum >= 0, "sanity check");
}


ActiveMethodOopsCache::~ActiveMethodOopsCache() {
  if (_prev_methods != NULL) {
    delete _prev_methods;
    _prev_methods = NULL;
  }
}


void ActiveMethodOopsCache::add_previous_version(Method* method) {
  assert(Thread::current()->is_VM_thread(),
         "only VMThread can add previous versions");

  // Only append the previous method if it is executing on the stack.
  if (method->on_stack()) {

    if (_prev_methods == NULL) {
      // This is the first previous version so make some space.
      // Start with 2 elements under the assumption that the class
      // won't be redefined much.
      _prev_methods = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Method*>(2, true);
    }

    // RC_TRACE macro has an embedded ResourceMark
    RC_TRACE(0x00000100,
      ("add: %s(%s): adding prev version ref for cached method @%d",
       method->name()->as_C_string(), method->signature()->as_C_string(),
       _prev_methods->length()));

    _prev_methods->append(method);
  }


  // Since the caller is the VMThread and we are at a safepoint, this is a good
  // time to clear out unused method references.

  if (_prev_methods == NULL) return;

  for (int i = _prev_methods->length() - 1; i >= 0; i--) {
    Method* method = _prev_methods->at(i);
    assert(method != NULL, "weak method ref was unexpectedly cleared");

    if (!method->on_stack()) {
      // This method isn't running anymore so remove it
      _prev_methods->remove_at(i);
      MetadataFactory::free_metadata(method->method_holder()->class_loader_data(), method);
    } else {
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00000400,
        ("add: %s(%s): previous cached method @%d is alive",
         method->name()->as_C_string(), method->signature()->as_C_string(), i));
    }
  }
} // end add_previous_version()


bool ActiveMethodOopsCache::is_same_method(const Method* method) const {
  InstanceKlass* ik = InstanceKlass::cast(klass());
  const Method* check_method = ik->method_with_idnum(method_idnum());
  assert(check_method != NULL, "sanity check");
  if (check_method == method) {
    // done with the easy case
    return true;
  }

  if (_prev_methods != NULL) {
    // The cached method has been redefined at least once, so search
    // the previous versions for a match.
    for (int i = 0; i < _prev_methods->length(); i++) {
      check_method = _prev_methods->at(i);
      if (check_method == method) {
        // a previous version matches
        return true;
      }
    }
  }

  // either no previous versions or no previous version matched
  return false;
}


Method* LatestMethodOopCache::get_Method() {
  if (klass() == NULL) return NULL;
  InstanceKlass* ik = InstanceKlass::cast(klass());
  Method* m = ik->method_with_idnum(method_idnum());
  assert(m != NULL, "sanity check");
  return m;
}


#ifdef ASSERT
// Release dummy object(s) at bottom of heap
bool Universe::release_fullgc_alot_dummy() {
  MutexLocker ml(FullGCALot_lock);
  if (_fullgc_alot_dummy_array != NULL) {
    if (_fullgc_alot_dummy_next >= _fullgc_alot_dummy_array->length()) {
      // No more dummies to release, release entire array instead
      _fullgc_alot_dummy_array = NULL;
      return false;
    }
    if (!UseConcMarkSweepGC) {
      // Release dummy at bottom of old generation
      _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
    }
    // Release dummy at bottom of permanent generation
    _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
  }
  return true;
}

#endif // ASSERT