/*
 * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_universe.cpp.incl"

// Known objects
klassOop Universe::_boolArrayKlassObj                 = NULL;
klassOop Universe::_byteArrayKlassObj                 = NULL;
klassOop Universe::_charArrayKlassObj                 = NULL;
klassOop Universe::_intArrayKlassObj                  = NULL;
klassOop Universe::_shortArrayKlassObj                = NULL;
klassOop Universe::_longArrayKlassObj                 = NULL;
klassOop Universe::_singleArrayKlassObj               = NULL;
klassOop Universe::_doubleArrayKlassObj               = NULL;
klassOop Universe::_typeArrayKlassObjs[T_VOID+1]      = { NULL /*, NULL...*/ };
klassOop Universe::_objectArrayKlassObj               = NULL;
klassOop Universe::_symbolKlassObj                    = NULL;
klassOop Universe::_methodKlassObj                    = NULL;
klassOop Universe::_constMethodKlassObj               = NULL;
klassOop Universe::_methodDataKlassObj                = NULL;
klassOop Universe::_klassKlassObj                     = NULL;
klassOop Universe::_arrayKlassKlassObj                = NULL;
klassOop Universe::_objArrayKlassKlassObj             = NULL;
klassOop Universe::_typeArrayKlassKlassObj            = NULL;
klassOop Universe::_instanceKlassKlassObj             = NULL;
klassOop Universe::_constantPoolKlassObj              = NULL;
klassOop Universe::_constantPoolCacheKlassObj         = NULL;
klassOop Universe::_compiledICHolderKlassObj          = NULL;
klassOop Universe::_systemObjArrayKlassObj            = NULL;
oop Universe::_int_mirror                             = NULL;
oop Universe::_float_mirror                           = NULL;
oop Universe::_double_mirror                          = NULL;
oop Universe::_byte_mirror                            = NULL;
oop Universe::_bool_mirror                            = NULL;
oop Universe::_char_mirror                            = NULL;
oop Universe::_long_mirror                            = NULL;
oop Universe::_short_mirror                           = NULL;
oop Universe::_void_mirror                            = NULL;
oop Universe::_mirrors[T_VOID+1]                      = { NULL /*, NULL...*/ };
oop Universe::_main_thread_group                      = NULL;
oop Universe::_system_thread_group                    = NULL;
typeArrayOop Universe::_the_empty_byte_array          = NULL;
typeArrayOop Universe::_the_empty_short_array         = NULL;
typeArrayOop Universe::_the_empty_int_array           = NULL;
objArrayOop Universe::_the_empty_system_obj_array     = NULL;
objArrayOop Universe::_the_empty_class_klass_array    = NULL;
objArrayOop Universe::_the_array_interfaces_array     = NULL;
LatestMethodOopCache* Universe::_finalizer_register_cache = NULL;
LatestMethodOopCache* Universe::_loader_addClass_cache    = NULL;
ActiveMethodOopsCache* Universe::_reflect_invoke_cache    = NULL;
oop Universe::_out_of_memory_error_java_heap          = NULL;
oop Universe::_out_of_memory_error_perm_gen           = NULL;
oop Universe::_out_of_memory_error_array_size         = NULL;
oop Universe::_out_of_memory_error_gc_overhead_limit  = NULL;
objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
bool Universe::_verify_in_progress                    = false;
oop Universe::_null_ptr_exception_instance            = NULL;
oop Universe::_arithmetic_exception_instance          = NULL;
oop Universe::_virtual_machine_error_instance         = NULL;
oop Universe::_vm_exception                           = NULL;
oop Universe::_emptySymbol                            = NULL;

// These variables are guarded by FullGCALot_lock.
debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
debug_only(int Universe::_fullgc_alot_dummy_next = 0;)


// Heap
int             Universe::_verify_count = 0;

int             Universe::_base_vtable_size = 0;
bool            Universe::_bootstrapping = false;
bool            Universe::_fully_initialized = false;

size_t          Universe::_heap_capacity_at_last_gc;
size_t          Universe::_heap_used_at_last_gc = 0;

CollectedHeap*  Universe::_collectedHeap = NULL;

NarrowOopStruct Universe::_narrow_oop = { NULL, 0, true };


void Universe::basic_type_classes_do(void f(klassOop)) {
  f(boolArrayKlassObj());
  f(byteArrayKlassObj());
  f(charArrayKlassObj());
  f(intArrayKlassObj());
  f(shortArrayKlassObj());
  f(longArrayKlassObj());
  f(singleArrayKlassObj());
  f(doubleArrayKlassObj());
}


void Universe::system_classes_do(void f(klassOop)) {
  f(symbolKlassObj());
  f(methodKlassObj());
  f(constMethodKlassObj());
  f(methodDataKlassObj());
  f(klassKlassObj());
  f(arrayKlassKlassObj());
  f(objArrayKlassKlassObj());
  f(typeArrayKlassKlassObj());
  f(instanceKlassKlassObj());
  f(constantPoolKlassObj());
  f(systemObjArrayKlassObj());
}

void Universe::oops_do(OopClosure* f, bool do_all) {

  f->do_oop((oop*) &_int_mirror);
  f->do_oop((oop*) &_float_mirror);
  f->do_oop((oop*) &_double_mirror);
  f->do_oop((oop*) &_byte_mirror);
  f->do_oop((oop*) &_bool_mirror);
  f->do_oop((oop*) &_char_mirror);
  f->do_oop((oop*) &_long_mirror);
  f->do_oop((oop*) &_short_mirror);
  f->do_oop((oop*) &_void_mirror);

  // It's important to iterate over these guys even if they are null,
  // since that's how shared heaps are restored.
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    f->do_oop((oop*) &_mirrors[i]);
  }
  assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");

  // %%% Consider moving those "shared oops" over here with the others.
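  // The well-known klasses and support arrays below are strong roots:
  // this function hands each of them to the closure during root scanning.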
  f->do_oop((oop*)&_boolArrayKlassObj);
  f->do_oop((oop*)&_byteArrayKlassObj);
  f->do_oop((oop*)&_charArrayKlassObj);
  f->do_oop((oop*)&_intArrayKlassObj);
  f->do_oop((oop*)&_shortArrayKlassObj);
  f->do_oop((oop*)&_longArrayKlassObj);
  f->do_oop((oop*)&_singleArrayKlassObj);
  f->do_oop((oop*)&_doubleArrayKlassObj);
  f->do_oop((oop*)&_objectArrayKlassObj);
  {
    for (int i = 0; i < T_VOID+1; i++) {
      if (_typeArrayKlassObjs[i] != NULL) {
        assert(i >= T_BOOLEAN, "checking");
        f->do_oop((oop*)&_typeArrayKlassObjs[i]);
      } else if (do_all) {
        f->do_oop((oop*)&_typeArrayKlassObjs[i]);
      }
    }
  }
  f->do_oop((oop*)&_symbolKlassObj);
  f->do_oop((oop*)&_methodKlassObj);
  f->do_oop((oop*)&_constMethodKlassObj);
  f->do_oop((oop*)&_methodDataKlassObj);
  f->do_oop((oop*)&_klassKlassObj);
  f->do_oop((oop*)&_arrayKlassKlassObj);
  f->do_oop((oop*)&_objArrayKlassKlassObj);
  f->do_oop((oop*)&_typeArrayKlassKlassObj);
  f->do_oop((oop*)&_instanceKlassKlassObj);
  f->do_oop((oop*)&_constantPoolKlassObj);
  f->do_oop((oop*)&_constantPoolCacheKlassObj);
  f->do_oop((oop*)&_compiledICHolderKlassObj);
  f->do_oop((oop*)&_systemObjArrayKlassObj);
  f->do_oop((oop*)&_the_empty_byte_array);
  f->do_oop((oop*)&_the_empty_short_array);
  f->do_oop((oop*)&_the_empty_int_array);
  f->do_oop((oop*)&_the_empty_system_obj_array);
  f->do_oop((oop*)&_the_empty_class_klass_array);
  f->do_oop((oop*)&_the_array_interfaces_array);
  _finalizer_register_cache->oops_do(f);
  _loader_addClass_cache->oops_do(f);
  _reflect_invoke_cache->oops_do(f);
  f->do_oop((oop*)&_out_of_memory_error_java_heap);
  f->do_oop((oop*)&_out_of_memory_error_perm_gen);
  f->do_oop((oop*)&_out_of_memory_error_array_size);
  f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
  if (_preallocated_out_of_memory_error_array != (oop)NULL) {   // NULL when DumpSharedSpaces
    f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
  }
  f->do_oop((oop*)&_null_ptr_exception_instance);
  f->do_oop((oop*)&_arithmetic_exception_instance);
  f->do_oop((oop*)&_virtual_machine_error_instance);
  f->do_oop((oop*)&_main_thread_group);
  f->do_oop((oop*)&_system_thread_group);
  f->do_oop((oop*)&_vm_exception);
  f->do_oop((oop*)&_emptySymbol);
  debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
}


void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
  if (size < alignment || size % alignment != 0) {
    ResourceMark rm;
    stringStream st;
    st.print("Size of %s (%ld bytes) must be aligned to %ld bytes", name, size, alignment);
    char* error = st.as_string();
    vm_exit_during_initialization(error);
  }
}


void Universe::genesis(TRAPS) {
  ResourceMark rm;
  { FlagSetting fs(_bootstrapping, true);

    { MutexLocker mc(Compile_lock);

      // determine base vtable size; without that we cannot create the array klasses
      compute_base_vtable_size();

      if (!UseSharedSpaces) {
        _klassKlassObj          = klassKlass::create_klass(CHECK);
        _arrayKlassKlassObj     = arrayKlassKlass::create_klass(CHECK);

        _objArrayKlassKlassObj  = objArrayKlassKlass::create_klass(CHECK);
        _instanceKlassKlassObj  = instanceKlassKlass::create_klass(CHECK);
        _typeArrayKlassKlassObj = typeArrayKlassKlass::create_klass(CHECK);

        _symbolKlassObj         = symbolKlass::create_klass(CHECK);

        _emptySymbol            = oopFactory::new_symbol("", CHECK);

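        // One typeArrayKlass is created per primitive element type; the
        // second argument to create_klass is the element size in bytes
        // (e.g. sizeof(jchar) == 2).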
        _boolArrayKlassObj      = typeArrayKlass::create_klass(T_BOOLEAN, sizeof(jboolean), CHECK);
        _charArrayKlassObj      = typeArrayKlass::create_klass(T_CHAR,    sizeof(jchar),    CHECK);
        _singleArrayKlassObj    = typeArrayKlass::create_klass(T_FLOAT,   sizeof(jfloat),   CHECK);
        _doubleArrayKlassObj    = typeArrayKlass::create_klass(T_DOUBLE,  sizeof(jdouble),  CHECK);
        _byteArrayKlassObj      = typeArrayKlass::create_klass(T_BYTE,    sizeof(jbyte),    CHECK);
        _shortArrayKlassObj     = typeArrayKlass::create_klass(T_SHORT,   sizeof(jshort),   CHECK);
        _intArrayKlassObj       = typeArrayKlass::create_klass(T_INT,     sizeof(jint),     CHECK);
        _longArrayKlassObj      = typeArrayKlass::create_klass(T_LONG,    sizeof(jlong),    CHECK);

        _typeArrayKlassObjs[T_BOOLEAN] = _boolArrayKlassObj;
        _typeArrayKlassObjs[T_CHAR]    = _charArrayKlassObj;
        _typeArrayKlassObjs[T_FLOAT]   = _singleArrayKlassObj;
        _typeArrayKlassObjs[T_DOUBLE]  = _doubleArrayKlassObj;
        _typeArrayKlassObjs[T_BYTE]    = _byteArrayKlassObj;
        _typeArrayKlassObjs[T_SHORT]   = _shortArrayKlassObj;
        _typeArrayKlassObjs[T_INT]     = _intArrayKlassObj;
        _typeArrayKlassObjs[T_LONG]    = _longArrayKlassObj;

        _methodKlassObj             = methodKlass::create_klass(CHECK);
        _constMethodKlassObj        = constMethodKlass::create_klass(CHECK);
        _methodDataKlassObj         = methodDataKlass::create_klass(CHECK);
        _constantPoolKlassObj       = constantPoolKlass::create_klass(CHECK);
        _constantPoolCacheKlassObj  = constantPoolCacheKlass::create_klass(CHECK);

        _compiledICHolderKlassObj   = compiledICHolderKlass::create_klass(CHECK);
        _systemObjArrayKlassObj     = objArrayKlassKlass::cast(objArrayKlassKlassObj())->allocate_system_objArray_klass(CHECK);

        _the_empty_byte_array       = oopFactory::new_permanent_byteArray(0, CHECK);
        _the_empty_short_array      = oopFactory::new_permanent_shortArray(0, CHECK);
        _the_empty_int_array        = oopFactory::new_permanent_intArray(0, CHECK);
        _the_empty_system_obj_array = oopFactory::new_system_objArray(0, CHECK);

        _the_array_interfaces_array = oopFactory::new_system_objArray(2, CHECK);
        _vm_exception               = oopFactory::new_symbol("vm exception holder", CHECK);
      } else {
        FileMapInfo *mapinfo = FileMapInfo::current_info();
        char* buffer = mapinfo->region_base(CompactingPermGenGen::md);
        void** vtbl_list = (void**)buffer;
        init_self_patching_vtbl_list(vtbl_list,
                                     CompactingPermGenGen::vtbl_list_size);
      }
    }

    vmSymbols::initialize(CHECK);

    SystemDictionary::initialize(CHECK);

    klassOop ok = SystemDictionary::object_klass();

    if (UseSharedSpaces) {
      // Verify shared interfaces array.
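      // (When restoring from a shared archive these objects were populated
      //  at dump time, so genesis only checks the wiring it would otherwise
      //  have performed itself.)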
      assert(_the_array_interfaces_array->obj_at(0) ==
             SystemDictionary::cloneable_klass(), "u3");
      assert(_the_array_interfaces_array->obj_at(1) ==
             SystemDictionary::serializable_klass(), "u3");

      // Verify element klass for system obj array klass
      assert(objArrayKlass::cast(_systemObjArrayKlassObj)->element_klass() == ok, "u1");
      assert(objArrayKlass::cast(_systemObjArrayKlassObj)->bottom_klass() == ok, "u2");

      // Verify super class for the classes created above
      assert(Klass::cast(boolArrayKlassObj()     )->super() == ok, "u3");
      assert(Klass::cast(charArrayKlassObj()     )->super() == ok, "u3");
      assert(Klass::cast(singleArrayKlassObj()   )->super() == ok, "u3");
      assert(Klass::cast(doubleArrayKlassObj()   )->super() == ok, "u3");
      assert(Klass::cast(byteArrayKlassObj()     )->super() == ok, "u3");
      assert(Klass::cast(shortArrayKlassObj()    )->super() == ok, "u3");
      assert(Klass::cast(intArrayKlassObj()      )->super() == ok, "u3");
      assert(Klass::cast(longArrayKlassObj()     )->super() == ok, "u3");
      assert(Klass::cast(constantPoolKlassObj()  )->super() == ok, "u3");
      assert(Klass::cast(systemObjArrayKlassObj())->super() == ok, "u3");
    } else {
      // Set up shared interfaces array.  (Do this before supers are set up.)
      _the_array_interfaces_array->obj_at_put(0, SystemDictionary::cloneable_klass());
      _the_array_interfaces_array->obj_at_put(1, SystemDictionary::serializable_klass());

      // Set element klass for system obj array klass
      objArrayKlass::cast(_systemObjArrayKlassObj)->set_element_klass(ok);
      objArrayKlass::cast(_systemObjArrayKlassObj)->set_bottom_klass(ok);

      // Set super class for the classes created above
      Klass::cast(boolArrayKlassObj()     )->initialize_supers(ok, CHECK);
      Klass::cast(charArrayKlassObj()     )->initialize_supers(ok, CHECK);
      Klass::cast(singleArrayKlassObj()   )->initialize_supers(ok, CHECK);
      Klass::cast(doubleArrayKlassObj()   )->initialize_supers(ok, CHECK);
      Klass::cast(byteArrayKlassObj()     )->initialize_supers(ok, CHECK);
      Klass::cast(shortArrayKlassObj()    )->initialize_supers(ok, CHECK);
      Klass::cast(intArrayKlassObj()      )->initialize_supers(ok, CHECK);
      Klass::cast(longArrayKlassObj()     )->initialize_supers(ok, CHECK);
      Klass::cast(constantPoolKlassObj()  )->initialize_supers(ok, CHECK);
      Klass::cast(systemObjArrayKlassObj())->initialize_supers(ok, CHECK);
      Klass::cast(boolArrayKlassObj()     )->set_super(ok);
      Klass::cast(charArrayKlassObj()     )->set_super(ok);
      Klass::cast(singleArrayKlassObj()   )->set_super(ok);
      Klass::cast(doubleArrayKlassObj()   )->set_super(ok);
      Klass::cast(byteArrayKlassObj()     )->set_super(ok);
      Klass::cast(shortArrayKlassObj()    )->set_super(ok);
      Klass::cast(intArrayKlassObj()      )->set_super(ok);
      Klass::cast(longArrayKlassObj()     )->set_super(ok);
      Klass::cast(constantPoolKlassObj()  )->set_super(ok);
      Klass::cast(systemObjArrayKlassObj())->set_super(ok);
    }

    Klass::cast(boolArrayKlassObj()     )->append_to_sibling_list();
    Klass::cast(charArrayKlassObj()     )->append_to_sibling_list();
    Klass::cast(singleArrayKlassObj()   )->append_to_sibling_list();
    Klass::cast(doubleArrayKlassObj()   )->append_to_sibling_list();
    Klass::cast(byteArrayKlassObj()     )->append_to_sibling_list();
    Klass::cast(shortArrayKlassObj()    )->append_to_sibling_list();
    Klass::cast(intArrayKlassObj()      )->append_to_sibling_list();
    Klass::cast(longArrayKlassObj()     )->append_to_sibling_list();
    Klass::cast(constantPoolKlassObj()  )->append_to_sibling_list();
    Klass::cast(systemObjArrayKlassObj())->append_to_sibling_list();
  } // end of core bootstrapping

  // Initialize _objectArrayKlass after core bootstrapping to make
  // sure the super class is set up properly for _objectArrayKlass.
  _objectArrayKlassObj = instanceKlass::
    cast(SystemDictionary::object_klass())->array_klass(1, CHECK);
  // Add the class to the class hierarchy manually to make sure that
  // its vtable is initialized after core bootstrapping is completed.
  Klass::cast(_objectArrayKlassObj)->append_to_sibling_list();

  // Compute is_jdk version flags.
  // Only 1.3 or later has the java.lang.Shutdown class.
  // Only 1.4 or later has the java.lang.CharSequence interface.
  // Only 1.5 or later has the java.lang.management.MemoryUsage class.
  if (JDK_Version::is_partially_initialized()) {
    uint8_t jdk_version;
    klassOop k = SystemDictionary::resolve_or_null(
        vmSymbolHandles::java_lang_management_MemoryUsage(), THREAD);
    CLEAR_PENDING_EXCEPTION; // ignore exceptions
    if (k == NULL) {
      k = SystemDictionary::resolve_or_null(
          vmSymbolHandles::java_lang_CharSequence(), THREAD);
      CLEAR_PENDING_EXCEPTION; // ignore exceptions
      if (k == NULL) {
        k = SystemDictionary::resolve_or_null(
            vmSymbolHandles::java_lang_Shutdown(), THREAD);
        CLEAR_PENDING_EXCEPTION; // ignore exceptions
        if (k == NULL) {
          jdk_version = 2;
        } else {
          jdk_version = 3;
        }
      } else {
        jdk_version = 4;
      }
    } else {
      jdk_version = 5;
    }
    JDK_Version::fully_initialize(jdk_version);
  }

#ifdef ASSERT
  if (FullGCALot) {
    // Allocate an array of dummy objects.
    // We'd like these to be at the bottom of the old generation,
    // so that when we free one and then collect,
    // (almost) the whole heap moves
    // and we find out if we actually update all the oops correctly.
    // But we can't allocate directly in the old generation,
    // so we allocate wherever, and hope that the first collection
    // moves these objects to the bottom of the old generation.
    // We can allocate directly in the permanent generation, so we do.
    int size;
    if (UseConcMarkSweepGC) {
      warning("Using +FullGCALot with concurrent mark sweep gc "
              "will not force all objects to relocate");
      size = FullGCALotDummies;
    } else {
      size = FullGCALotDummies * 2;
    }
    objArrayOop naked_array = oopFactory::new_system_objArray(size, CHECK);
    objArrayHandle dummy_array(THREAD, naked_array);
    int i = 0;
    while (i < size) {
      if (!UseConcMarkSweepGC) {
        // Allocate dummy in old generation
        oop dummy = instanceKlass::cast(SystemDictionary::object_klass())->allocate_instance(CHECK);
        dummy_array->obj_at_put(i++, dummy);
      }
      // Allocate dummy in permanent generation
      oop dummy = instanceKlass::cast(SystemDictionary::object_klass())->allocate_permanent_instance(CHECK);
      dummy_array->obj_at_put(i++, dummy);
    }
    {
      // Only modify the global variable inside the mutex.
      // If we had a race to here, the other dummy_array instances
      // and their elements just get dropped on the floor, which is fine.
      MutexLocker ml(FullGCALot_lock);
      if (_fullgc_alot_dummy_array == NULL) {
        _fullgc_alot_dummy_array = dummy_array();
      }
    }
    assert(i == _fullgc_alot_dummy_array->length(), "just checking");
  }
#endif
}


static inline void add_vtable(void** list, int* n, Klass* o, int count) {
  list[(*n)++] = *(void**)&o->vtbl_value();
  guarantee((*n) <= count, "vtable list too small.");
}


void Universe::init_self_patching_vtbl_list(void** list, int count) {
  int n = 0;
  { klassKlass o;             add_vtable(list, &n, &o, count); }
  { arrayKlassKlass o;        add_vtable(list, &n, &o, count); }
  { objArrayKlassKlass o;     add_vtable(list, &n, &o, count); }
  { instanceKlassKlass o;     add_vtable(list, &n, &o, count); }
  { instanceKlass o;          add_vtable(list, &n, &o, count); }
  { instanceRefKlass o;       add_vtable(list, &n, &o, count); }
  { typeArrayKlassKlass o;    add_vtable(list, &n, &o, count); }
  { symbolKlass o;            add_vtable(list, &n, &o, count); }
  { typeArrayKlass o;         add_vtable(list, &n, &o, count); }
  { methodKlass o;            add_vtable(list, &n, &o, count); }
  { constMethodKlass o;       add_vtable(list, &n, &o, count); }
  { constantPoolKlass o;      add_vtable(list, &n, &o, count); }
  { constantPoolCacheKlass o; add_vtable(list, &n, &o, count); }
  { objArrayKlass o;          add_vtable(list, &n, &o, count); }
  { methodDataKlass o;        add_vtable(list, &n, &o, count); }
  { compiledICHolderKlass o;  add_vtable(list, &n, &o, count); }
}


class FixupMirrorClosure: public ObjectClosure {
 public:
  virtual void do_object(oop obj) {
    if (obj->is_klass()) {
      EXCEPTION_MARK;
      KlassHandle k(THREAD, klassOop(obj));
      // We will never reach the CATCH below since Exceptions::_throw will cause
      // the VM to exit if an exception is thrown during initialization
      java_lang_Class::create_mirror(k, CATCH);
      // This call unconditionally creates a new mirror for k,
      // and links in k's component_mirror field if k is an array.
      // If k is an objArray, k's element type must already have
      // a mirror.  In other words, this closure must process
      // the component type of an objArray k before it processes k.
      // This works because the permgen iterator presents arrays
      // and their component types in order of creation.
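      // (Mirrors for the primitive types are not created here; see
      //  Universe::initialize_basic_type_mirrors below.)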
    }
  }
};

void Universe::initialize_basic_type_mirrors(TRAPS) {
  if (UseSharedSpaces) {
    assert(_int_mirror != NULL, "already loaded");
    assert(_void_mirror == _mirrors[T_VOID], "consistently loaded");
  } else {

    assert(_int_mirror == NULL, "basic type mirrors already initialized");
    _int_mirror    =
      java_lang_Class::create_basic_type_mirror("int",    T_INT,     CHECK);
    _float_mirror  =
      java_lang_Class::create_basic_type_mirror("float",  T_FLOAT,   CHECK);
    _double_mirror =
      java_lang_Class::create_basic_type_mirror("double", T_DOUBLE,  CHECK);
    _byte_mirror   =
      java_lang_Class::create_basic_type_mirror("byte",   T_BYTE,    CHECK);
    _bool_mirror   =
      java_lang_Class::create_basic_type_mirror("boolean",T_BOOLEAN, CHECK);
    _char_mirror   =
      java_lang_Class::create_basic_type_mirror("char",   T_CHAR,    CHECK);
    _long_mirror   =
      java_lang_Class::create_basic_type_mirror("long",   T_LONG,    CHECK);
    _short_mirror  =
      java_lang_Class::create_basic_type_mirror("short",  T_SHORT,   CHECK);
    _void_mirror   =
      java_lang_Class::create_basic_type_mirror("void",   T_VOID,    CHECK);

    _mirrors[T_INT]     = _int_mirror;
    _mirrors[T_FLOAT]   = _float_mirror;
    _mirrors[T_DOUBLE]  = _double_mirror;
    _mirrors[T_BYTE]    = _byte_mirror;
    _mirrors[T_BOOLEAN] = _bool_mirror;
    _mirrors[T_CHAR]    = _char_mirror;
    _mirrors[T_LONG]    = _long_mirror;
    _mirrors[T_SHORT]   = _short_mirror;
    _mirrors[T_VOID]    = _void_mirror;
    //_mirrors[T_OBJECT] = instanceKlass::cast(_object_klass)->java_mirror();
    //_mirrors[T_ARRAY]  = instanceKlass::cast(_object_klass)->java_mirror();
  }
}

void Universe::fixup_mirrors(TRAPS) {
  // Bootstrap problem: all classes get a mirror (java.lang.Class instance) assigned eagerly,
  // but we cannot do that for classes created before java.lang.Class is loaded.  Here we simply
  // walk over permanent objects created so far (mostly classes) and fixup their mirrors.  Note
  // that the number of objects allocated at this point is very small.
  assert(SystemDictionary::class_klass_loaded(), "java.lang.Class should be loaded");
  FixupMirrorClosure blk;
  Universe::heap()->permanent_object_iterate(&blk);
}


static bool has_run_finalizers_on_exit = false;

void Universe::run_finalizers_on_exit() {
  if (has_run_finalizers_on_exit) return;
  has_run_finalizers_on_exit = true;

  // Called on VM exit.  This ought to be run in a separate thread.
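  // Upcall into java.lang.ref.Finalizer via a static Java call; a pending
  // exception, if any, is cleared below rather than propagated.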
  if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
  {
    PRESERVE_EXCEPTION_MARK;
    KlassHandle finalizer_klass(THREAD, SystemDictionary::finalizer_klass());
    JavaValue result(T_VOID);
    JavaCalls::call_static(
      &result,
      finalizer_klass,
      vmSymbolHandles::run_finalizers_on_exit_name(),
      vmSymbolHandles::void_method_signature(),
      THREAD
    );
    // Ignore any pending exceptions
    CLEAR_PENDING_EXCEPTION;
  }
}


// initialize_vtable could cause gc if
// 1) we specified true to initialize_vtable and
// 2) this ran after gc was enabled
// In case those ever change we use handles for oops
void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
  // init vtable of k and all subclasses
  Klass* ko = k_h()->klass_part();
  klassVtable* vt = ko->vtable();
  if (vt) vt->initialize_vtable(false, CHECK);
  if (ko->oop_is_instance()) {
    instanceKlass* ik = (instanceKlass*)ko;
    for (KlassHandle s_h(THREAD, ik->subklass()); s_h() != NULL; s_h = (THREAD, s_h()->klass_part()->next_sibling())) {
      reinitialize_vtable_of(s_h, CHECK);
    }
  }
}


void initialize_itable_for_klass(klassOop k, TRAPS) {
  instanceKlass::cast(k)->itable()->initialize_itable(false, CHECK);
}


void Universe::reinitialize_itables(TRAPS) {
  SystemDictionary::classes_do(initialize_itable_for_klass, CHECK);
}


bool Universe::on_page_boundary(void* addr) {
  return ((uintptr_t) addr) % os::vm_page_size() == 0;
}


bool Universe::should_fill_in_stack_trace(Handle throwable) {
  // never attempt to fill in the stack trace of preallocated errors that do not have
  // backtrace. These errors are kept alive forever and may be "re-used" when all
  // preallocated errors with backtrace have been consumed. Also need to avoid
  // a potential loop which could happen if an out of memory occurs when attempting
  // to allocate the backtrace.
  return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
          (throwable() != Universe::_out_of_memory_error_perm_gen) &&
          (throwable() != Universe::_out_of_memory_error_array_size) &&
          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
}


oop Universe::gen_out_of_memory_error(oop default_err) {
  // generate an out of memory error:
  // - if there is a preallocated error with backtrace available then return it with
  //   a filled in stack trace.
  // - if there are no preallocated errors with backtrace available then return
  //   an error without backtrace.
  int next;
  if (_preallocated_out_of_memory_error_avail_count > 0) {
    next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
    assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
  } else {
    next = -1;
  }
  if (next < 0) {
    // all preallocated errors have been used.
    // return default
    return default_err;
  } else {
    // get the error object at the slot and set it to NULL so that the
    // array isn't keeping it alive anymore.
    oop exc = preallocated_out_of_memory_errors()->obj_at(next);
    assert(exc != NULL, "slot has been used already");
    preallocated_out_of_memory_errors()->obj_at_put(next, NULL);

    // use the message from the default error
    oop msg = java_lang_Throwable::message(default_err);
    assert(msg != NULL, "no message");
    java_lang_Throwable::set_message(exc, msg);

    // populate the stack trace and return it.
    java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(exc);
    return exc;
  }
}

static intptr_t non_oop_bits = 0;

void* Universe::non_oop_word() {
  // Neither the high bits nor the low bits of this value are allowed
  // to look like (respectively) the high or low bits of a real oop.
  //
  // High and low are CPU-specific notions, but low always includes
  // the low-order bit.  Since oops are always aligned at least mod 4,
  // setting the low-order bit will ensure that the low half of the
  // word will never look like that of a real oop.
  //
  // Using the OS-supplied non-memory-address word (usually 0 or -1)
  // will take care of the high bits, however many there are.

  if (non_oop_bits == 0) {
    non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
  }

  return (void*)non_oop_bits;
}

jint universe_init() {
  assert(!Universe::_fully_initialized, "called after initialize_vtables");
  guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
            "LogHeapWordSize is incorrect.");
  guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
  guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
            "oop size is not a multiple of HeapWord size");
  TraceTime timer("Genesis", TraceStartupTime);
  GC_locker::lock();  // do not allow gc during bootstrapping
  JavaClasses::compute_hard_coded_offsets();

  // Get map info from shared archive file.
  if (DumpSharedSpaces)
    UseSharedSpaces = false;

  FileMapInfo* mapinfo = NULL;
  if (UseSharedSpaces) {
    mapinfo = NEW_C_HEAP_OBJ(FileMapInfo);
    memset(mapinfo, 0, sizeof(FileMapInfo));

    // Open the shared archive file, read and validate the header. If
    // initialization fails, shared spaces [UseSharedSpaces] are
    // disabled and the file is closed.

    if (mapinfo->initialize()) {
      FileMapInfo::set_current_info(mapinfo);
    } else {
      assert(!mapinfo->is_open() && !UseSharedSpaces,
             "archive file not closed or shared spaces not disabled.");
    }
  }

  jint status = Universe::initialize_heap();
  if (status != JNI_OK) {
    return status;
  }

  // We have a heap so create the methodOop caches before
  // CompactingPermGenGen::initialize_oops() tries to populate them.
  Universe::_finalizer_register_cache = new LatestMethodOopCache();
  Universe::_loader_addClass_cache    = new LatestMethodOopCache();
  Universe::_reflect_invoke_cache     = new ActiveMethodOopsCache();

  if (UseSharedSpaces) {

    // Read the data structures supporting the shared spaces (shared
    // system dictionary, symbol table, etc.).  After that, access to
    // the file (other than the mapped regions) is no longer needed, and
    // the file is closed. Closing the file does not affect the
    // currently mapped regions.
    CompactingPermGenGen::initialize_oops();
    mapinfo->close();

  } else {
    SymbolTable::create_table();
    StringTable::create_table();
    ClassLoader::create_package_info_table();
  }

  return JNI_OK;
}

// Choose the heap base address and oop encoding mode
// when compressed oops are used:
// Unscaled  - Use 32-bit oops without encoding when
//     NarrowOopHeapBaseMin + heap_size < 4Gb
// ZeroBased - Use zero based compressed oops with encoding when
//     NarrowOopHeapBaseMin + heap_size < 32Gb
// HeapBased - Use compressed oops with heap base + encoding.

// 4Gb
static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
// 32Gb
static const uint64_t OopEncodingHeapMax = NarrowOopHeapMax << LogMinObjAlignmentInBytes;

char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
#ifdef _LP64
  if (UseCompressedOops) {
    assert(mode == UnscaledNarrowOop  ||
           mode == ZeroBasedNarrowOop ||
           mode == HeapBasedNarrowOop, "mode is invalid");
    // Return specified base for the first request.
    if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
      return (char*)HeapBaseMinAddress;
    }
    const size_t total_size = heap_size + HeapBaseMinAddress;
    if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
      if (total_size <= NarrowOopHeapMax && (mode == UnscaledNarrowOop) &&
          (Universe::narrow_oop_shift() == 0)) {
        // Use 32-bit oops without encoding and
        // place heap's top on the 4Gb boundary
        return (char*)(NarrowOopHeapMax - heap_size);
      } else {
        // Can't reserve with NarrowOopShift == 0
        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
        if (mode == UnscaledNarrowOop ||
            mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax) {
          // Use zero based compressed oops with encoding and
          // place heap's top on the 32Gb boundary in case
          // total_size > 4Gb or failed to reserve below 4Gb.
          return (char*)(OopEncodingHeapMax - heap_size);
        }
      }
    } else {
      // Can't reserve below 32Gb.
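      // Fall through to heap-based mode: no preferred address is returned
      // and oops will be decoded as base + (narrow_oop << shift).
      // Worked example, assuming 8-byte alignment and HeapBaseMinAddress = 2g:
      //   heap_size = 28g -> total_size = 30g <= 32g: zero based, heap top
      //                      placed at the 32g boundary (branch above);
      //   heap_size = 31g -> total_size = 33g >  32g: this branch.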
      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
    }
  }
#endif
  return NULL; // also return NULL (don't care) for 32-bit VM
}

jint Universe::initialize_heap() {

  if (UseParallelGC) {
#ifndef SERIALGC
    Universe::_collectedHeap = new ParallelScavengeHeap();
#else  // SERIALGC
    fatal("UseParallelGC not supported in java kernel vm.");
#endif // SERIALGC

  } else if (UseG1GC) {
#ifndef SERIALGC
    G1CollectorPolicy* g1p = new G1CollectorPolicy_BestRegionsFirst();
    G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
    Universe::_collectedHeap = g1h;
#else  // SERIALGC
    fatal("UseG1GC not supported in java kernel vm.");
#endif // SERIALGC

  } else {
    GenCollectorPolicy *gc_policy;

    if (UseSerialGC) {
      gc_policy = new MarkSweepPolicy();
    } else if (UseConcMarkSweepGC) {
#ifndef SERIALGC
      if (UseAdaptiveSizePolicy) {
        gc_policy = new ASConcurrentMarkSweepPolicy();
      } else {
        gc_policy = new ConcurrentMarkSweepPolicy();
      }
#else  // SERIALGC
      fatal("UseConcMarkSweepGC not supported in java kernel vm.");
#endif // SERIALGC
    } else { // default old generation
      gc_policy = new MarkSweepPolicy();
    }

    Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
  }

  jint status = Universe::heap()->initialize();
  if (status != JNI_OK) {
    return status;
  }

#ifdef _LP64
  if (UseCompressedOops) {
    // Subtract a page because something can get allocated at heap base.
    // This also makes implicit null checking work, because the
    // memory+1 page below heap_base needs to cause a signal.
    // See needs_explicit_null_check.
    // Only set the heap base for compressed oops because it indicates
    // compressed oops for pstack code.
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print("heap address: "PTR_FORMAT, Universe::heap()->base());
    }
    if ((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) {
      // Can't reserve heap below 32Gb.
      Universe::set_narrow_oop_base(Universe::heap()->base() - os::vm_page_size());
      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
      if (PrintCompressedOopsMode) {
        tty->print(", Compressed Oops with base: "PTR_FORMAT, Universe::narrow_oop_base());
      }
    } else {
      Universe::set_narrow_oop_base(0);
      if (PrintCompressedOopsMode) {
        tty->print(", zero based Compressed Oops");
      }
#ifdef _WIN64
      if (!Universe::narrow_oop_use_implicit_null_checks()) {
        // Don't need guard page for implicit checks in indexed addressing
        // mode with zero based Compressed Oops.
        Universe::set_narrow_oop_use_implicit_null_checks(true);
      }
#endif // _WIN64
      if((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) {
        // Can't reserve heap below 4Gb.
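        // Keep zero-based decoding but with scaling:
        // oop address = narrow_oop << LogMinObjAlignmentInBytes.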
        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
      } else {
        Universe::set_narrow_oop_shift(0);
        if (PrintCompressedOopsMode) {
          tty->print(", 32-bit Oops");
        }
      }
    }
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->cr();
    }
  }
  assert(Universe::narrow_oop_base() == (Universe::heap()->base() - os::vm_page_size()) ||
         Universe::narrow_oop_base() == NULL, "invalid value");
  assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
         Universe::narrow_oop_shift() == 0, "invalid value");
#endif

  // We will never reach the CATCH below since Exceptions::_throw will cause
  // the VM to exit if an exception is thrown during initialization

  if (UseTLAB) {
    assert(Universe::heap()->supports_tlab_allocation(),
           "Should support thread-local allocation buffers");
    ThreadLocalAllocBuffer::startup_initialization();
  }
  return JNI_OK;
}

// It's the caller's responsibility to ensure glitch-freedom
// (if required).
void Universe::update_heap_info_at_gc() {
  _heap_capacity_at_last_gc = heap()->capacity();
  _heap_used_at_last_gc     = heap()->used();
}



void universe2_init() {
  EXCEPTION_MARK;
  Universe::genesis(CATCH);
  // Although we'd like to verify here that the state of the heap
  // is good, we can't because the main thread has not yet added
  // itself to the threads list (so, using current interfaces
  // we can't "fill" its TLAB), unless TLABs are disabled.
  if (VerifyBeforeGC && !UseTLAB &&
      Universe::heap()->total_collections() >= VerifyGCStartAt) {
    Universe::heap()->prepare_for_verify();
    Universe::verify();   // make sure we're starting with a clean slate
  }
}


// This function is defined in JVM.cpp
extern void initialize_converter_functions();

bool universe_post_init() {
  Universe::_fully_initialized = true;
  EXCEPTION_MARK;
  { ResourceMark rm;
    Interpreter::initialize();      // needed for interpreter entry points
    if (!UseSharedSpaces) {
      KlassHandle ok_h(THREAD, SystemDictionary::object_klass());
      Universe::reinitialize_vtable_of(ok_h, CHECK_false);
      Universe::reinitialize_itables(CHECK_false);
    }
  }

  klassOop k;
  instanceKlassHandle k_h;
  if (!UseSharedSpaces) {
    // Setup preallocated empty java.lang.Class array
    Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::class_klass(), 0, CHECK_false);
    // Setup preallocated OutOfMemoryError errors
    k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_OutOfMemoryError(), true, CHECK_false);
    k_h = instanceKlassHandle(THREAD, k);
    Universe::_out_of_memory_error_java_heap  = k_h->allocate_permanent_instance(CHECK_false);
    Universe::_out_of_memory_error_perm_gen   = k_h->allocate_permanent_instance(CHECK_false);
    Universe::_out_of_memory_error_array_size = k_h->allocate_permanent_instance(CHECK_false);
    Universe::_out_of_memory_error_gc_overhead_limit =
      k_h->allocate_permanent_instance(CHECK_false);

    // Setup preallocated NullPointerException
    // (this is currently used for a cheap & dirty solution in compiler exception handling)
    k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_NullPointerException(), true, CHECK_false);
    Universe::_null_ptr_exception_instance = instanceKlass::cast(k)->allocate_permanent_instance(CHECK_false);
    // Setup preallocated ArithmeticException
    // (this is currently used for a cheap & dirty solution in compiler exception handling)
    k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_ArithmeticException(), true, CHECK_false);
    Universe::_arithmetic_exception_instance = instanceKlass::cast(k)->allocate_permanent_instance(CHECK_false);
    // Virtual Machine Error for when we get into a situation we can't resolve
    k = SystemDictionary::resolve_or_fail(
      vmSymbolHandles::java_lang_VirtualMachineError(), true, CHECK_false);
    bool linked = instanceKlass::cast(k)->link_class_or_fail(CHECK_false);
    if (!linked) {
      tty->print_cr("Unable to link/verify VirtualMachineError class");
      return false; // initialization failed
    }
    Universe::_virtual_machine_error_instance =
      instanceKlass::cast(k)->allocate_permanent_instance(CHECK_false);
  }
  if (!DumpSharedSpaces) {
    // These are the only Java fields that are currently set during shared space dumping.
    // We prefer to not handle this generally, so we always reinitialize these detail messages.
    Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());

    msg = java_lang_String::create_from_str("PermGen space", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_perm_gen, msg());

    msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());

    msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());

    msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
    java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());

    // Setup the array of errors that have preallocated backtrace
    k = Universe::_out_of_memory_error_java_heap->klass();
    assert(k->klass_part()->name() == vmSymbols::java_lang_OutOfMemoryError(), "should be out of memory error");
    k_h = instanceKlassHandle(THREAD, k);

    int len = (StackTraceInThrowable) ? (int)PreallocatedOutOfMemoryErrorCount : 0;
    Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(k_h(), len, CHECK_false);
    for (int i=0; i<len; i++) {
      oop err = k_h->allocate_permanent_instance(CHECK_false);
      Handle err_h = Handle(THREAD, err);
      java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
      Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
    }
    Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
  }


  // Setup static method for registering finalizers
  // The finalizer klass must be linked before looking up the method, in
  // case it needs to get rewritten.
  instanceKlass::cast(SystemDictionary::finalizer_klass())->link_class(CHECK_false);
  methodOop m = instanceKlass::cast(SystemDictionary::finalizer_klass())->find_method(
                                  vmSymbols::register_method_name(),
                                  vmSymbols::register_method_signature());
  if (m == NULL || !m->is_static()) {
    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
      "java.lang.ref.Finalizer.register", false);
  }
  Universe::_finalizer_register_cache->init(
    SystemDictionary::finalizer_klass(), m, CHECK_false);

  // Resolve on first use and initialize class.
  // Note: No race-condition here, since a resolve will always return the same result

  // Setup method for security checks
  k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_reflect_Method(), true, CHECK_false);
  k_h = instanceKlassHandle(THREAD, k);
  k_h->link_class(CHECK_false);
  m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_array_object_object_signature());
  if (m == NULL || m->is_static()) {
    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
      "java.lang.reflect.Method.invoke", false);
  }
  Universe::_reflect_invoke_cache->init(k_h(), m, CHECK_false);

  // Setup method for registering loaded classes in class loader vector
  instanceKlass::cast(SystemDictionary::classloader_klass())->link_class(CHECK_false);
  m = instanceKlass::cast(SystemDictionary::classloader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
  if (m == NULL || m->is_static()) {
    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
      "java.lang.ClassLoader.addClass", false);
  }
  Universe::_loader_addClass_cache->init(
    SystemDictionary::classloader_klass(), m, CHECK_false);

  // The following initializes converter functions for serialization in
  // JVM.cpp. If we clean up the StrictMath code above we may want to find
  // a better solution for this as well.
  initialize_converter_functions();

  // This needs to be done before the first scavenge/gc, since
  // it's an input to soft ref clearing policy.
  {
    MutexLocker x(Heap_lock);
    Universe::update_heap_info_at_gc();
  }

  // ("weak") refs processing infrastructure initialization
  Universe::heap()->post_initialize();

  GC_locker::unlock();  // allow gc after bootstrapping

  MemoryService::set_universe_heap(Universe::_collectedHeap);
  return true;
}


void Universe::compute_base_vtable_size() {
  _base_vtable_size = ClassLoader::compute_Object_vtable();
}


// %%% The Universe::flush_foo methods belong in CodeCache.

// Flushes compiled methods dependent on dependee.
void Universe::flush_dependents_on(instanceKlassHandle dependee) {
  assert_lock_strong(Compile_lock);

  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  DepChange changes(dependee);

  // Compute the dependent nmethods
  if (CodeCache::mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.
  // Compute the dependent nmethods
  if (CodeCache::mark_for_evol_deoptimization(ev_k_h) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    CodeCache::make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP


// Flushes compiled methods dependent on dependee
void Universe::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (CodeCache::mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    CodeCache::make_marked_nmethods_not_entrant();
  }
}

void Universe::print() { print_on(gclog_or_tty); }

void Universe::print_on(outputStream* st) {
  st->print_cr("Heap");
  heap()->print_on(st);
}

void Universe::print_heap_at_SIGBREAK() {
  if (PrintHeapAtSIGBREAK) {
    MutexLocker hl(Heap_lock);
    print_on(tty);
    tty->cr();
    tty->flush();
  }
}

void Universe::print_heap_before_gc(outputStream* st) {
  st->print_cr("{Heap before GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  heap()->print_on(st);
}

void Universe::print_heap_after_gc(outputStream* st) {
  st->print_cr("Heap after GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  heap()->print_on(st);
  st->print_cr("}");
}

void Universe::verify(bool allow_dirty, bool silent, bool option) {
  if (SharedSkipVerify) {
    return;
  }

  // The use of _verify_in_progress is a temporary workaround for
  // 6320749. Don't bother with creating a class to set and clear
  // it since it is only used in this method and the control flow is
  // straightforward.
  _verify_in_progress = true;

  COMPILER2_PRESENT(
    assert(!DerivedPointerTable::is_active(),
         "DPT should not be active during verification "
         "(of thread stacks below)");
  )

  ResourceMark rm;
  HandleMark hm;  // Handles created during verification can be zapped
  _verify_count++;

  if (!silent) gclog_or_tty->print("[Verifying ");
  if (!silent) gclog_or_tty->print("threads ");
  Threads::verify();
  heap()->verify(allow_dirty, silent, option);

  if (!silent) gclog_or_tty->print("syms ");
  SymbolTable::verify();
  if (!silent) gclog_or_tty->print("strs ");
  StringTable::verify();
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    if (!silent) gclog_or_tty->print("zone ");
    CodeCache::verify();
  }
  if (!silent) gclog_or_tty->print("dict ");
  SystemDictionary::verify();
  if (!silent) gclog_or_tty->print("hand ");
  JNIHandles::verify();
  if (!silent) gclog_or_tty->print("C-heap ");
  os::check_heap();
  if (!silent) gclog_or_tty->print_cr("]");

  _verify_in_progress = false;
}

// Oop verification (see MacroAssembler::verify_oop)

static uintptr_t _verify_oop_data[2]   = {0, (uintptr_t)-1};
static uintptr_t _verify_klass_data[2] = {0, (uintptr_t)-1};


static void calculate_verify_data(uintptr_t verify_data[2],
                                  HeapWord* low_boundary,
                                  HeapWord* high_boundary) {
  assert(low_boundary < high_boundary, "bad interval");

  // decide which low-order bits we require to be clear:
  size_t alignSize = MinObjAlignmentInBytes;
  size_t min_object_size = oopDesc::header_size();

  // make an inclusive limit:
  uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
  uintptr_t min = (uintptr_t)low_boundary;
  assert(min < max, "bad interval");
  uintptr_t diff = max ^ min;

  // throw away enough low-order bits to make the diff vanish
  uintptr_t mask = (uintptr_t)(-1);
  while ((mask & diff) != 0)
    mask <<= 1;
  uintptr_t bits = (min & mask);
  assert(bits == (max & mask), "correct mask");
  // check an intermediate value between min and max, just to make sure:
  assert(bits == ((min + (max-min)/2) & mask), "correct mask");

  // require address alignment, too:
  mask |= (alignSize - 1);

  if (!(verify_data[0] == 0 && verify_data[1] == (uintptr_t)-1)) {
    assert(verify_data[0] == mask && verify_data[1] == bits, "mask stability");
  }
  verify_data[0] = mask;
  verify_data[1] = bits;
}


// Oop verification (see MacroAssembler::verify_oop)
#ifndef PRODUCT

uintptr_t Universe::verify_oop_mask() {
  MemRegion m = heap()->reserved_region();
  calculate_verify_data(_verify_oop_data,
                        m.start(),
                        m.end());
  return _verify_oop_data[0];
}



uintptr_t Universe::verify_oop_bits() {
  verify_oop_mask();
  return _verify_oop_data[1];
}


uintptr_t Universe::verify_klass_mask() {
  /* $$$
  // A klass can never live in the new space. Since the new and old
  // spaces can change size, we must settle for bounds-checking against
  // the bottom of the world, plus the smallest possible new and old
  // space sizes that may arise during execution.
  size_t min_new_size = Universe::new_size();   // in bytes
  size_t min_old_size = Universe::old_size();   // in bytes
  calculate_verify_data(_verify_klass_data,
          (HeapWord*)((uintptr_t)_new_gen->low_boundary + min_new_size + min_old_size),
          _perm_gen->high_boundary);
  */
  // Why doesn't the above just say that klasses always live in the perm
  // gen?  I'll see if that seems to work...
  MemRegion permanent_reserved;
  switch (Universe::heap()->kind()) {
  default:
    // ???: What if a CollectedHeap doesn't have a permanent generation?
    ShouldNotReachHere();
    break;
  case CollectedHeap::GenCollectedHeap:
  case CollectedHeap::G1CollectedHeap: {
    SharedHeap* sh = (SharedHeap*) Universe::heap();
    permanent_reserved = sh->perm_gen()->reserved();
    break;
  }
#ifndef SERIALGC
  case CollectedHeap::ParallelScavengeHeap: {
    ParallelScavengeHeap* psh = (ParallelScavengeHeap*) Universe::heap();
    permanent_reserved = psh->perm_gen()->reserved();
    break;
  }
#endif // SERIALGC
  }
  calculate_verify_data(_verify_klass_data,
                        permanent_reserved.start(),
                        permanent_reserved.end());

  return _verify_klass_data[0];
}



uintptr_t Universe::verify_klass_bits() {
  verify_klass_mask();
  return _verify_klass_data[1];
}


uintptr_t Universe::verify_mark_mask() {
  return markOopDesc::lock_mask_in_place;
}



uintptr_t Universe::verify_mark_bits() {
  intptr_t mask = verify_mark_mask();
  intptr_t bits = (intptr_t)markOopDesc::prototype();
  assert((bits & ~mask) == 0, "no stray header bits");
  return bits;
}
#endif // PRODUCT


void Universe::compute_verify_oop_data() {
  verify_oop_mask();
  verify_oop_bits();
  verify_mark_mask();
  verify_mark_bits();
  verify_klass_mask();
  verify_klass_bits();
}


void CommonMethodOopCache::init(klassOop k, methodOop m, TRAPS) {
  if (!UseSharedSpaces) {
    _klass = k;
  }
#ifndef PRODUCT
  else {
    // sharing initialization should have already set up _klass
    assert(_klass != NULL, "just checking");
  }
#endif

  _method_idnum = m->method_idnum();
  assert(_method_idnum >= 0, "sanity check");
}


ActiveMethodOopsCache::~ActiveMethodOopsCache() {
  if (_prev_methods != NULL) {
    for (int i = _prev_methods->length() - 1; i >= 0; i--) {
      jweak method_ref = _prev_methods->at(i);
      if (method_ref != NULL) {
        JNIHandles::destroy_weak_global(method_ref);
      }
    }
    delete _prev_methods;
    _prev_methods = NULL;
  }
}


void ActiveMethodOopsCache::add_previous_version(const methodOop method) {
  assert(Thread::current()->is_VM_thread(),
         "only VMThread can add previous versions");

  if (_prev_methods == NULL) {
    // This is the first previous version so make some space.
    // Start with 2 elements under the assumption that the class
    // won't be redefined much.
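    // Allocate on the C heap (and, via the 'true' argument, give the array
    // a C-heap backing store) so the list survives ResourceMark scopes.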
    _prev_methods = new (ResourceObj::C_HEAP) GrowableArray<jweak>(2, true);
  }

  // RC_TRACE macro has an embedded ResourceMark
  RC_TRACE(0x00000100,
    ("add: %s(%s): adding prev version ref for cached method @%d",
    method->name()->as_C_string(), method->signature()->as_C_string(),
    _prev_methods->length()));

  methodHandle method_h(method);
  jweak method_ref = JNIHandles::make_weak_global(method_h);
  _prev_methods->append(method_ref);

  // Using weak references allows previous versions of the cached
  // method to be GC'ed when they are no longer needed. Since the
  // caller is the VMThread and we are at a safepoint, this is a good
  // time to clear out unused weak references.

  for (int i = _prev_methods->length() - 1; i >= 0; i--) {
    jweak method_ref = _prev_methods->at(i);
    assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
    if (method_ref == NULL) {
      _prev_methods->remove_at(i);
      // Since we are traversing the array backwards, we don't have to
      // do anything special with the index.
      continue;  // robustness
    }

    methodOop m = (methodOop)JNIHandles::resolve(method_ref);
    if (m == NULL) {
      // this method entry has been GC'ed so remove it
      JNIHandles::destroy_weak_global(method_ref);
      _prev_methods->remove_at(i);
    } else {
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00000400, ("add: %s(%s): previous cached method @%d is alive",
        m->name()->as_C_string(), m->signature()->as_C_string(), i));
    }
  }
} // end add_previous_version()


bool ActiveMethodOopsCache::is_same_method(const methodOop method) const {
  instanceKlass* ik = instanceKlass::cast(klass());
  methodOop check_method = ik->method_with_idnum(method_idnum());
  assert(check_method != NULL, "sanity check");
  if (check_method == method) {
    // done with the easy case
    return true;
  }

  if (_prev_methods != NULL) {
    // The cached method has been redefined at least once so search
    // the previous versions for a match.
    for (int i = 0; i < _prev_methods->length(); i++) {
      jweak method_ref = _prev_methods->at(i);
      assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
      if (method_ref == NULL) {
        continue;  // robustness
      }

      check_method = (methodOop)JNIHandles::resolve(method_ref);
      if (check_method == method) {
        // a previous version matches
        return true;
      }
    }
  }

  // either no previous versions or no previous version matched
  return false;
}


methodOop LatestMethodOopCache::get_methodOop() {
  instanceKlass* ik = instanceKlass::cast(klass());
  methodOop m = ik->method_with_idnum(method_idnum());
  assert(m != NULL, "sanity check");
  return m;
}


#ifdef ASSERT
// Release dummy object(s) at bottom of heap
bool Universe::release_fullgc_alot_dummy() {
  MutexLocker ml(FullGCALot_lock);
  if (_fullgc_alot_dummy_array != NULL) {
    if (_fullgc_alot_dummy_next >= _fullgc_alot_dummy_array->length()) {
      // No more dummies to release, release entire array instead
      _fullgc_alot_dummy_array = NULL;
      return false;
    }
    if (!UseConcMarkSweepGC) {
      // Release dummy at bottom of old generation
      _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
    }
    // Release dummy at bottom of permanent generation
    _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
  }
  return true;
}

#endif // ASSERT