1 /* 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/javaClasses.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "classfile/verifier.hpp" 29 #include "classfile/vmSymbols.hpp" 30 #include "compiler/compileBroker.hpp" 31 #include "gc_implementation/shared/markSweep.inline.hpp" 32 #include "gc_interface/collectedHeap.inline.hpp" 33 #include "interpreter/oopMapCache.hpp" 34 #include "interpreter/rewriter.hpp" 35 #include "jvmtifiles/jvmti.h" 36 #include "memory/genOopClosures.inline.hpp" 37 #include "memory/oopFactory.hpp" 38 #include "memory/permGen.hpp" 39 #include "oops/fieldStreams.hpp" 40 #include "oops/instanceKlass.hpp" 41 #include "oops/instanceMirrorKlass.hpp" 42 #include "oops/instanceOop.hpp" 43 #include "oops/methodOop.hpp" 44 #include "oops/objArrayKlassKlass.hpp" 45 #include "oops/oop.inline.hpp" 46 #include "oops/symbol.hpp" 47 #include "prims/jvmtiExport.hpp" 48 #include "prims/jvmtiRedefineClassesTrace.hpp" 49 #include "runtime/fieldDescriptor.hpp" 50 #include "runtime/handles.inline.hpp" 51 #include "runtime/javaCalls.hpp" 52 #include "runtime/mutexLocker.hpp" 53 #include "services/threadService.hpp" 54 #include "utilities/dtrace.hpp" 55 #ifdef TARGET_OS_FAMILY_linux 56 # include "thread_linux.inline.hpp" 57 #endif 58 #ifdef TARGET_OS_FAMILY_solaris 59 # include "thread_solaris.inline.hpp" 60 #endif 61 #ifdef TARGET_OS_FAMILY_windows 62 # include "thread_windows.inline.hpp" 63 #endif 64 #ifndef SERIALGC 65 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 66 #include "gc_implementation/g1/g1OopClosures.inline.hpp" 67 #include "gc_implementation/g1/g1RemSet.inline.hpp" 68 #include "gc_implementation/g1/heapRegionSeq.inline.hpp" 69 #include "gc_implementation/parNew/parOopClosures.inline.hpp" 70 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" 71 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" 72 #include "oops/oop.pcgc.inline.hpp" 73 #endif 74 #ifdef COMPILER1 75 #include "c1/c1_Compiler.hpp" 76 #endif 77 78 #ifdef DTRACE_ENABLED 79 80 HS_DTRACE_PROBE_DECL4(hotspot, class__initialization__required, 81 char*, intptr_t, oop, intptr_t); 82 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__recursive, 83 char*, intptr_t, oop, intptr_t, int); 84 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__concurrent, 85 char*, intptr_t, oop, intptr_t, int); 86 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__erroneous, 87 char*, 
intptr_t, oop, intptr_t, int); 88 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__super__failed, 89 char*, intptr_t, oop, intptr_t, int); 90 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__clinit, 91 char*, intptr_t, oop, intptr_t, int); 92 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__error, 93 char*, intptr_t, oop, intptr_t, int); 94 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end, 95 char*, intptr_t, oop, intptr_t, int); 96 97 #define DTRACE_CLASSINIT_PROBE(type, clss, thread_type) \ 98 { \ 99 char* data = NULL; \ 100 int len = 0; \ 101 Symbol* name = (clss)->name(); \ 102 if (name != NULL) { \ 103 data = (char*)name->bytes(); \ 104 len = name->utf8_length(); \ 105 } \ 106 HS_DTRACE_PROBE4(hotspot, class__initialization__##type, \ 107 data, len, (clss)->class_loader(), thread_type); \ 108 } 109 110 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \ 111 { \ 112 char* data = NULL; \ 113 int len = 0; \ 114 Symbol* name = (clss)->name(); \ 115 if (name != NULL) { \ 116 data = (char*)name->bytes(); \ 117 len = name->utf8_length(); \ 118 } \ 119 HS_DTRACE_PROBE5(hotspot, class__initialization__##type, \ 120 data, len, (clss)->class_loader(), thread_type, wait); \ 121 } 122 123 #else // ndef DTRACE_ENABLED 124 125 #define DTRACE_CLASSINIT_PROBE(type, clss, thread_type) 126 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) 127 128 #endif // ndef DTRACE_ENABLED 129 130 bool instanceKlass::should_be_initialized() const { 131 return !is_initialized(); 132 } 133 134 klassVtable* instanceKlass::vtable() const { 135 return new klassVtable(as_klassOop(), start_of_vtable(), vtable_length() / vtableEntry::size()); 136 } 137 138 klassItable* instanceKlass::itable() const { 139 return new klassItable(as_klassOop()); 140 } 141 142 void instanceKlass::eager_initialize(Thread *thread) { 143 if (!EagerInitialization) return; 144 145 if (this->is_not_initialized()) { 146 // abort if the the class has a class initializer 147 if (this->class_initializer() != NULL) return; 148 149 // abort if it is java.lang.Object (initialization is handled in genesis) 150 klassOop super = this->super(); 151 if (super == NULL) return; 152 153 // abort if the super class should be initialized 154 if (!instanceKlass::cast(super)->is_initialized()) return; 155 156 // call body to expose the this pointer 157 instanceKlassHandle this_oop(thread, this->as_klassOop()); 158 eager_initialize_impl(this_oop); 159 } 160 } 161 162 163 void instanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) { 164 EXCEPTION_MARK; 165 ObjectLocker ol(this_oop, THREAD); 166 167 // abort if someone beat us to the initialization 168 if (!this_oop->is_not_initialized()) return; // note: not equivalent to is_initialized() 169 170 ClassState old_state = this_oop->_init_state; 171 link_class_impl(this_oop, true, THREAD); 172 if (HAS_PENDING_EXCEPTION) { 173 CLEAR_PENDING_EXCEPTION; 174 // Abort if linking the class throws an exception. 175 176 // Use a test to avoid redundantly resetting the state if there's 177 // no change. Set_init_state() asserts that state changes make 178 // progress, whereas here we might just be spinning in place. 
179 if( old_state != this_oop->_init_state )
180 this_oop->set_init_state (old_state);
181 } else {
182 // linking successful, mark class as initialized
183 this_oop->set_init_state (fully_initialized);
184 // trace
185 if (TraceClassInitialization) {
186 ResourceMark rm(THREAD);
187 tty->print_cr("[Initialized %s without side effects]", this_oop->external_name());
188 }
189 }
190 }
191
192
193 // See "The Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
194 // process. The step comments refer to the procedure described in that section.
195 // Note: implementation moved to static method to expose the this pointer.
196 void instanceKlass::initialize(TRAPS) {
197 if (this->should_be_initialized()) {
198 HandleMark hm(THREAD);
199 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
200 initialize_impl(this_oop, CHECK);
201 // Note: at this point the class may be initialized
202 // OR it may be in the state of being initialized
203 // in case of recursive initialization!
204 } else {
205 assert(is_initialized(), "sanity check");
206 }
207 }
208
209
210 bool instanceKlass::verify_code(
211 instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
212 // 1) Verify the bytecodes
213 Verifier::Mode mode =
214 throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
215 return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false);
216 }
217
218
219 // Used exclusively by the shared spaces dump mechanism to prevent
220 // classes mapped into the shared regions in new VMs from appearing linked.
221
222 void instanceKlass::unlink_class() {
223 assert(is_linked(), "must be linked");
224 _init_state = loaded;
225 }
226
227 void instanceKlass::link_class(TRAPS) {
228 assert(is_loaded(), "must be loaded");
229 if (!is_linked()) {
230 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
231 link_class_impl(this_oop, true, CHECK);
232 }
233 }
234
235 // Called to verify that a class can link during initialization, without
236 // throwing a VerifyError.
237 bool instanceKlass::link_class_or_fail(TRAPS) { 238 assert(is_loaded(), "must be loaded"); 239 if (!is_linked()) { 240 instanceKlassHandle this_oop(THREAD, this->as_klassOop()); 241 link_class_impl(this_oop, false, CHECK_false); 242 } 243 return is_linked(); 244 } 245 246 bool instanceKlass::link_class_impl( 247 instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) { 248 // check for error state 249 if (this_oop->is_in_error_state()) { 250 ResourceMark rm(THREAD); 251 THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(), 252 this_oop->external_name(), false); 253 } 254 // return if already verified 255 if (this_oop->is_linked()) { 256 return true; 257 } 258 259 // Timing 260 // timer handles recursion 261 assert(THREAD->is_Java_thread(), "non-JavaThread in link_class_impl"); 262 JavaThread* jt = (JavaThread*)THREAD; 263 264 // link super class before linking this class 265 instanceKlassHandle super(THREAD, this_oop->super()); 266 if (super.not_null()) { 267 if (super->is_interface()) { // check if super class is an interface 268 ResourceMark rm(THREAD); 269 Exceptions::fthrow( 270 THREAD_AND_LOCATION, 271 vmSymbols::java_lang_IncompatibleClassChangeError(), 272 "class %s has interface %s as super class", 273 this_oop->external_name(), 274 super->external_name() 275 ); 276 return false; 277 } 278 279 link_class_impl(super, throw_verifyerror, CHECK_false); 280 } 281 282 // link all interfaces implemented by this class before linking this class 283 objArrayHandle interfaces (THREAD, this_oop->local_interfaces()); 284 int num_interfaces = interfaces->length(); 285 for (int index = 0; index < num_interfaces; index++) { 286 HandleMark hm(THREAD); 287 instanceKlassHandle ih(THREAD, klassOop(interfaces->obj_at(index))); 288 link_class_impl(ih, throw_verifyerror, CHECK_false); 289 } 290 291 // in case the class is linked in the process of linking its superclasses 292 if (this_oop->is_linked()) { 293 return true; 294 } 295 296 // trace only the link time for this klass that includes 297 // the verification time 298 PerfClassTraceTime vmtimer(ClassLoader::perf_class_link_time(), 299 ClassLoader::perf_class_link_selftime(), 300 ClassLoader::perf_classes_linked(), 301 jt->get_thread_stat()->perf_recursion_counts_addr(), 302 jt->get_thread_stat()->perf_timers_addr(), 303 PerfClassTraceTime::CLASS_LINK); 304 305 // verification & rewriting 306 { 307 ObjectLocker ol(this_oop, THREAD); 308 // rewritten will have been set if loader constraint error found 309 // on an earlier link attempt 310 // don't verify or rewrite if already rewritten 311 if (!this_oop->is_linked()) { 312 if (!this_oop->is_rewritten()) { 313 { 314 // Timer includes any side effects of class verification (resolution, 315 // etc), but not recursive entry into verify_code(). 
316 PerfClassTraceTime timer(ClassLoader::perf_class_verify_time(), 317 ClassLoader::perf_class_verify_selftime(), 318 ClassLoader::perf_classes_verified(), 319 jt->get_thread_stat()->perf_recursion_counts_addr(), 320 jt->get_thread_stat()->perf_timers_addr(), 321 PerfClassTraceTime::CLASS_VERIFY); 322 bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD); 323 if (!verify_ok) { 324 return false; 325 } 326 } 327 328 // Just in case a side-effect of verify linked this class already 329 // (which can sometimes happen since the verifier loads classes 330 // using custom class loaders, which are free to initialize things) 331 if (this_oop->is_linked()) { 332 return true; 333 } 334 335 // also sets rewritten 336 this_oop->rewrite_class(CHECK_false); 337 } 338 339 // relocate jsrs and link methods after they are all rewritten 340 this_oop->relocate_and_link_methods(CHECK_false); 341 342 // Initialize the vtable and interface table after 343 // methods have been rewritten since rewrite may 344 // fabricate new methodOops. 345 // also does loader constraint checking 346 if (!this_oop()->is_shared()) { 347 ResourceMark rm(THREAD); 348 this_oop->vtable()->initialize_vtable(true, CHECK_false); 349 this_oop->itable()->initialize_itable(true, CHECK_false); 350 } 351 #ifdef ASSERT 352 else { 353 ResourceMark rm(THREAD); 354 this_oop->vtable()->verify(tty, true); 355 // In case itable verification is ever added. 356 // this_oop->itable()->verify(tty, true); 357 } 358 #endif 359 this_oop->set_init_state(linked); 360 if (JvmtiExport::should_post_class_prepare()) { 361 Thread *thread = THREAD; 362 assert(thread->is_Java_thread(), "thread->is_Java_thread()"); 363 JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop()); 364 } 365 } 366 } 367 return true; 368 } 369 370 371 // Rewrite the byte codes of all of the methods of a class. 372 // The rewriter must be called exactly once. Rewriting must happen after 373 // verification but before the first method of the class is executed. 374 void instanceKlass::rewrite_class(TRAPS) { 375 assert(is_loaded(), "must be loaded"); 376 instanceKlassHandle this_oop(THREAD, this->as_klassOop()); 377 if (this_oop->is_rewritten()) { 378 assert(this_oop()->is_shared(), "rewriting an unshared class?"); 379 return; 380 } 381 Rewriter::rewrite(this_oop, CHECK); 382 this_oop->set_rewritten(); 383 } 384 385 // Now relocate and link method entry points after class is rewritten. 386 // This is outside is_rewritten flag. In case of an exception, it can be 387 // executed more than once. 388 void instanceKlass::relocate_and_link_methods(TRAPS) { 389 assert(is_loaded(), "must be loaded"); 390 instanceKlassHandle this_oop(THREAD, this->as_klassOop()); 391 Rewriter::relocate_and_link(this_oop, CHECK); 392 } 393 394 395 void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) { 396 // Make sure klass is linked (verified) before initialization 397 // A class could already be verified, since it has been reflected upon. 398 this_oop->link_class(CHECK); 399 400 DTRACE_CLASSINIT_PROBE(required, instanceKlass::cast(this_oop()), -1); 401 402 bool wait = false; 403 404 // refer to the JVM book page 47 for description of steps 405 // Step 1 406 { ObjectLocker ol(this_oop, THREAD); 407 408 Thread *self = THREAD; // it's passed the current thread 409 410 // Step 2 411 // If we were to use wait() instead of waitInterruptibly() then 412 // we might end up throwing IE from link/symbol resolution sites 413 // that aren't expected to throw. This would wreak havoc. 
See 6320309. 414 while(this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) { 415 wait = true; 416 ol.waitUninterruptibly(CHECK); 417 } 418 419 // Step 3 420 if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self)) { 421 DTRACE_CLASSINIT_PROBE_WAIT(recursive, instanceKlass::cast(this_oop()), -1,wait); 422 return; 423 } 424 425 // Step 4 426 if (this_oop->is_initialized()) { 427 DTRACE_CLASSINIT_PROBE_WAIT(concurrent, instanceKlass::cast(this_oop()), -1,wait); 428 return; 429 } 430 431 // Step 5 432 if (this_oop->is_in_error_state()) { 433 DTRACE_CLASSINIT_PROBE_WAIT(erroneous, instanceKlass::cast(this_oop()), -1,wait); 434 ResourceMark rm(THREAD); 435 const char* desc = "Could not initialize class "; 436 const char* className = this_oop->external_name(); 437 size_t msglen = strlen(desc) + strlen(className) + 1; 438 char* message = NEW_RESOURCE_ARRAY(char, msglen); 439 if (NULL == message) { 440 // Out of memory: can't create detailed error message 441 THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className); 442 } else { 443 jio_snprintf(message, msglen, "%s%s", desc, className); 444 THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message); 445 } 446 } 447 448 // Step 6 449 this_oop->set_init_state(being_initialized); 450 this_oop->set_init_thread(self); 451 } 452 453 // Step 7 454 klassOop super_klass = this_oop->super(); 455 if (super_klass != NULL && !this_oop->is_interface() && Klass::cast(super_klass)->should_be_initialized()) { 456 Klass::cast(super_klass)->initialize(THREAD); 457 458 if (HAS_PENDING_EXCEPTION) { 459 Handle e(THREAD, PENDING_EXCEPTION); 460 CLEAR_PENDING_EXCEPTION; 461 { 462 EXCEPTION_MARK; 463 this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads 464 CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, superclass initialization error is thrown below 465 } 466 DTRACE_CLASSINIT_PROBE_WAIT(super__failed, instanceKlass::cast(this_oop()), -1,wait); 467 THROW_OOP(e()); 468 } 469 } 470 471 // Step 8 472 { 473 assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl"); 474 JavaThread* jt = (JavaThread*)THREAD; 475 DTRACE_CLASSINIT_PROBE_WAIT(clinit, instanceKlass::cast(this_oop()), -1,wait); 476 // Timer includes any side effects of class initialization (resolution, 477 // etc), but not recursive entry into call_class_initializer(). 
478 PerfClassTraceTime timer(ClassLoader::perf_class_init_time(), 479 ClassLoader::perf_class_init_selftime(), 480 ClassLoader::perf_classes_inited(), 481 jt->get_thread_stat()->perf_recursion_counts_addr(), 482 jt->get_thread_stat()->perf_timers_addr(), 483 PerfClassTraceTime::CLASS_CLINIT); 484 this_oop->call_class_initializer(THREAD); 485 } 486 487 // Step 9 488 if (!HAS_PENDING_EXCEPTION) { 489 this_oop->set_initialization_state_and_notify(fully_initialized, CHECK); 490 { ResourceMark rm(THREAD); 491 debug_only(this_oop->vtable()->verify(tty, true);) 492 } 493 } 494 else { 495 // Step 10 and 11 496 Handle e(THREAD, PENDING_EXCEPTION); 497 CLEAR_PENDING_EXCEPTION; 498 { 499 EXCEPTION_MARK; 500 this_oop->set_initialization_state_and_notify(initialization_error, THREAD); 501 CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, class initialization error is thrown below 502 } 503 DTRACE_CLASSINIT_PROBE_WAIT(error, instanceKlass::cast(this_oop()), -1,wait); 504 if (e->is_a(SystemDictionary::Error_klass())) { 505 THROW_OOP(e()); 506 } else { 507 JavaCallArguments args(e); 508 THROW_ARG(vmSymbols::java_lang_ExceptionInInitializerError(), 509 vmSymbols::throwable_void_signature(), 510 &args); 511 } 512 } 513 DTRACE_CLASSINIT_PROBE_WAIT(end, instanceKlass::cast(this_oop()), -1,wait); 514 } 515 516 517 // Note: implementation moved to static method to expose the this pointer. 518 void instanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) { 519 instanceKlassHandle kh(THREAD, this->as_klassOop()); 520 set_initialization_state_and_notify_impl(kh, state, CHECK); 521 } 522 523 void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) { 524 ObjectLocker ol(this_oop, THREAD); 525 this_oop->set_init_state(state); 526 ol.notify_all(CHECK); 527 } 528 529 void instanceKlass::add_implementor(klassOop k) { 530 assert(Compile_lock->owned_by_self(), ""); 531 // Filter out my subinterfaces. 532 // (Note: Interfaces are never on the subklass list.) 533 if (instanceKlass::cast(k)->is_interface()) return; 534 535 // Filter out subclasses whose supers already implement me. 536 // (Note: CHA must walk subclasses of direct implementors 537 // in order to locate indirect implementors.) 538 klassOop sk = instanceKlass::cast(k)->super(); 539 if (sk != NULL && instanceKlass::cast(sk)->implements_interface(as_klassOop())) 540 // We only need to check one immediate superclass, since the 541 // implements_interface query looks at transitive_interfaces. 542 // Any supers of the super have the same (or fewer) transitive_interfaces. 
543 return; 544 545 // Update number of implementors 546 int i = _nof_implementors++; 547 548 // Record this implementor, if there are not too many already 549 if (i < implementors_limit) { 550 assert(_implementors[i] == NULL, "should be exactly one implementor"); 551 oop_store_without_check((oop*)&_implementors[i], k); 552 } else if (i == implementors_limit) { 553 // clear out the list on first overflow 554 for (int i2 = 0; i2 < implementors_limit; i2++) 555 oop_store_without_check((oop*)&_implementors[i2], NULL); 556 } 557 558 // The implementor also implements the transitive_interfaces 559 for (int index = 0; index < local_interfaces()->length(); index++) { 560 instanceKlass::cast(klassOop(local_interfaces()->obj_at(index)))->add_implementor(k); 561 } 562 } 563 564 void instanceKlass::init_implementor() { 565 for (int i = 0; i < implementors_limit; i++) 566 oop_store_without_check((oop*)&_implementors[i], NULL); 567 _nof_implementors = 0; 568 } 569 570 571 void instanceKlass::process_interfaces(Thread *thread) { 572 // link this class into the implementors list of every interface it implements 573 KlassHandle this_as_oop (thread, this->as_klassOop()); 574 for (int i = local_interfaces()->length() - 1; i >= 0; i--) { 575 assert(local_interfaces()->obj_at(i)->is_klass(), "must be a klass"); 576 instanceKlass* interf = instanceKlass::cast(klassOop(local_interfaces()->obj_at(i))); 577 assert(interf->is_interface(), "expected interface"); 578 interf->add_implementor(this_as_oop()); 579 } 580 } 581 582 bool instanceKlass::can_be_primary_super_slow() const { 583 if (is_interface()) 584 return false; 585 else 586 return Klass::can_be_primary_super_slow(); 587 } 588 589 objArrayOop instanceKlass::compute_secondary_supers(int num_extra_slots, TRAPS) { 590 // The secondaries are the implemented interfaces. 
591 instanceKlass* ik = instanceKlass::cast(as_klassOop()); 592 objArrayHandle interfaces (THREAD, ik->transitive_interfaces()); 593 int num_secondaries = num_extra_slots + interfaces->length(); 594 if (num_secondaries == 0) { 595 return Universe::the_empty_system_obj_array(); 596 } else if (num_extra_slots == 0) { 597 return interfaces(); 598 } else { 599 // a mix of both 600 objArrayOop secondaries = oopFactory::new_system_objArray(num_secondaries, CHECK_NULL); 601 for (int i = 0; i < interfaces->length(); i++) { 602 secondaries->obj_at_put(num_extra_slots+i, interfaces->obj_at(i)); 603 } 604 return secondaries; 605 } 606 } 607 608 bool instanceKlass::compute_is_subtype_of(klassOop k) { 609 if (Klass::cast(k)->is_interface()) { 610 return implements_interface(k); 611 } else { 612 return Klass::compute_is_subtype_of(k); 613 } 614 } 615 616 bool instanceKlass::implements_interface(klassOop k) const { 617 if (as_klassOop() == k) return true; 618 assert(Klass::cast(k)->is_interface(), "should be an interface class"); 619 for (int i = 0; i < transitive_interfaces()->length(); i++) { 620 if (transitive_interfaces()->obj_at(i) == k) { 621 return true; 622 } 623 } 624 return false; 625 } 626 627 objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) { 628 if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException()); 629 if (length > arrayOopDesc::max_array_length(T_OBJECT)) { 630 report_java_out_of_memory("Requested array size exceeds VM limit"); 631 THROW_OOP_0(Universe::out_of_memory_error_array_size()); 632 } 633 int size = objArrayOopDesc::object_size(length); 634 klassOop ak = array_klass(n, CHECK_NULL); 635 KlassHandle h_ak (THREAD, ak); 636 objArrayOop o = 637 (objArrayOop)CollectedHeap::array_allocate(h_ak, size, length, CHECK_NULL); 638 return o; 639 } 640 641 instanceOop instanceKlass::register_finalizer(instanceOop i, TRAPS) { 642 if (TraceFinalizerRegistration) { 643 tty->print("Registered "); 644 i->print_value_on(tty); 645 tty->print_cr(" (" INTPTR_FORMAT ") as finalizable", (address)i); 646 } 647 instanceHandle h_i(THREAD, i); 648 // Pass the handle as argument, JavaCalls::call expects oop as jobjects 649 JavaValue result(T_VOID); 650 JavaCallArguments args(h_i); 651 methodHandle mh (THREAD, Universe::finalizer_register_method()); 652 JavaCalls::call(&result, mh, &args, CHECK_NULL); 653 return h_i(); 654 } 655 656 instanceOop instanceKlass::allocate_instance(TRAPS) { 657 assert(!oop_is_instanceMirror(), "wrong allocation path"); 658 bool has_finalizer_flag = has_finalizer(); // Query before possible GC 659 int size = size_helper(); // Query before forming handle. 660 661 KlassHandle h_k(THREAD, as_klassOop()); 662 663 instanceOop i; 664 665 i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL); 666 if (has_finalizer_flag && !RegisterFinalizersAtInit) { 667 i = register_finalizer(i, CHECK_NULL); 668 } 669 return i; 670 } 671 672 instanceOop instanceKlass::allocate_permanent_instance(TRAPS) { 673 // Finalizer registration occurs in the Object.<init> constructor 674 // and constructors normally aren't run when allocating perm 675 // instances so simply disallow finalizable perm objects. This can 676 // be relaxed if a need for it is found. 677 assert(!has_finalizer(), "perm objects not allowed to have finalizers"); 678 assert(!oop_is_instanceMirror(), "wrong allocation path"); 679 int size = size_helper(); // Query before forming handle. 
680 KlassHandle h_k(THREAD, as_klassOop()); 681 instanceOop i = (instanceOop) 682 CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL); 683 return i; 684 } 685 686 void instanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) { 687 if (is_interface() || is_abstract()) { 688 ResourceMark rm(THREAD); 689 THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError() 690 : vmSymbols::java_lang_InstantiationException(), external_name()); 691 } 692 if (as_klassOop() == SystemDictionary::Class_klass()) { 693 ResourceMark rm(THREAD); 694 THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError() 695 : vmSymbols::java_lang_IllegalAccessException(), external_name()); 696 } 697 } 698 699 klassOop instanceKlass::array_klass_impl(bool or_null, int n, TRAPS) { 700 instanceKlassHandle this_oop(THREAD, as_klassOop()); 701 return array_klass_impl(this_oop, or_null, n, THREAD); 702 } 703 704 klassOop instanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) { 705 if (this_oop->array_klasses() == NULL) { 706 if (or_null) return NULL; 707 708 ResourceMark rm; 709 JavaThread *jt = (JavaThread *)THREAD; 710 { 711 // Atomic creation of array_klasses 712 MutexLocker mc(Compile_lock, THREAD); // for vtables 713 MutexLocker ma(MultiArray_lock, THREAD); 714 715 // Check if update has already taken place 716 if (this_oop->array_klasses() == NULL) { 717 objArrayKlassKlass* oakk = 718 (objArrayKlassKlass*)Universe::objArrayKlassKlassObj()->klass_part(); 719 720 klassOop k = oakk->allocate_objArray_klass(1, this_oop, CHECK_NULL); 721 this_oop->set_array_klasses(k); 722 } 723 } 724 } 725 // _this will always be set at this point 726 objArrayKlass* oak = (objArrayKlass*)this_oop->array_klasses()->klass_part(); 727 if (or_null) { 728 return oak->array_klass_or_null(n); 729 } 730 return oak->array_klass(n, CHECK_NULL); 731 } 732 733 klassOop instanceKlass::array_klass_impl(bool or_null, TRAPS) { 734 return array_klass_impl(or_null, 1, THREAD); 735 } 736 737 void instanceKlass::call_class_initializer(TRAPS) { 738 instanceKlassHandle ik (THREAD, as_klassOop()); 739 call_class_initializer_impl(ik, THREAD); 740 } 741 742 static int call_class_initializer_impl_counter = 0; // for debugging 743 744 methodOop instanceKlass::class_initializer() { 745 methodOop clinit = find_method( 746 vmSymbols::class_initializer_name(), vmSymbols::void_method_signature()); 747 if (clinit != NULL && clinit->has_valid_initializer_flags()) { 748 return clinit; 749 } 750 return NULL; 751 } 752 753 void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) { 754 methodHandle h_method(THREAD, this_oop->class_initializer()); 755 assert(!this_oop->is_initialized(), "we cannot initialize twice"); 756 if (TraceClassInitialization) { 757 tty->print("%d Initializing ", call_class_initializer_impl_counter++); 758 this_oop->name()->print_value(); 759 tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop()); 760 } 761 if (h_method() != NULL) { 762 JavaCallArguments args; // No arguments 763 JavaValue result(T_VOID); 764 JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args) 765 } 766 } 767 768 769 void instanceKlass::mask_for(methodHandle method, int bci, 770 InterpreterOopMap* entry_for) { 771 // Dirty read, then double-check under a lock. 772 if (_oop_map_cache == NULL) { 773 // Otherwise, allocate a new one. 774 MutexLocker x(OopMapCacheAlloc_lock); 775 // First time use. 
Allocate a cache in C heap
776 if (_oop_map_cache == NULL) {
777 _oop_map_cache = new OopMapCache();
778 }
779 }
780 // _oop_map_cache is constant after init; lookup below does its own locking.
781 _oop_map_cache->lookup(method, bci, entry_for);
782 }
783
784
785 bool instanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
786 for (JavaFieldStream fs(as_klassOop()); !fs.done(); fs.next()) {
787 Symbol* f_name = fs.name();
788 Symbol* f_sig = fs.signature();
789 if (f_name == name && f_sig == sig) {
790 fd->initialize(as_klassOop(), fs.index());
791 return true;
792 }
793 }
794 return false;
795 }
796
797
798 void instanceKlass::shared_symbols_iterate(SymbolClosure* closure) {
799 Klass::shared_symbols_iterate(closure);
800 closure->do_symbol(&_generic_signature);
801 closure->do_symbol(&_source_file_name);
802 closure->do_symbol(&_source_debug_extension);
803
804 for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
805 int name_index = fs.name_index();
806 closure->do_symbol(constants()->symbol_at_addr(name_index));
807 int sig_index = fs.signature_index();
808 closure->do_symbol(constants()->symbol_at_addr(sig_index));
809 }
810 }
811
812
813 klassOop instanceKlass::find_interface_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
814 const int n = local_interfaces()->length();
815 for (int i = 0; i < n; i++) {
816 klassOop intf1 = klassOop(local_interfaces()->obj_at(i));
817 assert(Klass::cast(intf1)->is_interface(), "just checking type");
818 // search for field in current interface
819 if (instanceKlass::cast(intf1)->find_local_field(name, sig, fd)) {
820 assert(fd->is_static(), "interface field must be static");
821 return intf1;
822 }
823 // search for field in direct superinterfaces
824 klassOop intf2 = instanceKlass::cast(intf1)->find_interface_field(name, sig, fd);
825 if (intf2 != NULL) return intf2;
826 }
827 // otherwise field lookup fails
828 return NULL;
829 }
830
831
832 klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
833 // search order according to newest JVM spec (5.4.3.2, p.167).
834 // 1) search for field in current klass
835 if (find_local_field(name, sig, fd)) {
836 return as_klassOop();
837 }
838 // 2) search for field recursively in direct superinterfaces
839 { klassOop intf = find_interface_field(name, sig, fd);
840 if (intf != NULL) return intf;
841 }
842 // 3) apply field lookup recursively if superclass exists
843 { klassOop supr = super();
844 if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, fd);
845 }
846 // 4) otherwise field lookup fails
847 return NULL;
848 }
849
850
851 klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, bool is_static, fieldDescriptor* fd) const {
852 // search order according to newest JVM spec (5.4.3.2, p.167).
853 // 1) search for field in current klass 854 if (find_local_field(name, sig, fd)) { 855 if (fd->is_static() == is_static) return as_klassOop(); 856 } 857 // 2) search for field recursively in direct superinterfaces 858 if (is_static) { 859 klassOop intf = find_interface_field(name, sig, fd); 860 if (intf != NULL) return intf; 861 } 862 // 3) apply field lookup recursively if superclass exists 863 { klassOop supr = super(); 864 if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, is_static, fd); 865 } 866 // 4) otherwise field lookup fails 867 return NULL; 868 } 869 870 871 bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const { 872 for (JavaFieldStream fs(as_klassOop()); !fs.done(); fs.next()) { 873 if (fs.offset() == offset) { 874 fd->initialize(as_klassOop(), fs.index()); 875 if (fd->is_static() == is_static) return true; 876 } 877 } 878 return false; 879 } 880 881 882 bool instanceKlass::find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const { 883 klassOop klass = as_klassOop(); 884 while (klass != NULL) { 885 if (instanceKlass::cast(klass)->find_local_field_from_offset(offset, is_static, fd)) { 886 return true; 887 } 888 klass = Klass::cast(klass)->super(); 889 } 890 return false; 891 } 892 893 894 void instanceKlass::methods_do(void f(methodOop method)) { 895 int len = methods()->length(); 896 for (int index = 0; index < len; index++) { 897 methodOop m = methodOop(methods()->obj_at(index)); 898 assert(m->is_method(), "must be method"); 899 f(m); 900 } 901 } 902 903 904 void instanceKlass::do_local_static_fields(FieldClosure* cl) { 905 for (JavaFieldStream fs(this); !fs.done(); fs.next()) { 906 if (fs.access_flags().is_static()) { 907 fieldDescriptor fd; 908 fd.initialize(as_klassOop(), fs.index()); 909 cl->do_field(&fd); 910 } 911 } 912 } 913 914 915 void instanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) { 916 instanceKlassHandle h_this(THREAD, as_klassOop()); 917 do_local_static_fields_impl(h_this, f, CHECK); 918 } 919 920 921 void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) { 922 for (JavaFieldStream fs(this_oop()); !fs.done(); fs.next()) { 923 if (fs.access_flags().is_static()) { 924 fieldDescriptor fd; 925 fd.initialize(this_oop(), fs.index()); 926 f(&fd, CHECK); 927 } 928 } 929 } 930 931 932 static int compare_fields_by_offset(int* a, int* b) { 933 return a[0] - b[0]; 934 } 935 936 void instanceKlass::do_nonstatic_fields(FieldClosure* cl) { 937 instanceKlass* super = superklass(); 938 if (super != NULL) { 939 super->do_nonstatic_fields(cl); 940 } 941 fieldDescriptor fd; 942 int length = java_fields_count(); 943 // In DebugInfo nonstatic fields are sorted by offset. 944 int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1)); 945 int j = 0; 946 for (int i = 0; i < length; i += 1) { 947 fd.initialize(as_klassOop(), i); 948 if (!fd.is_static()) { 949 fields_sorted[j + 0] = fd.offset(); 950 fields_sorted[j + 1] = i; 951 j += 2; 952 } 953 } 954 if (j > 0) { 955 length = j; 956 // _sort_Fn is defined in growableArray.hpp. 
957 qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset); 958 for (int i = 0; i < length; i += 2) { 959 fd.initialize(as_klassOop(), fields_sorted[i + 1]); 960 assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields"); 961 cl->do_field(&fd); 962 } 963 } 964 FREE_C_HEAP_ARRAY(int, fields_sorted); 965 } 966 967 968 void instanceKlass::array_klasses_do(void f(klassOop k)) { 969 if (array_klasses() != NULL) 970 arrayKlass::cast(array_klasses())->array_klasses_do(f); 971 } 972 973 974 void instanceKlass::with_array_klasses_do(void f(klassOop k)) { 975 f(as_klassOop()); 976 array_klasses_do(f); 977 } 978 979 #ifdef ASSERT 980 static int linear_search(objArrayOop methods, Symbol* name, Symbol* signature) { 981 int len = methods->length(); 982 for (int index = 0; index < len; index++) { 983 methodOop m = (methodOop)(methods->obj_at(index)); 984 assert(m->is_method(), "must be method"); 985 if (m->signature() == signature && m->name() == name) { 986 return index; 987 } 988 } 989 return -1; 990 } 991 #endif 992 993 methodOop instanceKlass::find_method(Symbol* name, Symbol* signature) const { 994 return instanceKlass::find_method(methods(), name, signature); 995 } 996 997 methodOop instanceKlass::find_method(objArrayOop methods, Symbol* name, Symbol* signature) { 998 int len = methods->length(); 999 // methods are sorted, so do binary search 1000 int l = 0; 1001 int h = len - 1; 1002 while (l <= h) { 1003 int mid = (l + h) >> 1; 1004 methodOop m = (methodOop)methods->obj_at(mid); 1005 assert(m->is_method(), "must be method"); 1006 int res = m->name()->fast_compare(name); 1007 if (res == 0) { 1008 // found matching name; do linear search to find matching signature 1009 // first, quick check for common case 1010 if (m->signature() == signature) return m; 1011 // search downwards through overloaded methods 1012 int i; 1013 for (i = mid - 1; i >= l; i--) { 1014 methodOop m = (methodOop)methods->obj_at(i); 1015 assert(m->is_method(), "must be method"); 1016 if (m->name() != name) break; 1017 if (m->signature() == signature) return m; 1018 } 1019 // search upwards 1020 for (i = mid + 1; i <= h; i++) { 1021 methodOop m = (methodOop)methods->obj_at(i); 1022 assert(m->is_method(), "must be method"); 1023 if (m->name() != name) break; 1024 if (m->signature() == signature) return m; 1025 } 1026 // not found 1027 #ifdef ASSERT 1028 int index = linear_search(methods, name, signature); 1029 assert(index == -1, err_msg("binary search should have found entry %d", index)); 1030 #endif 1031 return NULL; 1032 } else if (res < 0) { 1033 l = mid + 1; 1034 } else { 1035 h = mid - 1; 1036 } 1037 } 1038 #ifdef ASSERT 1039 int index = linear_search(methods, name, signature); 1040 assert(index == -1, err_msg("binary search should have found entry %d", index)); 1041 #endif 1042 return NULL; 1043 } 1044 1045 methodOop instanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const { 1046 klassOop klass = as_klassOop(); 1047 while (klass != NULL) { 1048 methodOop method = instanceKlass::cast(klass)->find_method(name, signature); 1049 if (method != NULL) return method; 1050 klass = instanceKlass::cast(klass)->super(); 1051 } 1052 return NULL; 1053 } 1054 1055 // lookup a method in all the interfaces that this class implements 1056 methodOop instanceKlass::lookup_method_in_all_interfaces(Symbol* name, 1057 Symbol* signature) const { 1058 objArrayOop all_ifs = instanceKlass::cast(as_klassOop())->transitive_interfaces(); 1059 int num_ifs = all_ifs->length(); 1060 
instanceKlass *ik = NULL; 1061 for (int i = 0; i < num_ifs; i++) { 1062 ik = instanceKlass::cast(klassOop(all_ifs->obj_at(i))); 1063 methodOop m = ik->lookup_method(name, signature); 1064 if (m != NULL) { 1065 return m; 1066 } 1067 } 1068 return NULL; 1069 } 1070 1071 /* jni_id_for_impl for jfieldIds only */ 1072 JNIid* instanceKlass::jni_id_for_impl(instanceKlassHandle this_oop, int offset) { 1073 MutexLocker ml(JfieldIdCreation_lock); 1074 // Retry lookup after we got the lock 1075 JNIid* probe = this_oop->jni_ids() == NULL ? NULL : this_oop->jni_ids()->find(offset); 1076 if (probe == NULL) { 1077 // Slow case, allocate new static field identifier 1078 probe = new JNIid(this_oop->as_klassOop(), offset, this_oop->jni_ids()); 1079 this_oop->set_jni_ids(probe); 1080 } 1081 return probe; 1082 } 1083 1084 1085 /* jni_id_for for jfieldIds only */ 1086 JNIid* instanceKlass::jni_id_for(int offset) { 1087 JNIid* probe = jni_ids() == NULL ? NULL : jni_ids()->find(offset); 1088 if (probe == NULL) { 1089 probe = jni_id_for_impl(this->as_klassOop(), offset); 1090 } 1091 return probe; 1092 } 1093 1094 1095 // Lookup or create a jmethodID. 1096 // This code is called by the VMThread and JavaThreads so the 1097 // locking has to be done very carefully to avoid deadlocks 1098 // and/or other cache consistency problems. 1099 // 1100 jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h) { 1101 size_t idnum = (size_t)method_h->method_idnum(); 1102 jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire(); 1103 size_t length = 0; 1104 jmethodID id = NULL; 1105 1106 // We use a double-check locking idiom here because this cache is 1107 // performance sensitive. In the normal system, this cache only 1108 // transitions from NULL to non-NULL which is safe because we use 1109 // release_set_methods_jmethod_ids() to advertise the new cache. 1110 // A partially constructed cache should never be seen by a racing 1111 // thread. We also use release_store_ptr() to save a new jmethodID 1112 // in the cache so a partially constructed jmethodID should never be 1113 // seen either. Cache reads of existing jmethodIDs proceed without a 1114 // lock, but cache writes of a new jmethodID requires uniqueness and 1115 // creation of the cache itself requires no leaks so a lock is 1116 // generally acquired in those two cases. 1117 // 1118 // If the RedefineClasses() API has been used, then this cache can 1119 // grow and we'll have transitions from non-NULL to bigger non-NULL. 1120 // Cache creation requires no leaks and we require safety between all 1121 // cache accesses and freeing of the old cache so a lock is generally 1122 // acquired when the RedefineClasses() API has been used. 
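// (Illustrative sketch of the lock-free read path described above; see also
// jmethod_id_or_null() later in this file:
//   jmethodID* cache = methods_jmethod_ids_acquire();
//   if (cache != NULL && (size_t)cache[0] > idnum) { id = cache[idnum+1]; }
// The acquire pairs with release_set_methods_jmethod_ids() and
// OrderAccess::release_store_ptr() on the writer side, so a reader never observes
// a partially constructed cache or jmethodID.)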
1123
1124 if (jmeths != NULL) {
1125 // the cache already exists
1126 if (!ik_h->idnum_can_increment()) {
1127 // the cache can't grow so we can just get the current values
1128 get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1129 } else {
1130 // cache can grow so we have to be more careful
1131 if (Threads::number_of_threads() == 0 ||
1132 SafepointSynchronize::is_at_safepoint()) {
1133 // we're single threaded or at a safepoint - no locking needed
1134 get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1135 } else {
1136 MutexLocker ml(JmethodIdCreation_lock);
1137 get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1138 }
1139 }
1140 }
1141 // implied else:
1142 // we need to allocate a cache so default length and id values are good
1143
1144 if (jmeths == NULL || // no cache yet
1145 length <= idnum || // cache is too short
1146 id == NULL) { // cache doesn't contain entry
1147
1148 // This function can be called by the VMThread so we have to do all
1149 // things that might block on a safepoint before grabbing the lock.
1150 // Otherwise, we can deadlock with the VMThread or have a cache
1151 // consistency issue. These vars keep track of what we might have
1152 // to free after the lock is dropped.
1153 jmethodID to_dealloc_id = NULL;
1154 jmethodID* to_dealloc_jmeths = NULL;
1155
1156 // may not allocate new_jmeths or use it if we allocate it
1157 jmethodID* new_jmeths = NULL;
1158 if (length <= idnum) {
1159 // allocate a new cache that might be used
1160 size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
1161 new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
1162 memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
1163 // cache size is stored in element[0], other elements offset by one
1164 new_jmeths[0] = (jmethodID)size;
1165 }
1166
1167 // allocate a new jmethodID that might be used
1168 jmethodID new_id = NULL;
1169 if (method_h->is_old() && !method_h->is_obsolete()) {
1170 // The method passed in is old (but not obsolete), we need to use the current version
1171 methodOop current_method = ik_h->method_with_idnum((int)idnum);
1172 assert(current_method != NULL, "old but not obsolete, so should exist");
1173 methodHandle current_method_h(current_method == NULL? method_h() : current_method);
1174 new_id = JNIHandles::make_jmethod_id(current_method_h);
1175 } else {
1176 // It is the current version of the method or an obsolete method,
1177 // use the version passed in
1178 new_id = JNIHandles::make_jmethod_id(method_h);
1179 }
1180
1181 if (Threads::number_of_threads() == 0 ||
1182 SafepointSynchronize::is_at_safepoint()) {
1183 // we're single threaded or at a safepoint - no locking needed
1184 id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
1185 &to_dealloc_id, &to_dealloc_jmeths);
1186 } else {
1187 MutexLocker ml(JmethodIdCreation_lock);
1188 id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
1189 &to_dealloc_id, &to_dealloc_jmeths);
1190 }
1191
1192 // The lock has been dropped so we can free resources.
1193 // Free up either the old cache or the new cache if we allocated one.
1194 if (to_dealloc_jmeths != NULL) {
1195 FreeHeap(to_dealloc_jmeths);
1196 }
1197 // free up the new ID since it wasn't needed
1198 if (to_dealloc_id != NULL) {
1199 JNIHandles::destroy_jmethod_id(to_dealloc_id);
1200 }
1201 }
1202 return id;
1203 }
1204
1205
1206 // Common code to fetch the jmethodID from the cache or update the
1207 // cache with the new jmethodID.
This function should never do anything 1208 // that causes the caller to go to a safepoint or we can deadlock with 1209 // the VMThread or have cache consistency issues. 1210 // 1211 jmethodID instanceKlass::get_jmethod_id_fetch_or_update( 1212 instanceKlassHandle ik_h, size_t idnum, jmethodID new_id, 1213 jmethodID* new_jmeths, jmethodID* to_dealloc_id_p, 1214 jmethodID** to_dealloc_jmeths_p) { 1215 assert(new_id != NULL, "sanity check"); 1216 assert(to_dealloc_id_p != NULL, "sanity check"); 1217 assert(to_dealloc_jmeths_p != NULL, "sanity check"); 1218 assert(Threads::number_of_threads() == 0 || 1219 SafepointSynchronize::is_at_safepoint() || 1220 JmethodIdCreation_lock->owned_by_self(), "sanity check"); 1221 1222 // reacquire the cache - we are locked, single threaded or at a safepoint 1223 jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire(); 1224 jmethodID id = NULL; 1225 size_t length = 0; 1226 1227 if (jmeths == NULL || // no cache yet 1228 (length = (size_t)jmeths[0]) <= idnum) { // cache is too short 1229 if (jmeths != NULL) { 1230 // copy any existing entries from the old cache 1231 for (size_t index = 0; index < length; index++) { 1232 new_jmeths[index+1] = jmeths[index+1]; 1233 } 1234 *to_dealloc_jmeths_p = jmeths; // save old cache for later delete 1235 } 1236 ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths); 1237 } else { 1238 // fetch jmethodID (if any) from the existing cache 1239 id = jmeths[idnum+1]; 1240 *to_dealloc_jmeths_p = new_jmeths; // save new cache for later delete 1241 } 1242 if (id == NULL) { 1243 // No matching jmethodID in the existing cache or we have a new 1244 // cache or we just grew the cache. This cache write is done here 1245 // by the first thread to win the foot race because a jmethodID 1246 // needs to be unique once it is generally available. 1247 id = new_id; 1248 1249 // The jmethodID cache can be read while unlocked so we have to 1250 // make sure the new jmethodID is complete before installing it 1251 // in the cache. 1252 OrderAccess::release_store_ptr(&jmeths[idnum+1], id); 1253 } else { 1254 *to_dealloc_id_p = new_id; // save new id for later delete 1255 } 1256 return id; 1257 } 1258 1259 1260 // Common code to get the jmethodID cache length and the jmethodID 1261 // value at index idnum if there is one. 1262 // 1263 void instanceKlass::get_jmethod_id_length_value(jmethodID* cache, 1264 size_t idnum, size_t *length_p, jmethodID* id_p) { 1265 assert(cache != NULL, "sanity check"); 1266 assert(length_p != NULL, "sanity check"); 1267 assert(id_p != NULL, "sanity check"); 1268 1269 // cache size is stored in element[0], other elements offset by one 1270 *length_p = (size_t)cache[0]; 1271 if (*length_p <= idnum) { // cache is too short 1272 *id_p = NULL; 1273 } else { 1274 *id_p = cache[idnum+1]; // fetch jmethodID (if any) 1275 } 1276 } 1277 1278 1279 // Lookup a jmethodID, NULL if not found. 
Do no blocking, no allocations, no handles 1280 jmethodID instanceKlass::jmethod_id_or_null(methodOop method) { 1281 size_t idnum = (size_t)method->method_idnum(); 1282 jmethodID* jmeths = methods_jmethod_ids_acquire(); 1283 size_t length; // length assigned as debugging crumb 1284 jmethodID id = NULL; 1285 if (jmeths != NULL && // If there is a cache 1286 (length = (size_t)jmeths[0]) > idnum) { // and if it is long enough, 1287 id = jmeths[idnum+1]; // Look up the id (may be NULL) 1288 } 1289 return id; 1290 } 1291 1292 1293 // Cache an itable index 1294 void instanceKlass::set_cached_itable_index(size_t idnum, int index) { 1295 int* indices = methods_cached_itable_indices_acquire(); 1296 int* to_dealloc_indices = NULL; 1297 1298 // We use a double-check locking idiom here because this cache is 1299 // performance sensitive. In the normal system, this cache only 1300 // transitions from NULL to non-NULL which is safe because we use 1301 // release_set_methods_cached_itable_indices() to advertise the 1302 // new cache. A partially constructed cache should never be seen 1303 // by a racing thread. Cache reads and writes proceed without a 1304 // lock, but creation of the cache itself requires no leaks so a 1305 // lock is generally acquired in that case. 1306 // 1307 // If the RedefineClasses() API has been used, then this cache can 1308 // grow and we'll have transitions from non-NULL to bigger non-NULL. 1309 // Cache creation requires no leaks and we require safety between all 1310 // cache accesses and freeing of the old cache so a lock is generally 1311 // acquired when the RedefineClasses() API has been used. 1312 1313 if (indices == NULL || idnum_can_increment()) { 1314 // we need a cache or the cache can grow 1315 MutexLocker ml(JNICachedItableIndex_lock); 1316 // reacquire the cache to see if another thread already did the work 1317 indices = methods_cached_itable_indices_acquire(); 1318 size_t length = 0; 1319 // cache size is stored in element[0], other elements offset by one 1320 if (indices == NULL || (length = (size_t)indices[0]) <= idnum) { 1321 size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count()); 1322 int* new_indices = NEW_C_HEAP_ARRAY(int, size+1); 1323 new_indices[0] = (int)size; 1324 // copy any existing entries 1325 size_t i; 1326 for (i = 0; i < length; i++) { 1327 new_indices[i+1] = indices[i+1]; 1328 } 1329 // Set all the rest to -1 1330 for (i = length; i < size; i++) { 1331 new_indices[i+1] = -1; 1332 } 1333 if (indices != NULL) { 1334 // We have an old cache to delete so save it for after we 1335 // drop the lock. 1336 to_dealloc_indices = indices; 1337 } 1338 release_set_methods_cached_itable_indices(indices = new_indices); 1339 } 1340 1341 if (idnum_can_increment()) { 1342 // this cache can grow so we have to write to it safely 1343 indices[idnum+1] = index; 1344 } 1345 } else { 1346 CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); 1347 } 1348 1349 if (!idnum_can_increment()) { 1350 // The cache cannot grow and this JNI itable index value does not 1351 // have to be unique like a jmethodID. If there is a race to set it, 1352 // it doesn't matter. 
1353 indices[idnum+1] = index;
1354 }
1355
1356 if (to_dealloc_indices != NULL) {
1357 // we allocated a new cache so free the old one
1358 FreeHeap(to_dealloc_indices);
1359 }
1360 }
1361
1362
1363 // Retrieve a cached itable index
1364 int instanceKlass::cached_itable_index(size_t idnum) {
1365 int* indices = methods_cached_itable_indices_acquire();
1366 if (indices != NULL && ((size_t)indices[0]) > idnum) {
1367 // indices exist and are long enough, retrieve possible cached
1368 return indices[idnum+1];
1369 }
1370 return -1;
1371 }
1372
1373
1374 //
1375 // nmethodBucket is used to record dependent nmethods for
1376 // deoptimization. nmethod dependencies are actually <klass, method>
1377 // pairs but we really only care about the klass part for purposes of
1378 // finding nmethods which might need to be deoptimized. Instead of
1379 // recording the method, a count of how many times a particular nmethod
1380 // was recorded is kept. This ensures that any recording errors are
1381 // noticed since an nmethod should be removed as many times as it's
1382 // added.
1383 //
1384 class nmethodBucket {
1385 private:
1386 nmethod* _nmethod;
1387 int _count;
1388 nmethodBucket* _next;
1389
1390 public:
1391 nmethodBucket(nmethod* nmethod, nmethodBucket* next) {
1392 _nmethod = nmethod;
1393 _next = next;
1394 _count = 1;
1395 }
1396 int count() { return _count; }
1397 int increment() { _count += 1; return _count; }
1398 int decrement() { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
1399 nmethodBucket* next() { return _next; }
1400 void set_next(nmethodBucket* b) { _next = b; }
1401 nmethod* get_nmethod() { return _nmethod; }
1402 };
1403
1404
1405 //
1406 // Walk the list of dependent nmethods searching for nmethods which
1407 // are dependent on the changes that were passed in and mark them for
1408 // deoptimization. Returns the number of nmethods found.
1409 //
1410 int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
1411 assert_locked_or_safepoint(CodeCache_lock);
1412 int found = 0;
1413 nmethodBucket* b = _dependencies;
1414 while (b != NULL) {
1415 nmethod* nm = b->get_nmethod();
1416 // since dependencies aren't removed until an nmethod becomes a zombie,
1417 // the dependency list may contain nmethods which aren't alive.
1418 if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
1419 if (TraceDependencies) {
1420 ResourceMark rm;
1421 tty->print_cr("Marked for deoptimization");
1422 tty->print_cr(" context = %s", this->external_name());
1423 changes.print();
1424 nm->print();
1425 nm->print_dependencies();
1426 }
1427 nm->mark_for_deoptimization();
1428 found++;
1429 }
1430 b = b->next();
1431 }
1432 return found;
1433 }
1434
1435
1436 //
1437 // Add an nmethodBucket to the list of dependencies for this nmethod.
1438 // It's possible that an nmethod has multiple dependencies on this klass
1439 // so a count is kept for each bucket to guarantee that creation and
1440 // deletion of dependencies is consistent.
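// For example, if a single nmethod records two distinct dependencies on this klass,
// add_dependent_nmethod() runs twice and the bucket's count reaches 2; the bucket is
// only unlinked and deleted after remove_dependent_nmethod() has been called the same
// number of times.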
1441 //
1442 void instanceKlass::add_dependent_nmethod(nmethod* nm) {
1443 assert_locked_or_safepoint(CodeCache_lock);
1444 nmethodBucket* b = _dependencies;
1445 nmethodBucket* last = NULL;
1446 while (b != NULL) {
1447 if (nm == b->get_nmethod()) {
1448 b->increment();
1449 return;
1450 }
1451 b = b->next();
1452 }
1453 _dependencies = new nmethodBucket(nm, _dependencies);
1454 }
1455
1456
1457 //
1458 // Decrement count of the nmethod in the dependency list and remove
1459 // the bucket completely when the count goes to 0. This method must
1460 // find a corresponding bucket otherwise there's a bug in the
1461 // recording of dependencies.
1462 //
1463 void instanceKlass::remove_dependent_nmethod(nmethod* nm) {
1464 assert_locked_or_safepoint(CodeCache_lock);
1465 nmethodBucket* b = _dependencies;
1466 nmethodBucket* last = NULL;
1467 while (b != NULL) {
1468 if (nm == b->get_nmethod()) {
1469 if (b->decrement() == 0) {
1470 if (last == NULL) {
1471 _dependencies = b->next();
1472 } else {
1473 last->set_next(b->next());
1474 }
1475 delete b;
1476 }
1477 return;
1478 }
1479 last = b;
1480 b = b->next();
1481 }
1482 #ifdef ASSERT
1483 tty->print_cr("### %s can't find dependent nmethod:", this->external_name());
1484 nm->print();
1485 #endif // ASSERT
1486 ShouldNotReachHere();
1487 }
1488
1489
1490 #ifndef PRODUCT
1491 void instanceKlass::print_dependent_nmethods(bool verbose) {
1492 nmethodBucket* b = _dependencies;
1493 int idx = 0;
1494 while (b != NULL) {
1495 nmethod* nm = b->get_nmethod();
1496 tty->print("[%d] count=%d { ", idx++, b->count());
1497 if (!verbose) {
1498 nm->print_on(tty, "nmethod");
1499 tty->print_cr(" } ");
1500 } else {
1501 nm->print();
1502 nm->print_dependencies();
1503 tty->print_cr("--- } ");
1504 }
1505 b = b->next();
1506 }
1507 }
1508
1509
1510 bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
1511 nmethodBucket* b = _dependencies;
1512 while (b != NULL) {
1513 if (nm == b->get_nmethod()) {
1514 return true;
1515 }
1516 b = b->next();
1517 }
1518 return false;
1519 }
1520 #endif //PRODUCT
1521
1522
1523 #ifdef ASSERT
1524 template <class T> void assert_is_in(T *p) {
1525 T heap_oop = oopDesc::load_heap_oop(p);
1526 if (!oopDesc::is_null(heap_oop)) {
1527 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1528 assert(Universe::heap()->is_in(o), "should be in heap");
1529 }
1530 }
1531 template <class T> void assert_is_in_closed_subset(T *p) {
1532 T heap_oop = oopDesc::load_heap_oop(p);
1533 if (!oopDesc::is_null(heap_oop)) {
1534 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1535 assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
1536 }
1537 }
1538 template <class T> void assert_is_in_reserved(T *p) {
1539 T heap_oop = oopDesc::load_heap_oop(p);
1540 if (!oopDesc::is_null(heap_oop)) {
1541 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1542 assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
1543 }
1544 }
1545 template <class T> void assert_nothing(T *p) {}
1546
1547 #else
1548 template <class T> void assert_is_in(T *p) {}
1549 template <class T> void assert_is_in_closed_subset(T *p) {}
1550 template <class T> void assert_is_in_reserved(T *p) {}
1551 template <class T> void assert_nothing(T *p) {}
1552 #endif // ASSERT
1553
1554 //
1555 // Macros that iterate over areas of oops which are specialized on type of
1556 // oop pointer either narrow or wide, depending on UseCompressedOops
1557 //
1558 // Parameters are:
1559 // T - type of oop to point to (either oop or narrowOop)
1560 // start_p -
starting pointer for region to iterate over 1561 // count - number of oops or narrowOops to iterate over 1562 // do_oop - action to perform on each oop (it's arbitrary C code which 1563 // makes it more efficient to put in a macro rather than making 1564 // it a template function) 1565 // assert_fn - assert function which is template function because performance 1566 // doesn't matter when enabled. 1567 #define InstanceKlass_SPECIALIZED_OOP_ITERATE( \ 1568 T, start_p, count, do_oop, \ 1569 assert_fn) \ 1570 { \ 1571 T* p = (T*)(start_p); \ 1572 T* const end = p + (count); \ 1573 while (p < end) { \ 1574 (assert_fn)(p); \ 1575 do_oop; \ 1576 ++p; \ 1577 } \ 1578 } 1579 1580 #define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \ 1581 T, start_p, count, do_oop, \ 1582 assert_fn) \ 1583 { \ 1584 T* const start = (T*)(start_p); \ 1585 T* p = start + (count); \ 1586 while (start < p) { \ 1587 --p; \ 1588 (assert_fn)(p); \ 1589 do_oop; \ 1590 } \ 1591 } 1592 1593 #define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \ 1594 T, start_p, count, low, high, \ 1595 do_oop, assert_fn) \ 1596 { \ 1597 T* const l = (T*)(low); \ 1598 T* const h = (T*)(high); \ 1599 assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \ 1600 mask_bits((intptr_t)h, sizeof(T)-1) == 0, \ 1601 "bounded region must be properly aligned"); \ 1602 T* p = (T*)(start_p); \ 1603 T* end = p + (count); \ 1604 if (p < l) p = l; \ 1605 if (end > h) end = h; \ 1606 while (p < end) { \ 1607 (assert_fn)(p); \ 1608 do_oop; \ 1609 ++p; \ 1610 } \ 1611 } 1612 1613 1614 // The following macros call specialized macros, passing either oop or 1615 // narrowOop as the specialization type. These test the UseCompressedOops 1616 // flag. 1617 #define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \ 1618 { \ 1619 /* Compute oopmap block range. The common case \ 1620 is nonstatic_oop_map_size == 1. */ \ 1621 OopMapBlock* map = start_of_nonstatic_oop_maps(); \ 1622 OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \ 1623 if (UseCompressedOops) { \ 1624 while (map < end_map) { \ 1625 InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ 1626 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \ 1627 do_oop, assert_fn) \ 1628 ++map; \ 1629 } \ 1630 } else { \ 1631 while (map < end_map) { \ 1632 InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \ 1633 obj->obj_field_addr<oop>(map->offset()), map->count(), \ 1634 do_oop, assert_fn) \ 1635 ++map; \ 1636 } \ 1637 } \ 1638 } 1639 1640 #define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \ 1641 { \ 1642 OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); \ 1643 OopMapBlock* map = start_map + nonstatic_oop_map_count(); \ 1644 if (UseCompressedOops) { \ 1645 while (start_map < map) { \ 1646 --map; \ 1647 InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop, \ 1648 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \ 1649 do_oop, assert_fn) \ 1650 } \ 1651 } else { \ 1652 while (start_map < map) { \ 1653 --map; \ 1654 InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop, \ 1655 obj->obj_field_addr<oop>(map->offset()), map->count(), \ 1656 do_oop, assert_fn) \ 1657 } \ 1658 } \ 1659 } 1660 1661 #define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop, \ 1662 assert_fn) \ 1663 { \ 1664 /* Compute oopmap block range. The common case is \ 1665 nonstatic_oop_map_size == 1, so we accept the \ 1666 usually non-existent extra overhead of examining \ 1667 all the maps. 
*/ \ 1668 OopMapBlock* map = start_of_nonstatic_oop_maps(); \ 1669 OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \ 1670 if (UseCompressedOops) { \ 1671 while (map < end_map) { \ 1672 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ 1673 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \ 1674 low, high, \ 1675 do_oop, assert_fn) \ 1676 ++map; \ 1677 } \ 1678 } else { \ 1679 while (map < end_map) { \ 1680 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ 1681 obj->obj_field_addr<oop>(map->offset()), map->count(), \ 1682 low, high, \ 1683 do_oop, assert_fn) \ 1684 ++map; \ 1685 } \ 1686 } \ 1687 } 1688 1689 void instanceKlass::oop_follow_contents(oop obj) { 1690 assert(obj != NULL, "can't follow the content of NULL object"); 1691 obj->follow_header(); 1692 InstanceKlass_OOP_MAP_ITERATE( \ 1693 obj, \ 1694 MarkSweep::mark_and_push(p), \ 1695 assert_is_in_closed_subset) 1696 } 1697 1698 #ifndef SERIALGC 1699 void instanceKlass::oop_follow_contents(ParCompactionManager* cm, 1700 oop obj) { 1701 assert(obj != NULL, "can't follow the content of NULL object"); 1702 obj->follow_header(cm); 1703 InstanceKlass_OOP_MAP_ITERATE( \ 1704 obj, \ 1705 PSParallelCompact::mark_and_push(cm, p), \ 1706 assert_is_in) 1707 } 1708 #endif // SERIALGC 1709 1710 // closure's do_header() method dicates whether the given closure should be 1711 // applied to the klass ptr in the object header. 1712 1713 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ 1714 \ 1715 int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ 1716 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\ 1717 /* header */ \ 1718 if (closure->do_header()) { \ 1719 obj->oop_iterate_header(closure); \ 1720 } \ 1721 InstanceKlass_OOP_MAP_ITERATE( \ 1722 obj, \ 1723 SpecializationStats:: \ 1724 record_do_oop_call##nv_suffix(SpecializationStats::ik); \ 1725 (closure)->do_oop##nv_suffix(p), \ 1726 assert_is_in_closed_subset) \ 1727 return size_helper(); \ 1728 } 1729 1730 #ifndef SERIALGC 1731 #define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ 1732 \ 1733 int instanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, \ 1734 OopClosureType* closure) { \ 1735 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \ 1736 /* header */ \ 1737 if (closure->do_header()) { \ 1738 obj->oop_iterate_header(closure); \ 1739 } \ 1740 /* instance variables */ \ 1741 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \ 1742 obj, \ 1743 SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik);\ 1744 (closure)->do_oop##nv_suffix(p), \ 1745 assert_is_in_closed_subset) \ 1746 return size_helper(); \ 1747 } 1748 #endif // !SERIALGC 1749 1750 #define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ 1751 \ 1752 int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \ 1753 OopClosureType* closure, \ 1754 MemRegion mr) { \ 1755 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\ 1756 if (closure->do_header()) { \ 1757 obj->oop_iterate_header(closure, mr); \ 1758 } \ 1759 InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \ 1760 obj, mr.start(), mr.end(), \ 1761 (closure)->do_oop##nv_suffix(p), \ 1762 assert_is_in_closed_subset) \ 1763 return size_helper(); \ 1764 } 1765 1766 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN) 1767 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN) 1768 
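// Illustrative sketch only (not part of the build): for a hypothetical closure
// type FooClosure registered in ALL_OOP_OOP_ITERATE_CLOSURES_1 with nv_suffix
// "_nv", the InstanceKlass_OOP_OOP_ITERATE_DEFN instantiation above expands to
// roughly the following member function (SpecializationStats bookkeeping
// omitted).  The field walk comes from InstanceKlass_OOP_MAP_ITERATE, and the
// assert_fn argument compiles to a no-op outside ASSERT builds:
//
//   int instanceKlass::oop_oop_iterate_nv(oop obj, FooClosure* closure) {
//     if (closure->do_header()) {
//       obj->oop_iterate_header(closure);   // visit the object's klass pointer
//     }
//     // for each OopMapBlock of this klass, apply closure->do_oop_nv(p)
//     // to every (narrow)oop field address p inside obj
//     return size_helper();                 // object size in words
//   }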
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m) 1769 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m) 1770 #ifndef SERIALGC 1771 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) 1772 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) 1773 #endif // !SERIALGC 1774 1775 int instanceKlass::oop_adjust_pointers(oop obj) { 1776 int size = size_helper(); 1777 InstanceKlass_OOP_MAP_ITERATE( \ 1778 obj, \ 1779 MarkSweep::adjust_pointer(p), \ 1780 assert_is_in) 1781 obj->adjust_header(); 1782 return size; 1783 } 1784 1785 #ifndef SERIALGC 1786 void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { 1787 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \ 1788 obj, \ 1789 if (PSScavenge::should_scavenge(p)) { \ 1790 pm->claim_or_forward_depth(p); \ 1791 }, \ 1792 assert_nothing ) 1793 } 1794 1795 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { 1796 InstanceKlass_OOP_MAP_ITERATE( \ 1797 obj, \ 1798 PSParallelCompact::adjust_pointer(p), \ 1799 assert_nothing) 1800 return size_helper(); 1801 } 1802 1803 #endif // SERIALGC 1804 1805 // This klass is alive but the implementor link is not followed/updated. 1806 // Subklass and sibling links are handled by Klass::follow_weak_klass_links 1807 1808 void instanceKlass::follow_weak_klass_links( 1809 BoolObjectClosure* is_alive, OopClosure* keep_alive) { 1810 assert(is_alive->do_object_b(as_klassOop()), "this oop should be live"); 1811 if (ClassUnloading) { 1812 for (int i = 0; i < implementors_limit; i++) { 1813 klassOop impl = _implementors[i]; 1814 if (impl == NULL) break; // no more in the list 1815 if (!is_alive->do_object_b(impl)) { 1816 // remove this guy from the list by overwriting him with the tail 1817 int lasti = --_nof_implementors; 1818 assert(lasti >= i && lasti < implementors_limit, "just checking"); 1819 _implementors[i] = _implementors[lasti]; 1820 _implementors[lasti] = NULL; 1821 --i; // rerun the loop at this index 1822 } 1823 } 1824 } else { 1825 for (int i = 0; i < implementors_limit; i++) { 1826 keep_alive->do_oop(&adr_implementors()[i]); 1827 } 1828 } 1829 Klass::follow_weak_klass_links(is_alive, keep_alive); 1830 } 1831 1832 void instanceKlass::remove_unshareable_info() { 1833 Klass::remove_unshareable_info(); 1834 init_implementor(); 1835 } 1836 1837 static void clear_all_breakpoints(methodOop m) { 1838 m->clear_all_breakpoints(); 1839 } 1840 1841 void instanceKlass::release_C_heap_structures() { 1842 // Deallocate oop map cache 1843 if (_oop_map_cache != NULL) { 1844 delete _oop_map_cache; 1845 _oop_map_cache = NULL; 1846 } 1847 1848 // Deallocate JNI identifiers for jfieldIDs 1849 JNIid::deallocate(jni_ids()); 1850 set_jni_ids(NULL); 1851 1852 jmethodID* jmeths = methods_jmethod_ids_acquire(); 1853 if (jmeths != (jmethodID*)NULL) { 1854 release_set_methods_jmethod_ids(NULL); 1855 FreeHeap(jmeths); 1856 } 1857 1858 int* indices = methods_cached_itable_indices_acquire(); 1859 if (indices != (int*)NULL) { 1860 release_set_methods_cached_itable_indices(NULL); 1861 FreeHeap(indices); 1862 } 1863 1864 // release dependencies 1865 nmethodBucket* b = _dependencies; 1866 _dependencies = NULL; 1867 while (b != NULL) { 1868 nmethodBucket* next = b->next(); 1869 delete b; 1870 b = next; 1871 } 1872 1873 // Deallocate breakpoint records 1874 if (breakpoints() != 0x0) { 1875 methods_do(clear_all_breakpoints); 1876 assert(breakpoints() == 0x0, "should have cleared breakpoints"); 1877 } 1878 1879 // deallocate information 
about previous versions 1880 if (_previous_versions != NULL) { 1881 for (int i = _previous_versions->length() - 1; i >= 0; i--) { 1882 PreviousVersionNode * pv_node = _previous_versions->at(i); 1883 delete pv_node; 1884 } 1885 delete _previous_versions; 1886 _previous_versions = NULL; 1887 } 1888 1889 // deallocate the cached class file 1890 if (_cached_class_file_bytes != NULL) { 1891 os::free(_cached_class_file_bytes); 1892 _cached_class_file_bytes = NULL; 1893 _cached_class_file_len = 0; 1894 } 1895 1896 // Decrement symbol reference counts associated with the unloaded class. 1897 if (_name != NULL) _name->decrement_refcount(); 1898 // unreference array name derived from this class name (arrays of an unloaded 1899 // class can't be referenced anymore). 1900 if (_array_name != NULL) _array_name->decrement_refcount(); 1901 if (_source_file_name != NULL) _source_file_name->decrement_refcount(); 1902 if (_source_debug_extension != NULL) _source_debug_extension->decrement_refcount(); 1903 // walk constant pool and decrement symbol reference counts 1904 _constants->unreference_symbols(); 1905 } 1906 1907 void instanceKlass::set_source_file_name(Symbol* n) { 1908 _source_file_name = n; 1909 if (_source_file_name != NULL) _source_file_name->increment_refcount(); 1910 } 1911 1912 void instanceKlass::set_source_debug_extension(Symbol* n) { 1913 _source_debug_extension = n; 1914 if (_source_debug_extension != NULL) _source_debug_extension->increment_refcount(); 1915 } 1916 1917 address instanceKlass::static_field_addr(int offset) { 1918 return (address)(offset + instanceMirrorKlass::offset_of_static_fields() + (intptr_t)java_mirror()); 1919 } 1920 1921 1922 const char* instanceKlass::signature_name() const { 1923 const char* src = (const char*) (name()->as_C_string()); 1924 const int src_length = (int)strlen(src); 1925 char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3); 1926 int src_index = 0; 1927 int dest_index = 0; 1928 dest[dest_index++] = 'L'; 1929 while (src_index < src_length) { 1930 dest[dest_index++] = src[src_index++]; 1931 } 1932 dest[dest_index++] = ';'; 1933 dest[dest_index] = '\0'; 1934 return dest; 1935 } 1936 1937 // different verisons of is_same_class_package 1938 bool instanceKlass::is_same_class_package(klassOop class2) { 1939 klassOop class1 = as_klassOop(); 1940 oop classloader1 = instanceKlass::cast(class1)->class_loader(); 1941 Symbol* classname1 = Klass::cast(class1)->name(); 1942 1943 if (Klass::cast(class2)->oop_is_objArray()) { 1944 class2 = objArrayKlass::cast(class2)->bottom_klass(); 1945 } 1946 oop classloader2; 1947 if (Klass::cast(class2)->oop_is_instance()) { 1948 classloader2 = instanceKlass::cast(class2)->class_loader(); 1949 } else { 1950 assert(Klass::cast(class2)->oop_is_typeArray(), "should be type array"); 1951 classloader2 = NULL; 1952 } 1953 Symbol* classname2 = Klass::cast(class2)->name(); 1954 1955 return instanceKlass::is_same_class_package(classloader1, classname1, 1956 classloader2, classname2); 1957 } 1958 1959 bool instanceKlass::is_same_class_package(oop classloader2, Symbol* classname2) { 1960 klassOop class1 = as_klassOop(); 1961 oop classloader1 = instanceKlass::cast(class1)->class_loader(); 1962 Symbol* classname1 = Klass::cast(class1)->name(); 1963 1964 return instanceKlass::is_same_class_package(classloader1, classname1, 1965 classloader2, classname2); 1966 } 1967 1968 // return true if two classes are in the same package, classloader 1969 // and classname information is enough to determine a class's package 1970 bool 
instanceKlass::is_same_class_package(oop class_loader1, Symbol* class_name1,
                                     oop class_loader2, Symbol* class_name2) {
  if (class_loader1 != class_loader2) {
    return false;
  } else if (class_name1 == class_name2) {
    return true;                // skip painful bytewise comparison
  } else {
    ResourceMark rm;

    // The Symbol*'s are in UTF8 encoding. Since we only need to check explicitly
    // for ASCII characters ('/', 'L', '['), we can keep them in UTF8 encoding.
    // Otherwise, we just compare jbyte values between the strings.
    const jbyte *name1 = class_name1->base();
    const jbyte *name2 = class_name2->base();

    const jbyte *last_slash1 = UTF8::strrchr(name1, class_name1->utf8_length(), '/');
    const jbyte *last_slash2 = UTF8::strrchr(name2, class_name2->utf8_length(), '/');

    if ((last_slash1 == NULL) || (last_slash2 == NULL)) {
      // One of the two doesn't have a package.  Only return true
      // if the other one also doesn't have a package.
      return last_slash1 == last_slash2;
    } else {
      // Skip over '['s
      if (*name1 == '[') {
        do {
          name1++;
        } while (*name1 == '[');
        if (*name1 != 'L') {
          // Something is terribly wrong.  Shouldn't be here.
          return false;
        }
      }
      if (*name2 == '[') {
        do {
          name2++;
        } while (*name2 == '[');
        if (*name2 != 'L') {
          // Something is terribly wrong.  Shouldn't be here.
          return false;
        }
      }

      // Check that package part is identical
      int length1 = last_slash1 - name1;
      int length2 = last_slash2 - name2;

      return UTF8::equal(name1, length1, name2, length2);
    }
  }
}

// Returns true iff super_method can be overridden by a method in targetclassname
// See JLS 3rd edition 8.4.6.1
// Assumes name-signature match
// "this" is the instanceKlass of super_method, which must exist
// Note that the instanceKlass of the method in targetclassname may not have been created yet
bool instanceKlass::is_override(methodHandle super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS) {
  // Private methods cannot be overridden
  if (super_method->is_private()) {
    return false;
  }
  // If super method is accessible, then override
  if ((super_method->is_protected()) ||
      (super_method->is_public())) {
    return true;
  }
  // Package-private methods are not inherited outside of package
  assert(super_method->is_package_private(), "must be package private");
  return(is_same_class_package(targetclassloader(), targetclassname));
}

/* defined for now in jvm.cpp, for historical reasons *--
klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle self,
                                                     Symbol*& simple_name_result, TRAPS) {
  ...
}
*/

// tell if two classes have the same enclosing class (at package level)
bool instanceKlass::is_same_package_member_impl(instanceKlassHandle class1,
                                                klassOop class2_oop, TRAPS) {
  if (class2_oop == class1->as_klassOop())          return true;
  if (!Klass::cast(class2_oop)->oop_is_instance())  return false;
  instanceKlassHandle class2(THREAD, class2_oop);

  // must be in same package before we try anything else
  if (!class1->is_same_class_package(class2->class_loader(), class2->name()))
    return false;

  // As long as there is an outer1.getEnclosingClass,
  // shift the search outward.
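  // Worked example (hypothetical class names, added for illustration only):
  // with class1 = A$B$C and class2 = A, the first walk below visits A$B and
  // then A, matching class2 on the second step.  With class1 = A$B and
  // class2 = A$D, the first walk ends with outer1 = A without a match, and
  // the second walk (from A$D) reaches A, which equals outer1, so the two
  // classes are recognized as members of the same top-level class.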
2062 instanceKlassHandle outer1 = class1; 2063 for (;;) { 2064 // As we walk along, look for equalities between outer1 and class2. 2065 // Eventually, the walks will terminate as outer1 stops 2066 // at the top-level class around the original class. 2067 bool ignore_inner_is_member; 2068 klassOop next = outer1->compute_enclosing_class(&ignore_inner_is_member, 2069 CHECK_false); 2070 if (next == NULL) break; 2071 if (next == class2()) return true; 2072 outer1 = instanceKlassHandle(THREAD, next); 2073 } 2074 2075 // Now do the same for class2. 2076 instanceKlassHandle outer2 = class2; 2077 for (;;) { 2078 bool ignore_inner_is_member; 2079 klassOop next = outer2->compute_enclosing_class(&ignore_inner_is_member, 2080 CHECK_false); 2081 if (next == NULL) break; 2082 // Might as well check the new outer against all available values. 2083 if (next == class1()) return true; 2084 if (next == outer1()) return true; 2085 outer2 = instanceKlassHandle(THREAD, next); 2086 } 2087 2088 // If by this point we have not found an equality between the 2089 // two classes, we know they are in separate package members. 2090 return false; 2091 } 2092 2093 2094 jint instanceKlass::compute_modifier_flags(TRAPS) const { 2095 klassOop k = as_klassOop(); 2096 jint access = access_flags().as_int(); 2097 2098 // But check if it happens to be member class. 2099 typeArrayOop inner_class_list = inner_classes(); 2100 int length = (inner_class_list == NULL) ? 0 : inner_class_list->length(); 2101 assert (length % instanceKlass::inner_class_next_offset == 0, "just checking"); 2102 if (length > 0) { 2103 typeArrayHandle inner_class_list_h(THREAD, inner_class_list); 2104 instanceKlassHandle ik(THREAD, k); 2105 for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) { 2106 int ioff = inner_class_list_h->ushort_at( 2107 i + instanceKlass::inner_class_inner_class_info_offset); 2108 2109 // Inner class attribute can be zero, skip it. 2110 // Strange but true: JVM spec. allows null inner class refs. 2111 if (ioff == 0) continue; 2112 2113 // only look at classes that are already loaded 2114 // since we are looking for the flags for our self. 2115 Symbol* inner_name = ik->constants()->klass_name_at(ioff); 2116 if ((ik->name() == inner_name)) { 2117 // This is really a member class. 2118 access = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_access_flags_offset); 2119 break; 2120 } 2121 } 2122 } 2123 // Remember to strip ACC_SUPER bit 2124 return (access & (~JVM_ACC_SUPER)) & JVM_ACC_WRITTEN_FLAGS; 2125 } 2126 2127 jint instanceKlass::jvmti_class_status() const { 2128 jint result = 0; 2129 2130 if (is_linked()) { 2131 result |= JVMTI_CLASS_STATUS_VERIFIED | JVMTI_CLASS_STATUS_PREPARED; 2132 } 2133 2134 if (is_initialized()) { 2135 assert(is_linked(), "Class status is not consistent"); 2136 result |= JVMTI_CLASS_STATUS_INITIALIZED; 2137 } 2138 if (is_in_error_state()) { 2139 result |= JVMTI_CLASS_STATUS_ERROR; 2140 } 2141 return result; 2142 } 2143 2144 methodOop instanceKlass::method_at_itable(klassOop holder, int index, TRAPS) { 2145 itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable(); 2146 int method_table_offset_in_words = ioe->offset()/wordSize; 2147 int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words()) 2148 / itableOffsetEntry::size(); 2149 2150 for (int cnt = 0 ; ; cnt ++, ioe ++) { 2151 // If the interface isn't implemented by the receiver class, 2152 // the VM should throw IncompatibleClassChangeError. 
2153 if (cnt >= nof_interfaces) { 2154 THROW_0(vmSymbols::java_lang_IncompatibleClassChangeError()); 2155 } 2156 2157 klassOop ik = ioe->interface_klass(); 2158 if (ik == holder) break; 2159 } 2160 2161 itableMethodEntry* ime = ioe->first_method_entry(as_klassOop()); 2162 methodOop m = ime[index].method(); 2163 if (m == NULL) { 2164 THROW_0(vmSymbols::java_lang_AbstractMethodError()); 2165 } 2166 return m; 2167 } 2168 2169 // On-stack replacement stuff 2170 void instanceKlass::add_osr_nmethod(nmethod* n) { 2171 // only one compilation can be active 2172 NEEDS_CLEANUP 2173 // This is a short non-blocking critical region, so the no safepoint check is ok. 2174 OsrList_lock->lock_without_safepoint_check(); 2175 assert(n->is_osr_method(), "wrong kind of nmethod"); 2176 n->set_osr_link(osr_nmethods_head()); 2177 set_osr_nmethods_head(n); 2178 // Raise the highest osr level if necessary 2179 if (TieredCompilation) { 2180 methodOop m = n->method(); 2181 m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level())); 2182 } 2183 // Remember to unlock again 2184 OsrList_lock->unlock(); 2185 2186 // Get rid of the osr methods for the same bci that have lower levels. 2187 if (TieredCompilation) { 2188 for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) { 2189 nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true); 2190 if (inv != NULL && inv->is_in_use()) { 2191 inv->make_not_entrant(); 2192 } 2193 } 2194 } 2195 } 2196 2197 2198 void instanceKlass::remove_osr_nmethod(nmethod* n) { 2199 // This is a short non-blocking critical region, so the no safepoint check is ok. 2200 OsrList_lock->lock_without_safepoint_check(); 2201 assert(n->is_osr_method(), "wrong kind of nmethod"); 2202 nmethod* last = NULL; 2203 nmethod* cur = osr_nmethods_head(); 2204 int max_level = CompLevel_none; // Find the max comp level excluding n 2205 methodOop m = n->method(); 2206 // Search for match 2207 while(cur != NULL && cur != n) { 2208 if (TieredCompilation) { 2209 // Find max level before n 2210 max_level = MAX2(max_level, cur->comp_level()); 2211 } 2212 last = cur; 2213 cur = cur->osr_link(); 2214 } 2215 nmethod* next = NULL; 2216 if (cur == n) { 2217 next = cur->osr_link(); 2218 if (last == NULL) { 2219 // Remove first element 2220 set_osr_nmethods_head(next); 2221 } else { 2222 last->set_osr_link(next); 2223 } 2224 } 2225 n->set_osr_link(NULL); 2226 if (TieredCompilation) { 2227 cur = next; 2228 while (cur != NULL) { 2229 // Find max level after n 2230 max_level = MAX2(max_level, cur->comp_level()); 2231 cur = cur->osr_link(); 2232 } 2233 m->set_highest_osr_comp_level(max_level); 2234 } 2235 // Remember to unlock again 2236 OsrList_lock->unlock(); 2237 } 2238 2239 nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci, int comp_level, bool match_level) const { 2240 // This is a short non-blocking critical region, so the no safepoint check is ok. 2241 OsrList_lock->lock_without_safepoint_check(); 2242 nmethod* osr = osr_nmethods_head(); 2243 nmethod* best = NULL; 2244 while (osr != NULL) { 2245 assert(osr->is_osr_method(), "wrong kind of nmethod found in chain"); 2246 // There can be a time when a c1 osr method exists but we are waiting 2247 // for a c2 version. When c2 completes its osr nmethod we will trash 2248 // the c1 version and only be able to find the c2 version. 
However 2249 // while we overflow in the c1 code at back branches we don't want to 2250 // try and switch to the same code as we are already running 2251 2252 if (osr->method() == m && 2253 (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) { 2254 if (match_level) { 2255 if (osr->comp_level() == comp_level) { 2256 // Found a match - return it. 2257 OsrList_lock->unlock(); 2258 return osr; 2259 } 2260 } else { 2261 if (best == NULL || (osr->comp_level() > best->comp_level())) { 2262 if (osr->comp_level() == CompLevel_highest_tier) { 2263 // Found the best possible - return it. 2264 OsrList_lock->unlock(); 2265 return osr; 2266 } 2267 best = osr; 2268 } 2269 } 2270 } 2271 osr = osr->osr_link(); 2272 } 2273 OsrList_lock->unlock(); 2274 if (best != NULL && best->comp_level() >= comp_level && match_level == false) { 2275 return best; 2276 } 2277 return NULL; 2278 } 2279 2280 // ----------------------------------------------------------------------------------------------------- 2281 #ifndef PRODUCT 2282 2283 // Printing 2284 2285 #define BULLET " - " 2286 2287 void FieldPrinter::do_field(fieldDescriptor* fd) { 2288 _st->print(BULLET); 2289 if (_obj == NULL) { 2290 fd->print_on(_st); 2291 _st->cr(); 2292 } else { 2293 fd->print_on_for(_st, _obj); 2294 _st->cr(); 2295 } 2296 } 2297 2298 2299 void instanceKlass::oop_print_on(oop obj, outputStream* st) { 2300 Klass::oop_print_on(obj, st); 2301 2302 if (as_klassOop() == SystemDictionary::String_klass()) { 2303 typeArrayOop value = java_lang_String::value(obj); 2304 juint offset = java_lang_String::offset(obj); 2305 juint length = java_lang_String::length(obj); 2306 if (value != NULL && 2307 value->is_typeArray() && 2308 offset <= (juint) value->length() && 2309 offset + length <= (juint) value->length()) { 2310 st->print(BULLET"string: "); 2311 Handle h_obj(obj); 2312 java_lang_String::print(h_obj, st); 2313 st->cr(); 2314 if (!WizardMode) return; // that is enough 2315 } 2316 } 2317 2318 st->print_cr(BULLET"---- fields (total size %d words):", oop_size(obj)); 2319 FieldPrinter print_field(st, obj); 2320 do_nonstatic_fields(&print_field); 2321 2322 if (as_klassOop() == SystemDictionary::Class_klass()) { 2323 st->print(BULLET"signature: "); 2324 java_lang_Class::print_signature(obj, st); 2325 st->cr(); 2326 klassOop mirrored_klass = java_lang_Class::as_klassOop(obj); 2327 st->print(BULLET"fake entry for mirror: "); 2328 mirrored_klass->print_value_on(st); 2329 st->cr(); 2330 st->print(BULLET"fake entry resolved_constructor: "); 2331 methodOop ctor = java_lang_Class::resolved_constructor(obj); 2332 ctor->print_value_on(st); 2333 klassOop array_klass = java_lang_Class::array_klass(obj); 2334 st->cr(); 2335 st->print(BULLET"fake entry for array: "); 2336 array_klass->print_value_on(st); 2337 st->cr(); 2338 st->print_cr(BULLET"fake entry for oop_size: %d", java_lang_Class::oop_size(obj)); 2339 st->print_cr(BULLET"fake entry for static_oop_field_count: %d", java_lang_Class::static_oop_field_count(obj)); 2340 klassOop real_klass = java_lang_Class::as_klassOop(obj); 2341 if (real_klass != NULL && real_klass->klass_part()->oop_is_instance()) { 2342 instanceKlass::cast(real_klass)->do_local_static_fields(&print_field); 2343 } 2344 } else if (as_klassOop() == SystemDictionary::MethodType_klass()) { 2345 st->print(BULLET"signature: "); 2346 java_lang_invoke_MethodType::print_signature(obj, st); 2347 st->cr(); 2348 } 2349 } 2350 2351 #endif //PRODUCT 2352 2353 void instanceKlass::oop_print_value_on(oop obj, outputStream* st) { 2354 st->print("a "); 2355 
name()->print_value_on(st); 2356 obj->print_address_on(st); 2357 if (as_klassOop() == SystemDictionary::String_klass() 2358 && java_lang_String::value(obj) != NULL) { 2359 ResourceMark rm; 2360 int len = java_lang_String::length(obj); 2361 int plen = (len < 24 ? len : 12); 2362 char* str = java_lang_String::as_utf8_string(obj, 0, plen); 2363 st->print(" = \"%s\"", str); 2364 if (len > plen) 2365 st->print("...[%d]", len); 2366 } else if (as_klassOop() == SystemDictionary::Class_klass()) { 2367 klassOop k = java_lang_Class::as_klassOop(obj); 2368 st->print(" = "); 2369 if (k != NULL) { 2370 k->print_value_on(st); 2371 } else { 2372 const char* tname = type2name(java_lang_Class::primitive_type(obj)); 2373 st->print("%s", tname ? tname : "type?"); 2374 } 2375 } else if (as_klassOop() == SystemDictionary::MethodType_klass()) { 2376 st->print(" = "); 2377 java_lang_invoke_MethodType::print_signature(obj, st); 2378 } else if (java_lang_boxing_object::is_instance(obj)) { 2379 st->print(" = "); 2380 java_lang_boxing_object::print(obj, st); 2381 } 2382 } 2383 2384 const char* instanceKlass::internal_name() const { 2385 return external_name(); 2386 } 2387 2388 // Verification 2389 2390 class VerifyFieldClosure: public OopClosure { 2391 protected: 2392 template <class T> void do_oop_work(T* p) { 2393 guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap"); 2394 oop obj = oopDesc::load_decode_heap_oop(p); 2395 if (!obj->is_oop_or_null()) { 2396 tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj); 2397 Universe::print(); 2398 guarantee(false, "boom"); 2399 } 2400 } 2401 public: 2402 virtual void do_oop(oop* p) { VerifyFieldClosure::do_oop_work(p); } 2403 virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); } 2404 }; 2405 2406 void instanceKlass::oop_verify_on(oop obj, outputStream* st) { 2407 Klass::oop_verify_on(obj, st); 2408 VerifyFieldClosure blk; 2409 oop_oop_iterate(obj, &blk); 2410 } 2411 2412 // JNIid class for jfieldIDs only 2413 // Note to reviewers: 2414 // These JNI functions are just moved over to column 1 and not changed 2415 // in the compressed oops workspace. 
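// Illustrative call pattern (an assumption about the typical caller, not code
// taken from this file): jfieldID resolution keeps one JNIid chain per klass,
// keyed by field offset, and only prepends a node when the offset is not yet
// present:
//
//   JNIid* probe = ik->jni_ids();                      // head of the chain
//   JNIid* id = (probe == NULL) ? NULL : probe->find(offset);
//   if (id == NULL) {
//     id = new JNIid(ik->as_klassOop(), offset, ik->jni_ids());
//     ik->set_jni_ids(id);                             // new node becomes head
//   }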
2416 JNIid::JNIid(klassOop holder, int offset, JNIid* next) { 2417 _holder = holder; 2418 _offset = offset; 2419 _next = next; 2420 debug_only(_is_static_field_id = false;) 2421 } 2422 2423 2424 JNIid* JNIid::find(int offset) { 2425 JNIid* current = this; 2426 while (current != NULL) { 2427 if (current->offset() == offset) return current; 2428 current = current->next(); 2429 } 2430 return NULL; 2431 } 2432 2433 void JNIid::oops_do(OopClosure* f) { 2434 for (JNIid* cur = this; cur != NULL; cur = cur->next()) { 2435 f->do_oop(cur->holder_addr()); 2436 } 2437 } 2438 2439 void JNIid::deallocate(JNIid* current) { 2440 while (current != NULL) { 2441 JNIid* next = current->next(); 2442 delete current; 2443 current = next; 2444 } 2445 } 2446 2447 2448 void JNIid::verify(klassOop holder) { 2449 int first_field_offset = instanceMirrorKlass::offset_of_static_fields(); 2450 int end_field_offset; 2451 end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize); 2452 2453 JNIid* current = this; 2454 while (current != NULL) { 2455 guarantee(current->holder() == holder, "Invalid klass in JNIid"); 2456 #ifdef ASSERT 2457 int o = current->offset(); 2458 if (current->is_static_field_id()) { 2459 guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid"); 2460 } 2461 #endif 2462 current = current->next(); 2463 } 2464 } 2465 2466 2467 #ifdef ASSERT 2468 void instanceKlass::set_init_state(ClassState state) { 2469 bool good_state = as_klassOop()->is_shared() ? (_init_state <= state) 2470 : (_init_state < state); 2471 assert(good_state || state == allocated, "illegal state transition"); 2472 _init_state = state; 2473 } 2474 #endif 2475 2476 2477 // RedefineClasses() support for previous versions: 2478 2479 // Add an information node that contains weak references to the 2480 // interesting parts of the previous version of the_class. 2481 // This is also where we clean out any unused weak references. 2482 // Note that while we delete nodes from the _previous_versions 2483 // array, we never delete the array itself until the klass is 2484 // unloaded. The has_been_redefined() query depends on that fact. 2485 // 2486 void instanceKlass::add_previous_version(instanceKlassHandle ikh, 2487 BitMap* emcp_methods, int emcp_method_count) { 2488 assert(Thread::current()->is_VM_thread(), 2489 "only VMThread can add previous versions"); 2490 2491 if (_previous_versions == NULL) { 2492 // This is the first previous version so make some space. 2493 // Start with 2 elements under the assumption that the class 2494 // won't be redefined much. 
2495 _previous_versions = new (ResourceObj::C_HEAP) 2496 GrowableArray<PreviousVersionNode *>(2, true); 2497 } 2498 2499 // RC_TRACE macro has an embedded ResourceMark 2500 RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d", 2501 ikh->external_name(), _previous_versions->length(), emcp_method_count)); 2502 constantPoolHandle cp_h(ikh->constants()); 2503 jobject cp_ref; 2504 if (cp_h->is_shared()) { 2505 // a shared ConstantPool requires a regular reference; a weak 2506 // reference would be collectible 2507 cp_ref = JNIHandles::make_global(cp_h); 2508 } else { 2509 cp_ref = JNIHandles::make_weak_global(cp_h); 2510 } 2511 PreviousVersionNode * pv_node = NULL; 2512 objArrayOop old_methods = ikh->methods(); 2513 2514 if (emcp_method_count == 0) { 2515 // non-shared ConstantPool gets a weak reference 2516 pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), NULL); 2517 RC_TRACE(0x00000400, 2518 ("add: all methods are obsolete; flushing any EMCP weak refs")); 2519 } else { 2520 int local_count = 0; 2521 GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP) 2522 GrowableArray<jweak>(emcp_method_count, true); 2523 for (int i = 0; i < old_methods->length(); i++) { 2524 if (emcp_methods->at(i)) { 2525 // this old method is EMCP so save a weak ref 2526 methodOop old_method = (methodOop) old_methods->obj_at(i); 2527 methodHandle old_method_h(old_method); 2528 jweak method_ref = JNIHandles::make_weak_global(old_method_h); 2529 method_refs->append(method_ref); 2530 if (++local_count >= emcp_method_count) { 2531 // no more EMCP methods so bail out now 2532 break; 2533 } 2534 } 2535 } 2536 // non-shared ConstantPool gets a weak reference 2537 pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), method_refs); 2538 } 2539 2540 _previous_versions->append(pv_node); 2541 2542 // Using weak references allows the interesting parts of previous 2543 // classes to be GC'ed when they are no longer needed. Since the 2544 // caller is the VMThread and we are at a safepoint, this is a good 2545 // time to clear out unused weak references. 2546 2547 RC_TRACE(0x00000400, ("add: previous version length=%d", 2548 _previous_versions->length())); 2549 2550 // skip the last entry since we just added it 2551 for (int i = _previous_versions->length() - 2; i >= 0; i--) { 2552 // check the previous versions array for a GC'ed weak refs 2553 pv_node = _previous_versions->at(i); 2554 cp_ref = pv_node->prev_constant_pool(); 2555 assert(cp_ref != NULL, "cp ref was unexpectedly cleared"); 2556 if (cp_ref == NULL) { 2557 delete pv_node; 2558 _previous_versions->remove_at(i); 2559 // Since we are traversing the array backwards, we don't have to 2560 // do anything special with the index. 2561 continue; // robustness 2562 } 2563 2564 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); 2565 if (cp == NULL) { 2566 // this entry has been GC'ed so remove it 2567 delete pv_node; 2568 _previous_versions->remove_at(i); 2569 // Since we are traversing the array backwards, we don't have to 2570 // do anything special with the index. 
2571 continue; 2572 } else { 2573 RC_TRACE(0x00000400, ("add: previous version @%d is alive", i)); 2574 } 2575 2576 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); 2577 if (method_refs != NULL) { 2578 RC_TRACE(0x00000400, ("add: previous methods length=%d", 2579 method_refs->length())); 2580 for (int j = method_refs->length() - 1; j >= 0; j--) { 2581 jweak method_ref = method_refs->at(j); 2582 assert(method_ref != NULL, "weak method ref was unexpectedly cleared"); 2583 if (method_ref == NULL) { 2584 method_refs->remove_at(j); 2585 // Since we are traversing the array backwards, we don't have to 2586 // do anything special with the index. 2587 continue; // robustness 2588 } 2589 2590 methodOop method = (methodOop)JNIHandles::resolve(method_ref); 2591 if (method == NULL || emcp_method_count == 0) { 2592 // This method entry has been GC'ed or the current 2593 // RedefineClasses() call has made all methods obsolete 2594 // so remove it. 2595 JNIHandles::destroy_weak_global(method_ref); 2596 method_refs->remove_at(j); 2597 } else { 2598 // RC_TRACE macro has an embedded ResourceMark 2599 RC_TRACE(0x00000400, 2600 ("add: %s(%s): previous method @%d in version @%d is alive", 2601 method->name()->as_C_string(), method->signature()->as_C_string(), 2602 j, i)); 2603 } 2604 } 2605 } 2606 } 2607 2608 int obsolete_method_count = old_methods->length() - emcp_method_count; 2609 2610 if (emcp_method_count != 0 && obsolete_method_count != 0 && 2611 _previous_versions->length() > 1) { 2612 // We have a mix of obsolete and EMCP methods. If there is more 2613 // than the previous version that we just added, then we have to 2614 // clear out any matching EMCP method entries the hard way. 2615 int local_count = 0; 2616 for (int i = 0; i < old_methods->length(); i++) { 2617 if (!emcp_methods->at(i)) { 2618 // only obsolete methods are interesting 2619 methodOop old_method = (methodOop) old_methods->obj_at(i); 2620 Symbol* m_name = old_method->name(); 2621 Symbol* m_signature = old_method->signature(); 2622 2623 // skip the last entry since we just added it 2624 for (int j = _previous_versions->length() - 2; j >= 0; j--) { 2625 // check the previous versions array for a GC'ed weak refs 2626 pv_node = _previous_versions->at(j); 2627 cp_ref = pv_node->prev_constant_pool(); 2628 assert(cp_ref != NULL, "cp ref was unexpectedly cleared"); 2629 if (cp_ref == NULL) { 2630 delete pv_node; 2631 _previous_versions->remove_at(j); 2632 // Since we are traversing the array backwards, we don't have to 2633 // do anything special with the index. 2634 continue; // robustness 2635 } 2636 2637 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); 2638 if (cp == NULL) { 2639 // this entry has been GC'ed so remove it 2640 delete pv_node; 2641 _previous_versions->remove_at(j); 2642 // Since we are traversing the array backwards, we don't have to 2643 // do anything special with the index. 2644 continue; 2645 } 2646 2647 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); 2648 if (method_refs == NULL) { 2649 // We have run into a PreviousVersion generation where 2650 // all methods were made obsolete during that generation's 2651 // RedefineClasses() operation. At the time of that 2652 // operation, all EMCP methods were flushed so we don't 2653 // have to go back any further. 2654 // 2655 // A NULL method_refs is different than an empty method_refs. 2656 // We cannot infer any optimizations about older generations 2657 // from an empty method_refs for the current generation. 
2658 break; 2659 } 2660 2661 for (int k = method_refs->length() - 1; k >= 0; k--) { 2662 jweak method_ref = method_refs->at(k); 2663 assert(method_ref != NULL, 2664 "weak method ref was unexpectedly cleared"); 2665 if (method_ref == NULL) { 2666 method_refs->remove_at(k); 2667 // Since we are traversing the array backwards, we don't 2668 // have to do anything special with the index. 2669 continue; // robustness 2670 } 2671 2672 methodOop method = (methodOop)JNIHandles::resolve(method_ref); 2673 if (method == NULL) { 2674 // this method entry has been GC'ed so skip it 2675 JNIHandles::destroy_weak_global(method_ref); 2676 method_refs->remove_at(k); 2677 continue; 2678 } 2679 2680 if (method->name() == m_name && 2681 method->signature() == m_signature) { 2682 // The current RedefineClasses() call has made all EMCP 2683 // versions of this method obsolete so mark it as obsolete 2684 // and remove the weak ref. 2685 RC_TRACE(0x00000400, 2686 ("add: %s(%s): flush obsolete method @%d in version @%d", 2687 m_name->as_C_string(), m_signature->as_C_string(), k, j)); 2688 2689 method->set_is_obsolete(); 2690 JNIHandles::destroy_weak_global(method_ref); 2691 method_refs->remove_at(k); 2692 break; 2693 } 2694 } 2695 2696 // The previous loop may not find a matching EMCP method, but 2697 // that doesn't mean that we can optimize and not go any 2698 // further back in the PreviousVersion generations. The EMCP 2699 // method for this generation could have already been GC'ed, 2700 // but there still may be an older EMCP method that has not 2701 // been GC'ed. 2702 } 2703 2704 if (++local_count >= obsolete_method_count) { 2705 // no more obsolete methods so bail out now 2706 break; 2707 } 2708 } 2709 } 2710 } 2711 } // end add_previous_version() 2712 2713 2714 // Determine if instanceKlass has a previous version. 2715 bool instanceKlass::has_previous_version() const { 2716 if (_previous_versions == NULL) { 2717 // no previous versions array so answer is easy 2718 return false; 2719 } 2720 2721 for (int i = _previous_versions->length() - 1; i >= 0; i--) { 2722 // Check the previous versions array for an info node that hasn't 2723 // been GC'ed 2724 PreviousVersionNode * pv_node = _previous_versions->at(i); 2725 2726 jobject cp_ref = pv_node->prev_constant_pool(); 2727 assert(cp_ref != NULL, "cp reference was unexpectedly cleared"); 2728 if (cp_ref == NULL) { 2729 continue; // robustness 2730 } 2731 2732 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); 2733 if (cp != NULL) { 2734 // we have at least one previous version 2735 return true; 2736 } 2737 2738 // We don't have to check the method refs. If the constant pool has 2739 // been GC'ed then so have the methods. 2740 } 2741 2742 // all of the underlying nodes' info has been GC'ed 2743 return false; 2744 } // end has_previous_version() 2745 2746 methodOop instanceKlass::method_with_idnum(int idnum) { 2747 methodOop m = NULL; 2748 if (idnum < methods()->length()) { 2749 m = (methodOop) methods()->obj_at(idnum); 2750 } 2751 if (m == NULL || m->method_idnum() != idnum) { 2752 for (int index = 0; index < methods()->length(); ++index) { 2753 m = (methodOop) methods()->obj_at(index); 2754 if (m->method_idnum() == idnum) { 2755 return m; 2756 } 2757 } 2758 } 2759 return m; 2760 } 2761 2762 2763 // Set the annotation at 'idnum' to 'anno'. 2764 // We don't want to create or extend the array if 'anno' is NULL, since that is the 2765 // default value. However, if the array exists and is long enough, we must set NULL values. 
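// Worked example of the policy above (hypothetical values): with
// _idnum_allocated_count == 10 and no existing array, storing a non-NULL
// 'anno' at idnum 3 allocates a 10-element system objArray and fills slot 3;
// a later call with anno == NULL and idnum 12 does nothing, because the array
// is too short and NULL is already the implied default for missing slots.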
2766 void instanceKlass::set_methods_annotations_of(int idnum, typeArrayOop anno, objArrayOop* md_p) { 2767 objArrayOop md = *md_p; 2768 if (md != NULL && md->length() > idnum) { 2769 md->obj_at_put(idnum, anno); 2770 } else if (anno != NULL) { 2771 // create the array 2772 int length = MAX2(idnum+1, (int)_idnum_allocated_count); 2773 md = oopFactory::new_system_objArray(length, Thread::current()); 2774 if (*md_p != NULL) { 2775 // copy the existing entries 2776 for (int index = 0; index < (*md_p)->length(); index++) { 2777 md->obj_at_put(index, (*md_p)->obj_at(index)); 2778 } 2779 } 2780 set_annotations(md, md_p); 2781 md->obj_at_put(idnum, anno); 2782 } // if no array and idnum isn't included there is nothing to do 2783 } 2784 2785 // Construct a PreviousVersionNode entry for the array hung off 2786 // the instanceKlass. 2787 PreviousVersionNode::PreviousVersionNode(jobject prev_constant_pool, 2788 bool prev_cp_is_weak, GrowableArray<jweak>* prev_EMCP_methods) { 2789 2790 _prev_constant_pool = prev_constant_pool; 2791 _prev_cp_is_weak = prev_cp_is_weak; 2792 _prev_EMCP_methods = prev_EMCP_methods; 2793 } 2794 2795 2796 // Destroy a PreviousVersionNode 2797 PreviousVersionNode::~PreviousVersionNode() { 2798 if (_prev_constant_pool != NULL) { 2799 if (_prev_cp_is_weak) { 2800 JNIHandles::destroy_weak_global(_prev_constant_pool); 2801 } else { 2802 JNIHandles::destroy_global(_prev_constant_pool); 2803 } 2804 } 2805 2806 if (_prev_EMCP_methods != NULL) { 2807 for (int i = _prev_EMCP_methods->length() - 1; i >= 0; i--) { 2808 jweak method_ref = _prev_EMCP_methods->at(i); 2809 if (method_ref != NULL) { 2810 JNIHandles::destroy_weak_global(method_ref); 2811 } 2812 } 2813 delete _prev_EMCP_methods; 2814 } 2815 } 2816 2817 2818 // Construct a PreviousVersionInfo entry 2819 PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) { 2820 _prev_constant_pool_handle = constantPoolHandle(); // NULL handle 2821 _prev_EMCP_method_handles = NULL; 2822 2823 jobject cp_ref = pv_node->prev_constant_pool(); 2824 assert(cp_ref != NULL, "constant pool ref was unexpectedly cleared"); 2825 if (cp_ref == NULL) { 2826 return; // robustness 2827 } 2828 2829 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); 2830 if (cp == NULL) { 2831 // Weak reference has been GC'ed. Since the constant pool has been 2832 // GC'ed, the methods have also been GC'ed. 2833 return; 2834 } 2835 2836 // make the constantPoolOop safe to return 2837 _prev_constant_pool_handle = constantPoolHandle(cp); 2838 2839 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); 2840 if (method_refs == NULL) { 2841 // the instanceKlass did not have any EMCP methods 2842 return; 2843 } 2844 2845 _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10); 2846 2847 int n_methods = method_refs->length(); 2848 for (int i = 0; i < n_methods; i++) { 2849 jweak method_ref = method_refs->at(i); 2850 assert(method_ref != NULL, "weak method ref was unexpectedly cleared"); 2851 if (method_ref == NULL) { 2852 continue; // robustness 2853 } 2854 2855 methodOop method = (methodOop)JNIHandles::resolve(method_ref); 2856 if (method == NULL) { 2857 // this entry has been GC'ed so skip it 2858 continue; 2859 } 2860 2861 // make the methodOop safe to return 2862 _prev_EMCP_method_handles->append(methodHandle(method)); 2863 } 2864 } 2865 2866 2867 // Destroy a PreviousVersionInfo 2868 PreviousVersionInfo::~PreviousVersionInfo() { 2869 // Since _prev_EMCP_method_handles is not C-heap allocated, we 2870 // don't have to delete it. 
2871 } 2872 2873 2874 // Construct a helper for walking the previous versions array 2875 PreviousVersionWalker::PreviousVersionWalker(instanceKlass *ik) { 2876 _previous_versions = ik->previous_versions(); 2877 _current_index = 0; 2878 // _hm needs no initialization 2879 _current_p = NULL; 2880 } 2881 2882 2883 // Destroy a PreviousVersionWalker 2884 PreviousVersionWalker::~PreviousVersionWalker() { 2885 // Delete the current info just in case the caller didn't walk to 2886 // the end of the previous versions list. No harm if _current_p is 2887 // already NULL. 2888 delete _current_p; 2889 2890 // When _hm is destroyed, all the Handles returned in 2891 // PreviousVersionInfo objects will be destroyed. 2892 // Also, after this destructor is finished it will be 2893 // safe to delete the GrowableArray allocated in the 2894 // PreviousVersionInfo objects. 2895 } 2896 2897 2898 // Return the interesting information for the next previous version 2899 // of the klass. Returns NULL if there are no more previous versions. 2900 PreviousVersionInfo* PreviousVersionWalker::next_previous_version() { 2901 if (_previous_versions == NULL) { 2902 // no previous versions so nothing to return 2903 return NULL; 2904 } 2905 2906 delete _current_p; // cleanup the previous info for the caller 2907 _current_p = NULL; // reset to NULL so we don't delete same object twice 2908 2909 int length = _previous_versions->length(); 2910 2911 while (_current_index < length) { 2912 PreviousVersionNode * pv_node = _previous_versions->at(_current_index++); 2913 PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP) 2914 PreviousVersionInfo(pv_node); 2915 2916 constantPoolHandle cp_h = pv_info->prev_constant_pool_handle(); 2917 if (cp_h.is_null()) { 2918 delete pv_info; 2919 2920 // The underlying node's info has been GC'ed so try the next one. 2921 // We don't have to check the methods. If the constant pool has 2922 // GC'ed then so have the methods. 2923 continue; 2924 } 2925 2926 // Found a node with non GC'ed info so return it. The caller will 2927 // need to delete pv_info when they are done with it. 2928 _current_p = pv_info; 2929 return pv_info; 2930 } 2931 2932 // all of the underlying nodes' info has been GC'ed 2933 return NULL; 2934 } // end next_previous_version()
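// Illustrative usage sketch (assumed caller pattern, e.g. from the
// RedefineClasses() support code): the walker owns each PreviousVersionInfo it
// returns and reclaims it on the next call or in its destructor, so a caller
// simply loops.  prev_constant_pool_handle() appears above in this file;
// prev_EMCP_method_handles() is assumed to be the matching accessor for the
// EMCP method handles collected in the PreviousVersionInfo constructor.
//
//   PreviousVersionWalker pvw(ik);
//   for (PreviousVersionInfo* pv_info = pvw.next_previous_version();
//        pv_info != NULL; pv_info = pvw.next_previous_version()) {
//     constantPoolHandle pvcp = pv_info->prev_constant_pool_handle();
//     // inspect pvcp and pv_info->prev_EMCP_method_handles() here
//   }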