/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/verifier.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/oopMapCache.hpp"
#include "interpreter/rewriter.hpp"
#include "jvmtifiles/jvmti.h"
#include "memory/genOopClosures.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/permGen.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/objArrayKlassKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/threadService.hpp"
#include "utilities/dtrace.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifndef SERIALGC
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#endif
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif

#ifdef DTRACE_ENABLED

HS_DTRACE_PROBE_DECL4(hotspot, class__initialization__required,
  char*, intptr_t, oop, intptr_t);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__recursive,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__concurrent,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__erroneous,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__super__failed,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__clinit,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__error,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
  char*, intptr_t, oop, intptr_t, int);

#define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)          \
  {                                                              \
    char* data = NULL;                                           \
    int len = 0;                                                 \
    Symbol* name = (clss)->name();                               \
    if (name != NULL) {                                          \
      data = (char*)name->bytes();                               \
      len = name->utf8_length();                                 \
    }                                                            \
    HS_DTRACE_PROBE4(hotspot, class__initialization__##type,     \
      data, len, (clss)->class_loader(), thread_type);           \
  }

#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
  {                                                              \
    char* data = NULL;                                           \
    int len = 0;                                                 \
    Symbol* name = (clss)->name();                               \
    if (name != NULL) {                                          \
      data = (char*)name->bytes();                               \
      len = name->utf8_length();                                 \
    }                                                            \
    HS_DTRACE_PROBE5(hotspot, class__initialization__##type,     \
      data, len, (clss)->class_loader(), thread_type, wait);     \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)
#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait)

#endif //  ndef DTRACE_ENABLED

bool instanceKlass::should_be_initialized() const {
  return !is_initialized();
}

klassVtable* instanceKlass::vtable() const {
  return new klassVtable(as_klassOop(), start_of_vtable(), vtable_length() / vtableEntry::size());
}

klassItable* instanceKlass::itable() const {
  return new klassItable(as_klassOop());
}

void instanceKlass::eager_initialize(Thread *thread) {
  if (!EagerInitialization) return;

  if (this->is_not_initialized()) {
    // abort if the class has a class initializer
    if (this->class_initializer() != NULL) return;

    // abort if it is java.lang.Object (initialization is handled in genesis)
    klassOop super = this->super();
    if (super == NULL) return;

    // abort if the super class should be initialized
    if (!instanceKlass::cast(super)->is_initialized()) return;

    // call body to expose the this pointer
    instanceKlassHandle this_oop(thread, this->as_klassOop());
    eager_initialize_impl(this_oop);
  }
}


void instanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
  EXCEPTION_MARK;
  ObjectLocker ol(this_oop, THREAD);

  // abort if someone beat us to the initialization
  if (!this_oop->is_not_initialized()) return;  // note: not equivalent to is_initialized()

  ClassState old_state = this_oop->_init_state;
  link_class_impl(this_oop, true, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // Abort if linking the class throws an exception.

    // Use a test to avoid redundantly resetting the state if there's
    // no change.  Set_init_state() asserts that state changes make
    // progress, whereas here we might just be spinning in place.
    if (old_state != this_oop->_init_state)
      this_oop->set_init_state(old_state);
  } else {
    // linking successful, mark class as initialized
    this_oop->set_init_state(fully_initialized);
    // trace
    if (TraceClassInitialization) {
      ResourceMark rm(THREAD);
      tty->print_cr("[Initialized %s without side effects]", this_oop->external_name());
    }
  }
}


// See "The Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
// process. The step comments refer to the procedure described in that section.
// Note: implementation moved to static method to expose the this pointer.
void instanceKlass::initialize(TRAPS) {
  if (this->should_be_initialized()) {
    HandleMark hm(THREAD);
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    initialize_impl(this_oop, CHECK);
    // Note: at this point the class may be initialized
    //       OR it may be in the state of being initialized
    //       in case of recursive initialization!
  } else {
    assert(is_initialized(), "sanity check");
  }
}


bool instanceKlass::verify_code(
    instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
  // 1) Verify the bytecodes
  Verifier::Mode mode =
    throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
  return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false);
}


// Used exclusively by the shared spaces dump mechanism to prevent
// classes mapped into the shared regions in new VMs from appearing linked.

void instanceKlass::unlink_class() {
  assert(is_linked(), "must be linked");
  _init_state = loaded;
}

void instanceKlass::link_class(TRAPS) {
  assert(is_loaded(), "must be loaded");
  if (!is_linked()) {
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    link_class_impl(this_oop, true, CHECK);
  }
}
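
// Rough picture of the _init_state progression driven by the linking and
// initialization code in this file (a sketch, not an exhaustive list of
// ClassState values):
//
//   loaded -> linked -> being_initialized -> fully_initialized
//                                        \-> initialization_error  (if <clinit>
//                                            or a super's initialization fails)
//
// set_init_state() asserts that transitions make forward progress, which is
// why eager_initialize_impl() above restores old_state only when linking
// actually changed it.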

// Called to verify that a class can link during initialization, without
// throwing a VerifyError.
bool instanceKlass::link_class_or_fail(TRAPS) {
  assert(is_loaded(), "must be loaded");
  if (!is_linked()) {
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    link_class_impl(this_oop, false, CHECK_false);
  }
  return is_linked();
}

bool instanceKlass::link_class_impl(
    instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
  // check for error state
  if (this_oop->is_in_error_state()) {
    ResourceMark rm(THREAD);
    THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(),
               this_oop->external_name(), false);
  }
  // return if already verified
  if (this_oop->is_linked()) {
    return true;
  }

  // Timing
  // timer handles recursion
  assert(THREAD->is_Java_thread(), "non-JavaThread in link_class_impl");
  JavaThread* jt = (JavaThread*)THREAD;

  // link super class before linking this class
  instanceKlassHandle super(THREAD, this_oop->super());
  if (super.not_null()) {
    if (super->is_interface()) {  // check if super class is an interface
      ResourceMark rm(THREAD);
      Exceptions::fthrow(
        THREAD_AND_LOCATION,
        vmSymbols::java_lang_IncompatibleClassChangeError(),
        "class %s has interface %s as super class",
        this_oop->external_name(),
        super->external_name()
      );
      return false;
    }

    link_class_impl(super, throw_verifyerror, CHECK_false);
  }

  // link all interfaces implemented by this class before linking this class
  objArrayHandle interfaces (THREAD, this_oop->local_interfaces());
  int num_interfaces = interfaces->length();
  for (int index = 0; index < num_interfaces; index++) {
    HandleMark hm(THREAD);
    instanceKlassHandle ih(THREAD, klassOop(interfaces->obj_at(index)));
    link_class_impl(ih, throw_verifyerror, CHECK_false);
  }

  // in case the class is linked in the process of linking its superclasses
  if (this_oop->is_linked()) {
    return true;
  }

  // trace only the link time for this klass that includes
  // the verification time
  PerfClassTraceTime vmtimer(ClassLoader::perf_class_link_time(),
                             ClassLoader::perf_class_link_selftime(),
                             ClassLoader::perf_classes_linked(),
                             jt->get_thread_stat()->perf_recursion_counts_addr(),
                             jt->get_thread_stat()->perf_timers_addr(),
                             PerfClassTraceTime::CLASS_LINK);

  // verification & rewriting
  {
    ObjectLocker ol(this_oop, THREAD);
    // rewritten will have been set if loader constraint error found
    // on an earlier link attempt
    // don't verify or rewrite if already rewritten
    if (!this_oop->is_linked()) {
      if (!this_oop->is_rewritten()) {
        {
          // Timer includes any side effects of class verification (resolution,
          // etc), but not recursive entry into verify_code().
          PerfClassTraceTime timer(ClassLoader::perf_class_verify_time(),
                                   ClassLoader::perf_class_verify_selftime(),
                                   ClassLoader::perf_classes_verified(),
                                   jt->get_thread_stat()->perf_recursion_counts_addr(),
                                   jt->get_thread_stat()->perf_timers_addr(),
                                   PerfClassTraceTime::CLASS_VERIFY);
          bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD);
          if (!verify_ok) {
            return false;
          }
        }

        // Just in case a side-effect of verify linked this class already
        // (which can sometimes happen since the verifier loads classes
        // using custom class loaders, which are free to initialize things)
        if (this_oop->is_linked()) {
          return true;
        }

        // also sets rewritten
        this_oop->rewrite_class(CHECK_false);
      }

      // relocate jsrs and link methods after they are all rewritten
      this_oop->relocate_and_link_methods(CHECK_false);

      // Initialize the vtable and interface table after
      // methods have been rewritten since rewrite may
      // fabricate new methodOops.
      // also does loader constraint checking
      if (!this_oop()->is_shared()) {
        ResourceMark rm(THREAD);
        this_oop->vtable()->initialize_vtable(true, CHECK_false);
        this_oop->itable()->initialize_itable(true, CHECK_false);
      }
#ifdef ASSERT
      else {
        ResourceMark rm(THREAD);
        this_oop->vtable()->verify(tty, true);
        // In case itable verification is ever added.
        // this_oop->itable()->verify(tty, true);
      }
#endif
      this_oop->set_init_state(linked);
      if (JvmtiExport::should_post_class_prepare()) {
        Thread *thread = THREAD;
        assert(thread->is_Java_thread(), "thread->is_Java_thread()");
        JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop());
      }
    }
  }
  return true;
}


// Rewrite the byte codes of all of the methods of a class.
// The rewriter must be called exactly once. Rewriting must happen after
// verification but before the first method of the class is executed.
void instanceKlass::rewrite_class(TRAPS) {
  assert(is_loaded(), "must be loaded");
  instanceKlassHandle this_oop(THREAD, this->as_klassOop());
  if (this_oop->is_rewritten()) {
    assert(this_oop()->is_shared(), "rewriting an unshared class?");
    return;
  }
  Rewriter::rewrite(this_oop, CHECK);
  this_oop->set_rewritten();
}

// Now relocate and link method entry points after class is rewritten.
// This is outside is_rewritten flag. In case of an exception, it can be
// executed more than once.
void instanceKlass::relocate_and_link_methods(TRAPS) {
  assert(is_loaded(), "must be loaded");
  instanceKlassHandle this_oop(THREAD, this->as_klassOop());
  Rewriter::relocate_and_link(this_oop, CHECK);
}


void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
  // Make sure klass is linked (verified) before initialization
  // A class could already be verified, since it has been reflected upon.
  this_oop->link_class(CHECK);

  DTRACE_CLASSINIT_PROBE(required, instanceKlass::cast(this_oop()), -1);

  bool wait = false;

  // refer to the JVM book page 47 for description of steps
  // Step 1
  { ObjectLocker ol(this_oop, THREAD);

    Thread *self = THREAD; // it's passed the current thread

    // Step 2
    // If we were to use wait() instead of waitInterruptibly() then
    // we might end up throwing IE from link/symbol resolution sites
    // that aren't expected to throw.  This would wreak havoc.  See 6320309.
    while (this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {
      wait = true;
      ol.waitUninterruptibly(CHECK);
    }

    // Step 3
    if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self)) {
      DTRACE_CLASSINIT_PROBE_WAIT(recursive, instanceKlass::cast(this_oop()), -1, wait);
      return;
    }

    // Step 4
    if (this_oop->is_initialized()) {
      DTRACE_CLASSINIT_PROBE_WAIT(concurrent, instanceKlass::cast(this_oop()), -1, wait);
      return;
    }

    // Step 5
    if (this_oop->is_in_error_state()) {
      DTRACE_CLASSINIT_PROBE_WAIT(erroneous, instanceKlass::cast(this_oop()), -1, wait);
      ResourceMark rm(THREAD);
      const char* desc = "Could not initialize class ";
      const char* className = this_oop->external_name();
      size_t msglen = strlen(desc) + strlen(className) + 1;
      char* message = NEW_RESOURCE_ARRAY(char, msglen);
      if (NULL == message) {
        // Out of memory: can't create detailed error message
        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
      } else {
        jio_snprintf(message, msglen, "%s%s", desc, className);
        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
      }
    }

    // Step 6
    this_oop->set_init_state(being_initialized);
    this_oop->set_init_thread(self);
  }

  // Step 7
  klassOop super_klass = this_oop->super();
  if (super_klass != NULL && !this_oop->is_interface() && Klass::cast(super_klass)->should_be_initialized()) {
    Klass::cast(super_klass)->initialize(THREAD);

    if (HAS_PENDING_EXCEPTION) {
      Handle e(THREAD, PENDING_EXCEPTION);
      CLEAR_PENDING_EXCEPTION;
      {
        EXCEPTION_MARK;
        this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads
        CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, superclass initialization error is thrown below
      }
      DTRACE_CLASSINIT_PROBE_WAIT(super__failed, instanceKlass::cast(this_oop()), -1, wait);
      THROW_OOP(e());
    }
  }

  // Step 8
  {
    assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
    JavaThread* jt = (JavaThread*)THREAD;
    DTRACE_CLASSINIT_PROBE_WAIT(clinit, instanceKlass::cast(this_oop()), -1, wait);
    // Timer includes any side effects of class initialization (resolution,
    // etc), but not recursive entry into call_class_initializer().
    PerfClassTraceTime timer(ClassLoader::perf_class_init_time(),
                             ClassLoader::perf_class_init_selftime(),
                             ClassLoader::perf_classes_inited(),
                             jt->get_thread_stat()->perf_recursion_counts_addr(),
                             jt->get_thread_stat()->perf_timers_addr(),
                             PerfClassTraceTime::CLASS_CLINIT);
    this_oop->call_class_initializer(THREAD);
  }

  // Step 9
  if (!HAS_PENDING_EXCEPTION) {
    this_oop->set_initialization_state_and_notify(fully_initialized, CHECK);
    { ResourceMark rm(THREAD);
      debug_only(this_oop->vtable()->verify(tty, true);)
    }
  }
  else {
    // Step 10 and 11
    Handle e(THREAD, PENDING_EXCEPTION);
    CLEAR_PENDING_EXCEPTION;
    {
      EXCEPTION_MARK;
      this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
      CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, class initialization error is thrown below
    }
    DTRACE_CLASSINIT_PROBE_WAIT(error, instanceKlass::cast(this_oop()), -1, wait);
    if (e->is_a(SystemDictionary::Error_klass())) {
      THROW_OOP(e());
    } else {
      JavaCallArguments args(e);
      THROW_ARG(vmSymbols::java_lang_ExceptionInInitializerError(),
                vmSymbols::throwable_void_signature(),
                &args);
    }
  }
  DTRACE_CLASSINIT_PROBE_WAIT(end, instanceKlass::cast(this_oop()), -1, wait);
}


// Note: implementation moved to static method to expose the this pointer.
void instanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
  instanceKlassHandle kh(THREAD, this->as_klassOop());
  set_initialization_state_and_notify_impl(kh, state, CHECK);
}

void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
  ObjectLocker ol(this_oop, THREAD);
  this_oop->set_init_state(state);
  ol.notify_all(CHECK);
}
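
// Sketch of the monitor handshake implemented by Steps 1-2 and by
// set_initialization_state_and_notify() above (the class object's
// ObjectLocker is the only synchronization involved):
//
//   initializing thread                       racing thread
//   -------------------                       -------------
//   lock; state = being_initialized; unlock   lock; while (being_initialized
//   run <clinit> outside the lock                 && !reentrant) waitUninterruptibly
//   lock; state = fully_initialized           woken: re-check state, then return,
//   notify_all; unlock                            wait again, or throw (Steps 3-5)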

void instanceKlass::add_implementor(klassOop k) {
  assert(Compile_lock->owned_by_self(), "");
  // Filter out my subinterfaces.
  // (Note: Interfaces are never on the subklass list.)
  if (instanceKlass::cast(k)->is_interface()) return;

  // Filter out subclasses whose supers already implement me.
  // (Note: CHA must walk subclasses of direct implementors
  // in order to locate indirect implementors.)
  klassOop sk = instanceKlass::cast(k)->super();
  if (sk != NULL && instanceKlass::cast(sk)->implements_interface(as_klassOop()))
    // We only need to check one immediate superclass, since the
    // implements_interface query looks at transitive_interfaces.
    // Any supers of the super have the same (or fewer) transitive_interfaces.
    return;

  // Update number of implementors
  int i = _nof_implementors++;

  // Record this implementor, if there are not too many already
  if (i < implementors_limit) {
    assert(_implementors[i] == NULL, "should be exactly one implementor");
    oop_store_without_check((oop*)&_implementors[i], k);
  } else if (i == implementors_limit) {
    // clear out the list on first overflow
    for (int i2 = 0; i2 < implementors_limit; i2++)
      oop_store_without_check((oop*)&_implementors[i2], NULL);
  }

  // The implementor also implements the transitive_interfaces
  for (int index = 0; index < local_interfaces()->length(); index++) {
    instanceKlass::cast(klassOop(local_interfaces()->obj_at(index)))->add_implementor(k);
  }
}

void instanceKlass::init_implementor() {
  for (int i = 0; i < implementors_limit; i++)
    oop_store_without_check((oop*)&_implementors[i], NULL);
  _nof_implementors = 0;
}


void instanceKlass::process_interfaces(Thread *thread) {
  // link this class into the implementors list of every interface it implements
  KlassHandle this_as_oop (thread, this->as_klassOop());
  for (int i = local_interfaces()->length() - 1; i >= 0; i--) {
    assert(local_interfaces()->obj_at(i)->is_klass(), "must be a klass");
    instanceKlass* interf = instanceKlass::cast(klassOop(local_interfaces()->obj_at(i)));
    assert(interf->is_interface(), "expected interface");
    interf->add_implementor(this_as_oop());
  }
}

bool instanceKlass::can_be_primary_super_slow() const {
  if (is_interface())
    return false;
  else
    return Klass::can_be_primary_super_slow();
}

objArrayOop instanceKlass::compute_secondary_supers(int num_extra_slots, TRAPS) {
  // The secondaries are the implemented interfaces.
  instanceKlass* ik = instanceKlass::cast(as_klassOop());
  objArrayHandle interfaces (THREAD, ik->transitive_interfaces());
  int num_secondaries = num_extra_slots + interfaces->length();
  if (num_secondaries == 0) {
    return Universe::the_empty_system_obj_array();
  } else if (num_extra_slots == 0) {
    return interfaces();
  } else {
    // a mix of both
    objArrayOop secondaries = oopFactory::new_system_objArray(num_secondaries, CHECK_NULL);
    for (int i = 0; i < interfaces->length(); i++) {
      secondaries->obj_at_put(num_extra_slots+i, interfaces->obj_at(i));
    }
    return secondaries;
  }
}

bool instanceKlass::compute_is_subtype_of(klassOop k) {
  if (Klass::cast(k)->is_interface()) {
    return implements_interface(k);
  } else {
    return Klass::compute_is_subtype_of(k);
  }
}

bool instanceKlass::implements_interface(klassOop k) const {
  if (as_klassOop() == k) return true;
  assert(Klass::cast(k)->is_interface(), "should be an interface class");
  for (int i = 0; i < transitive_interfaces()->length(); i++) {
    if (transitive_interfaces()->obj_at(i) == k) {
      return true;
    }
  }
  return false;
}

objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) {
  if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
  if (length > arrayOopDesc::max_array_length(T_OBJECT)) {
    report_java_out_of_memory("Requested array size exceeds VM limit");
    THROW_OOP_0(Universe::out_of_memory_error_array_size());
  }
  int size = objArrayOopDesc::object_size(length);
  klassOop ak = array_klass(n, CHECK_NULL);
  KlassHandle h_ak (THREAD, ak);
  objArrayOop o =
    (objArrayOop)CollectedHeap::array_allocate(h_ak, size, length, CHECK_NULL);
  return o;
}

instanceOop instanceKlass::register_finalizer(instanceOop i, TRAPS) {
  if (TraceFinalizerRegistration) {
    tty->print("Registered ");
    i->print_value_on(tty);
    tty->print_cr(" (" INTPTR_FORMAT ") as finalizable", (address)i);
  }
  instanceHandle h_i(THREAD, i);
  // Pass the handle as argument, JavaCalls::call expects oops as jobjects
  JavaValue result(T_VOID);
  JavaCallArguments args(h_i);
  methodHandle mh (THREAD, Universe::finalizer_register_method());
  JavaCalls::call(&result, mh, &args, CHECK_NULL);
  return h_i();
}

instanceOop instanceKlass::allocate_instance(TRAPS) {
  assert(!oop_is_instanceMirror(), "wrong allocation path");
  bool has_finalizer_flag = has_finalizer(); // Query before possible GC
  int size = size_helper();  // Query before forming handle.

  KlassHandle h_k(THREAD, as_klassOop());

  instanceOop i;

  i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
  if (has_finalizer_flag && !RegisterFinalizersAtInit) {
    i = register_finalizer(i, CHECK_NULL);
  }
  return i;
}
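
// Note on the finalizer path above: assuming the default
// -XX:+RegisterFinalizersAtInit, Finalizer.register() is invoked from the
// Object.<init> constructor instead, so allocate_instance() stays
// finalizer-free; only with -XX:-RegisterFinalizersAtInit does the
// allocation path itself call register_finalizer() right after
// obj_allocate(), as coded above.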

instanceOop instanceKlass::allocate_permanent_instance(TRAPS) {
  // Finalizer registration occurs in the Object.<init> constructor
  // and constructors normally aren't run when allocating perm
  // instances so simply disallow finalizable perm objects.  This can
  // be relaxed if a need for it is found.
  assert(!has_finalizer(), "perm objects not allowed to have finalizers");
  assert(!oop_is_instanceMirror(), "wrong allocation path");
  int size = size_helper();  // Query before forming handle.
  KlassHandle h_k(THREAD, as_klassOop());
  instanceOop i = (instanceOop)
    CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL);
  return i;
}

void instanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
  if (is_interface() || is_abstract()) {
    ResourceMark rm(THREAD);
    THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
              : vmSymbols::java_lang_InstantiationException(), external_name());
  }
  if (as_klassOop() == SystemDictionary::Class_klass()) {
    ResourceMark rm(THREAD);
    THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError()
              : vmSymbols::java_lang_IllegalAccessException(), external_name());
  }
}

klassOop instanceKlass::array_klass_impl(bool or_null, int n, TRAPS) {
  instanceKlassHandle this_oop(THREAD, as_klassOop());
  return array_klass_impl(this_oop, or_null, n, THREAD);
}

klassOop instanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {
  if (this_oop->array_klasses() == NULL) {
    if (or_null) return NULL;

    ResourceMark rm;
    JavaThread *jt = (JavaThread *)THREAD;
    {
      // Atomic creation of array_klasses
      MutexLocker mc(Compile_lock, THREAD);   // for vtables
      MutexLocker ma(MultiArray_lock, THREAD);

      // Check if update has already taken place
      if (this_oop->array_klasses() == NULL) {
        objArrayKlassKlass* oakk =
          (objArrayKlassKlass*)Universe::objArrayKlassKlassObj()->klass_part();

        klassOop k = oakk->allocate_objArray_klass(1, this_oop, CHECK_NULL);
        this_oop->set_array_klasses(k);
      }
    }
  }
  // _this will always be set at this point
  objArrayKlass* oak = (objArrayKlass*)this_oop->array_klasses()->klass_part();
  if (or_null) {
    return oak->array_klass_or_null(n);
  }
  return oak->array_klass(n, CHECK_NULL);
}

klassOop instanceKlass::array_klass_impl(bool or_null, TRAPS) {
  return array_klass_impl(or_null, 1, THREAD);
}

void instanceKlass::call_class_initializer(TRAPS) {
  instanceKlassHandle ik (THREAD, as_klassOop());
  call_class_initializer_impl(ik, THREAD);
}

static int call_class_initializer_impl_counter = 0;   // for debugging

methodOop instanceKlass::class_initializer() {
  methodOop clinit = find_method(
      vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
  if (clinit != NULL && clinit->has_valid_initializer_flags()) {
    return clinit;
  }
  return NULL;
}

void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
  methodHandle h_method(THREAD, this_oop->class_initializer());
  assert(!this_oop->is_initialized(), "we cannot initialize twice");
  if (TraceClassInitialization) {
    tty->print("%d Initializing ", call_class_initializer_impl_counter++);
    this_oop->name()->print_value();
    tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop());
  }
  if (h_method() != NULL) {
    JavaCallArguments args; // No arguments
    JavaValue result(T_VOID);
    JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
  }
}
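
// mask_for() below uses the classic double-checked locking shape. That is
// safe here because _oop_map_cache only ever transitions NULL -> non-NULL and
// the OopMapCache is fully constructed before the pointer is published:
//
//   if (_oop_map_cache == NULL) {            // dirty read, no lock
//     MutexLocker x(OopMapCacheAlloc_lock);
//     if (_oop_map_cache == NULL) {          // re-check under the lock
//       _oop_map_cache = new OopMapCache();
//     }
//   }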

void instanceKlass::mask_for(methodHandle method, int bci,
  InterpreterOopMap* entry_for) {
  // Dirty read, then double-check under a lock.
  if (_oop_map_cache == NULL) {
    // Otherwise, allocate a new one.
    MutexLocker x(OopMapCacheAlloc_lock);
    // First time use. Allocate a cache in C heap
    if (_oop_map_cache == NULL) {
      _oop_map_cache = new OopMapCache();
    }
  }
  // _oop_map_cache is constant after init; lookup below does its own locking.
  _oop_map_cache->lookup(method, bci, entry_for);
}


bool instanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
  const int n = fields()->length();
  for (int i = 0; i < n; i += next_offset ) {
    int name_index = fields()->ushort_at(i + name_index_offset);
    int sig_index  = fields()->ushort_at(i + signature_index_offset);
    Symbol* f_name = constants()->symbol_at(name_index);
    Symbol* f_sig  = constants()->symbol_at(sig_index);
    if (f_name == name && f_sig == sig) {
      fd->initialize(as_klassOop(), i);
      return true;
    }
  }
  return false;
}


void instanceKlass::shared_symbols_iterate(SymbolClosure* closure) {
  Klass::shared_symbols_iterate(closure);
  closure->do_symbol(&_generic_signature);
  closure->do_symbol(&_source_file_name);
  closure->do_symbol(&_source_debug_extension);

  const int n = fields()->length();
  for (int i = 0; i < n; i += next_offset ) {
    int name_index = fields()->ushort_at(i + name_index_offset);
    closure->do_symbol(constants()->symbol_at_addr(name_index));
    int sig_index  = fields()->ushort_at(i + signature_index_offset);
    closure->do_symbol(constants()->symbol_at_addr(sig_index));
  }
}


klassOop instanceKlass::find_interface_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
  const int n = local_interfaces()->length();
  for (int i = 0; i < n; i++) {
    klassOop intf1 = klassOop(local_interfaces()->obj_at(i));
    assert(Klass::cast(intf1)->is_interface(), "just checking type");
    // search for field in current interface
    if (instanceKlass::cast(intf1)->find_local_field(name, sig, fd)) {
      assert(fd->is_static(), "interface field must be static");
      return intf1;
    }
    // search for field in direct superinterfaces
    klassOop intf2 = instanceKlass::cast(intf1)->find_interface_field(name, sig, fd);
    if (intf2 != NULL) return intf2;
  }
  // otherwise field lookup fails
  return NULL;
}


klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
  // search order according to newest JVM spec (5.4.3.2, p.167).
  // 1) search for field in current klass
  if (find_local_field(name, sig, fd)) {
    return as_klassOop();
  }
  // 2) search for field recursively in direct superinterfaces
  { klassOop intf = find_interface_field(name, sig, fd);
    if (intf != NULL) return intf;
  }
  // 3) apply field lookup recursively if superclass exists
  { klassOop supr = super();
    if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, fd);
  }
  // 4) otherwise field lookup fails
  return NULL;
}
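
// Worked example of the order above, with hypothetical classes for
// illustration: given interface I declaring a (static) field F and
// class C extends S implements I, where S also declares F, find_field(F)
// on C returns I's klassOop rather than S's -- direct superinterfaces are
// searched before the superclass, per JVMS 5.4.3.2.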

klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, bool is_static, fieldDescriptor* fd) const {
  // search order according to newest JVM spec (5.4.3.2, p.167).
  // 1) search for field in current klass
  if (find_local_field(name, sig, fd)) {
    if (fd->is_static() == is_static) return as_klassOop();
  }
  // 2) search for field recursively in direct superinterfaces
  if (is_static) {
    klassOop intf = find_interface_field(name, sig, fd);
    if (intf != NULL) return intf;
  }
  // 3) apply field lookup recursively if superclass exists
  { klassOop supr = super();
    if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, is_static, fd);
  }
  // 4) otherwise field lookup fails
  return NULL;
}


bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
  int length = fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    if (offset_from_fields( i ) == offset) {
      fd->initialize(as_klassOop(), i);
      if (fd->is_static() == is_static) return true;
    }
  }
  return false;
}


bool instanceKlass::find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
  klassOop klass = as_klassOop();
  while (klass != NULL) {
    if (instanceKlass::cast(klass)->find_local_field_from_offset(offset, is_static, fd)) {
      return true;
    }
    klass = Klass::cast(klass)->super();
  }
  return false;
}


void instanceKlass::methods_do(void f(methodOop method)) {
  int len = methods()->length();
  for (int index = 0; index < len; index++) {
    methodOop m = methodOop(methods()->obj_at(index));
    assert(m->is_method(), "must be method");
    f(m);
  }
}


void instanceKlass::do_local_static_fields(FieldClosure* cl) {
  fieldDescriptor fd;
  int length = fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(as_klassOop(), i);
    if (fd.is_static()) cl->do_field(&fd);
  }
}


void instanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
  instanceKlassHandle h_this(THREAD, as_klassOop());
  do_local_static_fields_impl(h_this, f, CHECK);
}


void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
  fieldDescriptor fd;
  int length = this_oop->fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(this_oop(), i);
    if (fd.is_static()) { f(&fd, CHECK); } // Do NOT remove {}! (CHECK macro expands into several statements)
  }
}


static int compare_fields_by_offset(int* a, int* b) {
  return a[0] - b[0];
}

void instanceKlass::do_nonstatic_fields(FieldClosure* cl) {
  instanceKlass* super = superklass();
  if (super != NULL) {
    super->do_nonstatic_fields(cl);
  }
  fieldDescriptor fd;
  int length = fields()->length();
  // In DebugInfo nonstatic fields are sorted by offset.
  int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1));
  int j = 0;
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(as_klassOop(), i);
    if (!fd.is_static()) {
      fields_sorted[j + 0] = fd.offset();
      fields_sorted[j + 1] = i;
      j += 2;
    }
  }
  if (j > 0) {
    length = j;
    // _sort_Fn is defined in growableArray.hpp.
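    // fields_sorted is a flat array of (offset, fields-array index) pairs,
    // i.e. [off0, idx0, off1, idx1, ...]; qsort below treats each pair as
    // one element of size 2*sizeof(int) and orders pairs by offset
    // (compare_fields_by_offset only reads a[0]).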
    qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
    for (int i = 0; i < length; i += 2) {
      fd.initialize(as_klassOop(), fields_sorted[i + 1]);
      assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
      cl->do_field(&fd);
    }
  }
  FREE_C_HEAP_ARRAY(int, fields_sorted);
}


void instanceKlass::array_klasses_do(void f(klassOop k)) {
  if (array_klasses() != NULL)
    arrayKlass::cast(array_klasses())->array_klasses_do(f);
}


void instanceKlass::with_array_klasses_do(void f(klassOop k)) {
  f(as_klassOop());
  array_klasses_do(f);
}

#ifdef ASSERT
static int linear_search(objArrayOop methods, Symbol* name, Symbol* signature) {
  int len = methods->length();
  for (int index = 0; index < len; index++) {
    methodOop m = (methodOop)(methods->obj_at(index));
    assert(m->is_method(), "must be method");
    if (m->signature() == signature && m->name() == name) {
      return index;
    }
  }
  return -1;
}
#endif

methodOop instanceKlass::find_method(Symbol* name, Symbol* signature) const {
  return instanceKlass::find_method(methods(), name, signature);
}

methodOop instanceKlass::find_method(objArrayOop methods, Symbol* name, Symbol* signature) {
  int len = methods->length();
  // methods are sorted, so do binary search
  int l = 0;
  int h = len - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    methodOop m = (methodOop)methods->obj_at(mid);
    assert(m->is_method(), "must be method");
    int res = m->name()->fast_compare(name);
    if (res == 0) {
      // found matching name; do linear search to find matching signature
      // first, quick check for common case
      if (m->signature() == signature) return m;
      // search downwards through overloaded methods
      int i;
      for (i = mid - 1; i >= l; i--) {
        methodOop m = (methodOop)methods->obj_at(i);
        assert(m->is_method(), "must be method");
        if (m->name() != name) break;
        if (m->signature() == signature) return m;
      }
      // search upwards
      for (i = mid + 1; i <= h; i++) {
        methodOop m = (methodOop)methods->obj_at(i);
        assert(m->is_method(), "must be method");
        if (m->name() != name) break;
        if (m->signature() == signature) return m;
      }
      // not found
#ifdef ASSERT
      int index = linear_search(methods, name, signature);
      assert(index == -1, err_msg("binary search should have found entry %d", index));
#endif
      return NULL;
    } else if (res < 0) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
#ifdef ASSERT
  int index = linear_search(methods, name, signature);
  assert(index == -1, err_msg("binary search should have found entry %d", index));
#endif
  return NULL;
}

methodOop instanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
  klassOop klass = as_klassOop();
  while (klass != NULL) {
    methodOop method = instanceKlass::cast(klass)->find_method(name, signature);
    if (method != NULL) return method;
    klass = instanceKlass::cast(klass)->super();
  }
  return NULL;
}
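
// Taken together: uncached_lookup_method() above walks the superclass chain
// applying the per-class binary search in find_method(), and
// lookup_method_in_all_interfaces() below is the separate scan over
// transitive_interfaces() -- e.g. resolving "m()V" against class C probes C,
// then C's superclasses, and only then the interfaces C implements.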

// lookup a method in all the interfaces that this class implements
methodOop instanceKlass::lookup_method_in_all_interfaces(Symbol* name,
                                                         Symbol* signature) const {
  objArrayOop all_ifs = instanceKlass::cast(as_klassOop())->transitive_interfaces();
  int num_ifs = all_ifs->length();
  instanceKlass *ik = NULL;
  for (int i = 0; i < num_ifs; i++) {
    ik = instanceKlass::cast(klassOop(all_ifs->obj_at(i)));
    methodOop m = ik->lookup_method(name, signature);
    if (m != NULL) {
      return m;
    }
  }
  return NULL;
}

/* jni_id_for_impl for jfieldIds only */
JNIid* instanceKlass::jni_id_for_impl(instanceKlassHandle this_oop, int offset) {
  MutexLocker ml(JfieldIdCreation_lock);
  // Retry lookup after we got the lock
  JNIid* probe = this_oop->jni_ids() == NULL ? NULL : this_oop->jni_ids()->find(offset);
  if (probe == NULL) {
    // Slow case, allocate new static field identifier
    probe = new JNIid(this_oop->as_klassOop(), offset, this_oop->jni_ids());
    this_oop->set_jni_ids(probe);
  }
  return probe;
}


/* jni_id_for for jfieldIds only */
JNIid* instanceKlass::jni_id_for(int offset) {
  JNIid* probe = jni_ids() == NULL ? NULL : jni_ids()->find(offset);
  if (probe == NULL) {
    probe = jni_id_for_impl(this->as_klassOop(), offset);
  }
  return probe;
}


// Lookup or create a jmethodID.
// This code is called by the VMThread and JavaThreads so the
// locking has to be done very carefully to avoid deadlocks
// and/or other cache consistency problems.
//
jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h) {
  size_t idnum = (size_t)method_h->method_idnum();
  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
  size_t length = 0;
  jmethodID id = NULL;

  // We use a double-check locking idiom here because this cache is
  // performance sensitive. In the normal system, this cache only
  // transitions from NULL to non-NULL which is safe because we use
  // release_set_methods_jmethod_ids() to advertise the new cache.
  // A partially constructed cache should never be seen by a racing
  // thread. We also use release_store_ptr() to save a new jmethodID
  // in the cache so a partially constructed jmethodID should never be
  // seen either. Cache reads of existing jmethodIDs proceed without a
  // lock, but cache writes of a new jmethodID require uniqueness and
  // creation of the cache itself requires no leaks so a lock is
  // generally acquired in those two cases.
  //
  // If the RedefineClasses() API has been used, then this cache can
  // grow and we'll have transitions from non-NULL to bigger non-NULL.
  // Cache creation requires no leaks and we require safety between all
  // cache accesses and freeing of the old cache so a lock is generally
  // acquired when the RedefineClasses() API has been used.
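  //
  // The ordering that makes the lock-free read safe, roughly:
  //
  //   writer                                  reader
  //   ------                                  ------
  //   fill in new_jmeths[]                    jmeths = methods_jmethod_ids_acquire();
  //   release_set_methods_jmethod_ids(...)    if (jmeths != NULL) read jmeths[idnum+1]
  //
  // The release on publication pairs with the acquire on load, so a reader
  // that sees the new cache pointer also sees its initialized contents.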

  if (jmeths != NULL) {
    // the cache already exists
    if (!ik_h->idnum_can_increment()) {
      // the cache can't grow so we can just get the current values
      get_jmethod_id_length_value(jmeths, idnum, &length, &id);
    } else {
      // cache can grow so we have to be more careful
      if (Threads::number_of_threads() == 0 ||
          SafepointSynchronize::is_at_safepoint()) {
        // we're single threaded or at a safepoint - no locking needed
        get_jmethod_id_length_value(jmeths, idnum, &length, &id);
      } else {
        MutexLocker ml(JmethodIdCreation_lock);
        get_jmethod_id_length_value(jmeths, idnum, &length, &id);
      }
    }
  }
  // implied else:
  // we need to allocate a cache so default length and id values are good

  if (jmeths == NULL ||   // no cache yet
      length <= idnum ||  // cache is too short
      id == NULL) {       // cache doesn't contain entry

    // This function can be called by the VMThread so we have to do all
    // things that might block on a safepoint before grabbing the lock.
    // Otherwise, we can deadlock with the VMThread or have a cache
    // consistency issue. These vars keep track of what we might have
    // to free after the lock is dropped.
    jmethodID to_dealloc_id = NULL;
    jmethodID* to_dealloc_jmeths = NULL;

    // may not allocate new_jmeths or use it if we allocate it
    jmethodID* new_jmeths = NULL;
    if (length <= idnum) {
      // allocate a new cache that might be used
      size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
      new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
      memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
      // cache size is stored in element[0], other elements offset by one
      new_jmeths[0] = (jmethodID)size;
    }

    // allocate a new jmethodID that might be used
    jmethodID new_id = NULL;
    if (method_h->is_old() && !method_h->is_obsolete()) {
      // The method passed in is old (but not obsolete), we need to use the current version
      methodOop current_method = ik_h->method_with_idnum((int)idnum);
      assert(current_method != NULL, "old but not obsolete, so should exist");
      methodHandle current_method_h(current_method == NULL? method_h() : current_method);
      new_id = JNIHandles::make_jmethod_id(current_method_h);
    } else {
      // It is the current version of the method or an obsolete method,
      // use the version passed in
      new_id = JNIHandles::make_jmethod_id(method_h);
    }

    if (Threads::number_of_threads() == 0 ||
        SafepointSynchronize::is_at_safepoint()) {
      // we're single threaded or at a safepoint - no locking needed
      id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
                                          &to_dealloc_id, &to_dealloc_jmeths);
    } else {
      MutexLocker ml(JmethodIdCreation_lock);
      id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
                                          &to_dealloc_id, &to_dealloc_jmeths);
    }

    // The lock has been dropped so we can free resources.
    // Free up either the old cache or the new cache if we allocated one.
    if (to_dealloc_jmeths != NULL) {
      FreeHeap(to_dealloc_jmeths);
    }
    // free up the new ID since it wasn't needed
    if (to_dealloc_id != NULL) {
      JNIHandles::destroy_jmethod_id(to_dealloc_id);
    }
  }
  return id;
}


// Common code to fetch the jmethodID from the cache or update the
// cache with the new jmethodID.  This function should never do anything
// that causes the caller to go to a safepoint or we can deadlock with
// the VMThread or have cache consistency issues.
//
jmethodID instanceKlass::get_jmethod_id_fetch_or_update(
            instanceKlassHandle ik_h, size_t idnum, jmethodID new_id,
            jmethodID* new_jmeths, jmethodID* to_dealloc_id_p,
            jmethodID** to_dealloc_jmeths_p) {
  assert(new_id != NULL, "sanity check");
  assert(to_dealloc_id_p != NULL, "sanity check");
  assert(to_dealloc_jmeths_p != NULL, "sanity check");
  assert(Threads::number_of_threads() == 0 ||
         SafepointSynchronize::is_at_safepoint() ||
         JmethodIdCreation_lock->owned_by_self(), "sanity check");

  // reacquire the cache - we are locked, single threaded or at a safepoint
  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
  jmethodID id = NULL;
  size_t length = 0;

  if (jmeths == NULL ||                         // no cache yet
      (length = (size_t)jmeths[0]) <= idnum) {  // cache is too short
    if (jmeths != NULL) {
      // copy any existing entries from the old cache
      for (size_t index = 0; index < length; index++) {
        new_jmeths[index+1] = jmeths[index+1];
      }
      *to_dealloc_jmeths_p = jmeths;  // save old cache for later delete
    }
    ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
  } else {
    // fetch jmethodID (if any) from the existing cache
    id = jmeths[idnum+1];
    *to_dealloc_jmeths_p = new_jmeths;  // save new cache for later delete
  }
  if (id == NULL) {
    // No matching jmethodID in the existing cache or we have a new
    // cache or we just grew the cache. This cache write is done here
    // by the first thread to win the foot race because a jmethodID
    // needs to be unique once it is generally available.
    id = new_id;

    // The jmethodID cache can be read while unlocked so we have to
    // make sure the new jmethodID is complete before installing it
    // in the cache.
    OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
  } else {
    *to_dealloc_id_p = new_id;  // save new id for later delete
  }
  return id;
}


// Common code to get the jmethodID cache length and the jmethodID
// value at index idnum if there is one.
//
void instanceKlass::get_jmethod_id_length_value(jmethodID* cache,
       size_t idnum, size_t *length_p, jmethodID* id_p) {
  assert(cache != NULL, "sanity check");
  assert(length_p != NULL, "sanity check");
  assert(id_p != NULL, "sanity check");

  // cache size is stored in element[0], other elements offset by one
  *length_p = (size_t)cache[0];
  if (*length_p <= idnum) {  // cache is too short
    *id_p = NULL;
  } else {
    *id_p = cache[idnum+1];  // fetch jmethodID (if any)
  }
}
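
// Layout of the jmethodID cache read above (the itable-index cache below
// uses the same shape with ints): element[0] holds the capacity and the
// entry for method idnum i lives at [i+1], so a cache of size 3 looks like
//
//   index:  [0]  [1]   [2]   [3]
//   value:   3   id_0  id_1  id_2    (entries are NULL until created)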

// Lookup a jmethodID, NULL if not found.  Do no blocking, no allocations, no handles
jmethodID instanceKlass::jmethod_id_or_null(methodOop method) {
  size_t idnum = (size_t)method->method_idnum();
  jmethodID* jmeths = methods_jmethod_ids_acquire();
  size_t length;                                // length assigned as debugging crumb
  jmethodID id = NULL;
  if (jmeths != NULL &&                         // If there is a cache
      (length = (size_t)jmeths[0]) > idnum) {   // and if it is long enough,
    id = jmeths[idnum+1];                       // Look up the id (may be NULL)
  }
  return id;
}


// Cache an itable index
void instanceKlass::set_cached_itable_index(size_t idnum, int index) {
  int* indices = methods_cached_itable_indices_acquire();
  int* to_dealloc_indices = NULL;

  // We use a double-check locking idiom here because this cache is
  // performance sensitive. In the normal system, this cache only
  // transitions from NULL to non-NULL which is safe because we use
  // release_set_methods_cached_itable_indices() to advertise the
  // new cache. A partially constructed cache should never be seen
  // by a racing thread. Cache reads and writes proceed without a
  // lock, but creation of the cache itself requires no leaks so a
  // lock is generally acquired in that case.
  //
  // If the RedefineClasses() API has been used, then this cache can
  // grow and we'll have transitions from non-NULL to bigger non-NULL.
  // Cache creation requires no leaks and we require safety between all
  // cache accesses and freeing of the old cache so a lock is generally
  // acquired when the RedefineClasses() API has been used.

  if (indices == NULL || idnum_can_increment()) {
    // we need a cache or the cache can grow
    MutexLocker ml(JNICachedItableIndex_lock);
    // reacquire the cache to see if another thread already did the work
    indices = methods_cached_itable_indices_acquire();
    size_t length = 0;
    // cache size is stored in element[0], other elements offset by one
    if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
      size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
      int* new_indices = NEW_C_HEAP_ARRAY(int, size+1);
      new_indices[0] = (int)size;
      // copy any existing entries
      size_t i;
      for (i = 0; i < length; i++) {
        new_indices[i+1] = indices[i+1];
      }
      // Set all the rest to -1
      for (i = length; i < size; i++) {
        new_indices[i+1] = -1;
      }
      if (indices != NULL) {
        // We have an old cache to delete so save it for after we
        // drop the lock.
        to_dealloc_indices = indices;
      }
      release_set_methods_cached_itable_indices(indices = new_indices);
    }

    if (idnum_can_increment()) {
      // this cache can grow so we have to write to it safely
      indices[idnum+1] = index;
    }
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }

  if (!idnum_can_increment()) {
    // The cache cannot grow and this JNI itable index value does not
    // have to be unique like a jmethodID. If there is a race to set it,
    // it doesn't matter.
    indices[idnum+1] = index;
  }

  if (to_dealloc_indices != NULL) {
    // we allocated a new cache so free the old one
    FreeHeap(to_dealloc_indices);
  }
}


// Retrieve a cached itable index
int instanceKlass::cached_itable_index(size_t idnum) {
  int* indices = methods_cached_itable_indices_acquire();
  if (indices != NULL && ((size_t)indices[0]) > idnum) {
    // indices exist and are long enough, retrieve possible cached
    return indices[idnum+1];
  }
  return -1;
}


//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the changes that were passed in and mark them for
// deoptimization.  Returns the number of nmethods found.
//
int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
  assert_locked_or_safepoint(CodeCache_lock);
  int found = 0;
  nmethodBucket* b = _dependencies;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    // since dependencies aren't removed until an nmethod becomes a zombie,
    // the dependency list may contain nmethods which aren't alive.
    if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
      if (TraceDependencies) {
        ResourceMark rm;
        tty->print_cr("Marked for deoptimization");
        tty->print_cr("  context = %s", this->external_name());
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
      nm->mark_for_deoptimization();
      found++;
    }
    b = b->next();
  }
  return found;
}


//
// Add an nmethodBucket to the list of dependencies for this nmethod.
// It's possible that an nmethod has multiple dependencies on this klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent.
//
void instanceKlass::add_dependent_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* b = _dependencies;
  nmethodBucket* last = NULL;
  while (b != NULL) {
    if (nm == b->get_nmethod()) {
      b->increment();
      return;
    }
    b = b->next();
  }
  _dependencies = new nmethodBucket(nm, _dependencies);
}
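
// Conceptually, _dependencies is a singly linked list of refcounted buckets,
// prepended to on first registration and unlinked only when the count drops
// to zero in remove_dependent_nmethod() below:
//
//   _dependencies -> [nm_a, count=2] -> [nm_b, count=1] -> NULL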

//
// Decrement count of the nmethod in the dependency list and remove
// the bucket completely when the count goes to 0.  This method must
// find a corresponding bucket otherwise there's a bug in the
// recording of dependencies.
//
void instanceKlass::remove_dependent_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* b = _dependencies;
  nmethodBucket* last = NULL;
  while (b != NULL) {
    if (nm == b->get_nmethod()) {
      if (b->decrement() == 0) {
        if (last == NULL) {
          _dependencies = b->next();
        } else {
          last->set_next(b->next());
        }
        delete b;
      }
      return;
    }
    last = b;
    b = b->next();
  }
#ifdef ASSERT
  tty->print_cr("### %s can't find dependent nmethod:", this->external_name());
  nm->print();
#endif // ASSERT
  ShouldNotReachHere();
}


#ifndef PRODUCT
void instanceKlass::print_dependent_nmethods(bool verbose) {
  nmethodBucket* b = _dependencies;
  int idx = 0;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    tty->print("[%d] count=%d { ", idx++, b->count());
    if (!verbose) {
      nm->print_on(tty, "nmethod");
      tty->print_cr(" } ");
    } else {
      nm->print();
      nm->print_dependencies();
      tty->print_cr("--- } ");
    }
    b = b->next();
  }
}


bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
  nmethodBucket* b = _dependencies;
  while (b != NULL) {
    if (nm == b->get_nmethod()) {
      return true;
    }
    b = b->next();
  }
  return false;
}
#endif //PRODUCT


#ifdef ASSERT
template <class T> void assert_is_in(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in(o), "should be in heap");
  }
}
template <class T> void assert_is_in_closed_subset(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
  }
}
template <class T> void assert_is_in_reserved(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
  }
}
template <class T> void assert_nothing(T *p) {}

#else
template <class T> void assert_is_in(T *p) {}
template <class T> void assert_is_in_closed_subset(T *p) {}
template <class T> void assert_is_in_reserved(T *p) {}
template <class T> void assert_nothing(T *p) {}
#endif // ASSERT

//
// Macros that iterate over areas of oops which are specialized on type of
// oop pointer either narrow or wide, depending on UseCompressedOops
//
// Parameters are:
//   T         - type of oop to point to (either oop or narrowOop)
//   start_p   - starting pointer for region to iterate over
//   count     - number of oops or narrowOops to iterate over
//   do_oop    - action to perform on each oop (it's arbitrary C code which
//               makes it more efficient to put in a macro rather than making
//               it a template function)
//   assert_fn - assert function which is template function because performance
//               doesn't matter when enabled.
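//
// For illustration, InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, base,
// cnt, closure->do_oop(p), assert_is_in) expands to roughly:
//
//   narrowOop* p         = (narrowOop*)(base);
//   narrowOop* const end = p + (cnt);
//   while (p < end) { assert_is_in(p); closure->do_oop(p); ++p; }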
1538 #define InstanceKlass_SPECIALIZED_OOP_ITERATE( \ 1539 T, start_p, count, do_oop, \ 1540 assert_fn) \ 1541 { \ 1542 T* p = (T*)(start_p); \ 1543 T* const end = p + (count); \ 1544 while (p < end) { \ 1545 (assert_fn)(p); \ 1546 do_oop; \ 1547 ++p; \ 1548 } \ 1549 } 1550 1551 #define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \ 1552 T, start_p, count, do_oop, \ 1553 assert_fn) \ 1554 { \ 1555 T* const start = (T*)(start_p); \ 1556 T* p = start + (count); \ 1557 while (start < p) { \ 1558 --p; \ 1559 (assert_fn)(p); \ 1560 do_oop; \ 1561 } \ 1562 } 1563 1564 #define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \ 1565 T, start_p, count, low, high, \ 1566 do_oop, assert_fn) \ 1567 { \ 1568 T* const l = (T*)(low); \ 1569 T* const h = (T*)(high); \ 1570 assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \ 1571 mask_bits((intptr_t)h, sizeof(T)-1) == 0, \ 1572 "bounded region must be properly aligned"); \ 1573 T* p = (T*)(start_p); \ 1574 T* end = p + (count); \ 1575 if (p < l) p = l; \ 1576 if (end > h) end = h; \ 1577 while (p < end) { \ 1578 (assert_fn)(p); \ 1579 do_oop; \ 1580 ++p; \ 1581 } \ 1582 } 1583 1584 1585 // The following macros call specialized macros, passing either oop or 1586 // narrowOop as the specialization type. These test the UseCompressedOops 1587 // flag. 1588 #define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \ 1589 { \ 1590 /* Compute oopmap block range. The common case \ 1591 is nonstatic_oop_map_size == 1. */ \ 1592 OopMapBlock* map = start_of_nonstatic_oop_maps(); \ 1593 OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \ 1594 if (UseCompressedOops) { \ 1595 while (map < end_map) { \ 1596 InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ 1597 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \ 1598 do_oop, assert_fn) \ 1599 ++map; \ 1600 } \ 1601 } else { \ 1602 while (map < end_map) { \ 1603 InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \ 1604 obj->obj_field_addr<oop>(map->offset()), map->count(), \ 1605 do_oop, assert_fn) \ 1606 ++map; \ 1607 } \ 1608 } \ 1609 } 1610 1611 #define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \ 1612 { \ 1613 OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); \ 1614 OopMapBlock* map = start_map + nonstatic_oop_map_count(); \ 1615 if (UseCompressedOops) { \ 1616 while (start_map < map) { \ 1617 --map; \ 1618 InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop, \ 1619 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \ 1620 do_oop, assert_fn) \ 1621 } \ 1622 } else { \ 1623 while (start_map < map) { \ 1624 --map; \ 1625 InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop, \ 1626 obj->obj_field_addr<oop>(map->offset()), map->count(), \ 1627 do_oop, assert_fn) \ 1628 } \ 1629 } \ 1630 } 1631 1632 #define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop, \ 1633 assert_fn) \ 1634 { \ 1635 /* Compute oopmap block range. The common case is \ 1636 nonstatic_oop_map_size == 1, so we accept the \ 1637 usually non-existent extra overhead of examining \ 1638 all the maps. 
*/ \ 1639 OopMapBlock* map = start_of_nonstatic_oop_maps(); \ 1640 OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \ 1641 if (UseCompressedOops) { \ 1642 while (map < end_map) { \ 1643 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ 1644 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \ 1645 low, high, \ 1646 do_oop, assert_fn) \ 1647 ++map; \ 1648 } \ 1649 } else { \ 1650 while (map < end_map) { \ 1651 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ 1652 obj->obj_field_addr<oop>(map->offset()), map->count(), \ 1653 low, high, \ 1654 do_oop, assert_fn) \ 1655 ++map; \ 1656 } \ 1657 } \ 1658 } 1659 1660 void instanceKlass::oop_follow_contents(oop obj) { 1661 assert(obj != NULL, "can't follow the content of NULL object"); 1662 obj->follow_header(); 1663 InstanceKlass_OOP_MAP_ITERATE( \ 1664 obj, \ 1665 MarkSweep::mark_and_push(p), \ 1666 assert_is_in_closed_subset) 1667 } 1668 1669 #ifndef SERIALGC 1670 void instanceKlass::oop_follow_contents(ParCompactionManager* cm, 1671 oop obj) { 1672 assert(obj != NULL, "can't follow the content of NULL object"); 1673 obj->follow_header(cm); 1674 InstanceKlass_OOP_MAP_ITERATE( \ 1675 obj, \ 1676 PSParallelCompact::mark_and_push(cm, p), \ 1677 assert_is_in) 1678 } 1679 #endif // SERIALGC 1680 1681 // closure's do_header() method dicates whether the given closure should be 1682 // applied to the klass ptr in the object header. 1683 1684 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ 1685 \ 1686 int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ 1687 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\ 1688 /* header */ \ 1689 if (closure->do_header()) { \ 1690 obj->oop_iterate_header(closure); \ 1691 } \ 1692 InstanceKlass_OOP_MAP_ITERATE( \ 1693 obj, \ 1694 SpecializationStats:: \ 1695 record_do_oop_call##nv_suffix(SpecializationStats::ik); \ 1696 (closure)->do_oop##nv_suffix(p), \ 1697 assert_is_in_closed_subset) \ 1698 return size_helper(); \ 1699 } 1700 1701 #ifndef SERIALGC 1702 #define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ 1703 \ 1704 int instanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, \ 1705 OopClosureType* closure) { \ 1706 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \ 1707 /* header */ \ 1708 if (closure->do_header()) { \ 1709 obj->oop_iterate_header(closure); \ 1710 } \ 1711 /* instance variables */ \ 1712 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \ 1713 obj, \ 1714 SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik);\ 1715 (closure)->do_oop##nv_suffix(p), \ 1716 assert_is_in_closed_subset) \ 1717 return size_helper(); \ 1718 } 1719 #endif // !SERIALGC 1720 1721 #define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ 1722 \ 1723 int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \ 1724 OopClosureType* closure, \ 1725 MemRegion mr) { \ 1726 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\ 1727 if (closure->do_header()) { \ 1728 obj->oop_iterate_header(closure, mr); \ 1729 } \ 1730 InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \ 1731 obj, mr.start(), mr.end(), \ 1732 (closure)->do_oop##nv_suffix(p), \ 1733 assert_is_in_closed_subset) \ 1734 return size_helper(); \ 1735 } 1736 1737 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN) 1738 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN) 1739 
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m) 1740 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m) 1741 #ifndef SERIALGC 1742 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) 1743 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) 1744 #endif // !SERIALGC 1745 1746 int instanceKlass::oop_adjust_pointers(oop obj) { 1747 int size = size_helper(); 1748 InstanceKlass_OOP_MAP_ITERATE( \ 1749 obj, \ 1750 MarkSweep::adjust_pointer(p), \ 1751 assert_is_in) 1752 obj->adjust_header(); 1753 return size; 1754 } 1755 1756 #ifndef SERIALGC 1757 void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { 1758 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \ 1759 obj, \ 1760 if (PSScavenge::should_scavenge(p)) { \ 1761 pm->claim_or_forward_depth(p); \ 1762 }, \ 1763 assert_nothing ) 1764 } 1765 1766 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { 1767 InstanceKlass_OOP_MAP_ITERATE( \ 1768 obj, \ 1769 PSParallelCompact::adjust_pointer(p), \ 1770 assert_nothing) 1771 return size_helper(); 1772 } 1773 1774 #endif // SERIALGC 1775 1776 // This klass is alive but the implementor link is not followed/updated. 1777 // Subklass and sibling links are handled by Klass::follow_weak_klass_links 1778 1779 void instanceKlass::follow_weak_klass_links( 1780 BoolObjectClosure* is_alive, OopClosure* keep_alive) { 1781 assert(is_alive->do_object_b(as_klassOop()), "this oop should be live"); 1782 if (ClassUnloading) { 1783 for (int i = 0; i < implementors_limit; i++) { 1784 klassOop impl = _implementors[i]; 1785 if (impl == NULL) break; // no more in the list 1786 if (!is_alive->do_object_b(impl)) { 1787 // remove this guy from the list by overwriting him with the tail 1788 int lasti = --_nof_implementors; 1789 assert(lasti >= i && lasti < implementors_limit, "just checking"); 1790 _implementors[i] = _implementors[lasti]; 1791 _implementors[lasti] = NULL; 1792 --i; // rerun the loop at this index 1793 } 1794 } 1795 } else { 1796 for (int i = 0; i < implementors_limit; i++) { 1797 keep_alive->do_oop(&adr_implementors()[i]); 1798 } 1799 } 1800 Klass::follow_weak_klass_links(is_alive, keep_alive); 1801 } 1802 1803 void instanceKlass::remove_unshareable_info() { 1804 Klass::remove_unshareable_info(); 1805 init_implementor(); 1806 } 1807 1808 static void clear_all_breakpoints(methodOop m) { 1809 m->clear_all_breakpoints(); 1810 } 1811 1812 void instanceKlass::release_C_heap_structures() { 1813 // Deallocate oop map cache 1814 if (_oop_map_cache != NULL) { 1815 delete _oop_map_cache; 1816 _oop_map_cache = NULL; 1817 } 1818 1819 // Deallocate JNI identifiers for jfieldIDs 1820 JNIid::deallocate(jni_ids()); 1821 set_jni_ids(NULL); 1822 1823 jmethodID* jmeths = methods_jmethod_ids_acquire(); 1824 if (jmeths != (jmethodID*)NULL) { 1825 release_set_methods_jmethod_ids(NULL); 1826 FreeHeap(jmeths); 1827 } 1828 1829 int* indices = methods_cached_itable_indices_acquire(); 1830 if (indices != (int*)NULL) { 1831 release_set_methods_cached_itable_indices(NULL); 1832 FreeHeap(indices); 1833 } 1834 1835 // release dependencies 1836 nmethodBucket* b = _dependencies; 1837 _dependencies = NULL; 1838 while (b != NULL) { 1839 nmethodBucket* next = b->next(); 1840 delete b; 1841 b = next; 1842 } 1843 1844 // Deallocate breakpoint records 1845 if (breakpoints() != 0x0) { 1846 methods_do(clear_all_breakpoints); 1847 assert(breakpoints() == 0x0, "should have cleared breakpoints"); 1848 } 1849 1850 // deallocate information 
about previous versions 1851 if (_previous_versions != NULL) { 1852 for (int i = _previous_versions->length() - 1; i >= 0; i--) { 1853 PreviousVersionNode * pv_node = _previous_versions->at(i); 1854 delete pv_node; 1855 } 1856 delete _previous_versions; 1857 _previous_versions = NULL; 1858 } 1859 1860 // deallocate the cached class file 1861 if (_cached_class_file_bytes != NULL) { 1862 os::free(_cached_class_file_bytes); 1863 _cached_class_file_bytes = NULL; 1864 _cached_class_file_len = 0; 1865 } 1866 1867 // Decrement symbol reference counts associated with the unloaded class. 1868 if (_name != NULL) _name->decrement_refcount(); 1869 // unreference array name derived from this class name (arrays of an unloaded 1870 // class can't be referenced anymore). 1871 if (_array_name != NULL) _array_name->decrement_refcount(); 1872 if (_source_file_name != NULL) _source_file_name->decrement_refcount(); 1873 if (_source_debug_extension != NULL) _source_debug_extension->decrement_refcount(); 1874 // walk constant pool and decrement symbol reference counts 1875 _constants->unreference_symbols(); 1876 } 1877 1878 void instanceKlass::set_source_file_name(Symbol* n) { 1879 _source_file_name = n; 1880 if (_source_file_name != NULL) _source_file_name->increment_refcount(); 1881 } 1882 1883 void instanceKlass::set_source_debug_extension(Symbol* n) { 1884 _source_debug_extension = n; 1885 if (_source_debug_extension != NULL) _source_debug_extension->increment_refcount(); 1886 } 1887 1888 address instanceKlass::static_field_addr(int offset) { 1889 return (address)(offset + instanceMirrorKlass::offset_of_static_fields() + (intptr_t)java_mirror()); 1890 } 1891 1892 1893 const char* instanceKlass::signature_name() const { 1894 const char* src = (const char*) (name()->as_C_string()); 1895 const int src_length = (int)strlen(src); 1896 char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3); 1897 int src_index = 0; 1898 int dest_index = 0; 1899 dest[dest_index++] = 'L'; 1900 while (src_index < src_length) { 1901 dest[dest_index++] = src[src_index++]; 1902 } 1903 dest[dest_index++] = ';'; 1904 dest[dest_index] = '\0'; 1905 return dest; 1906 } 1907 1908 // different verisons of is_same_class_package 1909 bool instanceKlass::is_same_class_package(klassOop class2) { 1910 klassOop class1 = as_klassOop(); 1911 oop classloader1 = instanceKlass::cast(class1)->class_loader(); 1912 Symbol* classname1 = Klass::cast(class1)->name(); 1913 1914 if (Klass::cast(class2)->oop_is_objArray()) { 1915 class2 = objArrayKlass::cast(class2)->bottom_klass(); 1916 } 1917 oop classloader2; 1918 if (Klass::cast(class2)->oop_is_instance()) { 1919 classloader2 = instanceKlass::cast(class2)->class_loader(); 1920 } else { 1921 assert(Klass::cast(class2)->oop_is_typeArray(), "should be type array"); 1922 classloader2 = NULL; 1923 } 1924 Symbol* classname2 = Klass::cast(class2)->name(); 1925 1926 return instanceKlass::is_same_class_package(classloader1, classname1, 1927 classloader2, classname2); 1928 } 1929 1930 bool instanceKlass::is_same_class_package(oop classloader2, Symbol* classname2) { 1931 klassOop class1 = as_klassOop(); 1932 oop classloader1 = instanceKlass::cast(class1)->class_loader(); 1933 Symbol* classname1 = Klass::cast(class1)->name(); 1934 1935 return instanceKlass::is_same_class_package(classloader1, classname1, 1936 classloader2, classname2); 1937 } 1938 1939 // return true if two classes are in the same package, classloader 1940 // and classname information is enough to determine a class's package 1941 bool 
instanceKlass::is_same_class_package(oop class_loader1, Symbol* class_name1, 1942 oop class_loader2, Symbol* class_name2) { 1943 if (class_loader1 != class_loader2) { 1944 return false; 1945 } else if (class_name1 == class_name2) { 1946 return true; // skip painful bytewise comparison 1947 } else { 1948 ResourceMark rm; 1949 1950 // The Symbol*'s are in UTF8 encoding. Since we only need to check explicitly 1951 // for ASCII characters ('/', 'L', '['), we can keep them in UTF8 encoding. 1952 // Otherwise, we just compare jbyte values between the strings. 1953 const jbyte *name1 = class_name1->base(); 1954 const jbyte *name2 = class_name2->base(); 1955 1956 const jbyte *last_slash1 = UTF8::strrchr(name1, class_name1->utf8_length(), '/'); 1957 const jbyte *last_slash2 = UTF8::strrchr(name2, class_name2->utf8_length(), '/'); 1958 1959 if ((last_slash1 == NULL) || (last_slash2 == NULL)) { 1960 // One of the two doesn't have a package. Only return true 1961 // if the other one also doesn't have a package. 1962 return last_slash1 == last_slash2; 1963 } else { 1964 // Skip over '['s 1965 if (*name1 == '[') { 1966 do { 1967 name1++; 1968 } while (*name1 == '['); 1969 if (*name1 != 'L') { 1970 // Something is terribly wrong. Shouldn't be here. 1971 return false; 1972 } 1973 } 1974 if (*name2 == '[') { 1975 do { 1976 name2++; 1977 } while (*name2 == '['); 1978 if (*name2 != 'L') { 1979 // Something is terribly wrong. Shouldn't be here. 1980 return false; 1981 } 1982 } 1983 1984 // Check that package part is identical 1985 int length1 = last_slash1 - name1; 1986 int length2 = last_slash2 - name2; 1987 1988 return UTF8::equal(name1, length1, name2, length2); 1989 } 1990 } 1991 } 1992 1993 // Returns true iff super_method can be overridden by a method in targetclassname 1994 // See JSL 3rd edition 8.4.6.1 1995 // Assumes name-signature match 1996 // "this" is instanceKlass of super_method which must exist 1997 // note that the instanceKlass of the method in the targetclassname has not always been created yet 1998 bool instanceKlass::is_override(methodHandle super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS) { 1999 // Private methods can not be overridden 2000 if (super_method->is_private()) { 2001 return false; 2002 } 2003 // If super method is accessible, then override 2004 if ((super_method->is_protected()) || 2005 (super_method->is_public())) { 2006 return true; 2007 } 2008 // Package-private methods are not inherited outside of package 2009 assert(super_method->is_package_private(), "must be package private"); 2010 return(is_same_class_package(targetclassloader(), targetclassname)); 2011 } 2012 2013 /* defined for now in jvm.cpp, for historical reasons *-- 2014 klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle self, 2015 Symbol*& simple_name_result, TRAPS) { 2016 ... 2017 } 2018 */ 2019 2020 // tell if two classes have the same enclosing class (at package level) 2021 bool instanceKlass::is_same_package_member_impl(instanceKlassHandle class1, 2022 klassOop class2_oop, TRAPS) { 2023 if (class2_oop == class1->as_klassOop()) return true; 2024 if (!Klass::cast(class2_oop)->oop_is_instance()) return false; 2025 instanceKlassHandle class2(THREAD, class2_oop); 2026 2027 // must be in same package before we try anything else 2028 if (!class1->is_same_class_package(class2->class_loader(), class2->name())) 2029 return false; 2030 2031 // As long as there is an outer1.getEnclosingClass, 2032 // shift the search outward. 
2033 instanceKlassHandle outer1 = class1; 2034 for (;;) { 2035 // As we walk along, look for equalities between outer1 and class2. 2036 // Eventually, the walks will terminate as outer1 stops 2037 // at the top-level class around the original class. 2038 bool ignore_inner_is_member; 2039 klassOop next = outer1->compute_enclosing_class(&ignore_inner_is_member, 2040 CHECK_false); 2041 if (next == NULL) break; 2042 if (next == class2()) return true; 2043 outer1 = instanceKlassHandle(THREAD, next); 2044 } 2045 2046 // Now do the same for class2. 2047 instanceKlassHandle outer2 = class2; 2048 for (;;) { 2049 bool ignore_inner_is_member; 2050 klassOop next = outer2->compute_enclosing_class(&ignore_inner_is_member, 2051 CHECK_false); 2052 if (next == NULL) break; 2053 // Might as well check the new outer against all available values. 2054 if (next == class1()) return true; 2055 if (next == outer1()) return true; 2056 outer2 = instanceKlassHandle(THREAD, next); 2057 } 2058 2059 // If by this point we have not found an equality between the 2060 // two classes, we know they are in separate package members. 2061 return false; 2062 } 2063 2064 2065 jint instanceKlass::compute_modifier_flags(TRAPS) const { 2066 klassOop k = as_klassOop(); 2067 jint access = access_flags().as_int(); 2068 2069 // But check if it happens to be member class. 2070 typeArrayOop inner_class_list = inner_classes(); 2071 int length = (inner_class_list == NULL) ? 0 : inner_class_list->length(); 2072 assert (length % instanceKlass::inner_class_next_offset == 0, "just checking"); 2073 if (length > 0) { 2074 typeArrayHandle inner_class_list_h(THREAD, inner_class_list); 2075 instanceKlassHandle ik(THREAD, k); 2076 for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) { 2077 int ioff = inner_class_list_h->ushort_at( 2078 i + instanceKlass::inner_class_inner_class_info_offset); 2079 2080 // Inner class attribute can be zero, skip it. 2081 // Strange but true: JVM spec. allows null inner class refs. 2082 if (ioff == 0) continue; 2083 2084 // only look at classes that are already loaded 2085 // since we are looking for the flags for our self. 2086 Symbol* inner_name = ik->constants()->klass_name_at(ioff); 2087 if ((ik->name() == inner_name)) { 2088 // This is really a member class. 2089 access = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_access_flags_offset); 2090 break; 2091 } 2092 } 2093 } 2094 // Remember to strip ACC_SUPER bit 2095 return (access & (~JVM_ACC_SUPER)) & JVM_ACC_WRITTEN_FLAGS; 2096 } 2097 2098 jint instanceKlass::jvmti_class_status() const { 2099 jint result = 0; 2100 2101 if (is_linked()) { 2102 result |= JVMTI_CLASS_STATUS_VERIFIED | JVMTI_CLASS_STATUS_PREPARED; 2103 } 2104 2105 if (is_initialized()) { 2106 assert(is_linked(), "Class status is not consistent"); 2107 result |= JVMTI_CLASS_STATUS_INITIALIZED; 2108 } 2109 if (is_in_error_state()) { 2110 result |= JVMTI_CLASS_STATUS_ERROR; 2111 } 2112 return result; 2113 } 2114 2115 methodOop instanceKlass::method_at_itable(klassOop holder, int index, TRAPS) { 2116 itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable(); 2117 int method_table_offset_in_words = ioe->offset()/wordSize; 2118 int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words()) 2119 / itableOffsetEntry::size(); 2120 2121 for (int cnt = 0 ; ; cnt ++, ioe ++) { 2122 // If the interface isn't implemented by the receiver class, 2123 // the VM should throw IncompatibleClassChangeError. 
2124 if (cnt >= nof_interfaces) { 2125 THROW_0(vmSymbols::java_lang_IncompatibleClassChangeError()); 2126 } 2127 2128 klassOop ik = ioe->interface_klass(); 2129 if (ik == holder) break; 2130 } 2131 2132 itableMethodEntry* ime = ioe->first_method_entry(as_klassOop()); 2133 methodOop m = ime[index].method(); 2134 if (m == NULL) { 2135 THROW_0(vmSymbols::java_lang_AbstractMethodError()); 2136 } 2137 return m; 2138 } 2139 2140 // On-stack replacement stuff 2141 void instanceKlass::add_osr_nmethod(nmethod* n) { 2142 // only one compilation can be active 2143 NEEDS_CLEANUP 2144 // This is a short non-blocking critical region, so the no safepoint check is ok. 2145 OsrList_lock->lock_without_safepoint_check(); 2146 assert(n->is_osr_method(), "wrong kind of nmethod"); 2147 n->set_osr_link(osr_nmethods_head()); 2148 set_osr_nmethods_head(n); 2149 // Raise the highest osr level if necessary 2150 if (TieredCompilation) { 2151 methodOop m = n->method(); 2152 m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level())); 2153 } 2154 // Remember to unlock again 2155 OsrList_lock->unlock(); 2156 2157 // Get rid of the osr methods for the same bci that have lower levels. 2158 if (TieredCompilation) { 2159 for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) { 2160 nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true); 2161 if (inv != NULL && inv->is_in_use()) { 2162 inv->make_not_entrant(); 2163 } 2164 } 2165 } 2166 } 2167 2168 2169 void instanceKlass::remove_osr_nmethod(nmethod* n) { 2170 // This is a short non-blocking critical region, so the no safepoint check is ok. 2171 OsrList_lock->lock_without_safepoint_check(); 2172 assert(n->is_osr_method(), "wrong kind of nmethod"); 2173 nmethod* last = NULL; 2174 nmethod* cur = osr_nmethods_head(); 2175 int max_level = CompLevel_none; // Find the max comp level excluding n 2176 methodOop m = n->method(); 2177 // Search for match 2178 while(cur != NULL && cur != n) { 2179 if (TieredCompilation) { 2180 // Find max level before n 2181 max_level = MAX2(max_level, cur->comp_level()); 2182 } 2183 last = cur; 2184 cur = cur->osr_link(); 2185 } 2186 nmethod* next = NULL; 2187 if (cur == n) { 2188 next = cur->osr_link(); 2189 if (last == NULL) { 2190 // Remove first element 2191 set_osr_nmethods_head(next); 2192 } else { 2193 last->set_osr_link(next); 2194 } 2195 } 2196 n->set_osr_link(NULL); 2197 if (TieredCompilation) { 2198 cur = next; 2199 while (cur != NULL) { 2200 // Find max level after n 2201 max_level = MAX2(max_level, cur->comp_level()); 2202 cur = cur->osr_link(); 2203 } 2204 m->set_highest_osr_comp_level(max_level); 2205 } 2206 // Remember to unlock again 2207 OsrList_lock->unlock(); 2208 } 2209 2210 nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci, int comp_level, bool match_level) const { 2211 // This is a short non-blocking critical region, so the no safepoint check is ok. 2212 OsrList_lock->lock_without_safepoint_check(); 2213 nmethod* osr = osr_nmethods_head(); 2214 nmethod* best = NULL; 2215 while (osr != NULL) { 2216 assert(osr->is_osr_method(), "wrong kind of nmethod found in chain"); 2217 // There can be a time when a c1 osr method exists but we are waiting 2218 // for a c2 version. When c2 completes its osr nmethod we will trash 2219 // the c1 version and only be able to find the c2 version. 
However 2220 // while we overflow in the c1 code at back branches we don't want to 2221 // try and switch to the same code as we are already running 2222 2223 if (osr->method() == m && 2224 (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) { 2225 if (match_level) { 2226 if (osr->comp_level() == comp_level) { 2227 // Found a match - return it. 2228 OsrList_lock->unlock(); 2229 return osr; 2230 } 2231 } else { 2232 if (best == NULL || (osr->comp_level() > best->comp_level())) { 2233 if (osr->comp_level() == CompLevel_highest_tier) { 2234 // Found the best possible - return it. 2235 OsrList_lock->unlock(); 2236 return osr; 2237 } 2238 best = osr; 2239 } 2240 } 2241 } 2242 osr = osr->osr_link(); 2243 } 2244 OsrList_lock->unlock(); 2245 if (best != NULL && best->comp_level() >= comp_level && match_level == false) { 2246 return best; 2247 } 2248 return NULL; 2249 } 2250 2251 // ----------------------------------------------------------------------------------------------------- 2252 #ifndef PRODUCT 2253 2254 // Printing 2255 2256 #define BULLET " - " 2257 2258 void FieldPrinter::do_field(fieldDescriptor* fd) { 2259 _st->print(BULLET); 2260 if (_obj == NULL) { 2261 fd->print_on(_st); 2262 _st->cr(); 2263 } else { 2264 fd->print_on_for(_st, _obj); 2265 _st->cr(); 2266 } 2267 } 2268 2269 2270 void instanceKlass::oop_print_on(oop obj, outputStream* st) { 2271 Klass::oop_print_on(obj, st); 2272 2273 if (as_klassOop() == SystemDictionary::String_klass()) { 2274 typeArrayOop value = java_lang_String::value(obj); 2275 juint offset = java_lang_String::offset(obj); 2276 juint length = java_lang_String::length(obj); 2277 if (value != NULL && 2278 value->is_typeArray() && 2279 offset <= (juint) value->length() && 2280 offset + length <= (juint) value->length()) { 2281 st->print(BULLET"string: "); 2282 Handle h_obj(obj); 2283 java_lang_String::print(h_obj, st); 2284 st->cr(); 2285 if (!WizardMode) return; // that is enough 2286 } 2287 } 2288 2289 st->print_cr(BULLET"---- fields (total size %d words):", oop_size(obj)); 2290 FieldPrinter print_field(st, obj); 2291 do_nonstatic_fields(&print_field); 2292 2293 if (as_klassOop() == SystemDictionary::Class_klass()) { 2294 st->print(BULLET"signature: "); 2295 java_lang_Class::print_signature(obj, st); 2296 st->cr(); 2297 klassOop mirrored_klass = java_lang_Class::as_klassOop(obj); 2298 st->print(BULLET"fake entry for mirror: "); 2299 mirrored_klass->print_value_on(st); 2300 st->cr(); 2301 st->print(BULLET"fake entry resolved_constructor: "); 2302 methodOop ctor = java_lang_Class::resolved_constructor(obj); 2303 ctor->print_value_on(st); 2304 klassOop array_klass = java_lang_Class::array_klass(obj); 2305 st->cr(); 2306 st->print(BULLET"fake entry for array: "); 2307 array_klass->print_value_on(st); 2308 st->cr(); 2309 st->print_cr(BULLET"fake entry for oop_size: %d", java_lang_Class::oop_size(obj)); 2310 st->print_cr(BULLET"fake entry for static_oop_field_count: %d", java_lang_Class::static_oop_field_count(obj)); 2311 klassOop real_klass = java_lang_Class::as_klassOop(obj); 2312 if (real_klass != NULL && real_klass->klass_part()->oop_is_instance()) { 2313 instanceKlass::cast(real_klass)->do_local_static_fields(&print_field); 2314 } 2315 } else if (as_klassOop() == SystemDictionary::MethodType_klass()) { 2316 st->print(BULLET"signature: "); 2317 java_lang_invoke_MethodType::print_signature(obj, st); 2318 st->cr(); 2319 } 2320 } 2321 2322 #endif //PRODUCT 2323 2324 void instanceKlass::oop_print_value_on(oop obj, outputStream* st) { 2325 st->print("a "); 2326 
name()->print_value_on(st); 2327 obj->print_address_on(st); 2328 if (as_klassOop() == SystemDictionary::String_klass() 2329 && java_lang_String::value(obj) != NULL) { 2330 ResourceMark rm; 2331 int len = java_lang_String::length(obj); 2332 int plen = (len < 24 ? len : 12); 2333 char* str = java_lang_String::as_utf8_string(obj, 0, plen); 2334 st->print(" = \"%s\"", str); 2335 if (len > plen) 2336 st->print("...[%d]", len); 2337 } else if (as_klassOop() == SystemDictionary::Class_klass()) { 2338 klassOop k = java_lang_Class::as_klassOop(obj); 2339 st->print(" = "); 2340 if (k != NULL) { 2341 k->print_value_on(st); 2342 } else { 2343 const char* tname = type2name(java_lang_Class::primitive_type(obj)); 2344 st->print("%s", tname ? tname : "type?"); 2345 } 2346 } else if (as_klassOop() == SystemDictionary::MethodType_klass()) { 2347 st->print(" = "); 2348 java_lang_invoke_MethodType::print_signature(obj, st); 2349 } else if (java_lang_boxing_object::is_instance(obj)) { 2350 st->print(" = "); 2351 java_lang_boxing_object::print(obj, st); 2352 } 2353 } 2354 2355 const char* instanceKlass::internal_name() const { 2356 return external_name(); 2357 } 2358 2359 // Verification 2360 2361 class VerifyFieldClosure: public OopClosure { 2362 protected: 2363 template <class T> void do_oop_work(T* p) { 2364 guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap"); 2365 oop obj = oopDesc::load_decode_heap_oop(p); 2366 if (!obj->is_oop_or_null()) { 2367 tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj); 2368 Universe::print(); 2369 guarantee(false, "boom"); 2370 } 2371 } 2372 public: 2373 virtual void do_oop(oop* p) { VerifyFieldClosure::do_oop_work(p); } 2374 virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); } 2375 }; 2376 2377 void instanceKlass::oop_verify_on(oop obj, outputStream* st) { 2378 Klass::oop_verify_on(obj, st); 2379 VerifyFieldClosure blk; 2380 oop_oop_iterate(obj, &blk); 2381 } 2382 2383 #ifndef PRODUCT 2384 2385 void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) { 2386 // This verification code is disabled. JDK_Version::is_gte_jdk14x_version() 2387 // cannot be called since this function is called before the VM is 2388 // able to determine what JDK version is running with. 2389 // The check below always is false since 1.4. 2390 return; 2391 2392 // This verification code temporarily disabled for the 1.4 2393 // reflection implementation since java.lang.Class now has 2394 // Java-level instance fields. Should rewrite this to handle this 2395 // case. 2396 if (!(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)) { 2397 // Verify that java.lang.Class instances have a fake oop field added. 
2398 instanceKlass* ik = instanceKlass::cast(k); 2399 2400 // Check that we have the right class 2401 static bool first_time = true; 2402 guarantee(k == SystemDictionary::Class_klass() && first_time, "Invalid verify of maps"); 2403 first_time = false; 2404 const int extra = java_lang_Class::number_of_fake_oop_fields; 2405 guarantee(ik->nonstatic_field_size() == extra, "just checking"); 2406 guarantee(ik->nonstatic_oop_map_count() == 1, "just checking"); 2407 guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking"); 2408 2409 // Check that the map is (2,extra) 2410 int offset = java_lang_Class::klass_offset; 2411 2412 OopMapBlock* map = ik->start_of_nonstatic_oop_maps(); 2413 guarantee(map->offset() == offset && map->count() == (unsigned int) extra, 2414 "sanity"); 2415 } 2416 } 2417 2418 #endif // ndef PRODUCT 2419 2420 // JNIid class for jfieldIDs only 2421 // Note to reviewers: 2422 // These JNI functions are just moved over to column 1 and not changed 2423 // in the compressed oops workspace. 2424 JNIid::JNIid(klassOop holder, int offset, JNIid* next) { 2425 _holder = holder; 2426 _offset = offset; 2427 _next = next; 2428 debug_only(_is_static_field_id = false;) 2429 } 2430 2431 2432 JNIid* JNIid::find(int offset) { 2433 JNIid* current = this; 2434 while (current != NULL) { 2435 if (current->offset() == offset) return current; 2436 current = current->next(); 2437 } 2438 return NULL; 2439 } 2440 2441 void JNIid::oops_do(OopClosure* f) { 2442 for (JNIid* cur = this; cur != NULL; cur = cur->next()) { 2443 f->do_oop(cur->holder_addr()); 2444 } 2445 } 2446 2447 void JNIid::deallocate(JNIid* current) { 2448 while (current != NULL) { 2449 JNIid* next = current->next(); 2450 delete current; 2451 current = next; 2452 } 2453 } 2454 2455 2456 void JNIid::verify(klassOop holder) { 2457 int first_field_offset = instanceMirrorKlass::offset_of_static_fields(); 2458 int end_field_offset; 2459 end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize); 2460 2461 JNIid* current = this; 2462 while (current != NULL) { 2463 guarantee(current->holder() == holder, "Invalid klass in JNIid"); 2464 #ifdef ASSERT 2465 int o = current->offset(); 2466 if (current->is_static_field_id()) { 2467 guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid"); 2468 } 2469 #endif 2470 current = current->next(); 2471 } 2472 } 2473 2474 2475 #ifdef ASSERT 2476 void instanceKlass::set_init_state(ClassState state) { 2477 bool good_state = as_klassOop()->is_shared() ? (_init_state <= state) 2478 : (_init_state < state); 2479 assert(good_state || state == allocated, "illegal state transition"); 2480 _init_state = state; 2481 } 2482 #endif 2483 2484 2485 // RedefineClasses() support for previous versions: 2486 2487 // Add an information node that contains weak references to the 2488 // interesting parts of the previous version of the_class. 2489 // This is also where we clean out any unused weak references. 2490 // Note that while we delete nodes from the _previous_versions 2491 // array, we never delete the array itself until the klass is 2492 // unloaded. The has_been_redefined() query depends on that fact. 
2493 // 2494 void instanceKlass::add_previous_version(instanceKlassHandle ikh, 2495 BitMap* emcp_methods, int emcp_method_count) { 2496 assert(Thread::current()->is_VM_thread(), 2497 "only VMThread can add previous versions"); 2498 2499 if (_previous_versions == NULL) { 2500 // This is the first previous version so make some space. 2501 // Start with 2 elements under the assumption that the class 2502 // won't be redefined much. 2503 _previous_versions = new (ResourceObj::C_HEAP) 2504 GrowableArray<PreviousVersionNode *>(2, true); 2505 } 2506 2507 // RC_TRACE macro has an embedded ResourceMark 2508 RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d", 2509 ikh->external_name(), _previous_versions->length(), emcp_method_count)); 2510 constantPoolHandle cp_h(ikh->constants()); 2511 jobject cp_ref; 2512 if (cp_h->is_shared()) { 2513 // a shared ConstantPool requires a regular reference; a weak 2514 // reference would be collectible 2515 cp_ref = JNIHandles::make_global(cp_h); 2516 } else { 2517 cp_ref = JNIHandles::make_weak_global(cp_h); 2518 } 2519 PreviousVersionNode * pv_node = NULL; 2520 objArrayOop old_methods = ikh->methods(); 2521 2522 if (emcp_method_count == 0) { 2523 // non-shared ConstantPool gets a weak reference 2524 pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), NULL); 2525 RC_TRACE(0x00000400, 2526 ("add: all methods are obsolete; flushing any EMCP weak refs")); 2527 } else { 2528 int local_count = 0; 2529 GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP) 2530 GrowableArray<jweak>(emcp_method_count, true); 2531 for (int i = 0; i < old_methods->length(); i++) { 2532 if (emcp_methods->at(i)) { 2533 // this old method is EMCP so save a weak ref 2534 methodOop old_method = (methodOop) old_methods->obj_at(i); 2535 methodHandle old_method_h(old_method); 2536 jweak method_ref = JNIHandles::make_weak_global(old_method_h); 2537 method_refs->append(method_ref); 2538 if (++local_count >= emcp_method_count) { 2539 // no more EMCP methods so bail out now 2540 break; 2541 } 2542 } 2543 } 2544 // non-shared ConstantPool gets a weak reference 2545 pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), method_refs); 2546 } 2547 2548 _previous_versions->append(pv_node); 2549 2550 // Using weak references allows the interesting parts of previous 2551 // classes to be GC'ed when they are no longer needed. Since the 2552 // caller is the VMThread and we are at a safepoint, this is a good 2553 // time to clear out unused weak references. 2554 2555 RC_TRACE(0x00000400, ("add: previous version length=%d", 2556 _previous_versions->length())); 2557 2558 // skip the last entry since we just added it 2559 for (int i = _previous_versions->length() - 2; i >= 0; i--) { 2560 // check the previous versions array for a GC'ed weak refs 2561 pv_node = _previous_versions->at(i); 2562 cp_ref = pv_node->prev_constant_pool(); 2563 assert(cp_ref != NULL, "cp ref was unexpectedly cleared"); 2564 if (cp_ref == NULL) { 2565 delete pv_node; 2566 _previous_versions->remove_at(i); 2567 // Since we are traversing the array backwards, we don't have to 2568 // do anything special with the index. 2569 continue; // robustness 2570 } 2571 2572 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); 2573 if (cp == NULL) { 2574 // this entry has been GC'ed so remove it 2575 delete pv_node; 2576 _previous_versions->remove_at(i); 2577 // Since we are traversing the array backwards, we don't have to 2578 // do anything special with the index. 
2579 continue; 2580 } else { 2581 RC_TRACE(0x00000400, ("add: previous version @%d is alive", i)); 2582 } 2583 2584 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); 2585 if (method_refs != NULL) { 2586 RC_TRACE(0x00000400, ("add: previous methods length=%d", 2587 method_refs->length())); 2588 for (int j = method_refs->length() - 1; j >= 0; j--) { 2589 jweak method_ref = method_refs->at(j); 2590 assert(method_ref != NULL, "weak method ref was unexpectedly cleared"); 2591 if (method_ref == NULL) { 2592 method_refs->remove_at(j); 2593 // Since we are traversing the array backwards, we don't have to 2594 // do anything special with the index. 2595 continue; // robustness 2596 } 2597 2598 methodOop method = (methodOop)JNIHandles::resolve(method_ref); 2599 if (method == NULL || emcp_method_count == 0) { 2600 // This method entry has been GC'ed or the current 2601 // RedefineClasses() call has made all methods obsolete 2602 // so remove it. 2603 JNIHandles::destroy_weak_global(method_ref); 2604 method_refs->remove_at(j); 2605 } else { 2606 // RC_TRACE macro has an embedded ResourceMark 2607 RC_TRACE(0x00000400, 2608 ("add: %s(%s): previous method @%d in version @%d is alive", 2609 method->name()->as_C_string(), method->signature()->as_C_string(), 2610 j, i)); 2611 } 2612 } 2613 } 2614 } 2615 2616 int obsolete_method_count = old_methods->length() - emcp_method_count; 2617 2618 if (emcp_method_count != 0 && obsolete_method_count != 0 && 2619 _previous_versions->length() > 1) { 2620 // We have a mix of obsolete and EMCP methods. If there is more 2621 // than the previous version that we just added, then we have to 2622 // clear out any matching EMCP method entries the hard way. 2623 int local_count = 0; 2624 for (int i = 0; i < old_methods->length(); i++) { 2625 if (!emcp_methods->at(i)) { 2626 // only obsolete methods are interesting 2627 methodOop old_method = (methodOop) old_methods->obj_at(i); 2628 Symbol* m_name = old_method->name(); 2629 Symbol* m_signature = old_method->signature(); 2630 2631 // skip the last entry since we just added it 2632 for (int j = _previous_versions->length() - 2; j >= 0; j--) { 2633 // check the previous versions array for a GC'ed weak refs 2634 pv_node = _previous_versions->at(j); 2635 cp_ref = pv_node->prev_constant_pool(); 2636 assert(cp_ref != NULL, "cp ref was unexpectedly cleared"); 2637 if (cp_ref == NULL) { 2638 delete pv_node; 2639 _previous_versions->remove_at(j); 2640 // Since we are traversing the array backwards, we don't have to 2641 // do anything special with the index. 2642 continue; // robustness 2643 } 2644 2645 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); 2646 if (cp == NULL) { 2647 // this entry has been GC'ed so remove it 2648 delete pv_node; 2649 _previous_versions->remove_at(j); 2650 // Since we are traversing the array backwards, we don't have to 2651 // do anything special with the index. 2652 continue; 2653 } 2654 2655 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); 2656 if (method_refs == NULL) { 2657 // We have run into a PreviousVersion generation where 2658 // all methods were made obsolete during that generation's 2659 // RedefineClasses() operation. At the time of that 2660 // operation, all EMCP methods were flushed so we don't 2661 // have to go back any further. 2662 // 2663 // A NULL method_refs is different than an empty method_refs. 2664 // We cannot infer any optimizations about older generations 2665 // from an empty method_refs for the current generation. 
2666 break; 2667 } 2668 2669 for (int k = method_refs->length() - 1; k >= 0; k--) { 2670 jweak method_ref = method_refs->at(k); 2671 assert(method_ref != NULL, 2672 "weak method ref was unexpectedly cleared"); 2673 if (method_ref == NULL) { 2674 method_refs->remove_at(k); 2675 // Since we are traversing the array backwards, we don't 2676 // have to do anything special with the index. 2677 continue; // robustness 2678 } 2679 2680 methodOop method = (methodOop)JNIHandles::resolve(method_ref); 2681 if (method == NULL) { 2682 // this method entry has been GC'ed so skip it 2683 JNIHandles::destroy_weak_global(method_ref); 2684 method_refs->remove_at(k); 2685 continue; 2686 } 2687 2688 if (method->name() == m_name && 2689 method->signature() == m_signature) { 2690 // The current RedefineClasses() call has made all EMCP 2691 // versions of this method obsolete so mark it as obsolete 2692 // and remove the weak ref. 2693 RC_TRACE(0x00000400, 2694 ("add: %s(%s): flush obsolete method @%d in version @%d", 2695 m_name->as_C_string(), m_signature->as_C_string(), k, j)); 2696 2697 method->set_is_obsolete(); 2698 JNIHandles::destroy_weak_global(method_ref); 2699 method_refs->remove_at(k); 2700 break; 2701 } 2702 } 2703 2704 // The previous loop may not find a matching EMCP method, but 2705 // that doesn't mean that we can optimize and not go any 2706 // further back in the PreviousVersion generations. The EMCP 2707 // method for this generation could have already been GC'ed, 2708 // but there still may be an older EMCP method that has not 2709 // been GC'ed. 2710 } 2711 2712 if (++local_count >= obsolete_method_count) { 2713 // no more obsolete methods so bail out now 2714 break; 2715 } 2716 } 2717 } 2718 } 2719 } // end add_previous_version() 2720 2721 2722 // Determine if instanceKlass has a previous version. 2723 bool instanceKlass::has_previous_version() const { 2724 if (_previous_versions == NULL) { 2725 // no previous versions array so answer is easy 2726 return false; 2727 } 2728 2729 for (int i = _previous_versions->length() - 1; i >= 0; i--) { 2730 // Check the previous versions array for an info node that hasn't 2731 // been GC'ed 2732 PreviousVersionNode * pv_node = _previous_versions->at(i); 2733 2734 jobject cp_ref = pv_node->prev_constant_pool(); 2735 assert(cp_ref != NULL, "cp reference was unexpectedly cleared"); 2736 if (cp_ref == NULL) { 2737 continue; // robustness 2738 } 2739 2740 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); 2741 if (cp != NULL) { 2742 // we have at least one previous version 2743 return true; 2744 } 2745 2746 // We don't have to check the method refs. If the constant pool has 2747 // been GC'ed then so have the methods. 2748 } 2749 2750 // all of the underlying nodes' info has been GC'ed 2751 return false; 2752 } // end has_previous_version() 2753 2754 methodOop instanceKlass::method_with_idnum(int idnum) { 2755 methodOop m = NULL; 2756 if (idnum < methods()->length()) { 2757 m = (methodOop) methods()->obj_at(idnum); 2758 } 2759 if (m == NULL || m->method_idnum() != idnum) { 2760 for (int index = 0; index < methods()->length(); ++index) { 2761 m = (methodOop) methods()->obj_at(index); 2762 if (m->method_idnum() == idnum) { 2763 return m; 2764 } 2765 } 2766 } 2767 return m; 2768 } 2769 2770 2771 // Set the annotation at 'idnum' to 'anno'. 2772 // We don't want to create or extend the array if 'anno' is NULL, since that is the 2773 // default value. However, if the array exists and is long enough, we must set NULL values. 
2774 void instanceKlass::set_methods_annotations_of(int idnum, typeArrayOop anno, objArrayOop* md_p) { 2775 objArrayOop md = *md_p; 2776 if (md != NULL && md->length() > idnum) { 2777 md->obj_at_put(idnum, anno); 2778 } else if (anno != NULL) { 2779 // create the array 2780 int length = MAX2(idnum+1, (int)_idnum_allocated_count); 2781 md = oopFactory::new_system_objArray(length, Thread::current()); 2782 if (*md_p != NULL) { 2783 // copy the existing entries 2784 for (int index = 0; index < (*md_p)->length(); index++) { 2785 md->obj_at_put(index, (*md_p)->obj_at(index)); 2786 } 2787 } 2788 set_annotations(md, md_p); 2789 md->obj_at_put(idnum, anno); 2790 } // if no array and idnum isn't included there is nothing to do 2791 } 2792 2793 // Construct a PreviousVersionNode entry for the array hung off 2794 // the instanceKlass. 2795 PreviousVersionNode::PreviousVersionNode(jobject prev_constant_pool, 2796 bool prev_cp_is_weak, GrowableArray<jweak>* prev_EMCP_methods) { 2797 2798 _prev_constant_pool = prev_constant_pool; 2799 _prev_cp_is_weak = prev_cp_is_weak; 2800 _prev_EMCP_methods = prev_EMCP_methods; 2801 } 2802 2803 2804 // Destroy a PreviousVersionNode 2805 PreviousVersionNode::~PreviousVersionNode() { 2806 if (_prev_constant_pool != NULL) { 2807 if (_prev_cp_is_weak) { 2808 JNIHandles::destroy_weak_global(_prev_constant_pool); 2809 } else { 2810 JNIHandles::destroy_global(_prev_constant_pool); 2811 } 2812 } 2813 2814 if (_prev_EMCP_methods != NULL) { 2815 for (int i = _prev_EMCP_methods->length() - 1; i >= 0; i--) { 2816 jweak method_ref = _prev_EMCP_methods->at(i); 2817 if (method_ref != NULL) { 2818 JNIHandles::destroy_weak_global(method_ref); 2819 } 2820 } 2821 delete _prev_EMCP_methods; 2822 } 2823 } 2824 2825 2826 // Construct a PreviousVersionInfo entry 2827 PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) { 2828 _prev_constant_pool_handle = constantPoolHandle(); // NULL handle 2829 _prev_EMCP_method_handles = NULL; 2830 2831 jobject cp_ref = pv_node->prev_constant_pool(); 2832 assert(cp_ref != NULL, "constant pool ref was unexpectedly cleared"); 2833 if (cp_ref == NULL) { 2834 return; // robustness 2835 } 2836 2837 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref); 2838 if (cp == NULL) { 2839 // Weak reference has been GC'ed. Since the constant pool has been 2840 // GC'ed, the methods have also been GC'ed. 2841 return; 2842 } 2843 2844 // make the constantPoolOop safe to return 2845 _prev_constant_pool_handle = constantPoolHandle(cp); 2846 2847 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods(); 2848 if (method_refs == NULL) { 2849 // the instanceKlass did not have any EMCP methods 2850 return; 2851 } 2852 2853 _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10); 2854 2855 int n_methods = method_refs->length(); 2856 for (int i = 0; i < n_methods; i++) { 2857 jweak method_ref = method_refs->at(i); 2858 assert(method_ref != NULL, "weak method ref was unexpectedly cleared"); 2859 if (method_ref == NULL) { 2860 continue; // robustness 2861 } 2862 2863 methodOop method = (methodOop)JNIHandles::resolve(method_ref); 2864 if (method == NULL) { 2865 // this entry has been GC'ed so skip it 2866 continue; 2867 } 2868 2869 // make the methodOop safe to return 2870 _prev_EMCP_method_handles->append(methodHandle(method)); 2871 } 2872 } 2873 2874 2875 // Destroy a PreviousVersionInfo 2876 PreviousVersionInfo::~PreviousVersionInfo() { 2877 // Since _prev_EMCP_method_handles is not C-heap allocated, we 2878 // don't have to delete it. 
2879 } 2880 2881 2882 // Construct a helper for walking the previous versions array 2883 PreviousVersionWalker::PreviousVersionWalker(instanceKlass *ik) { 2884 _previous_versions = ik->previous_versions(); 2885 _current_index = 0; 2886 // _hm needs no initialization 2887 _current_p = NULL; 2888 } 2889 2890 2891 // Destroy a PreviousVersionWalker 2892 PreviousVersionWalker::~PreviousVersionWalker() { 2893 // Delete the current info just in case the caller didn't walk to 2894 // the end of the previous versions list. No harm if _current_p is 2895 // already NULL. 2896 delete _current_p; 2897 2898 // When _hm is destroyed, all the Handles returned in 2899 // PreviousVersionInfo objects will be destroyed. 2900 // Also, after this destructor is finished it will be 2901 // safe to delete the GrowableArray allocated in the 2902 // PreviousVersionInfo objects. 2903 } 2904 2905 2906 // Return the interesting information for the next previous version 2907 // of the klass. Returns NULL if there are no more previous versions. 2908 PreviousVersionInfo* PreviousVersionWalker::next_previous_version() { 2909 if (_previous_versions == NULL) { 2910 // no previous versions so nothing to return 2911 return NULL; 2912 } 2913 2914 delete _current_p; // cleanup the previous info for the caller 2915 _current_p = NULL; // reset to NULL so we don't delete same object twice 2916 2917 int length = _previous_versions->length(); 2918 2919 while (_current_index < length) { 2920 PreviousVersionNode * pv_node = _previous_versions->at(_current_index++); 2921 PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP) 2922 PreviousVersionInfo(pv_node); 2923 2924 constantPoolHandle cp_h = pv_info->prev_constant_pool_handle(); 2925 if (cp_h.is_null()) { 2926 delete pv_info; 2927 2928 // The underlying node's info has been GC'ed so try the next one. 2929 // We don't have to check the methods. If the constant pool has 2930 // GC'ed then so have the methods. 2931 continue; 2932 } 2933 2934 // Found a node with non GC'ed info so return it. The caller will 2935 // need to delete pv_info when they are done with it. 2936 _current_p = pv_info; 2937 return pv_info; 2938 } 2939 2940 // all of the underlying nodes' info has been GC'ed 2941 return NULL; 2942 } // end next_previous_version()