1 /*
   2  * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/javaClasses.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/verifier.hpp"
  29 #include "classfile/vmSymbols.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "gc_implementation/shared/markSweep.inline.hpp"
  32 #include "gc_interface/collectedHeap.inline.hpp"
  33 #include "interpreter/oopMapCache.hpp"
  34 #include "interpreter/rewriter.hpp"
  35 #include "jvmtifiles/jvmti.h"
  36 #include "memory/genOopClosures.inline.hpp"
  37 #include "memory/oopFactory.hpp"
  38 #include "memory/permGen.hpp"
  39 #include "oops/instanceKlass.hpp"
  40 #include "oops/instanceOop.hpp"
  41 #include "oops/methodOop.hpp"
  42 #include "oops/objArrayKlassKlass.hpp"
  43 #include "oops/oop.inline.hpp"
  44 #include "oops/symbol.hpp"
  45 #include "prims/jvmtiExport.hpp"
  46 #include "prims/jvmtiRedefineClassesTrace.hpp"
  47 #include "runtime/fieldDescriptor.hpp"
  48 #include "runtime/handles.inline.hpp"
  49 #include "runtime/javaCalls.hpp"
  50 #include "runtime/mutexLocker.hpp"
  51 #include "services/threadService.hpp"
  52 #include "utilities/dtrace.hpp"
  53 #ifdef TARGET_OS_FAMILY_linux
  54 # include "thread_linux.inline.hpp"
  55 #endif
  56 #ifdef TARGET_OS_FAMILY_solaris
  57 # include "thread_solaris.inline.hpp"
  58 #endif
  59 #ifdef TARGET_OS_FAMILY_windows
  60 # include "thread_windows.inline.hpp"
  61 #endif
  62 #ifndef SERIALGC
  63 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  64 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  65 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  66 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  67 #include "gc_implementation/parNew/parOopClosures.inline.hpp"
  68 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
  69 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
  70 #include "oops/oop.pcgc.inline.hpp"
  71 #endif
  72 #ifdef COMPILER1
  73 #include "c1/c1_Compiler.hpp"
  74 #endif
  75 
  76 #ifdef DTRACE_ENABLED
  77 
  78 HS_DTRACE_PROBE_DECL4(hotspot, class__initialization__required,
  79   char*, intptr_t, oop, intptr_t);
  80 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__recursive,
  81   char*, intptr_t, oop, intptr_t, int);
  82 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__concurrent,
  83   char*, intptr_t, oop, intptr_t, int);
  84 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__erroneous,
  85   char*, intptr_t, oop, intptr_t, int);
  86 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__super__failed,
  87   char*, intptr_t, oop, intptr_t, int);
  88 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__clinit,
  89   char*, intptr_t, oop, intptr_t, int);
  90 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__error,
  91   char*, intptr_t, oop, intptr_t, int);
  92 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
  93   char*, intptr_t, oop, intptr_t, int);
  94 
  95 #define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)          \
  96   {                                                              \
  97     char* data = NULL;                                           \
  98     int len = 0;                                                 \
  99     Symbol* name = (clss)->name();                               \
 100     if (name != NULL) {                                          \
 101       data = (char*)name->bytes();                               \
 102       len = name->utf8_length();                                 \
 103     }                                                            \
 104     HS_DTRACE_PROBE4(hotspot, class__initialization__##type,     \
 105       data, len, (clss)->class_loader(), thread_type);           \
 106   }
 107 
 108 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
 109   {                                                              \
 110     char* data = NULL;                                           \
 111     int len = 0;                                                 \
 112     Symbol* name = (clss)->name();                               \
 113     if (name != NULL) {                                          \
 114       data = (char*)name->bytes();                               \
 115       len = name->utf8_length();                                 \
 116     }                                                            \
 117     HS_DTRACE_PROBE5(hotspot, class__initialization__##type,     \
 118       data, len, (clss)->class_loader(), thread_type, wait);     \
 119   }
 120 
 121 #else //  ndef DTRACE_ENABLED
 122 
 123 #define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)
 124 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait)
 125 
 126 #endif //  ndef DTRACE_ENABLED
 127 
 128 bool instanceKlass::should_be_initialized() const {
 129   return !is_initialized();
 130 }
 131 
 132 klassVtable* instanceKlass::vtable() const {
 133   return new klassVtable(as_klassOop(), start_of_vtable(), vtable_length() / vtableEntry::size());
 134 }
 135 
 136 klassItable* instanceKlass::itable() const {
 137   return new klassItable(as_klassOop());
 138 }
 139 
 140 void instanceKlass::eager_initialize(Thread *thread) {
 141   if (!EagerInitialization) return;
 142 
 143   if (this->is_not_initialized()) {
 144     // abort if the the class has a class initializer
 145     if (this->class_initializer() != NULL) return;
 146 
 147     // abort if it is java.lang.Object (initialization is handled in genesis)
 148     klassOop super = this->super();
 149     if (super == NULL) return;
 150 
 151     // abort if the super class should be initialized
 152     if (!instanceKlass::cast(super)->is_initialized()) return;
 153 
 154     // call body to expose the this pointer
 155     instanceKlassHandle this_oop(thread, this->as_klassOop());
 156     eager_initialize_impl(this_oop);
 157   }
 158 }
 159 
 160 
 161 void instanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
 162   EXCEPTION_MARK;
 163   ObjectLocker ol(this_oop, THREAD);
 164 
 165   // abort if someone beat us to the initialization
 166   if (!this_oop->is_not_initialized()) return;  // note: not equivalent to is_initialized()
 167 
 168   ClassState old_state = this_oop->_init_state;
 169   link_class_impl(this_oop, true, THREAD);
 170   if (HAS_PENDING_EXCEPTION) {
 171     CLEAR_PENDING_EXCEPTION;
 172     // Abort if linking the class throws an exception.
 173 
 174     // Use a test to avoid redundantly resetting the state if there's
 175     // no change.  Set_init_state() asserts that state changes make
 176     // progress, whereas here we might just be spinning in place.
 177     if( old_state != this_oop->_init_state )
 178       this_oop->set_init_state (old_state);
 179   } else {
 180     // linking successfull, mark class as initialized
 181     this_oop->set_init_state (fully_initialized);
 182     // trace
 183     if (TraceClassInitialization) {
 184       ResourceMark rm(THREAD);
 185       tty->print_cr("[Initialized %s without side effects]", this_oop->external_name());
 186     }
 187   }
 188 }
 189 
 190 
 191 // See "The Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
 192 // process. The step comments refers to the procedure described in that section.
 193 // Note: implementation moved to static method to expose the this pointer.
 194 void instanceKlass::initialize(TRAPS) {
 195   if (this->should_be_initialized()) {
 196     HandleMark hm(THREAD);
 197     instanceKlassHandle this_oop(THREAD, this->as_klassOop());
 198     initialize_impl(this_oop, CHECK);
 199     // Note: at this point the class may be initialized
 200     //       OR it may be in the state of being initialized
 201     //       in case of recursive initialization!
 202   } else {
 203     assert(is_initialized(), "sanity check");
 204   }
 205 }
 206 
 207 
 208 bool instanceKlass::verify_code(
 209     instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
 210   // 1) Verify the bytecodes
 211   Verifier::Mode mode =
 212     throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
 213   return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false);
 214 }
 215 
 216 
 217 // Used exclusively by the shared spaces dump mechanism to prevent
 218 // classes mapped into the shared regions in new VMs from appearing linked.
 219 
 220 void instanceKlass::unlink_class() {
 221   assert(is_linked(), "must be linked");
 222   _init_state = loaded;
 223 }
 224 
 225 void instanceKlass::link_class(TRAPS) {
 226   assert(is_loaded(), "must be loaded");
 227   if (!is_linked()) {
 228     instanceKlassHandle this_oop(THREAD, this->as_klassOop());
 229     link_class_impl(this_oop, true, CHECK);
 230   }
 231 }
 232 
 233 // Called to verify that a class can link during initialization, without
 234 // throwing a VerifyError.
 235 bool instanceKlass::link_class_or_fail(TRAPS) {
 236   assert(is_loaded(), "must be loaded");
 237   if (!is_linked()) {
 238     instanceKlassHandle this_oop(THREAD, this->as_klassOop());
 239     link_class_impl(this_oop, false, CHECK_false);
 240   }
 241   return is_linked();
 242 }
 243 
 244 bool instanceKlass::link_class_impl(
 245     instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
 246   // check for error state
 247   if (this_oop->is_in_error_state()) {
 248     ResourceMark rm(THREAD);
 249     THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(),
 250                this_oop->external_name(), false);
 251   }
 252   // return if already verified
 253   if (this_oop->is_linked()) {
 254     return true;
 255   }
 256 
 257   // Timing
 258   // timer handles recursion
 259   assert(THREAD->is_Java_thread(), "non-JavaThread in link_class_impl");
 260   JavaThread* jt = (JavaThread*)THREAD;
 261 
 262   // link super class before linking this class
 263   instanceKlassHandle super(THREAD, this_oop->super());
 264   if (super.not_null()) {
 265     if (super->is_interface()) {  // check if super class is an interface
 266       ResourceMark rm(THREAD);
 267       Exceptions::fthrow(
 268         THREAD_AND_LOCATION,
 269         vmSymbols::java_lang_IncompatibleClassChangeError(),
 270         "class %s has interface %s as super class",
 271         this_oop->external_name(),
 272         super->external_name()
 273       );
 274       return false;
 275     }
 276 
 277     link_class_impl(super, throw_verifyerror, CHECK_false);
 278   }
 279 
 280   // link all interfaces implemented by this class before linking this class
 281   objArrayHandle interfaces (THREAD, this_oop->local_interfaces());
 282   int num_interfaces = interfaces->length();
 283   for (int index = 0; index < num_interfaces; index++) {
 284     HandleMark hm(THREAD);
 285     instanceKlassHandle ih(THREAD, klassOop(interfaces->obj_at(index)));
 286     link_class_impl(ih, throw_verifyerror, CHECK_false);
 287   }
 288 
 289   // in case the class is linked in the process of linking its superclasses
 290   if (this_oop->is_linked()) {
 291     return true;
 292   }
 293 
 294   // trace only the link time for this klass that includes
 295   // the verification time
 296   PerfClassTraceTime vmtimer(ClassLoader::perf_class_link_time(),
 297                              ClassLoader::perf_class_link_selftime(),
 298                              ClassLoader::perf_classes_linked(),
 299                              jt->get_thread_stat()->perf_recursion_counts_addr(),
 300                              jt->get_thread_stat()->perf_timers_addr(),
 301                              PerfClassTraceTime::CLASS_LINK);
 302 
 303   // verification & rewriting
 304   {
 305     ObjectLocker ol(this_oop, THREAD);
 306     // rewritten will have been set if loader constraint error found
 307     // on an earlier link attempt
 308     // don't verify or rewrite if already rewritten
 309     if (!this_oop->is_linked()) {
 310       if (!this_oop->is_rewritten()) {
 311         {
 312           // Timer includes any side effects of class verification (resolution,
 313           // etc), but not recursive entry into verify_code().
 314           PerfClassTraceTime timer(ClassLoader::perf_class_verify_time(),
 315                                    ClassLoader::perf_class_verify_selftime(),
 316                                    ClassLoader::perf_classes_verified(),
 317                                    jt->get_thread_stat()->perf_recursion_counts_addr(),
 318                                    jt->get_thread_stat()->perf_timers_addr(),
 319                                    PerfClassTraceTime::CLASS_VERIFY);
 320           bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD);
 321           if (!verify_ok) {
 322             return false;
 323           }
 324         }
 325 
 326         // Just in case a side-effect of verify linked this class already
 327         // (which can sometimes happen since the verifier loads classes
 328         // using custom class loaders, which are free to initialize things)
 329         if (this_oop->is_linked()) {
 330           return true;
 331         }
 332 
 333         // also sets rewritten
 334         this_oop->rewrite_class(CHECK_false);
 335       }
 336 
 337       // Initialize the vtable and interface table after
 338       // methods have been rewritten since rewrite may
 339       // fabricate new methodOops.
 340       // also does loader constraint checking
 341       if (!this_oop()->is_shared()) {
 342         ResourceMark rm(THREAD);
 343         this_oop->vtable()->initialize_vtable(true, CHECK_false);
 344         this_oop->itable()->initialize_itable(true, CHECK_false);
 345       }
 346 #ifdef ASSERT
 347       else {
 348         ResourceMark rm(THREAD);
 349         this_oop->vtable()->verify(tty, true);
 350         // In case itable verification is ever added.
 351         // this_oop->itable()->verify(tty, true);
 352       }
 353 #endif
 354       this_oop->set_init_state(linked);
 355       if (JvmtiExport::should_post_class_prepare()) {
 356         Thread *thread = THREAD;
 357         assert(thread->is_Java_thread(), "thread->is_Java_thread()");
 358         JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop());
 359       }
 360     }
 361   }
 362   return true;
 363 }
 364 
 365 
 366 // Rewrite the byte codes of all of the methods of a class.
 367 // Three cases:
 368 //    During the link of a newly loaded class.
 369 //    During the preloading of classes to be written to the shared spaces.
 370 //      - Rewrite the methods and update the method entry points.
 371 //
 372 //    During the link of a class in the shared spaces.
 373 //      - The methods were already rewritten, update the metho entry points.
 374 //
 375 // The rewriter must be called exactly once. Rewriting must happen after
 376 // verification but before the first method of the class is executed.
 377 
 378 void instanceKlass::rewrite_class(TRAPS) {
 379   assert(is_loaded(), "must be loaded");
 380   instanceKlassHandle this_oop(THREAD, this->as_klassOop());
 381   if (this_oop->is_rewritten()) {
 382     assert(this_oop()->is_shared(), "rewriting an unshared class?");
 383     return;
 384   }
 385   Rewriter::rewrite(this_oop, CHECK); // No exception can happen here
 386   this_oop->set_rewritten();
 387 }
 388 
 389 
 390 void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
 391   // Make sure klass is linked (verified) before initialization
 392   // A class could already be verified, since it has been reflected upon.
 393   this_oop->link_class(CHECK);
 394 
 395   DTRACE_CLASSINIT_PROBE(required, instanceKlass::cast(this_oop()), -1);
 396 
 397   bool wait = false;
 398 
 399   // refer to the JVM book page 47 for description of steps
 400   // Step 1
 401   { ObjectLocker ol(this_oop, THREAD);
 402 
 403     Thread *self = THREAD; // it's passed the current thread
 404 
 405     // Step 2
 406     // If we were to use wait() instead of waitInterruptibly() then
 407     // we might end up throwing IE from link/symbol resolution sites
 408     // that aren't expected to throw.  This would wreak havoc.  See 6320309.
 409     while(this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {
 410         wait = true;
 411       ol.waitUninterruptibly(CHECK);
 412     }
 413 
 414     // Step 3
 415     if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self)) {
 416       DTRACE_CLASSINIT_PROBE_WAIT(recursive, instanceKlass::cast(this_oop()), -1,wait);
 417       return;
 418     }
 419 
 420     // Step 4
 421     if (this_oop->is_initialized()) {
 422       DTRACE_CLASSINIT_PROBE_WAIT(concurrent, instanceKlass::cast(this_oop()), -1,wait);
 423       return;
 424     }
 425 
 426     // Step 5
 427     if (this_oop->is_in_error_state()) {
 428       DTRACE_CLASSINIT_PROBE_WAIT(erroneous, instanceKlass::cast(this_oop()), -1,wait);
 429       ResourceMark rm(THREAD);
 430       const char* desc = "Could not initialize class ";
 431       const char* className = this_oop->external_name();
 432       size_t msglen = strlen(desc) + strlen(className) + 1;
 433       char* message = NEW_RESOURCE_ARRAY(char, msglen);
 434       if (NULL == message) {
 435         // Out of memory: can't create detailed error message
 436         THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
 437       } else {
 438         jio_snprintf(message, msglen, "%s%s", desc, className);
 439         THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
 440       }
 441     }
 442 
 443     // Step 6
 444     this_oop->set_init_state(being_initialized);
 445     this_oop->set_init_thread(self);
 446   }
 447 
 448   // Step 7
 449   klassOop super_klass = this_oop->super();
 450   if (super_klass != NULL && !this_oop->is_interface() && Klass::cast(super_klass)->should_be_initialized()) {
 451     Klass::cast(super_klass)->initialize(THREAD);
 452 
 453     if (HAS_PENDING_EXCEPTION) {
 454       Handle e(THREAD, PENDING_EXCEPTION);
 455       CLEAR_PENDING_EXCEPTION;
 456       {
 457         EXCEPTION_MARK;
 458         this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads
 459         CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, superclass initialization error is thrown below
 460       }
 461       DTRACE_CLASSINIT_PROBE_WAIT(super__failed, instanceKlass::cast(this_oop()), -1,wait);
 462       THROW_OOP(e());
 463     }
 464   }
 465 
 466   // Step 8
 467   {
 468     assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
 469     JavaThread* jt = (JavaThread*)THREAD;
 470     DTRACE_CLASSINIT_PROBE_WAIT(clinit, instanceKlass::cast(this_oop()), -1,wait);
 471     // Timer includes any side effects of class initialization (resolution,
 472     // etc), but not recursive entry into call_class_initializer().
 473     PerfClassTraceTime timer(ClassLoader::perf_class_init_time(),
 474                              ClassLoader::perf_class_init_selftime(),
 475                              ClassLoader::perf_classes_inited(),
 476                              jt->get_thread_stat()->perf_recursion_counts_addr(),
 477                              jt->get_thread_stat()->perf_timers_addr(),
 478                              PerfClassTraceTime::CLASS_CLINIT);
 479     this_oop->call_class_initializer(THREAD);
 480   }
 481 
 482   // Step 9
 483   if (!HAS_PENDING_EXCEPTION) {
 484     this_oop->set_initialization_state_and_notify(fully_initialized, CHECK);
 485     { ResourceMark rm(THREAD);
 486       debug_only(this_oop->vtable()->verify(tty, true);)
 487     }
 488   }
 489   else {
 490     // Step 10 and 11
 491     Handle e(THREAD, PENDING_EXCEPTION);
 492     CLEAR_PENDING_EXCEPTION;
 493     {
 494       EXCEPTION_MARK;
 495       this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
 496       CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, class initialization error is thrown below
 497     }
 498     DTRACE_CLASSINIT_PROBE_WAIT(error, instanceKlass::cast(this_oop()), -1,wait);
 499     if (e->is_a(SystemDictionary::Error_klass())) {
 500       THROW_OOP(e());
 501     } else {
 502       JavaCallArguments args(e);
 503       THROW_ARG(vmSymbols::java_lang_ExceptionInInitializerError(),
 504                 vmSymbols::throwable_void_signature(),
 505                 &args);
 506     }
 507   }
 508   DTRACE_CLASSINIT_PROBE_WAIT(end, instanceKlass::cast(this_oop()), -1,wait);
 509 }
 510 
 511 
 512 // Note: implementation moved to static method to expose the this pointer.
 513 void instanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
 514   instanceKlassHandle kh(THREAD, this->as_klassOop());
 515   set_initialization_state_and_notify_impl(kh, state, CHECK);
 516 }
 517 
 518 void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
 519   ObjectLocker ol(this_oop, THREAD);
 520   this_oop->set_init_state(state);
 521   ol.notify_all(CHECK);
 522 }
 523 
 524 void instanceKlass::add_implementor(klassOop k) {
 525   assert(Compile_lock->owned_by_self(), "");
 526   // Filter out my subinterfaces.
 527   // (Note: Interfaces are never on the subklass list.)
 528   if (instanceKlass::cast(k)->is_interface()) return;
 529 
 530   // Filter out subclasses whose supers already implement me.
 531   // (Note: CHA must walk subclasses of direct implementors
 532   // in order to locate indirect implementors.)
 533   klassOop sk = instanceKlass::cast(k)->super();
 534   if (sk != NULL && instanceKlass::cast(sk)->implements_interface(as_klassOop()))
 535     // We only need to check one immediate superclass, since the
 536     // implements_interface query looks at transitive_interfaces.
 537     // Any supers of the super have the same (or fewer) transitive_interfaces.
 538     return;
 539 
 540   // Update number of implementors
 541   int i = _nof_implementors++;
 542 
 543   // Record this implementor, if there are not too many already
 544   if (i < implementors_limit) {
 545     assert(_implementors[i] == NULL, "should be exactly one implementor");
 546     oop_store_without_check((oop*)&_implementors[i], k);
 547   } else if (i == implementors_limit) {
 548     // clear out the list on first overflow
 549     for (int i2 = 0; i2 < implementors_limit; i2++)
 550       oop_store_without_check((oop*)&_implementors[i2], NULL);
 551   }
 552 
 553   // The implementor also implements the transitive_interfaces
 554   for (int index = 0; index < local_interfaces()->length(); index++) {
 555     instanceKlass::cast(klassOop(local_interfaces()->obj_at(index)))->add_implementor(k);
 556   }
 557 }
 558 
 559 void instanceKlass::init_implementor() {
 560   for (int i = 0; i < implementors_limit; i++)
 561     oop_store_without_check((oop*)&_implementors[i], NULL);
 562   _nof_implementors = 0;
 563 }
 564 
 565 
 566 void instanceKlass::process_interfaces(Thread *thread) {
 567   // link this class into the implementors list of every interface it implements
 568   KlassHandle this_as_oop (thread, this->as_klassOop());
 569   for (int i = local_interfaces()->length() - 1; i >= 0; i--) {
 570     assert(local_interfaces()->obj_at(i)->is_klass(), "must be a klass");
 571     instanceKlass* interf = instanceKlass::cast(klassOop(local_interfaces()->obj_at(i)));
 572     assert(interf->is_interface(), "expected interface");
 573     interf->add_implementor(this_as_oop());
 574   }
 575 }
 576 
 577 bool instanceKlass::can_be_primary_super_slow() const {
 578   if (is_interface())
 579     return false;
 580   else
 581     return Klass::can_be_primary_super_slow();
 582 }
 583 
 584 objArrayOop instanceKlass::compute_secondary_supers(int num_extra_slots, TRAPS) {
 585   // The secondaries are the implemented interfaces.
 586   instanceKlass* ik = instanceKlass::cast(as_klassOop());
 587   objArrayHandle interfaces (THREAD, ik->transitive_interfaces());
 588   int num_secondaries = num_extra_slots + interfaces->length();
 589   if (num_secondaries == 0) {
 590     return Universe::the_empty_system_obj_array();
 591   } else if (num_extra_slots == 0) {
 592     return interfaces();
 593   } else {
 594     // a mix of both
 595     objArrayOop secondaries = oopFactory::new_system_objArray(num_secondaries, CHECK_NULL);
 596     for (int i = 0; i < interfaces->length(); i++) {
 597       secondaries->obj_at_put(num_extra_slots+i, interfaces->obj_at(i));
 598     }
 599     return secondaries;
 600   }
 601 }
 602 
 603 bool instanceKlass::compute_is_subtype_of(klassOop k) {
 604   if (Klass::cast(k)->is_interface()) {
 605     return implements_interface(k);
 606   } else {
 607     return Klass::compute_is_subtype_of(k);
 608   }
 609 }
 610 
 611 bool instanceKlass::implements_interface(klassOop k) const {
 612   if (as_klassOop() == k) return true;
 613   assert(Klass::cast(k)->is_interface(), "should be an interface class");
 614   for (int i = 0; i < transitive_interfaces()->length(); i++) {
 615     if (transitive_interfaces()->obj_at(i) == k) {
 616       return true;
 617     }
 618   }
 619   return false;
 620 }
 621 
 622 objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) {
 623   if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
 624   if (length > arrayOopDesc::max_array_length(T_OBJECT)) {
 625     report_java_out_of_memory("Requested array size exceeds VM limit");
 626     THROW_OOP_0(Universe::out_of_memory_error_array_size());
 627   }
 628   int size = objArrayOopDesc::object_size(length);
 629   klassOop ak = array_klass(n, CHECK_NULL);
 630   KlassHandle h_ak (THREAD, ak);
 631   objArrayOop o =
 632     (objArrayOop)CollectedHeap::array_allocate(h_ak, size, length, CHECK_NULL);
 633   return o;
 634 }
 635 
 636 instanceOop instanceKlass::register_finalizer(instanceOop i, TRAPS) {
 637   if (TraceFinalizerRegistration) {
 638     tty->print("Registered ");
 639     i->print_value_on(tty);
 640     tty->print_cr(" (" INTPTR_FORMAT ") as finalizable", (address)i);
 641   }
 642   instanceHandle h_i(THREAD, i);
 643   // Pass the handle as argument, JavaCalls::call expects oop as jobjects
 644   JavaValue result(T_VOID);
 645   JavaCallArguments args(h_i);
 646   methodHandle mh (THREAD, Universe::finalizer_register_method());
 647   JavaCalls::call(&result, mh, &args, CHECK_NULL);
 648   return h_i();
 649 }
 650 
 651 instanceOop instanceKlass::allocate_instance(TRAPS) {
 652   bool has_finalizer_flag = has_finalizer(); // Query before possible GC
 653   int size = size_helper();  // Query before forming handle.
 654 
 655   KlassHandle h_k(THREAD, as_klassOop());
 656 
 657   instanceOop i;
 658 
 659   i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
 660   if (has_finalizer_flag && !RegisterFinalizersAtInit) {
 661     i = register_finalizer(i, CHECK_NULL);
 662   }
 663   return i;
 664 }
 665 
 666 instanceOop instanceKlass::allocate_permanent_instance(TRAPS) {
 667   // Finalizer registration occurs in the Object.<init> constructor
 668   // and constructors normally aren't run when allocating perm
 669   // instances so simply disallow finalizable perm objects.  This can
 670   // be relaxed if a need for it is found.
 671   assert(!has_finalizer(), "perm objects not allowed to have finalizers");
 672   int size = size_helper();  // Query before forming handle.
 673   KlassHandle h_k(THREAD, as_klassOop());
 674   instanceOop i = (instanceOop)
 675     CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL);
 676   return i;
 677 }
 678 
 679 void instanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
 680   if (is_interface() || is_abstract()) {
 681     ResourceMark rm(THREAD);
 682     THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
 683               : vmSymbols::java_lang_InstantiationException(), external_name());
 684   }
 685   if (as_klassOop() == SystemDictionary::Class_klass()) {
 686     ResourceMark rm(THREAD);
 687     THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError()
 688               : vmSymbols::java_lang_IllegalAccessException(), external_name());
 689   }
 690 }
 691 
 692 klassOop instanceKlass::array_klass_impl(bool or_null, int n, TRAPS) {
 693   instanceKlassHandle this_oop(THREAD, as_klassOop());
 694   return array_klass_impl(this_oop, or_null, n, THREAD);
 695 }
 696 
 697 klassOop instanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {
 698   if (this_oop->array_klasses() == NULL) {
 699     if (or_null) return NULL;
 700 
 701     ResourceMark rm;
 702     JavaThread *jt = (JavaThread *)THREAD;
 703     {
 704       // Atomic creation of array_klasses
 705       MutexLocker mc(Compile_lock, THREAD);   // for vtables
 706       MutexLocker ma(MultiArray_lock, THREAD);
 707 
 708       // Check if update has already taken place
 709       if (this_oop->array_klasses() == NULL) {
 710         objArrayKlassKlass* oakk =
 711           (objArrayKlassKlass*)Universe::objArrayKlassKlassObj()->klass_part();
 712 
 713         klassOop  k = oakk->allocate_objArray_klass(1, this_oop, CHECK_NULL);
 714         this_oop->set_array_klasses(k);
 715       }
 716     }
 717   }
 718   // _this will always be set at this point
 719   objArrayKlass* oak = (objArrayKlass*)this_oop->array_klasses()->klass_part();
 720   if (or_null) {
 721     return oak->array_klass_or_null(n);
 722   }
 723   return oak->array_klass(n, CHECK_NULL);
 724 }
 725 
 726 klassOop instanceKlass::array_klass_impl(bool or_null, TRAPS) {
 727   return array_klass_impl(or_null, 1, THREAD);
 728 }
 729 
 730 void instanceKlass::call_class_initializer(TRAPS) {
 731   instanceKlassHandle ik (THREAD, as_klassOop());
 732   call_class_initializer_impl(ik, THREAD);
 733 }
 734 
 735 static int call_class_initializer_impl_counter = 0;   // for debugging
 736 
 737 methodOop instanceKlass::class_initializer() {
 738   methodOop clinit = find_method(
 739       vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
 740   if (clinit != NULL && clinit->has_valid_initializer_flags()) {
 741     return clinit;
 742   }
 743   return NULL;
 744 }
 745 
 746 void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
 747   methodHandle h_method(THREAD, this_oop->class_initializer());
 748   assert(!this_oop->is_initialized(), "we cannot initialize twice");
 749   if (TraceClassInitialization) {
 750     tty->print("%d Initializing ", call_class_initializer_impl_counter++);
 751     this_oop->name()->print_value();
 752     tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop());
 753   }
 754   if (h_method() != NULL) {
 755     JavaCallArguments args; // No arguments
 756     JavaValue result(T_VOID);
 757     JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
 758   }
 759 }
 760 
 761 
 762 void instanceKlass::mask_for(methodHandle method, int bci,
 763   InterpreterOopMap* entry_for) {
 764   // Dirty read, then double-check under a lock.
 765   if (_oop_map_cache == NULL) {
 766     // Otherwise, allocate a new one.
 767     MutexLocker x(OopMapCacheAlloc_lock);
 768     // First time use. Allocate a cache in C heap
 769     if (_oop_map_cache == NULL) {
 770       _oop_map_cache = new OopMapCache();
 771     }
 772   }
 773   // _oop_map_cache is constant after init; lookup below does is own locking.
 774   _oop_map_cache->lookup(method, bci, entry_for);
 775 }
 776 
 777 
 778 bool instanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
 779   const int n = fields()->length();
 780   for (int i = 0; i < n; i += next_offset ) {
 781     int name_index = fields()->ushort_at(i + name_index_offset);
 782     int sig_index  = fields()->ushort_at(i + signature_index_offset);
 783     Symbol* f_name = constants()->symbol_at(name_index);
 784     Symbol* f_sig  = constants()->symbol_at(sig_index);
 785     if (f_name == name && f_sig == sig) {
 786       fd->initialize(as_klassOop(), i);
 787       return true;
 788     }
 789   }
 790   return false;
 791 }
 792 
 793 
 794 void instanceKlass::shared_symbols_iterate(SymbolClosure* closure) {
 795   Klass::shared_symbols_iterate(closure);
 796   closure->do_symbol(&_generic_signature);
 797   closure->do_symbol(&_source_file_name);
 798   closure->do_symbol(&_source_debug_extension);
 799 
 800   const int n = fields()->length();
 801   for (int i = 0; i < n; i += next_offset ) {
 802     int name_index = fields()->ushort_at(i + name_index_offset);
 803     closure->do_symbol(constants()->symbol_at_addr(name_index));
 804     int sig_index  = fields()->ushort_at(i + signature_index_offset);
 805     closure->do_symbol(constants()->symbol_at_addr(sig_index));
 806   }
 807 }
 808 
 809 
 810 klassOop instanceKlass::find_interface_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
 811   const int n = local_interfaces()->length();
 812   for (int i = 0; i < n; i++) {
 813     klassOop intf1 = klassOop(local_interfaces()->obj_at(i));
 814     assert(Klass::cast(intf1)->is_interface(), "just checking type");
 815     // search for field in current interface
 816     if (instanceKlass::cast(intf1)->find_local_field(name, sig, fd)) {
 817       assert(fd->is_static(), "interface field must be static");
 818       return intf1;
 819     }
 820     // search for field in direct superinterfaces
 821     klassOop intf2 = instanceKlass::cast(intf1)->find_interface_field(name, sig, fd);
 822     if (intf2 != NULL) return intf2;
 823   }
 824   // otherwise field lookup fails
 825   return NULL;
 826 }
 827 
 828 
 829 klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
 830   // search order according to newest JVM spec (5.4.3.2, p.167).
 831   // 1) search for field in current klass
 832   if (find_local_field(name, sig, fd)) {
 833     return as_klassOop();
 834   }
 835   // 2) search for field recursively in direct superinterfaces
 836   { klassOop intf = find_interface_field(name, sig, fd);
 837     if (intf != NULL) return intf;
 838   }
 839   // 3) apply field lookup recursively if superclass exists
 840   { klassOop supr = super();
 841     if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, fd);
 842   }
 843   // 4) otherwise field lookup fails
 844   return NULL;
 845 }
 846 
 847 
 848 klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, bool is_static, fieldDescriptor* fd) const {
 849   // search order according to newest JVM spec (5.4.3.2, p.167).
 850   // 1) search for field in current klass
 851   if (find_local_field(name, sig, fd)) {
 852     if (fd->is_static() == is_static) return as_klassOop();
 853   }
 854   // 2) search for field recursively in direct superinterfaces
 855   if (is_static) {
 856     klassOop intf = find_interface_field(name, sig, fd);
 857     if (intf != NULL) return intf;
 858   }
 859   // 3) apply field lookup recursively if superclass exists
 860   { klassOop supr = super();
 861     if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, is_static, fd);
 862   }
 863   // 4) otherwise field lookup fails
 864   return NULL;
 865 }
 866 
 867 
 868 bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
 869   int length = fields()->length();
 870   for (int i = 0; i < length; i += next_offset) {
 871     if (offset_from_fields( i ) == offset) {
 872       fd->initialize(as_klassOop(), i);
 873       if (fd->is_static() == is_static) return true;
 874     }
 875   }
 876   return false;
 877 }
 878 
 879 
 880 bool instanceKlass::find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
 881   klassOop klass = as_klassOop();
 882   while (klass != NULL) {
 883     if (instanceKlass::cast(klass)->find_local_field_from_offset(offset, is_static, fd)) {
 884       return true;
 885     }
 886     klass = Klass::cast(klass)->super();
 887   }
 888   return false;
 889 }
 890 
 891 
 892 void instanceKlass::methods_do(void f(methodOop method)) {
 893   int len = methods()->length();
 894   for (int index = 0; index < len; index++) {
 895     methodOop m = methodOop(methods()->obj_at(index));
 896     assert(m->is_method(), "must be method");
 897     f(m);
 898   }
 899 }
 900 
 901 void instanceKlass::do_local_static_fields(FieldClosure* cl) {
 902   fieldDescriptor fd;
 903   int length = fields()->length();
 904   for (int i = 0; i < length; i += next_offset) {
 905     fd.initialize(as_klassOop(), i);
 906     if (fd.is_static()) cl->do_field(&fd);
 907   }
 908 }
 909 
 910 
 911 void instanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
 912   instanceKlassHandle h_this(THREAD, as_klassOop());
 913   do_local_static_fields_impl(h_this, f, CHECK);
 914 }
 915 
 916 
 917 void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
 918   fieldDescriptor fd;
 919   int length = this_oop->fields()->length();
 920   for (int i = 0; i < length; i += next_offset) {
 921     fd.initialize(this_oop(), i);
 922     if (fd.is_static()) { f(&fd, CHECK); } // Do NOT remove {}! (CHECK macro expands into several statements)
 923   }
 924 }
 925 
 926 
 927 static int compare_fields_by_offset(int* a, int* b) {
 928   return a[0] - b[0];
 929 }
 930 
 931 void instanceKlass::do_nonstatic_fields(FieldClosure* cl) {
 932   instanceKlass* super = superklass();
 933   if (super != NULL) {
 934     super->do_nonstatic_fields(cl);
 935   }
 936   fieldDescriptor fd;
 937   int length = fields()->length();
 938   // In DebugInfo nonstatic fields are sorted by offset.
 939   int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1));
 940   int j = 0;
 941   for (int i = 0; i < length; i += next_offset) {
 942     fd.initialize(as_klassOop(), i);
 943     if (!fd.is_static()) {
 944       fields_sorted[j + 0] = fd.offset();
 945       fields_sorted[j + 1] = i;
 946       j += 2;
 947     }
 948   }
 949   if (j > 0) {
 950     length = j;
 951     // _sort_Fn is defined in growableArray.hpp.
 952     qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
 953     for (int i = 0; i < length; i += 2) {
 954       fd.initialize(as_klassOop(), fields_sorted[i + 1]);
 955       assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
 956       cl->do_field(&fd);
 957     }
 958   }
 959   FREE_C_HEAP_ARRAY(int, fields_sorted);
 960 }
 961 
 962 
 963 void instanceKlass::array_klasses_do(void f(klassOop k)) {
 964   if (array_klasses() != NULL)
 965     arrayKlass::cast(array_klasses())->array_klasses_do(f);
 966 }
 967 
 968 
 969 void instanceKlass::with_array_klasses_do(void f(klassOop k)) {
 970   f(as_klassOop());
 971   array_klasses_do(f);
 972 }
 973 
 974 #ifdef ASSERT
 975 static int linear_search(objArrayOop methods, Symbol* name, Symbol* signature) {
 976   int len = methods->length();
 977   for (int index = 0; index < len; index++) {
 978     methodOop m = (methodOop)(methods->obj_at(index));
 979     assert(m->is_method(), "must be method");
 980     if (m->signature() == signature && m->name() == name) {
 981        return index;
 982     }
 983   }
 984   return -1;
 985 }
 986 #endif
 987 
 988 methodOop instanceKlass::find_method(Symbol* name, Symbol* signature) const {
 989   return instanceKlass::find_method(methods(), name, signature);
 990 }
 991 
 992 methodOop instanceKlass::find_method(objArrayOop methods, Symbol* name, Symbol* signature) {
 993   int len = methods->length();
 994   // methods are sorted, so do binary search
 995   int l = 0;
 996   int h = len - 1;
 997   while (l <= h) {
 998     int mid = (l + h) >> 1;
 999     methodOop m = (methodOop)methods->obj_at(mid);
1000     assert(m->is_method(), "must be method");
1001     int res = m->name()->fast_compare(name);
1002     if (res == 0) {
1003       // found matching name; do linear search to find matching signature
1004       // first, quick check for common case
1005       if (m->signature() == signature) return m;
1006       // search downwards through overloaded methods
1007       int i;
1008       for (i = mid - 1; i >= l; i--) {
1009         methodOop m = (methodOop)methods->obj_at(i);
1010         assert(m->is_method(), "must be method");
1011         if (m->name() != name) break;
1012         if (m->signature() == signature) return m;
1013       }
1014       // search upwards
1015       for (i = mid + 1; i <= h; i++) {
1016         methodOop m = (methodOop)methods->obj_at(i);
1017         assert(m->is_method(), "must be method");
1018         if (m->name() != name) break;
1019         if (m->signature() == signature) return m;
1020       }
1021       // not found
1022 #ifdef ASSERT
1023       int index = linear_search(methods, name, signature);
1024       assert(index == -1, err_msg("binary search should have found entry %d", index));
1025 #endif
1026       return NULL;
1027     } else if (res < 0) {
1028       l = mid + 1;
1029     } else {
1030       h = mid - 1;
1031     }
1032   }
1033 #ifdef ASSERT
1034   int index = linear_search(methods, name, signature);
1035   assert(index == -1, err_msg("binary search should have found entry %d", index));
1036 #endif
1037   return NULL;
1038 }
1039 
1040 methodOop instanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
1041   klassOop klass = as_klassOop();
1042   while (klass != NULL) {
1043     methodOop method = instanceKlass::cast(klass)->find_method(name, signature);
1044     if (method != NULL) return method;
1045     klass = instanceKlass::cast(klass)->super();
1046   }
1047   return NULL;
1048 }
1049 
1050 // lookup a method in all the interfaces that this class implements
1051 methodOop instanceKlass::lookup_method_in_all_interfaces(Symbol* name,
1052                                                          Symbol* signature) const {
1053   objArrayOop all_ifs = instanceKlass::cast(as_klassOop())->transitive_interfaces();
1054   int num_ifs = all_ifs->length();
1055   instanceKlass *ik = NULL;
1056   for (int i = 0; i < num_ifs; i++) {
1057     ik = instanceKlass::cast(klassOop(all_ifs->obj_at(i)));
1058     methodOop m = ik->lookup_method(name, signature);
1059     if (m != NULL) {
1060       return m;
1061     }
1062   }
1063   return NULL;
1064 }
1065 
1066 /* jni_id_for_impl for jfieldIds only */
1067 JNIid* instanceKlass::jni_id_for_impl(instanceKlassHandle this_oop, int offset) {
1068   MutexLocker ml(JfieldIdCreation_lock);
1069   // Retry lookup after we got the lock
1070   JNIid* probe = this_oop->jni_ids() == NULL ? NULL : this_oop->jni_ids()->find(offset);
1071   if (probe == NULL) {
1072     // Slow case, allocate new static field identifier
1073     probe = new JNIid(this_oop->as_klassOop(), offset, this_oop->jni_ids());
1074     this_oop->set_jni_ids(probe);
1075   }
1076   return probe;
1077 }
1078 
1079 
1080 /* jni_id_for for jfieldIds only */
1081 JNIid* instanceKlass::jni_id_for(int offset) {
1082   JNIid* probe = jni_ids() == NULL ? NULL : jni_ids()->find(offset);
1083   if (probe == NULL) {
1084     probe = jni_id_for_impl(this->as_klassOop(), offset);
1085   }
1086   return probe;
1087 }
1088 
1089 
1090 // Lookup or create a jmethodID.
1091 // This code is called by the VMThread and JavaThreads so the
1092 // locking has to be done very carefully to avoid deadlocks
1093 // and/or other cache consistency problems.
1094 //
1095 jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h) {
1096   size_t idnum = (size_t)method_h->method_idnum();
1097   jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
1098   size_t length = 0;
1099   jmethodID id = NULL;
1100 
1101   // We use a double-check locking idiom here because this cache is
1102   // performance sensitive. In the normal system, this cache only
1103   // transitions from NULL to non-NULL which is safe because we use
1104   // release_set_methods_jmethod_ids() to advertise the new cache.
1105   // A partially constructed cache should never be seen by a racing
1106   // thread. We also use release_store_ptr() to save a new jmethodID
1107   // in the cache so a partially constructed jmethodID should never be
1108   // seen either. Cache reads of existing jmethodIDs proceed without a
1109   // lock, but cache writes of a new jmethodID requires uniqueness and
1110   // creation of the cache itself requires no leaks so a lock is
1111   // generally acquired in those two cases.
1112   //
1113   // If the RedefineClasses() API has been used, then this cache can
1114   // grow and we'll have transitions from non-NULL to bigger non-NULL.
1115   // Cache creation requires no leaks and we require safety between all
1116   // cache accesses and freeing of the old cache so a lock is generally
1117   // acquired when the RedefineClasses() API has been used.
1118 
1119   if (jmeths != NULL) {
1120     // the cache already exists
1121     if (!ik_h->idnum_can_increment()) {
1122       // the cache can't grow so we can just get the current values
1123       get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1124     } else {
1125       // cache can grow so we have to be more careful
1126       if (Threads::number_of_threads() == 0 ||
1127           SafepointSynchronize::is_at_safepoint()) {
1128         // we're single threaded or at a safepoint - no locking needed
1129         get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1130       } else {
1131         MutexLocker ml(JmethodIdCreation_lock);
1132         get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1133       }
1134     }
1135   }
1136   // implied else:
1137   // we need to allocate a cache so default length and id values are good
1138 
1139   if (jmeths == NULL ||   // no cache yet
1140       length <= idnum ||  // cache is too short
1141       id == NULL) {       // cache doesn't contain entry
1142 
1143     // This function can be called by the VMThread so we have to do all
1144     // things that might block on a safepoint before grabbing the lock.
1145     // Otherwise, we can deadlock with the VMThread or have a cache
1146     // consistency issue. These vars keep track of what we might have
1147     // to free after the lock is dropped.
1148     jmethodID  to_dealloc_id     = NULL;
1149     jmethodID* to_dealloc_jmeths = NULL;
1150 
1151     // may not allocate new_jmeths or use it if we allocate it
1152     jmethodID* new_jmeths = NULL;
1153     if (length <= idnum) {
1154       // allocate a new cache that might be used
1155       size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
1156       new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
1157       memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
1158       // cache size is stored in element[0], other elements offset by one
1159       new_jmeths[0] = (jmethodID)size;
1160     }
1161 
1162     // allocate a new jmethodID that might be used
1163     jmethodID new_id = NULL;
1164     if (method_h->is_old() && !method_h->is_obsolete()) {
1165       // The method passed in is old (but not obsolete), we need to use the current version
1166       methodOop current_method = ik_h->method_with_idnum((int)idnum);
1167       assert(current_method != NULL, "old and but not obsolete, so should exist");
1168       methodHandle current_method_h(current_method == NULL? method_h() : current_method);
1169       new_id = JNIHandles::make_jmethod_id(current_method_h);
1170     } else {
1171       // It is the current version of the method or an obsolete method,
1172       // use the version passed in
1173       new_id = JNIHandles::make_jmethod_id(method_h);
1174     }
1175 
1176     if (Threads::number_of_threads() == 0 ||
1177         SafepointSynchronize::is_at_safepoint()) {
1178       // we're single threaded or at a safepoint - no locking needed
1179       id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
1180                                           &to_dealloc_id, &to_dealloc_jmeths);
1181     } else {
1182       MutexLocker ml(JmethodIdCreation_lock);
1183       id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
1184                                           &to_dealloc_id, &to_dealloc_jmeths);
1185     }
1186 
1187     // The lock has been dropped so we can free resources.
1188     // Free up either the old cache or the new cache if we allocated one.
1189     if (to_dealloc_jmeths != NULL) {
1190       FreeHeap(to_dealloc_jmeths);
1191     }
1192     // free up the new ID since it wasn't needed
1193     if (to_dealloc_id != NULL) {
1194       JNIHandles::destroy_jmethod_id(to_dealloc_id);
1195     }
1196   }
1197   return id;
1198 }
1199 
1200 
1201 // Common code to fetch the jmethodID from the cache or update the
1202 // cache with the new jmethodID. This function should never do anything
1203 // that causes the caller to go to a safepoint or we can deadlock with
1204 // the VMThread or have cache consistency issues.
1205 //
1206 jmethodID instanceKlass::get_jmethod_id_fetch_or_update(
1207             instanceKlassHandle ik_h, size_t idnum, jmethodID new_id,
1208             jmethodID* new_jmeths, jmethodID* to_dealloc_id_p,
1209             jmethodID** to_dealloc_jmeths_p) {
1210   assert(new_id != NULL, "sanity check");
1211   assert(to_dealloc_id_p != NULL, "sanity check");
1212   assert(to_dealloc_jmeths_p != NULL, "sanity check");
1213   assert(Threads::number_of_threads() == 0 ||
1214          SafepointSynchronize::is_at_safepoint() ||
1215          JmethodIdCreation_lock->owned_by_self(), "sanity check");
1216 
1217   // reacquire the cache - we are locked, single threaded or at a safepoint
1218   jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
1219   jmethodID  id     = NULL;
1220   size_t     length = 0;
1221 
1222   if (jmeths == NULL ||                         // no cache yet
1223       (length = (size_t)jmeths[0]) <= idnum) {  // cache is too short
1224     if (jmeths != NULL) {
1225       // copy any existing entries from the old cache
1226       for (size_t index = 0; index < length; index++) {
1227         new_jmeths[index+1] = jmeths[index+1];
1228       }
1229       *to_dealloc_jmeths_p = jmeths;  // save old cache for later delete
1230     }
1231     ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
1232   } else {
1233     // fetch jmethodID (if any) from the existing cache
1234     id = jmeths[idnum+1];
1235     *to_dealloc_jmeths_p = new_jmeths;  // save new cache for later delete
1236   }
1237   if (id == NULL) {
1238     // No matching jmethodID in the existing cache or we have a new
1239     // cache or we just grew the cache. This cache write is done here
1240     // by the first thread to win the foot race because a jmethodID
1241     // needs to be unique once it is generally available.
1242     id = new_id;
1243 
1244     // The jmethodID cache can be read while unlocked so we have to
1245     // make sure the new jmethodID is complete before installing it
1246     // in the cache.
1247     OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
1248   } else {
1249     *to_dealloc_id_p = new_id; // save new id for later delete
1250   }
1251   return id;
1252 }
1253 
1254 
1255 // Common code to get the jmethodID cache length and the jmethodID
1256 // value at index idnum if there is one.
1257 //
1258 void instanceKlass::get_jmethod_id_length_value(jmethodID* cache,
1259        size_t idnum, size_t *length_p, jmethodID* id_p) {
1260   assert(cache != NULL, "sanity check");
1261   assert(length_p != NULL, "sanity check");
1262   assert(id_p != NULL, "sanity check");
1263 
1264   // cache size is stored in element[0], other elements offset by one
1265   *length_p = (size_t)cache[0];
1266   if (*length_p <= idnum) {  // cache is too short
1267     *id_p = NULL;
1268   } else {
1269     *id_p = cache[idnum+1];  // fetch jmethodID (if any)
1270   }
1271 }
1272 
1273 
1274 // Lookup a jmethodID, NULL if not found.  Do no blocking, no allocations, no handles
1275 jmethodID instanceKlass::jmethod_id_or_null(methodOop method) {
1276   size_t idnum = (size_t)method->method_idnum();
1277   jmethodID* jmeths = methods_jmethod_ids_acquire();
1278   size_t length;                                // length assigned as debugging crumb
1279   jmethodID id = NULL;
1280   if (jmeths != NULL &&                         // If there is a cache
1281       (length = (size_t)jmeths[0]) > idnum) {   // and if it is long enough,
1282     id = jmeths[idnum+1];                       // Look up the id (may be NULL)
1283   }
1284   return id;
1285 }
1286 
1287 
1288 // Cache an itable index
1289 void instanceKlass::set_cached_itable_index(size_t idnum, int index) {
1290   int* indices = methods_cached_itable_indices_acquire();
1291   int* to_dealloc_indices = NULL;
1292 
1293   // We use a double-check locking idiom here because this cache is
1294   // performance sensitive. In the normal system, this cache only
1295   // transitions from NULL to non-NULL which is safe because we use
1296   // release_set_methods_cached_itable_indices() to advertise the
1297   // new cache. A partially constructed cache should never be seen
1298   // by a racing thread. Cache reads and writes proceed without a
1299   // lock, but creation of the cache itself requires no leaks so a
1300   // lock is generally acquired in that case.
1301   //
1302   // If the RedefineClasses() API has been used, then this cache can
1303   // grow and we'll have transitions from non-NULL to bigger non-NULL.
1304   // Cache creation requires no leaks and we require safety between all
1305   // cache accesses and freeing of the old cache so a lock is generally
1306   // acquired when the RedefineClasses() API has been used.
1307 
1308   if (indices == NULL || idnum_can_increment()) {
1309     // we need a cache or the cache can grow
1310     MutexLocker ml(JNICachedItableIndex_lock);
1311     // reacquire the cache to see if another thread already did the work
1312     indices = methods_cached_itable_indices_acquire();
1313     size_t length = 0;
1314     // cache size is stored in element[0], other elements offset by one
1315     if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
1316       size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
1317       int* new_indices = NEW_C_HEAP_ARRAY(int, size+1);
1318       new_indices[0] = (int)size;
1319       // copy any existing entries
1320       size_t i;
1321       for (i = 0; i < length; i++) {
1322         new_indices[i+1] = indices[i+1];
1323       }
1324       // Set all the rest to -1
1325       for (i = length; i < size; i++) {
1326         new_indices[i+1] = -1;
1327       }
1328       if (indices != NULL) {
1329         // We have an old cache to delete so save it for after we
1330         // drop the lock.
1331         to_dealloc_indices = indices;
1332       }
1333       release_set_methods_cached_itable_indices(indices = new_indices);
1334     }
1335 
1336     if (idnum_can_increment()) {
1337       // this cache can grow so we have to write to it safely
1338       indices[idnum+1] = index;
1339     }
1340   } else {
1341     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
1342   }
1343 
1344   if (!idnum_can_increment()) {
1345     // The cache cannot grow and this JNI itable index value does not
1346     // have to be unique like a jmethodID. If there is a race to set it,
1347     // it doesn't matter.
1348     indices[idnum+1] = index;
1349   }
1350 
1351   if (to_dealloc_indices != NULL) {
1352     // we allocated a new cache so free the old one
1353     FreeHeap(to_dealloc_indices);
1354   }
1355 }
1356 
1357 
1358 // Retrieve a cached itable index
1359 int instanceKlass::cached_itable_index(size_t idnum) {
1360   int* indices = methods_cached_itable_indices_acquire();
1361   if (indices != NULL && ((size_t)indices[0]) > idnum) {
1362      // indices exist and are long enough, retrieve possible cached
1363     return indices[idnum+1];
1364   }
1365   return -1;
1366 }
1367 
1368 
1369 //
1370 // nmethodBucket is used to record dependent nmethods for
1371 // deoptimization.  nmethod dependencies are actually <klass, method>
1372 // pairs but we really only care about the klass part for purposes of
1373 // finding nmethods which might need to be deoptimized.  Instead of
1374 // recording the method, a count of how many times a particular nmethod
1375 // was recorded is kept.  This ensures that any recording errors are
1376 // noticed since an nmethod should be removed as many times are it's
1377 // added.
1378 //
1379 class nmethodBucket {
1380  private:
1381   nmethod*       _nmethod;
1382   int            _count;
1383   nmethodBucket* _next;
1384 
1385  public:
1386   nmethodBucket(nmethod* nmethod, nmethodBucket* next) {
1387     _nmethod = nmethod;
1388     _next = next;
1389     _count = 1;
1390   }
1391   int count()                             { return _count; }
1392   int increment()                         { _count += 1; return _count; }
1393   int decrement()                         { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
1394   nmethodBucket* next()                   { return _next; }
1395   void set_next(nmethodBucket* b)         { _next = b; }
1396   nmethod* get_nmethod()                  { return _nmethod; }
1397 };
1398 
1399 
1400 //
1401 // Walk the list of dependent nmethods searching for nmethods which
1402 // are dependent on the klassOop that was passed in and mark them for
1403 // deoptimization.  Returns the number of nmethods found.
1404 //
1405 int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
1406   assert_locked_or_safepoint(CodeCache_lock);
1407   int found = 0;
1408   nmethodBucket* b = _dependencies;
1409   while (b != NULL) {
1410     nmethod* nm = b->get_nmethod();
1411     // since dependencies aren't removed until an nmethod becomes a zombie,
1412     // the dependency list may contain nmethods which aren't alive.
1413     if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
1414       if (TraceDependencies) {
1415         ResourceMark rm;
1416         tty->print_cr("Marked for deoptimization");
1417         tty->print_cr("  context = %s", this->external_name());
1418         changes.print();
1419         nm->print();
1420         nm->print_dependencies();
1421       }
1422       nm->mark_for_deoptimization();
1423       found++;
1424     }
1425     b = b->next();
1426   }
1427   return found;
1428 }
1429 
1430 
1431 //
1432 // Add an nmethodBucket to the list of dependencies for this nmethod.
1433 // It's possible that an nmethod has multiple dependencies on this klass
1434 // so a count is kept for each bucket to guarantee that creation and
1435 // deletion of dependencies is consistent.
1436 //
1437 void instanceKlass::add_dependent_nmethod(nmethod* nm) {
1438   assert_locked_or_safepoint(CodeCache_lock);
1439   nmethodBucket* b = _dependencies;
1440   nmethodBucket* last = NULL;
1441   while (b != NULL) {
1442     if (nm == b->get_nmethod()) {
1443       b->increment();
1444       return;
1445     }
1446     b = b->next();
1447   }
1448   _dependencies = new nmethodBucket(nm, _dependencies);
1449 }
1450 
1451 
1452 //
1453 // Decrement count of the nmethod in the dependency list and remove
1454 // the bucket competely when the count goes to 0.  This method must
1455 // find a corresponding bucket otherwise there's a bug in the
1456 // recording of dependecies.
1457 //
1458 void instanceKlass::remove_dependent_nmethod(nmethod* nm) {
1459   assert_locked_or_safepoint(CodeCache_lock);
1460   nmethodBucket* b = _dependencies;
1461   nmethodBucket* last = NULL;
1462   while (b != NULL) {
1463     if (nm == b->get_nmethod()) {
1464       if (b->decrement() == 0) {
1465         if (last == NULL) {
1466           _dependencies = b->next();
1467         } else {
1468           last->set_next(b->next());
1469         }
1470         delete b;
1471       }
1472       return;
1473     }
1474     last = b;
1475     b = b->next();
1476   }
1477 #ifdef ASSERT
1478   tty->print_cr("### %s can't find dependent nmethod:", this->external_name());
1479   nm->print();
1480 #endif // ASSERT
1481   ShouldNotReachHere();
1482 }
1483 
1484 
1485 #ifndef PRODUCT
1486 void instanceKlass::print_dependent_nmethods(bool verbose) {
1487   nmethodBucket* b = _dependencies;
1488   int idx = 0;
1489   while (b != NULL) {
1490     nmethod* nm = b->get_nmethod();
1491     tty->print("[%d] count=%d { ", idx++, b->count());
1492     if (!verbose) {
1493       nm->print_on(tty, "nmethod");
1494       tty->print_cr(" } ");
1495     } else {
1496       nm->print();
1497       nm->print_dependencies();
1498       tty->print_cr("--- } ");
1499     }
1500     b = b->next();
1501   }
1502 }
1503 
1504 
1505 bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
1506   nmethodBucket* b = _dependencies;
1507   while (b != NULL) {
1508     if (nm == b->get_nmethod()) {
1509       return true;
1510     }
1511     b = b->next();
1512   }
1513   return false;
1514 }
1515 #endif //PRODUCT
1516 
1517 
1518 #ifdef ASSERT
1519 template <class T> void assert_is_in(T *p) {
1520   T heap_oop = oopDesc::load_heap_oop(p);
1521   if (!oopDesc::is_null(heap_oop)) {
1522     oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1523     assert(Universe::heap()->is_in(o), "should be in heap");
1524   }
1525 }
1526 template <class T> void assert_is_in_closed_subset(T *p) {
1527   T heap_oop = oopDesc::load_heap_oop(p);
1528   if (!oopDesc::is_null(heap_oop)) {
1529     oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1530     assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
1531   }
1532 }
1533 template <class T> void assert_is_in_reserved(T *p) {
1534   T heap_oop = oopDesc::load_heap_oop(p);
1535   if (!oopDesc::is_null(heap_oop)) {
1536     oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1537     assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
1538   }
1539 }
1540 template <class T> void assert_nothing(T *p) {}
1541 
1542 #else
1543 template <class T> void assert_is_in(T *p) {}
1544 template <class T> void assert_is_in_closed_subset(T *p) {}
1545 template <class T> void assert_is_in_reserved(T *p) {}
1546 template <class T> void assert_nothing(T *p) {}
1547 #endif // ASSERT
1548 
1549 //
1550 // Macros that iterate over areas of oops which are specialized on type of
1551 // oop pointer either narrow or wide, depending on UseCompressedOops
1552 //
1553 // Parameters are:
1554 //   T         - type of oop to point to (either oop or narrowOop)
1555 //   start_p   - starting pointer for region to iterate over
1556 //   count     - number of oops or narrowOops to iterate over
1557 //   do_oop    - action to perform on each oop (it's arbitrary C code which
1558 //               makes it more efficient to put in a macro rather than making
1559 //               it a template function)
1560 //   assert_fn - assert function which is template function because performance
1561 //               doesn't matter when enabled.
1562 #define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
1563   T, start_p, count, do_oop,                \
1564   assert_fn)                                \
1565 {                                           \
1566   T* p         = (T*)(start_p);             \
1567   T* const end = p + (count);               \
1568   while (p < end) {                         \
1569     (assert_fn)(p);                         \
1570     do_oop;                                 \
1571     ++p;                                    \
1572   }                                         \
1573 }
1574 
1575 #define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
1576   T, start_p, count, do_oop,                \
1577   assert_fn)                                \
1578 {                                           \
1579   T* const start = (T*)(start_p);           \
1580   T*       p     = start + (count);         \
1581   while (start < p) {                       \
1582     --p;                                    \
1583     (assert_fn)(p);                         \
1584     do_oop;                                 \
1585   }                                         \
1586 }
1587 
1588 #define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
1589   T, start_p, count, low, high,             \
1590   do_oop, assert_fn)                        \
1591 {                                           \
1592   T* const l = (T*)(low);                   \
1593   T* const h = (T*)(high);                  \
1594   assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
1595          mask_bits((intptr_t)h, sizeof(T)-1) == 0,   \
1596          "bounded region must be properly aligned"); \
1597   T* p       = (T*)(start_p);               \
1598   T* end     = p + (count);                 \
1599   if (p < l) p = l;                         \
1600   if (end > h) end = h;                     \
1601   while (p < end) {                         \
1602     (assert_fn)(p);                         \
1603     do_oop;                                 \
1604     ++p;                                    \
1605   }                                         \
1606 }
1607 
1608 
1609 // The following macros call specialized macros, passing either oop or
1610 // narrowOop as the specialization type.  These test the UseCompressedOops
1611 // flag.
1612 #define InstanceKlass_OOP_ITERATE(start_p, count,    \
1613                                   do_oop, assert_fn) \
1614 {                                                    \
1615   if (UseCompressedOops) {                           \
1616     InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
1617       start_p, count,                                \
1618       do_oop, assert_fn)                             \
1619   } else {                                           \
1620     InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,       \
1621       start_p, count,                                \
1622       do_oop, assert_fn)                             \
1623   }                                                  \
1624 }
1625 
1626 #define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high,    \
1627                                           do_oop, assert_fn) \
1628 {                                                            \
1629   if (UseCompressedOops) {                                   \
1630     InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
1631       start_p, count,                                        \
1632       low, high,                                             \
1633       do_oop, assert_fn)                                     \
1634   } else {                                                   \
1635     InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,       \
1636       start_p, count,                                        \
1637       low, high,                                             \
1638       do_oop, assert_fn)                                     \
1639   }                                                          \
1640 }
1641 
1642 #define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn)            \
1643 {                                                                        \
1644   /* Compute oopmap block range. The common case                         \
1645      is nonstatic_oop_map_size == 1. */                                  \
1646   OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
1647   OopMapBlock* const end_map = map + nonstatic_oop_map_count();          \
1648   if (UseCompressedOops) {                                               \
1649     while (map < end_map) {                                              \
1650       InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop,                   \
1651         obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
1652         do_oop, assert_fn)                                               \
1653       ++map;                                                             \
1654     }                                                                    \
1655   } else {                                                               \
1656     while (map < end_map) {                                              \
1657       InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,                         \
1658         obj->obj_field_addr<oop>(map->offset()), map->count(),           \
1659         do_oop, assert_fn)                                               \
1660       ++map;                                                             \
1661     }                                                                    \
1662   }                                                                      \
1663 }
1664 
1665 #define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn)    \
1666 {                                                                        \
1667   OopMapBlock* const start_map = start_of_nonstatic_oop_maps();          \
1668   OopMapBlock* map             = start_map + nonstatic_oop_map_count();  \
1669   if (UseCompressedOops) {                                               \
1670     while (start_map < map) {                                            \
1671       --map;                                                             \
1672       InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop,           \
1673         obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
1674         do_oop, assert_fn)                                               \
1675     }                                                                    \
1676   } else {                                                               \
1677     while (start_map < map) {                                            \
1678       --map;                                                             \
1679       InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop,                 \
1680         obj->obj_field_addr<oop>(map->offset()), map->count(),           \
1681         do_oop, assert_fn)                                               \
1682     }                                                                    \
1683   }                                                                      \
1684 }
1685 
1686 #define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop,    \
1687                                               assert_fn)                 \
1688 {                                                                        \
1689   /* Compute oopmap block range. The common case is                      \
1690      nonstatic_oop_map_size == 1, so we accept the                       \
1691      usually non-existent extra overhead of examining                    \
1692      all the maps. */                                                    \
1693   OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
1694   OopMapBlock* const end_map = map + nonstatic_oop_map_count();          \
1695   if (UseCompressedOops) {                                               \
1696     while (map < end_map) {                                              \
1697       InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,           \
1698         obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
1699         low, high,                                                       \
1700         do_oop, assert_fn)                                               \
1701       ++map;                                                             \
1702     }                                                                    \
1703   } else {                                                               \
1704     while (map < end_map) {                                              \
1705       InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,                 \
1706         obj->obj_field_addr<oop>(map->offset()), map->count(),           \
1707         low, high,                                                       \
1708         do_oop, assert_fn)                                               \
1709       ++map;                                                             \
1710     }                                                                    \
1711   }                                                                      \
1712 }
1713 
1714 void instanceKlass::follow_static_fields() {
1715   InstanceKlass_OOP_ITERATE( \
1716     start_of_static_fields(), static_oop_field_size(), \
1717     MarkSweep::mark_and_push(p), \
1718     assert_is_in_closed_subset)
1719 }
1720 
1721 #ifndef SERIALGC
1722 void instanceKlass::follow_static_fields(ParCompactionManager* cm) {
1723   InstanceKlass_OOP_ITERATE( \
1724     start_of_static_fields(), static_oop_field_size(), \
1725     PSParallelCompact::mark_and_push(cm, p), \
1726     assert_is_in)
1727 }
1728 #endif // SERIALGC
1729 
1730 void instanceKlass::adjust_static_fields() {
1731   InstanceKlass_OOP_ITERATE( \
1732     start_of_static_fields(), static_oop_field_size(), \
1733     MarkSweep::adjust_pointer(p), \
1734     assert_nothing)
1735 }
1736 
1737 #ifndef SERIALGC
1738 void instanceKlass::update_static_fields() {
1739   InstanceKlass_OOP_ITERATE( \
1740     start_of_static_fields(), static_oop_field_size(), \
1741     PSParallelCompact::adjust_pointer(p), \
1742     assert_nothing)
1743 }
1744 #endif // SERIALGC
1745 
1746 void instanceKlass::oop_follow_contents(oop obj) {
1747   assert(obj != NULL, "can't follow the content of NULL object");
1748   obj->follow_header();
1749   InstanceKlass_OOP_MAP_ITERATE( \
1750     obj, \
1751     MarkSweep::mark_and_push(p), \
1752     assert_is_in_closed_subset)
1753 }
1754 
1755 #ifndef SERIALGC
1756 void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
1757                                         oop obj) {
1758   assert(obj != NULL, "can't follow the content of NULL object");
1759   obj->follow_header(cm);
1760   InstanceKlass_OOP_MAP_ITERATE( \
1761     obj, \
1762     PSParallelCompact::mark_and_push(cm, p), \
1763     assert_is_in)
1764 }
1765 #endif // SERIALGC
1766 
1767 // closure's do_header() method dicates whether the given closure should be
1768 // applied to the klass ptr in the object header.
1769 
1770 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)        \
1771                                                                              \
1772 int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
1773   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
1774   /* header */                                                          \
1775   if (closure->do_header()) {                                           \
1776     obj->oop_iterate_header(closure);                                   \
1777   }                                                                     \
1778   InstanceKlass_OOP_MAP_ITERATE(                                        \
1779     obj,                                                                \
1780     SpecializationStats::                                               \
1781       record_do_oop_call##nv_suffix(SpecializationStats::ik);           \
1782     (closure)->do_oop##nv_suffix(p),                                    \
1783     assert_is_in_closed_subset)                                         \
1784   return size_helper();                                                 \
1785 }
1786 
1787 #ifndef SERIALGC
1788 #define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
1789                                                                                 \
1790 int instanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj,                \
1791                                               OopClosureType* closure) {        \
1792   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
1793   /* header */                                                                  \
1794   if (closure->do_header()) {                                                   \
1795     obj->oop_iterate_header(closure);                                           \
1796   }                                                                             \
1797   /* instance variables */                                                      \
1798   InstanceKlass_OOP_MAP_REVERSE_ITERATE(                                        \
1799     obj,                                                                        \
1800     SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik);\
1801     (closure)->do_oop##nv_suffix(p),                                            \
1802     assert_is_in_closed_subset)                                                 \
1803    return size_helper();                                                        \
1804 }
1805 #endif // !SERIALGC
1806 
1807 #define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
1808                                                                         \
1809 int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj,              \
1810                                                   OopClosureType* closure, \
1811                                                   MemRegion mr) {          \
1812   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
1813   if (closure->do_header()) {                                            \
1814     obj->oop_iterate_header(closure, mr);                                \
1815   }                                                                      \
1816   InstanceKlass_BOUNDED_OOP_MAP_ITERATE(                                 \
1817     obj, mr.start(), mr.end(),                                           \
1818     (closure)->do_oop##nv_suffix(p),                                     \
1819     assert_is_in_closed_subset)                                          \
1820   return size_helper();                                                  \
1821 }
1822 
1823 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
1824 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN)
1825 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1826 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1827 #ifndef SERIALGC
1828 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
1829 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
1830 #endif // !SERIALGC
1831 
1832 void instanceKlass::iterate_static_fields(OopClosure* closure) {
1833     InstanceKlass_OOP_ITERATE( \
1834       start_of_static_fields(), static_oop_field_size(), \
1835       closure->do_oop(p), \
1836       assert_is_in_reserved)
1837 }
1838 
1839 void instanceKlass::iterate_static_fields(OopClosure* closure,
1840                                           MemRegion mr) {
1841   InstanceKlass_BOUNDED_OOP_ITERATE( \
1842     start_of_static_fields(), static_oop_field_size(), \
1843     mr.start(), mr.end(), \
1844     (closure)->do_oop_v(p), \
1845     assert_is_in_closed_subset)
1846 }
1847 
1848 int instanceKlass::oop_adjust_pointers(oop obj) {
1849   int size = size_helper();
1850   InstanceKlass_OOP_MAP_ITERATE( \
1851     obj, \
1852     MarkSweep::adjust_pointer(p), \
1853     assert_is_in)
1854   obj->adjust_header();
1855   return size;
1856 }
1857 
1858 #ifndef SERIALGC
1859 void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
1860   InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
1861     obj, \
1862     if (PSScavenge::should_scavenge(p)) { \
1863       pm->claim_or_forward_depth(p); \
1864     }, \
1865     assert_nothing )
1866 }
1867 
1868 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
1869   InstanceKlass_OOP_MAP_ITERATE( \
1870     obj, \
1871     PSParallelCompact::adjust_pointer(p), \
1872     assert_nothing)
1873   return size_helper();
1874 }
1875 
1876 void instanceKlass::push_static_fields(PSPromotionManager* pm) {
1877   InstanceKlass_OOP_ITERATE( \
1878     start_of_static_fields(), static_oop_field_size(), \
1879     if (PSScavenge::should_scavenge(p)) { \
1880       pm->claim_or_forward_depth(p); \
1881     }, \
1882     assert_nothing )
1883 }
1884 
1885 void instanceKlass::copy_static_fields(ParCompactionManager* cm) {
1886   InstanceKlass_OOP_ITERATE( \
1887     start_of_static_fields(), static_oop_field_size(), \
1888     PSParallelCompact::adjust_pointer(p), \
1889     assert_is_in)
1890 }
1891 #endif // SERIALGC
1892 
1893 // This klass is alive but the implementor link is not followed/updated.
1894 // Subklass and sibling links are handled by Klass::follow_weak_klass_links
1895 
1896 void instanceKlass::follow_weak_klass_links(
1897   BoolObjectClosure* is_alive, OopClosure* keep_alive) {
1898   assert(is_alive->do_object_b(as_klassOop()), "this oop should be live");
1899   if (ClassUnloading) {
1900     for (int i = 0; i < implementors_limit; i++) {
1901       klassOop impl = _implementors[i];
1902       if (impl == NULL)  break;  // no more in the list
1903       if (!is_alive->do_object_b(impl)) {
1904         // remove this guy from the list by overwriting him with the tail
1905         int lasti = --_nof_implementors;
1906         assert(lasti >= i && lasti < implementors_limit, "just checking");
1907         _implementors[i] = _implementors[lasti];
1908         _implementors[lasti] = NULL;
1909         --i; // rerun the loop at this index
1910       }
1911     }
1912   } else {
1913     for (int i = 0; i < implementors_limit; i++) {
1914       keep_alive->do_oop(&adr_implementors()[i]);
1915     }
1916   }
1917   Klass::follow_weak_klass_links(is_alive, keep_alive);
1918 }
1919 
1920 void instanceKlass::remove_unshareable_info() {
1921   Klass::remove_unshareable_info();
1922   init_implementor();
1923 }
1924 
1925 static void clear_all_breakpoints(methodOop m) {
1926   m->clear_all_breakpoints();
1927 }
1928 
1929 void instanceKlass::release_C_heap_structures() {
1930   // Deallocate oop map cache
1931   if (_oop_map_cache != NULL) {
1932     delete _oop_map_cache;
1933     _oop_map_cache = NULL;
1934   }
1935 
1936   // Deallocate JNI identifiers for jfieldIDs
1937   JNIid::deallocate(jni_ids());
1938   set_jni_ids(NULL);
1939 
1940   jmethodID* jmeths = methods_jmethod_ids_acquire();
1941   if (jmeths != (jmethodID*)NULL) {
1942     release_set_methods_jmethod_ids(NULL);
1943     FreeHeap(jmeths);
1944   }
1945 
1946   int* indices = methods_cached_itable_indices_acquire();
1947   if (indices != (int*)NULL) {
1948     release_set_methods_cached_itable_indices(NULL);
1949     FreeHeap(indices);
1950   }
1951 
1952   // release dependencies
1953   nmethodBucket* b = _dependencies;
1954   _dependencies = NULL;
1955   while (b != NULL) {
1956     nmethodBucket* next = b->next();
1957     delete b;
1958     b = next;
1959   }
1960 
1961   // Deallocate breakpoint records
1962   if (breakpoints() != 0x0) {
1963     methods_do(clear_all_breakpoints);
1964     assert(breakpoints() == 0x0, "should have cleared breakpoints");
1965   }
1966 
1967   // deallocate information about previous versions
1968   if (_previous_versions != NULL) {
1969     for (int i = _previous_versions->length() - 1; i >= 0; i--) {
1970       PreviousVersionNode * pv_node = _previous_versions->at(i);
1971       delete pv_node;
1972     }
1973     delete _previous_versions;
1974     _previous_versions = NULL;
1975   }
1976 
1977   // deallocate the cached class file
1978   if (_cached_class_file_bytes != NULL) {
1979     os::free(_cached_class_file_bytes);
1980     _cached_class_file_bytes = NULL;
1981     _cached_class_file_len = 0;
1982   }
1983 
1984   // Decrement symbol reference counts associated with the unloaded class.
1985   if (_name != NULL) _name->decrement_refcount();
1986   // unreference array name derived from this class name (arrays of an unloaded
1987   // class can't be referenced anymore).
1988   if (_array_name != NULL)  _array_name->decrement_refcount();
1989   if (_source_file_name != NULL) _source_file_name->decrement_refcount();
1990   if (_source_debug_extension != NULL) _source_debug_extension->decrement_refcount();
1991   // walk constant pool and decrement symbol reference counts
1992   _constants->unreference_symbols();
1993 }
1994 
1995 void instanceKlass::set_source_file_name(Symbol* n) {
1996   _source_file_name = n;
1997   if (_source_file_name != NULL) _source_file_name->increment_refcount();
1998 }
1999 
2000 void instanceKlass::set_source_debug_extension(Symbol* n) {
2001   _source_debug_extension = n;
2002   if (_source_debug_extension != NULL) _source_debug_extension->increment_refcount();
2003 }
2004 
2005 const char* instanceKlass::signature_name() const {
2006   const char* src = (const char*) (name()->as_C_string());
2007   const int src_length = (int)strlen(src);
2008   char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3);
2009   int src_index = 0;
2010   int dest_index = 0;
2011   dest[dest_index++] = 'L';
2012   while (src_index < src_length) {
2013     dest[dest_index++] = src[src_index++];
2014   }
2015   dest[dest_index++] = ';';
2016   dest[dest_index] = '\0';
2017   return dest;
2018 }
2019 
2020 // different verisons of is_same_class_package
2021 bool instanceKlass::is_same_class_package(klassOop class2) {
2022   klassOop class1 = as_klassOop();
2023   oop classloader1 = instanceKlass::cast(class1)->class_loader();
2024   Symbol* classname1 = Klass::cast(class1)->name();
2025 
2026   if (Klass::cast(class2)->oop_is_objArray()) {
2027     class2 = objArrayKlass::cast(class2)->bottom_klass();
2028   }
2029   oop classloader2;
2030   if (Klass::cast(class2)->oop_is_instance()) {
2031     classloader2 = instanceKlass::cast(class2)->class_loader();
2032   } else {
2033     assert(Klass::cast(class2)->oop_is_typeArray(), "should be type array");
2034     classloader2 = NULL;
2035   }
2036   Symbol* classname2 = Klass::cast(class2)->name();
2037 
2038   return instanceKlass::is_same_class_package(classloader1, classname1,
2039                                               classloader2, classname2);
2040 }
2041 
2042 bool instanceKlass::is_same_class_package(oop classloader2, Symbol* classname2) {
2043   klassOop class1 = as_klassOop();
2044   oop classloader1 = instanceKlass::cast(class1)->class_loader();
2045   Symbol* classname1 = Klass::cast(class1)->name();
2046 
2047   return instanceKlass::is_same_class_package(classloader1, classname1,
2048                                               classloader2, classname2);
2049 }
2050 
2051 // return true if two classes are in the same package, classloader
2052 // and classname information is enough to determine a class's package
2053 bool instanceKlass::is_same_class_package(oop class_loader1, Symbol* class_name1,
2054                                           oop class_loader2, Symbol* class_name2) {
2055   if (class_loader1 != class_loader2) {
2056     return false;
2057   } else if (class_name1 == class_name2) {
2058     return true;                // skip painful bytewise comparison
2059   } else {
2060     ResourceMark rm;
2061 
2062     // The Symbol*'s are in UTF8 encoding. Since we only need to check explicitly
2063     // for ASCII characters ('/', 'L', '['), we can keep them in UTF8 encoding.
2064     // Otherwise, we just compare jbyte values between the strings.
2065     const jbyte *name1 = class_name1->base();
2066     const jbyte *name2 = class_name2->base();
2067 
2068     const jbyte *last_slash1 = UTF8::strrchr(name1, class_name1->utf8_length(), '/');
2069     const jbyte *last_slash2 = UTF8::strrchr(name2, class_name2->utf8_length(), '/');
2070 
2071     if ((last_slash1 == NULL) || (last_slash2 == NULL)) {
2072       // One of the two doesn't have a package.  Only return true
2073       // if the other one also doesn't have a package.
2074       return last_slash1 == last_slash2;
2075     } else {
2076       // Skip over '['s
2077       if (*name1 == '[') {
2078         do {
2079           name1++;
2080         } while (*name1 == '[');
2081         if (*name1 != 'L') {
2082           // Something is terribly wrong.  Shouldn't be here.
2083           return false;
2084         }
2085       }
2086       if (*name2 == '[') {
2087         do {
2088           name2++;
2089         } while (*name2 == '[');
2090         if (*name2 != 'L') {
2091           // Something is terribly wrong.  Shouldn't be here.
2092           return false;
2093         }
2094       }
2095 
2096       // Check that package part is identical
2097       int length1 = last_slash1 - name1;
2098       int length2 = last_slash2 - name2;
2099 
2100       return UTF8::equal(name1, length1, name2, length2);
2101     }
2102   }
2103 }
2104 
2105 // Returns true iff super_method can be overridden by a method in targetclassname
2106 // See JSL 3rd edition 8.4.6.1
2107 // Assumes name-signature match
2108 // "this" is instanceKlass of super_method which must exist
2109 // note that the instanceKlass of the method in the targetclassname has not always been created yet
2110 bool instanceKlass::is_override(methodHandle super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS) {
2111    // Private methods can not be overridden
2112    if (super_method->is_private()) {
2113      return false;
2114    }
2115    // If super method is accessible, then override
2116    if ((super_method->is_protected()) ||
2117        (super_method->is_public())) {
2118      return true;
2119    }
2120    // Package-private methods are not inherited outside of package
2121    assert(super_method->is_package_private(), "must be package private");
2122    return(is_same_class_package(targetclassloader(), targetclassname));
2123 }
2124 
2125 /* defined for now in jvm.cpp, for historical reasons *--
2126 klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle self,
2127                                                      Symbol*& simple_name_result, TRAPS) {
2128   ...
2129 }
2130 */
2131 
2132 // tell if two classes have the same enclosing class (at package level)
2133 bool instanceKlass::is_same_package_member_impl(instanceKlassHandle class1,
2134                                                 klassOop class2_oop, TRAPS) {
2135   if (class2_oop == class1->as_klassOop())          return true;
2136   if (!Klass::cast(class2_oop)->oop_is_instance())  return false;
2137   instanceKlassHandle class2(THREAD, class2_oop);
2138 
2139   // must be in same package before we try anything else
2140   if (!class1->is_same_class_package(class2->class_loader(), class2->name()))
2141     return false;
2142 
2143   // As long as there is an outer1.getEnclosingClass,
2144   // shift the search outward.
2145   instanceKlassHandle outer1 = class1;
2146   for (;;) {
2147     // As we walk along, look for equalities between outer1 and class2.
2148     // Eventually, the walks will terminate as outer1 stops
2149     // at the top-level class around the original class.
2150     bool ignore_inner_is_member;
2151     klassOop next = outer1->compute_enclosing_class(&ignore_inner_is_member,
2152                                                     CHECK_false);
2153     if (next == NULL)  break;
2154     if (next == class2())  return true;
2155     outer1 = instanceKlassHandle(THREAD, next);
2156   }
2157 
2158   // Now do the same for class2.
2159   instanceKlassHandle outer2 = class2;
2160   for (;;) {
2161     bool ignore_inner_is_member;
2162     klassOop next = outer2->compute_enclosing_class(&ignore_inner_is_member,
2163                                                     CHECK_false);
2164     if (next == NULL)  break;
2165     // Might as well check the new outer against all available values.
2166     if (next == class1())  return true;
2167     if (next == outer1())  return true;
2168     outer2 = instanceKlassHandle(THREAD, next);
2169   }
2170 
2171   // If by this point we have not found an equality between the
2172   // two classes, we know they are in separate package members.
2173   return false;
2174 }
2175 
2176 
2177 jint instanceKlass::compute_modifier_flags(TRAPS) const {
2178   klassOop k = as_klassOop();
2179   jint access = access_flags().as_int();
2180 
2181   // But check if it happens to be member class.
2182   typeArrayOop inner_class_list = inner_classes();
2183   int length = (inner_class_list == NULL) ? 0 : inner_class_list->length();
2184   assert (length % instanceKlass::inner_class_next_offset == 0, "just checking");
2185   if (length > 0) {
2186     typeArrayHandle inner_class_list_h(THREAD, inner_class_list);
2187     instanceKlassHandle ik(THREAD, k);
2188     for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) {
2189       int ioff = inner_class_list_h->ushort_at(
2190                       i + instanceKlass::inner_class_inner_class_info_offset);
2191 
2192       // Inner class attribute can be zero, skip it.
2193       // Strange but true:  JVM spec. allows null inner class refs.
2194       if (ioff == 0) continue;
2195 
2196       // only look at classes that are already loaded
2197       // since we are looking for the flags for our self.
2198       Symbol* inner_name = ik->constants()->klass_name_at(ioff);
2199       if ((ik->name() == inner_name)) {
2200         // This is really a member class.
2201         access = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_access_flags_offset);
2202         break;
2203       }
2204     }
2205   }
2206   // Remember to strip ACC_SUPER bit
2207   return (access & (~JVM_ACC_SUPER)) & JVM_ACC_WRITTEN_FLAGS;
2208 }
2209 
2210 jint instanceKlass::jvmti_class_status() const {
2211   jint result = 0;
2212 
2213   if (is_linked()) {
2214     result |= JVMTI_CLASS_STATUS_VERIFIED | JVMTI_CLASS_STATUS_PREPARED;
2215   }
2216 
2217   if (is_initialized()) {
2218     assert(is_linked(), "Class status is not consistent");
2219     result |= JVMTI_CLASS_STATUS_INITIALIZED;
2220   }
2221   if (is_in_error_state()) {
2222     result |= JVMTI_CLASS_STATUS_ERROR;
2223   }
2224   return result;
2225 }
2226 
2227 methodOop instanceKlass::method_at_itable(klassOop holder, int index, TRAPS) {
2228   itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable();
2229   int method_table_offset_in_words = ioe->offset()/wordSize;
2230   int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words())
2231                        / itableOffsetEntry::size();
2232 
2233   for (int cnt = 0 ; ; cnt ++, ioe ++) {
2234     // If the interface isn't implemented by the receiver class,
2235     // the VM should throw IncompatibleClassChangeError.
2236     if (cnt >= nof_interfaces) {
2237       THROW_0(vmSymbols::java_lang_IncompatibleClassChangeError());
2238     }
2239 
2240     klassOop ik = ioe->interface_klass();
2241     if (ik == holder) break;
2242   }
2243 
2244   itableMethodEntry* ime = ioe->first_method_entry(as_klassOop());
2245   methodOop m = ime[index].method();
2246   if (m == NULL) {
2247     THROW_0(vmSymbols::java_lang_AbstractMethodError());
2248   }
2249   return m;
2250 }
2251 
2252 // On-stack replacement stuff
2253 void instanceKlass::add_osr_nmethod(nmethod* n) {
2254   // only one compilation can be active
2255   NEEDS_CLEANUP
2256   // This is a short non-blocking critical region, so the no safepoint check is ok.
2257   OsrList_lock->lock_without_safepoint_check();
2258   assert(n->is_osr_method(), "wrong kind of nmethod");
2259   n->set_osr_link(osr_nmethods_head());
2260   set_osr_nmethods_head(n);
2261   // Raise the highest osr level if necessary
2262   if (TieredCompilation) {
2263     methodOop m = n->method();
2264     m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
2265   }
2266   // Remember to unlock again
2267   OsrList_lock->unlock();
2268 
2269   // Get rid of the osr methods for the same bci that have lower levels.
2270   if (TieredCompilation) {
2271     for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) {
2272       nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true);
2273       if (inv != NULL && inv->is_in_use()) {
2274         inv->make_not_entrant();
2275       }
2276     }
2277   }
2278 }
2279 
2280 
2281 void instanceKlass::remove_osr_nmethod(nmethod* n) {
2282   // This is a short non-blocking critical region, so the no safepoint check is ok.
2283   OsrList_lock->lock_without_safepoint_check();
2284   assert(n->is_osr_method(), "wrong kind of nmethod");
2285   nmethod* last = NULL;
2286   nmethod* cur  = osr_nmethods_head();
2287   int max_level = CompLevel_none;  // Find the max comp level excluding n
2288   methodOop m = n->method();
2289   // Search for match
2290   while(cur != NULL && cur != n) {
2291     if (TieredCompilation) {
2292       // Find max level before n
2293       max_level = MAX2(max_level, cur->comp_level());
2294     }
2295     last = cur;
2296     cur = cur->osr_link();
2297   }
2298   nmethod* next = NULL;
2299   if (cur == n) {
2300     next = cur->osr_link();
2301     if (last == NULL) {
2302       // Remove first element
2303       set_osr_nmethods_head(next);
2304     } else {
2305       last->set_osr_link(next);
2306     }
2307   }
2308   n->set_osr_link(NULL);
2309   if (TieredCompilation) {
2310     cur = next;
2311     while (cur != NULL) {
2312       // Find max level after n
2313       max_level = MAX2(max_level, cur->comp_level());
2314       cur = cur->osr_link();
2315     }
2316     m->set_highest_osr_comp_level(max_level);
2317   }
2318   // Remember to unlock again
2319   OsrList_lock->unlock();
2320 }
2321 
2322 nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci, int comp_level, bool match_level) const {
2323   // This is a short non-blocking critical region, so the no safepoint check is ok.
2324   OsrList_lock->lock_without_safepoint_check();
2325   nmethod* osr = osr_nmethods_head();
2326   nmethod* best = NULL;
2327   while (osr != NULL) {
2328     assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
2329     // There can be a time when a c1 osr method exists but we are waiting
2330     // for a c2 version. When c2 completes its osr nmethod we will trash
2331     // the c1 version and only be able to find the c2 version. However
2332     // while we overflow in the c1 code at back branches we don't want to
2333     // try and switch to the same code as we are already running
2334 
2335     if (osr->method() == m &&
2336         (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) {
2337       if (match_level) {
2338         if (osr->comp_level() == comp_level) {
2339           // Found a match - return it.
2340           OsrList_lock->unlock();
2341           return osr;
2342         }
2343       } else {
2344         if (best == NULL || (osr->comp_level() > best->comp_level())) {
2345           if (osr->comp_level() == CompLevel_highest_tier) {
2346             // Found the best possible - return it.
2347             OsrList_lock->unlock();
2348             return osr;
2349           }
2350           best = osr;
2351         }
2352       }
2353     }
2354     osr = osr->osr_link();
2355   }
2356   OsrList_lock->unlock();
2357   if (best != NULL && best->comp_level() >= comp_level && match_level == false) {
2358     return best;
2359   }
2360   return NULL;
2361 }
2362 
2363 // -----------------------------------------------------------------------------------------------------
2364 #ifndef PRODUCT
2365 
2366 // Printing
2367 
2368 #define BULLET  " - "
2369 
2370 void FieldPrinter::do_field(fieldDescriptor* fd) {
2371   _st->print(BULLET);
2372    if (fd->is_static() || (_obj == NULL)) {
2373      fd->print_on(_st);
2374      _st->cr();
2375    } else {
2376      fd->print_on_for(_st, _obj);
2377      _st->cr();
2378    }
2379 }
2380 
2381 
2382 void instanceKlass::oop_print_on(oop obj, outputStream* st) {
2383   Klass::oop_print_on(obj, st);
2384 
2385   if (as_klassOop() == SystemDictionary::String_klass()) {
2386     typeArrayOop value  = java_lang_String::value(obj);
2387     juint        offset = java_lang_String::offset(obj);
2388     juint        length = java_lang_String::length(obj);
2389     if (value != NULL &&
2390         value->is_typeArray() &&
2391         offset          <= (juint) value->length() &&
2392         offset + length <= (juint) value->length()) {
2393       st->print(BULLET"string: ");
2394       Handle h_obj(obj);
2395       java_lang_String::print(h_obj, st);
2396       st->cr();
2397       if (!WizardMode)  return;  // that is enough
2398     }
2399   }
2400 
2401   st->print_cr(BULLET"---- fields (total size %d words):", oop_size(obj));
2402   FieldPrinter print_nonstatic_field(st, obj);
2403   do_nonstatic_fields(&print_nonstatic_field);
2404 
2405   if (as_klassOop() == SystemDictionary::Class_klass()) {
2406     st->print(BULLET"signature: ");
2407     java_lang_Class::print_signature(obj, st);
2408     st->cr();
2409     klassOop mirrored_klass = java_lang_Class::as_klassOop(obj);
2410     st->print(BULLET"fake entry for mirror: ");
2411     mirrored_klass->print_value_on(st);
2412     st->cr();
2413     st->print(BULLET"fake entry resolved_constructor: ");
2414     methodOop ctor = java_lang_Class::resolved_constructor(obj);
2415     ctor->print_value_on(st);
2416     klassOop array_klass = java_lang_Class::array_klass(obj);
2417     st->cr();
2418     st->print(BULLET"fake entry for array: ");
2419     array_klass->print_value_on(st);
2420     st->cr();
2421   } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
2422     st->print(BULLET"signature: ");
2423     java_dyn_MethodType::print_signature(obj, st);
2424     st->cr();
2425   }
2426 }
2427 
2428 #endif //PRODUCT
2429 
2430 void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
2431   st->print("a ");
2432   name()->print_value_on(st);
2433   obj->print_address_on(st);
2434   if (as_klassOop() == SystemDictionary::String_klass()
2435       && java_lang_String::value(obj) != NULL) {
2436     ResourceMark rm;
2437     int len = java_lang_String::length(obj);
2438     int plen = (len < 24 ? len : 12);
2439     char* str = java_lang_String::as_utf8_string(obj, 0, plen);
2440     st->print(" = \"%s\"", str);
2441     if (len > plen)
2442       st->print("...[%d]", len);
2443   } else if (as_klassOop() == SystemDictionary::Class_klass()) {
2444     klassOop k = java_lang_Class::as_klassOop(obj);
2445     st->print(" = ");
2446     if (k != NULL) {
2447       k->print_value_on(st);
2448     } else {
2449       const char* tname = type2name(java_lang_Class::primitive_type(obj));
2450       st->print("%s", tname ? tname : "type?");
2451     }
2452   } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
2453     st->print(" = ");
2454     java_dyn_MethodType::print_signature(obj, st);
2455   } else if (java_lang_boxing_object::is_instance(obj)) {
2456     st->print(" = ");
2457     java_lang_boxing_object::print(obj, st);
2458   }
2459 }
2460 
2461 const char* instanceKlass::internal_name() const {
2462   return external_name();
2463 }
2464 
2465 // Verification
2466 
2467 class VerifyFieldClosure: public OopClosure {
2468  protected:
2469   template <class T> void do_oop_work(T* p) {
2470     guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap");
2471     oop obj = oopDesc::load_decode_heap_oop(p);
2472     if (!obj->is_oop_or_null()) {
2473       tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj);
2474       Universe::print();
2475       guarantee(false, "boom");
2476     }
2477   }
2478  public:
2479   virtual void do_oop(oop* p)       { VerifyFieldClosure::do_oop_work(p); }
2480   virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
2481 };
2482 
2483 void instanceKlass::oop_verify_on(oop obj, outputStream* st) {
2484   Klass::oop_verify_on(obj, st);
2485   VerifyFieldClosure blk;
2486   oop_oop_iterate(obj, &blk);
2487 }
2488 
2489 #ifndef PRODUCT
2490 
2491 void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) {
2492   // This verification code is disabled.  JDK_Version::is_gte_jdk14x_version()
2493   // cannot be called since this function is called before the VM is
2494   // able to determine what JDK version is running with.
2495   // The check below always is false since 1.4.
2496   return;
2497 
2498   // This verification code temporarily disabled for the 1.4
2499   // reflection implementation since java.lang.Class now has
2500   // Java-level instance fields. Should rewrite this to handle this
2501   // case.
2502   if (!(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)) {
2503     // Verify that java.lang.Class instances have a fake oop field added.
2504     instanceKlass* ik = instanceKlass::cast(k);
2505 
2506     // Check that we have the right class
2507     static bool first_time = true;
2508     guarantee(k == SystemDictionary::Class_klass() && first_time, "Invalid verify of maps");
2509     first_time = false;
2510     const int extra = java_lang_Class::number_of_fake_oop_fields;
2511     guarantee(ik->nonstatic_field_size() == extra, "just checking");
2512     guarantee(ik->nonstatic_oop_map_count() == 1, "just checking");
2513     guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking");
2514 
2515     // Check that the map is (2,extra)
2516     int offset = java_lang_Class::klass_offset;
2517 
2518     OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
2519     guarantee(map->offset() == offset && map->count() == (unsigned int) extra,
2520               "sanity");
2521   }
2522 }
2523 
2524 #endif // ndef PRODUCT
2525 
2526 // JNIid class for jfieldIDs only
2527 // Note to reviewers:
2528 // These JNI functions are just moved over to column 1 and not changed
2529 // in the compressed oops workspace.
2530 JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
2531   _holder = holder;
2532   _offset = offset;
2533   _next = next;
2534   debug_only(_is_static_field_id = false;)
2535 }
2536 
2537 
2538 JNIid* JNIid::find(int offset) {
2539   JNIid* current = this;
2540   while (current != NULL) {
2541     if (current->offset() == offset) return current;
2542     current = current->next();
2543   }
2544   return NULL;
2545 }
2546 
2547 void JNIid::oops_do(OopClosure* f) {
2548   for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
2549     f->do_oop(cur->holder_addr());
2550   }
2551 }
2552 
2553 void JNIid::deallocate(JNIid* current) {
2554   while (current != NULL) {
2555     JNIid* next = current->next();
2556     delete current;
2557     current = next;
2558   }
2559 }
2560 
2561 
2562 void JNIid::verify(klassOop holder) {
2563   int first_field_offset  = instanceKlass::cast(holder)->offset_of_static_fields();
2564   int end_field_offset;
2565   end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
2566 
2567   JNIid* current = this;
2568   while (current != NULL) {
2569     guarantee(current->holder() == holder, "Invalid klass in JNIid");
2570 #ifdef ASSERT
2571     int o = current->offset();
2572     if (current->is_static_field_id()) {
2573       guarantee(o >= first_field_offset  && o < end_field_offset,  "Invalid static field offset in JNIid");
2574     }
2575 #endif
2576     current = current->next();
2577   }
2578 }
2579 
2580 
2581 #ifdef ASSERT
2582 void instanceKlass::set_init_state(ClassState state) {
2583   bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
2584                                                : (_init_state < state);
2585   assert(good_state || state == allocated, "illegal state transition");
2586   _init_state = state;
2587 }
2588 #endif
2589 
2590 
2591 // RedefineClasses() support for previous versions:
2592 
2593 // Add an information node that contains weak references to the
2594 // interesting parts of the previous version of the_class.
2595 // This is also where we clean out any unused weak references.
2596 // Note that while we delete nodes from the _previous_versions
2597 // array, we never delete the array itself until the klass is
2598 // unloaded. The has_been_redefined() query depends on that fact.
2599 //
2600 void instanceKlass::add_previous_version(instanceKlassHandle ikh,
2601        BitMap* emcp_methods, int emcp_method_count) {
2602   assert(Thread::current()->is_VM_thread(),
2603          "only VMThread can add previous versions");
2604 
2605   if (_previous_versions == NULL) {
2606     // This is the first previous version so make some space.
2607     // Start with 2 elements under the assumption that the class
2608     // won't be redefined much.
2609     _previous_versions =  new (ResourceObj::C_HEAP)
2610                             GrowableArray<PreviousVersionNode *>(2, true);
2611   }
2612 
2613   // RC_TRACE macro has an embedded ResourceMark
2614   RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d",
2615     ikh->external_name(), _previous_versions->length(), emcp_method_count));
2616   constantPoolHandle cp_h(ikh->constants());
2617   jobject cp_ref;
2618   if (cp_h->is_shared()) {
2619     // a shared ConstantPool requires a regular reference; a weak
2620     // reference would be collectible
2621     cp_ref = JNIHandles::make_global(cp_h);
2622   } else {
2623     cp_ref = JNIHandles::make_weak_global(cp_h);
2624   }
2625   PreviousVersionNode * pv_node = NULL;
2626   objArrayOop old_methods = ikh->methods();
2627 
2628   if (emcp_method_count == 0) {
2629     // non-shared ConstantPool gets a weak reference
2630     pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), NULL);
2631     RC_TRACE(0x00000400,
2632       ("add: all methods are obsolete; flushing any EMCP weak refs"));
2633   } else {
2634     int local_count = 0;
2635     GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP)
2636       GrowableArray<jweak>(emcp_method_count, true);
2637     for (int i = 0; i < old_methods->length(); i++) {
2638       if (emcp_methods->at(i)) {
2639         // this old method is EMCP so save a weak ref
2640         methodOop old_method = (methodOop) old_methods->obj_at(i);
2641         methodHandle old_method_h(old_method);
2642         jweak method_ref = JNIHandles::make_weak_global(old_method_h);
2643         method_refs->append(method_ref);
2644         if (++local_count >= emcp_method_count) {
2645           // no more EMCP methods so bail out now
2646           break;
2647         }
2648       }
2649     }
2650     // non-shared ConstantPool gets a weak reference
2651     pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), method_refs);
2652   }
2653 
2654   _previous_versions->append(pv_node);
2655 
2656   // Using weak references allows the interesting parts of previous
2657   // classes to be GC'ed when they are no longer needed. Since the
2658   // caller is the VMThread and we are at a safepoint, this is a good
2659   // time to clear out unused weak references.
2660 
2661   RC_TRACE(0x00000400, ("add: previous version length=%d",
2662     _previous_versions->length()));
2663 
2664   // skip the last entry since we just added it
2665   for (int i = _previous_versions->length() - 2; i >= 0; i--) {
2666     // check the previous versions array for a GC'ed weak refs
2667     pv_node = _previous_versions->at(i);
2668     cp_ref = pv_node->prev_constant_pool();
2669     assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
2670     if (cp_ref == NULL) {
2671       delete pv_node;
2672       _previous_versions->remove_at(i);
2673       // Since we are traversing the array backwards, we don't have to
2674       // do anything special with the index.
2675       continue;  // robustness
2676     }
2677 
2678     constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2679     if (cp == NULL) {
2680       // this entry has been GC'ed so remove it
2681       delete pv_node;
2682       _previous_versions->remove_at(i);
2683       // Since we are traversing the array backwards, we don't have to
2684       // do anything special with the index.
2685       continue;
2686     } else {
2687       RC_TRACE(0x00000400, ("add: previous version @%d is alive", i));
2688     }
2689 
2690     GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2691     if (method_refs != NULL) {
2692       RC_TRACE(0x00000400, ("add: previous methods length=%d",
2693         method_refs->length()));
2694       for (int j = method_refs->length() - 1; j >= 0; j--) {
2695         jweak method_ref = method_refs->at(j);
2696         assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
2697         if (method_ref == NULL) {
2698           method_refs->remove_at(j);
2699           // Since we are traversing the array backwards, we don't have to
2700           // do anything special with the index.
2701           continue;  // robustness
2702         }
2703 
2704         methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2705         if (method == NULL || emcp_method_count == 0) {
2706           // This method entry has been GC'ed or the current
2707           // RedefineClasses() call has made all methods obsolete
2708           // so remove it.
2709           JNIHandles::destroy_weak_global(method_ref);
2710           method_refs->remove_at(j);
2711         } else {
2712           // RC_TRACE macro has an embedded ResourceMark
2713           RC_TRACE(0x00000400,
2714             ("add: %s(%s): previous method @%d in version @%d is alive",
2715             method->name()->as_C_string(), method->signature()->as_C_string(),
2716             j, i));
2717         }
2718       }
2719     }
2720   }
2721 
2722   int obsolete_method_count = old_methods->length() - emcp_method_count;
2723 
2724   if (emcp_method_count != 0 && obsolete_method_count != 0 &&
2725       _previous_versions->length() > 1) {
2726     // We have a mix of obsolete and EMCP methods. If there is more
2727     // than the previous version that we just added, then we have to
2728     // clear out any matching EMCP method entries the hard way.
2729     int local_count = 0;
2730     for (int i = 0; i < old_methods->length(); i++) {
2731       if (!emcp_methods->at(i)) {
2732         // only obsolete methods are interesting
2733         methodOop old_method = (methodOop) old_methods->obj_at(i);
2734         Symbol* m_name = old_method->name();
2735         Symbol* m_signature = old_method->signature();
2736 
2737         // skip the last entry since we just added it
2738         for (int j = _previous_versions->length() - 2; j >= 0; j--) {
2739           // check the previous versions array for a GC'ed weak refs
2740           pv_node = _previous_versions->at(j);
2741           cp_ref = pv_node->prev_constant_pool();
2742           assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
2743           if (cp_ref == NULL) {
2744             delete pv_node;
2745             _previous_versions->remove_at(j);
2746             // Since we are traversing the array backwards, we don't have to
2747             // do anything special with the index.
2748             continue;  // robustness
2749           }
2750 
2751           constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2752           if (cp == NULL) {
2753             // this entry has been GC'ed so remove it
2754             delete pv_node;
2755             _previous_versions->remove_at(j);
2756             // Since we are traversing the array backwards, we don't have to
2757             // do anything special with the index.
2758             continue;
2759           }
2760 
2761           GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2762           if (method_refs == NULL) {
2763             // We have run into a PreviousVersion generation where
2764             // all methods were made obsolete during that generation's
2765             // RedefineClasses() operation. At the time of that
2766             // operation, all EMCP methods were flushed so we don't
2767             // have to go back any further.
2768             //
2769             // A NULL method_refs is different than an empty method_refs.
2770             // We cannot infer any optimizations about older generations
2771             // from an empty method_refs for the current generation.
2772             break;
2773           }
2774 
2775           for (int k = method_refs->length() - 1; k >= 0; k--) {
2776             jweak method_ref = method_refs->at(k);
2777             assert(method_ref != NULL,
2778               "weak method ref was unexpectedly cleared");
2779             if (method_ref == NULL) {
2780               method_refs->remove_at(k);
2781               // Since we are traversing the array backwards, we don't
2782               // have to do anything special with the index.
2783               continue;  // robustness
2784             }
2785 
2786             methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2787             if (method == NULL) {
2788               // this method entry has been GC'ed so skip it
2789               JNIHandles::destroy_weak_global(method_ref);
2790               method_refs->remove_at(k);
2791               continue;
2792             }
2793 
2794             if (method->name() == m_name &&
2795                 method->signature() == m_signature) {
2796               // The current RedefineClasses() call has made all EMCP
2797               // versions of this method obsolete so mark it as obsolete
2798               // and remove the weak ref.
2799               RC_TRACE(0x00000400,
2800                 ("add: %s(%s): flush obsolete method @%d in version @%d",
2801                 m_name->as_C_string(), m_signature->as_C_string(), k, j));
2802 
2803               method->set_is_obsolete();
2804               JNIHandles::destroy_weak_global(method_ref);
2805               method_refs->remove_at(k);
2806               break;
2807             }
2808           }
2809 
2810           // The previous loop may not find a matching EMCP method, but
2811           // that doesn't mean that we can optimize and not go any
2812           // further back in the PreviousVersion generations. The EMCP
2813           // method for this generation could have already been GC'ed,
2814           // but there still may be an older EMCP method that has not
2815           // been GC'ed.
2816         }
2817 
2818         if (++local_count >= obsolete_method_count) {
2819           // no more obsolete methods so bail out now
2820           break;
2821         }
2822       }
2823     }
2824   }
2825 } // end add_previous_version()
2826 
2827 
2828 // Determine if instanceKlass has a previous version.
2829 bool instanceKlass::has_previous_version() const {
2830   if (_previous_versions == NULL) {
2831     // no previous versions array so answer is easy
2832     return false;
2833   }
2834 
2835   for (int i = _previous_versions->length() - 1; i >= 0; i--) {
2836     // Check the previous versions array for an info node that hasn't
2837     // been GC'ed
2838     PreviousVersionNode * pv_node = _previous_versions->at(i);
2839 
2840     jobject cp_ref = pv_node->prev_constant_pool();
2841     assert(cp_ref != NULL, "cp reference was unexpectedly cleared");
2842     if (cp_ref == NULL) {
2843       continue;  // robustness
2844     }
2845 
2846     constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2847     if (cp != NULL) {
2848       // we have at least one previous version
2849       return true;
2850     }
2851 
2852     // We don't have to check the method refs. If the constant pool has
2853     // been GC'ed then so have the methods.
2854   }
2855 
2856   // all of the underlying nodes' info has been GC'ed
2857   return false;
2858 } // end has_previous_version()
2859 
2860 methodOop instanceKlass::method_with_idnum(int idnum) {
2861   methodOop m = NULL;
2862   if (idnum < methods()->length()) {
2863     m = (methodOop) methods()->obj_at(idnum);
2864   }
2865   if (m == NULL || m->method_idnum() != idnum) {
2866     for (int index = 0; index < methods()->length(); ++index) {
2867       m = (methodOop) methods()->obj_at(index);
2868       if (m->method_idnum() == idnum) {
2869         return m;
2870       }
2871     }
2872   }
2873   return m;
2874 }
2875 
2876 
2877 // Set the annotation at 'idnum' to 'anno'.
2878 // We don't want to create or extend the array if 'anno' is NULL, since that is the
2879 // default value.  However, if the array exists and is long enough, we must set NULL values.
2880 void instanceKlass::set_methods_annotations_of(int idnum, typeArrayOop anno, objArrayOop* md_p) {
2881   objArrayOop md = *md_p;
2882   if (md != NULL && md->length() > idnum) {
2883     md->obj_at_put(idnum, anno);
2884   } else if (anno != NULL) {
2885     // create the array
2886     int length = MAX2(idnum+1, (int)_idnum_allocated_count);
2887     md = oopFactory::new_system_objArray(length, Thread::current());
2888     if (*md_p != NULL) {
2889       // copy the existing entries
2890       for (int index = 0; index < (*md_p)->length(); index++) {
2891         md->obj_at_put(index, (*md_p)->obj_at(index));
2892       }
2893     }
2894     set_annotations(md, md_p);
2895     md->obj_at_put(idnum, anno);
2896   } // if no array and idnum isn't included there is nothing to do
2897 }
2898 
2899 // Construct a PreviousVersionNode entry for the array hung off
2900 // the instanceKlass.
2901 PreviousVersionNode::PreviousVersionNode(jobject prev_constant_pool,
2902   bool prev_cp_is_weak, GrowableArray<jweak>* prev_EMCP_methods) {
2903 
2904   _prev_constant_pool = prev_constant_pool;
2905   _prev_cp_is_weak = prev_cp_is_weak;
2906   _prev_EMCP_methods = prev_EMCP_methods;
2907 }
2908 
2909 
2910 // Destroy a PreviousVersionNode
2911 PreviousVersionNode::~PreviousVersionNode() {
2912   if (_prev_constant_pool != NULL) {
2913     if (_prev_cp_is_weak) {
2914       JNIHandles::destroy_weak_global(_prev_constant_pool);
2915     } else {
2916       JNIHandles::destroy_global(_prev_constant_pool);
2917     }
2918   }
2919 
2920   if (_prev_EMCP_methods != NULL) {
2921     for (int i = _prev_EMCP_methods->length() - 1; i >= 0; i--) {
2922       jweak method_ref = _prev_EMCP_methods->at(i);
2923       if (method_ref != NULL) {
2924         JNIHandles::destroy_weak_global(method_ref);
2925       }
2926     }
2927     delete _prev_EMCP_methods;
2928   }
2929 }
2930 
2931 
2932 // Construct a PreviousVersionInfo entry
2933 PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
2934   _prev_constant_pool_handle = constantPoolHandle();  // NULL handle
2935   _prev_EMCP_method_handles = NULL;
2936 
2937   jobject cp_ref = pv_node->prev_constant_pool();
2938   assert(cp_ref != NULL, "constant pool ref was unexpectedly cleared");
2939   if (cp_ref == NULL) {
2940     return;  // robustness
2941   }
2942 
2943   constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2944   if (cp == NULL) {
2945     // Weak reference has been GC'ed. Since the constant pool has been
2946     // GC'ed, the methods have also been GC'ed.
2947     return;
2948   }
2949 
2950   // make the constantPoolOop safe to return
2951   _prev_constant_pool_handle = constantPoolHandle(cp);
2952 
2953   GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2954   if (method_refs == NULL) {
2955     // the instanceKlass did not have any EMCP methods
2956     return;
2957   }
2958 
2959   _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);
2960 
2961   int n_methods = method_refs->length();
2962   for (int i = 0; i < n_methods; i++) {
2963     jweak method_ref = method_refs->at(i);
2964     assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
2965     if (method_ref == NULL) {
2966       continue;  // robustness
2967     }
2968 
2969     methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2970     if (method == NULL) {
2971       // this entry has been GC'ed so skip it
2972       continue;
2973     }
2974 
2975     // make the methodOop safe to return
2976     _prev_EMCP_method_handles->append(methodHandle(method));
2977   }
2978 }
2979 
2980 
2981 // Destroy a PreviousVersionInfo
2982 PreviousVersionInfo::~PreviousVersionInfo() {
2983   // Since _prev_EMCP_method_handles is not C-heap allocated, we
2984   // don't have to delete it.
2985 }
2986 
2987 
2988 // Construct a helper for walking the previous versions array
2989 PreviousVersionWalker::PreviousVersionWalker(instanceKlass *ik) {
2990   _previous_versions = ik->previous_versions();
2991   _current_index = 0;
2992   // _hm needs no initialization
2993   _current_p = NULL;
2994 }
2995 
2996 
2997 // Destroy a PreviousVersionWalker
2998 PreviousVersionWalker::~PreviousVersionWalker() {
2999   // Delete the current info just in case the caller didn't walk to
3000   // the end of the previous versions list. No harm if _current_p is
3001   // already NULL.
3002   delete _current_p;
3003 
3004   // When _hm is destroyed, all the Handles returned in
3005   // PreviousVersionInfo objects will be destroyed.
3006   // Also, after this destructor is finished it will be
3007   // safe to delete the GrowableArray allocated in the
3008   // PreviousVersionInfo objects.
3009 }
3010 
3011 
3012 // Return the interesting information for the next previous version
3013 // of the klass. Returns NULL if there are no more previous versions.
3014 PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
3015   if (_previous_versions == NULL) {
3016     // no previous versions so nothing to return
3017     return NULL;
3018   }
3019 
3020   delete _current_p;  // cleanup the previous info for the caller
3021   _current_p = NULL;  // reset to NULL so we don't delete same object twice
3022 
3023   int length = _previous_versions->length();
3024 
3025   while (_current_index < length) {
3026     PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
3027     PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP)
3028                                           PreviousVersionInfo(pv_node);
3029 
3030     constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
3031     if (cp_h.is_null()) {
3032       delete pv_info;
3033 
3034       // The underlying node's info has been GC'ed so try the next one.
3035       // We don't have to check the methods. If the constant pool has
3036       // GC'ed then so have the methods.
3037       continue;
3038     }
3039 
3040     // Found a node with non GC'ed info so return it. The caller will
3041     // need to delete pv_info when they are done with it.
3042     _current_p = pv_info;
3043     return pv_info;
3044   }
3045 
3046   // all of the underlying nodes' info has been GC'ed
3047   return NULL;
3048 } // end next_previous_version()