/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/verifier.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/oopMapCache.hpp"
#include "interpreter/rewriter.hpp"
#include "jvmtifiles/jvmti.h"
#include "memory/genOopClosures.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/permGen.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/objArrayKlassKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbolOop.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/threadService.hpp"
#include "utilities/dtrace.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifndef SERIALGC
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#endif
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif

#ifdef DTRACE_ENABLED

HS_DTRACE_PROBE_DECL4(hotspot, class__initialization__required,
  char*, intptr_t, oop, intptr_t);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__recursive,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__concurrent,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__erroneous,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__super__failed,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__clinit,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__error,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
  char*, intptr_t, oop, intptr_t, int);

#define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)          \
  {                                                              \
    char* data = NULL;                                           \
    int len = 0;                                                 \
    symbolOop name = (clss)->name();                             \
    if (name != NULL) {                                          \
      data = (char*)name->bytes();                               \
      len = name->utf8_length();                                 \
    }                                                            \
    HS_DTRACE_PROBE4(hotspot, class__initialization__##type,     \
      data, len, (clss)->class_loader(), thread_type);           \
  }

#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
  {                                                              \
    char* data = NULL;                                           \
    int len = 0;                                                 \
    symbolOop name = (clss)->name();                             \
    if (name != NULL) {                                          \
      data = (char*)name->bytes();                               \
      len = name->utf8_length();                                 \
    }                                                            \
    HS_DTRACE_PROBE5(hotspot, class__initialization__##type,     \
      data, len, (clss)->class_loader(), thread_type, wait);     \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)
#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait)

#endif //  ndef DTRACE_ENABLED

bool instanceKlass::should_be_initialized() const {
  return !is_initialized();
}

klassVtable* instanceKlass::vtable() const {
  return new klassVtable(as_klassOop(), start_of_vtable(), vtable_length() / vtableEntry::size());
}

klassItable* instanceKlass::itable() const {
  return new klassItable(as_klassOop());
}

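// Attempt to initialize a class at load time when -XX:+EagerInitialization
// is set.  Only done when no user code can run as a result: the class must
// have no <clinit>, must not be java.lang.Object (handled in genesis), and
// its superclass must already be initialized.  The class is then linked and
// marked fully_initialized without executing any Java code.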
void instanceKlass::eager_initialize(Thread *thread) {
  if (!EagerInitialization) return;

  if (this->is_not_initialized()) {
    // abort if the class has a class initializer
    if (this->class_initializer() != NULL) return;

    // abort if it is java.lang.Object (initialization is handled in genesis)
    klassOop super = this->super();
    if (super == NULL) return;

    // abort if the super class should be initialized
    if (!instanceKlass::cast(super)->is_initialized()) return;

    // call body to expose the this pointer
    instanceKlassHandle this_oop(thread, this->as_klassOop());
    eager_initialize_impl(this_oop);
  }
}


void instanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
  EXCEPTION_MARK;
  ObjectLocker ol(this_oop, THREAD);

  // abort if someone beat us to the initialization
  if (!this_oop->is_not_initialized()) return;  // note: not equivalent to is_initialized()

  ClassState old_state = this_oop->_init_state;
  link_class_impl(this_oop, true, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // Abort if linking the class throws an exception.

    // Use a test to avoid redundantly resetting the state if there's
    // no change.  set_init_state() asserts that state changes make
    // progress, whereas here we might just be spinning in place.
    if (old_state != this_oop->_init_state)
      this_oop->set_init_state(old_state);
  } else {
    // linking successful, mark class as initialized
    this_oop->set_init_state(fully_initialized);
    // trace
    if (TraceClassInitialization) {
      ResourceMark rm(THREAD);
      tty->print_cr("[Initialized %s without side effects]", this_oop->external_name());
    }
  }
}


// See "The Java Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
// process. The step comments refer to the procedure described in that section.
// Note: implementation moved to static method to expose the this pointer.
void instanceKlass::initialize(TRAPS) {
  if (this->should_be_initialized()) {
    HandleMark hm(THREAD);
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    initialize_impl(this_oop, CHECK);
    // Note: at this point the class may be initialized
    //       OR it may be in the state of being initialized
    //       in case of recursive initialization!
  } else {
    assert(is_initialized(), "sanity check");
  }
}


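// Run the bytecode verifier over this class.  Returns true on success; on
// failure it either throws a VerifyError (throw_verifyerror == true) or
// quietly returns false, letting the caller decide how to report the problem.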
bool instanceKlass::verify_code(
    instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
  // 1) Verify the bytecodes
  Verifier::Mode mode =
    throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
  return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false);
}


// Used exclusively by the shared spaces dump mechanism to prevent
// classes mapped into the shared regions in new VMs from appearing linked.

void instanceKlass::unlink_class() {
  assert(is_linked(), "must be linked");
  _init_state = loaded;
}

void instanceKlass::link_class(TRAPS) {
  assert(is_loaded(), "must be loaded");
  if (!is_linked()) {
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    link_class_impl(this_oop, true, CHECK);
  }
}

// Called to verify that a class can link during initialization, without
// throwing a VerifyError.
bool instanceKlass::link_class_or_fail(TRAPS) {
  assert(is_loaded(), "must be loaded");
  if (!is_linked()) {
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    link_class_impl(this_oop, false, CHECK_false);
  }
  return is_linked();
}

bool instanceKlass::link_class_impl(
    instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
  // check for error state
  if (this_oop->is_in_error_state()) {
    ResourceMark rm(THREAD);
    THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(),
               this_oop->external_name(), false);
  }
  // return if already verified
  if (this_oop->is_linked()) {
    return true;
  }

  // Timing
  // timer handles recursion
  assert(THREAD->is_Java_thread(), "non-JavaThread in link_class_impl");
  JavaThread* jt = (JavaThread*)THREAD;

  // link super class before linking this class
  instanceKlassHandle super(THREAD, this_oop->super());
  if (super.not_null()) {
    if (super->is_interface()) {  // check if super class is an interface
      ResourceMark rm(THREAD);
      Exceptions::fthrow(
        THREAD_AND_LOCATION,
        vmSymbolHandles::java_lang_IncompatibleClassChangeError(),
        "class %s has interface %s as super class",
        this_oop->external_name(),
        super->external_name()
      );
      return false;
    }

    link_class_impl(super, throw_verifyerror, CHECK_false);
  }

  // link all interfaces implemented by this class before linking this class
  objArrayHandle interfaces (THREAD, this_oop->local_interfaces());
  int num_interfaces = interfaces->length();
  for (int index = 0; index < num_interfaces; index++) {
    HandleMark hm(THREAD);
    instanceKlassHandle ih(THREAD, klassOop(interfaces->obj_at(index)));
    link_class_impl(ih, throw_verifyerror, CHECK_false);
  }

  // in case the class is linked in the process of linking its superclasses
  if (this_oop->is_linked()) {
    return true;
  }

  // trace only the link time for this klass that includes
  // the verification time
  PerfClassTraceTime vmtimer(ClassLoader::perf_class_link_time(),
                             ClassLoader::perf_class_link_selftime(),
                             ClassLoader::perf_classes_linked(),
                             jt->get_thread_stat()->perf_recursion_counts_addr(),
                             jt->get_thread_stat()->perf_timers_addr(),
                             PerfClassTraceTime::CLASS_LINK);

  // verification & rewriting
  {
    ObjectLocker ol(this_oop, THREAD);
    // rewritten will have been set if loader constraint error found
    // on an earlier link attempt
    // don't verify or rewrite if already rewritten
    if (!this_oop->is_linked()) {
      if (!this_oop->is_rewritten()) {
        {
          // Timer includes any side effects of class verification (resolution,
          // etc), but not recursive entry into verify_code().
          PerfClassTraceTime timer(ClassLoader::perf_class_verify_time(),
                                   ClassLoader::perf_class_verify_selftime(),
                                   ClassLoader::perf_classes_verified(),
                                   jt->get_thread_stat()->perf_recursion_counts_addr(),
                                   jt->get_thread_stat()->perf_timers_addr(),
                                   PerfClassTraceTime::CLASS_VERIFY);
          bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD);
          if (!verify_ok) {
            return false;
          }
        }

        // Just in case a side-effect of verify linked this class already
        // (which can sometimes happen since the verifier loads classes
        // using custom class loaders, which are free to initialize things)
        if (this_oop->is_linked()) {
          return true;
        }

        // also sets rewritten
        this_oop->rewrite_class(CHECK_false);
      }

      // Initialize the vtable and interface table after
      // methods have been rewritten since rewrite may
      // fabricate new methodOops.
      // also does loader constraint checking
      if (!this_oop()->is_shared()) {
        ResourceMark rm(THREAD);
        this_oop->vtable()->initialize_vtable(true, CHECK_false);
        this_oop->itable()->initialize_itable(true, CHECK_false);
      }
#ifdef ASSERT
      else {
        ResourceMark rm(THREAD);
        this_oop->vtable()->verify(tty, true);
        // In case itable verification is ever added.
        // this_oop->itable()->verify(tty, true);
      }
#endif
      this_oop->set_init_state(linked);
      if (JvmtiExport::should_post_class_prepare()) {
        Thread *thread = THREAD;
        assert(thread->is_Java_thread(), "thread->is_Java_thread()");
        JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop());
      }
    }
  }
  return true;
}

 365 
 366 // Rewrite the byte codes of all of the methods of a class.
 367 // Three cases:
 368 //    During the link of a newly loaded class.
 369 //    During the preloading of classes to be written to the shared spaces.
 370 //      - Rewrite the methods and update the method entry points.
 371 //
 372 //    During the link of a class in the shared spaces.
 373 //      - The methods were already rewritten, update the metho entry points.
 374 //
 375 // The rewriter must be called exactly once. Rewriting must happen after
 376 // verification but before the first method of the class is executed.
 377 
 378 void instanceKlass::rewrite_class(TRAPS) {
 379   assert(is_loaded(), "must be loaded");
 380   instanceKlassHandle this_oop(THREAD, this->as_klassOop());
 381   if (this_oop->is_rewritten()) {
 382     assert(this_oop()->is_shared(), "rewriting an unshared class?");
 383     return;
 384   }
 385   Rewriter::rewrite(this_oop, CHECK); // No exception can happen here
 386   this_oop->set_rewritten();
 387 }
 388 
 389 
void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
  // Make sure klass is linked (verified) before initialization
  // A class could already be verified, since it has been reflected upon.
  this_oop->link_class(CHECK);

  DTRACE_CLASSINIT_PROBE(required, instanceKlass::cast(this_oop()), -1);

  bool wait = false;

  // refer to the JVM book page 47 for description of steps
  // Step 1
  { ObjectLocker ol(this_oop, THREAD);

    Thread *self = THREAD; // THREAD is the current thread

    // Step 2
    // If we were to use wait() instead of waitUninterruptibly() then
    // we might end up throwing IE from link/symbol resolution sites
    // that aren't expected to throw.  This would wreak havoc.  See 6320309.
    while (this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {
      wait = true;
      ol.waitUninterruptibly(CHECK);
    }

    // Step 3
    if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self)) {
      DTRACE_CLASSINIT_PROBE_WAIT(recursive, instanceKlass::cast(this_oop()), -1, wait);
      return;
    }

    // Step 4
    if (this_oop->is_initialized()) {
      DTRACE_CLASSINIT_PROBE_WAIT(concurrent, instanceKlass::cast(this_oop()), -1, wait);
      return;
    }

    // Step 5
    if (this_oop->is_in_error_state()) {
      DTRACE_CLASSINIT_PROBE_WAIT(erroneous, instanceKlass::cast(this_oop()), -1, wait);
      ResourceMark rm(THREAD);
      const char* desc = "Could not initialize class ";
      const char* className = this_oop->external_name();
      size_t msglen = strlen(desc) + strlen(className) + 1;
      char* message = NEW_RESOURCE_ARRAY(char, msglen);
      if (NULL == message) {
        // Out of memory: can't create detailed error message
        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
      } else {
        jio_snprintf(message, msglen, "%s%s", desc, className);
        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
      }
    }

    // Step 6
    this_oop->set_init_state(being_initialized);
    this_oop->set_init_thread(self);
  }

  // Step 7
  klassOop super_klass = this_oop->super();
  if (super_klass != NULL && !this_oop->is_interface() && Klass::cast(super_klass)->should_be_initialized()) {
    Klass::cast(super_klass)->initialize(THREAD);

    if (HAS_PENDING_EXCEPTION) {
      Handle e(THREAD, PENDING_EXCEPTION);
      CLEAR_PENDING_EXCEPTION;
      {
        EXCEPTION_MARK;
        this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads
        CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, superclass initialization error is thrown below
      }
      DTRACE_CLASSINIT_PROBE_WAIT(super__failed, instanceKlass::cast(this_oop()), -1, wait);
      THROW_OOP(e());
    }
  }

  // Step 8
  {
    assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
    JavaThread* jt = (JavaThread*)THREAD;
    DTRACE_CLASSINIT_PROBE_WAIT(clinit, instanceKlass::cast(this_oop()), -1, wait);
    // Timer includes any side effects of class initialization (resolution,
    // etc), but not recursive entry into call_class_initializer().
    PerfClassTraceTime timer(ClassLoader::perf_class_init_time(),
                             ClassLoader::perf_class_init_selftime(),
                             ClassLoader::perf_classes_inited(),
                             jt->get_thread_stat()->perf_recursion_counts_addr(),
                             jt->get_thread_stat()->perf_timers_addr(),
                             PerfClassTraceTime::CLASS_CLINIT);
    this_oop->call_class_initializer(THREAD);
  }

  // Step 9
  if (!HAS_PENDING_EXCEPTION) {
    this_oop->set_initialization_state_and_notify(fully_initialized, CHECK);
    { ResourceMark rm(THREAD);
      debug_only(this_oop->vtable()->verify(tty, true);)
    }
  }
  else {
    // Step 10 and 11
    Handle e(THREAD, PENDING_EXCEPTION);
    CLEAR_PENDING_EXCEPTION;
    {
      EXCEPTION_MARK;
      this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
      CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, class initialization error is thrown below
    }
    DTRACE_CLASSINIT_PROBE_WAIT(error, instanceKlass::cast(this_oop()), -1, wait);
    if (e->is_a(SystemDictionary::Error_klass())) {
      THROW_OOP(e());
    } else {
      JavaCallArguments args(e);
      THROW_ARG(vmSymbolHandles::java_lang_ExceptionInInitializerError(),
                vmSymbolHandles::throwable_void_signature(),
                &args);
    }
  }
  DTRACE_CLASSINIT_PROBE_WAIT(end, instanceKlass::cast(this_oop()), -1, wait);
}


// Note: implementation moved to static method to expose the this pointer.
void instanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
  instanceKlassHandle kh(THREAD, this->as_klassOop());
  set_initialization_state_and_notify_impl(kh, state, CHECK);
}

void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
  ObjectLocker ol(this_oop, THREAD);
  this_oop->set_init_state(state);
  ol.notify_all(CHECK);
}

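// Record k as an implementor of this interface, for use by class hierarchy
// analysis (CHA).  Only the first implementors_limit implementors are tracked
// individually; on the first overflow the list is cleared and only the
// _nof_implementors count remains meaningful.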
void instanceKlass::add_implementor(klassOop k) {
  assert(Compile_lock->owned_by_self(), "");
  // Filter out my subinterfaces.
  // (Note: Interfaces are never on the subklass list.)
  if (instanceKlass::cast(k)->is_interface()) return;

  // Filter out subclasses whose supers already implement me.
  // (Note: CHA must walk subclasses of direct implementors
  // in order to locate indirect implementors.)
  klassOop sk = instanceKlass::cast(k)->super();
  if (sk != NULL && instanceKlass::cast(sk)->implements_interface(as_klassOop()))
    // We only need to check one immediate superclass, since the
    // implements_interface query looks at transitive_interfaces.
    // Any supers of the super have the same (or fewer) transitive_interfaces.
    return;

  // Update number of implementors
  int i = _nof_implementors++;

  // Record this implementor, if there are not too many already
  if (i < implementors_limit) {
    assert(_implementors[i] == NULL, "should be exactly one implementor");
    oop_store_without_check((oop*)&_implementors[i], k);
  } else if (i == implementors_limit) {
    // clear out the list on first overflow
    for (int i2 = 0; i2 < implementors_limit; i2++)
      oop_store_without_check((oop*)&_implementors[i2], NULL);
  }

  // The implementor also implements the transitive_interfaces
  for (int index = 0; index < local_interfaces()->length(); index++) {
    instanceKlass::cast(klassOop(local_interfaces()->obj_at(index)))->add_implementor(k);
  }
}

void instanceKlass::init_implementor() {
  for (int i = 0; i < implementors_limit; i++)
    oop_store_without_check((oop*)&_implementors[i], NULL);
  _nof_implementors = 0;
}


void instanceKlass::process_interfaces(Thread *thread) {
  // link this class into the implementors list of every interface it implements
  KlassHandle this_as_oop (thread, this->as_klassOop());
  for (int i = local_interfaces()->length() - 1; i >= 0; i--) {
    assert(local_interfaces()->obj_at(i)->is_klass(), "must be a klass");
    instanceKlass* interf = instanceKlass::cast(klassOop(local_interfaces()->obj_at(i)));
    assert(interf->is_interface(), "expected interface");
    interf->add_implementor(this_as_oop());
  }
}

bool instanceKlass::can_be_primary_super_slow() const {
  if (is_interface())
    return false;
  else
    return Klass::can_be_primary_super_slow();
}

objArrayOop instanceKlass::compute_secondary_supers(int num_extra_slots, TRAPS) {
  // The secondaries are the implemented interfaces.
  instanceKlass* ik = instanceKlass::cast(as_klassOop());
  objArrayHandle interfaces (THREAD, ik->transitive_interfaces());
  int num_secondaries = num_extra_slots + interfaces->length();
  if (num_secondaries == 0) {
    return Universe::the_empty_system_obj_array();
  } else if (num_extra_slots == 0) {
    return interfaces();
  } else {
    // a mix of both
    objArrayOop secondaries = oopFactory::new_system_objArray(num_secondaries, CHECK_NULL);
    for (int i = 0; i < interfaces->length(); i++) {
      secondaries->obj_at_put(num_extra_slots+i, interfaces->obj_at(i));
    }
    return secondaries;
  }
}

bool instanceKlass::compute_is_subtype_of(klassOop k) {
  if (Klass::cast(k)->is_interface()) {
    return implements_interface(k);
  } else {
    return Klass::compute_is_subtype_of(k);
  }
}

bool instanceKlass::implements_interface(klassOop k) const {
  if (as_klassOop() == k) return true;
  assert(Klass::cast(k)->is_interface(), "should be an interface class");
  for (int i = 0; i < transitive_interfaces()->length(); i++) {
    if (transitive_interfaces()->obj_at(i) == k) {
      return true;
    }
  }
  return false;
}

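// Allocate an object array whose element type is this class; n is the number
// of array dimensions.  The requested length is checked against the VM's
// array size limit before allocating.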
objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) {
  if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
  if (length > arrayOopDesc::max_array_length(T_OBJECT)) {
    report_java_out_of_memory("Requested array size exceeds VM limit");
    THROW_OOP_0(Universe::out_of_memory_error_array_size());
  }
  int size = objArrayOopDesc::object_size(length);
  klassOop ak = array_klass(n, CHECK_NULL);
  KlassHandle h_ak (THREAD, ak);
  objArrayOop o =
    (objArrayOop)CollectedHeap::array_allocate(h_ak, size, length, CHECK_NULL);
  return o;
}

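// Register the freshly allocated instance i for finalization by calling the
// Java method cached in Universe::finalizer_register_method() (in practice,
// java.lang.ref.Finalizer.register).  The instance is returned through a
// handle since the Java call may trigger a GC that moves it.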
instanceOop instanceKlass::register_finalizer(instanceOop i, TRAPS) {
  if (TraceFinalizerRegistration) {
    tty->print("Registered ");
    i->print_value_on(tty);
    tty->print_cr(" (" INTPTR_FORMAT ") as finalizable", (address)i);
  }
  instanceHandle h_i(THREAD, i);
  // Pass the handle as argument; JavaCalls::call expects oops wrapped as jobjects
  JavaValue result(T_VOID);
  JavaCallArguments args(h_i);
  methodHandle mh (THREAD, Universe::finalizer_register_method());
  JavaCalls::call(&result, mh, &args, CHECK_NULL);
  return h_i();
}

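// Allocate a new instance of this class.  If the class has a finalizer and
// RegisterFinalizersAtInit is disabled, the instance is registered for
// finalization here, at allocation time, instead of in Object.<init>.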
instanceOop instanceKlass::allocate_instance(TRAPS) {
  bool has_finalizer_flag = has_finalizer(); // Query before possible GC
  int size = size_helper();  // Query before forming handle.

  KlassHandle h_k(THREAD, as_klassOop());

  instanceOop i;

  i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
  if (has_finalizer_flag && !RegisterFinalizersAtInit) {
    i = register_finalizer(i, CHECK_NULL);
  }
  return i;
}

instanceOop instanceKlass::allocate_permanent_instance(TRAPS) {
  // Finalizer registration occurs in the Object.<init> constructor
  // and constructors normally aren't run when allocating perm
  // instances so simply disallow finalizable perm objects.  This can
  // be relaxed if a need for it is found.
  assert(!has_finalizer(), "perm objects not allowed to have finalizers");
  int size = size_helper();  // Query before forming handle.
  KlassHandle h_k(THREAD, as_klassOop());
  instanceOop i = (instanceOop)
    CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL);
  return i;
}

void instanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
  if (is_interface() || is_abstract()) {
    ResourceMark rm(THREAD);
    THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
              : vmSymbols::java_lang_InstantiationException(), external_name());
  }
  if (as_klassOop() == SystemDictionary::Class_klass()) {
    ResourceMark rm(THREAD);
    THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError()
              : vmSymbols::java_lang_IllegalAccessException(), external_name());
  }
}

klassOop instanceKlass::array_klass_impl(bool or_null, int n, TRAPS) {
  instanceKlassHandle this_oop(THREAD, as_klassOop());
  return array_klass_impl(this_oop, or_null, n, THREAD);
}

klassOop instanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {
  if (this_oop->array_klasses() == NULL) {
    if (or_null) return NULL;

    ResourceMark rm;
    JavaThread *jt = (JavaThread *)THREAD;
    {
      // Atomic creation of array_klasses
      MutexLocker mc(Compile_lock, THREAD);   // for vtables
      MutexLocker ma(MultiArray_lock, THREAD);

      // Check if update has already taken place
      if (this_oop->array_klasses() == NULL) {
        objArrayKlassKlass* oakk =
          (objArrayKlassKlass*)Universe::objArrayKlassKlassObj()->klass_part();

        klassOop  k = oakk->allocate_objArray_klass(1, this_oop, CHECK_NULL);
        this_oop->set_array_klasses(k);
      }
    }
  }
  // array_klasses() will always be non-NULL at this point
  objArrayKlass* oak = (objArrayKlass*)this_oop->array_klasses()->klass_part();
  if (or_null) {
    return oak->array_klass_or_null(n);
  }
  return oak->array_klass(n, CHECK_NULL);
}

klassOop instanceKlass::array_klass_impl(bool or_null, TRAPS) {
  return array_klass_impl(or_null, 1, THREAD);
}

void instanceKlass::call_class_initializer(TRAPS) {
  instanceKlassHandle ik (THREAD, as_klassOop());
  call_class_initializer_impl(ik, THREAD);
}

static int call_class_initializer_impl_counter = 0;   // for debugging

methodOop instanceKlass::class_initializer() {
  return find_method(vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
}

void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
  methodHandle h_method(THREAD, this_oop->class_initializer());
  assert(!this_oop->is_initialized(), "we cannot initialize twice");
  if (TraceClassInitialization) {
    tty->print("%d Initializing ", call_class_initializer_impl_counter++);
    this_oop->name()->print_value();
    tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop());
  }
  if (h_method() != NULL) {
    JavaCallArguments args; // No arguments
    JavaValue result(T_VOID);
    JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
  }
}


void instanceKlass::mask_for(methodHandle method, int bci,
  InterpreterOopMap* entry_for) {
  // Dirty read, then double-check under a lock.
  if (_oop_map_cache == NULL) {
    // Otherwise, allocate a new one.
    MutexLocker x(OopMapCacheAlloc_lock);
    // First time use. Allocate a cache in C heap
    if (_oop_map_cache == NULL) {
      _oop_map_cache = new OopMapCache();
    }
  }
  // _oop_map_cache is constant after init; lookup below does its own locking.
  _oop_map_cache->lookup(method, bci, entry_for);
}


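// Find a field declared directly in this class (superclasses and interfaces
// are not searched) with the given name and signature.  On success the
// fieldDescriptor is initialized and true is returned.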
bool instanceKlass::find_local_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {
  const int n = fields()->length();
  for (int i = 0; i < n; i += next_offset ) {
    int name_index = fields()->ushort_at(i + name_index_offset);
    int sig_index  = fields()->ushort_at(i + signature_index_offset);
    symbolOop f_name = constants()->symbol_at(name_index);
    symbolOop f_sig  = constants()->symbol_at(sig_index);
    if (f_name == name && f_sig == sig) {
      fd->initialize(as_klassOop(), i);
      return true;
    }
  }
  return false;
}


void instanceKlass::field_names_and_sigs_iterate(OopClosure* closure) {
  const int n = fields()->length();
  for (int i = 0; i < n; i += next_offset ) {
    int name_index = fields()->ushort_at(i + name_index_offset);
    symbolOop name = constants()->symbol_at(name_index);
    closure->do_oop((oop*)&name);

    int sig_index  = fields()->ushort_at(i + signature_index_offset);
    symbolOop sig = constants()->symbol_at(sig_index);
    closure->do_oop((oop*)&sig);
  }
}


klassOop instanceKlass::find_interface_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {
  const int n = local_interfaces()->length();
  for (int i = 0; i < n; i++) {
    klassOop intf1 = klassOop(local_interfaces()->obj_at(i));
    assert(Klass::cast(intf1)->is_interface(), "just checking type");
    // search for field in current interface
    if (instanceKlass::cast(intf1)->find_local_field(name, sig, fd)) {
      assert(fd->is_static(), "interface field must be static");
      return intf1;
    }
    // search for field in direct superinterfaces
    klassOop intf2 = instanceKlass::cast(intf1)->find_interface_field(name, sig, fd);
    if (intf2 != NULL) return intf2;
  }
  // otherwise field lookup fails
  return NULL;
}


klassOop instanceKlass::find_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {
  // search order according to newest JVM spec (5.4.3.2, p.167).
  // 1) search for field in current klass
  if (find_local_field(name, sig, fd)) {
    return as_klassOop();
  }
  // 2) search for field recursively in direct superinterfaces
  { klassOop intf = find_interface_field(name, sig, fd);
    if (intf != NULL) return intf;
  }
  // 3) apply field lookup recursively if superclass exists
  { klassOop supr = super();
    if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, fd);
  }
  // 4) otherwise field lookup fails
  return NULL;
}


klassOop instanceKlass::find_field(symbolOop name, symbolOop sig, bool is_static, fieldDescriptor* fd) const {
  // search order according to newest JVM spec (5.4.3.2, p.167).
  // 1) search for field in current klass
  if (find_local_field(name, sig, fd)) {
    if (fd->is_static() == is_static) return as_klassOop();
  }
  // 2) search for field recursively in direct superinterfaces
  if (is_static) {
    klassOop intf = find_interface_field(name, sig, fd);
    if (intf != NULL) return intf;
  }
  // 3) apply field lookup recursively if superclass exists
  { klassOop supr = super();
    if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, is_static, fd);
  }
  // 4) otherwise field lookup fails
  return NULL;
}


bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
  int length = fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    if (offset_from_fields( i ) == offset) {
      fd->initialize(as_klassOop(), i);
      if (fd->is_static() == is_static) return true;
    }
  }
  return false;
}


bool instanceKlass::find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
  klassOop klass = as_klassOop();
  while (klass != NULL) {
    if (instanceKlass::cast(klass)->find_local_field_from_offset(offset, is_static, fd)) {
      return true;
    }
    klass = Klass::cast(klass)->super();
  }
  return false;
}


void instanceKlass::methods_do(void f(methodOop method)) {
  int len = methods()->length();
  for (int index = 0; index < len; index++) {
    methodOop m = methodOop(methods()->obj_at(index));
    assert(m->is_method(), "must be method");
    f(m);
  }
}

void instanceKlass::do_local_static_fields(FieldClosure* cl) {
  fieldDescriptor fd;
  int length = fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(as_klassOop(), i);
    if (fd.is_static()) cl->do_field(&fd);
  }
}


void instanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
  instanceKlassHandle h_this(THREAD, as_klassOop());
  do_local_static_fields_impl(h_this, f, CHECK);
}


void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
  fieldDescriptor fd;
  int length = this_oop->fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(this_oop(), i);
    if (fd.is_static()) { f(&fd, CHECK); } // Do NOT remove {}! (CHECK macro expands into several statements)
  }
}


static int compare_fields_by_offset(int* a, int* b) {
  return a[0] - b[0];
}

void instanceKlass::do_nonstatic_fields(FieldClosure* cl) {
  instanceKlass* super = superklass();
  if (super != NULL) {
    super->do_nonstatic_fields(cl);
  }
  fieldDescriptor fd;
  int length = fields()->length();
  // In DebugInfo nonstatic fields are sorted by offset.
  int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1));
  int j = 0;
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(as_klassOop(), i);
    if (!fd.is_static()) {
      fields_sorted[j + 0] = fd.offset();
      fields_sorted[j + 1] = i;
      j += 2;
    }
  }
  if (j > 0) {
    length = j;
    // _sort_Fn is defined in growableArray.hpp.
    qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
    for (int i = 0; i < length; i += 2) {
      fd.initialize(as_klassOop(), fields_sorted[i + 1]);
      assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
      cl->do_field(&fd);
    }
  }
  FREE_C_HEAP_ARRAY(int, fields_sorted);
}


void instanceKlass::array_klasses_do(void f(klassOop k)) {
  if (array_klasses() != NULL)
    arrayKlass::cast(array_klasses())->array_klasses_do(f);
}


void instanceKlass::with_array_klasses_do(void f(klassOop k)) {
  f(as_klassOop());
  array_klasses_do(f);
}

#ifdef ASSERT
static int linear_search(objArrayOop methods, symbolOop name, symbolOop signature) {
  int len = methods->length();
  for (int index = 0; index < len; index++) {
    methodOop m = (methodOop)(methods->obj_at(index));
    assert(m->is_method(), "must be method");
    if (m->signature() == signature && m->name() == name) {
       return index;
    }
  }
  return -1;
}
#endif

methodOop instanceKlass::find_method(symbolOop name, symbolOop signature) const {
  return instanceKlass::find_method(methods(), name, signature);
}

methodOop instanceKlass::find_method(objArrayOop methods, symbolOop name, symbolOop signature) {
  int len = methods->length();
  // methods are sorted, so do binary search
  int l = 0;
  int h = len - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    methodOop m = (methodOop)methods->obj_at(mid);
    assert(m->is_method(), "must be method");
    int res = m->name()->fast_compare(name);
    if (res == 0) {
      // found matching name; do linear search to find matching signature
      // first, quick check for common case
      if (m->signature() == signature) return m;
      // search downwards through overloaded methods
      int i;
      for (i = mid - 1; i >= l; i--) {
        methodOop m = (methodOop)methods->obj_at(i);
        assert(m->is_method(), "must be method");
        if (m->name() != name) break;
        if (m->signature() == signature) return m;
      }
      // search upwards
      for (i = mid + 1; i <= h; i++) {
        methodOop m = (methodOop)methods->obj_at(i);
        assert(m->is_method(), "must be method");
        if (m->name() != name) break;
        if (m->signature() == signature) return m;
      }
      // not found
#ifdef ASSERT
      int index = linear_search(methods, name, signature);
      assert(index == -1, err_msg("binary search should have found entry %d", index));
#endif
      return NULL;
    } else if (res < 0) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
#ifdef ASSERT
  int index = linear_search(methods, name, signature);
  assert(index == -1, err_msg("binary search should have found entry %d", index));
#endif
  return NULL;
}

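// Walk up the superclass chain looking for a method with the given name and
// signature.  Interfaces are not searched; see lookup_method_in_all_interfaces().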
methodOop instanceKlass::uncached_lookup_method(symbolOop name, symbolOop signature) const {
  klassOop klass = as_klassOop();
  while (klass != NULL) {
    methodOop method = instanceKlass::cast(klass)->find_method(name, signature);
    if (method != NULL) return method;
    klass = instanceKlass::cast(klass)->super();
  }
  return NULL;
}

// lookup a method in all the interfaces that this class implements
methodOop instanceKlass::lookup_method_in_all_interfaces(symbolOop name,
                                                         symbolOop signature) const {
  objArrayOop all_ifs = instanceKlass::cast(as_klassOop())->transitive_interfaces();
  int num_ifs = all_ifs->length();
  instanceKlass *ik = NULL;
  for (int i = 0; i < num_ifs; i++) {
    ik = instanceKlass::cast(klassOop(all_ifs->obj_at(i)));
    methodOop m = ik->lookup_method(name, signature);
    if (m != NULL) {
      return m;
    }
  }
  return NULL;
}

/* jni_id_for_impl for jfieldIds only */
JNIid* instanceKlass::jni_id_for_impl(instanceKlassHandle this_oop, int offset) {
  MutexLocker ml(JfieldIdCreation_lock);
  // Retry lookup after we got the lock
  JNIid* probe = this_oop->jni_ids() == NULL ? NULL : this_oop->jni_ids()->find(offset);
  if (probe == NULL) {
    // Slow case, allocate new static field identifier
    probe = new JNIid(this_oop->as_klassOop(), offset, this_oop->jni_ids());
    this_oop->set_jni_ids(probe);
  }
  return probe;
}


/* jni_id_for for jfieldIds only */
JNIid* instanceKlass::jni_id_for(int offset) {
  JNIid* probe = jni_ids() == NULL ? NULL : jni_ids()->find(offset);
  if (probe == NULL) {
    probe = jni_id_for_impl(this->as_klassOop(), offset);
  }
  return probe;
}


// Lookup or create a jmethodID.
// This code is called by the VMThread and JavaThreads so the
// locking has to be done very carefully to avoid deadlocks
// and/or other cache consistency problems.
//
jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h) {
  size_t idnum = (size_t)method_h->method_idnum();
  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
  size_t length = 0;
  jmethodID id = NULL;

  // We use a double-check locking idiom here because this cache is
  // performance sensitive. In the normal system, this cache only
  // transitions from NULL to non-NULL which is safe because we use
  // release_set_methods_jmethod_ids() to advertise the new cache.
  // A partially constructed cache should never be seen by a racing
  // thread. We also use release_store_ptr() to save a new jmethodID
  // in the cache so a partially constructed jmethodID should never be
  // seen either. Cache reads of existing jmethodIDs proceed without a
  // lock, but cache writes of a new jmethodID require uniqueness and
  // creation of the cache itself requires no leaks so a lock is
  // generally acquired in those two cases.
  //
  // If the RedefineClasses() API has been used, then this cache can
  // grow and we'll have transitions from non-NULL to bigger non-NULL.
  // Cache creation requires no leaks and we require safety between all
  // cache accesses and freeing of the old cache so a lock is generally
  // acquired when the RedefineClasses() API has been used.

  if (jmeths != NULL) {
    // the cache already exists
    if (!ik_h->idnum_can_increment()) {
      // the cache can't grow so we can just get the current values
      get_jmethod_id_length_value(jmeths, idnum, &length, &id);
    } else {
      // cache can grow so we have to be more careful
      if (Threads::number_of_threads() == 0 ||
          SafepointSynchronize::is_at_safepoint()) {
        // we're single threaded or at a safepoint - no locking needed
        get_jmethod_id_length_value(jmeths, idnum, &length, &id);
      } else {
        MutexLocker ml(JmethodIdCreation_lock);
        get_jmethod_id_length_value(jmeths, idnum, &length, &id);
      }
    }
  }
  // implied else:
  // we need to allocate a cache so default length and id values are good

  if (jmeths == NULL ||   // no cache yet
      length <= idnum ||  // cache is too short
      id == NULL) {       // cache doesn't contain entry

    // This function can be called by the VMThread so we have to do all
    // things that might block on a safepoint before grabbing the lock.
    // Otherwise, we can deadlock with the VMThread or have a cache
    // consistency issue. These vars keep track of what we might have
    // to free after the lock is dropped.
    jmethodID  to_dealloc_id     = NULL;
    jmethodID* to_dealloc_jmeths = NULL;

    // may not allocate new_jmeths or use it if we allocate it
    jmethodID* new_jmeths = NULL;
    if (length <= idnum) {
      // allocate a new cache that might be used
      size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
      new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
      memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
      // cache size is stored in element[0], other elements offset by one
      new_jmeths[0] = (jmethodID)size;
    }

    // allocate a new jmethodID that might be used
    jmethodID new_id = NULL;
    if (method_h->is_old() && !method_h->is_obsolete()) {
      // The method passed in is old (but not obsolete), we need to use the current version
      methodOop current_method = ik_h->method_with_idnum((int)idnum);
      assert(current_method != NULL, "old but not obsolete, so should exist");
      methodHandle current_method_h(current_method == NULL? method_h() : current_method);
      new_id = JNIHandles::make_jmethod_id(current_method_h);
    } else {
      // It is the current version of the method or an obsolete method,
      // use the version passed in
      new_id = JNIHandles::make_jmethod_id(method_h);
    }

    if (Threads::number_of_threads() == 0 ||
        SafepointSynchronize::is_at_safepoint()) {
      // we're single threaded or at a safepoint - no locking needed
      id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
                                          &to_dealloc_id, &to_dealloc_jmeths);
    } else {
      MutexLocker ml(JmethodIdCreation_lock);
      id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
                                          &to_dealloc_id, &to_dealloc_jmeths);
    }

    // The lock has been dropped so we can free resources.
    // Free up either the old cache or the new cache if we allocated one.
    if (to_dealloc_jmeths != NULL) {
      FreeHeap(to_dealloc_jmeths);
    }
    // free up the new ID since it wasn't needed
    if (to_dealloc_id != NULL) {
      JNIHandles::destroy_jmethod_id(to_dealloc_id);
    }
  }
  return id;
}


// Common code to fetch the jmethodID from the cache or update the
// cache with the new jmethodID. This function should never do anything
// that causes the caller to go to a safepoint or we can deadlock with
// the VMThread or have cache consistency issues.
//
jmethodID instanceKlass::get_jmethod_id_fetch_or_update(
            instanceKlassHandle ik_h, size_t idnum, jmethodID new_id,
            jmethodID* new_jmeths, jmethodID* to_dealloc_id_p,
            jmethodID** to_dealloc_jmeths_p) {
  assert(new_id != NULL, "sanity check");
  assert(to_dealloc_id_p != NULL, "sanity check");
  assert(to_dealloc_jmeths_p != NULL, "sanity check");
  assert(Threads::number_of_threads() == 0 ||
         SafepointSynchronize::is_at_safepoint() ||
         JmethodIdCreation_lock->owned_by_self(), "sanity check");

  // reacquire the cache - we are locked, single threaded or at a safepoint
  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
  jmethodID  id     = NULL;
  size_t     length = 0;

  if (jmeths == NULL ||                         // no cache yet
      (length = (size_t)jmeths[0]) <= idnum) {  // cache is too short
    if (jmeths != NULL) {
      // copy any existing entries from the old cache
      for (size_t index = 0; index < length; index++) {
        new_jmeths[index+1] = jmeths[index+1];
      }
      *to_dealloc_jmeths_p = jmeths;  // save old cache for later delete
    }
    ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
  } else {
    // fetch jmethodID (if any) from the existing cache
    id = jmeths[idnum+1];
    *to_dealloc_jmeths_p = new_jmeths;  // save new cache for later delete
  }
  if (id == NULL) {
    // No matching jmethodID in the existing cache or we have a new
    // cache or we just grew the cache. This cache write is done here
    // by the first thread to win the foot race because a jmethodID
    // needs to be unique once it is generally available.
    id = new_id;

    // The jmethodID cache can be read while unlocked so we have to
    // make sure the new jmethodID is complete before installing it
    // in the cache.
    OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
  } else {
    *to_dealloc_id_p = new_id; // save new id for later delete
  }
  return id;
}


// Common code to get the jmethodID cache length and the jmethodID
// value at index idnum if there is one.
//
void instanceKlass::get_jmethod_id_length_value(jmethodID* cache,
       size_t idnum, size_t *length_p, jmethodID* id_p) {
  assert(cache != NULL, "sanity check");
  assert(length_p != NULL, "sanity check");
  assert(id_p != NULL, "sanity check");

  // cache size is stored in element[0], other elements offset by one
  *length_p = (size_t)cache[0];
  if (*length_p <= idnum) {  // cache is too short
    *id_p = NULL;
  } else {
    *id_p = cache[idnum+1];  // fetch jmethodID (if any)
  }
}


// Lookup a jmethodID, NULL if not found.  Do no blocking, no allocations, no handles
jmethodID instanceKlass::jmethod_id_or_null(methodOop method) {
  size_t idnum = (size_t)method->method_idnum();
  jmethodID* jmeths = methods_jmethod_ids_acquire();
  size_t length;                                // length assigned as debugging crumb
  jmethodID id = NULL;
  if (jmeths != NULL &&                         // If there is a cache
      (length = (size_t)jmeths[0]) > idnum) {   // and if it is long enough,
    id = jmeths[idnum+1];                       // Look up the id (may be NULL)
  }
  return id;
}


1281 // Cache an itable index
1282 void instanceKlass::set_cached_itable_index(size_t idnum, int index) {
1283   int* indices = methods_cached_itable_indices_acquire();
1284   int* to_dealloc_indices = NULL;
1285 
1286   // We use a double-check locking idiom here because this cache is
1287   // performance sensitive. In the normal system, this cache only
1288   // transitions from NULL to non-NULL which is safe because we use
1289   // release_set_methods_cached_itable_indices() to advertise the
1290   // new cache. A partially constructed cache should never be seen
1291   // by a racing thread. Cache reads and writes proceed without a
1292   // lock, but creation of the cache itself requires no leaks so a
1293   // lock is generally acquired in that case.
1294   //
1295   // If the RedefineClasses() API has been used, then this cache can
1296   // grow and we'll have transitions from non-NULL to bigger non-NULL.
1297   // Cache creation requires no leaks and we require safety between all
1298   // cache accesses and freeing of the old cache so a lock is generally
1299   // acquired when the RedefineClasses() API has been used.
1300 
1301   if (indices == NULL || idnum_can_increment()) {
1302     // we need a cache or the cache can grow
1303     MutexLocker ml(JNICachedItableIndex_lock);
1304     // reacquire the cache to see if another thread already did the work
1305     indices = methods_cached_itable_indices_acquire();
1306     size_t length = 0;
1307     // cache size is stored in element[0], other elements offset by one
1308     if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
1309       size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
1310       int* new_indices = NEW_C_HEAP_ARRAY(int, size+1);
1311       new_indices[0] = (int)size;
1312       // copy any existing entries
1313       size_t i;
1314       for (i = 0; i < length; i++) {
1315         new_indices[i+1] = indices[i+1];
1316       }
1317       // Set all the rest to -1
1318       for (i = length; i < size; i++) {
1319         new_indices[i+1] = -1;
1320       }
1321       if (indices != NULL) {
1322         // We have an old cache to delete so save it for after we
1323         // drop the lock.
1324         to_dealloc_indices = indices;
1325       }
1326       release_set_methods_cached_itable_indices(indices = new_indices);
1327     }
1328 
1329     if (idnum_can_increment()) {
1330       // this cache can grow so we have to write to it safely
1331       indices[idnum+1] = index;
1332     }
1333   } else {
1334     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
1335   }
1336 
1337   if (!idnum_can_increment()) {
1338     // The cache cannot grow and this JNI itable index value does not
1339     // have to be unique like a jmethodID. If there is a race to set it,
1340     // it doesn't matter.
1341     indices[idnum+1] = index;
1342   }
1343 
1344   if (to_dealloc_indices != NULL) {
1345     // we allocated a new cache so free the old one
1346     FreeHeap(to_dealloc_indices);
1347   }
1348 }
1349 
1350 
1351 // Retrieve a cached itable index
1352 int instanceKlass::cached_itable_index(size_t idnum) {
1353   int* indices = methods_cached_itable_indices_acquire();
1354   if (indices != NULL && ((size_t)indices[0]) > idnum) {
    // indices exist and are long enough, retrieve the possibly cached index
1356     return indices[idnum+1];
1357   }
1358   return -1;
1359 }
1360 
1361 
1362 //
1363 // nmethodBucket is used to record dependent nmethods for
1364 // deoptimization.  nmethod dependencies are actually <klass, method>
1365 // pairs but we really only care about the klass part for purposes of
1366 // finding nmethods which might need to be deoptimized.  Instead of
1367 // recording the method, a count of how many times a particular nmethod
1368 // was recorded is kept.  This ensures that any recording errors are
// noticed since an nmethod should be removed as many times as it is
// added.
1371 //
1372 class nmethodBucket {
1373  private:
1374   nmethod*       _nmethod;
1375   int            _count;
1376   nmethodBucket* _next;
1377 
1378  public:
1379   nmethodBucket(nmethod* nmethod, nmethodBucket* next) {
1380     _nmethod = nmethod;
1381     _next = next;
1382     _count = 1;
1383   }
1384   int count()                             { return _count; }
1385   int increment()                         { _count += 1; return _count; }
1386   int decrement()                         { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
1387   nmethodBucket* next()                   { return _next; }
1388   void set_next(nmethodBucket* b)         { _next = b; }
1389   nmethod* get_nmethod()                  { return _nmethod; }
1390 };
1391 
1392 
1393 //
1394 // Walk the list of dependent nmethods searching for nmethods which
1395 // are dependent on the klassOop that was passed in and mark them for
1396 // deoptimization.  Returns the number of nmethods found.
1397 //
1398 int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
1399   assert_locked_or_safepoint(CodeCache_lock);
1400   int found = 0;
1401   nmethodBucket* b = _dependencies;
1402   while (b != NULL) {
1403     nmethod* nm = b->get_nmethod();
    // Since dependencies aren't removed until an nmethod becomes a zombie,
    // the dependency list may contain nmethods which aren't alive.
1406     if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
1407       if (TraceDependencies) {
1408         ResourceMark rm;
1409         tty->print_cr("Marked for deoptimization");
1410         tty->print_cr("  context = %s", this->external_name());
1411         changes.print();
1412         nm->print();
1413         nm->print_dependencies();
1414       }
1415       nm->mark_for_deoptimization();
1416       found++;
1417     }
1418     b = b->next();
1419   }
1420   return found;
1421 }
1422 
1423 
1424 //
1425 // Add an nmethodBucket to the list of dependencies for this nmethod.
1426 // It's possible that an nmethod has multiple dependencies on this klass
1427 // so a count is kept for each bucket to guarantee that creation and
1428 // deletion of dependencies is consistent.
1429 //
1430 void instanceKlass::add_dependent_nmethod(nmethod* nm) {
1431   assert_locked_or_safepoint(CodeCache_lock);
1432   nmethodBucket* b = _dependencies;
1434   while (b != NULL) {
1435     if (nm == b->get_nmethod()) {
1436       b->increment();
1437       return;
1438     }
1439     b = b->next();
1440   }
1441   _dependencies = new nmethodBucket(nm, _dependencies);
1442 }
1443 
1444 
1445 //
// Decrement count of the nmethod in the dependency list and remove
// the bucket completely when the count goes to 0.  This method must
// find a corresponding bucket otherwise there's a bug in the
// recording of dependencies.
1450 //
1451 void instanceKlass::remove_dependent_nmethod(nmethod* nm) {
1452   assert_locked_or_safepoint(CodeCache_lock);
1453   nmethodBucket* b = _dependencies;
1454   nmethodBucket* last = NULL;
1455   while (b != NULL) {
1456     if (nm == b->get_nmethod()) {
1457       if (b->decrement() == 0) {
1458         if (last == NULL) {
1459           _dependencies = b->next();
1460         } else {
1461           last->set_next(b->next());
1462         }
1463         delete b;
1464       }
1465       return;
1466     }
1467     last = b;
1468     b = b->next();
1469   }
1470 #ifdef ASSERT
1471   tty->print_cr("### %s can't find dependent nmethod:", this->external_name());
1472   nm->print();
1473 #endif // ASSERT
1474   ShouldNotReachHere();
1475 }
1476 
1477 
1478 #ifndef PRODUCT
1479 void instanceKlass::print_dependent_nmethods(bool verbose) {
1480   nmethodBucket* b = _dependencies;
1481   int idx = 0;
1482   while (b != NULL) {
1483     nmethod* nm = b->get_nmethod();
1484     tty->print("[%d] count=%d { ", idx++, b->count());
1485     if (!verbose) {
1486       nm->print_on(tty, "nmethod");
1487       tty->print_cr(" } ");
1488     } else {
1489       nm->print();
1490       nm->print_dependencies();
1491       tty->print_cr("--- } ");
1492     }
1493     b = b->next();
1494   }
1495 }
1496 
1497 
1498 bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
1499   nmethodBucket* b = _dependencies;
1500   while (b != NULL) {
1501     if (nm == b->get_nmethod()) {
1502       return true;
1503     }
1504     b = b->next();
1505   }
1506   return false;
1507 }
1508 #endif //PRODUCT
1509 
1510 
1511 #ifdef ASSERT
1512 template <class T> void assert_is_in(T *p) {
1513   T heap_oop = oopDesc::load_heap_oop(p);
1514   if (!oopDesc::is_null(heap_oop)) {
1515     oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1516     assert(Universe::heap()->is_in(o), "should be in heap");
1517   }
1518 }
1519 template <class T> void assert_is_in_closed_subset(T *p) {
1520   T heap_oop = oopDesc::load_heap_oop(p);
1521   if (!oopDesc::is_null(heap_oop)) {
1522     oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1523     assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
1524   }
1525 }
1526 template <class T> void assert_is_in_reserved(T *p) {
1527   T heap_oop = oopDesc::load_heap_oop(p);
1528   if (!oopDesc::is_null(heap_oop)) {
1529     oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1530     assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
1531   }
1532 }
1533 template <class T> void assert_nothing(T *p) {}
1534 
1535 #else
1536 template <class T> void assert_is_in(T *p) {}
1537 template <class T> void assert_is_in_closed_subset(T *p) {}
1538 template <class T> void assert_is_in_reserved(T *p) {}
1539 template <class T> void assert_nothing(T *p) {}
1540 #endif // ASSERT
1541 
1542 //
1543 // Macros that iterate over areas of oops which are specialized on type of
1544 // oop pointer either narrow or wide, depending on UseCompressedOops
1545 //
1546 // Parameters are:
1547 //   T         - type of oop to point to (either oop or narrowOop)
1548 //   start_p   - starting pointer for region to iterate over
1549 //   count     - number of oops or narrowOops to iterate over
1550 //   do_oop    - action to perform on each oop (it's arbitrary C code which
1551 //               makes it more efficient to put in a macro rather than making
1552 //               it a template function)
1553 //   assert_fn - assert function which is template function because performance
1554 //               doesn't matter when enabled.
1555 #define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
1556   T, start_p, count, do_oop,                \
1557   assert_fn)                                \
1558 {                                           \
1559   T* p         = (T*)(start_p);             \
1560   T* const end = p + (count);               \
1561   while (p < end) {                         \
1562     (assert_fn)(p);                         \
1563     do_oop;                                 \
1564     ++p;                                    \
1565   }                                         \
1566 }
1567 
1568 #define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
1569   T, start_p, count, do_oop,                \
1570   assert_fn)                                \
1571 {                                           \
1572   T* const start = (T*)(start_p);           \
1573   T*       p     = start + (count);         \
1574   while (start < p) {                       \
1575     --p;                                    \
1576     (assert_fn)(p);                         \
1577     do_oop;                                 \
1578   }                                         \
1579 }
1580 
1581 #define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
1582   T, start_p, count, low, high,             \
1583   do_oop, assert_fn)                        \
1584 {                                           \
1585   T* const l = (T*)(low);                   \
1586   T* const h = (T*)(high);                  \
1587   assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
1588          mask_bits((intptr_t)h, sizeof(T)-1) == 0,   \
1589          "bounded region must be properly aligned"); \
1590   T* p       = (T*)(start_p);               \
1591   T* end     = p + (count);                 \
1592   if (p < l) p = l;                         \
1593   if (end > h) end = h;                     \
1594   while (p < end) {                         \
1595     (assert_fn)(p);                         \
1596     do_oop;                                 \
1597     ++p;                                    \
1598   }                                         \
1599 }
1600 
1601 
1602 // The following macros call specialized macros, passing either oop or
1603 // narrowOop as the specialization type.  These test the UseCompressedOops
1604 // flag.
1605 #define InstanceKlass_OOP_ITERATE(start_p, count,    \
1606                                   do_oop, assert_fn) \
1607 {                                                    \
1608   if (UseCompressedOops) {                           \
1609     InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
1610       start_p, count,                                \
1611       do_oop, assert_fn)                             \
1612   } else {                                           \
1613     InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,       \
1614       start_p, count,                                \
1615       do_oop, assert_fn)                             \
1616   }                                                  \
1617 }
1618 
1619 #define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high,    \
1620                                           do_oop, assert_fn) \
1621 {                                                            \
1622   if (UseCompressedOops) {                                   \
1623     InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
1624       start_p, count,                                        \
1625       low, high,                                             \
1626       do_oop, assert_fn)                                     \
1627   } else {                                                   \
1628     InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,       \
1629       start_p, count,                                        \
1630       low, high,                                             \
1631       do_oop, assert_fn)                                     \
1632   }                                                          \
1633 }
1634 
1635 #define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn)            \
1636 {                                                                        \
1637   /* Compute oopmap block range. The common case                         \
1638      is nonstatic_oop_map_size == 1. */                                  \
1639   OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
1640   OopMapBlock* const end_map = map + nonstatic_oop_map_count();          \
1641   if (UseCompressedOops) {                                               \
1642     while (map < end_map) {                                              \
1643       InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop,                   \
1644         obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
1645         do_oop, assert_fn)                                               \
1646       ++map;                                                             \
1647     }                                                                    \
1648   } else {                                                               \
1649     while (map < end_map) {                                              \
1650       InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,                         \
1651         obj->obj_field_addr<oop>(map->offset()), map->count(),           \
1652         do_oop, assert_fn)                                               \
1653       ++map;                                                             \
1654     }                                                                    \
1655   }                                                                      \
1656 }
1657 
1658 #define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn)    \
1659 {                                                                        \
1660   OopMapBlock* const start_map = start_of_nonstatic_oop_maps();          \
1661   OopMapBlock* map             = start_map + nonstatic_oop_map_count();  \
1662   if (UseCompressedOops) {                                               \
1663     while (start_map < map) {                                            \
1664       --map;                                                             \
1665       InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop,           \
1666         obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
1667         do_oop, assert_fn)                                               \
1668     }                                                                    \
1669   } else {                                                               \
1670     while (start_map < map) {                                            \
1671       --map;                                                             \
1672       InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop,                 \
1673         obj->obj_field_addr<oop>(map->offset()), map->count(),           \
1674         do_oop, assert_fn)                                               \
1675     }                                                                    \
1676   }                                                                      \
1677 }
1678 
1679 #define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop,    \
1680                                               assert_fn)                 \
1681 {                                                                        \
1682   /* Compute oopmap block range. The common case is                      \
1683      nonstatic_oop_map_size == 1, so we accept the                       \
1684      usually non-existent extra overhead of examining                    \
1685      all the maps. */                                                    \
1686   OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
1687   OopMapBlock* const end_map = map + nonstatic_oop_map_count();          \
1688   if (UseCompressedOops) {                                               \
1689     while (map < end_map) {                                              \
1690       InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,           \
1691         obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
1692         low, high,                                                       \
1693         do_oop, assert_fn)                                               \
1694       ++map;                                                             \
1695     }                                                                    \
1696   } else {                                                               \
1697     while (map < end_map) {                                              \
1698       InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,                 \
1699         obj->obj_field_addr<oop>(map->offset()), map->count(),           \
1700         low, high,                                                       \
1701         do_oop, assert_fn)                                               \
1702       ++map;                                                             \
1703     }                                                                    \
1704   }                                                                      \
1705 }
1706 
1707 void instanceKlass::follow_static_fields() {
1708   InstanceKlass_OOP_ITERATE( \
1709     start_of_static_fields(), static_oop_field_size(), \
1710     MarkSweep::mark_and_push(p), \
1711     assert_is_in_closed_subset)
1712 }
1713 
1714 #ifndef SERIALGC
1715 void instanceKlass::follow_static_fields(ParCompactionManager* cm) {
1716   InstanceKlass_OOP_ITERATE( \
1717     start_of_static_fields(), static_oop_field_size(), \
1718     PSParallelCompact::mark_and_push(cm, p), \
1719     assert_is_in)
1720 }
1721 #endif // SERIALGC
1722 
1723 void instanceKlass::adjust_static_fields() {
1724   InstanceKlass_OOP_ITERATE( \
1725     start_of_static_fields(), static_oop_field_size(), \
1726     MarkSweep::adjust_pointer(p), \
1727     assert_nothing)
1728 }
1729 
1730 #ifndef SERIALGC
1731 void instanceKlass::update_static_fields() {
1732   InstanceKlass_OOP_ITERATE( \
1733     start_of_static_fields(), static_oop_field_size(), \
1734     PSParallelCompact::adjust_pointer(p), \
1735     assert_nothing)
1736 }
1737 
1738 void instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) {
1739   InstanceKlass_BOUNDED_OOP_ITERATE( \
1740     start_of_static_fields(), static_oop_field_size(), \
1741     beg_addr, end_addr, \
1742     PSParallelCompact::adjust_pointer(p), \
1743     assert_nothing )
1744 }
1745 #endif // SERIALGC
1746 
1747 void instanceKlass::oop_follow_contents(oop obj) {
1748   assert(obj != NULL, "can't follow the content of NULL object");
1749   obj->follow_header();
1750   InstanceKlass_OOP_MAP_ITERATE( \
1751     obj, \
1752     MarkSweep::mark_and_push(p), \
1753     assert_is_in_closed_subset)
1754 }
1755 
1756 #ifndef SERIALGC
1757 void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
1758                                         oop obj) {
1759   assert(obj != NULL, "can't follow the content of NULL object");
1760   obj->follow_header(cm);
1761   InstanceKlass_OOP_MAP_ITERATE( \
1762     obj, \
1763     PSParallelCompact::mark_and_push(cm, p), \
1764     assert_is_in)
1765 }
1766 #endif // SERIALGC
1767 
// The closure's do_header() method dictates whether the given closure
// should be applied to the klass ptr in the object header.
1770 
1771 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)        \
1772                                                                              \
1773 int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
1774   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
1775   /* header */                                                          \
1776   if (closure->do_header()) {                                           \
1777     obj->oop_iterate_header(closure);                                   \
1778   }                                                                     \
1779   InstanceKlass_OOP_MAP_ITERATE(                                        \
1780     obj,                                                                \
1781     SpecializationStats::                                               \
1782       record_do_oop_call##nv_suffix(SpecializationStats::ik);           \
1783     (closure)->do_oop##nv_suffix(p),                                    \
1784     assert_is_in_closed_subset)                                         \
1785   return size_helper();                                                 \
1786 }
1787 
1788 #ifndef SERIALGC
1789 #define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
1790                                                                                 \
1791 int instanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj,                \
1792                                               OopClosureType* closure) {        \
1793   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
1794   /* header */                                                                  \
1795   if (closure->do_header()) {                                                   \
1796     obj->oop_iterate_header(closure);                                           \
1797   }                                                                             \
1798   /* instance variables */                                                      \
1799   InstanceKlass_OOP_MAP_REVERSE_ITERATE(                                        \
1800     obj,                                                                        \
1801     SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik);\
1802     (closure)->do_oop##nv_suffix(p),                                            \
1803     assert_is_in_closed_subset)                                                 \
1804    return size_helper();                                                        \
1805 }
1806 #endif // !SERIALGC
1807 
1808 #define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
1809                                                                         \
1810 int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj,              \
1811                                                   OopClosureType* closure, \
1812                                                   MemRegion mr) {          \
1813   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
1814   if (closure->do_header()) {                                            \
1815     obj->oop_iterate_header(closure, mr);                                \
1816   }                                                                      \
1817   InstanceKlass_BOUNDED_OOP_MAP_ITERATE(                                 \
1818     obj, mr.start(), mr.end(),                                           \
1819     (closure)->do_oop##nv_suffix(p),                                     \
1820     assert_is_in_closed_subset)                                          \
1821   return size_helper();                                                  \
1822 }
1823 
1824 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
1825 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN)
1826 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1827 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1828 #ifndef SERIALGC
1829 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
1830 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
1831 #endif // !SERIALGC
1832 
1833 void instanceKlass::iterate_static_fields(OopClosure* closure) {
1834     InstanceKlass_OOP_ITERATE( \
1835       start_of_static_fields(), static_oop_field_size(), \
1836       closure->do_oop(p), \
1837       assert_is_in_reserved)
1838 }
1839 
1840 void instanceKlass::iterate_static_fields(OopClosure* closure,
1841                                           MemRegion mr) {
1842   InstanceKlass_BOUNDED_OOP_ITERATE( \
1843     start_of_static_fields(), static_oop_field_size(), \
1844     mr.start(), mr.end(), \
1845     (closure)->do_oop_v(p), \
1846     assert_is_in_closed_subset)
1847 }
1848 
1849 int instanceKlass::oop_adjust_pointers(oop obj) {
1850   int size = size_helper();
1851   InstanceKlass_OOP_MAP_ITERATE( \
1852     obj, \
1853     MarkSweep::adjust_pointer(p), \
1854     assert_is_in)
1855   obj->adjust_header();
1856   return size;
1857 }
1858 
1859 #ifndef SERIALGC
1860 void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
1861   InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
1862     obj, \
1863     if (PSScavenge::should_scavenge(p)) { \
1864       pm->claim_or_forward_depth(p); \
1865     }, \
1866     assert_nothing )
1867 }
1868 
1869 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
1870   InstanceKlass_OOP_MAP_ITERATE( \
1871     obj, \
1872     PSParallelCompact::adjust_pointer(p), \
1873     assert_nothing)
1874   return size_helper();
1875 }
1876 
1877 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
1878                                        HeapWord* beg_addr, HeapWord* end_addr) {
1879   InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
1880     obj, beg_addr, end_addr, \
1881     PSParallelCompact::adjust_pointer(p), \
1882     assert_nothing)
1883   return size_helper();
1884 }
1885 
1886 void instanceKlass::push_static_fields(PSPromotionManager* pm) {
1887   InstanceKlass_OOP_ITERATE( \
1888     start_of_static_fields(), static_oop_field_size(), \
1889     if (PSScavenge::should_scavenge(p)) { \
1890       pm->claim_or_forward_depth(p); \
1891     }, \
1892     assert_nothing )
1893 }
1894 
1895 void instanceKlass::copy_static_fields(ParCompactionManager* cm) {
1896   InstanceKlass_OOP_ITERATE( \
1897     start_of_static_fields(), static_oop_field_size(), \
1898     PSParallelCompact::adjust_pointer(p), \
1899     assert_is_in)
1900 }
1901 #endif // SERIALGC
1902 
// This klass is alive but the implementor link is not followed/updated.
// Subklass and sibling links are handled by Klass::follow_weak_klass_links.
1905 
1906 void instanceKlass::follow_weak_klass_links(
1907   BoolObjectClosure* is_alive, OopClosure* keep_alive) {
1908   assert(is_alive->do_object_b(as_klassOop()), "this oop should be live");
1909   if (ClassUnloading) {
1910     for (int i = 0; i < implementors_limit; i++) {
1911       klassOop impl = _implementors[i];
1912       if (impl == NULL)  break;  // no more in the list
1913       if (!is_alive->do_object_b(impl)) {
        // remove this entry from the list by overwriting it with the tail
1915         int lasti = --_nof_implementors;
1916         assert(lasti >= i && lasti < implementors_limit, "just checking");
1917         _implementors[i] = _implementors[lasti];
1918         _implementors[lasti] = NULL;
1919         --i; // rerun the loop at this index
1920       }
1921     }
1922   } else {
1923     for (int i = 0; i < implementors_limit; i++) {
1924       keep_alive->do_oop(&adr_implementors()[i]);
1925     }
1926   }
1927   Klass::follow_weak_klass_links(is_alive, keep_alive);
1928 }
1929 
1930 void instanceKlass::remove_unshareable_info() {
1931   Klass::remove_unshareable_info();
1932   init_implementor();
1933 }
1934 
1935 static void clear_all_breakpoints(methodOop m) {
1936   m->clear_all_breakpoints();
1937 }
1938 
1939 void instanceKlass::release_C_heap_structures() {
1940   // Deallocate oop map cache
1941   if (_oop_map_cache != NULL) {
1942     delete _oop_map_cache;
1943     _oop_map_cache = NULL;
1944   }
1945 
1946   // Deallocate JNI identifiers for jfieldIDs
1947   JNIid::deallocate(jni_ids());
1948   set_jni_ids(NULL);
1949 
1950   jmethodID* jmeths = methods_jmethod_ids_acquire();
1951   if (jmeths != (jmethodID*)NULL) {
1952     release_set_methods_jmethod_ids(NULL);
1953     FreeHeap(jmeths);
1954   }
1955 
1956   int* indices = methods_cached_itable_indices_acquire();
1957   if (indices != (int*)NULL) {
1958     release_set_methods_cached_itable_indices(NULL);
1959     FreeHeap(indices);
1960   }
1961 
1962   // release dependencies
1963   nmethodBucket* b = _dependencies;
1964   _dependencies = NULL;
1965   while (b != NULL) {
1966     nmethodBucket* next = b->next();
1967     delete b;
1968     b = next;
1969   }
1970 
1971   // Deallocate breakpoint records
1972   if (breakpoints() != 0x0) {
1973     methods_do(clear_all_breakpoints);
1974     assert(breakpoints() == 0x0, "should have cleared breakpoints");
1975   }
1976 
1977   // deallocate information about previous versions
1978   if (_previous_versions != NULL) {
1979     for (int i = _previous_versions->length() - 1; i >= 0; i--) {
1980       PreviousVersionNode * pv_node = _previous_versions->at(i);
1981       delete pv_node;
1982     }
1983     delete _previous_versions;
1984     _previous_versions = NULL;
1985   }
1986 
1987   // deallocate the cached class file
1988   if (_cached_class_file_bytes != NULL) {
1989     os::free(_cached_class_file_bytes);
1990     _cached_class_file_bytes = NULL;
1991     _cached_class_file_len = 0;
1992   }
1993 }
1994 
1995 const char* instanceKlass::signature_name() const {
1996   const char* src = (const char*) (name()->as_C_string());
1997   const int src_length = (int)strlen(src);
1998   char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3);
1999   int src_index = 0;
2000   int dest_index = 0;
2001   dest[dest_index++] = 'L';
2002   while (src_index < src_length) {
2003     dest[dest_index++] = src[src_index++];
2004   }
2005   dest[dest_index++] = ';';
2006   dest[dest_index] = '\0';
2007   return dest;
2008 }
2009 
// Different versions of is_same_class_package
2011 bool instanceKlass::is_same_class_package(klassOop class2) {
2012   klassOop class1 = as_klassOop();
2013   oop classloader1 = instanceKlass::cast(class1)->class_loader();
2014   symbolOop classname1 = Klass::cast(class1)->name();
2015 
2016   if (Klass::cast(class2)->oop_is_objArray()) {
2017     class2 = objArrayKlass::cast(class2)->bottom_klass();
2018   }
2019   oop classloader2;
2020   if (Klass::cast(class2)->oop_is_instance()) {
2021     classloader2 = instanceKlass::cast(class2)->class_loader();
2022   } else {
2023     assert(Klass::cast(class2)->oop_is_typeArray(), "should be type array");
2024     classloader2 = NULL;
2025   }
2026   symbolOop classname2 = Klass::cast(class2)->name();
2027 
2028   return instanceKlass::is_same_class_package(classloader1, classname1,
2029                                               classloader2, classname2);
2030 }
2031 
2032 bool instanceKlass::is_same_class_package(oop classloader2, symbolOop classname2) {
2033   klassOop class1 = as_klassOop();
2034   oop classloader1 = instanceKlass::cast(class1)->class_loader();
2035   symbolOop classname1 = Klass::cast(class1)->name();
2036 
2037   return instanceKlass::is_same_class_package(classloader1, classname1,
2038                                               classloader2, classname2);
2039 }
2040 
// Return true if two classes are in the same package; classloader
// and classname information is enough to determine a class's package.
2043 bool instanceKlass::is_same_class_package(oop class_loader1, symbolOop class_name1,
2044                                           oop class_loader2, symbolOop class_name2) {
2045   if (class_loader1 != class_loader2) {
2046     return false;
2047   } else if (class_name1 == class_name2) {
2048     return true;                // skip painful bytewise comparison
2049   } else {
2050     ResourceMark rm;
2051 
    // The symbolOops are in UTF8 encoding. Since we only need to check explicitly
    // for ASCII characters ('/', 'L', '['), we can keep them in UTF8 encoding.
    // Otherwise, we just compare jbyte values between the strings.
2055     jbyte *name1 = class_name1->base();
2056     jbyte *name2 = class_name2->base();
2057 
2058     jbyte *last_slash1 = UTF8::strrchr(name1, class_name1->utf8_length(), '/');
2059     jbyte *last_slash2 = UTF8::strrchr(name2, class_name2->utf8_length(), '/');
2060 
2061     if ((last_slash1 == NULL) || (last_slash2 == NULL)) {
2062       // One of the two doesn't have a package.  Only return true
2063       // if the other one also doesn't have a package.
2064       return last_slash1 == last_slash2;
2065     } else {
2066       // Skip over '['s
2067       if (*name1 == '[') {
2068         do {
2069           name1++;
2070         } while (*name1 == '[');
2071         if (*name1 != 'L') {
2072           // Something is terribly wrong.  Shouldn't be here.
2073           return false;
2074         }
2075       }
2076       if (*name2 == '[') {
2077         do {
2078           name2++;
2079         } while (*name2 == '[');
2080         if (*name2 != 'L') {
2081           // Something is terribly wrong.  Shouldn't be here.
2082           return false;
2083         }
2084       }
2085 
2086       // Check that package part is identical
2087       int length1 = last_slash1 - name1;
2088       int length2 = last_slash2 - name2;
2089 
2090       return UTF8::equal(name1, length1, name2, length2);
2091     }
2092   }
2093 }
2094 
// Returns true iff super_method can be overridden by a method in targetclassname
// See JLS 3rd edition 8.4.6.1
// Assumes name-signature match
// "this" is instanceKlass of super_method which must exist
// note that the instanceKlass of the method in targetclassname may not have been created yet
2100 bool instanceKlass::is_override(methodHandle super_method, Handle targetclassloader, symbolHandle targetclassname, TRAPS) {
   // Private methods cannot be overridden
2102    if (super_method->is_private()) {
2103      return false;
2104    }
2105    // If super method is accessible, then override
2106    if ((super_method->is_protected()) ||
2107        (super_method->is_public())) {
2108      return true;
2109    }
2110    // Package-private methods are not inherited outside of package
2111    assert(super_method->is_package_private(), "must be package private");
2112    return(is_same_class_package(targetclassloader(), targetclassname()));
2113 }
2114 
2115 /* defined for now in jvm.cpp, for historical reasons *--
2116 klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle self,
2117                                                      symbolOop& simple_name_result, TRAPS) {
2118   ...
2119 }
2120 */
2121 
2122 // tell if two classes have the same enclosing class (at package level)
2123 bool instanceKlass::is_same_package_member_impl(instanceKlassHandle class1,
2124                                                 klassOop class2_oop, TRAPS) {
2125   if (class2_oop == class1->as_klassOop())          return true;
2126   if (!Klass::cast(class2_oop)->oop_is_instance())  return false;
2127   instanceKlassHandle class2(THREAD, class2_oop);
2128 
2129   // must be in same package before we try anything else
2130   if (!class1->is_same_class_package(class2->class_loader(), class2->name()))
2131     return false;
2132 
2133   // As long as there is an outer1.getEnclosingClass,
2134   // shift the search outward.
2135   instanceKlassHandle outer1 = class1;
2136   for (;;) {
2137     // As we walk along, look for equalities between outer1 and class2.
2138     // Eventually, the walks will terminate as outer1 stops
2139     // at the top-level class around the original class.
2140     bool ignore_inner_is_member;
2141     klassOop next = outer1->compute_enclosing_class(&ignore_inner_is_member,
2142                                                     CHECK_false);
2143     if (next == NULL)  break;
2144     if (next == class2())  return true;
2145     outer1 = instanceKlassHandle(THREAD, next);
2146   }
2147 
2148   // Now do the same for class2.
2149   instanceKlassHandle outer2 = class2;
2150   for (;;) {
2151     bool ignore_inner_is_member;
2152     klassOop next = outer2->compute_enclosing_class(&ignore_inner_is_member,
2153                                                     CHECK_false);
2154     if (next == NULL)  break;
2155     // Might as well check the new outer against all available values.
2156     if (next == class1())  return true;
2157     if (next == outer1())  return true;
2158     outer2 = instanceKlassHandle(THREAD, next);
2159   }
2160 
2161   // If by this point we have not found an equality between the
2162   // two classes, we know they are in separate package members.
2163   return false;
2164 }
2165 
2166 
2167 jint instanceKlass::compute_modifier_flags(TRAPS) const {
2168   klassOop k = as_klassOop();
2169   jint access = access_flags().as_int();
2170 
  // But check if it happens to be a member class.
2172   typeArrayOop inner_class_list = inner_classes();
2173   int length = (inner_class_list == NULL) ? 0 : inner_class_list->length();
2174   assert (length % instanceKlass::inner_class_next_offset == 0, "just checking");
2175   if (length > 0) {
2176     typeArrayHandle inner_class_list_h(THREAD, inner_class_list);
2177     instanceKlassHandle ik(THREAD, k);
2178     for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) {
2179       int ioff = inner_class_list_h->ushort_at(
2180                       i + instanceKlass::inner_class_inner_class_info_offset);
2181 
2182       // Inner class attribute can be zero, skip it.
2183       // Strange but true:  JVM spec. allows null inner class refs.
2184       if (ioff == 0) continue;
2185 
      // only look at classes that are already loaded
      // since we are looking for the flags for this class itself.
2188       symbolOop inner_name = ik->constants()->klass_name_at(ioff);
      if (ik->name() == inner_name) {
2190         // This is really a member class.
2191         access = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_access_flags_offset);
2192         break;
2193       }
2194     }
2195   }
2196   // Remember to strip ACC_SUPER bit
2197   return (access & (~JVM_ACC_SUPER)) & JVM_ACC_WRITTEN_FLAGS;
2198 }
2199 
2200 jint instanceKlass::jvmti_class_status() const {
2201   jint result = 0;
2202 
2203   if (is_linked()) {
2204     result |= JVMTI_CLASS_STATUS_VERIFIED | JVMTI_CLASS_STATUS_PREPARED;
2205   }
2206 
2207   if (is_initialized()) {
2208     assert(is_linked(), "Class status is not consistent");
2209     result |= JVMTI_CLASS_STATUS_INITIALIZED;
2210   }
2211   if (is_in_error_state()) {
2212     result |= JVMTI_CLASS_STATUS_ERROR;
2213   }
2214   return result;
2215 }
2216 
2217 methodOop instanceKlass::method_at_itable(klassOop holder, int index, TRAPS) {
2218   itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable();
2219   int method_table_offset_in_words = ioe->offset()/wordSize;
2220   int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words())
2221                        / itableOffsetEntry::size();
2222 
2223   for (int cnt = 0 ; ; cnt ++, ioe ++) {
2224     // If the interface isn't implemented by the receiver class,
2225     // the VM should throw IncompatibleClassChangeError.
2226     if (cnt >= nof_interfaces) {
2227       THROW_OOP_0(vmSymbols::java_lang_IncompatibleClassChangeError());
2228     }
2229 
2230     klassOop ik = ioe->interface_klass();
2231     if (ik == holder) break;
2232   }
2233 
2234   itableMethodEntry* ime = ioe->first_method_entry(as_klassOop());
2235   methodOop m = ime[index].method();
2236   if (m == NULL) {
2237     THROW_OOP_0(vmSymbols::java_lang_AbstractMethodError());
2238   }
2239   return m;
2240 }
2241 
2242 // On-stack replacement stuff
2243 void instanceKlass::add_osr_nmethod(nmethod* n) {
2244   // only one compilation can be active
2245   NEEDS_CLEANUP
2246   // This is a short non-blocking critical region, so the no safepoint check is ok.
2247   OsrList_lock->lock_without_safepoint_check();
2248   assert(n->is_osr_method(), "wrong kind of nmethod");
2249   n->set_osr_link(osr_nmethods_head());
2250   set_osr_nmethods_head(n);
2251   // Raise the highest osr level if necessary
2252   if (TieredCompilation) {
2253     methodOop m = n->method();
2254     m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
2255   }
2256   // Remember to unlock again
2257   OsrList_lock->unlock();
2258 
2259   // Get rid of the osr methods for the same bci that have lower levels.
2260   if (TieredCompilation) {
2261     for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) {
2262       nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true);
2263       if (inv != NULL && inv->is_in_use()) {
2264         inv->make_not_entrant();
2265       }
2266     }
2267   }
2268 }
2269 
2270 
2271 void instanceKlass::remove_osr_nmethod(nmethod* n) {
2272   // This is a short non-blocking critical region, so the no safepoint check is ok.
2273   OsrList_lock->lock_without_safepoint_check();
2274   assert(n->is_osr_method(), "wrong kind of nmethod");
2275   nmethod* last = NULL;
2276   nmethod* cur  = osr_nmethods_head();
2277   int max_level = CompLevel_none;  // Find the max comp level excluding n
2278   methodOop m = n->method();
2279   // Search for match
  while (cur != NULL && cur != n) {
2281     if (TieredCompilation) {
2282       // Find max level before n
2283       max_level = MAX2(max_level, cur->comp_level());
2284     }
2285     last = cur;
2286     cur = cur->osr_link();
2287   }
2288   nmethod* next = NULL;
2289   if (cur == n) {
2290     next = cur->osr_link();
2291     if (last == NULL) {
2292       // Remove first element
2293       set_osr_nmethods_head(next);
2294     } else {
2295       last->set_osr_link(next);
2296     }
2297   }
2298   n->set_osr_link(NULL);
2299   if (TieredCompilation) {
2300     cur = next;
2301     while (cur != NULL) {
2302       // Find max level after n
2303       max_level = MAX2(max_level, cur->comp_level());
2304       cur = cur->osr_link();
2305     }
2306     m->set_highest_osr_comp_level(max_level);
2307   }
2308   // Remember to unlock again
2309   OsrList_lock->unlock();
2310 }
2311 
2312 nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci, int comp_level, bool match_level) const {
2313   // This is a short non-blocking critical region, so the no safepoint check is ok.
2314   OsrList_lock->lock_without_safepoint_check();
2315   nmethod* osr = osr_nmethods_head();
2316   nmethod* best = NULL;
2317   while (osr != NULL) {
2318     assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
    // There can be a time when a c1 osr method exists but we are waiting
    // for a c2 version. When c2 completes its osr nmethod we will trash
    // the c1 version and only be able to find the c2 version. However,
    // while we overflow in the c1 code at back branches we don't want to
    // try to switch to the same code as we are already running.
2324 
2325     if (osr->method() == m &&
2326         (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) {
2327       if (match_level) {
2328         if (osr->comp_level() == comp_level) {
2329           // Found a match - return it.
2330           OsrList_lock->unlock();
2331           return osr;
2332         }
2333       } else {
2334         if (best == NULL || (osr->comp_level() > best->comp_level())) {
2335           if (osr->comp_level() == CompLevel_highest_tier) {
2336             // Found the best possible - return it.
2337             OsrList_lock->unlock();
2338             return osr;
2339           }
2340           best = osr;
2341         }
2342       }
2343     }
2344     osr = osr->osr_link();
2345   }
2346   OsrList_lock->unlock();
2347   if (best != NULL && best->comp_level() >= comp_level && match_level == false) {
2348     return best;
2349   }
2350   return NULL;
2351 }
2352 
2353 // -----------------------------------------------------------------------------------------------------
2354 #ifndef PRODUCT
2355 
2356 // Printing
2357 
2358 #define BULLET  " - "
2359 
2360 void FieldPrinter::do_field(fieldDescriptor* fd) {
  _st->print(BULLET);
  if (fd->is_static() || (_obj == NULL)) {
    fd->print_on(_st);
    _st->cr();
  } else {
    fd->print_on_for(_st, _obj);
    _st->cr();
  }
2369 }
2370 
2371 
2372 void instanceKlass::oop_print_on(oop obj, outputStream* st) {
2373   Klass::oop_print_on(obj, st);
2374 
2375   if (as_klassOop() == SystemDictionary::String_klass()) {
2376     typeArrayOop value  = java_lang_String::value(obj);
2377     juint        offset = java_lang_String::offset(obj);
2378     juint        length = java_lang_String::length(obj);
2379     if (value != NULL &&
2380         value->is_typeArray() &&
2381         offset          <= (juint) value->length() &&
2382         offset + length <= (juint) value->length()) {
2383       st->print(BULLET"string: ");
2384       Handle h_obj(obj);
2385       java_lang_String::print(h_obj, st);
2386       st->cr();
2387       if (!WizardMode)  return;  // that is enough
2388     }
2389   }
2390 
2391   st->print_cr(BULLET"---- fields (total size %d words):", oop_size(obj));
2392   FieldPrinter print_nonstatic_field(st, obj);
2393   do_nonstatic_fields(&print_nonstatic_field);
2394 
2395   if (as_klassOop() == SystemDictionary::Class_klass()) {
2396     st->print(BULLET"signature: ");
2397     java_lang_Class::print_signature(obj, st);
2398     st->cr();
2399     klassOop mirrored_klass = java_lang_Class::as_klassOop(obj);
2400     st->print(BULLET"fake entry for mirror: ");
2401     mirrored_klass->print_value_on(st);
2402     st->cr();
2403     st->print(BULLET"fake entry resolved_constructor: ");
2404     methodOop ctor = java_lang_Class::resolved_constructor(obj);
2405     ctor->print_value_on(st);
2406     klassOop array_klass = java_lang_Class::array_klass(obj);
2407     st->cr();
2408     st->print(BULLET"fake entry for array: ");
2409     array_klass->print_value_on(st);
2410     st->cr();
2411   } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
2412     st->print(BULLET"signature: ");
2413     java_dyn_MethodType::print_signature(obj, st);
2414     st->cr();
2415   }
2416 }
2417 
2418 #endif //PRODUCT
2419 
2420 void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
2421   st->print("a ");
2422   name()->print_value_on(st);
2423   obj->print_address_on(st);
2424   if (as_klassOop() == SystemDictionary::String_klass()
2425       && java_lang_String::value(obj) != NULL) {
2426     ResourceMark rm;
2427     int len = java_lang_String::length(obj);
2428     int plen = (len < 24 ? len : 12);
2429     char* str = java_lang_String::as_utf8_string(obj, 0, plen);
2430     st->print(" = \"%s\"", str);
2431     if (len > plen)
2432       st->print("...[%d]", len);
2433   } else if (as_klassOop() == SystemDictionary::Class_klass()) {
2434     klassOop k = java_lang_Class::as_klassOop(obj);
2435     st->print(" = ");
2436     if (k != NULL) {
2437       k->print_value_on(st);
2438     } else {
2439       const char* tname = type2name(java_lang_Class::primitive_type(obj));
2440       st->print("%s", tname ? tname : "type?");
2441     }
2442   } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
2443     st->print(" = ");
2444     java_dyn_MethodType::print_signature(obj, st);
2445   } else if (java_lang_boxing_object::is_instance(obj)) {
2446     st->print(" = ");
2447     java_lang_boxing_object::print(obj, st);
2448   }
2449 }
2450 
2451 const char* instanceKlass::internal_name() const {
2452   return external_name();
2453 }
2454 
2455 // Verification
2456 
2457 class VerifyFieldClosure: public OopClosure {
2458  protected:
2459   template <class T> void do_oop_work(T* p) {
2460     guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap");
2461     oop obj = oopDesc::load_decode_heap_oop(p);
2462     if (!obj->is_oop_or_null()) {
2463       tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj);
2464       Universe::print();
2465       guarantee(false, "boom");
2466     }
2467   }
2468  public:
2469   virtual void do_oop(oop* p)       { VerifyFieldClosure::do_oop_work(p); }
2470   virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
2471 };
2472 
2473 void instanceKlass::oop_verify_on(oop obj, outputStream* st) {
2474   Klass::oop_verify_on(obj, st);
2475   VerifyFieldClosure blk;
2476   oop_oop_iterate(obj, &blk);
2477 }
2478 
2479 #ifndef PRODUCT
2480 
2481 void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) {
  // This verification code is disabled.  JDK_Version::is_gte_jdk14x_version()
  // cannot be called since this function is called before the VM is
  // able to determine what JDK version it is running with.
  // The check below has always been false since 1.4.
  return;
2487 
2488   // This verification code temporarily disabled for the 1.4
2489   // reflection implementation since java.lang.Class now has
2490   // Java-level instance fields. Should rewrite this to handle this
2491   // case.
2492   if (!(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)) {
2493     // Verify that java.lang.Class instances have a fake oop field added.
2494     instanceKlass* ik = instanceKlass::cast(k);
2495 
2496     // Check that we have the right class
2497     static bool first_time = true;
2498     guarantee(k == SystemDictionary::Class_klass() && first_time, "Invalid verify of maps");
2499     first_time = false;
2500     const int extra = java_lang_Class::number_of_fake_oop_fields;
2501     guarantee(ik->nonstatic_field_size() == extra, "just checking");
2502     guarantee(ik->nonstatic_oop_map_count() == 1, "just checking");
2503     guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking");
2504 
2505     // Check that the map is (2,extra)
2506     int offset = java_lang_Class::klass_offset;
2507 
2508     OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
2509     guarantee(map->offset() == offset && map->count() == (unsigned int) extra,
2510               "sanity");
2511   }
2512 }
2513 
2514 #endif // ndef PRODUCT
2515 
2516 // JNIid class for jfieldIDs only
2520 JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
2521   _holder = holder;
2522   _offset = offset;
2523   _next = next;
2524   debug_only(_is_static_field_id = false;)
2525 }
2526 
2527 
2528 JNIid* JNIid::find(int offset) {
2529   JNIid* current = this;
2530   while (current != NULL) {
2531     if (current->offset() == offset) return current;
2532     current = current->next();
2533   }
2534   return NULL;
2535 }
2536 
2537 void JNIid::oops_do(OopClosure* f) {
2538   for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
2539     f->do_oop(cur->holder_addr());
2540   }
2541 }
2542 
2543 void JNIid::deallocate(JNIid* current) {
2544   while (current != NULL) {
2545     JNIid* next = current->next();
2546     delete current;
2547     current = next;
2548   }
2549 }
2550 
2551 
2552 void JNIid::verify(klassOop holder) {
2553   int first_field_offset  = instanceKlass::cast(holder)->offset_of_static_fields();
2554   int end_field_offset;
2555   end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
2556 
2557   JNIid* current = this;
2558   while (current != NULL) {
2559     guarantee(current->holder() == holder, "Invalid klass in JNIid");
2560 #ifdef ASSERT
2561     int o = current->offset();
2562     if (current->is_static_field_id()) {
2563       guarantee(o >= first_field_offset  && o < end_field_offset,  "Invalid static field offset in JNIid");
2564     }
2565 #endif
2566     current = current->next();
2567   }
2568 }
2569 
2570 
2571 #ifdef ASSERT
2572 void instanceKlass::set_init_state(ClassState state) {
2573   bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
2574                                                : (_init_state < state);
2575   assert(good_state || state == allocated, "illegal state transition");
2576   _init_state = state;
2577 }
2578 #endif
2579 
2580 
2581 // RedefineClasses() support for previous versions:
2582 
2583 // Add an information node that contains weak references to the
2584 // interesting parts of the previous version of the_class.
2585 // This is also where we clean out any unused weak references.
2586 // Note that while we delete nodes from the _previous_versions
2587 // array, we never delete the array itself until the klass is
2588 // unloaded. The has_been_redefined() query depends on that fact.
2589 //
2590 void instanceKlass::add_previous_version(instanceKlassHandle ikh,
2591        BitMap* emcp_methods, int emcp_method_count) {
2592   assert(Thread::current()->is_VM_thread(),
2593          "only VMThread can add previous versions");
2594 
2595   if (_previous_versions == NULL) {
2596     // This is the first previous version so make some space.
2597     // Start with 2 elements under the assumption that the class
2598     // won't be redefined much.
2599     _previous_versions =  new (ResourceObj::C_HEAP)
2600                             GrowableArray<PreviousVersionNode *>(2, true);
2601   }
2602 
2603   // RC_TRACE macro has an embedded ResourceMark
2604   RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d",
2605     ikh->external_name(), _previous_versions->length(), emcp_method_count));
2606   constantPoolHandle cp_h(ikh->constants());
2607   jobject cp_ref;
2608   if (cp_h->is_shared()) {
2609     // a shared ConstantPool requires a regular reference; a weak
2610     // reference would be collectible
2611     cp_ref = JNIHandles::make_global(cp_h);
2612   } else {
2613     cp_ref = JNIHandles::make_weak_global(cp_h);
2614   }
2615   PreviousVersionNode * pv_node = NULL;
2616   objArrayOop old_methods = ikh->methods();
2617 
2618   if (emcp_method_count == 0) {
2619     // non-shared ConstantPool gets a weak reference
2620     pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), NULL);
2621     RC_TRACE(0x00000400,
2622       ("add: all methods are obsolete; flushing any EMCP weak refs"));
2623   } else {
2624     int local_count = 0;
2625     GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP)
2626       GrowableArray<jweak>(emcp_method_count, true);
2627     for (int i = 0; i < old_methods->length(); i++) {
2628       if (emcp_methods->at(i)) {
2629         // this old method is EMCP so save a weak ref
2630         methodOop old_method = (methodOop) old_methods->obj_at(i);
2631         methodHandle old_method_h(old_method);
2632         jweak method_ref = JNIHandles::make_weak_global(old_method_h);
2633         method_refs->append(method_ref);
2634         if (++local_count >= emcp_method_count) {
2635           // no more EMCP methods so bail out now
2636           break;
2637         }
2638       }
2639     }
2640     // non-shared ConstantPool gets a weak reference
2641     pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), method_refs);
2642   }
2643 
2644   _previous_versions->append(pv_node);
2645 
2646   // Using weak references allows the interesting parts of previous
2647   // classes to be GC'ed when they are no longer needed. Since the
2648   // caller is the VMThread and we are at a safepoint, this is a good
2649   // time to clear out unused weak references.
2650 
2651   RC_TRACE(0x00000400, ("add: previous version length=%d",
2652     _previous_versions->length()));
2653 
2654   // skip the last entry since we just added it
2655   for (int i = _previous_versions->length() - 2; i >= 0; i--) {
    // check the previous versions array for GC'ed weak refs
    pv_node = _previous_versions->at(i);
    cp_ref = pv_node->prev_constant_pool();
    assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
    if (cp_ref == NULL) {
      delete pv_node;
      _previous_versions->remove_at(i);
      // Since we are traversing the array backwards, we don't have to
      // do anything special with the index.
      continue;  // robustness
    }

    constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
    if (cp == NULL) {
      // this entry has been GC'ed so remove it
      delete pv_node;
      _previous_versions->remove_at(i);
      // Since we are traversing the array backwards, we don't have to
      // do anything special with the index.
      continue;
    } else {
      RC_TRACE(0x00000400, ("add: previous version @%d is alive", i));
    }

    GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
    if (method_refs != NULL) {
      RC_TRACE(0x00000400, ("add: previous methods length=%d",
        method_refs->length()));
      for (int j = method_refs->length() - 1; j >= 0; j--) {
        jweak method_ref = method_refs->at(j);
        assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
        if (method_ref == NULL) {
          method_refs->remove_at(j);
          // Since we are traversing the array backwards, we don't have to
          // do anything special with the index.
          continue;  // robustness
        }

        methodOop method = (methodOop)JNIHandles::resolve(method_ref);
        if (method == NULL || emcp_method_count == 0) {
          // This method entry has been GC'ed or the current
          // RedefineClasses() call has made all methods obsolete
          // so remove it.
          JNIHandles::destroy_weak_global(method_ref);
          method_refs->remove_at(j);
        } else {
          // RC_TRACE macro has an embedded ResourceMark
          RC_TRACE(0x00000400,
            ("add: %s(%s): previous method @%d in version @%d is alive",
            method->name()->as_C_string(), method->signature()->as_C_string(),
            j, i));
        }
      }
    }
  }

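  // Every old method is either EMCP or obsolete, so the obsolete
  // count is simply the remainder.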
  int obsolete_method_count = old_methods->length() - emcp_method_count;

  if (emcp_method_count != 0 && obsolete_method_count != 0 &&
      _previous_versions->length() > 1) {
    // We have a mix of obsolete and EMCP methods. If there are
    // previous versions besides the one we just added, then we have
    // to clear out any matching EMCP method entries the hard way.
    int local_count = 0;
    for (int i = 0; i < old_methods->length(); i++) {
      if (!emcp_methods->at(i)) {
        // only obsolete methods are interesting
        methodOop old_method = (methodOop) old_methods->obj_at(i);
        symbolOop m_name = old_method->name();
        symbolOop m_signature = old_method->signature();

        // skip the last entry since we just added it
        for (int j = _previous_versions->length() - 2; j >= 0; j--) {
          // check the previous versions array for GC'ed weak refs
          pv_node = _previous_versions->at(j);
          cp_ref = pv_node->prev_constant_pool();
          assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
          if (cp_ref == NULL) {
            delete pv_node;
            _previous_versions->remove_at(j);
            // Since we are traversing the array backwards, we don't have to
            // do anything special with the index.
            continue;  // robustness
          }

          constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
          if (cp == NULL) {
            // this entry has been GC'ed so remove it
            delete pv_node;
            _previous_versions->remove_at(j);
            // Since we are traversing the array backwards, we don't have to
            // do anything special with the index.
            continue;
          }

          GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
          if (method_refs == NULL) {
            // We have run into a PreviousVersion generation where
            // all methods were made obsolete during that generation's
            // RedefineClasses() operation. At the time of that
            // operation, all EMCP methods were flushed so we don't
            // have to go back any further.
            //
            // A NULL method_refs is different from an empty method_refs.
            // We cannot infer any optimizations about older generations
            // from an empty method_refs for the current generation.
            break;
          }

          for (int k = method_refs->length() - 1; k >= 0; k--) {
            jweak method_ref = method_refs->at(k);
            assert(method_ref != NULL,
              "weak method ref was unexpectedly cleared");
            if (method_ref == NULL) {
              method_refs->remove_at(k);
              // Since we are traversing the array backwards, we don't
              // have to do anything special with the index.
              continue;  // robustness
            }

            methodOop method = (methodOop)JNIHandles::resolve(method_ref);
            if (method == NULL) {
              // this method entry has been GC'ed so skip it
              JNIHandles::destroy_weak_global(method_ref);
              method_refs->remove_at(k);
              continue;
            }

            if (method->name() == m_name &&
                method->signature() == m_signature) {
              // The current RedefineClasses() call has made all EMCP
              // versions of this method obsolete so mark it as obsolete
              // and remove the weak ref.
              RC_TRACE(0x00000400,
                ("add: %s(%s): flush obsolete method @%d in version @%d",
                m_name->as_C_string(), m_signature->as_C_string(), k, j));

              method->set_is_obsolete();
              JNIHandles::destroy_weak_global(method_ref);
              method_refs->remove_at(k);
              break;
            }
          }

          // The previous loop may not find a matching EMCP method, but
          // that doesn't mean that we can optimize and not go any
          // further back in the PreviousVersion generations. The EMCP
          // method for this generation could have already been GC'ed,
          // but there still may be an older EMCP method that has not
          // been GC'ed.
        }

        if (++local_count >= obsolete_method_count) {
          // no more obsolete methods so bail out now
          break;
        }
      }
    }
  }
} // end add_previous_version()


// Determine if instanceKlass has a previous version.
bool instanceKlass::has_previous_version() const {
  if (_previous_versions == NULL) {
    // no previous versions array so answer is easy
    return false;
  }

  for (int i = _previous_versions->length() - 1; i >= 0; i--) {
    // Check the previous versions array for an info node that hasn't
    // been GC'ed
    PreviousVersionNode * pv_node = _previous_versions->at(i);

    jobject cp_ref = pv_node->prev_constant_pool();
    assert(cp_ref != NULL, "cp reference was unexpectedly cleared");
    if (cp_ref == NULL) {
      continue;  // robustness
    }

    constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
    if (cp != NULL) {
      // we have at least one previous version
      return true;
    }

    // We don't have to check the method refs. If the constant pool has
    // been GC'ed then so have the methods.
  }

  // all of the underlying nodes' info has been GC'ed
  return false;
} // end has_previous_version()

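// Find the method with the given idnum. Fast path: a method's idnum
// often equals its index in methods(), so probe that slot first and
// fall back to a linear search if it doesn't match.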
methodOop instanceKlass::method_with_idnum(int idnum) {
  methodOop m = NULL;
  if (idnum < methods()->length()) {
    m = (methodOop) methods()->obj_at(idnum);
  }
  if (m == NULL || m->method_idnum() != idnum) {
    for (int index = 0; index < methods()->length(); ++index) {
      m = (methodOop) methods()->obj_at(index);
      if (m->method_idnum() == idnum) {
        return m;
      }
    }
  }
  return m;
}


// Set the annotation at 'idnum' to 'anno'.
// We don't want to create or extend the array if 'anno' is NULL, since that is the
// default value.  However, if the array exists and is long enough, we must set NULL values.
void instanceKlass::set_methods_annotations_of(int idnum, typeArrayOop anno, objArrayOop* md_p) {
  objArrayOop md = *md_p;
  if (md != NULL && md->length() > idnum) {
    md->obj_at_put(idnum, anno);
  } else if (anno != NULL) {
    // create the array
    int length = MAX2(idnum+1, (int)_idnum_allocated_count);
    md = oopFactory::new_system_objArray(length, Thread::current());
    if (*md_p != NULL) {
      // copy the existing entries
      for (int index = 0; index < (*md_p)->length(); index++) {
        md->obj_at_put(index, (*md_p)->obj_at(index));
      }
    }
    set_annotations(md, md_p);
    md->obj_at_put(idnum, anno);
  } // if no array and idnum isn't included there is nothing to do
}

// Construct a PreviousVersionNode entry for the array hung off
// the instanceKlass.
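// prev_cp_is_weak records whether prev_constant_pool is a weak global
// handle (non-shared constant pool) or a strong global handle (shared
// constant pool); the destructor uses it to pick the matching
// JNIHandles destroy call.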
PreviousVersionNode::PreviousVersionNode(jobject prev_constant_pool,
  bool prev_cp_is_weak, GrowableArray<jweak>* prev_EMCP_methods) {

  _prev_constant_pool = prev_constant_pool;
  _prev_cp_is_weak = prev_cp_is_weak;
  _prev_EMCP_methods = prev_EMCP_methods;
}


// Destroy a PreviousVersionNode
PreviousVersionNode::~PreviousVersionNode() {
  if (_prev_constant_pool != NULL) {
    if (_prev_cp_is_weak) {
      JNIHandles::destroy_weak_global(_prev_constant_pool);
    } else {
      JNIHandles::destroy_global(_prev_constant_pool);
    }
  }

  if (_prev_EMCP_methods != NULL) {
    for (int i = _prev_EMCP_methods->length() - 1; i >= 0; i--) {
      jweak method_ref = _prev_EMCP_methods->at(i);
      if (method_ref != NULL) {
        JNIHandles::destroy_weak_global(method_ref);
      }
    }
    delete _prev_EMCP_methods;
  }
}


// Construct a PreviousVersionInfo entry
PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
  _prev_constant_pool_handle = constantPoolHandle();  // NULL handle
  _prev_EMCP_method_handles = NULL;

  jobject cp_ref = pv_node->prev_constant_pool();
  assert(cp_ref != NULL, "constant pool ref was unexpectedly cleared");
  if (cp_ref == NULL) {
    return;  // robustness
  }

  constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
  if (cp == NULL) {
    // Weak reference has been GC'ed. Since the constant pool has been
    // GC'ed, the methods have also been GC'ed.
    return;
  }

  // make the constantPoolOop safe to return
  _prev_constant_pool_handle = constantPoolHandle(cp);

  GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
  if (method_refs == NULL) {
    // the instanceKlass did not have any EMCP methods
    return;
  }

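  // Note: resource allocated, not C-heap, so ~PreviousVersionInfo()
  // does not need to delete it.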
  _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);

  int n_methods = method_refs->length();
  for (int i = 0; i < n_methods; i++) {
    jweak method_ref = method_refs->at(i);
    assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
    if (method_ref == NULL) {
      continue;  // robustness
    }

    methodOop method = (methodOop)JNIHandles::resolve(method_ref);
    if (method == NULL) {
      // this entry has been GC'ed so skip it
      continue;
    }

    // make the methodOop safe to return
    _prev_EMCP_method_handles->append(methodHandle(method));
  }
}


// Destroy a PreviousVersionInfo
PreviousVersionInfo::~PreviousVersionInfo() {
  // Since _prev_EMCP_method_handles is not C-heap allocated, we
  // don't have to delete it.
}


// Construct a helper for walking the previous versions array
PreviousVersionWalker::PreviousVersionWalker(instanceKlass *ik) {
  _previous_versions = ik->previous_versions();
  _current_index = 0;
  // _hm needs no initialization
  _current_p = NULL;
}


// Destroy a PreviousVersionWalker
PreviousVersionWalker::~PreviousVersionWalker() {
  // Delete the current info just in case the caller didn't walk to
  // the end of the previous versions list. No harm if _current_p is
  // already NULL.
  delete _current_p;

  // When _hm is destroyed, all the Handles returned in
  // PreviousVersionInfo objects will be destroyed.
  // Also, after this destructor is finished it will be
  // safe to delete the GrowableArray allocated in the
  // PreviousVersionInfo objects.
}


// Return the interesting information for the next previous version
// of the klass. Returns NULL if there are no more previous versions.
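//
// Typical use, as a sketch (assumes the caller provides a ResourceMark
// and that the accessors mirror the PreviousVersionInfo fields above):
//
//   PreviousVersionWalker pvw(ik);
//   for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
//        pv_info != NULL; pv_info = pvw.next_previous_version()) {
//     // examine pv_info->prev_constant_pool_handle() and
//     // pv_info->prev_EMCP_method_handles(); the walker owns pv_info
//   }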
PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
  if (_previous_versions == NULL) {
    // no previous versions so nothing to return
    return NULL;
  }

  delete _current_p;  // cleanup the previous info for the caller
  _current_p = NULL;  // reset to NULL so we don't delete same object twice

  int length = _previous_versions->length();

  while (_current_index < length) {
    PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
    PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP)
                                          PreviousVersionInfo(pv_node);

    constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
    if (cp_h.is_null()) {
      delete pv_info;

      // The underlying node's info has been GC'ed so try the next one.
      // We don't have to check the methods. If the constant pool has
      // been GC'ed then so have the methods.
      continue;
    }

    // Found a node with non-GC'ed info so return it. The walker keeps
    // ownership of pv_info: it is deleted on the next call or in the
    // walker's destructor, so the caller must not delete it.
    _current_p = pv_info;
    return pv_info;
  }

  // all of the underlying nodes' info has been GC'ed
  return NULL;
} // end next_previous_version()