1 /*
   2  * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/javaClasses.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/verifier.hpp"
  29 #include "classfile/vmSymbols.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "gc_implementation/shared/markSweep.inline.hpp"
  32 #include "gc_interface/collectedHeap.inline.hpp"
  33 #include "interpreter/oopMapCache.hpp"
  34 #include "interpreter/rewriter.hpp"
  35 #include "jvmtifiles/jvmti.h"
  36 #include "memory/genOopClosures.inline.hpp"
  37 #include "memory/metadataFactory.hpp"
  38 #include "memory/oopFactory.hpp"
  39 #include "oops/fieldStreams.hpp"
  40 #include "oops/instanceClassLoaderKlass.hpp"
  41 #include "oops/instanceKlass.hpp"
  42 #include "oops/instanceMirrorKlass.hpp"
  43 #include "oops/instanceOop.hpp"
  44 #include "oops/klass.inline.hpp"
  45 #include "oops/method.hpp"
  46 #include "oops/oop.inline.hpp"
  47 #include "oops/symbol.hpp"
  48 #include "prims/jvmtiExport.hpp"
  49 #include "prims/jvmtiRedefineClassesTrace.hpp"
  50 #include "runtime/fieldDescriptor.hpp"
  51 #include "runtime/handles.inline.hpp"
  52 #include "runtime/javaCalls.hpp"
  53 #include "runtime/mutexLocker.hpp"
  54 #include "services/threadService.hpp"
  55 #include "utilities/dtrace.hpp"
  56 #ifdef TARGET_OS_FAMILY_linux
  57 # include "thread_linux.inline.hpp"
  58 #endif
  59 #ifdef TARGET_OS_FAMILY_solaris
  60 # include "thread_solaris.inline.hpp"
  61 #endif
  62 #ifdef TARGET_OS_FAMILY_windows
  63 # include "thread_windows.inline.hpp"
  64 #endif
  65 #ifdef TARGET_OS_FAMILY_bsd
  66 # include "thread_bsd.inline.hpp"
  67 #endif
  68 #ifndef SERIALGC
  69 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
  70 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  71 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  72 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  73 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  74 #include "gc_implementation/parNew/parOopClosures.inline.hpp"
  75 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
  76 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
  77 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
  78 #include "oops/oop.pcgc.inline.hpp"
  79 #endif
  80 #ifdef COMPILER1
  81 #include "c1/c1_Compiler.hpp"
  82 #endif
  83 
  84 #ifdef DTRACE_ENABLED
  85 
  86 #ifndef USDT2
  87 
  88 HS_DTRACE_PROBE_DECL4(hotspot, class__initialization__required,
  89   char*, intptr_t, oop, intptr_t);
  90 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__recursive,
  91   char*, intptr_t, oop, intptr_t, int);
  92 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__concurrent,
  93   char*, intptr_t, oop, intptr_t, int);
  94 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__erroneous,
  95   char*, intptr_t, oop, intptr_t, int);
  96 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__super__failed,
  97   char*, intptr_t, oop, intptr_t, int);
  98 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__clinit,
  99   char*, intptr_t, oop, intptr_t, int);
 100 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__error,
 101   char*, intptr_t, oop, intptr_t, int);
 102 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
 103   char*, intptr_t, oop, intptr_t, int);
 104 
 105 #define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)          \
 106   {                                                              \
 107     char* data = NULL;                                           \
 108     int len = 0;                                                 \
 109     Symbol* name = (clss)->name();                               \
 110     if (name != NULL) {                                          \
 111       data = (char*)name->bytes();                               \
 112       len = name->utf8_length();                                 \
 113     }                                                            \
 114     HS_DTRACE_PROBE4(hotspot, class__initialization__##type,     \
 115       data, len, (clss)->class_loader(), thread_type);           \
 116   }
 117 
 118 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
 119   {                                                              \
 120     char* data = NULL;                                           \
 121     int len = 0;                                                 \
 122     Symbol* name = (clss)->name();                               \
 123     if (name != NULL) {                                          \
 124       data = (char*)name->bytes();                               \
 125       len = name->utf8_length();                                 \
 126     }                                                            \
 127     HS_DTRACE_PROBE5(hotspot, class__initialization__##type,     \
 128       data, len, (clss)->class_loader(), thread_type, wait);     \
 129   }
 130 #else /* USDT2 */
 131 
 132 #define HOTSPOT_CLASS_INITIALIZATION_required HOTSPOT_CLASS_INITIALIZATION_REQUIRED
 133 #define HOTSPOT_CLASS_INITIALIZATION_recursive HOTSPOT_CLASS_INITIALIZATION_RECURSIVE
 134 #define HOTSPOT_CLASS_INITIALIZATION_concurrent HOTSPOT_CLASS_INITIALIZATION_CONCURRENT
 135 #define HOTSPOT_CLASS_INITIALIZATION_erroneous HOTSPOT_CLASS_INITIALIZATION_ERRONEOUS
 136 #define HOTSPOT_CLASS_INITIALIZATION_super__failed HOTSPOT_CLASS_INITIALIZATION_SUPER_FAILED
 137 #define HOTSPOT_CLASS_INITIALIZATION_clinit HOTSPOT_CLASS_INITIALIZATION_CLINIT
 138 #define HOTSPOT_CLASS_INITIALIZATION_error HOTSPOT_CLASS_INITIALIZATION_ERROR
 139 #define HOTSPOT_CLASS_INITIALIZATION_end HOTSPOT_CLASS_INITIALIZATION_END
 140 #define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)          \
 141   {                                                              \
 142     char* data = NULL;                                           \
 143     int len = 0;                                                 \
 144     Symbol* name = (clss)->name();                               \
 145     if (name != NULL) {                                          \
 146       data = (char*)name->bytes();                               \
 147       len = name->utf8_length();                                 \
 148     }                                                            \
 149     HOTSPOT_CLASS_INITIALIZATION_##type(                         \
 150       data, len, (clss)->class_loader(), thread_type);           \
 151   }
 152 
 153 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
 154   {                                                              \
 155     char* data = NULL;                                           \
 156     int len = 0;                                                 \
 157     Symbol* name = (clss)->name();                               \
 158     if (name != NULL) {                                          \
 159       data = (char*)name->bytes();                               \
 160       len = name->utf8_length();                                 \
 161     }                                                            \
 162     HOTSPOT_CLASS_INITIALIZATION_##type(                         \
 163       data, len, (clss)->class_loader(), thread_type, wait);     \
 164   }
 165 #endif /* USDT2 */
 166 
 167 #else //  ndef DTRACE_ENABLED
 168 
 169 #define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)
 170 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait)
 171 
 172 #endif //  ndef DTRACE_ENABLED
 173 
 174 Klass* InstanceKlass::allocate_instance_klass(ClassLoaderData* loader_data,
 175                                                 int vtable_len,
 176                                                 int itable_len,
 177                                                 int static_field_size,
 178                                                 int nonstatic_oop_map_size,
 179                                                 ReferenceType rt,
 180                                                 AccessFlags access_flags,
 181                                                 Symbol* name,
 182                                               Klass* super_klass,
 183                                                 KlassHandle host_klass,
 184                                                 TRAPS) {
 185 
 186   int size = InstanceKlass::size(vtable_len, itable_len, nonstatic_oop_map_size,
 187                                  access_flags.is_interface(),
 188                                  !host_klass.is_null());
 189 
 190   // Allocation
 191   InstanceKlass* ik;
 192   if (rt == REF_NONE) {
 193     if (name == vmSymbols::java_lang_Class()) {
 194       ik = new (loader_data, size, THREAD) InstanceMirrorKlass(
 195         vtable_len, itable_len, static_field_size, nonstatic_oop_map_size, rt,
 196         access_flags, !host_klass.is_null());
 197     } else if (name == vmSymbols::java_lang_ClassLoader() ||
 198           (SystemDictionary::ClassLoader_klass_loaded() &&
 199           super_klass != NULL &&
 200           super_klass->is_subtype_of(SystemDictionary::ClassLoader_klass()))) {
 201       ik = new (loader_data, size, THREAD) InstanceClassLoaderKlass(
 202         vtable_len, itable_len, static_field_size, nonstatic_oop_map_size, rt,
 203         access_flags, !host_klass.is_null());
 204     } else {
 205       // normal class
 206       ik = new (loader_data, size, THREAD) InstanceKlass(
 207         vtable_len, itable_len, static_field_size, nonstatic_oop_map_size, rt,
 208         access_flags, !host_klass.is_null());
 209     }
 210   } else {
 211     // reference klass
 212     ik = new (loader_data, size, THREAD) InstanceRefKlass(
 213         vtable_len, itable_len, static_field_size, nonstatic_oop_map_size, rt,
 214         access_flags, !host_klass.is_null());
 215   }
 216 
 217   return ik;
 218 }
 219 
// Constructor.  Sets the size-determining fields first (they must be valid
// before the first GC), NULLs out all pointer fields, and zeroes the rest of
// the allocated metadata block past the header.
InstanceKlass::InstanceKlass(int vtable_len,
                             int itable_len,
                             int static_field_size,
                             int nonstatic_oop_map_size,
                             ReferenceType rt,
                             AccessFlags access_flags,
                             bool is_anonymous) {
  No_Safepoint_Verifier no_safepoint; // until k becomes parsable

  int size = InstanceKlass::size(vtable_len, itable_len, nonstatic_oop_map_size,
                                 access_flags.is_interface(), is_anonymous);

  // The sizes of these three variables are used for determining the
  // size of the instanceKlassOop. It is critical that these are set to the right
  // sizes before the first GC, i.e., when we allocate the mirror.
  this->set_vtable_length(vtable_len);
  this->set_itable_length(itable_len);
  this->set_static_field_size(static_field_size);
  this->set_nonstatic_oop_map_size(nonstatic_oop_map_size);
  this->set_access_flags(access_flags);
  this->set_is_anonymous(is_anonymous);
  // Sanity: the fields set above must reproduce the size we allocated with.
  assert(this->size() == size, "wrong size for object");

  // NULL/zero every pointer and scalar field explicitly; the class file
  // parser fills these in later.
  this->set_array_klasses(NULL);
  this->set_methods(NULL);
  this->set_method_ordering(NULL);
  this->set_local_interfaces(NULL);
  this->set_transitive_interfaces(NULL);
  this->init_implementor();
  this->set_fields(NULL, 0);
  this->set_constants(NULL);
  this->set_class_loader_data(NULL);
  this->set_protection_domain(NULL);
  this->set_signers(NULL);
  this->set_source_file_name(NULL);
  this->set_source_debug_extension(NULL, 0);
  this->set_array_name(NULL);
  this->set_inner_classes(NULL);
  this->set_static_oop_field_count(0);
  this->set_nonstatic_field_size(0);
  this->set_is_marked_dependent(false);
  // Initial state is 'allocated'; the class file parser / linker advance it.
  this->set_init_state(InstanceKlass::allocated);
  this->set_init_thread(NULL);
  this->set_init_lock(NULL);
  this->set_reference_type(rt);
  this->set_oop_map_cache(NULL);
  this->set_jni_ids(NULL);
  this->set_osr_nmethods_head(NULL);
  this->set_breakpoints(NULL);
  this->init_previous_versions();
  this->set_generic_signature(NULL);
  this->release_set_methods_jmethod_ids(NULL);
  this->release_set_methods_cached_itable_indices(NULL);
  this->set_annotations(NULL);
  this->set_jvmti_cached_class_field_map(NULL);
  this->set_initial_method_idnum(0);

  // initialize the non-header words to zero
  intptr_t* p = (intptr_t*)this;
  for (int index = InstanceKlass::header_size(); index < size; index++) {
    p[index] = NULL_WORD;
  }

  // Set temporary value until parseClassFile updates it with the real instance
  // size.
  this->set_layout_helper(Klass::instance_layout_helper(0, true));
}
 287 
 288 
 289 // This function deallocates the metadata and C heap pointers that the
 290 // InstanceKlass points to.
 291 void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) {
 292 
 293   // Orphan the mirror first, CMS thinks it's still live.
 294   java_lang_Class::set_klass(java_mirror(), NULL);
 295 
 296   // Need to take this class off the class loader data list.
 297   loader_data->remove_class(this);
 298 
 299   // The array_klass for this class is created later, after error handling.
 300   // For class redefinition, we keep the original class so this scratch class
 301   // doesn't have an array class.  Either way, assert that there is nothing
 302   // to deallocate.
 303   assert(array_klasses() == NULL, "array classes shouldn't be created for this class yet");
 304 
 305   // Release C heap allocated data that this might point to, which includes
 306   // reference counting symbol names.
 307   release_C_heap_structures();
 308 
 309   Array<Method*>* ms = methods();
 310   if (ms != Universe::the_empty_method_array()) {
 311     for (int i = 0; i <= methods()->length() -1 ; i++) {
 312       Method* method = methods()->at(i);
 313       // Only want to delete methods that are not executing for RedefineClasses.
 314       // The previous version will point to them so they're not totally dangling
 315       assert (!method->on_stack(), "shouldn't be called with methods on stack");
 316       MetadataFactory::free_metadata(loader_data, method);
 317     }
 318     MetadataFactory::free_array<Method*>(loader_data, methods());
 319   }
 320   set_methods(NULL);
 321 
 322   if (method_ordering() != Universe::the_empty_int_array()) {
 323     MetadataFactory::free_array<int>(loader_data, method_ordering());
 324   }
 325   set_method_ordering(NULL);
 326 
 327   // This array is in Klass, but remove it with the InstanceKlass since
 328   // this place would be the only caller and it can share memory with transitive
 329   // interfaces.
 330   if (secondary_supers() != Universe::the_empty_klass_array() &&
 331       secondary_supers() != transitive_interfaces()) {
 332     MetadataFactory::free_array<Klass*>(loader_data, secondary_supers());
 333   }
 334   set_secondary_supers(NULL);
 335 
 336   // Only deallocate transitive interfaces if not empty, same as super class
 337   // or same as local interfaces.   See code in parseClassFile.
 338   Array<Klass*>* ti = transitive_interfaces();
 339   if (ti != Universe::the_empty_klass_array() && ti != local_interfaces()) {
 340     // check that the interfaces don't come from super class
 341     Array<Klass*>* sti = (super() == NULL) ? NULL :
 342        InstanceKlass::cast(super())->transitive_interfaces();
 343     if (ti != sti) {
 344       MetadataFactory::free_array<Klass*>(loader_data, ti);
 345     }
 346   }
 347   set_transitive_interfaces(NULL);
 348 
 349   // local interfaces can be empty
 350   Array<Klass*>* li = local_interfaces();
 351   if (li != Universe::the_empty_klass_array()) {
 352     MetadataFactory::free_array<Klass*>(loader_data, li);
 353   }
 354   set_local_interfaces(NULL);
 355 
 356   MetadataFactory::free_array<jushort>(loader_data, fields());
 357   set_fields(NULL, 0);
 358 
 359   // If a method from a redefined class is using this constant pool, don't
 360   // delete it, yet.  The new class's previous version will point to this.
 361   assert (!constants()->on_stack(), "shouldn't be called if anything is onstack");
 362   MetadataFactory::free_metadata(loader_data, constants());
 363   set_constants(NULL);
 364 
 365   if (inner_classes() != Universe::the_empty_short_array()) {
 366     MetadataFactory::free_array<jushort>(loader_data, inner_classes());
 367   }
 368   set_inner_classes(NULL);
 369 
 370   // Null out Java heap objects, although these won't be walked to keep
 371   // alive once this InstanceKlass is deallocated.
 372   set_protection_domain(NULL);
 373   set_signers(NULL);
 374   set_init_lock(NULL);
 375   set_annotations(NULL);
 376 }
 377 
// Return the per-class initialization lock object.  The field is read
// exactly once so that a racing fence_and_clear_init_lock() cannot make a
// caller observe two different values.  A NULL result is legal only after
// the class has left the not-initialized states (see the assert).
volatile oop InstanceKlass::init_lock() const {
  volatile oop lock = _init_lock;  // read once
  assert((oop)lock != NULL || !is_not_initialized(), // initialized or in_error state
         "only fully initialized state can have a null lock");
  return lock;
}
 384 
// Set the initialization lock to null so the object can be GC'ed.  Any racing
// threads to get this lock will see a null lock and will not lock.
// That's okay because they all check for initialized state after getting
// the lock and return.
void InstanceKlass::fence_and_clear_init_lock() {
  // make sure previous stores are all done, notably the init_state.
  // (storestore orders the state publication before the lock is cleared.)
  OrderAccess::storestore();
  klass_oop_store(&_init_lock, NULL);
  assert(!is_not_initialized(), "class must be initialized now");
}
 395 
 396 
 397 bool InstanceKlass::should_be_initialized() const {
 398   return !is_initialized();
 399 }
 400 
 401 klassVtable* InstanceKlass::vtable() const {
 402   return new klassVtable(this, start_of_vtable(), vtable_length() / vtableEntry::size());
 403 }
 404 
 405 klassItable* InstanceKlass::itable() const {
 406   return new klassItable(instanceKlassHandle(this));
 407 }
 408 
// Eagerly initialize this class if -XX:+EagerInitialization is on and doing
// so can have no observable side effects: no <clinit>, not java.lang.Object,
// and the super class already initialized.  Otherwise this is a no-op and
// initialization happens lazily as usual.
void InstanceKlass::eager_initialize(Thread *thread) {
  if (!EagerInitialization) return;

  if (this->is_not_initialized()) {
    // abort if the class has a class initializer
    if (this->class_initializer() != NULL) return;

    // abort if it is java.lang.Object (initialization is handled in genesis)
    Klass* super = this->super();
    if (super == NULL) return;

    // abort if the super class should be initialized
    if (!InstanceKlass::cast(super)->is_initialized()) return;

    // call body to expose the this pointer
    instanceKlassHandle this_oop(thread, this);
    eager_initialize_impl(this_oop);
  }
}
 428 
 429 
// Body of eager_initialize(): under the init lock, link the class and — if
// linking succeeds — move it straight to fully_initialized, since the caller
// has already established that there is no <clinit> to run.
void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
  EXCEPTION_MARK;
  volatile oop init_lock = this_oop->init_lock();
  // A NULL lock means the class already left the not-initialized states;
  // ObjectLocker then skips locking and the state check below bails out.
  ObjectLocker ol(init_lock, THREAD, init_lock != NULL);

  // abort if someone beat us to the initialization
  if (!this_oop->is_not_initialized()) return;  // note: not equivalent to is_initialized()

  ClassState old_state = this_oop->init_state();
  link_class_impl(this_oop, true, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // Abort if linking the class throws an exception.

    // Use a test to avoid redundantly resetting the state if there's
    // no change.  Set_init_state() asserts that state changes make
    // progress, whereas here we might just be spinning in place.
    if( old_state != this_oop->_init_state )
      this_oop->set_init_state (old_state);
  } else {
    // linking successful, mark class as initialized
    this_oop->set_init_state (fully_initialized);
    this_oop->fence_and_clear_init_lock();
    // trace
    if (TraceClassInitialization) {
      ResourceMark rm(THREAD);
      tty->print_cr("[Initialized %s without side effects]", this_oop->external_name());
    }
  }
}
 460 
 461 
// See "The Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
// process. The step comments refers to the procedure described in that section.
// Note: implementation moved to static method to expose the this pointer.
void InstanceKlass::initialize(TRAPS) {
  if (this->should_be_initialized()) {
    HandleMark hm(THREAD);
    instanceKlassHandle this_oop(THREAD, this);
    // CHECK propagates any exception thrown during initialization.
    initialize_impl(this_oop, CHECK);
    // Note: at this point the class may be initialized
    //       OR it may be in the state of being initialized
    //       in case of recursive initialization!
  } else {
    assert(is_initialized(), "sanity check");
  }
}
 477 
 478 
 479 bool InstanceKlass::verify_code(
 480     instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
 481   // 1) Verify the bytecodes
 482   Verifier::Mode mode =
 483     throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
 484   return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false);
 485 }
 486 
 487 
 488 // Used exclusively by the shared spaces dump mechanism to prevent
 489 // classes mapped into the shared regions in new VMs from appearing linked.
 490 
// Reset the class state back to 'loaded' (see comment above: used only by
// the shared-spaces dump so mapped classes do not appear linked in new VMs).
void InstanceKlass::unlink_class() {
  assert(is_linked(), "must be linked");
  _init_state = loaded;
}
 495 
// Link this class (verify, rewrite, build vtable/itable), throwing
// VerifyError on verification failure.  No-op if already linked.
void InstanceKlass::link_class(TRAPS) {
  assert(is_loaded(), "must be loaded");
  if (!is_linked()) {
    HandleMark hm(THREAD);
    instanceKlassHandle this_oop(THREAD, this);
    // 'true' => throw VerifyError rather than returning false on failure.
    link_class_impl(this_oop, true, CHECK);
  }
}
 504 
 505 // Called to verify that a class can link during initialization, without
 506 // throwing a VerifyError.
bool InstanceKlass::link_class_or_fail(TRAPS) {
  assert(is_loaded(), "must be loaded");
  if (!is_linked()) {
    HandleMark hm(THREAD);
    instanceKlassHandle this_oop(THREAD, this);
    // 'false' => report verification failure via the return value, not
    // by throwing VerifyError (see comment above this function).
    link_class_impl(this_oop, false, CHECK_false);
  }
  // Reports whether linking actually succeeded.
  return is_linked();
}
 516 
// Link this_oop: link the super class and local interfaces first, then (under
// the init lock) verify and rewrite the bytecodes, relocate/link methods,
// initialize vtable/itable, and finally flip the state to 'linked'.
// Returns true once linked; returns false (or throws, depending on
// throw_verifyerror) when verification fails or the super is an interface.
bool InstanceKlass::link_class_impl(
    instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
  // check for error state
  if (this_oop->is_in_error_state()) {
    ResourceMark rm(THREAD);
    THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(),
               this_oop->external_name(), false);
  }
  // return if already verified
  if (this_oop->is_linked()) {
    return true;
  }

  // Timing
  // timer handles recursion
  assert(THREAD->is_Java_thread(), "non-JavaThread in link_class_impl");
  JavaThread* jt = (JavaThread*)THREAD;

  // link super class before linking this class
  instanceKlassHandle super(THREAD, this_oop->super());
  if (super.not_null()) {
    if (super->is_interface()) {  // check if super class is an interface
      ResourceMark rm(THREAD);
      Exceptions::fthrow(
        THREAD_AND_LOCATION,
        vmSymbols::java_lang_IncompatibleClassChangeError(),
        "class %s has interface %s as super class",
        this_oop->external_name(),
        super->external_name()
      );
      return false;
    }

    // Recursive call; CHECK_false propagates any exception from the super.
    link_class_impl(super, throw_verifyerror, CHECK_false);
  }

  // link all interfaces implemented by this class before linking this class
  Array<Klass*>* interfaces = this_oop->local_interfaces();
  int num_interfaces = interfaces->length();
  for (int index = 0; index < num_interfaces; index++) {
    HandleMark hm(THREAD);
    instanceKlassHandle ih(THREAD, interfaces->at(index));
    link_class_impl(ih, throw_verifyerror, CHECK_false);
  }

  // in case the class is linked in the process of linking its superclasses
  if (this_oop->is_linked()) {
    return true;
  }

  // trace only the link time for this klass that includes
  // the verification time
  PerfClassTraceTime vmtimer(ClassLoader::perf_class_link_time(),
                             ClassLoader::perf_class_link_selftime(),
                             ClassLoader::perf_classes_linked(),
                             jt->get_thread_stat()->perf_recursion_counts_addr(),
                             jt->get_thread_stat()->perf_timers_addr(),
                             PerfClassTraceTime::CLASS_LINK);

  // verification & rewriting
  {
    // A NULL init lock means the class already left the not-initialized
    // states (see init_lock()); ObjectLocker then skips locking, which is
    // safe because is_linked() is re-checked below.
    volatile oop init_lock = this_oop->init_lock();
    ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
    // rewritten will have been set if loader constraint error found
    // on an earlier link attempt
    // don't verify or rewrite if already rewritten

    if (!this_oop->is_linked()) {
      if (!this_oop->is_rewritten()) {
        {
          // Timer includes any side effects of class verification (resolution,
          // etc), but not recursive entry into verify_code().
          PerfClassTraceTime timer(ClassLoader::perf_class_verify_time(),
                                   ClassLoader::perf_class_verify_selftime(),
                                   ClassLoader::perf_classes_verified(),
                                   jt->get_thread_stat()->perf_recursion_counts_addr(),
                                   jt->get_thread_stat()->perf_timers_addr(),
                                   PerfClassTraceTime::CLASS_VERIFY);
          bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD);
          if (!verify_ok) {
            return false;
          }
        }

        // Just in case a side-effect of verify linked this class already
        // (which can sometimes happen since the verifier loads classes
        // using custom class loaders, which are free to initialize things)
        if (this_oop->is_linked()) {
          return true;
        }

        // also sets rewritten
        this_oop->rewrite_class(CHECK_false);
      }

      // relocate jsrs and link methods after they are all rewritten
      this_oop->relocate_and_link_methods(CHECK_false);

      // Initialize the vtable and interface table after
      // methods have been rewritten since rewrite may
      // fabricate new Method*s.
      // also does loader constraint checking
      if (!this_oop()->is_shared()) {
        ResourceMark rm(THREAD);
        this_oop->vtable()->initialize_vtable(true, CHECK_false);
        this_oop->itable()->initialize_itable(true, CHECK_false);
      }
#ifdef ASSERT
      else {
        ResourceMark rm(THREAD);
        this_oop->vtable()->verify(tty, true);
        // In case itable verification is ever added.
        // this_oop->itable()->verify(tty, true);
      }
#endif
      // Publish the new state; JVMTI agents may want a class-prepare event.
      this_oop->set_init_state(linked);
      if (JvmtiExport::should_post_class_prepare()) {
        Thread *thread = THREAD;
        assert(thread->is_Java_thread(), "thread->is_Java_thread()");
        JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop());
      }
    }
  }
  return true;
}
 642 
 643 
// Rewrite the byte codes of all of the methods of a class.
// The rewriter must be called exactly once. Rewriting must happen after
// verification but before the first method of the class is executed.
void InstanceKlass::rewrite_class(TRAPS) {
  assert(is_loaded(), "must be loaded");
  instanceKlassHandle this_oop(THREAD, this);
  if (this_oop->is_rewritten()) {
    // Already-rewritten classes are expected only from the shared archive.
    assert(this_oop()->is_shared(), "rewriting an unshared class?");
    return;
  }
  Rewriter::rewrite(this_oop, CHECK);
  // Mark so we never rewrite twice (see 'exactly once' requirement above).
  this_oop->set_rewritten();
}
 657 
// Now relocate and link method entry points after class is rewritten.
// This is outside is_rewritten flag. In case of an exception, it can be
// executed more than once.
void InstanceKlass::relocate_and_link_methods(TRAPS) {
  assert(is_loaded(), "must be loaded");
  instanceKlassHandle this_oop(THREAD, this);
  Rewriter::relocate_and_link(this_oop, CHECK);
}
 666 
 667 
// Implements the class-initialization procedure of JVMS 5.5 ("the JVM book"
// steps referenced below): serializes competing initializers on the per-class
// init lock, allows reentrant initialization by the same thread, initializes
// the superclass (and, with default methods, relevant superinterfaces) first,
// runs <clinit>, and publishes the final state (fully_initialized or
// initialization_error) to all waiting threads.
void InstanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
  // Make sure klass is linked (verified) before initialization
  // A class could already be verified, since it has been reflected upon.
  this_oop->link_class(CHECK);

  DTRACE_CLASSINIT_PROBE(required, InstanceKlass::cast(this_oop()), -1);

  // Records whether we blocked on another thread's initialization (reported
  // to the DTrace probes below).
  bool wait = false;

  // refer to the JVM book page 47 for description of steps
  // Step 1
  {
    volatile oop init_lock = this_oop->init_lock();
    ObjectLocker ol(init_lock, THREAD, init_lock != NULL);

    Thread *self = THREAD; // it's passed the current thread

    // Step 2
    // If we were to use wait() instead of waitInterruptibly() then
    // we might end up throwing IE from link/symbol resolution sites
    // that aren't expected to throw.  This would wreak havoc.  See 6320309.
    while(this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {
        wait = true;
      ol.waitUninterruptibly(CHECK);
    }

    // Step 3
    // Recursive request by the very thread that is already initializing:
    // treat as complete (the outer frame will finish the job).
    if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self)) {
      DTRACE_CLASSINIT_PROBE_WAIT(recursive, InstanceKlass::cast(this_oop()), -1,wait);
      return;
    }

    // Step 4
    // Another thread completed initialization while we waited.
    if (this_oop->is_initialized()) {
      DTRACE_CLASSINIT_PROBE_WAIT(concurrent, InstanceKlass::cast(this_oop()), -1,wait);
      return;
    }

    // Step 5
    // A previous initialization attempt failed: throw NoClassDefFoundError.
    if (this_oop->is_in_error_state()) {
      DTRACE_CLASSINIT_PROBE_WAIT(erroneous, InstanceKlass::cast(this_oop()), -1,wait);
      ResourceMark rm(THREAD);
      const char* desc = "Could not initialize class ";
      const char* className = this_oop->external_name();
      size_t msglen = strlen(desc) + strlen(className) + 1;
      char* message = NEW_RESOURCE_ARRAY(char, msglen);
      if (NULL == message) {
        // Out of memory: can't create detailed error message
        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
      } else {
        jio_snprintf(message, msglen, "%s%s", desc, className);
        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
      }
    }

    // Step 6
    // Claim initialization for this thread before dropping the init lock.
    this_oop->set_init_state(being_initialized);
    this_oop->set_init_thread(self);
  }

  // Step 7
  // Initialize the superclass first (interfaces do not trigger super init).
  Klass* super_klass = this_oop->super();
  if (super_klass != NULL && !this_oop->is_interface() && super_klass->should_be_initialized()) {
    super_klass->initialize(THREAD);

    if (HAS_PENDING_EXCEPTION) {
      Handle e(THREAD, PENDING_EXCEPTION);
      CLEAR_PENDING_EXCEPTION;
      {
        EXCEPTION_MARK;
        this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads
        CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, superclass initialization error is thrown below
      }
      DTRACE_CLASSINIT_PROBE_WAIT(super__failed, InstanceKlass::cast(this_oop()), -1,wait);
      THROW_OOP(e());
    }
  }

  if (this_oop->has_default_methods()) {
    // Step 7.5: initialize any interfaces which have default methods
    for (int i = 0; i < this_oop->local_interfaces()->length(); ++i) {
      Klass* iface = this_oop->local_interfaces()->at(i);
      InstanceKlass* ik = InstanceKlass::cast(iface);
      if (ik->has_default_methods() && ik->should_be_initialized()) {
        ik->initialize(THREAD);

        if (HAS_PENDING_EXCEPTION) {
          Handle e(THREAD, PENDING_EXCEPTION);
          CLEAR_PENDING_EXCEPTION;
          {
            EXCEPTION_MARK;
            // Locks object, set state, and notify all waiting threads
            this_oop->set_initialization_state_and_notify(
                initialization_error, THREAD);

            // ignore any exception thrown, superclass initialization error is
            // thrown below
            CLEAR_PENDING_EXCEPTION;
          }
          DTRACE_CLASSINIT_PROBE_WAIT(
              super__failed, InstanceKlass::cast(this_oop()), -1, wait);
          THROW_OOP(e());
        }
      }
    }
  }

  // Step 8
  // Run this class's <clinit> (if any).
  {
    assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
    JavaThread* jt = (JavaThread*)THREAD;
    DTRACE_CLASSINIT_PROBE_WAIT(clinit, InstanceKlass::cast(this_oop()), -1,wait);
    // Timer includes any side effects of class initialization (resolution,
    // etc), but not recursive entry into call_class_initializer().
    PerfClassTraceTime timer(ClassLoader::perf_class_init_time(),
                             ClassLoader::perf_class_init_selftime(),
                             ClassLoader::perf_classes_inited(),
                             jt->get_thread_stat()->perf_recursion_counts_addr(),
                             jt->get_thread_stat()->perf_timers_addr(),
                             PerfClassTraceTime::CLASS_CLINIT);
    this_oop->call_class_initializer(THREAD);
  }

  // Step 9
  // <clinit> completed normally: publish fully_initialized and wake waiters.
  if (!HAS_PENDING_EXCEPTION) {
    this_oop->set_initialization_state_and_notify(fully_initialized, CHECK);
    { ResourceMark rm(THREAD);
      debug_only(this_oop->vtable()->verify(tty, true);)
    }
  }
  else {
    // Step 10 and 11
    // <clinit> threw: mark the class erroneous, then rethrow the original
    // exception (wrapped in ExceptionInInitializerError unless it is an Error).
    Handle e(THREAD, PENDING_EXCEPTION);
    CLEAR_PENDING_EXCEPTION;
    {
      EXCEPTION_MARK;
      this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
      CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, class initialization error is thrown below
    }
    DTRACE_CLASSINIT_PROBE_WAIT(error, InstanceKlass::cast(this_oop()), -1,wait);
    if (e->is_a(SystemDictionary::Error_klass())) {
      THROW_OOP(e());
    } else {
      JavaCallArguments args(e);
      THROW_ARG(vmSymbols::java_lang_ExceptionInInitializerError(),
                vmSymbols::throwable_void_signature(),
                &args);
    }
  }
  DTRACE_CLASSINIT_PROBE_WAIT(end, InstanceKlass::cast(this_oop()), -1,wait);
}
 819 
 820 
 821 // Note: implementation moved to static method to expose the this pointer.
 822 void InstanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
 823   instanceKlassHandle kh(THREAD, this);
 824   set_initialization_state_and_notify_impl(kh, state, CHECK);
 825 }
 826 
// Publishes a new initialization state under the per-class init lock and
// wakes every thread blocked in initialize_impl Step 2.  The state is set
// before fence_and_clear_init_lock() so waiters observe the new state when
// they reacquire the lock (the helper's name indicates it fences before
// clearing the init-lock oop; see its declaration for details).
void InstanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
  volatile oop init_lock = this_oop->init_lock();
  ObjectLocker ol(init_lock, THREAD, init_lock != NULL);
  this_oop->set_init_state(state);
  this_oop->fence_and_clear_init_lock();
  ol.notify_all(CHECK);
}
 834 
// The embedded _implementor field can only record one implementor.
// When there are more than one implementors, the _implementor field
// is set to the interface Klass* itself. Following are the possible
// values for the _implementor field:
//   NULL                  - no implementor
//   implementor Klass*    - one implementor
//   self                  - more than one implementor
//
// The _implementor field only exists for interfaces.
// Used by class hierarchy analysis (CHA); must run under Compile_lock so the
// compiler observes a consistent implementor graph.
void InstanceKlass::add_implementor(Klass* k) {
  assert(Compile_lock->owned_by_self(), "");
  assert(is_interface(), "not interface");
  // Filter out my subinterfaces.
  // (Note: Interfaces are never on the subklass list.)
  if (InstanceKlass::cast(k)->is_interface()) return;

  // Filter out subclasses whose supers already implement me.
  // (Note: CHA must walk subclasses of direct implementors
  // in order to locate indirect implementors.)
  Klass* sk = InstanceKlass::cast(k)->super();
  if (sk != NULL && InstanceKlass::cast(sk)->implements_interface(this))
    // We only need to check one immediate superclass, since the
    // implements_interface query looks at transitive_interfaces.
    // Any supers of the super have the same (or fewer) transitive_interfaces.
    return;

  Klass* ik = implementor();
  if (ik == NULL) {
    set_implementor(k);
  } else if (ik != this) {
    // There is already an implementor. Use itself as an indicator of
    // more than one implementors.
    set_implementor(this);
  }

  // The implementor also implements the transitive_interfaces
  // (reached here by recursing through each direct superinterface).
  for (int index = 0; index < local_interfaces()->length(); index++) {
    InstanceKlass::cast(local_interfaces()->at(index))->add_implementor(k);
  }
}
 875 
 876 void InstanceKlass::init_implementor() {
 877   if (is_interface()) {
 878     set_implementor(NULL);
 879   }
 880 }
 881 
 882 
 883 void InstanceKlass::process_interfaces(Thread *thread) {
 884   // link this class into the implementors list of every interface it implements
 885   Klass* this_as_klass_oop = this;
 886   for (int i = local_interfaces()->length() - 1; i >= 0; i--) {
 887     assert(local_interfaces()->at(i)->is_klass(), "must be a klass");
 888     InstanceKlass* interf = InstanceKlass::cast(local_interfaces()->at(i));
 889     assert(interf->is_interface(), "expected interface");
 890     interf->add_implementor(this_as_klass_oop);
 891   }
 892 }
 893 
 894 bool InstanceKlass::can_be_primary_super_slow() const {
 895   if (is_interface())
 896     return false;
 897   else
 898     return Klass::can_be_primary_super_slow();
 899 }
 900 
 901 GrowableArray<Klass*>* InstanceKlass::compute_secondary_supers(int num_extra_slots) {
 902   // The secondaries are the implemented interfaces.
 903   InstanceKlass* ik = InstanceKlass::cast(this);
 904   Array<Klass*>* interfaces = ik->transitive_interfaces();
 905   int num_secondaries = num_extra_slots + interfaces->length();
 906   if (num_secondaries == 0) {
 907     // Must share this for correct bootstrapping!
 908     set_secondary_supers(Universe::the_empty_klass_array());
 909     return NULL;
 910   } else if (num_extra_slots == 0) {
 911     // The secondary super list is exactly the same as the transitive interfaces.
 912     // Redefine classes has to be careful not to delete this!
 913     set_secondary_supers(interfaces);
 914     return NULL;
 915   } else {
 916     // Copy transitive interfaces to a temporary growable array to be constructed
 917     // into the secondary super list with extra slots.
 918     GrowableArray<Klass*>* secondaries = new GrowableArray<Klass*>(interfaces->length());
 919     for (int i = 0; i < interfaces->length(); i++) {
 920       secondaries->push(interfaces->at(i));
 921     }
 922     return secondaries;
 923   }
 924 }
 925 
 926 bool InstanceKlass::compute_is_subtype_of(Klass* k) {
 927   if (k->is_interface()) {
 928     return implements_interface(k);
 929   } else {
 930     return Klass::compute_is_subtype_of(k);
 931   }
 932 }
 933 
 934 bool InstanceKlass::implements_interface(Klass* k) const {
 935   if (this == k) return true;
 936   assert(k->is_interface(), "should be an interface class");
 937   for (int i = 0; i < transitive_interfaces()->length(); i++) {
 938     if (transitive_interfaces()->at(i) == k) {
 939       return true;
 940     }
 941   }
 942   return false;
 943 }
 944 
// Allocates a Java object array whose element klass is this klass, with the
// given length and dimensionality n.  Throws NegativeArraySizeException for
// length < 0 and OutOfMemoryError when length exceeds the VM array limit.
objArrayOop InstanceKlass::allocate_objArray(int n, int length, TRAPS) {
  if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
  if (length > arrayOopDesc::max_array_length(T_OBJECT)) {
    report_java_out_of_memory("Requested array size exceeds VM limit");
    JvmtiExport::post_array_size_exhausted();
    THROW_OOP_0(Universe::out_of_memory_error_array_size());
  }
  int size = objArrayOopDesc::object_size(length);  // heap size for this length
  Klass* ak = array_klass(n, CHECK_NULL);           // may create the array klass lazily
  KlassHandle h_ak (THREAD, ak);                    // handleize across the allocation
  objArrayOop o =
    (objArrayOop)CollectedHeap::array_allocate(h_ak, size, length, CHECK_NULL);
  return o;
}
 959 
// Registers instance i with the finalizer mechanism by calling the
// Finalizer.register method.  Returns the instance via the handle because
// the Java call may trigger a GC that moves the object.
instanceOop InstanceKlass::register_finalizer(instanceOop i, TRAPS) {
  if (TraceFinalizerRegistration) {
    tty->print("Registered ");
    i->print_value_on(tty);
    tty->print_cr(" (" INTPTR_FORMAT ") as finalizable", (address)i);
  }
  instanceHandle h_i(THREAD, i);
  // Pass the handle as argument, JavaCalls::call expects oop as jobjects
  JavaValue result(T_VOID);
  JavaCallArguments args(h_i);
  methodHandle mh (THREAD, Universe::finalizer_register_method());
  JavaCalls::call(&result, mh, &args, CHECK_NULL);
  return h_i();   // re-read through the handle in case the object moved
}
 974 
// Allocates a new instance of this klass on the Java heap, registering it
// with the finalizer when required.  Pre-handle queries are deliberate:
// the allocation below may GC.
instanceOop InstanceKlass::allocate_instance(TRAPS) {
  bool has_finalizer_flag = has_finalizer(); // Query before possible GC
  int size = size_helper();  // Query before forming handle.

  KlassHandle h_k(THREAD, this);

  instanceOop i;

  i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
  if (has_finalizer_flag && !RegisterFinalizersAtInit) {
    // Register at allocation time when registration is not deferred to
    // the Object.<init> path.
    i = register_finalizer(i, CHECK_NULL);
  }
  return i;
}
 989 
 990 void InstanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
 991   if (is_interface() || is_abstract()) {
 992     ResourceMark rm(THREAD);
 993     THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
 994               : vmSymbols::java_lang_InstantiationException(), external_name());
 995   }
 996   if (this == SystemDictionary::Class_klass()) {
 997     ResourceMark rm(THREAD);
 998     THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError()
 999               : vmSymbols::java_lang_IllegalAccessException(), external_name());
1000   }
1001 }
1002 
1003 Klass* InstanceKlass::array_klass_impl(bool or_null, int n, TRAPS) {
1004   instanceKlassHandle this_oop(THREAD, this);
1005   return array_klass_impl(this_oop, or_null, n, THREAD);
1006 }
1007 
1008 Klass* InstanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {
1009   if (this_oop->array_klasses() == NULL) {
1010     if (or_null) return NULL;
1011 
1012     ResourceMark rm;
1013     JavaThread *jt = (JavaThread *)THREAD;
1014     {
1015       // Atomic creation of array_klasses
1016       MutexLocker mc(Compile_lock, THREAD);   // for vtables
1017       MutexLocker ma(MultiArray_lock, THREAD);
1018 
1019       // Check if update has already taken place
1020       if (this_oop->array_klasses() == NULL) {
1021         Klass*    k = ObjArrayKlass::allocate_objArray_klass(this_oop->class_loader_data(), 1, this_oop, CHECK_NULL);
1022         this_oop->set_array_klasses(k);
1023       }
1024     }
1025   }
1026   // _this will always be set at this point
1027   ObjArrayKlass* oak = (ObjArrayKlass*)this_oop->array_klasses();
1028   if (or_null) {
1029     return oak->array_klass_or_null(n);
1030   }
1031   return oak->array_klass(n, CHECK_NULL);
1032 }
1033 
1034 Klass* InstanceKlass::array_klass_impl(bool or_null, TRAPS) {
1035   return array_klass_impl(or_null, 1, THREAD);
1036 }
1037 
1038 void InstanceKlass::call_class_initializer(TRAPS) {
1039   instanceKlassHandle ik (THREAD, this);
1040   call_class_initializer_impl(ik, THREAD);
1041 }
1042 
1043 static int call_class_initializer_impl_counter = 0;   // for debugging
1044 
1045 Method* InstanceKlass::class_initializer() {
1046   Method* clinit = find_method(
1047       vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
1048   if (clinit != NULL && clinit->has_valid_initializer_flags()) {
1049     return clinit;
1050   }
1051   return NULL;
1052 }
1053 
1054 void InstanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
1055   if (ReplayCompiles &&
1056       (ReplaySuppressInitializers == 1 ||
1057        ReplaySuppressInitializers >= 2 && this_oop->class_loader() != NULL)) {
1058     // Hide the existence of the initializer for the purpose of replaying the compile
1059     return;
1060   }
1061 
1062   methodHandle h_method(THREAD, this_oop->class_initializer());
1063   assert(!this_oop->is_initialized(), "we cannot initialize twice");
1064   if (TraceClassInitialization) {
1065     tty->print("%d Initializing ", call_class_initializer_impl_counter++);
1066     this_oop->name()->print_value();
1067     tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop());
1068   }
1069   if (h_method() != NULL) {
1070     JavaCallArguments args; // No arguments
1071     JavaValue result(T_VOID);
1072     JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
1073   }
1074 }
1075 
1076 
// Fills entry_for with the interpreter oop map for (method, bci), creating
// this klass's OopMapCache on first use.
// NOTE(review): this is double-checked locking on _oop_map_cache with no
// explicit release/acquire ordering visible here -- presumably safe on the
// platforms this code targets; confirm publication ordering if porting.
void InstanceKlass::mask_for(methodHandle method, int bci,
  InterpreterOopMap* entry_for) {
  // Dirty read, then double-check under a lock.
  if (_oop_map_cache == NULL) {
    // Otherwise, allocate a new one.
    MutexLocker x(OopMapCacheAlloc_lock);
    // First time use. Allocate a cache in C heap
    if (_oop_map_cache == NULL) {
      _oop_map_cache = new OopMapCache();
    }
  }
  // _oop_map_cache is constant after init; lookup below does is own locking.
  _oop_map_cache->lookup(method, bci, entry_for);
}
1091 
1092 
1093 bool InstanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
1094   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
1095     Symbol* f_name = fs.name();
1096     Symbol* f_sig  = fs.signature();
1097     if (f_name == name && f_sig == sig) {
1098       fd->initialize(const_cast<InstanceKlass*>(this), fs.index());
1099       return true;
1100     }
1101   }
1102   return false;
1103 }
1104 
1105 
1106 Klass* InstanceKlass::find_interface_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
1107   const int n = local_interfaces()->length();
1108   for (int i = 0; i < n; i++) {
1109     Klass* intf1 = local_interfaces()->at(i);
1110     assert(intf1->is_interface(), "just checking type");
1111     // search for field in current interface
1112     if (InstanceKlass::cast(intf1)->find_local_field(name, sig, fd)) {
1113       assert(fd->is_static(), "interface field must be static");
1114       return intf1;
1115     }
1116     // search for field in direct superinterfaces
1117     Klass* intf2 = InstanceKlass::cast(intf1)->find_interface_field(name, sig, fd);
1118     if (intf2 != NULL) return intf2;
1119   }
1120   // otherwise field lookup fails
1121   return NULL;
1122 }
1123 
1124 
1125 Klass* InstanceKlass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
1126   // search order according to newest JVM spec (5.4.3.2, p.167).
1127   // 1) search for field in current klass
1128   if (find_local_field(name, sig, fd)) {
1129     return const_cast<InstanceKlass*>(this);
1130   }
1131   // 2) search for field recursively in direct superinterfaces
1132   { Klass* intf = find_interface_field(name, sig, fd);
1133     if (intf != NULL) return intf;
1134   }
1135   // 3) apply field lookup recursively if superclass exists
1136   { Klass* supr = super();
1137     if (supr != NULL) return InstanceKlass::cast(supr)->find_field(name, sig, fd);
1138   }
1139   // 4) otherwise field lookup fails
1140   return NULL;
1141 }
1142 
1143 
1144 Klass* InstanceKlass::find_field(Symbol* name, Symbol* sig, bool is_static, fieldDescriptor* fd) const {
1145   // search order according to newest JVM spec (5.4.3.2, p.167).
1146   // 1) search for field in current klass
1147   if (find_local_field(name, sig, fd)) {
1148     if (fd->is_static() == is_static) return const_cast<InstanceKlass*>(this);
1149   }
1150   // 2) search for field recursively in direct superinterfaces
1151   if (is_static) {
1152     Klass* intf = find_interface_field(name, sig, fd);
1153     if (intf != NULL) return intf;
1154   }
1155   // 3) apply field lookup recursively if superclass exists
1156   { Klass* supr = super();
1157     if (supr != NULL) return InstanceKlass::cast(supr)->find_field(name, sig, is_static, fd);
1158   }
1159   // 4) otherwise field lookup fails
1160   return NULL;
1161 }
1162 
1163 
1164 bool InstanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
1165   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
1166     if (fs.offset() == offset) {
1167       fd->initialize(const_cast<InstanceKlass*>(this), fs.index());
1168       if (fd->is_static() == is_static) return true;
1169     }
1170   }
1171   return false;
1172 }
1173 
1174 
1175 bool InstanceKlass::find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
1176   Klass* klass = const_cast<InstanceKlass*>(this);
1177   while (klass != NULL) {
1178     if (InstanceKlass::cast(klass)->find_local_field_from_offset(offset, is_static, fd)) {
1179       return true;
1180     }
1181     klass = klass->super();
1182   }
1183   return false;
1184 }
1185 
1186 
1187 void InstanceKlass::methods_do(void f(Method* method)) {
1188   int len = methods()->length();
1189   for (int index = 0; index < len; index++) {
1190     Method* m = methods()->at(index);
1191     assert(m->is_method(), "must be method");
1192     f(m);
1193   }
1194 }
1195 
1196 
1197 void InstanceKlass::do_local_static_fields(FieldClosure* cl) {
1198   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
1199     if (fs.access_flags().is_static()) {
1200       fieldDescriptor fd;
1201       fd.initialize(this, fs.index());
1202       cl->do_field(&fd);
1203     }
1204   }
1205 }
1206 
1207 
1208 void InstanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
1209   instanceKlassHandle h_this(THREAD, this);
1210   do_local_static_fields_impl(h_this, f, CHECK);
1211 }
1212 
1213 
1214 void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
1215   for (JavaFieldStream fs(this_oop()); !fs.done(); fs.next()) {
1216     if (fs.access_flags().is_static()) {
1217       fieldDescriptor fd;
1218       fd.initialize(this_oop(), fs.index());
1219       f(&fd, CHECK);
1220     }
1221   }
1222 }
1223 
1224 
// qsort comparator for (offset, field-index) int pairs: ascending by offset.
static int compare_fields_by_offset(int* a, int* b) {
  return *a - *b;
}
1228 
// Invokes cl->do_field for every nonstatic field, superclass fields first,
// with each klass's own fields visited in ascending offset order.
void InstanceKlass::do_nonstatic_fields(FieldClosure* cl) {
  InstanceKlass* super = superklass();
  if (super != NULL) {
    super->do_nonstatic_fields(cl);
  }
  fieldDescriptor fd;
  int length = java_fields_count();
  // In DebugInfo nonstatic fields are sorted by offset.
  // Flat scratch array of (offset, field-index) int pairs; +1 guards length==0.
  int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1), mtClass);
  int j = 0;
  for (int i = 0; i < length; i += 1) {
    fd.initialize(this, i);
    if (!fd.is_static()) {
      fields_sorted[j + 0] = fd.offset();
      fields_sorted[j + 1] = i;
      j += 2;
    }
  }
  if (j > 0) {
    length = j;   // reuse: now the number of ints actually filled in
    // _sort_Fn is defined in growableArray.hpp.
    qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
    for (int i = 0; i < length; i += 2) {
      fd.initialize(this, fields_sorted[i + 1]);
      assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
      cl->do_field(&fd);
    }
  }
  FREE_C_HEAP_ARRAY(int, fields_sorted, mtClass);
}
1259 
1260 
1261 void InstanceKlass::array_klasses_do(void f(Klass* k, TRAPS), TRAPS) {
1262   if (array_klasses() != NULL)
1263     ArrayKlass::cast(array_klasses())->array_klasses_do(f, THREAD);
1264 }
1265 
1266 void InstanceKlass::array_klasses_do(void f(Klass* k)) {
1267   if (array_klasses() != NULL)
1268     ArrayKlass::cast(array_klasses())->array_klasses_do(f);
1269 }
1270 
1271 
1272 void InstanceKlass::with_array_klasses_do(void f(Klass* k)) {
1273   f(this);
1274   array_klasses_do(f);
1275 }
1276 
#ifdef ASSERT
// Debug-only O(n) reference lookup used to cross-check the binary search in
// find_method().  Returns the index of the matching method, or -1.
static int linear_search(Array<Method*>* methods, Symbol* name, Symbol* signature) {
  const int count = methods->length();
  for (int i = 0; i < count; i++) {
    Method* m = methods->at(i);
    assert(m->is_method(), "must be method");
    if (m->name() == name && m->signature() == signature) {
      return i;
    }
  }
  return -1;
}
#endif
1290 
1291 static int binary_search(Array<Method*>* methods, Symbol* name) {
1292   int len = methods->length();
1293   // methods are sorted, so do binary search
1294   int l = 0;
1295   int h = len - 1;
1296   while (l <= h) {
1297     int mid = (l + h) >> 1;
1298     Method* m = methods->at(mid);
1299     assert(m->is_method(), "must be method");
1300     int res = m->name()->fast_compare(name);
1301     if (res == 0) {
1302       return mid;
1303     } else if (res < 0) {
1304       l = mid + 1;
1305     } else {
1306       h = mid - 1;
1307     }
1308   }
1309   return -1;
1310 }
1311 
1312 Method* InstanceKlass::find_method(Symbol* name, Symbol* signature) const {
1313   return InstanceKlass::find_method(methods(), name, signature);
1314 }
1315 
// Finds the method with the given name and signature in a name-sorted
// methods array: binary-search for any method of that name, then scan the
// adjacent same-named (overloaded) entries in both directions for the
// signature.  Returns NULL when no method matches.
Method* InstanceKlass::find_method(
    Array<Method*>* methods, Symbol* name, Symbol* signature) {
  int hit = binary_search(methods, name);
  if (hit != -1) {
    Method* m = methods->at(hit);
    // Do linear search to find matching signature.  First, quick check
    // for common case
    if (m->signature() == signature) return m;
    // search downwards through overloaded methods
    int i;
    for (i = hit - 1; i >= 0; --i) {
        Method* m = methods->at(i);
        assert(m->is_method(), "must be method");
        if (m->name() != name) break;
        if (m->signature() == signature) return m;
    }
    // search upwards
    for (i = hit + 1; i < methods->length(); ++i) {
        Method* m = methods->at(i);
        assert(m->is_method(), "must be method");
        if (m->name() != name) break;
        if (m->signature() == signature) return m;
    }
    // not found
#ifdef ASSERT
    // Cross-check: the debug-only linear scan must agree no entry exists.
    int index = linear_search(methods, name, signature);
    assert(index == -1, err_msg("binary search should have found entry %d", index));
#endif
  }
  return NULL;
}
1347 
1348 int InstanceKlass::find_method_by_name(Symbol* name, int* end) {
1349   return find_method_by_name(methods(), name, end);
1350 }
1351 
1352 int InstanceKlass::find_method_by_name(
1353     Array<Method*>* methods, Symbol* name, int* end_ptr) {
1354   assert(end_ptr != NULL, "just checking");
1355   int start = binary_search(methods, name);
1356   int end = start + 1;
1357   if (start != -1) {
1358     while (start - 1 >= 0 && (methods->at(start - 1))->name() == name) --start;
1359     while (end < methods->length() && (methods->at(end))->name() == name) ++end;
1360     *end_ptr = end;
1361     return start;
1362   }
1363   return -1;
1364 }
1365 
1366 Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
1367   Klass* klass = const_cast<InstanceKlass*>(this);
1368   while (klass != NULL) {
1369     Method* method = InstanceKlass::cast(klass)->find_method(name, signature);
1370     if (method != NULL) return method;
1371     klass = InstanceKlass::cast(klass)->super();
1372   }
1373   return NULL;
1374 }
1375 
1376 // lookup a method in all the interfaces that this class implements
1377 Method* InstanceKlass::lookup_method_in_all_interfaces(Symbol* name,
1378                                                          Symbol* signature) const {
1379   Array<Klass*>* all_ifs = transitive_interfaces();
1380   int num_ifs = all_ifs->length();
1381   InstanceKlass *ik = NULL;
1382   for (int i = 0; i < num_ifs; i++) {
1383     ik = InstanceKlass::cast(all_ifs->at(i));
1384     Method* m = ik->lookup_method(name, signature);
1385     if (m != NULL) {
1386       return m;
1387     }
1388   }
1389   return NULL;
1390 }
1391 
/* jni_id_for_impl for jfieldIds only */
// Slow path: under JfieldIdCreation_lock, finds or allocates the JNIid node
// for the static field at the given offset.  The re-lookup after taking the
// lock handles the race with another thread creating the same id.
JNIid* InstanceKlass::jni_id_for_impl(instanceKlassHandle this_oop, int offset) {
  MutexLocker ml(JfieldIdCreation_lock);
  // Retry lookup after we got the lock
  JNIid* probe = this_oop->jni_ids() == NULL ? NULL : this_oop->jni_ids()->find(offset);
  if (probe == NULL) {
    // Slow case, allocate new static field identifier
    // (new node is prepended to the list and published via set_jni_ids)
    probe = new JNIid(this_oop(), offset, this_oop->jni_ids());
    this_oop->set_jni_ids(probe);
  }
  return probe;
}
1404 
1405 
1406 /* jni_id_for for jfieldIds only */
1407 JNIid* InstanceKlass::jni_id_for(int offset) {
1408   JNIid* probe = jni_ids() == NULL ? NULL : jni_ids()->find(offset);
1409   if (probe == NULL) {
1410     probe = jni_id_for_impl(this, offset);
1411   }
1412   return probe;
1413 }
1414 
// Reads one EnclosingMethod attribute value (class index or method index,
// selected by offset) from the slots appended to the tail of the
// inner_classes array, or returns 0 when no such data is present.
u2 InstanceKlass::enclosing_method_data(int offset) {
  Array<jushort>* inner_class_list = inner_classes();
  if (inner_class_list == NULL) {
    return 0;
  }
  int length = inner_class_list->length();
  if (length % inner_class_next_offset == 0) {
    // Length is an exact multiple of the inner-class record size, so no
    // enclosing-method slots were appended.
    return 0;
  } else {
    int index = length - enclosing_method_attribute_size;
    assert(offset < enclosing_method_attribute_size, "invalid offset");
    return inner_class_list->at(index + offset);
  }
}
1429 
// Stores the EnclosingMethod class and method constant-pool indices into the
// slots reserved at the tail of the inner_classes array.  Silently a no-op
// when no enclosing-method slots were reserved at parse time.
void InstanceKlass::set_enclosing_method_indices(u2 class_index,
                                                 u2 method_index) {
  Array<jushort>* inner_class_list = inner_classes();
  assert (inner_class_list != NULL, "_inner_classes list is not set up");
  int length = inner_class_list->length();
  if (length % inner_class_next_offset == enclosing_method_attribute_size) {
    int index = length - enclosing_method_attribute_size;
    inner_class_list->at_put(
      index + enclosing_method_class_index_offset, class_index);
    inner_class_list->at_put(
      index + enclosing_method_method_index_offset, method_index);
  }
}
1443 
1444 // Lookup or create a jmethodID.
1445 // This code is called by the VMThread and JavaThreads so the
1446 // locking has to be done very carefully to avoid deadlocks
1447 // and/or other cache consistency problems.
1448 //
1449 jmethodID InstanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h) {
1450   size_t idnum = (size_t)method_h->method_idnum();
1451   jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
1452   size_t length = 0;
1453   jmethodID id = NULL;
1454 
1455   // We use a double-check locking idiom here because this cache is
1456   // performance sensitive. In the normal system, this cache only
1457   // transitions from NULL to non-NULL which is safe because we use
1458   // release_set_methods_jmethod_ids() to advertise the new cache.
1459   // A partially constructed cache should never be seen by a racing
1460   // thread. We also use release_store_ptr() to save a new jmethodID
1461   // in the cache so a partially constructed jmethodID should never be
1462   // seen either. Cache reads of existing jmethodIDs proceed without a
1463   // lock, but cache writes of a new jmethodID requires uniqueness and
1464   // creation of the cache itself requires no leaks so a lock is
1465   // generally acquired in those two cases.
1466   //
1467   // If the RedefineClasses() API has been used, then this cache can
1468   // grow and we'll have transitions from non-NULL to bigger non-NULL.
1469   // Cache creation requires no leaks and we require safety between all
1470   // cache accesses and freeing of the old cache so a lock is generally
1471   // acquired when the RedefineClasses() API has been used.
1472 
1473   if (jmeths != NULL) {
1474     // the cache already exists
1475     if (!ik_h->idnum_can_increment()) {
1476       // the cache can't grow so we can just get the current values
1477       get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1478     } else {
1479       // cache can grow so we have to be more careful
1480       if (Threads::number_of_threads() == 0 ||
1481           SafepointSynchronize::is_at_safepoint()) {
1482         // we're single threaded or at a safepoint - no locking needed
1483         get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1484       } else {
1485         MutexLocker ml(JmethodIdCreation_lock);
1486         get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1487       }
1488     }
1489   }
1490   // implied else:
1491   // we need to allocate a cache so default length and id values are good
1492 
1493   if (jmeths == NULL ||   // no cache yet
1494       length <= idnum ||  // cache is too short
1495       id == NULL) {       // cache doesn't contain entry
1496 
1497     // This function can be called by the VMThread so we have to do all
1498     // things that might block on a safepoint before grabbing the lock.
1499     // Otherwise, we can deadlock with the VMThread or have a cache
1500     // consistency issue. These vars keep track of what we might have
1501     // to free after the lock is dropped.
1502     jmethodID  to_dealloc_id     = NULL;
1503     jmethodID* to_dealloc_jmeths = NULL;
1504 
1505     // may not allocate new_jmeths or use it if we allocate it
1506     jmethodID* new_jmeths = NULL;
1507     if (length <= idnum) {
1508       // allocate a new cache that might be used
1509       size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
1510       new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1, mtClass);
1511       memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
1512       // cache size is stored in element[0], other elements offset by one
1513       new_jmeths[0] = (jmethodID)size;
1514     }
1515 
1516     // allocate a new jmethodID that might be used
1517     jmethodID new_id = NULL;
1518     if (method_h->is_old() && !method_h->is_obsolete()) {
1519       // The method passed in is old (but not obsolete), we need to use the current version
1520       Method* current_method = ik_h->method_with_idnum((int)idnum);
1521       assert(current_method != NULL, "old and but not obsolete, so should exist");
1522       new_id = Method::make_jmethod_id(ik_h->class_loader_data(), current_method);
1523     } else {
1524       // It is the current version of the method or an obsolete method,
1525       // use the version passed in
1526       new_id = Method::make_jmethod_id(ik_h->class_loader_data(), method_h());
1527     }
1528 
1529     if (Threads::number_of_threads() == 0 ||
1530         SafepointSynchronize::is_at_safepoint()) {
1531       // we're single threaded or at a safepoint - no locking needed
1532       id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
1533                                           &to_dealloc_id, &to_dealloc_jmeths);
1534     } else {
1535       MutexLocker ml(JmethodIdCreation_lock);
1536       id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
1537                                           &to_dealloc_id, &to_dealloc_jmeths);
1538     }
1539 
1540     // The lock has been dropped so we can free resources.
1541     // Free up either the old cache or the new cache if we allocated one.
1542     if (to_dealloc_jmeths != NULL) {
1543       FreeHeap(to_dealloc_jmeths);
1544     }
1545     // free up the new ID since it wasn't needed
1546     if (to_dealloc_id != NULL) {
1547       Method::destroy_jmethod_id(ik_h->class_loader_data(), to_dealloc_id);
1548     }
1549   }
1550   return id;
1551 }
1552 
1553 
// Common code to fetch the jmethodID from the cache or update the
// cache with the new jmethodID. This function should never do anything
// that causes the caller to go to a safepoint or we can deadlock with
// the VMThread or have cache consistency issues.
//
// ik_h                - klass whose jmethodID cache is being queried/updated
// idnum               - method index number within the klass
// new_id              - pre-allocated jmethodID to install if the cache has none
// new_jmeths          - pre-allocated replacement cache array (may be NULL when
//                       the caller determined the current cache is long enough)
// to_dealloc_id_p     - out-param: jmethodID the caller must free after the
//                       lock is dropped (NULL if none)
// to_dealloc_jmeths_p - out-param: cache array the caller must free after the
//                       lock is dropped (NULL if none)
jmethodID InstanceKlass::get_jmethod_id_fetch_or_update(
            instanceKlassHandle ik_h, size_t idnum, jmethodID new_id,
            jmethodID* new_jmeths, jmethodID* to_dealloc_id_p,
            jmethodID** to_dealloc_jmeths_p) {
  assert(new_id != NULL, "sanity check");
  assert(to_dealloc_id_p != NULL, "sanity check");
  assert(to_dealloc_jmeths_p != NULL, "sanity check");
  assert(Threads::number_of_threads() == 0 ||
         SafepointSynchronize::is_at_safepoint() ||
         JmethodIdCreation_lock->owned_by_self(), "sanity check");

  // reacquire the cache - we are locked, single threaded or at a safepoint
  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
  jmethodID  id     = NULL;
  size_t     length = 0;

  if (jmeths == NULL ||                         // no cache yet
      (length = (size_t)jmeths[0]) <= idnum) {  // cache is too short
    if (jmeths != NULL) {
      // copy any existing entries from the old cache
      for (size_t index = 0; index < length; index++) {
        new_jmeths[index+1] = jmeths[index+1];
      }
      *to_dealloc_jmeths_p = jmeths;  // save old cache for later delete
    }
    // Publish the new cache with release semantics so that racing
    // lock-free readers see fully copied entries.
    ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
  } else {
    // fetch jmethodID (if any) from the existing cache
    id = jmeths[idnum+1];
    *to_dealloc_jmeths_p = new_jmeths;  // save new cache for later delete
  }
  if (id == NULL) {
    // No matching jmethodID in the existing cache or we have a new
    // cache or we just grew the cache. This cache write is done here
    // by the first thread to win the foot race because a jmethodID
    // needs to be unique once it is generally available.
    id = new_id;

    // The jmethodID cache can be read while unlocked so we have to
    // make sure the new jmethodID is complete before installing it
    // in the cache.
    OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
  } else {
    *to_dealloc_id_p = new_id; // save new id for later delete
  }
  return id;
}
1606 
1607 
1608 // Common code to get the jmethodID cache length and the jmethodID
1609 // value at index idnum if there is one.
1610 //
1611 void InstanceKlass::get_jmethod_id_length_value(jmethodID* cache,
1612        size_t idnum, size_t *length_p, jmethodID* id_p) {
1613   assert(cache != NULL, "sanity check");
1614   assert(length_p != NULL, "sanity check");
1615   assert(id_p != NULL, "sanity check");
1616 
1617   // cache size is stored in element[0], other elements offset by one
1618   *length_p = (size_t)cache[0];
1619   if (*length_p <= idnum) {  // cache is too short
1620     *id_p = NULL;
1621   } else {
1622     *id_p = cache[idnum+1];  // fetch jmethodID (if any)
1623   }
1624 }
1625 
1626 
1627 // Lookup a jmethodID, NULL if not found.  Do no blocking, no allocations, no handles
1628 jmethodID InstanceKlass::jmethod_id_or_null(Method* method) {
1629   size_t idnum = (size_t)method->method_idnum();
1630   jmethodID* jmeths = methods_jmethod_ids_acquire();
1631   size_t length;                                // length assigned as debugging crumb
1632   jmethodID id = NULL;
1633   if (jmeths != NULL &&                         // If there is a cache
1634       (length = (size_t)jmeths[0]) > idnum) {   // and if it is long enough,
1635     id = jmeths[idnum+1];                       // Look up the id (may be NULL)
1636   }
1637   return id;
1638 }
1639 
1640 
// Cache an itable index
//
// Stores 'index' at slot 'idnum' of the itable-index cache, creating or
// growing the cache under JNICachedItableIndex_lock when necessary. An
// old cache array replaced while holding the lock is freed only after
// the lock has been dropped.
void InstanceKlass::set_cached_itable_index(size_t idnum, int index) {
  int* indices = methods_cached_itable_indices_acquire();
  int* to_dealloc_indices = NULL;

  // We use a double-check locking idiom here because this cache is
  // performance sensitive. In the normal system, this cache only
  // transitions from NULL to non-NULL which is safe because we use
  // release_set_methods_cached_itable_indices() to advertise the
  // new cache. A partially constructed cache should never be seen
  // by a racing thread. Cache reads and writes proceed without a
  // lock, but creation of the cache itself requires no leaks so a
  // lock is generally acquired in that case.
  //
  // If the RedefineClasses() API has been used, then this cache can
  // grow and we'll have transitions from non-NULL to bigger non-NULL.
  // Cache creation requires no leaks and we require safety between all
  // cache accesses and freeing of the old cache so a lock is generally
  // acquired when the RedefineClasses() API has been used.

  if (indices == NULL || idnum_can_increment()) {
    // we need a cache or the cache can grow
    MutexLocker ml(JNICachedItableIndex_lock);
    // reacquire the cache to see if another thread already did the work
    indices = methods_cached_itable_indices_acquire();
    size_t length = 0;
    // cache size is stored in element[0], other elements offset by one
    if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
      size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
      int* new_indices = NEW_C_HEAP_ARRAY(int, size+1, mtClass);
      new_indices[0] = (int)size;
      // copy any existing entries
      size_t i;
      for (i = 0; i < length; i++) {
        new_indices[i+1] = indices[i+1];
      }
      // Set all the rest to -1
      for (i = length; i < size; i++) {
        new_indices[i+1] = -1;
      }
      if (indices != NULL) {
        // We have an old cache to delete so save it for after we
        // drop the lock.
        to_dealloc_indices = indices;
      }
      // Publish with release semantics so lock-free readers never see a
      // partially initialized array.
      release_set_methods_cached_itable_indices(indices = new_indices);
    }

    if (idnum_can_increment()) {
      // this cache can grow so we have to write to it safely
      indices[idnum+1] = index;
    }
  } else {
    // Lock-free fast path. NOTE(review): presumably the MutexLocker on the
    // other path clears unhandled oops as a side effect, so it is done
    // explicitly here in CHECK_UNHANDLED_OOPS builds - confirm.
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }

  if (!idnum_can_increment()) {
    // The cache cannot grow and this JNI itable index value does not
    // have to be unique like a jmethodID. If there is a race to set it,
    // it doesn't matter.
    indices[idnum+1] = index;
  }

  if (to_dealloc_indices != NULL) {
    // we allocated a new cache so free the old one
    FreeHeap(to_dealloc_indices);
  }
}
1709 
1710 
1711 // Retrieve a cached itable index
1712 int InstanceKlass::cached_itable_index(size_t idnum) {
1713   int* indices = methods_cached_itable_indices_acquire();
1714   if (indices != NULL && ((size_t)indices[0]) > idnum) {
1715      // indices exist and are long enough, retrieve possible cached
1716     return indices[idnum+1];
1717   }
1718   return -1;
1719 }
1720 
1721 
1722 //
1723 // Walk the list of dependent nmethods searching for nmethods which
1724 // are dependent on the changes that were passed in and mark them for
1725 // deoptimization.  Returns the number of nmethods found.
1726 //
1727 int InstanceKlass::mark_dependent_nmethods(DepChange& changes) {
1728   assert_locked_or_safepoint(CodeCache_lock);
1729   int found = 0;
1730   nmethodBucket* b = _dependencies;
1731   while (b != NULL) {
1732     nmethod* nm = b->get_nmethod();
1733     // since dependencies aren't removed until an nmethod becomes a zombie,
1734     // the dependency list may contain nmethods which aren't alive.
1735     if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
1736       if (TraceDependencies) {
1737         ResourceMark rm;
1738         tty->print_cr("Marked for deoptimization");
1739         tty->print_cr("  context = %s", this->external_name());
1740         changes.print();
1741         nm->print();
1742         nm->print_dependencies();
1743       }
1744       nm->mark_for_deoptimization();
1745       found++;
1746     }
1747     b = b->next();
1748   }
1749   return found;
1750 }
1751 
1752 
1753 //
1754 // Add an nmethodBucket to the list of dependencies for this nmethod.
1755 // It's possible that an nmethod has multiple dependencies on this klass
1756 // so a count is kept for each bucket to guarantee that creation and
1757 // deletion of dependencies is consistent.
1758 //
1759 void InstanceKlass::add_dependent_nmethod(nmethod* nm) {
1760   assert_locked_or_safepoint(CodeCache_lock);
1761   nmethodBucket* b = _dependencies;
1762   nmethodBucket* last = NULL;
1763   while (b != NULL) {
1764     if (nm == b->get_nmethod()) {
1765       b->increment();
1766       return;
1767     }
1768     b = b->next();
1769   }
1770   _dependencies = new nmethodBucket(nm, _dependencies);
1771 }
1772 
1773 
1774 //
1775 // Decrement count of the nmethod in the dependency list and remove
// the bucket completely when the count goes to 0.  This method must
// find a corresponding bucket otherwise there's a bug in the
// recording of dependencies.
1779 //
1780 void InstanceKlass::remove_dependent_nmethod(nmethod* nm) {
1781   assert_locked_or_safepoint(CodeCache_lock);
1782   nmethodBucket* b = _dependencies;
1783   nmethodBucket* last = NULL;
1784   while (b != NULL) {
1785     if (nm == b->get_nmethod()) {
1786       if (b->decrement() == 0) {
1787         if (last == NULL) {
1788           _dependencies = b->next();
1789         } else {
1790           last->set_next(b->next());
1791         }
1792         delete b;
1793       }
1794       return;
1795     }
1796     last = b;
1797     b = b->next();
1798   }
1799 #ifdef ASSERT
1800   tty->print_cr("### %s can't find dependent nmethod:", this->external_name());
1801   nm->print();
1802 #endif // ASSERT
1803   ShouldNotReachHere();
1804 }
1805 
1806 
1807 #ifndef PRODUCT
1808 void InstanceKlass::print_dependent_nmethods(bool verbose) {
1809   nmethodBucket* b = _dependencies;
1810   int idx = 0;
1811   while (b != NULL) {
1812     nmethod* nm = b->get_nmethod();
1813     tty->print("[%d] count=%d { ", idx++, b->count());
1814     if (!verbose) {
1815       nm->print_on(tty, "nmethod");
1816       tty->print_cr(" } ");
1817     } else {
1818       nm->print();
1819       nm->print_dependencies();
1820       tty->print_cr("--- } ");
1821     }
1822     b = b->next();
1823   }
1824 }
1825 
1826 
1827 bool InstanceKlass::is_dependent_nmethod(nmethod* nm) {
1828   nmethodBucket* b = _dependencies;
1829   while (b != NULL) {
1830     if (nm == b->get_nmethod()) {
1831       return true;
1832     }
1833     b = b->next();
1834   }
1835   return false;
1836 }
1837 #endif //PRODUCT
1838 
1839 
1840 // Garbage collection
1841 
// Apply the closure to the oops embedded directly in this klass
// (after the superclass's oops): protection domain, signers and the
// init lock. The method/field arrays are deliberately not walked here.
void InstanceKlass::oops_do(OopClosure* cl) {
  Klass::oops_do(cl);

  cl->do_oop(adr_protection_domain());
  cl->do_oop(adr_signers());
  cl->do_oop(adr_init_lock());

  // Don't walk the arrays since they are walked from the ClassLoaderData objects.
}
1851 
#ifdef ASSERT
// Debug-only helpers plugged into the oop-iteration macros below as the
// 'assert_fn' parameter. Each decodes the (possibly narrow) oop at *p
// and checks it against a heap containment predicate; a NULL slot is
// always accepted.
template <class T> void assert_is_in(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in(o), "should be in heap");
  }
}
template <class T> void assert_is_in_closed_subset(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in_closed_subset(o),
           err_msg("should be in closed *p " INTPTR_FORMAT " " INTPTR_FORMAT, (address)p, (address)o));
  }
}
template <class T> void assert_is_in_reserved(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
  }
}
// No-op variant for call sites where no check is wanted.
template <class T> void assert_nothing(T *p) {}

#else
// Product builds: all checks compile away to empty functions.
template <class T> void assert_is_in(T *p) {}
template <class T> void assert_is_in_closed_subset(T *p) {}
template <class T> void assert_is_in_reserved(T *p) {}
template <class T> void assert_nothing(T *p) {}
#endif // ASSERT
1883 
1884 //
1885 // Macros that iterate over areas of oops which are specialized on type of
1886 // oop pointer either narrow or wide, depending on UseCompressedOops
1887 //
1888 // Parameters are:
1889 //   T         - type of oop to point to (either oop or narrowOop)
1890 //   start_p   - starting pointer for region to iterate over
1891 //   count     - number of oops or narrowOops to iterate over
1892 //   do_oop    - action to perform on each oop (it's arbitrary C code which
1893 //               makes it more efficient to put in a macro rather than making
1894 //               it a template function)
1895 //   assert_fn - assert function which is template function because performance
1896 //               doesn't matter when enabled.
// Forward scan: applies do_oop (with p bound to each slot) to the
// 'count' oops of type T starting at start_p.
#define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
  T, start_p, count, do_oop,                \
  assert_fn)                                \
{                                           \
  T* p         = (T*)(start_p);             \
  T* const end = p + (count);               \
  while (p < end) {                         \
    (assert_fn)(p);                         \
    do_oop;                                 \
    ++p;                                    \
  }                                         \
}
1909 
// Same as InstanceKlass_SPECIALIZED_OOP_ITERATE but visits the slots
// from the last element down to the first.
#define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
  T, start_p, count, do_oop,                \
  assert_fn)                                \
{                                           \
  T* const start = (T*)(start_p);           \
  T*       p     = start + (count);         \
  while (start < p) {                       \
    --p;                                    \
    (assert_fn)(p);                         \
    do_oop;                                 \
  }                                         \
}
1922 
// Forward scan clipped to the [low, high) address range: the iteration
// bounds are intersected with the region before visiting any slot.
#define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
  T, start_p, count, low, high,             \
  do_oop, assert_fn)                        \
{                                           \
  T* const l = (T*)(low);                   \
  T* const h = (T*)(high);                  \
  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
         mask_bits((intptr_t)h, sizeof(T)-1) == 0,   \
         "bounded region must be properly aligned"); \
  T* p       = (T*)(start_p);               \
  T* end     = p + (count);                 \
  if (p < l) p = l;                         \
  if (end > h) end = h;                     \
  while (p < end) {                         \
    (assert_fn)(p);                         \
    do_oop;                                 \
    ++p;                                    \
  }                                         \
}
1942 
1943 
1944 // The following macros call specialized macros, passing either oop or
1945 // narrowOop as the specialization type.  These test the UseCompressedOops
1946 // flag.
// Applies do_oop to every instance oop field of obj by walking each
// OopMapBlock; the T specialization is chosen by UseCompressedOops.
#define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn)            \
{                                                                        \
  /* Compute oopmap block range. The common case                         \
     is nonstatic_oop_map_size == 1. */                                  \
  OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();          \
  if (UseCompressedOops) {                                               \
    while (map < end_map) {                                              \
      InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop,                   \
        obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
        do_oop, assert_fn)                                               \
      ++map;                                                             \
    }                                                                    \
  } else {                                                               \
    while (map < end_map) {                                              \
      InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,                         \
        obj->obj_field_addr<oop>(map->offset()), map->count(),           \
        do_oop, assert_fn)                                               \
      ++map;                                                             \
    }                                                                    \
  }                                                                      \
}
1969 
// As InstanceKlass_OOP_MAP_ITERATE, but walks the oop map blocks (and,
// via the REVERSE specialization, each block's slots) back to front.
#define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn)    \
{                                                                        \
  OopMapBlock* const start_map = start_of_nonstatic_oop_maps();          \
  OopMapBlock* map             = start_map + nonstatic_oop_map_count();  \
  if (UseCompressedOops) {                                               \
    while (start_map < map) {                                            \
      --map;                                                             \
      InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop,           \
        obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
        do_oop, assert_fn)                                               \
    }                                                                    \
  } else {                                                               \
    while (start_map < map) {                                            \
      --map;                                                             \
      InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop,                 \
        obj->obj_field_addr<oop>(map->offset()), map->count(),           \
        do_oop, assert_fn)                                               \
    }                                                                    \
  }                                                                      \
}
1990 
// As InstanceKlass_OOP_MAP_ITERATE, but only visits field slots that
// fall inside the [low, high) address range (used for MemRegion scans).
#define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop,    \
                                              assert_fn)                 \
{                                                                        \
  /* Compute oopmap block range. The common case is                      \
     nonstatic_oop_map_size == 1, so we accept the                       \
     usually non-existent extra overhead of examining                    \
     all the maps. */                                                    \
  OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();          \
  if (UseCompressedOops) {                                               \
    while (map < end_map) {                                              \
      InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,           \
        obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
        low, high,                                                       \
        do_oop, assert_fn)                                               \
      ++map;                                                             \
    }                                                                    \
  } else {                                                               \
    while (map < end_map) {                                              \
      InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,                 \
        obj->obj_field_addr<oop>(map->offset()), map->count(),           \
        low, high,                                                       \
        do_oop, assert_fn)                                               \
      ++map;                                                             \
    }                                                                    \
  }                                                                      \
}
2018 
// MarkSweep marking: follow obj's klass, then mark-and-push every
// instance oop field of obj.
void InstanceKlass::oop_follow_contents(oop obj) {
  assert(obj != NULL, "can't follow the content of NULL object");
  MarkSweep::follow_klass(obj->klass());
  InstanceKlass_OOP_MAP_ITERATE( \
    obj, \
    MarkSweep::mark_and_push(p), \
    assert_is_in_closed_subset)
}
2027 
2028 #ifndef SERIALGC
// Parallel compact marking variant: follow obj's klass via the
// compaction manager, then mark-and-push obj's instance oop fields.
void InstanceKlass::oop_follow_contents(ParCompactionManager* cm,
                                        oop obj) {
  assert(obj != NULL, "can't follow the content of NULL object");
  PSParallelCompact::follow_klass(cm, obj->klass());
  // Only mark the header and let the scan of the meta-data mark
  // everything else.
  InstanceKlass_OOP_MAP_ITERATE( \
    obj, \
    PSParallelCompact::mark_and_push(cm, p), \
    assert_is_in)
}
2040 #endif // SERIALGC
2041 
2042 // closure's do_metadata() method dictates whether the given closure should be
2043 // applied to the klass ptr in the object header.
2044 
// Guard macro: expands to an 'if' that runs its body only when the
// closure asks to process metadata, verifying first that the
// non-virtual and virtual do_metadata answers agree.
#define if_do_metadata_checked(closure, nv_suffix)                    \
  /* Make sure the non-virtual and the virtual versions match. */     \
  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
      "Inconsistency in do_metadata");                                \
  if (closure->do_metadata##nv_suffix())
2050 
// Generates the body of oop_oop_iterate[_nv] for a closure type:
// optionally applies the closure to the klass, then to every instance
// oop field, and returns the object size in words.
#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)        \
                                                                             \
int InstanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
  /* header */                                                          \
  if_do_metadata_checked(closure, nv_suffix) {                          \
    closure->do_klass##nv_suffix(obj->klass());                         \
  }                                                                     \
  InstanceKlass_OOP_MAP_ITERATE(                                        \
    obj,                                                                \
    SpecializationStats::                                               \
      record_do_oop_call##nv_suffix(SpecializationStats::ik);           \
    (closure)->do_oop##nv_suffix(p),                                    \
    assert_is_in_closed_subset)                                         \
  return size_helper();                                                 \
}
2067 
2068 #ifndef SERIALGC
// Backwards-iterating counterpart of InstanceKlass_OOP_OOP_ITERATE_DEFN:
// same contract, but the instance fields are visited in reverse order.
#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
                                                                                \
int InstanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj,                \
                                              OopClosureType* closure) {        \
  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
  /* header */                                                                  \
  if_do_metadata_checked(closure, nv_suffix) {                                  \
    closure->do_klass##nv_suffix(obj->klass());                                 \
  }                                                                             \
  /* instance variables */                                                      \
  InstanceKlass_OOP_MAP_REVERSE_ITERATE(                                        \
    obj,                                                                        \
    SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik);\
    (closure)->do_oop##nv_suffix(p),                                            \
    assert_is_in_closed_subset)                                                 \
   return size_helper();                                                        \
}
2086 #endif // !SERIALGC
2087 
// MemRegion-bounded counterpart: the closure is applied to the klass
// only when obj itself lies in mr, and only field slots within mr are
// visited.
#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
                                                                        \
int InstanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj,              \
                                                  OopClosureType* closure, \
                                                  MemRegion mr) {          \
  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
  if_do_metadata_checked(closure, nv_suffix) {                           \
    if (mr.contains(obj)) {                                              \
      closure->do_klass##nv_suffix(obj->klass());                        \
    }                                                                    \
  }                                                                      \
  InstanceKlass_BOUNDED_OOP_MAP_ITERATE(                                 \
    obj, mr.start(), mr.end(),                                           \
    (closure)->do_oop##nv_suffix(p),                                     \
    assert_is_in_closed_subset)                                          \
  return size_helper();                                                  \
}
2105 
// Instantiate the iterator definitions above for every closure type.
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
#ifndef SERIALGC
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
#endif // !SERIALGC
2114 
// MarkSweep compaction: adjust every instance-field oop of obj and the
// klass reference in its header; returns the object size in words.
int InstanceKlass::oop_adjust_pointers(oop obj) {
  int size = size_helper();  // size in words, returned to the caller
  InstanceKlass_OOP_MAP_ITERATE( \
    obj, \
    MarkSweep::adjust_pointer(p), \
    assert_is_in)
  MarkSweep::adjust_klass(obj->klass());
  return size;
}
2124 
2125 #ifndef SERIALGC
// ParallelScavenge: walk obj's instance fields (in reverse map order)
// and hand each field that should be scavenged to the promotion manager.
void InstanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
    obj, \
    if (PSScavenge::should_scavenge(p)) { \
      pm->claim_or_forward_depth(p); \
    }, \
    assert_nothing )
}
2134 
// Parallel compact: adjust every instance-field oop of obj and update
// its header via the compaction manager; returns the size in words.
int InstanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
  int size = size_helper();  // size in words, returned to the caller
  InstanceKlass_OOP_MAP_ITERATE( \
    obj, \
    PSParallelCompact::adjust_pointer(p), \
    assert_is_in)
  obj->update_header(cm);
  return size;
}
2144 
2145 #endif // SERIALGC
2146 
2147 void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
2148   assert(is_loader_alive(is_alive), "this klass should be live");
2149   if (is_interface()) {
2150     if (ClassUnloading) {
2151       Klass* impl = implementor();
2152       if (impl != NULL) {
2153         if (!impl->is_loader_alive(is_alive)) {
2154           // remove this guy
2155           *adr_implementor() = NULL;
2156         }
2157       }
2158     }
2159   }
2160 }
2161 
// Scrub weak klass links from every ProfileData entry in each method's
// MethodData so dead classes are not kept reachable through profiles.
void InstanceKlass::clean_method_data(BoolObjectClosure* is_alive) {
#ifdef COMPILER2
  // Currently only used by C2.
  for (int m = 0; m < methods()->length(); m++) {
    MethodData* mdo = methods()->at(m)->method_data();
    if (mdo != NULL) {
      // walk every profile entry in this MDO
      for (ProfileData* data = mdo->first_data();
           mdo->is_valid(data);
           data = mdo->next_data(data)) {
        data->clean_weak_klass_links(is_alive);
      }
    }
  }
#else
#ifdef ASSERT
  // Verify that we haven't started to use MDOs for C1.
  for (int m = 0; m < methods()->length(); m++) {
    MethodData* mdo = methods()->at(m)->method_data();
    assert(mdo == NULL, "Didn't expect C1 to use MDOs");
  }
#endif // ASSERT
#endif // !COMPILER2
}
2185 
2186 
// Helper passed to array_klasses_do(): strips the unshareable state
// from one (array) klass.
static void remove_unshareable_in_class(Klass* k) {
  // remove klass's unshareable info
  k->remove_unshareable_info();
}
2191 
// Strip state that cannot be written to the shared archive: unlink the
// class, reset the implementor, and scrub the constant pool, methods,
// init lock and any array classes. restore_unshareable_info() below
// recreates this state at read-back time.
void InstanceKlass::remove_unshareable_info() {
  Klass::remove_unshareable_info();
  // Unlink the class
  if (is_linked()) {
    unlink_class();
  }
  init_implementor();

  constants()->remove_unshareable_info();

  for (int i = 0; i < methods()->length(); i++) {
    Method* m = methods()->at(i);
    m->remove_unshareable_info();
  }

  // Need to reinstate when reading back the class.
  set_init_lock(NULL);

  // do array classes also.
  array_klasses_do(remove_unshareable_in_class);
}
2213 
// Helper passed to array_klasses_do: restores unshareable info for one klass.
// CHECK propagates any pending exception back to the caller.
void restore_unshareable_in_class(Klass* k, TRAPS) {
  k->restore_unshareable_info(CHECK);
}
2217 
// Re-create the process-specific state that remove_unshareable_info()
// stripped before this klass was written into the CDS archive: relink
// methods, rebuild vtables/itables if classes were redefined, allocate a
// fresh init lock, and restore constant pool resolved references.
void InstanceKlass::restore_unshareable_info(TRAPS) {
  Klass::restore_unshareable_info(CHECK);
  instanceKlassHandle ik(THREAD, this);

  Array<Method*>* methods = ik->methods();
  int num_methods = methods->length();
  for (int index2 = 0; index2 < num_methods; ++index2) {
    methodHandle m(THREAD, methods->at(index2));
    m()->link_method(m, CHECK);
    // restore method's vtable by calling a virtual function
    m->restore_vtable();
  }
  if (JvmtiExport::has_redefined_a_class()) {
    // Reinitialize vtable because RedefineClasses may have changed some
    // entries in this vtable for super classes so the CDS vtable might
    // point to old or obsolete entries.  RedefineClasses doesn't fix up
    // vtables in the shared system dictionary, only the main one.
    // It also redefines the itable too so fix that too.
    ResourceMark rm(THREAD);
    ik->vtable()->initialize_vtable(false, CHECK);
    ik->itable()->initialize_itable(false, CHECK);
  }

  // Allocate a simple java object for a lock.
  // This needs to be a java object because during class initialization
  // it can be held across a java call.
  typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK);
  Handle h(THREAD, (oop)r);
  ik->set_init_lock(h());

  // restore constant pool resolved references
  ik->constants()->restore_unshareable_info(CHECK);

  ik->array_klasses_do(restore_unshareable_in_class, CHECK);
}
2253 
// Helper passed to methods_do: removes every breakpoint set on one method.
static void clear_all_breakpoints(Method* m) {
  m->clear_all_breakpoints();
}
2257 
// Free all C-heap-allocated structures hanging off this klass when it is
// unloaded: oop map cache, JNI id tables, jmethodID/itable-index caches,
// nmethod dependency buckets, breakpoints, previous class versions, the
// cached class file, and symbol reference counts.
void InstanceKlass::release_C_heap_structures() {
  // Deallocate oop map cache
  if (_oop_map_cache != NULL) {
    delete _oop_map_cache;
    _oop_map_cache = NULL;
  }

  // Deallocate JNI identifiers for jfieldIDs
  JNIid::deallocate(jni_ids());
  set_jni_ids(NULL);

  // Publish NULL before freeing so racing readers never see a freed table.
  jmethodID* jmeths = methods_jmethod_ids_acquire();
  if (jmeths != (jmethodID*)NULL) {
    release_set_methods_jmethod_ids(NULL);
    FreeHeap(jmeths);
  }

  int* indices = methods_cached_itable_indices_acquire();
  if (indices != (int*)NULL) {
    release_set_methods_cached_itable_indices(NULL);
    FreeHeap(indices);
  }

  // release dependencies
  nmethodBucket* b = _dependencies;
  _dependencies = NULL;
  while (b != NULL) {
    nmethodBucket* next = b->next();
    delete b;
    b = next;
  }

  // Deallocate breakpoint records
  if (breakpoints() != 0x0) {
    methods_do(clear_all_breakpoints);
    assert(breakpoints() == 0x0, "should have cleared breakpoints");
  }

  // deallocate information about previous versions
  if (_previous_versions != NULL) {
    for (int i = _previous_versions->length() - 1; i >= 0; i--) {
      PreviousVersionNode * pv_node = _previous_versions->at(i);
      delete pv_node;
    }
    delete _previous_versions;
    _previous_versions = NULL;
  }

  // deallocate the cached class file
  if (_cached_class_file_bytes != NULL) {
    os::free(_cached_class_file_bytes, mtClass);
    _cached_class_file_bytes = NULL;
    _cached_class_file_len = 0;
  }

  // Decrement symbol reference counts associated with the unloaded class.
  if (_name != NULL) _name->decrement_refcount();
  // unreference array name derived from this class name (arrays of an unloaded
  // class can't be referenced anymore).
  if (_array_name != NULL)  _array_name->decrement_refcount();
  if (_source_file_name != NULL) _source_file_name->decrement_refcount();
  if (_source_debug_extension != NULL) FREE_C_HEAP_ARRAY(char, _source_debug_extension, mtClass);
}
2321 
2322 void InstanceKlass::set_source_file_name(Symbol* n) {
2323   _source_file_name = n;
2324   if (_source_file_name != NULL) _source_file_name->increment_refcount();
2325 }
2326 
2327 void InstanceKlass::set_source_debug_extension(char* array, int length) {
2328   if (array == NULL) {
2329     _source_debug_extension = NULL;
2330   } else {
2331     // Adding one to the attribute length in order to store a null terminator
2332     // character could cause an overflow because the attribute length is
2333     // already coded with an u4 in the classfile, but in practice, it's
2334     // unlikely to happen.
2335     assert((length+1) > length, "Overflow checking");
2336     char* sde = NEW_C_HEAP_ARRAY(char, (length + 1), mtClass);
2337     for (int i = 0; i < length; i++) {
2338       sde[i] = array[i];
2339     }
2340     sde[length] = '\0';
2341     _source_debug_extension = sde;
2342   }
2343 }
2344 
// Return the address of a static field: static fields live inside the
// java.lang.Class mirror, at offset_of_static_fields() past its header.
address InstanceKlass::static_field_addr(int offset) {
  return (address)(offset + InstanceMirrorKlass::offset_of_static_fields() + (intptr_t)java_mirror());
}
2348 
2349 
2350 const char* InstanceKlass::signature_name() const {
2351   const char* src = (const char*) (name()->as_C_string());
2352   const int src_length = (int)strlen(src);
2353   char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3);
2354   int src_index = 0;
2355   int dest_index = 0;
2356   dest[dest_index++] = 'L';
2357   while (src_index < src_length) {
2358     dest[dest_index++] = src[src_index++];
2359   }
2360   dest[dest_index++] = ';';
2361   dest[dest_index] = '\0';
2362   return dest;
2363 }
2364 
// different versions of is_same_class_package
2366 bool InstanceKlass::is_same_class_package(Klass* class2) {
2367   Klass* class1 = this;
2368   oop classloader1 = InstanceKlass::cast(class1)->class_loader();
2369   Symbol* classname1 = class1->name();
2370 
2371   if (class2->oop_is_objArray()) {
2372     class2 = ObjArrayKlass::cast(class2)->bottom_klass();
2373   }
2374   oop classloader2;
2375   if (class2->oop_is_instance()) {
2376     classloader2 = InstanceKlass::cast(class2)->class_loader();
2377   } else {
2378     assert(class2->oop_is_typeArray(), "should be type array");
2379     classloader2 = NULL;
2380   }
2381   Symbol* classname2 = class2->name();
2382 
2383   return InstanceKlass::is_same_class_package(classloader1, classname1,
2384                                               classloader2, classname2);
2385 }
2386 
2387 bool InstanceKlass::is_same_class_package(oop classloader2, Symbol* classname2) {
2388   Klass* class1 = this;
2389   oop classloader1 = InstanceKlass::cast(class1)->class_loader();
2390   Symbol* classname1 = class1->name();
2391 
2392   return InstanceKlass::is_same_class_package(classloader1, classname1,
2393                                               classloader2, classname2);
2394 }
2395 
2396 // return true if two classes are in the same package, classloader
2397 // and classname information is enough to determine a class's package
2398 bool InstanceKlass::is_same_class_package(oop class_loader1, Symbol* class_name1,
2399                                           oop class_loader2, Symbol* class_name2) {
2400   if (class_loader1 != class_loader2) {
2401     return false;
2402   } else if (class_name1 == class_name2) {
2403     return true;                // skip painful bytewise comparison
2404   } else {
2405     ResourceMark rm;
2406 
2407     // The Symbol*'s are in UTF8 encoding. Since we only need to check explicitly
2408     // for ASCII characters ('/', 'L', '['), we can keep them in UTF8 encoding.
2409     // Otherwise, we just compare jbyte values between the strings.
2410     const jbyte *name1 = class_name1->base();
2411     const jbyte *name2 = class_name2->base();
2412 
2413     const jbyte *last_slash1 = UTF8::strrchr(name1, class_name1->utf8_length(), '/');
2414     const jbyte *last_slash2 = UTF8::strrchr(name2, class_name2->utf8_length(), '/');
2415 
2416     if ((last_slash1 == NULL) || (last_slash2 == NULL)) {
2417       // One of the two doesn't have a package.  Only return true
2418       // if the other one also doesn't have a package.
2419       return last_slash1 == last_slash2;
2420     } else {
2421       // Skip over '['s
2422       if (*name1 == '[') {
2423         do {
2424           name1++;
2425         } while (*name1 == '[');
2426         if (*name1 != 'L') {
2427           // Something is terribly wrong.  Shouldn't be here.
2428           return false;
2429         }
2430       }
2431       if (*name2 == '[') {
2432         do {
2433           name2++;
2434         } while (*name2 == '[');
2435         if (*name2 != 'L') {
2436           // Something is terribly wrong.  Shouldn't be here.
2437           return false;
2438         }
2439       }
2440 
2441       // Check that package part is identical
2442       int length1 = last_slash1 - name1;
2443       int length2 = last_slash2 - name2;
2444 
2445       return UTF8::equal(name1, length1, name2, length2);
2446     }
2447   }
2448 }
2449 
2450 // Returns true iff super_method can be overridden by a method in targetclassname
// See JLS 3rd edition 8.4.6.1
2452 // Assumes name-signature match
2453 // "this" is InstanceKlass of super_method which must exist
2454 // note that the InstanceKlass of the method in the targetclassname has not always been created yet
2455 bool InstanceKlass::is_override(methodHandle super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS) {
2456    // Private methods can not be overridden
2457    if (super_method->is_private()) {
2458      return false;
2459    }
2460    // If super method is accessible, then override
2461    if ((super_method->is_protected()) ||
2462        (super_method->is_public())) {
2463      return true;
2464    }
2465    // Package-private methods are not inherited outside of package
2466    assert(super_method->is_package_private(), "must be package private");
2467    return(is_same_class_package(targetclassloader(), targetclassname));
2468 }
2469 
2470 /* defined for now in jvm.cpp, for historical reasons *--
2471 Klass* InstanceKlass::compute_enclosing_class_impl(instanceKlassHandle self,
2472                                                      Symbol*& simple_name_result, TRAPS) {
2473   ...
2474 }
2475 */
2476 
2477 // tell if two classes have the same enclosing class (at package level)
2478 bool InstanceKlass::is_same_package_member_impl(instanceKlassHandle class1,
2479                                                 Klass* class2_oop, TRAPS) {
2480   if (class2_oop == class1())                       return true;
2481   if (!class2_oop->oop_is_instance())  return false;
2482   instanceKlassHandle class2(THREAD, class2_oop);
2483 
2484   // must be in same package before we try anything else
2485   if (!class1->is_same_class_package(class2->class_loader(), class2->name()))
2486     return false;
2487 
2488   // As long as there is an outer1.getEnclosingClass,
2489   // shift the search outward.
2490   instanceKlassHandle outer1 = class1;
2491   for (;;) {
2492     // As we walk along, look for equalities between outer1 and class2.
2493     // Eventually, the walks will terminate as outer1 stops
2494     // at the top-level class around the original class.
2495     bool ignore_inner_is_member;
2496     Klass* next = outer1->compute_enclosing_class(&ignore_inner_is_member,
2497                                                     CHECK_false);
2498     if (next == NULL)  break;
2499     if (next == class2())  return true;
2500     outer1 = instanceKlassHandle(THREAD, next);
2501   }
2502 
2503   // Now do the same for class2.
2504   instanceKlassHandle outer2 = class2;
2505   for (;;) {
2506     bool ignore_inner_is_member;
2507     Klass* next = outer2->compute_enclosing_class(&ignore_inner_is_member,
2508                                                     CHECK_false);
2509     if (next == NULL)  break;
2510     // Might as well check the new outer against all available values.
2511     if (next == class1())  return true;
2512     if (next == outer1())  return true;
2513     outer2 = instanceKlassHandle(THREAD, next);
2514   }
2515 
2516   // If by this point we have not found an equality between the
2517   // two classes, we know they are in separate package members.
2518   return false;
2519 }
2520 
2521 
// Compute the modifier flags to report via reflection/JVMTI.  If this class
// appears in its own InnerClasses attribute, the attribute's access flags
// take precedence over the class file's top-level flags (JVMS 4.7.6).
jint InstanceKlass::compute_modifier_flags(TRAPS) const {
  jint access = access_flags().as_int();

  // But check if it happens to be member class.
  instanceKlassHandle ik(THREAD, this);
  InnerClassesIterator iter(ik);
  for (; !iter.done(); iter.next()) {
    int ioff = iter.inner_class_info_index();
    // Inner class attribute can be zero, skip it.
    // Strange but true:  JVM spec. allows null inner class refs.
    if (ioff == 0) continue;

    // only look at classes that are already loaded
    // since we are looking for the flags for our self.
    Symbol* inner_name = ik->constants()->klass_name_at(ioff);
    if ((ik->name() == inner_name)) {
      // This is really a member class.
      access = iter.inner_access_flags();
      break;
    }
  }
  // Remember to strip ACC_SUPER bit
  return (access & (~JVM_ACC_SUPER)) & JVM_ACC_WRITTEN_FLAGS;
}
2546 
2547 jint InstanceKlass::jvmti_class_status() const {
2548   jint result = 0;
2549 
2550   if (is_linked()) {
2551     result |= JVMTI_CLASS_STATUS_VERIFIED | JVMTI_CLASS_STATUS_PREPARED;
2552   }
2553 
2554   if (is_initialized()) {
2555     assert(is_linked(), "Class status is not consistent");
2556     result |= JVMTI_CLASS_STATUS_INITIALIZED;
2557   }
2558   if (is_in_error_state()) {
2559     result |= JVMTI_CLASS_STATUS_ERROR;
2560   }
2561   return result;
2562 }
2563 
// Resolve an interface method at invokeinterface dispatch time: find the
// itable section for interface 'holder' in this (receiver) klass and return
// the method at 'index'.  Throws IncompatibleClassChangeError if this klass
// does not implement the interface, AbstractMethodError if the slot is empty.
Method* InstanceKlass::method_at_itable(Klass* holder, int index, TRAPS) {
  itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable();
  // The first offset entry points past the offset table, which bounds the
  // number of interfaces recorded in the itable.
  int method_table_offset_in_words = ioe->offset()/wordSize;
  int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words())
                       / itableOffsetEntry::size();

  for (int cnt = 0 ; ; cnt ++, ioe ++) {
    // If the interface isn't implemented by the receiver class,
    // the VM should throw IncompatibleClassChangeError.
    if (cnt >= nof_interfaces) {
      THROW_NULL(vmSymbols::java_lang_IncompatibleClassChangeError());
    }

    Klass* ik = ioe->interface_klass();
    if (ik == holder) break;
  }

  itableMethodEntry* ime = ioe->first_method_entry(this);
  Method* m = ime[index].method();
  if (m == NULL) {
    THROW_NULL(vmSymbols::java_lang_AbstractMethodError());
  }
  return m;
}
2588 
2589 // On-stack replacement stuff
// On-stack replacement stuff
// Add an OSR nmethod to the head of this klass's OSR list, update the
// method's highest OSR level, and (tiered only) invalidate lower-level OSR
// versions for the same bci.
void InstanceKlass::add_osr_nmethod(nmethod* n) {
  // only one compilation can be active
  NEEDS_CLEANUP
  // This is a short non-blocking critical region, so the no safepoint check is ok.
  OsrList_lock->lock_without_safepoint_check();
  assert(n->is_osr_method(), "wrong kind of nmethod");
  n->set_osr_link(osr_nmethods_head());
  set_osr_nmethods_head(n);
  // Raise the highest osr level if necessary
  if (TieredCompilation) {
    Method* m = n->method();
    m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
  }
  // Remember to unlock again
  OsrList_lock->unlock();

  // Get rid of the osr methods for the same bci that have lower levels.
  // Done outside the lock; lookup_osr_nmethod re-acquires it internally.
  if (TieredCompilation) {
    for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) {
      nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true);
      if (inv != NULL && inv->is_in_use()) {
        inv->make_not_entrant();
      }
    }
  }
}
2616 
2617 
// Unlink an OSR nmethod from this klass's OSR list and, under tiered
// compilation, recompute the method's highest remaining OSR level from the
// nodes before and after the removed entry.
void InstanceKlass::remove_osr_nmethod(nmethod* n) {
  // This is a short non-blocking critical region, so the no safepoint check is ok.
  OsrList_lock->lock_without_safepoint_check();
  assert(n->is_osr_method(), "wrong kind of nmethod");
  nmethod* last = NULL;
  nmethod* cur  = osr_nmethods_head();
  int max_level = CompLevel_none;  // Find the max comp level excluding n
  Method* m = n->method();
  // Search for match
  while(cur != NULL && cur != n) {
    if (TieredCompilation) {
      // Find max level before n
      max_level = MAX2(max_level, cur->comp_level());
    }
    last = cur;
    cur = cur->osr_link();
  }
  nmethod* next = NULL;
  if (cur == n) {
    // Found it: splice n out of the singly-linked list.
    next = cur->osr_link();
    if (last == NULL) {
      // Remove first element
      set_osr_nmethods_head(next);
    } else {
      last->set_osr_link(next);
    }
  }
  n->set_osr_link(NULL);
  if (TieredCompilation) {
    // Continue the max-level scan over the remainder of the list.
    cur = next;
    while (cur != NULL) {
      // Find max level after n
      max_level = MAX2(max_level, cur->comp_level());
      cur = cur->osr_link();
    }
    m->set_highest_osr_comp_level(max_level);
  }
  // Remember to unlock again
  OsrList_lock->unlock();
}
2658 
// Find an OSR nmethod for method m at the given bci.  With match_level,
// require an exact comp_level match; otherwise prefer the highest-level
// candidate, but only return it if its level is >= the requested one.
// bci == InvocationEntryBci matches any entry bci.
nmethod* InstanceKlass::lookup_osr_nmethod(Method* const m, int bci, int comp_level, bool match_level) const {
  // This is a short non-blocking critical region, so the no safepoint check is ok.
  OsrList_lock->lock_without_safepoint_check();
  nmethod* osr = osr_nmethods_head();
  nmethod* best = NULL;
  while (osr != NULL) {
    assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
    // There can be a time when a c1 osr method exists but we are waiting
    // for a c2 version. When c2 completes its osr nmethod we will trash
    // the c1 version and only be able to find the c2 version. However
    // while we overflow in the c1 code at back branches we don't want to
    // try and switch to the same code as we are already running

    if (osr->method() == m &&
        (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) {
      if (match_level) {
        if (osr->comp_level() == comp_level) {
          // Found a match - return it.
          OsrList_lock->unlock();
          return osr;
        }
      } else {
        if (best == NULL || (osr->comp_level() > best->comp_level())) {
          if (osr->comp_level() == CompLevel_highest_tier) {
            // Found the best possible - return it.
            OsrList_lock->unlock();
            return osr;
          }
          best = osr;
        }
      }
    }
    osr = osr->osr_link();
  }
  OsrList_lock->unlock();
  // In "best-effort" mode, only hand back a candidate that is at least as
  // optimized as the requested level.
  if (best != NULL && best->comp_level() >= comp_level && match_level == false) {
    return best;
  }
  return NULL;
}
2699 
2700 // -----------------------------------------------------------------------------------------------------
2701 // Printing
2702 
2703 #ifndef PRODUCT
2704 
#define BULLET  " - "

// Human-readable names for the ClassState values, indexed by _init_state.
static const char* state_names[] = {
  "allocated", "loaded", "linked", "being_initialized", "fully_initialized", "initialization_error"
};
2710 
// Debug printing (non-PRODUCT): dump this klass's metadata — sizes, state,
// hierarchy, methods, interfaces, fields, oop maps — one BULLET line each.
void InstanceKlass::print_on(outputStream* st) const {
  assert(is_klass(), "must be klass");
  Klass::print_on(st);

  st->print(BULLET"instance size:     %d", size_helper());                        st->cr();
  st->print(BULLET"klass size:        %d", size());                               st->cr();
  st->print(BULLET"access:            "); access_flags().print_on(st);            st->cr();
  st->print(BULLET"state:             "); st->print_cr(state_names[_init_state]);
  st->print(BULLET"name:              "); name()->print_value_on(st);             st->cr();
  st->print(BULLET"super:             "); super()->print_value_on_maybe_null(st); st->cr();
  st->print(BULLET"sub:               ");
  // Walk the sibling chain of direct subclasses, capping output at
  // MaxSubklassPrintSize entries.
  Klass* sub = subklass();
  int n;
  for (n = 0; sub != NULL; n++, sub = sub->next_sibling()) {
    if (n < MaxSubklassPrintSize) {
      sub->print_value_on(st);
      st->print("   ");
    }
  }
  if (n >= MaxSubklassPrintSize) st->print("(%d more klasses...)", n - MaxSubklassPrintSize);
  st->cr();

  if (is_interface()) {
    st->print_cr(BULLET"nof implementors:  %d", nof_implementors());
    if (nof_implementors() == 1) {
      st->print_cr(BULLET"implementor:    ");
      st->print("   ");
      implementor()->print_value_on(st);
      st->cr();
    }
  }

  st->print(BULLET"arrays:            "); array_klasses()->print_value_on_maybe_null(st); st->cr();
  st->print(BULLET"methods:           "); methods()->print_value_on(st);                  st->cr();
  if (Verbose) {
    // With -verbose, list every method individually.
    Array<Method*>* method_array = methods();
    for(int i = 0; i < method_array->length(); i++) {
      st->print("%d : ", i); method_array->at(i)->print_value(); st->cr();
    }
  }
  st->print(BULLET"method ordering:   "); method_ordering()->print_value_on(st);       st->cr();
  st->print(BULLET"local interfaces:  "); local_interfaces()->print_value_on(st);      st->cr();
  st->print(BULLET"trans. interfaces: "); transitive_interfaces()->print_value_on(st); st->cr();
  st->print(BULLET"constants:         "); constants()->print_value_on(st);         st->cr();
  if (class_loader_data() != NULL) {
    st->print(BULLET"class loader data:  ");
    class_loader_data()->print_value_on(st);
    st->cr();
  }
  st->print(BULLET"protection domain: "); ((InstanceKlass*)this)->protection_domain()->print_value_on(st); st->cr();
  st->print(BULLET"host class:        "); host_klass()->print_value_on_maybe_null(st); st->cr();
  st->print(BULLET"signers:           "); signers()->print_value_on(st);               st->cr();
  st->print(BULLET"init_lock:         "); ((oop)init_lock())->print_value_on(st);             st->cr();
  if (source_file_name() != NULL) {
    st->print(BULLET"source file:       ");
    source_file_name()->print_value_on(st);
    st->cr();
  }
  if (source_debug_extension() != NULL) {
    st->print(BULLET"source debug extension:       ");
    st->print("%s", source_debug_extension());
    st->cr();
  }
  st->print(BULLET"annotations:       "); annotations()->print_value_on(st); st->cr();
  {
    ResourceMark rm;
    // PreviousVersionInfo objects returned via PreviousVersionWalker
    // contain a GrowableArray of handles. We have to clean up the
    // GrowableArray _after_ the PreviousVersionWalker destructor
    // has destroyed the handles.
    {
      bool have_pv = false;
      PreviousVersionWalker pvw((InstanceKlass*)this);
      for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
           pv_info != NULL; pv_info = pvw.next_previous_version()) {
        if (!have_pv)
          st->print(BULLET"previous version:  ");
        have_pv = true;
        pv_info->prev_constant_pool_handle()()->print_value_on(st);
      }
      if (have_pv)  st->cr();
    } // pvw is cleaned up
  } // rm is cleaned up

  if (generic_signature() != NULL) {
    st->print(BULLET"generic signature: ");
    generic_signature()->print_value_on(st);
    st->cr();
  }
  st->print(BULLET"inner classes:     "); inner_classes()->print_value_on(st);     st->cr();
  st->print(BULLET"java mirror:       "); java_mirror()->print_value_on(st);       st->cr();
  st->print(BULLET"vtable length      %d  (start addr: " INTPTR_FORMAT ")", vtable_length(), start_of_vtable());  st->cr();
  st->print(BULLET"itable length      %d (start addr: " INTPTR_FORMAT ")", itable_length(), start_of_itable()); st->cr();
  st->print_cr(BULLET"---- static fields (%d words):", static_field_size());
  FieldPrinter print_static_field(st);
  ((InstanceKlass*)this)->do_local_static_fields(&print_static_field);
  st->print_cr(BULLET"---- non-static fields (%d words):", nonstatic_field_size());
  FieldPrinter print_nonstatic_field(st);
  ((InstanceKlass*)this)->do_nonstatic_fields(&print_nonstatic_field);

  // Print each oop map block as an inclusive byte-offset range.
  st->print(BULLET"non-static oop maps: ");
  OopMapBlock* map     = start_of_nonstatic_oop_maps();
  OopMapBlock* end_map = map + nonstatic_oop_map_count();
  while (map < end_map) {
    st->print("%d-%d ", map->offset(), map->offset() + heapOopSize*(map->count() - 1));
    map++;
  }
  st->cr();
}
2820 
2821 #endif //PRODUCT
2822 
// Short-form printing: just the class name.
void InstanceKlass::print_value_on(outputStream* st) const {
  assert(is_klass(), "must be klass");
  name()->print_value_on(st);
}
2827 
2828 #ifndef PRODUCT
2829 
2830 void FieldPrinter::do_field(fieldDescriptor* fd) {
2831   _st->print(BULLET);
2832    if (_obj == NULL) {
2833      fd->print_on(_st);
2834      _st->cr();
2835    } else {
2836      fd->print_on_for(_st, _obj);
2837      _st->cr();
2838    }
2839 }
2840 
2841 
// Debug printing of an instance of this klass: fields plus special-cased
// detail for String, Class (mirror) and MethodType instances.
void InstanceKlass::oop_print_on(oop obj, outputStream* st) {
  Klass::oop_print_on(obj, st);

  if (this == SystemDictionary::String_klass()) {
    typeArrayOop value  = java_lang_String::value(obj);
    juint        offset = java_lang_String::offset(obj);
    juint        length = java_lang_String::length(obj);
    // Only print the string if its value array and bounds are sane; a
    // partially-constructed or corrupt String is printed as plain fields.
    if (value != NULL &&
        value->is_typeArray() &&
        offset          <= (juint) value->length() &&
        offset + length <= (juint) value->length()) {
      st->print(BULLET"string: ");
      Handle h_obj(obj);
      java_lang_String::print(h_obj, st);
      st->cr();
      if (!WizardMode)  return;  // that is enough
    }
  }

  st->print_cr(BULLET"---- fields (total size %d words):", oop_size(obj));
  FieldPrinter print_field(st, obj);
  do_nonstatic_fields(&print_field);

  if (this == SystemDictionary::Class_klass()) {
    // A java.lang.Class instance: also show the mirrored klass and the
    // "fake" mirror-resident entries.
    st->print(BULLET"signature: ");
    java_lang_Class::print_signature(obj, st);
    st->cr();
    Klass* mirrored_klass = java_lang_Class::as_Klass(obj);
    st->print(BULLET"fake entry for mirror: ");
    mirrored_klass->print_value_on_maybe_null(st);
    st->cr();
    st->print(BULLET"fake entry resolved_constructor: ");
    Method* ctor = java_lang_Class::resolved_constructor(obj);
    ctor->print_value_on_maybe_null(st);
    Klass* array_klass = java_lang_Class::array_klass(obj);
    st->cr();
    st->print(BULLET"fake entry for array: ");
    array_klass->print_value_on_maybe_null(st);
    st->cr();
    st->print_cr(BULLET"fake entry for oop_size: %d", java_lang_Class::oop_size(obj));
    st->print_cr(BULLET"fake entry for static_oop_field_count: %d", java_lang_Class::static_oop_field_count(obj));
    Klass* real_klass = java_lang_Class::as_Klass(obj);
    if (real_klass != NULL && real_klass->oop_is_instance()) {
      // Static fields live in the mirror; print them through the mirrored klass.
      InstanceKlass::cast(real_klass)->do_local_static_fields(&print_field);
    }
  } else if (this == SystemDictionary::MethodType_klass()) {
    st->print(BULLET"signature: ");
    java_lang_invoke_MethodType::print_signature(obj, st);
    st->cr();
  }
}
2893 
2894 #endif //PRODUCT
2895 
// Short-form printing of an instance: "a <name> <address>", plus a compact
// value summary for well-known types (String, Class, MethodType, boxes,
// LambdaForm, MemberName).
void InstanceKlass::oop_print_value_on(oop obj, outputStream* st) {
  st->print("a ");
  name()->print_value_on(st);
  obj->print_address_on(st);
  if (this == SystemDictionary::String_klass()
      && java_lang_String::value(obj) != NULL) {
    ResourceMark rm;
    int len = java_lang_String::length(obj);
    // Truncate long strings to a 12-char prefix followed by the length.
    int plen = (len < 24 ? len : 12);
    char* str = java_lang_String::as_utf8_string(obj, 0, plen);
    st->print(" = \"%s\"", str);
    if (len > plen)
      st->print("...[%d]", len);
  } else if (this == SystemDictionary::Class_klass()) {
    Klass* k = java_lang_Class::as_Klass(obj);
    st->print(" = ");
    if (k != NULL) {
      k->print_value_on(st);
    } else {
      // A NULL klass means this mirror represents a primitive type.
      const char* tname = type2name(java_lang_Class::primitive_type(obj));
      st->print("%s", tname ? tname : "type?");
    }
  } else if (this == SystemDictionary::MethodType_klass()) {
    st->print(" = ");
    java_lang_invoke_MethodType::print_signature(obj, st);
  } else if (java_lang_boxing_object::is_instance(obj)) {
    st->print(" = ");
    java_lang_boxing_object::print(obj, st);
  } else if (this == SystemDictionary::LambdaForm_klass()) {
    oop vmentry = java_lang_invoke_LambdaForm::vmentry(obj);
    if (vmentry != NULL) {
      st->print(" => ");
      vmentry->print_value_on(st);
    }
  } else if (this == SystemDictionary::MemberName_klass()) {
    Metadata* vmtarget = java_lang_invoke_MemberName::vmtarget(obj);
    if (vmtarget != NULL) {
      st->print(" = ");
      vmtarget->print_value_on(st);
    } else {
      // Unresolved MemberName: print "clazz.name" instead of a target.
      java_lang_invoke_MemberName::clazz(obj)->print_value_on(st);
      st->print(".");
      java_lang_invoke_MemberName::name(obj)->print_value_on(st);
    }
  }
}
2942 
// For instance klasses the internal display name is the external (dotted)
// class name.
const char* InstanceKlass::internal_name() const {
  return external_name();
}
2946 
2947 // Verification
2948 
// Closure applied to each oop field during verification: loads the field
// (decoding narrow oops as needed) and fails hard if it does not look like
// a valid oop or NULL.
class VerifyFieldClosure: public OopClosure {
 protected:
  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (!obj->is_oop_or_null()) {
      tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj);
      Universe::print();
      guarantee(false, "boom");
    }
  }
 public:
  virtual void do_oop(oop* p)       { VerifyFieldClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
};
2963 
// Verify the integrity of this klass's metadata: dictionary presence,
// vtable, subclass/sibling links, implementor, interface arrays, the
// sorted methods array and its ordering table, JNI field ids, and
// assorted metadata pointers.  Any inconsistency aborts the VM via
// guarantee()/fatal().
void InstanceKlass::verify_on(outputStream* st) {
  Klass::verify_on(st);
  Thread *thread = Thread::current();

#ifndef PRODUCT
  // Avoid redundant verifies within one Universe::verify pass
  if (_verify_count == Universe::verify_count()) return;
  _verify_count = Universe::verify_count();
#endif
  // Verify that klass is present in SystemDictionary
  // (anonymous classes are not entered in the dictionary, so skip them)
  if (is_loaded() && !is_anonymous()) {
    Symbol* h_name = name();
    SystemDictionary::verify_obj_klass_present(h_name, class_loader_data());
  }

  // Verify static fields
  VerifyFieldClosure blk;

  // Verify vtables (only valid once the class has been linked)
  if (is_linked()) {
    ResourceMark rm(thread);
    // $$$ This used to be done only for m/s collections.  Doing it
    // always seemed a valid generalization.  (DLD -- 6/00)
    vtable()->verify(st);
  }

  // Verify first subklass
  if (subklass_oop() != NULL) {
    guarantee(subklass_oop()->is_metadata(), "should be in metaspace");
    guarantee(subklass_oop()->is_klass(), "should be klass");
  }

  // Verify siblings: every sibling must share our superklass and the
  // chain must not point back at this klass.
  Klass* super = this->super();
  Klass* sib = next_sibling();
  if (sib != NULL) {
    if (sib == this) {
      fatal(err_msg("subclass points to itself " PTR_FORMAT, sib));
    }

    guarantee(sib->is_metadata(), "should be in metaspace");
    guarantee(sib->is_klass(), "should be klass");
    guarantee(sib->super() == super, "siblings should have same superklass");
  }

  // Verify implementor fields
  Klass* im = implementor();
  if (im != NULL) {
    guarantee(is_interface(), "only interfaces should have implementor set");
    guarantee(im->is_klass(), "should be klass");
    // im == this appears to be used as a sentinel (e.g. multiple
    // implementors), hence the exemption below — confirm against
    // Klass::implementor() semantics.
    guarantee(!im->is_interface() || im == this,
      "implementors cannot be interfaces");
  }

  // Verify local interfaces
  if (local_interfaces()) {
    Array<Klass*>* local_interfaces = this->local_interfaces();
    for (int j = 0; j < local_interfaces->length(); j++) {
      Klass* e = local_interfaces->at(j);
      guarantee(e->is_klass() && e->is_interface(), "invalid local interface");
    }
  }

  // Verify transitive interfaces
  if (transitive_interfaces() != NULL) {
    Array<Klass*>* transitive_interfaces = this->transitive_interfaces();
    for (int j = 0; j < transitive_interfaces->length(); j++) {
      Klass* e = transitive_interfaces->at(j);
      guarantee(e->is_klass() && e->is_interface(), "invalid transitive interface");
    }
  }

  // Verify methods
  if (methods() != NULL) {
    Array<Method*>* methods = this->methods();
    for (int j = 0; j < methods->length(); j++) {
      guarantee(methods->at(j)->is_metadata(), "should be in metaspace");
      guarantee(methods->at(j)->is_method(), "non-method in methods array");
    }
    // Adjacent methods must be ordered by Symbol::fast_compare — the
    // methods array is kept sorted by name.
    for (int j = 0; j < methods->length() - 1; j++) {
      Method* m1 = methods->at(j);
      Method* m2 = methods->at(j + 1);
      guarantee(m1->name()->fast_compare(m2->name()) <= 0, "methods not sorted correctly");
    }
  }

  // Verify method ordering: the table maps sorted positions back to the
  // original (class-file) method order.
  if (method_ordering() != NULL) {
    Array<int>* method_ordering = this->method_ordering();
    int length = method_ordering->length();
    if (JvmtiExport::can_maintain_original_method_order() ||
        (UseSharedSpaces && length != 0)) {
      guarantee(length == methods()->length(), "invalid method ordering length");
      jlong sum = 0;
      for (int j = 0; j < length; j++) {
        int original_index = method_ordering->at(j);
        guarantee(original_index >= 0, "invalid method ordering index");
        guarantee(original_index < length, "invalid method ordering index");
        sum += original_index;
      }
      // Verify sum of indices 0,1,...,length-1 — a cheap sanity check
      // that the table is a permutation (necessary, not sufficient).
      guarantee(sum == ((jlong)length*(length-1))/2, "invalid method ordering sum");
    } else {
      guarantee(length == 0, "invalid method ordering length");
    }
  }

  // Verify JNI static field identifiers
  if (jni_ids() != NULL) {
    jni_ids()->verify(this);
  }

  // Verify other fields
  if (array_klasses() != NULL) {
    guarantee(array_klasses()->is_metadata(), "should be in metaspace");
    guarantee(array_klasses()->is_klass(), "should be klass");
  }
  if (constants() != NULL) {
    guarantee(constants()->is_metadata(), "should be in metaspace");
    guarantee(constants()->is_constantPool(), "should be constant pool");
  }
  if (protection_domain() != NULL) {
    guarantee(protection_domain()->is_oop(), "should be oop");
  }
  if (host_klass() != NULL) {
    guarantee(host_klass()->is_metadata(), "should be in metaspace");
    guarantee(host_klass()->is_klass(), "should be klass");
  }
  if (signers() != NULL) {
    guarantee(signers()->is_objArray(), "should be obj array");
  }
}
3096 
3097 void InstanceKlass::oop_verify_on(oop obj, outputStream* st) {
3098   Klass::oop_verify_on(obj, st);
3099   VerifyFieldClosure blk;
3100   obj->oop_iterate_no_header(&blk);
3101 }
3102 
3103 
3104 // JNIid class for jfieldIDs only
3105 // Note to reviewers:
3106 // These JNI functions are just moved over to column 1 and not changed
3107 // in the compressed oops workspace.
3108 JNIid::JNIid(Klass* holder, int offset, JNIid* next) {
3109   _holder = holder;
3110   _offset = offset;
3111   _next = next;
3112   debug_only(_is_static_field_id = false;)
3113 }
3114 
3115 
3116 JNIid* JNIid::find(int offset) {
3117   JNIid* current = this;
3118   while (current != NULL) {
3119     if (current->offset() == offset) return current;
3120     current = current->next();
3121   }
3122   return NULL;
3123 }
3124 
3125 void JNIid::deallocate(JNIid* current) {
3126   while (current != NULL) {
3127     JNIid* next = current->next();
3128     delete current;
3129     current = next;
3130   }
3131 }
3132 
3133 
3134 void JNIid::verify(Klass* holder) {
3135   int first_field_offset  = InstanceMirrorKlass::offset_of_static_fields();
3136   int end_field_offset;
3137   end_field_offset = first_field_offset + (InstanceKlass::cast(holder)->static_field_size() * wordSize);
3138 
3139   JNIid* current = this;
3140   while (current != NULL) {
3141     guarantee(current->holder() == holder, "Invalid klass in JNIid");
3142 #ifdef ASSERT
3143     int o = current->offset();
3144     if (current->is_static_field_id()) {
3145       guarantee(o >= first_field_offset  && o < end_field_offset,  "Invalid static field offset in JNIid");
3146     }
3147 #endif
3148     current = current->next();
3149   }
3150 }
3151 
3152 
3153 #ifdef ASSERT
3154 void InstanceKlass::set_init_state(ClassState state) {
3155   bool good_state = is_shared() ? (_init_state <= state)
3156                                                : (_init_state < state);
3157   assert(good_state || state == allocated, "illegal state transition");
3158   _init_state = (u1)state;
3159 }
3160 #endif
3161 
3162 
3163 // RedefineClasses() support for previous versions:
3164 
// Purge previous versions: walk ik's _previous_versions array (newest
// to oldest) and delete every node whose constant pool is no longer
// executing on any stack.  For nodes still on the stack, EMCP methods
// that are not executing are dropped; if emcp_method_count == 0, live
// EMCP methods are marked obsolete instead (cleanup happens later).
static void purge_previous_versions_internal(InstanceKlass* ik, int emcp_method_count) {
  if (ik->previous_versions() != NULL) {
    // This klass has previous versions so see what we can cleanup
    // while it is safe to do so.

    int deleted_count = 0;    // leave debugging breadcrumbs
    int live_count = 0;
    ClassLoaderData* loader_data = ik->class_loader_data() == NULL ?
                       ClassLoaderData::the_null_class_loader_data() :
                       ik->class_loader_data();

    // RC_TRACE macro has an embedded ResourceMark
    RC_TRACE(0x00000200, ("purge: %s: previous version length=%d",
      ik->external_name(), ik->previous_versions()->length()));

    // Iterate backwards so remove_at(i) never disturbs an index we
    // have yet to visit.
    for (int i = ik->previous_versions()->length() - 1; i >= 0; i--) {
      // check the previous versions array
      PreviousVersionNode * pv_node = ik->previous_versions()->at(i);
      ConstantPool* cp_ref = pv_node->prev_constant_pool();
      assert(cp_ref != NULL, "cp ref was unexpectedly cleared");

      ConstantPool* pvcp = cp_ref;
      if (!pvcp->on_stack()) {
        // If the constant pool isn't on stack, none of the methods
        // are executing.  Delete all the methods, the constant pool and
        // and this previous version node.
        GrowableArray<Method*>* method_refs = pv_node->prev_EMCP_methods();
        if (method_refs != NULL) {
          for (int j = method_refs->length() - 1; j >= 0; j--) {
            Method* method = method_refs->at(j);
            assert(method != NULL, "method ref was unexpectedly cleared");
            method_refs->remove_at(j);
            // method will be freed with associated class.
          }
        }
        // Remove the constant pool
        delete pv_node;
        // Since we are traversing the array backwards, we don't have to
        // do anything special with the index.
        ik->previous_versions()->remove_at(i);
        deleted_count++;
        continue;
      } else {
        RC_TRACE(0x00000200, ("purge: previous version @%d is alive", i));
        assert(pvcp->pool_holder() != NULL, "Constant pool with no holder");
        guarantee (!loader_data->is_unloading(), "unloaded classes can't be on the stack");
        live_count++;
      }

      // At least one method is live in this previous version, clean out
      // the others or mark them as obsolete.
      GrowableArray<Method*>* method_refs = pv_node->prev_EMCP_methods();
      if (method_refs != NULL) {
        RC_TRACE(0x00000200, ("purge: previous methods length=%d",
          method_refs->length()));
        for (int j = method_refs->length() - 1; j >= 0; j--) {
          Method* method = method_refs->at(j);
          assert(method != NULL, "method ref was unexpectedly cleared");

          // Remove the emcp method if it's not executing
          // If it's been made obsolete by a redefinition of a non-emcp
          // method, mark it as obsolete but leave it to clean up later.
          if (!method->on_stack()) {
            method_refs->remove_at(j);
          } else if (emcp_method_count == 0) {
            method->set_is_obsolete();
          } else {
            // RC_TRACE macro has an embedded ResourceMark
            RC_TRACE(0x00000200,
              ("purge: %s(%s): prev method @%d in version @%d is alive",
              method->name()->as_C_string(),
              method->signature()->as_C_string(), j, i));
          }
        }
      }
    }
    // Every node we kept was counted live; anything else was removed.
    assert(ik->previous_versions()->length() == live_count, "sanity check");
    RC_TRACE(0x00000200,
      ("purge: previous version stats: live=%d, deleted=%d", live_count,
      deleted_count));
  }
}
3248 
3249 // External interface for use during class unloading.
3250 void InstanceKlass::purge_previous_versions(InstanceKlass* ik) {
3251   // Call with >0 emcp methods since they are not currently being redefined.
3252   purge_previous_versions_internal(ik, 1);
3253 }
3254 
3255 
3256 // Potentially add an information node that contains pointers to the
3257 // interesting parts of the previous version of the_class.
3258 // This is also where we clean out any unused references.
3259 // Note that while we delete nodes from the _previous_versions
3260 // array, we never delete the array itself until the klass is
3261 // unloaded. The has_been_redefined() query depends on that fact.
3262 //
// Record 'ikh' (the about-to-be-replaced version of this class) in the
// _previous_versions array if any of its methods may still be running,
// then purge stale entries and flush EMCP methods made obsolete by this
// redefinition.  Runs only in the VMThread at a safepoint.
void InstanceKlass::add_previous_version(instanceKlassHandle ikh,
       BitMap* emcp_methods, int emcp_method_count) {
  assert(Thread::current()->is_VM_thread(),
         "only VMThread can add previous versions");

  if (_previous_versions == NULL) {
    // This is the first previous version so make some space.
    // Start with 2 elements under the assumption that the class
    // won't be redefined much.
    _previous_versions =  new (ResourceObj::C_HEAP, mtClass)
                            GrowableArray<PreviousVersionNode *>(2, true);
  }

  ConstantPool* cp_ref = ikh->constants();

  // RC_TRACE macro has an embedded ResourceMark
  RC_TRACE(0x00000400, ("adding previous version ref for %s @%d, EMCP_cnt=%d "
                        "on_stack=%d",
    ikh->external_name(), _previous_versions->length(), emcp_method_count,
    cp_ref->on_stack()));

  // If the constant pool for this previous version of the class
  // is not marked as being on the stack, then none of the methods
  // in this previous version of the class are on the stack so
  // we don't need to create a new PreviousVersionNode. However,
  // we still need to examine older previous versions below.
  Array<Method*>* old_methods = ikh->methods();

  if (cp_ref->on_stack()) {
  PreviousVersionNode * pv_node = NULL;
  if (emcp_method_count == 0) {
      // No EMCP methods survive this redefinition, so the node only
      // needs the constant pool reference.
      // non-shared ConstantPool gets a reference
      pv_node = new PreviousVersionNode(cp_ref, !cp_ref->is_shared(), NULL);
    RC_TRACE(0x00000400,
        ("add: all methods are obsolete; flushing any EMCP refs"));
  } else {
    int local_count = 0;
      GrowableArray<Method*>* method_refs = new (ResourceObj::C_HEAP, mtClass)
        GrowableArray<Method*>(emcp_method_count, true);
    for (int i = 0; i < old_methods->length(); i++) {
      if (emcp_methods->at(i)) {
          // this old method is EMCP. Save it only if it's on the stack
          Method* old_method = old_methods->at(i);
          if (old_method->on_stack()) {
            method_refs->append(old_method);
          }
        if (++local_count >= emcp_method_count) {
          // no more EMCP methods so bail out now
          break;
        }
      }
    }
      // non-shared ConstantPool gets a reference
      pv_node = new PreviousVersionNode(cp_ref, !cp_ref->is_shared(), method_refs);
    }
    // append new previous version.
  _previous_versions->append(pv_node);
  }

  // Since the caller is the VMThread and we are at a safepoint, this
  // is a good time to clear out unused references.

  RC_TRACE(0x00000400, ("add: previous version length=%d",
    _previous_versions->length()));

  // Purge previous versions not executing on the stack
  purge_previous_versions_internal(this, emcp_method_count);

  int obsolete_method_count = old_methods->length() - emcp_method_count;

  if (emcp_method_count != 0 && obsolete_method_count != 0 &&
      _previous_versions->length() > 0) {
    // We have a mix of obsolete and EMCP methods so we have to
    // clear out any matching EMCP method entries the hard way.
    int local_count = 0;
    for (int i = 0; i < old_methods->length(); i++) {
      if (!emcp_methods->at(i)) {
        // only obsolete methods are interesting
        Method* old_method = old_methods->at(i);
        Symbol* m_name = old_method->name();
        Symbol* m_signature = old_method->signature();

        // we might not have added the last entry
        for (int j = _previous_versions->length() - 1; j >= 0; j--) {
          // check the previous versions array for non executing obsolete methods
          PreviousVersionNode * pv_node = _previous_versions->at(j);

          GrowableArray<Method*>* method_refs = pv_node->prev_EMCP_methods();
          if (method_refs == NULL) {
            // We have run into a PreviousVersion generation where
            // all methods were made obsolete during that generation's
            // RedefineClasses() operation. At the time of that
            // operation, all EMCP methods were flushed so we don't
            // have to go back any further.
            //
            // A NULL method_refs is different than an empty method_refs.
            // We cannot infer any optimizations about older generations
            // from an empty method_refs for the current generation.
            break;
          }

          for (int k = method_refs->length() - 1; k >= 0; k--) {
            Method* method = method_refs->at(k);

            // Match on name and signature: obsolete methods in older
            // generations shadow the same method slot.
            if (!method->is_obsolete() &&
                method->name() == m_name &&
                method->signature() == m_signature) {
              // The current RedefineClasses() call has made all EMCP
              // versions of this method obsolete so mark it as obsolete
              // and remove the reference.
              RC_TRACE(0x00000400,
                ("add: %s(%s): flush obsolete method @%d in version @%d",
                m_name->as_C_string(), m_signature->as_C_string(), k, j));

              method->set_is_obsolete();
              // Leave obsolete methods on the previous version list to
              // clean up later.
              break;
            }
          }

          // The previous loop may not find a matching EMCP method, but
          // that doesn't mean that we can optimize and not go any
          // further back in the PreviousVersion generations. The EMCP
          // method for this generation could have already been deleted,
          // but there still may be an older EMCP method that has not
          // been deleted.
        }

        if (++local_count >= obsolete_method_count) {
          // no more obsolete methods so bail out now
          break;
        }
      }
    }
  }
} // end add_previous_version()
3400 
3401 
3402 // Determine if InstanceKlass has a previous version.
3403 bool InstanceKlass::has_previous_version() const {
3404   return (_previous_versions != NULL && _previous_versions->length() > 0);
3405 } // end has_previous_version()
3406 
3407 
3408 Method* InstanceKlass::method_with_idnum(int idnum) {
3409   Method* m = NULL;
3410   if (idnum < methods()->length()) {
3411     m = methods()->at(idnum);
3412   }
3413   if (m == NULL || m->method_idnum() != idnum) {
3414     for (int index = 0; index < methods()->length(); ++index) {
3415       m = methods()->at(index);
3416       if (m->method_idnum() == idnum) {
3417         return m;
3418       }
3419     }
3420   }
3421   return m;
3422 }
3423 
3424 
3425 // Construct a PreviousVersionNode entry for the array hung off
3426 // the InstanceKlass.
3427 PreviousVersionNode::PreviousVersionNode(ConstantPool* prev_constant_pool,
3428   bool prev_cp_is_weak, GrowableArray<Method*>* prev_EMCP_methods) {
3429 
3430   _prev_constant_pool = prev_constant_pool;
3431   _prev_cp_is_weak = prev_cp_is_weak;
3432   _prev_EMCP_methods = prev_EMCP_methods;
3433 }
3434 
3435 
3436 // Destroy a PreviousVersionNode
3437 PreviousVersionNode::~PreviousVersionNode() {
3438   if (_prev_constant_pool != NULL) {
3439     _prev_constant_pool = NULL;
3440   }
3441 
3442   if (_prev_EMCP_methods != NULL) {
3443     delete _prev_EMCP_methods;
3444   }
3445 }
3446 
3447 
3448 // Construct a PreviousVersionInfo entry
3449 PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
3450   _prev_constant_pool_handle = constantPoolHandle();  // NULL handle
3451   _prev_EMCP_method_handles = NULL;
3452 
3453   ConstantPool* cp = pv_node->prev_constant_pool();
3454   assert(cp != NULL, "constant pool ref was unexpectedly cleared");
3455   if (cp == NULL) {
3456     return;  // robustness
3457   }
3458 
3459   // make the ConstantPool* safe to return
3460   _prev_constant_pool_handle = constantPoolHandle(cp);
3461 
3462   GrowableArray<Method*>* method_refs = pv_node->prev_EMCP_methods();
3463   if (method_refs == NULL) {
3464     // the InstanceKlass did not have any EMCP methods
3465     return;
3466   }
3467 
3468   _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);
3469 
3470   int n_methods = method_refs->length();
3471   for (int i = 0; i < n_methods; i++) {
3472     Method* method = method_refs->at(i);
3473     assert (method != NULL, "method has been cleared");
3474     if (method == NULL) {
3475       continue;  // robustness
3476     }
3477     // make the Method* safe to return
3478     _prev_EMCP_method_handles->append(methodHandle(method));
3479   }
3480 }
3481 
3482 
// Destroy a PreviousVersionInfo
PreviousVersionInfo::~PreviousVersionInfo() {
  // Since _prev_EMCP_method_handles is not C-heap allocated, we
  // don't have to delete it here — presumably it is resource-area
  // allocated and reclaimed with the enclosing ResourceMark/HandleMark
  // (see PreviousVersionWalker); confirm against GrowableArray's
  // default allocation policy.
}
3488 
3489 
3490 // Construct a helper for walking the previous versions array
3491 PreviousVersionWalker::PreviousVersionWalker(InstanceKlass *ik) {
3492   _previous_versions = ik->previous_versions();
3493   _current_index = 0;
3494   // _hm needs no initialization
3495   _current_p = NULL;
3496 }
3497 
3498 
// Destroy a PreviousVersionWalker
PreviousVersionWalker::~PreviousVersionWalker() {
  // Delete the current info just in case the caller didn't walk to
  // the end of the previous versions list (next_previous_version only
  // frees the info from the *previous* call). No harm if _current_p is
  // already NULL — delete of NULL is a no-op.
  delete _current_p;

  // When _hm is destroyed, all the Handles returned in
  // PreviousVersionInfo objects will be destroyed.
  // Also, after this destructor is finished it will be
  // safe to delete the GrowableArray allocated in the
  // PreviousVersionInfo objects.
}
3512 
3513 
3514 // Return the interesting information for the next previous version
3515 // of the klass. Returns NULL if there are no more previous versions.
3516 PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
3517   if (_previous_versions == NULL) {
3518     // no previous versions so nothing to return
3519     return NULL;
3520   }
3521 
3522   delete _current_p;  // cleanup the previous info for the caller
3523   _current_p = NULL;  // reset to NULL so we don't delete same object twice
3524 
3525   int length = _previous_versions->length();
3526 
3527   while (_current_index < length) {
3528     PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
3529     PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP, mtClass)
3530                                           PreviousVersionInfo(pv_node);
3531 
3532     constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
3533     assert (!cp_h.is_null(), "null cp found in previous version");
3534 
3535     // The caller will need to delete pv_info when they are done with it.
3536     _current_p = pv_info;
3537     return pv_info;
3538   }
3539 
3540   // all of the underlying nodes' info has been deleted
3541   return NULL;
3542 } // end next_previous_version()