1 /*
   2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/javaClasses.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/verifier.hpp"
  29 #include "classfile/vmSymbols.hpp"
  30 #include "compiler/compileBroker.hpp"
  31 #include "gc_implementation/shared/markSweep.inline.hpp"
  32 #include "gc_interface/collectedHeap.inline.hpp"
  33 #include "interpreter/oopMapCache.hpp"
  34 #include "interpreter/rewriter.hpp"
  35 #include "jvmtifiles/jvmti.h"
  36 #include "memory/genOopClosures.inline.hpp"
  37 #include "memory/oopFactory.hpp"
  38 #include "memory/permGen.hpp"
  39 #include "oops/fieldStreams.hpp"
  40 #include "oops/instanceKlass.hpp"
  41 #include "oops/instanceMirrorKlass.hpp"
  42 #include "oops/instanceOop.hpp"
  43 #include "oops/methodOop.hpp"
  44 #include "oops/objArrayKlassKlass.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "oops/symbol.hpp"
  47 #include "prims/jvmtiExport.hpp"
  48 #include "prims/jvmtiRedefineClassesTrace.hpp"
  49 #include "runtime/fieldDescriptor.hpp"
  50 #include "runtime/handles.inline.hpp"
  51 #include "runtime/javaCalls.hpp"
  52 #include "runtime/mutexLocker.hpp"
  53 #include "services/threadService.hpp"
  54 #include "utilities/dtrace.hpp"
  55 #ifdef TARGET_OS_FAMILY_linux
  56 # include "thread_linux.inline.hpp"
  57 #endif
  58 #ifdef TARGET_OS_FAMILY_solaris
  59 # include "thread_solaris.inline.hpp"
  60 #endif
  61 #ifdef TARGET_OS_FAMILY_windows
  62 # include "thread_windows.inline.hpp"
  63 #endif
  64 #ifdef TARGET_OS_FAMILY_bsd
  65 # include "thread_bsd.inline.hpp"
  66 #endif
  67 #ifndef SERIALGC
  68 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  69 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  70 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  71 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  72 #include "gc_implementation/parNew/parOopClosures.inline.hpp"
  73 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
  74 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
  75 #include "oops/oop.pcgc.inline.hpp"
  76 #endif
  77 #ifdef COMPILER1
  78 #include "c1/c1_Compiler.hpp"
  79 #endif
  80 
  81 #ifdef DTRACE_ENABLED
  82 
  83 #ifndef USDT2
  84 
  85 HS_DTRACE_PROBE_DECL4(hotspot, class__initialization__required,
  86   char*, intptr_t, oop, intptr_t);
  87 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__recursive,
  88   char*, intptr_t, oop, intptr_t, int);
  89 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__concurrent,
  90   char*, intptr_t, oop, intptr_t, int);
  91 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__erroneous,
  92   char*, intptr_t, oop, intptr_t, int);
  93 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__super__failed,
  94   char*, intptr_t, oop, intptr_t, int);
  95 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__clinit,
  96   char*, intptr_t, oop, intptr_t, int);
  97 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__error,
  98   char*, intptr_t, oop, intptr_t, int);
  99 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
 100   char*, intptr_t, oop, intptr_t, int);
 101 
 102 #define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)          \
 103   {                                                              \
 104     char* data = NULL;                                           \
 105     int len = 0;                                                 \
 106     Symbol* name = (clss)->name();                               \
 107     if (name != NULL) {                                          \
 108       data = (char*)name->bytes();                               \
 109       len = name->utf8_length();                                 \
 110     }                                                            \
 111     HS_DTRACE_PROBE4(hotspot, class__initialization__##type,     \
 112       data, len, (clss)->class_loader(), thread_type);           \
 113   }
 114 
 115 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
 116   {                                                              \
 117     char* data = NULL;                                           \
 118     int len = 0;                                                 \
 119     Symbol* name = (clss)->name();                               \
 120     if (name != NULL) {                                          \
 121       data = (char*)name->bytes();                               \
 122       len = name->utf8_length();                                 \
 123     }                                                            \
 124     HS_DTRACE_PROBE5(hotspot, class__initialization__##type,     \
 125       data, len, (clss)->class_loader(), thread_type, wait);     \
 126   }
 127 #else /* USDT2 */
 128 
 129 #define HOTSPOT_CLASS_INITIALIZATION_required HOTSPOT_CLASS_INITIALIZATION_REQUIRED
 130 #define HOTSPOT_CLASS_INITIALIZATION_recursive HOTSPOT_CLASS_INITIALIZATION_RECURSIVE
 131 #define HOTSPOT_CLASS_INITIALIZATION_concurrent HOTSPOT_CLASS_INITIALIZATION_CONCURRENT
 132 #define HOTSPOT_CLASS_INITIALIZATION_erroneous HOTSPOT_CLASS_INITIALIZATION_ERRONEOUS
 133 #define HOTSPOT_CLASS_INITIALIZATION_super__failed HOTSPOT_CLASS_INITIALIZATION_SUPER_FAILED
 134 #define HOTSPOT_CLASS_INITIALIZATION_clinit HOTSPOT_CLASS_INITIALIZATION_CLINIT
 135 #define HOTSPOT_CLASS_INITIALIZATION_error HOTSPOT_CLASS_INITIALIZATION_ERROR
 136 #define HOTSPOT_CLASS_INITIALIZATION_end HOTSPOT_CLASS_INITIALIZATION_END
 137 #define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)          \
 138   {                                                              \
 139     char* data = NULL;                                           \
 140     int len = 0;                                                 \
 141     Symbol* name = (clss)->name();                               \
 142     if (name != NULL) {                                          \
 143       data = (char*)name->bytes();                               \
 144       len = name->utf8_length();                                 \
 145     }                                                            \
 146     HOTSPOT_CLASS_INITIALIZATION_##type(                         \
 147       data, len, (clss)->class_loader(), thread_type);           \
 148   }
 149 
 150 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
 151   {                                                              \
 152     char* data = NULL;                                           \
 153     int len = 0;                                                 \
 154     Symbol* name = (clss)->name();                               \
 155     if (name != NULL) {                                          \
 156       data = (char*)name->bytes();                               \
 157       len = name->utf8_length();                                 \
 158     }                                                            \
 159     HOTSPOT_CLASS_INITIALIZATION_##type(                         \
 160       data, len, (clss)->class_loader(), thread_type, wait);     \
 161   }
 162 #endif /* USDT2 */
 163 
 164 #else //  ndef DTRACE_ENABLED
 165 
 166 #define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)
 167 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait)
 168 
 169 #endif //  ndef DTRACE_ENABLED
 170 
 171 volatile int instanceKlass::_total_instanceKlass_count = 0;
 172 
 173 bool instanceKlass::should_be_initialized() const {
 174   return !is_initialized();
 175 }
 176 
 177 klassVtable* instanceKlass::vtable() const {
 178   return new klassVtable(as_klassOop(), start_of_vtable(), vtable_length() / vtableEntry::size());
 179 }
 180 
 181 klassItable* instanceKlass::itable() const {
 182   return new klassItable(as_klassOop());
 183 }
 184 
 185 void instanceKlass::eager_initialize(Thread *thread) {
 186   if (!EagerInitialization) return;
 187 
 188   if (this->is_not_initialized()) {
 189     // abort if the the class has a class initializer
 190     if (this->class_initializer() != NULL) return;
 191 
 192     // abort if it is java.lang.Object (initialization is handled in genesis)
 193     klassOop super = this->super();
 194     if (super == NULL) return;
 195 
 196     // abort if the super class should be initialized
 197     if (!instanceKlass::cast(super)->is_initialized()) return;
 198 
 199     // call body to expose the this pointer
 200     instanceKlassHandle this_oop(thread, this->as_klassOop());
 201     eager_initialize_impl(this_oop);
 202   }
 203 }
 204 
 205 
 206 void instanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
 207   EXCEPTION_MARK;
 208   ObjectLocker ol(this_oop, THREAD);
 209 
 210   // abort if someone beat us to the initialization
 211   if (!this_oop->is_not_initialized()) return;  // note: not equivalent to is_initialized()
 212 
 213   ClassState old_state = this_oop->init_state();
 214   link_class_impl(this_oop, true, THREAD);
 215   if (HAS_PENDING_EXCEPTION) {
 216     CLEAR_PENDING_EXCEPTION;
 217     // Abort if linking the class throws an exception.
 218 
 219     // Use a test to avoid redundantly resetting the state if there's
 220     // no change.  Set_init_state() asserts that state changes make
 221     // progress, whereas here we might just be spinning in place.
 222     if( old_state != this_oop->_init_state )
 223       this_oop->set_init_state (old_state);
 224   } else {
 225     // linking successfull, mark class as initialized
 226     this_oop->set_init_state (fully_initialized);
 227     // trace
 228     if (TraceClassInitialization) {
 229       ResourceMark rm(THREAD);
 230       tty->print_cr("[Initialized %s without side effects]", this_oop->external_name());
 231     }
 232   }
 233 }
 234 
 235 
 236 // See "The Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
 237 // process. The step comments refers to the procedure described in that section.
 238 // Note: implementation moved to static method to expose the this pointer.
 239 void instanceKlass::initialize(TRAPS) {
 240   if (this->should_be_initialized()) {
 241     HandleMark hm(THREAD);
 242     instanceKlassHandle this_oop(THREAD, this->as_klassOop());
 243     initialize_impl(this_oop, CHECK);
 244     // Note: at this point the class may be initialized
 245     //       OR it may be in the state of being initialized
 246     //       in case of recursive initialization!
 247   } else {
 248     assert(is_initialized(), "sanity check");
 249   }
 250 }
 251 
 252 
 253 bool instanceKlass::verify_code(
 254     instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
 255   // 1) Verify the bytecodes
 256   Verifier::Mode mode =
 257     throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
 258   return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false);
 259 }
 260 
 261 
 262 // Used exclusively by the shared spaces dump mechanism to prevent
 263 // classes mapped into the shared regions in new VMs from appearing linked.
 264 
 265 void instanceKlass::unlink_class() {
 266   assert(is_linked(), "must be linked");
 267   _init_state = loaded;
 268 }
 269 
 270 void instanceKlass::link_class(TRAPS) {
 271   assert(is_loaded(), "must be loaded");
 272   if (!is_linked()) {
 273     instanceKlassHandle this_oop(THREAD, this->as_klassOop());
 274     link_class_impl(this_oop, true, CHECK);
 275   }
 276 }
 277 
 278 // Called to verify that a class can link during initialization, without
 279 // throwing a VerifyError.
 280 bool instanceKlass::link_class_or_fail(TRAPS) {
 281   assert(is_loaded(), "must be loaded");
 282   if (!is_linked()) {
 283     instanceKlassHandle this_oop(THREAD, this->as_klassOop());
 284     link_class_impl(this_oop, false, CHECK_false);
 285   }
 286   return is_linked();
 287 }
 288 
 289 bool instanceKlass::link_class_impl(
 290     instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
 291   // check for error state
 292   if (this_oop->is_in_error_state()) {
 293     ResourceMark rm(THREAD);
 294     THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(),
 295                this_oop->external_name(), false);
 296   }
 297   // return if already verified
 298   if (this_oop->is_linked()) {
 299     return true;
 300   }
 301 
 302   // Timing
 303   // timer handles recursion
 304   assert(THREAD->is_Java_thread(), "non-JavaThread in link_class_impl");
 305   JavaThread* jt = (JavaThread*)THREAD;
 306 
 307   // link super class before linking this class
 308   instanceKlassHandle super(THREAD, this_oop->super());
 309   if (super.not_null()) {
 310     if (super->is_interface()) {  // check if super class is an interface
 311       ResourceMark rm(THREAD);
 312       Exceptions::fthrow(
 313         THREAD_AND_LOCATION,
 314         vmSymbols::java_lang_IncompatibleClassChangeError(),
 315         "class %s has interface %s as super class",
 316         this_oop->external_name(),
 317         super->external_name()
 318       );
 319       return false;
 320     }
 321 
 322     link_class_impl(super, throw_verifyerror, CHECK_false);
 323   }
 324 
 325   // link all interfaces implemented by this class before linking this class
 326   objArrayHandle interfaces (THREAD, this_oop->local_interfaces());
 327   int num_interfaces = interfaces->length();
 328   for (int index = 0; index < num_interfaces; index++) {
 329     HandleMark hm(THREAD);
 330     instanceKlassHandle ih(THREAD, klassOop(interfaces->obj_at(index)));
 331     link_class_impl(ih, throw_verifyerror, CHECK_false);
 332   }
 333 
 334   // in case the class is linked in the process of linking its superclasses
 335   if (this_oop->is_linked()) {
 336     return true;
 337   }
 338 
 339   // trace only the link time for this klass that includes
 340   // the verification time
 341   PerfClassTraceTime vmtimer(ClassLoader::perf_class_link_time(),
 342                              ClassLoader::perf_class_link_selftime(),
 343                              ClassLoader::perf_classes_linked(),
 344                              jt->get_thread_stat()->perf_recursion_counts_addr(),
 345                              jt->get_thread_stat()->perf_timers_addr(),
 346                              PerfClassTraceTime::CLASS_LINK);
 347 
 348   // verification & rewriting
 349   {
 350     ObjectLocker ol(this_oop, THREAD);
 351     // rewritten will have been set if loader constraint error found
 352     // on an earlier link attempt
 353     // don't verify or rewrite if already rewritten
 354     if (!this_oop->is_linked()) {
 355       if (!this_oop->is_rewritten()) {
 356         {
 357           // Timer includes any side effects of class verification (resolution,
 358           // etc), but not recursive entry into verify_code().
 359           PerfClassTraceTime timer(ClassLoader::perf_class_verify_time(),
 360                                    ClassLoader::perf_class_verify_selftime(),
 361                                    ClassLoader::perf_classes_verified(),
 362                                    jt->get_thread_stat()->perf_recursion_counts_addr(),
 363                                    jt->get_thread_stat()->perf_timers_addr(),
 364                                    PerfClassTraceTime::CLASS_VERIFY);
 365           bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD);
 366           if (!verify_ok) {
 367             return false;
 368           }
 369         }
 370 
 371         // Just in case a side-effect of verify linked this class already
 372         // (which can sometimes happen since the verifier loads classes
 373         // using custom class loaders, which are free to initialize things)
 374         if (this_oop->is_linked()) {
 375           return true;
 376         }
 377 
 378         // also sets rewritten
 379         this_oop->rewrite_class(CHECK_false);
 380       }
 381 
 382       // relocate jsrs and link methods after they are all rewritten
 383       this_oop->relocate_and_link_methods(CHECK_false);
 384 
 385       // Initialize the vtable and interface table after
 386       // methods have been rewritten since rewrite may
 387       // fabricate new methodOops.
 388       // also does loader constraint checking
 389       if (!this_oop()->is_shared()) {
 390         ResourceMark rm(THREAD);
 391         this_oop->vtable()->initialize_vtable(true, CHECK_false);
 392         this_oop->itable()->initialize_itable(true, CHECK_false);
 393       }
 394 #ifdef ASSERT
 395       else {
 396         ResourceMark rm(THREAD);
 397         this_oop->vtable()->verify(tty, true);
 398         // In case itable verification is ever added.
 399         // this_oop->itable()->verify(tty, true);
 400       }
 401 #endif
 402       this_oop->set_init_state(linked);
 403       if (JvmtiExport::should_post_class_prepare()) {
 404         Thread *thread = THREAD;
 405         assert(thread->is_Java_thread(), "thread->is_Java_thread()");
 406         JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop());
 407       }
 408     }
 409   }
 410   return true;
 411 }
 412 
 413 
 414 // Rewrite the byte codes of all of the methods of a class.
 415 // The rewriter must be called exactly once. Rewriting must happen after
 416 // verification but before the first method of the class is executed.
 417 void instanceKlass::rewrite_class(TRAPS) {
 418   assert(is_loaded(), "must be loaded");
 419   instanceKlassHandle this_oop(THREAD, this->as_klassOop());
 420   if (this_oop->is_rewritten()) {
 421     assert(this_oop()->is_shared(), "rewriting an unshared class?");
 422     return;
 423   }
 424   Rewriter::rewrite(this_oop, CHECK);
 425   this_oop->set_rewritten();
 426 }
 427 
 428 // Now relocate and link method entry points after class is rewritten.
 429 // This is outside is_rewritten flag. In case of an exception, it can be
 430 // executed more than once.
 431 void instanceKlass::relocate_and_link_methods(TRAPS) {
 432   assert(is_loaded(), "must be loaded");
 433   instanceKlassHandle this_oop(THREAD, this->as_klassOop());
 434   Rewriter::relocate_and_link(this_oop, CHECK);
 435 }
 436 
 437 
 438 void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
 439   // Make sure klass is linked (verified) before initialization
 440   // A class could already be verified, since it has been reflected upon.
 441   this_oop->link_class(CHECK);
 442 
 443   DTRACE_CLASSINIT_PROBE(required, instanceKlass::cast(this_oop()), -1);
 444 
 445   bool wait = false;
 446 
 447   // refer to the JVM book page 47 for description of steps
 448   // Step 1
 449   { ObjectLocker ol(this_oop, THREAD);
 450 
 451     Thread *self = THREAD; // it's passed the current thread
 452 
 453     // Step 2
 454     // If we were to use wait() instead of waitInterruptibly() then
 455     // we might end up throwing IE from link/symbol resolution sites
 456     // that aren't expected to throw.  This would wreak havoc.  See 6320309.
 457     while(this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {
 458         wait = true;
 459       ol.waitUninterruptibly(CHECK);
 460     }
 461 
 462     // Step 3
 463     if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self)) {
 464       DTRACE_CLASSINIT_PROBE_WAIT(recursive, instanceKlass::cast(this_oop()), -1,wait);
 465       return;
 466     }
 467 
 468     // Step 4
 469     if (this_oop->is_initialized()) {
 470       DTRACE_CLASSINIT_PROBE_WAIT(concurrent, instanceKlass::cast(this_oop()), -1,wait);
 471       return;
 472     }
 473 
 474     // Step 5
 475     if (this_oop->is_in_error_state()) {
 476       DTRACE_CLASSINIT_PROBE_WAIT(erroneous, instanceKlass::cast(this_oop()), -1,wait);
 477       ResourceMark rm(THREAD);
 478       const char* desc = "Could not initialize class ";
 479       const char* className = this_oop->external_name();
 480       size_t msglen = strlen(desc) + strlen(className) + 1;
 481       char* message = NEW_RESOURCE_ARRAY(char, msglen);
 482       if (NULL == message) {
 483         // Out of memory: can't create detailed error message
 484         THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
 485       } else {
 486         jio_snprintf(message, msglen, "%s%s", desc, className);
 487         THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
 488       }
 489     }
 490 
 491     // Step 6
 492     this_oop->set_init_state(being_initialized);
 493     this_oop->set_init_thread(self);
 494   }
 495 
 496   // Step 7
 497   klassOop super_klass = this_oop->super();
 498   if (super_klass != NULL && !this_oop->is_interface() && Klass::cast(super_klass)->should_be_initialized()) {
 499     Klass::cast(super_klass)->initialize(THREAD);
 500 
 501     if (HAS_PENDING_EXCEPTION) {
 502       Handle e(THREAD, PENDING_EXCEPTION);
 503       CLEAR_PENDING_EXCEPTION;
 504       {
 505         EXCEPTION_MARK;
 506         this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads
 507         CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, superclass initialization error is thrown below
 508       }
 509       DTRACE_CLASSINIT_PROBE_WAIT(super__failed, instanceKlass::cast(this_oop()), -1,wait);
 510       THROW_OOP(e());
 511     }
 512   }
 513 
 514   // Step 8
 515   {
 516     assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
 517     JavaThread* jt = (JavaThread*)THREAD;
 518     DTRACE_CLASSINIT_PROBE_WAIT(clinit, instanceKlass::cast(this_oop()), -1,wait);
 519     // Timer includes any side effects of class initialization (resolution,
 520     // etc), but not recursive entry into call_class_initializer().
 521     PerfClassTraceTime timer(ClassLoader::perf_class_init_time(),
 522                              ClassLoader::perf_class_init_selftime(),
 523                              ClassLoader::perf_classes_inited(),
 524                              jt->get_thread_stat()->perf_recursion_counts_addr(),
 525                              jt->get_thread_stat()->perf_timers_addr(),
 526                              PerfClassTraceTime::CLASS_CLINIT);
 527     this_oop->call_class_initializer(THREAD);
 528   }
 529 
 530   // Step 9
 531   if (!HAS_PENDING_EXCEPTION) {
 532     this_oop->set_initialization_state_and_notify(fully_initialized, CHECK);
 533     { ResourceMark rm(THREAD);
 534       debug_only(this_oop->vtable()->verify(tty, true);)
 535     }
 536   }
 537   else {
 538     // Step 10 and 11
 539     Handle e(THREAD, PENDING_EXCEPTION);
 540     CLEAR_PENDING_EXCEPTION;
 541     {
 542       EXCEPTION_MARK;
 543       this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
 544       CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, class initialization error is thrown below
 545     }
 546     DTRACE_CLASSINIT_PROBE_WAIT(error, instanceKlass::cast(this_oop()), -1,wait);
 547     if (e->is_a(SystemDictionary::Error_klass())) {
 548       THROW_OOP(e());
 549     } else {
 550       JavaCallArguments args(e);
 551       THROW_ARG(vmSymbols::java_lang_ExceptionInInitializerError(),
 552                 vmSymbols::throwable_void_signature(),
 553                 &args);
 554     }
 555   }
 556   DTRACE_CLASSINIT_PROBE_WAIT(end, instanceKlass::cast(this_oop()), -1,wait);
 557 }
 558 
 559 
 560 // Note: implementation moved to static method to expose the this pointer.
 561 void instanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
 562   instanceKlassHandle kh(THREAD, this->as_klassOop());
 563   set_initialization_state_and_notify_impl(kh, state, CHECK);
 564 }
 565 
 566 void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
 567   ObjectLocker ol(this_oop, THREAD);
 568   this_oop->set_init_state(state);
 569   ol.notify_all(CHECK);
 570 }
 571 
 572 // The embedded _implementor field can only record one implementor.
 573 // When there are more than one implementors, the _implementor field
 574 // is set to the interface klassOop itself. Following are the possible
 575 // values for the _implementor field:
 576 //   NULL                  - no implementor
 577 //   implementor klassOop  - one implementor
 578 //   self                  - more than one implementor
 579 //
 580 // The _implementor field only exists for interfaces.
 581 void instanceKlass::add_implementor(klassOop k) {
 582   assert(Compile_lock->owned_by_self(), "");
 583   assert(is_interface(), "not interface");
 584   // Filter out my subinterfaces.
 585   // (Note: Interfaces are never on the subklass list.)
 586   if (instanceKlass::cast(k)->is_interface()) return;
 587 
 588   // Filter out subclasses whose supers already implement me.
 589   // (Note: CHA must walk subclasses of direct implementors
 590   // in order to locate indirect implementors.)
 591   klassOop sk = instanceKlass::cast(k)->super();
 592   if (sk != NULL && instanceKlass::cast(sk)->implements_interface(as_klassOop()))
 593     // We only need to check one immediate superclass, since the
 594     // implements_interface query looks at transitive_interfaces.
 595     // Any supers of the super have the same (or fewer) transitive_interfaces.
 596     return;
 597 
 598   klassOop ik = implementor();
 599   if (ik == NULL) {
 600     set_implementor(k);
 601   } else if (ik != this->as_klassOop()) {
 602     // There is already an implementor. Use itself as an indicator of
 603     // more than one implementors.
 604     set_implementor(this->as_klassOop());
 605   }
 606 
 607   // The implementor also implements the transitive_interfaces
 608   for (int index = 0; index < local_interfaces()->length(); index++) {
 609     instanceKlass::cast(klassOop(local_interfaces()->obj_at(index)))->add_implementor(k);
 610   }
 611 }
 612 
 613 void instanceKlass::init_implementor() {
 614   if (is_interface()) {
 615     set_implementor(NULL);
 616   }
 617 }
 618 
 619 
 620 void instanceKlass::process_interfaces(Thread *thread) {
 621   // link this class into the implementors list of every interface it implements
 622   KlassHandle this_as_oop (thread, this->as_klassOop());
 623   for (int i = local_interfaces()->length() - 1; i >= 0; i--) {
 624     assert(local_interfaces()->obj_at(i)->is_klass(), "must be a klass");
 625     instanceKlass* interf = instanceKlass::cast(klassOop(local_interfaces()->obj_at(i)));
 626     assert(interf->is_interface(), "expected interface");
 627     interf->add_implementor(this_as_oop());
 628   }
 629 }
 630 
 631 bool instanceKlass::can_be_primary_super_slow() const {
 632   if (is_interface())
 633     return false;
 634   else
 635     return Klass::can_be_primary_super_slow();
 636 }
 637 
 638 objArrayOop instanceKlass::compute_secondary_supers(int num_extra_slots, TRAPS) {
 639   // The secondaries are the implemented interfaces.
 640   instanceKlass* ik = instanceKlass::cast(as_klassOop());
 641   objArrayHandle interfaces (THREAD, ik->transitive_interfaces());
 642   int num_secondaries = num_extra_slots + interfaces->length();
 643   if (num_secondaries == 0) {
 644     return Universe::the_empty_system_obj_array();
 645   } else if (num_extra_slots == 0) {
 646     return interfaces();
 647   } else {
 648     // a mix of both
 649     objArrayOop secondaries = oopFactory::new_system_objArray(num_secondaries, CHECK_NULL);
 650     for (int i = 0; i < interfaces->length(); i++) {
 651       secondaries->obj_at_put(num_extra_slots+i, interfaces->obj_at(i));
 652     }
 653     return secondaries;
 654   }
 655 }
 656 
 657 bool instanceKlass::compute_is_subtype_of(klassOop k) {
 658   if (Klass::cast(k)->is_interface()) {
 659     return implements_interface(k);
 660   } else {
 661     return Klass::compute_is_subtype_of(k);
 662   }
 663 }
 664 
 665 bool instanceKlass::implements_interface(klassOop k) const {
 666   if (as_klassOop() == k) return true;
 667   assert(Klass::cast(k)->is_interface(), "should be an interface class");
 668   for (int i = 0; i < transitive_interfaces()->length(); i++) {
 669     if (transitive_interfaces()->obj_at(i) == k) {
 670       return true;
 671     }
 672   }
 673   return false;
 674 }
 675 
 676 objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) {
 677   if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
 678   if (length > arrayOopDesc::max_array_length(T_OBJECT)) {
 679     report_java_out_of_memory("Requested array size exceeds VM limit");
 680     JvmtiExport::post_array_size_exhausted();
 681     THROW_OOP_0(Universe::out_of_memory_error_array_size());
 682   }
 683   int size = objArrayOopDesc::object_size(length);
 684   klassOop ak = array_klass(n, CHECK_NULL);
 685   KlassHandle h_ak (THREAD, ak);
 686   objArrayOop o =
 687     (objArrayOop)CollectedHeap::array_allocate(h_ak, size, length, CHECK_NULL);
 688   return o;
 689 }
 690 
 691 instanceOop instanceKlass::register_finalizer(instanceOop i, TRAPS) {
 692   if (TraceFinalizerRegistration) {
 693     tty->print("Registered ");
 694     i->print_value_on(tty);
 695     tty->print_cr(" (" INTPTR_FORMAT ") as finalizable", (address)i);
 696   }
 697   instanceHandle h_i(THREAD, i);
 698   // Pass the handle as argument, JavaCalls::call expects oop as jobjects
 699   JavaValue result(T_VOID);
 700   JavaCallArguments args(h_i);
 701   methodHandle mh (THREAD, Universe::finalizer_register_method());
 702   JavaCalls::call(&result, mh, &args, CHECK_NULL);
 703   return h_i();
 704 }
 705 
 706 instanceOop instanceKlass::allocate_instance(TRAPS) {
 707   assert(!oop_is_instanceMirror(), "wrong allocation path");
 708   bool has_finalizer_flag = has_finalizer(); // Query before possible GC
 709   int size = size_helper();  // Query before forming handle.
 710 
 711   KlassHandle h_k(THREAD, as_klassOop());
 712 
 713   instanceOop i;
 714 
 715   i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
 716   if (has_finalizer_flag && !RegisterFinalizersAtInit) {
 717     i = register_finalizer(i, CHECK_NULL);
 718   }
 719   return i;
 720 }
 721 
 722 instanceOop instanceKlass::allocate_permanent_instance(TRAPS) {
 723   // Finalizer registration occurs in the Object.<init> constructor
 724   // and constructors normally aren't run when allocating perm
 725   // instances so simply disallow finalizable perm objects.  This can
 726   // be relaxed if a need for it is found.
 727   assert(!has_finalizer(), "perm objects not allowed to have finalizers");
 728   assert(!oop_is_instanceMirror(), "wrong allocation path");
 729   int size = size_helper();  // Query before forming handle.
 730   KlassHandle h_k(THREAD, as_klassOop());
 731   instanceOop i = (instanceOop)
 732     CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL);
 733   return i;
 734 }
 735 
 736 void instanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
 737   if (is_interface() || is_abstract()) {
 738     ResourceMark rm(THREAD);
 739     THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
 740               : vmSymbols::java_lang_InstantiationException(), external_name());
 741   }
 742   if (as_klassOop() == SystemDictionary::Class_klass()) {
 743     ResourceMark rm(THREAD);
 744     THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError()
 745               : vmSymbols::java_lang_IllegalAccessException(), external_name());
 746   }
 747 }
 748 
 749 klassOop instanceKlass::array_klass_impl(bool or_null, int n, TRAPS) {
 750   instanceKlassHandle this_oop(THREAD, as_klassOop());
 751   return array_klass_impl(this_oop, or_null, n, THREAD);
 752 }
 753 
 754 klassOop instanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {
 755   if (this_oop->array_klasses() == NULL) {
 756     if (or_null) return NULL;
 757 
 758     ResourceMark rm;
 759     JavaThread *jt = (JavaThread *)THREAD;
 760     {
 761       // Atomic creation of array_klasses
 762       MutexLocker mc(Compile_lock, THREAD);   // for vtables
 763       MutexLocker ma(MultiArray_lock, THREAD);
 764 
 765       // Check if update has already taken place
 766       if (this_oop->array_klasses() == NULL) {
 767         objArrayKlassKlass* oakk =
 768           (objArrayKlassKlass*)Universe::objArrayKlassKlassObj()->klass_part();
 769 
 770         klassOop  k = oakk->allocate_objArray_klass(1, this_oop, CHECK_NULL);
 771         this_oop->set_array_klasses(k);
 772       }
 773     }
 774   }
 775   // _this will always be set at this point
 776   objArrayKlass* oak = (objArrayKlass*)this_oop->array_klasses()->klass_part();
 777   if (or_null) {
 778     return oak->array_klass_or_null(n);
 779   }
 780   return oak->array_klass(n, CHECK_NULL);
 781 }
 782 
 783 klassOop instanceKlass::array_klass_impl(bool or_null, TRAPS) {
 784   return array_klass_impl(or_null, 1, THREAD);
 785 }
 786 
 787 void instanceKlass::call_class_initializer(TRAPS) {
 788   instanceKlassHandle ik (THREAD, as_klassOop());
 789   call_class_initializer_impl(ik, THREAD);
 790 }
 791 
 792 static int call_class_initializer_impl_counter = 0;   // for debugging
 793 
 794 methodOop instanceKlass::class_initializer() {
 795   methodOop clinit = find_method(
 796       vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
 797   if (clinit != NULL && clinit->has_valid_initializer_flags()) {
 798     return clinit;
 799   }
 800   return NULL;
 801 }
 802 
 803 void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
 804   methodHandle h_method(THREAD, this_oop->class_initializer());
 805   assert(!this_oop->is_initialized(), "we cannot initialize twice");
 806   if (TraceClassInitialization) {
 807     tty->print("%d Initializing ", call_class_initializer_impl_counter++);
 808     this_oop->name()->print_value();
 809     tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop());
 810   }
 811   if (h_method() != NULL) {
 812     JavaCallArguments args; // No arguments
 813     JavaValue result(T_VOID);
 814     JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
 815   }
 816 }
 817 
 818 
 819 void instanceKlass::mask_for(methodHandle method, int bci,
 820   InterpreterOopMap* entry_for) {
 821   // Dirty read, then double-check under a lock.
 822   if (_oop_map_cache == NULL) {
 823     // Otherwise, allocate a new one.
 824     MutexLocker x(OopMapCacheAlloc_lock);
 825     // First time use. Allocate a cache in C heap
 826     if (_oop_map_cache == NULL) {
 827       _oop_map_cache = new OopMapCache();
 828     }
 829   }
 830   // _oop_map_cache is constant after init; lookup below does is own locking.
 831   _oop_map_cache->lookup(method, bci, entry_for);
 832 }
 833 
 834 
 835 bool instanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
 836   for (JavaFieldStream fs(as_klassOop()); !fs.done(); fs.next()) {
 837     Symbol* f_name = fs.name();
 838     Symbol* f_sig  = fs.signature();
 839     if (f_name == name && f_sig == sig) {
 840       fd->initialize(as_klassOop(), fs.index());
 841       return true;
 842     }
 843   }
 844   return false;
 845 }
 846 
 847 
 848 void instanceKlass::shared_symbols_iterate(SymbolClosure* closure) {
 849   Klass::shared_symbols_iterate(closure);
 850   closure->do_symbol(&_generic_signature);
 851   closure->do_symbol(&_source_file_name);
 852 
 853   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
 854     int name_index = fs.name_index();
 855     closure->do_symbol(constants()->symbol_at_addr(name_index));
 856     int sig_index  = fs.signature_index();
 857     closure->do_symbol(constants()->symbol_at_addr(sig_index));
 858   }
 859 }
 860 
 861 
 862 klassOop instanceKlass::find_interface_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
 863   const int n = local_interfaces()->length();
 864   for (int i = 0; i < n; i++) {
 865     klassOop intf1 = klassOop(local_interfaces()->obj_at(i));
 866     assert(Klass::cast(intf1)->is_interface(), "just checking type");
 867     // search for field in current interface
 868     if (instanceKlass::cast(intf1)->find_local_field(name, sig, fd)) {
 869       assert(fd->is_static(), "interface field must be static");
 870       return intf1;
 871     }
 872     // search for field in direct superinterfaces
 873     klassOop intf2 = instanceKlass::cast(intf1)->find_interface_field(name, sig, fd);
 874     if (intf2 != NULL) return intf2;
 875   }
 876   // otherwise field lookup fails
 877   return NULL;
 878 }
 879 
 880 
 881 klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
 882   // search order according to newest JVM spec (5.4.3.2, p.167).
 883   // 1) search for field in current klass
 884   if (find_local_field(name, sig, fd)) {
 885     return as_klassOop();
 886   }
 887   // 2) search for field recursively in direct superinterfaces
 888   { klassOop intf = find_interface_field(name, sig, fd);
 889     if (intf != NULL) return intf;
 890   }
 891   // 3) apply field lookup recursively if superclass exists
 892   { klassOop supr = super();
 893     if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, fd);
 894   }
 895   // 4) otherwise field lookup fails
 896   return NULL;
 897 }
 898 
 899 
 900 klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, bool is_static, fieldDescriptor* fd) const {
 901   // search order according to newest JVM spec (5.4.3.2, p.167).
 902   // 1) search for field in current klass
 903   if (find_local_field(name, sig, fd)) {
 904     if (fd->is_static() == is_static) return as_klassOop();
 905   }
 906   // 2) search for field recursively in direct superinterfaces
 907   if (is_static) {
 908     klassOop intf = find_interface_field(name, sig, fd);
 909     if (intf != NULL) return intf;
 910   }
 911   // 3) apply field lookup recursively if superclass exists
 912   { klassOop supr = super();
 913     if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, is_static, fd);
 914   }
 915   // 4) otherwise field lookup fails
 916   return NULL;
 917 }
 918 
 919 
 920 bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
 921   for (JavaFieldStream fs(as_klassOop()); !fs.done(); fs.next()) {
 922     if (fs.offset() == offset) {
 923       fd->initialize(as_klassOop(), fs.index());
 924       if (fd->is_static() == is_static) return true;
 925     }
 926   }
 927   return false;
 928 }
 929 
 930 
 931 bool instanceKlass::find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
 932   klassOop klass = as_klassOop();
 933   while (klass != NULL) {
 934     if (instanceKlass::cast(klass)->find_local_field_from_offset(offset, is_static, fd)) {
 935       return true;
 936     }
 937     klass = Klass::cast(klass)->super();
 938   }
 939   return false;
 940 }
 941 
 942 
 943 void instanceKlass::methods_do(void f(methodOop method)) {
 944   int len = methods()->length();
 945   for (int index = 0; index < len; index++) {
 946     methodOop m = methodOop(methods()->obj_at(index));
 947     assert(m->is_method(), "must be method");
 948     f(m);
 949   }
 950 }
 951 
 952 
 953 void instanceKlass::do_local_static_fields(FieldClosure* cl) {
 954   for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
 955     if (fs.access_flags().is_static()) {
 956       fieldDescriptor fd;
 957       fd.initialize(as_klassOop(), fs.index());
 958       cl->do_field(&fd);
 959     }
 960   }
 961 }
 962 
 963 
 964 void instanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
 965   instanceKlassHandle h_this(THREAD, as_klassOop());
 966   do_local_static_fields_impl(h_this, f, CHECK);
 967 }
 968 
 969 
 970 void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
 971   for (JavaFieldStream fs(this_oop()); !fs.done(); fs.next()) {
 972     if (fs.access_flags().is_static()) {
 973       fieldDescriptor fd;
 974       fd.initialize(this_oop(), fs.index());
 975       f(&fd, CHECK);
 976     }
 977   }
 978 }
 979 
 980 
 981 static int compare_fields_by_offset(int* a, int* b) {
 982   return a[0] - b[0];
 983 }
 984 
 985 void instanceKlass::do_nonstatic_fields(FieldClosure* cl) {
 986   instanceKlass* super = superklass();
 987   if (super != NULL) {
 988     super->do_nonstatic_fields(cl);
 989   }
 990   fieldDescriptor fd;
 991   int length = java_fields_count();
 992   // In DebugInfo nonstatic fields are sorted by offset.
 993   int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1), mtClass);
 994   int j = 0;
 995   for (int i = 0; i < length; i += 1) {
 996     fd.initialize(as_klassOop(), i);
 997     if (!fd.is_static()) {
 998       fields_sorted[j + 0] = fd.offset();
 999       fields_sorted[j + 1] = i;
1000       j += 2;
1001     }
1002   }
1003   if (j > 0) {
1004     length = j;
1005     // _sort_Fn is defined in growableArray.hpp.
1006     qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
1007     for (int i = 0; i < length; i += 2) {
1008       fd.initialize(as_klassOop(), fields_sorted[i + 1]);
1009       assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
1010       cl->do_field(&fd);
1011     }
1012   }
1013   FREE_C_HEAP_ARRAY(int, fields_sorted, mtClass);
1014 }
1015 
1016 
1017 void instanceKlass::array_klasses_do(void f(klassOop k)) {
1018   if (array_klasses() != NULL)
1019     arrayKlass::cast(array_klasses())->array_klasses_do(f);
1020 }
1021 
1022 
1023 void instanceKlass::with_array_klasses_do(void f(klassOop k)) {
1024   f(as_klassOop());
1025   array_klasses_do(f);
1026 }
1027 
1028 #ifdef ASSERT
1029 static int linear_search(objArrayOop methods, Symbol* name, Symbol* signature) {
1030   int len = methods->length();
1031   for (int index = 0; index < len; index++) {
1032     methodOop m = (methodOop)(methods->obj_at(index));
1033     assert(m->is_method(), "must be method");
1034     if (m->signature() == signature && m->name() == name) {
1035        return index;
1036     }
1037   }
1038   return -1;
1039 }
1040 #endif
1041 
1042 methodOop instanceKlass::find_method(Symbol* name, Symbol* signature) const {
1043   return instanceKlass::find_method(methods(), name, signature);
1044 }
1045 
1046 methodOop instanceKlass::find_method(objArrayOop methods, Symbol* name, Symbol* signature) {
1047   int len = methods->length();
1048   // methods are sorted, so do binary search
1049   int l = 0;
1050   int h = len - 1;
1051   while (l <= h) {
1052     int mid = (l + h) >> 1;
1053     methodOop m = (methodOop)methods->obj_at(mid);
1054     assert(m->is_method(), "must be method");
1055     int res = m->name()->fast_compare(name);
1056     if (res == 0) {
1057       // found matching name; do linear search to find matching signature
1058       // first, quick check for common case
1059       if (m->signature() == signature) return m;
1060       // search downwards through overloaded methods
1061       int i;
1062       for (i = mid - 1; i >= l; i--) {
1063         methodOop m = (methodOop)methods->obj_at(i);
1064         assert(m->is_method(), "must be method");
1065         if (m->name() != name) break;
1066         if (m->signature() == signature) return m;
1067       }
1068       // search upwards
1069       for (i = mid + 1; i <= h; i++) {
1070         methodOop m = (methodOop)methods->obj_at(i);
1071         assert(m->is_method(), "must be method");
1072         if (m->name() != name) break;
1073         if (m->signature() == signature) return m;
1074       }
1075       // not found
1076 #ifdef ASSERT
1077       int index = linear_search(methods, name, signature);
1078       assert(index == -1, err_msg("binary search should have found entry %d", index));
1079 #endif
1080       return NULL;
1081     } else if (res < 0) {
1082       l = mid + 1;
1083     } else {
1084       h = mid - 1;
1085     }
1086   }
1087 #ifdef ASSERT
1088   int index = linear_search(methods, name, signature);
1089   assert(index == -1, err_msg("binary search should have found entry %d", index));
1090 #endif
1091   return NULL;
1092 }
1093 
1094 methodOop instanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
1095   klassOop klass = as_klassOop();
1096   while (klass != NULL) {
1097     methodOop method = instanceKlass::cast(klass)->find_method(name, signature);
1098     if (method != NULL) return method;
1099     klass = instanceKlass::cast(klass)->super();
1100   }
1101   return NULL;
1102 }
1103 
1104 // lookup a method in all the interfaces that this class implements
1105 methodOop instanceKlass::lookup_method_in_all_interfaces(Symbol* name,
1106                                                          Symbol* signature) const {
1107   objArrayOop all_ifs = instanceKlass::cast(as_klassOop())->transitive_interfaces();
1108   int num_ifs = all_ifs->length();
1109   instanceKlass *ik = NULL;
1110   for (int i = 0; i < num_ifs; i++) {
1111     ik = instanceKlass::cast(klassOop(all_ifs->obj_at(i)));
1112     methodOop m = ik->lookup_method(name, signature);
1113     if (m != NULL) {
1114       return m;
1115     }
1116   }
1117   return NULL;
1118 }
1119 
1120 /* jni_id_for_impl for jfieldIds only */
1121 JNIid* instanceKlass::jni_id_for_impl(instanceKlassHandle this_oop, int offset) {
1122   MutexLocker ml(JfieldIdCreation_lock);
1123   // Retry lookup after we got the lock
1124   JNIid* probe = this_oop->jni_ids() == NULL ? NULL : this_oop->jni_ids()->find(offset);
1125   if (probe == NULL) {
1126     // Slow case, allocate new static field identifier
1127     probe = new JNIid(this_oop->as_klassOop(), offset, this_oop->jni_ids());
1128     this_oop->set_jni_ids(probe);
1129   }
1130   return probe;
1131 }
1132 
1133 
1134 /* jni_id_for for jfieldIds only */
1135 JNIid* instanceKlass::jni_id_for(int offset) {
1136   JNIid* probe = jni_ids() == NULL ? NULL : jni_ids()->find(offset);
1137   if (probe == NULL) {
1138     probe = jni_id_for_impl(this->as_klassOop(), offset);
1139   }
1140   return probe;
1141 }
1142 
1143 u2 instanceKlass::enclosing_method_data(int offset) {
1144   typeArrayOop inner_class_list = inner_classes();
1145   if (inner_class_list == NULL) {
1146     return 0;
1147   }
1148   int length = inner_class_list->length();
1149   if (length % inner_class_next_offset == 0) {
1150     return 0;
1151   } else {
1152     int index = length - enclosing_method_attribute_size;
1153     typeArrayHandle inner_class_list_h(inner_class_list);
1154     assert(offset < enclosing_method_attribute_size, "invalid offset");
1155     return inner_class_list_h->ushort_at(index + offset);
1156   }
1157 }
1158 
1159 void instanceKlass::set_enclosing_method_indices(u2 class_index,
1160                                                  u2 method_index) {
1161   typeArrayOop inner_class_list = inner_classes();
1162   assert (inner_class_list != NULL, "_inner_classes list is not set up");
1163   int length = inner_class_list->length();
1164   if (length % inner_class_next_offset == enclosing_method_attribute_size) {
1165     int index = length - enclosing_method_attribute_size;
1166     typeArrayHandle inner_class_list_h(inner_class_list);
1167     inner_class_list_h->ushort_at_put(
1168       index + enclosing_method_class_index_offset, class_index);
1169     inner_class_list_h->ushort_at_put(
1170       index + enclosing_method_method_index_offset, method_index);
1171   }
1172 }
1173 
1174 // Lookup or create a jmethodID.
1175 // This code is called by the VMThread and JavaThreads so the
1176 // locking has to be done very carefully to avoid deadlocks
1177 // and/or other cache consistency problems.
1178 //
1179 jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h) {
1180   size_t idnum = (size_t)method_h->method_idnum();
1181   jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
1182   size_t length = 0;
1183   jmethodID id = NULL;
1184 
1185   // We use a double-check locking idiom here because this cache is
1186   // performance sensitive. In the normal system, this cache only
1187   // transitions from NULL to non-NULL which is safe because we use
1188   // release_set_methods_jmethod_ids() to advertise the new cache.
1189   // A partially constructed cache should never be seen by a racing
1190   // thread. We also use release_store_ptr() to save a new jmethodID
1191   // in the cache so a partially constructed jmethodID should never be
1192   // seen either. Cache reads of existing jmethodIDs proceed without a
1193   // lock, but cache writes of a new jmethodID requires uniqueness and
1194   // creation of the cache itself requires no leaks so a lock is
1195   // generally acquired in those two cases.
1196   //
1197   // If the RedefineClasses() API has been used, then this cache can
1198   // grow and we'll have transitions from non-NULL to bigger non-NULL.
1199   // Cache creation requires no leaks and we require safety between all
1200   // cache accesses and freeing of the old cache so a lock is generally
1201   // acquired when the RedefineClasses() API has been used.
1202 
1203   if (jmeths != NULL) {
1204     // the cache already exists
1205     if (!ik_h->idnum_can_increment()) {
1206       // the cache can't grow so we can just get the current values
1207       get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1208     } else {
1209       // cache can grow so we have to be more careful
1210       if (Threads::number_of_threads() == 0 ||
1211           SafepointSynchronize::is_at_safepoint()) {
1212         // we're single threaded or at a safepoint - no locking needed
1213         get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1214       } else {
1215         MutexLocker ml(JmethodIdCreation_lock);
1216         get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1217       }
1218     }
1219   }
1220   // implied else:
1221   // we need to allocate a cache so default length and id values are good
1222 
1223   if (jmeths == NULL ||   // no cache yet
1224       length <= idnum ||  // cache is too short
1225       id == NULL) {       // cache doesn't contain entry
1226 
1227     // This function can be called by the VMThread so we have to do all
1228     // things that might block on a safepoint before grabbing the lock.
1229     // Otherwise, we can deadlock with the VMThread or have a cache
1230     // consistency issue. These vars keep track of what we might have
1231     // to free after the lock is dropped.
1232     jmethodID  to_dealloc_id     = NULL;
1233     jmethodID* to_dealloc_jmeths = NULL;
1234 
1235     // may not allocate new_jmeths or use it if we allocate it
1236     jmethodID* new_jmeths = NULL;
1237     if (length <= idnum) {
1238       // allocate a new cache that might be used
1239       size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
1240       new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1, mtClass);
1241       memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
1242       // cache size is stored in element[0], other elements offset by one
1243       new_jmeths[0] = (jmethodID)size;
1244     }
1245 
1246     // allocate a new jmethodID that might be used
1247     jmethodID new_id = NULL;
1248     if (method_h->is_old() && !method_h->is_obsolete()) {
1249       // The method passed in is old (but not obsolete), we need to use the current version
1250       methodOop current_method = ik_h->method_with_idnum((int)idnum);
1251       assert(current_method != NULL, "old and but not obsolete, so should exist");
1252       methodHandle current_method_h(current_method == NULL? method_h() : current_method);
1253       new_id = JNIHandles::make_jmethod_id(current_method_h);
1254     } else {
1255       // It is the current version of the method or an obsolete method,
1256       // use the version passed in
1257       new_id = JNIHandles::make_jmethod_id(method_h);
1258     }
1259 
1260     if (Threads::number_of_threads() == 0 ||
1261         SafepointSynchronize::is_at_safepoint()) {
1262       // we're single threaded or at a safepoint - no locking needed
1263       id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
1264                                           &to_dealloc_id, &to_dealloc_jmeths);
1265     } else {
1266       MutexLocker ml(JmethodIdCreation_lock);
1267       id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
1268                                           &to_dealloc_id, &to_dealloc_jmeths);
1269     }
1270 
1271     // The lock has been dropped so we can free resources.
1272     // Free up either the old cache or the new cache if we allocated one.
1273     if (to_dealloc_jmeths != NULL) {
1274       FreeHeap(to_dealloc_jmeths);
1275     }
1276     // free up the new ID since it wasn't needed
1277     if (to_dealloc_id != NULL) {
1278       JNIHandles::destroy_jmethod_id(to_dealloc_id);
1279     }
1280   }
1281   return id;
1282 }
1283 
1284 
1285 // Common code to fetch the jmethodID from the cache or update the
1286 // cache with the new jmethodID. This function should never do anything
1287 // that causes the caller to go to a safepoint or we can deadlock with
1288 // the VMThread or have cache consistency issues.
1289 //
1290 jmethodID instanceKlass::get_jmethod_id_fetch_or_update(
1291             instanceKlassHandle ik_h, size_t idnum, jmethodID new_id,
1292             jmethodID* new_jmeths, jmethodID* to_dealloc_id_p,
1293             jmethodID** to_dealloc_jmeths_p) {
1294   assert(new_id != NULL, "sanity check");
1295   assert(to_dealloc_id_p != NULL, "sanity check");
1296   assert(to_dealloc_jmeths_p != NULL, "sanity check");
1297   assert(Threads::number_of_threads() == 0 ||
1298          SafepointSynchronize::is_at_safepoint() ||
1299          JmethodIdCreation_lock->owned_by_self(), "sanity check");
1300 
1301   // reacquire the cache - we are locked, single threaded or at a safepoint
1302   jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
1303   jmethodID  id     = NULL;
1304   size_t     length = 0;
1305 
1306   if (jmeths == NULL ||                         // no cache yet
1307       (length = (size_t)jmeths[0]) <= idnum) {  // cache is too short
1308     if (jmeths != NULL) {
1309       // copy any existing entries from the old cache
1310       for (size_t index = 0; index < length; index++) {
1311         new_jmeths[index+1] = jmeths[index+1];
1312       }
1313       *to_dealloc_jmeths_p = jmeths;  // save old cache for later delete
1314     }
1315     ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
1316   } else {
1317     // fetch jmethodID (if any) from the existing cache
1318     id = jmeths[idnum+1];
1319     *to_dealloc_jmeths_p = new_jmeths;  // save new cache for later delete
1320   }
1321   if (id == NULL) {
1322     // No matching jmethodID in the existing cache or we have a new
1323     // cache or we just grew the cache. This cache write is done here
1324     // by the first thread to win the foot race because a jmethodID
1325     // needs to be unique once it is generally available.
1326     id = new_id;
1327 
1328     // The jmethodID cache can be read while unlocked so we have to
1329     // make sure the new jmethodID is complete before installing it
1330     // in the cache.
1331     OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
1332   } else {
1333     *to_dealloc_id_p = new_id; // save new id for later delete
1334   }
1335   return id;
1336 }
1337 
1338 
1339 // Common code to get the jmethodID cache length and the jmethodID
1340 // value at index idnum if there is one.
1341 //
1342 void instanceKlass::get_jmethod_id_length_value(jmethodID* cache,
1343        size_t idnum, size_t *length_p, jmethodID* id_p) {
1344   assert(cache != NULL, "sanity check");
1345   assert(length_p != NULL, "sanity check");
1346   assert(id_p != NULL, "sanity check");
1347 
1348   // cache size is stored in element[0], other elements offset by one
1349   *length_p = (size_t)cache[0];
1350   if (*length_p <= idnum) {  // cache is too short
1351     *id_p = NULL;
1352   } else {
1353     *id_p = cache[idnum+1];  // fetch jmethodID (if any)
1354   }
1355 }
1356 
1357 
1358 // Lookup a jmethodID, NULL if not found.  Do no blocking, no allocations, no handles
1359 jmethodID instanceKlass::jmethod_id_or_null(methodOop method) {
1360   size_t idnum = (size_t)method->method_idnum();
1361   jmethodID* jmeths = methods_jmethod_ids_acquire();
1362   size_t length;                                // length assigned as debugging crumb
1363   jmethodID id = NULL;
1364   if (jmeths != NULL &&                         // If there is a cache
1365       (length = (size_t)jmeths[0]) > idnum) {   // and if it is long enough,
1366     id = jmeths[idnum+1];                       // Look up the id (may be NULL)
1367   }
1368   return id;
1369 }
1370 
1371 
1372 // Cache an itable index
1373 void instanceKlass::set_cached_itable_index(size_t idnum, int index) {
1374   int* indices = methods_cached_itable_indices_acquire();
1375   int* to_dealloc_indices = NULL;
1376 
1377   // We use a double-check locking idiom here because this cache is
1378   // performance sensitive. In the normal system, this cache only
1379   // transitions from NULL to non-NULL which is safe because we use
1380   // release_set_methods_cached_itable_indices() to advertise the
1381   // new cache. A partially constructed cache should never be seen
1382   // by a racing thread. Cache reads and writes proceed without a
1383   // lock, but creation of the cache itself requires no leaks so a
1384   // lock is generally acquired in that case.
1385   //
1386   // If the RedefineClasses() API has been used, then this cache can
1387   // grow and we'll have transitions from non-NULL to bigger non-NULL.
1388   // Cache creation requires no leaks and we require safety between all
1389   // cache accesses and freeing of the old cache so a lock is generally
1390   // acquired when the RedefineClasses() API has been used.
1391 
1392   if (indices == NULL || idnum_can_increment()) {
1393     // we need a cache or the cache can grow
1394     MutexLocker ml(JNICachedItableIndex_lock);
1395     // reacquire the cache to see if another thread already did the work
1396     indices = methods_cached_itable_indices_acquire();
1397     size_t length = 0;
1398     // cache size is stored in element[0], other elements offset by one
1399     if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
1400       size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
1401       int* new_indices = NEW_C_HEAP_ARRAY(int, size+1, mtClass);
1402       new_indices[0] = (int)size;
1403       // copy any existing entries
1404       size_t i;
1405       for (i = 0; i < length; i++) {
1406         new_indices[i+1] = indices[i+1];
1407       }
1408       // Set all the rest to -1
1409       for (i = length; i < size; i++) {
1410         new_indices[i+1] = -1;
1411       }
1412       if (indices != NULL) {
1413         // We have an old cache to delete so save it for after we
1414         // drop the lock.
1415         to_dealloc_indices = indices;
1416       }
1417       release_set_methods_cached_itable_indices(indices = new_indices);
1418     }
1419 
1420     if (idnum_can_increment()) {
1421       // this cache can grow so we have to write to it safely
1422       indices[idnum+1] = index;
1423     }
1424   } else {
1425     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
1426   }
1427 
1428   if (!idnum_can_increment()) {
1429     // The cache cannot grow and this JNI itable index value does not
1430     // have to be unique like a jmethodID. If there is a race to set it,
1431     // it doesn't matter.
1432     indices[idnum+1] = index;
1433   }
1434 
1435   if (to_dealloc_indices != NULL) {
1436     // we allocated a new cache so free the old one
1437     FreeHeap(to_dealloc_indices);
1438   }
1439 }
1440 
1441 
1442 // Retrieve a cached itable index
1443 int instanceKlass::cached_itable_index(size_t idnum) {
1444   int* indices = methods_cached_itable_indices_acquire();
1445   if (indices != NULL && ((size_t)indices[0]) > idnum) {
1446      // indices exist and are long enough, retrieve possible cached
1447     return indices[idnum+1];
1448   }
1449   return -1;
1450 }
1451 
1452 
1453 //
1454 // Walk the list of dependent nmethods searching for nmethods which
1455 // are dependent on the changes that were passed in and mark them for
1456 // deoptimization.  Returns the number of nmethods found.
1457 //
1458 int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
1459   assert_locked_or_safepoint(CodeCache_lock);
1460   int found = 0;
1461   nmethodBucket* b = _dependencies;
1462   while (b != NULL) {
1463     nmethod* nm = b->get_nmethod();
1464     // since dependencies aren't removed until an nmethod becomes a zombie,
1465     // the dependency list may contain nmethods which aren't alive.
1466     if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
1467       if (TraceDependencies) {
1468         ResourceMark rm;
1469         tty->print_cr("Marked for deoptimization");
1470         tty->print_cr("  context = %s", this->external_name());
1471         changes.print();
1472         nm->print();
1473         nm->print_dependencies();
1474       }
1475       nm->mark_for_deoptimization();
1476       found++;
1477     }
1478     b = b->next();
1479   }
1480   return found;
1481 }
1482 
1483 
1484 //
1485 // Add an nmethodBucket to the list of dependencies for this nmethod.
1486 // It's possible that an nmethod has multiple dependencies on this klass
1487 // so a count is kept for each bucket to guarantee that creation and
1488 // deletion of dependencies is consistent.
1489 //
1490 void instanceKlass::add_dependent_nmethod(nmethod* nm) {
1491   assert_locked_or_safepoint(CodeCache_lock);
1492   nmethodBucket* b = _dependencies;
1493   nmethodBucket* last = NULL;
1494   while (b != NULL) {
1495     if (nm == b->get_nmethod()) {
1496       b->increment();
1497       return;
1498     }
1499     b = b->next();
1500   }
1501   _dependencies = new nmethodBucket(nm, _dependencies);
1502 }
1503 
1504 
1505 //
1506 // Decrement count of the nmethod in the dependency list and remove
1507 // the bucket competely when the count goes to 0.  This method must
1508 // find a corresponding bucket otherwise there's a bug in the
1509 // recording of dependecies.
1510 //
1511 void instanceKlass::remove_dependent_nmethod(nmethod* nm) {
1512   assert_locked_or_safepoint(CodeCache_lock);
1513   nmethodBucket* b = _dependencies;
1514   nmethodBucket* last = NULL;
1515   while (b != NULL) {
1516     if (nm == b->get_nmethod()) {
1517       if (b->decrement() == 0) {
1518         if (last == NULL) {
1519           _dependencies = b->next();
1520         } else {
1521           last->set_next(b->next());
1522         }
1523         delete b;
1524       }
1525       return;
1526     }
1527     last = b;
1528     b = b->next();
1529   }
1530 #ifdef ASSERT
1531   tty->print_cr("### %s can't find dependent nmethod:", this->external_name());
1532   nm->print();
1533 #endif // ASSERT
1534   ShouldNotReachHere();
1535 }
1536 
1537 
1538 #ifndef PRODUCT
1539 void instanceKlass::print_dependent_nmethods(bool verbose) {
1540   nmethodBucket* b = _dependencies;
1541   int idx = 0;
1542   while (b != NULL) {
1543     nmethod* nm = b->get_nmethod();
1544     tty->print("[%d] count=%d { ", idx++, b->count());
1545     if (!verbose) {
1546       nm->print_on(tty, "nmethod");
1547       tty->print_cr(" } ");
1548     } else {
1549       nm->print();
1550       nm->print_dependencies();
1551       tty->print_cr("--- } ");
1552     }
1553     b = b->next();
1554   }
1555 }
1556 
1557 
1558 bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
1559   nmethodBucket* b = _dependencies;
1560   while (b != NULL) {
1561     if (nm == b->get_nmethod()) {
1562       return true;
1563     }
1564     b = b->next();
1565   }
1566   return false;
1567 }
1568 #endif //PRODUCT
1569 
1570 
1571 #ifdef ASSERT
1572 template <class T> void assert_is_in(T *p) {
1573   T heap_oop = oopDesc::load_heap_oop(p);
1574   if (!oopDesc::is_null(heap_oop)) {
1575     oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1576     assert(Universe::heap()->is_in(o), "should be in heap");
1577   }
1578 }
1579 template <class T> void assert_is_in_closed_subset(T *p) {
1580   T heap_oop = oopDesc::load_heap_oop(p);
1581   if (!oopDesc::is_null(heap_oop)) {
1582     oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1583     assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
1584   }
1585 }
1586 template <class T> void assert_is_in_reserved(T *p) {
1587   T heap_oop = oopDesc::load_heap_oop(p);
1588   if (!oopDesc::is_null(heap_oop)) {
1589     oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1590     assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
1591   }
1592 }
1593 template <class T> void assert_nothing(T *p) {}
1594 
1595 #else
1596 template <class T> void assert_is_in(T *p) {}
1597 template <class T> void assert_is_in_closed_subset(T *p) {}
1598 template <class T> void assert_is_in_reserved(T *p) {}
1599 template <class T> void assert_nothing(T *p) {}
1600 #endif // ASSERT
1601 
1602 //
1603 // Macros that iterate over areas of oops which are specialized on type of
1604 // oop pointer either narrow or wide, depending on UseCompressedOops
1605 //
1606 // Parameters are:
1607 //   T         - type of oop to point to (either oop or narrowOop)
1608 //   start_p   - starting pointer for region to iterate over
1609 //   count     - number of oops or narrowOops to iterate over
1610 //   do_oop    - action to perform on each oop (it's arbitrary C code which
1611 //               makes it more efficient to put in a macro rather than making
1612 //               it a template function)
1613 //   assert_fn - assert function which is template function because performance
1614 //               doesn't matter when enabled.
1615 #define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
1616   T, start_p, count, do_oop,                \
1617   assert_fn)                                \
1618 {                                           \
1619   T* p         = (T*)(start_p);             \
1620   T* const end = p + (count);               \
1621   while (p < end) {                         \
1622     (assert_fn)(p);                         \
1623     do_oop;                                 \
1624     ++p;                                    \
1625   }                                         \
1626 }
1627 
1628 #define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
1629   T, start_p, count, do_oop,                \
1630   assert_fn)                                \
1631 {                                           \
1632   T* const start = (T*)(start_p);           \
1633   T*       p     = start + (count);         \
1634   while (start < p) {                       \
1635     --p;                                    \
1636     (assert_fn)(p);                         \
1637     do_oop;                                 \
1638   }                                         \
1639 }
1640 
1641 #define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
1642   T, start_p, count, low, high,             \
1643   do_oop, assert_fn)                        \
1644 {                                           \
1645   T* const l = (T*)(low);                   \
1646   T* const h = (T*)(high);                  \
1647   assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
1648          mask_bits((intptr_t)h, sizeof(T)-1) == 0,   \
1649          "bounded region must be properly aligned"); \
1650   T* p       = (T*)(start_p);               \
1651   T* end     = p + (count);                 \
1652   if (p < l) p = l;                         \
1653   if (end > h) end = h;                     \
1654   while (p < end) {                         \
1655     (assert_fn)(p);                         \
1656     do_oop;                                 \
1657     ++p;                                    \
1658   }                                         \
1659 }
1660 
1661 
1662 // The following macros call specialized macros, passing either oop or
1663 // narrowOop as the specialization type.  These test the UseCompressedOops
1664 // flag.
1665 #define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn)            \
1666 {                                                                        \
1667   /* Compute oopmap block range. The common case                         \
1668      is nonstatic_oop_map_size == 1. */                                  \
1669   OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
1670   OopMapBlock* const end_map = map + nonstatic_oop_map_count();          \
1671   if (UseCompressedOops) {                                               \
1672     while (map < end_map) {                                              \
1673       InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop,                   \
1674         obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
1675         do_oop, assert_fn)                                               \
1676       ++map;                                                             \
1677     }                                                                    \
1678   } else {                                                               \
1679     while (map < end_map) {                                              \
1680       InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,                         \
1681         obj->obj_field_addr<oop>(map->offset()), map->count(),           \
1682         do_oop, assert_fn)                                               \
1683       ++map;                                                             \
1684     }                                                                    \
1685   }                                                                      \
1686 }
1687 
1688 #define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn)    \
1689 {                                                                        \
1690   OopMapBlock* const start_map = start_of_nonstatic_oop_maps();          \
1691   OopMapBlock* map             = start_map + nonstatic_oop_map_count();  \
1692   if (UseCompressedOops) {                                               \
1693     while (start_map < map) {                                            \
1694       --map;                                                             \
1695       InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop,           \
1696         obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
1697         do_oop, assert_fn)                                               \
1698     }                                                                    \
1699   } else {                                                               \
1700     while (start_map < map) {                                            \
1701       --map;                                                             \
1702       InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop,                 \
1703         obj->obj_field_addr<oop>(map->offset()), map->count(),           \
1704         do_oop, assert_fn)                                               \
1705     }                                                                    \
1706   }                                                                      \
1707 }
1708 
1709 #define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop,    \
1710                                               assert_fn)                 \
1711 {                                                                        \
1712   /* Compute oopmap block range. The common case is                      \
1713      nonstatic_oop_map_size == 1, so we accept the                       \
1714      usually non-existent extra overhead of examining                    \
1715      all the maps. */                                                    \
1716   OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
1717   OopMapBlock* const end_map = map + nonstatic_oop_map_count();          \
1718   if (UseCompressedOops) {                                               \
1719     while (map < end_map) {                                              \
1720       InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,           \
1721         obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
1722         low, high,                                                       \
1723         do_oop, assert_fn)                                               \
1724       ++map;                                                             \
1725     }                                                                    \
1726   } else {                                                               \
1727     while (map < end_map) {                                              \
1728       InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,                 \
1729         obj->obj_field_addr<oop>(map->offset()), map->count(),           \
1730         low, high,                                                       \
1731         do_oop, assert_fn)                                               \
1732       ++map;                                                             \
1733     }                                                                    \
1734   }                                                                      \
1735 }
1736 
1737 void instanceKlass::oop_follow_contents(oop obj) {
1738   assert(obj != NULL, "can't follow the content of NULL object");
1739   obj->follow_header();
1740   InstanceKlass_OOP_MAP_ITERATE( \
1741     obj, \
1742     MarkSweep::mark_and_push(p), \
1743     assert_is_in_closed_subset)
1744 }
1745 
1746 #ifndef SERIALGC
1747 void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
1748                                         oop obj) {
1749   assert(obj != NULL, "can't follow the content of NULL object");
1750   obj->follow_header(cm);
1751   InstanceKlass_OOP_MAP_ITERATE( \
1752     obj, \
1753     PSParallelCompact::mark_and_push(cm, p), \
1754     assert_is_in)
1755 }
1756 #endif // SERIALGC
1757 
1758 // closure's do_header() method dicates whether the given closure should be
1759 // applied to the klass ptr in the object header.
1760 
1761 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)        \
1762                                                                              \
1763 int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
1764   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
1765   /* header */                                                          \
1766   if (closure->do_header()) {                                           \
1767     obj->oop_iterate_header(closure);                                   \
1768   }                                                                     \
1769   InstanceKlass_OOP_MAP_ITERATE(                                        \
1770     obj,                                                                \
1771     SpecializationStats::                                               \
1772       record_do_oop_call##nv_suffix(SpecializationStats::ik);           \
1773     (closure)->do_oop##nv_suffix(p),                                    \
1774     assert_is_in_closed_subset)                                         \
1775   return size_helper();                                                 \
1776 }
1777 
1778 #ifndef SERIALGC
1779 #define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
1780                                                                                 \
1781 int instanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj,                \
1782                                               OopClosureType* closure) {        \
1783   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
1784   /* header */                                                                  \
1785   if (closure->do_header()) {                                                   \
1786     obj->oop_iterate_header(closure);                                           \
1787   }                                                                             \
1788   /* instance variables */                                                      \
1789   InstanceKlass_OOP_MAP_REVERSE_ITERATE(                                        \
1790     obj,                                                                        \
1791     SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik);\
1792     (closure)->do_oop##nv_suffix(p),                                            \
1793     assert_is_in_closed_subset)                                                 \
1794    return size_helper();                                                        \
1795 }
1796 #endif // !SERIALGC
1797 
1798 #define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
1799                                                                         \
1800 int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj,              \
1801                                                   OopClosureType* closure, \
1802                                                   MemRegion mr) {          \
1803   SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
1804   if (closure->do_header()) {                                            \
1805     obj->oop_iterate_header(closure, mr);                                \
1806   }                                                                      \
1807   InstanceKlass_BOUNDED_OOP_MAP_ITERATE(                                 \
1808     obj, mr.start(), mr.end(),                                           \
1809     (closure)->do_oop##nv_suffix(p),                                     \
1810     assert_is_in_closed_subset)                                          \
1811   return size_helper();                                                  \
1812 }
1813 
1814 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
1815 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN)
1816 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1817 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1818 #ifndef SERIALGC
1819 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
1820 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
1821 #endif // !SERIALGC
1822 
1823 int instanceKlass::oop_adjust_pointers(oop obj) {
1824   int size = size_helper();
1825   InstanceKlass_OOP_MAP_ITERATE( \
1826     obj, \
1827     MarkSweep::adjust_pointer(p), \
1828     assert_is_in)
1829   obj->adjust_header();
1830   return size;
1831 }
1832 
1833 #ifndef SERIALGC
1834 void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
1835   InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
1836     obj, \
1837     if (PSScavenge::should_scavenge(p)) { \
1838       pm->claim_or_forward_depth(p); \
1839     }, \
1840     assert_nothing )
1841 }
1842 
1843 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
1844   InstanceKlass_OOP_MAP_ITERATE( \
1845     obj, \
1846     PSParallelCompact::adjust_pointer(p), \
1847     assert_nothing)
1848   return size_helper();
1849 }
1850 
1851 #endif // SERIALGC
1852 
1853 // This klass is alive but the implementor link is not followed/updated.
1854 // Subklass and sibling links are handled by Klass::follow_weak_klass_links
1855 
1856 void instanceKlass::follow_weak_klass_links(
1857   BoolObjectClosure* is_alive, OopClosure* keep_alive) {
1858   assert(is_alive->do_object_b(as_klassOop()), "this oop should be live");
1859 
1860   if (is_interface()) {
1861     if (ClassUnloading) {
1862       klassOop impl = implementor();
1863       if (impl != NULL) {
1864         if (!is_alive->do_object_b(impl)) {
1865           // remove this guy
1866           *adr_implementor() = NULL;
1867         }
1868       }
1869     } else {
1870       assert(adr_implementor() != NULL, "just checking");
1871       keep_alive->do_oop(adr_implementor());
1872     }
1873   }
1874 
1875   Klass::follow_weak_klass_links(is_alive, keep_alive);
1876 }
1877 
1878 void instanceKlass::remove_unshareable_info() {
1879   Klass::remove_unshareable_info();
1880   init_implementor();
1881 }
1882 
1883 static void clear_all_breakpoints(methodOop m) {
1884   m->clear_all_breakpoints();
1885 }
1886 
1887 void instanceKlass::release_C_heap_structures() {
1888   // Deallocate oop map cache
1889   if (_oop_map_cache != NULL) {
1890     delete _oop_map_cache;
1891     _oop_map_cache = NULL;
1892   }
1893 
1894   // Deallocate JNI identifiers for jfieldIDs
1895   JNIid::deallocate(jni_ids());
1896   set_jni_ids(NULL);
1897 
1898   jmethodID* jmeths = methods_jmethod_ids_acquire();
1899   if (jmeths != (jmethodID*)NULL) {
1900     release_set_methods_jmethod_ids(NULL);
1901     FreeHeap(jmeths);
1902   }
1903 
1904   // Deallocate MemberNameTable
1905   {
1906     Mutex* lock_or_null = SafepointSynchronize::is_at_safepoint() ? NULL : MemberNameTable_lock;
1907     MutexLockerEx ml(lock_or_null, Mutex::_no_safepoint_check_flag);
1908     MemberNameTable* mnt = member_names();
1909     if (mnt != NULL) {
1910       delete mnt;
1911       set_member_names(NULL);
1912     }
1913   }
1914 
1915   int* indices = methods_cached_itable_indices_acquire();
1916   if (indices != (int*)NULL) {
1917     release_set_methods_cached_itable_indices(NULL);
1918     FreeHeap(indices);
1919   }
1920 
1921   // release dependencies
1922   nmethodBucket* b = _dependencies;
1923   _dependencies = NULL;
1924   while (b != NULL) {
1925     nmethodBucket* next = b->next();
1926     delete b;
1927     b = next;
1928   }
1929 
1930   // Deallocate breakpoint records
1931   if (breakpoints() != 0x0) {
1932     methods_do(clear_all_breakpoints);
1933     assert(breakpoints() == 0x0, "should have cleared breakpoints");
1934   }
1935 
1936   // deallocate information about previous versions
1937   if (_previous_versions != NULL) {
1938     for (int i = _previous_versions->length() - 1; i >= 0; i--) {
1939       PreviousVersionNode * pv_node = _previous_versions->at(i);
1940       delete pv_node;
1941     }
1942     delete _previous_versions;
1943     _previous_versions = NULL;
1944   }
1945 
1946   // deallocate the cached class file
1947   if (_cached_class_file_bytes != NULL) {
1948     os::free(_cached_class_file_bytes, mtClass);
1949     _cached_class_file_bytes = NULL;
1950     _cached_class_file_len = 0;
1951   }
1952 
1953   // Decrement symbol reference counts associated with the unloaded class.
1954   if (_name != NULL) _name->decrement_refcount();
1955   // unreference array name derived from this class name (arrays of an unloaded
1956   // class can't be referenced anymore).
1957   if (_array_name != NULL)  _array_name->decrement_refcount();
1958   if (_source_file_name != NULL) _source_file_name->decrement_refcount();
1959   // walk constant pool and decrement symbol reference counts
1960   _constants->unreference_symbols();
1961 
1962   if (_source_debug_extension != NULL) FREE_C_HEAP_ARRAY(char, _source_debug_extension, mtClass);
1963 
1964   assert(_total_instanceKlass_count >= 1, "Sanity check");
1965   Atomic::dec(&_total_instanceKlass_count);
1966 }
1967 
1968 void instanceKlass::set_source_file_name(Symbol* n) {
1969   _source_file_name = n;
1970   if (_source_file_name != NULL) _source_file_name->increment_refcount();
1971 }
1972 
1973 void instanceKlass::set_source_debug_extension(char* array, int length) {
1974   if (array == NULL) {
1975     _source_debug_extension = NULL;
1976   } else {
1977     // Adding one to the attribute length in order to store a null terminator
1978     // character could cause an overflow because the attribute length is
1979     // already coded with an u4 in the classfile, but in practice, it's
1980     // unlikely to happen.
1981     assert((length+1) > length, "Overflow checking");
1982     char* sde = NEW_C_HEAP_ARRAY(char, (length + 1), mtClass);
1983     for (int i = 0; i < length; i++) {
1984       sde[i] = array[i];
1985     }
1986     sde[length] = '\0';
1987     _source_debug_extension = sde;
1988   }
1989 }
1990 
1991 address instanceKlass::static_field_addr(int offset) {
1992   return (address)(offset + instanceMirrorKlass::offset_of_static_fields() + (intptr_t)java_mirror());
1993 }
1994 
1995 
1996 const char* instanceKlass::signature_name() const {
1997   const char* src = (const char*) (name()->as_C_string());
1998   const int src_length = (int)strlen(src);
1999   char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3);
2000   int src_index = 0;
2001   int dest_index = 0;
2002   dest[dest_index++] = 'L';
2003   while (src_index < src_length) {
2004     dest[dest_index++] = src[src_index++];
2005   }
2006   dest[dest_index++] = ';';
2007   dest[dest_index] = '\0';
2008   return dest;
2009 }
2010 
2011 // different verisons of is_same_class_package
2012 bool instanceKlass::is_same_class_package(klassOop class2) {
2013   klassOop class1 = as_klassOop();
2014   oop classloader1 = instanceKlass::cast(class1)->class_loader();
2015   Symbol* classname1 = Klass::cast(class1)->name();
2016 
2017   if (Klass::cast(class2)->oop_is_objArray()) {
2018     class2 = objArrayKlass::cast(class2)->bottom_klass();
2019   }
2020   oop classloader2;
2021   if (Klass::cast(class2)->oop_is_instance()) {
2022     classloader2 = instanceKlass::cast(class2)->class_loader();
2023   } else {
2024     assert(Klass::cast(class2)->oop_is_typeArray(), "should be type array");
2025     classloader2 = NULL;
2026   }
2027   Symbol* classname2 = Klass::cast(class2)->name();
2028 
2029   return instanceKlass::is_same_class_package(classloader1, classname1,
2030                                               classloader2, classname2);
2031 }
2032 
2033 bool instanceKlass::is_same_class_package(oop classloader2, Symbol* classname2) {
2034   klassOop class1 = as_klassOop();
2035   oop classloader1 = instanceKlass::cast(class1)->class_loader();
2036   Symbol* classname1 = Klass::cast(class1)->name();
2037 
2038   return instanceKlass::is_same_class_package(classloader1, classname1,
2039                                               classloader2, classname2);
2040 }
2041 
2042 // return true if two classes are in the same package, classloader
2043 // and classname information is enough to determine a class's package
2044 bool instanceKlass::is_same_class_package(oop class_loader1, Symbol* class_name1,
2045                                           oop class_loader2, Symbol* class_name2) {
2046   if (class_loader1 != class_loader2) {
2047     return false;
2048   } else if (class_name1 == class_name2) {
2049     return true;                // skip painful bytewise comparison
2050   } else {
2051     ResourceMark rm;
2052 
2053     // The Symbol*'s are in UTF8 encoding. Since we only need to check explicitly
2054     // for ASCII characters ('/', 'L', '['), we can keep them in UTF8 encoding.
2055     // Otherwise, we just compare jbyte values between the strings.
2056     const jbyte *name1 = class_name1->base();
2057     const jbyte *name2 = class_name2->base();
2058 
2059     const jbyte *last_slash1 = UTF8::strrchr(name1, class_name1->utf8_length(), '/');
2060     const jbyte *last_slash2 = UTF8::strrchr(name2, class_name2->utf8_length(), '/');
2061 
2062     if ((last_slash1 == NULL) || (last_slash2 == NULL)) {
2063       // One of the two doesn't have a package.  Only return true
2064       // if the other one also doesn't have a package.
2065       return last_slash1 == last_slash2;
2066     } else {
2067       // Skip over '['s
2068       if (*name1 == '[') {
2069         do {
2070           name1++;
2071         } while (*name1 == '[');
2072         if (*name1 != 'L') {
2073           // Something is terribly wrong.  Shouldn't be here.
2074           return false;
2075         }
2076       }
2077       if (*name2 == '[') {
2078         do {
2079           name2++;
2080         } while (*name2 == '[');
2081         if (*name2 != 'L') {
2082           // Something is terribly wrong.  Shouldn't be here.
2083           return false;
2084         }
2085       }
2086 
2087       // Check that package part is identical
2088       int length1 = last_slash1 - name1;
2089       int length2 = last_slash2 - name2;
2090 
2091       return UTF8::equal(name1, length1, name2, length2);
2092     }
2093   }
2094 }
2095 
2096 // Returns true iff super_method can be overridden by a method in targetclassname
2097 // See JSL 3rd edition 8.4.6.1
2098 // Assumes name-signature match
2099 // "this" is instanceKlass of super_method which must exist
2100 // note that the instanceKlass of the method in the targetclassname has not always been created yet
2101 bool instanceKlass::is_override(methodHandle super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS) {
2102    // Private methods can not be overridden
2103    if (super_method->is_private()) {
2104      return false;
2105    }
2106    // If super method is accessible, then override
2107    if ((super_method->is_protected()) ||
2108        (super_method->is_public())) {
2109      return true;
2110    }
2111    // Package-private methods are not inherited outside of package
2112    assert(super_method->is_package_private(), "must be package private");
2113    return(is_same_class_package(targetclassloader(), targetclassname));
2114 }
2115 
2116 /* defined for now in jvm.cpp, for historical reasons *--
2117 klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle self,
2118                                                      Symbol*& simple_name_result, TRAPS) {
2119   ...
2120 }
2121 */
2122 
2123 // tell if two classes have the same enclosing class (at package level)
2124 bool instanceKlass::is_same_package_member_impl(instanceKlassHandle class1,
2125                                                 klassOop class2_oop, TRAPS) {
2126   if (class2_oop == class1->as_klassOop())          return true;
2127   if (!Klass::cast(class2_oop)->oop_is_instance())  return false;
2128   instanceKlassHandle class2(THREAD, class2_oop);
2129 
2130   // must be in same package before we try anything else
2131   if (!class1->is_same_class_package(class2->class_loader(), class2->name()))
2132     return false;
2133 
2134   // As long as there is an outer1.getEnclosingClass,
2135   // shift the search outward.
2136   instanceKlassHandle outer1 = class1;
2137   for (;;) {
2138     // As we walk along, look for equalities between outer1 and class2.
2139     // Eventually, the walks will terminate as outer1 stops
2140     // at the top-level class around the original class.
2141     bool ignore_inner_is_member;
2142     klassOop next = outer1->compute_enclosing_class(&ignore_inner_is_member,
2143                                                     CHECK_false);
2144     if (next == NULL)  break;
2145     if (next == class2())  return true;
2146     outer1 = instanceKlassHandle(THREAD, next);
2147   }
2148 
2149   // Now do the same for class2.
2150   instanceKlassHandle outer2 = class2;
2151   for (;;) {
2152     bool ignore_inner_is_member;
2153     klassOop next = outer2->compute_enclosing_class(&ignore_inner_is_member,
2154                                                     CHECK_false);
2155     if (next == NULL)  break;
2156     // Might as well check the new outer against all available values.
2157     if (next == class1())  return true;
2158     if (next == outer1())  return true;
2159     outer2 = instanceKlassHandle(THREAD, next);
2160   }
2161 
2162   // If by this point we have not found an equality between the
2163   // two classes, we know they are in separate package members.
2164   return false;
2165 }
2166 
2167 
2168 jint instanceKlass::compute_modifier_flags(TRAPS) const {
2169   klassOop k = as_klassOop();
2170   jint access = access_flags().as_int();
2171 
2172   // But check if it happens to be member class.
2173   instanceKlassHandle ik(THREAD, k);
2174   InnerClassesIterator iter(ik);
2175   for (; !iter.done(); iter.next()) {
2176     int ioff = iter.inner_class_info_index();
2177     // Inner class attribute can be zero, skip it.
2178     // Strange but true:  JVM spec. allows null inner class refs.
2179     if (ioff == 0) continue;
2180 
2181     // only look at classes that are already loaded
2182     // since we are looking for the flags for our self.
2183     Symbol* inner_name = ik->constants()->klass_name_at(ioff);
2184     if ((ik->name() == inner_name)) {
2185       // This is really a member class.
2186       access = iter.inner_access_flags();
2187       break;
2188     }
2189   }
2190   // Remember to strip ACC_SUPER bit
2191   return (access & (~JVM_ACC_SUPER)) & JVM_ACC_WRITTEN_FLAGS;
2192 }
2193 
2194 jint instanceKlass::jvmti_class_status() const {
2195   jint result = 0;
2196 
2197   if (is_linked()) {
2198     result |= JVMTI_CLASS_STATUS_VERIFIED | JVMTI_CLASS_STATUS_PREPARED;
2199   }
2200 
2201   if (is_initialized()) {
2202     assert(is_linked(), "Class status is not consistent");
2203     result |= JVMTI_CLASS_STATUS_INITIALIZED;
2204   }
2205   if (is_in_error_state()) {
2206     result |= JVMTI_CLASS_STATUS_ERROR;
2207   }
2208   return result;
2209 }
2210 
2211 methodOop instanceKlass::method_at_itable(klassOop holder, int index, TRAPS) {
2212   itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable();
2213   int method_table_offset_in_words = ioe->offset()/wordSize;
2214   int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words())
2215                        / itableOffsetEntry::size();
2216 
2217   for (int cnt = 0 ; ; cnt ++, ioe ++) {
2218     // If the interface isn't implemented by the receiver class,
2219     // the VM should throw IncompatibleClassChangeError.
2220     if (cnt >= nof_interfaces) {
2221       THROW_0(vmSymbols::java_lang_IncompatibleClassChangeError());
2222     }
2223 
2224     klassOop ik = ioe->interface_klass();
2225     if (ik == holder) break;
2226   }
2227 
2228   itableMethodEntry* ime = ioe->first_method_entry(as_klassOop());
2229   methodOop m = ime[index].method();
2230   if (m == NULL) {
2231     THROW_0(vmSymbols::java_lang_AbstractMethodError());
2232   }
2233   return m;
2234 }
2235 
2236 // On-stack replacement stuff
2237 void instanceKlass::add_osr_nmethod(nmethod* n) {
2238   // only one compilation can be active
2239   NEEDS_CLEANUP
2240   // This is a short non-blocking critical region, so the no safepoint check is ok.
2241   OsrList_lock->lock_without_safepoint_check();
2242   assert(n->is_osr_method(), "wrong kind of nmethod");
2243   n->set_osr_link(osr_nmethods_head());
2244   set_osr_nmethods_head(n);
2245   // Raise the highest osr level if necessary
2246   if (TieredCompilation) {
2247     methodOop m = n->method();
2248     m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
2249   }
2250   // Remember to unlock again
2251   OsrList_lock->unlock();
2252 
2253   // Get rid of the osr methods for the same bci that have lower levels.
2254   if (TieredCompilation) {
2255     for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) {
2256       nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true);
2257       if (inv != NULL && inv->is_in_use()) {
2258         inv->make_not_entrant();
2259       }
2260     }
2261   }
2262 }
2263 
2264 
2265 void instanceKlass::remove_osr_nmethod(nmethod* n) {
2266   // This is a short non-blocking critical region, so the no safepoint check is ok.
2267   OsrList_lock->lock_without_safepoint_check();
2268   assert(n->is_osr_method(), "wrong kind of nmethod");
2269   nmethod* last = NULL;
2270   nmethod* cur  = osr_nmethods_head();
2271   int max_level = CompLevel_none;  // Find the max comp level excluding n
2272   methodOop m = n->method();
2273   // Search for match
2274   while(cur != NULL && cur != n) {
2275     if (TieredCompilation) {
2276       // Find max level before n
2277       max_level = MAX2(max_level, cur->comp_level());
2278     }
2279     last = cur;
2280     cur = cur->osr_link();
2281   }
2282   nmethod* next = NULL;
2283   if (cur == n) {
2284     next = cur->osr_link();
2285     if (last == NULL) {
2286       // Remove first element
2287       set_osr_nmethods_head(next);
2288     } else {
2289       last->set_osr_link(next);
2290     }
2291   }
2292   n->set_osr_link(NULL);
2293   if (TieredCompilation) {
2294     cur = next;
2295     while (cur != NULL) {
2296       // Find max level after n
2297       max_level = MAX2(max_level, cur->comp_level());
2298       cur = cur->osr_link();
2299     }
2300     m->set_highest_osr_comp_level(max_level);
2301   }
2302   // Remember to unlock again
2303   OsrList_lock->unlock();
2304 }
2305 
2306 nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci, int comp_level, bool match_level) const {
2307   // This is a short non-blocking critical region, so the no safepoint check is ok.
2308   OsrList_lock->lock_without_safepoint_check();
2309   nmethod* osr = osr_nmethods_head();
2310   nmethod* best = NULL;
2311   while (osr != NULL) {
2312     assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
2313     // There can be a time when a c1 osr method exists but we are waiting
2314     // for a c2 version. When c2 completes its osr nmethod we will trash
2315     // the c1 version and only be able to find the c2 version. However
2316     // while we overflow in the c1 code at back branches we don't want to
2317     // try and switch to the same code as we are already running
2318 
2319     if (osr->method() == m &&
2320         (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) {
2321       if (match_level) {
2322         if (osr->comp_level() == comp_level) {
2323           // Found a match - return it.
2324           OsrList_lock->unlock();
2325           return osr;
2326         }
2327       } else {
2328         if (best == NULL || (osr->comp_level() > best->comp_level())) {
2329           if (osr->comp_level() == CompLevel_highest_tier) {
2330             // Found the best possible - return it.
2331             OsrList_lock->unlock();
2332             return osr;
2333           }
2334           best = osr;
2335         }
2336       }
2337     }
2338     osr = osr->osr_link();
2339   }
2340   OsrList_lock->unlock();
2341   if (best != NULL && best->comp_level() >= comp_level && match_level == false) {
2342     return best;
2343   }
2344   return NULL;
2345 }
2346 
2347 bool instanceKlass::add_member_name(Handle mem_name) {
2348   jweak mem_name_wref = JNIHandles::make_weak_global(mem_name);
2349   MutexLocker ml(MemberNameTable_lock);
2350   DEBUG_ONLY(No_Safepoint_Verifier nsv);
2351 
2352   // Check if method has been redefined while taking out MemberNameTable_lock, if so
2353   // return false.  We cannot cache obsolete methods. They will crash when the function
2354   // is called!
2355   methodOop method = (methodOop) java_lang_invoke_MemberName::vmtarget(mem_name());
2356   if (method->is_obsolete()) {
2357     return false;
2358   } else if (method->is_old()) {
2359     // Replace method with redefined version
2360     java_lang_invoke_MemberName::set_vmtarget(mem_name(), method_with_idnum(method->method_idnum()));
2361   }
2362 
2363   if (_member_names == NULL) {
2364     _member_names = new (ResourceObj::C_HEAP, mtClass) MemberNameTable(idnum_allocated_count());
2365   }
2366   _member_names->add_member_name(mem_name_wref);
2367   return true;
2368 }
2369 
2370 // -----------------------------------------------------------------------------------------------------
2371 #ifndef PRODUCT
2372 
2373 // Printing
2374 
2375 #define BULLET  " - "
2376 
2377 void FieldPrinter::do_field(fieldDescriptor* fd) {
2378   _st->print(BULLET);
2379    if (_obj == NULL) {
2380      fd->print_on(_st);
2381      _st->cr();
2382    } else {
2383      fd->print_on_for(_st, _obj);
2384      _st->cr();
2385    }
2386 }
2387 
2388 
2389 void instanceKlass::oop_print_on(oop obj, outputStream* st) {
2390   Klass::oop_print_on(obj, st);
2391 
2392   if (as_klassOop() == SystemDictionary::String_klass()) {
2393     typeArrayOop value  = java_lang_String::value(obj);
2394     juint        offset = java_lang_String::offset(obj);
2395     juint        length = java_lang_String::length(obj);
2396     if (value != NULL &&
2397         value->is_typeArray() &&
2398         offset          <= (juint) value->length() &&
2399         offset + length <= (juint) value->length()) {
2400       st->print(BULLET"string: ");
2401       Handle h_obj(obj);
2402       java_lang_String::print(h_obj, st);
2403       st->cr();
2404       if (!WizardMode)  return;  // that is enough
2405     }
2406   }
2407 
2408   st->print_cr(BULLET"---- fields (total size %d words):", oop_size(obj));
2409   FieldPrinter print_field(st, obj);
2410   do_nonstatic_fields(&print_field);
2411 
2412   if (as_klassOop() == SystemDictionary::Class_klass()) {
2413     st->print(BULLET"signature: ");
2414     java_lang_Class::print_signature(obj, st);
2415     st->cr();
2416     klassOop mirrored_klass = java_lang_Class::as_klassOop(obj);
2417     st->print(BULLET"fake entry for mirror: ");
2418     mirrored_klass->print_value_on(st);
2419     st->cr();
2420     st->print(BULLET"fake entry resolved_constructor: ");
2421     methodOop ctor = java_lang_Class::resolved_constructor(obj);
2422     ctor->print_value_on(st);
2423     klassOop array_klass = java_lang_Class::array_klass(obj);
2424     st->cr();
2425     st->print(BULLET"fake entry for array: ");
2426     array_klass->print_value_on(st);
2427     st->cr();
2428     st->print_cr(BULLET"fake entry for oop_size: %d", java_lang_Class::oop_size(obj));
2429     st->print_cr(BULLET"fake entry for static_oop_field_count: %d", java_lang_Class::static_oop_field_count(obj));
2430     klassOop real_klass = java_lang_Class::as_klassOop(obj);
2431     if (real_klass != NULL && real_klass->klass_part()->oop_is_instance()) {
2432       instanceKlass::cast(real_klass)->do_local_static_fields(&print_field);
2433     }
2434   } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
2435     st->print(BULLET"signature: ");
2436     java_lang_invoke_MethodType::print_signature(obj, st);
2437     st->cr();
2438   }
2439 }
2440 
2441 #endif //PRODUCT
2442 
2443 void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
2444   st->print("a ");
2445   name()->print_value_on(st);
2446   obj->print_address_on(st);
2447   if (as_klassOop() == SystemDictionary::String_klass()
2448       && java_lang_String::value(obj) != NULL) {
2449     ResourceMark rm;
2450     int len = java_lang_String::length(obj);
2451     int plen = (len < 24 ? len : 12);
2452     char* str = java_lang_String::as_utf8_string(obj, 0, plen);
2453     st->print(" = \"%s\"", str);
2454     if (len > plen)
2455       st->print("...[%d]", len);
2456   } else if (as_klassOop() == SystemDictionary::Class_klass()) {
2457     klassOop k = java_lang_Class::as_klassOop(obj);
2458     st->print(" = ");
2459     if (k != NULL) {
2460       k->print_value_on(st);
2461     } else {
2462       const char* tname = type2name(java_lang_Class::primitive_type(obj));
2463       st->print("%s", tname ? tname : "type?");
2464     }
2465   } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
2466     st->print(" = ");
2467     java_lang_invoke_MethodType::print_signature(obj, st);
2468   } else if (java_lang_boxing_object::is_instance(obj)) {
2469     st->print(" = ");
2470     java_lang_boxing_object::print(obj, st);
2471   } else if (as_klassOop() == SystemDictionary::LambdaForm_klass()) {
2472     oop vmentry = java_lang_invoke_LambdaForm::vmentry(obj);
2473     if (vmentry != NULL) {
2474       st->print(" => ");
2475       vmentry->print_value_on(st);
2476     }
2477   } else if (as_klassOop() == SystemDictionary::MemberName_klass()) {
2478     oop vmtarget = java_lang_invoke_MemberName::vmtarget(obj);
2479     if (vmtarget != NULL) {
2480       st->print(" = ");
2481       vmtarget->print_value_on(st);
2482     } else {
2483       java_lang_invoke_MemberName::clazz(obj)->print_value_on(st);
2484       st->print(".");
2485       java_lang_invoke_MemberName::name(obj)->print_value_on(st);
2486     }
2487   }
2488 }
2489 
2490 const char* instanceKlass::internal_name() const {
2491   return external_name();
2492 }
2493 
2494 // Verification
2495 
2496 class VerifyFieldClosure: public OopClosure {
2497  protected:
2498   template <class T> void do_oop_work(T* p) {
2499     guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap");
2500     oop obj = oopDesc::load_decode_heap_oop(p);
2501     if (!obj->is_oop_or_null()) {
2502       tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj);
2503       Universe::print();
2504       guarantee(false, "boom");
2505     }
2506   }
2507  public:
2508   virtual void do_oop(oop* p)       { VerifyFieldClosure::do_oop_work(p); }
2509   virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
2510 };
2511 
2512 void instanceKlass::oop_verify_on(oop obj, outputStream* st) {
2513   Klass::oop_verify_on(obj, st);
2514   VerifyFieldClosure blk;
2515   oop_oop_iterate(obj, &blk);
2516 }
2517 
2518 // JNIid class for jfieldIDs only
2519 // Note to reviewers:
2520 // These JNI functions are just moved over to column 1 and not changed
2521 // in the compressed oops workspace.
2522 JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
2523   _holder = holder;
2524   _offset = offset;
2525   _next = next;
2526   debug_only(_is_static_field_id = false;)
2527 }
2528 
2529 
2530 JNIid* JNIid::find(int offset) {
2531   JNIid* current = this;
2532   while (current != NULL) {
2533     if (current->offset() == offset) return current;
2534     current = current->next();
2535   }
2536   return NULL;
2537 }
2538 
2539 void JNIid::oops_do(OopClosure* f) {
2540   for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
2541     f->do_oop(cur->holder_addr());
2542   }
2543 }
2544 
2545 void JNIid::deallocate(JNIid* current) {
2546   while (current != NULL) {
2547     JNIid* next = current->next();
2548     delete current;
2549     current = next;
2550   }
2551 }
2552 
2553 
2554 void JNIid::verify(klassOop holder) {
2555   int first_field_offset  = instanceMirrorKlass::offset_of_static_fields();
2556   int end_field_offset;
2557   end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
2558 
2559   JNIid* current = this;
2560   while (current != NULL) {
2561     guarantee(current->holder() == holder, "Invalid klass in JNIid");
2562 #ifdef ASSERT
2563     int o = current->offset();
2564     if (current->is_static_field_id()) {
2565       guarantee(o >= first_field_offset  && o < end_field_offset,  "Invalid static field offset in JNIid");
2566     }
2567 #endif
2568     current = current->next();
2569   }
2570 }
2571 
2572 
2573 #ifdef ASSERT
2574 void instanceKlass::set_init_state(ClassState state) {
2575   bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
2576                                                : (_init_state < state);
2577   assert(good_state || state == allocated, "illegal state transition");
2578   _init_state = (u1)state;
2579 }
2580 #endif
2581 
2582 
2583 // RedefineClasses() support for previous versions:
2584 
2585 // Add an information node that contains weak references to the
2586 // interesting parts of the previous version of the_class.
2587 // This is also where we clean out any unused weak references.
2588 // Note that while we delete nodes from the _previous_versions
2589 // array, we never delete the array itself until the klass is
2590 // unloaded. The has_been_redefined() query depends on that fact.
2591 //
2592 void instanceKlass::add_previous_version(instanceKlassHandle ikh,
2593        BitMap* emcp_methods, int emcp_method_count) {
2594   assert(Thread::current()->is_VM_thread(),
2595          "only VMThread can add previous versions");
2596 
2597   if (_previous_versions == NULL) {
2598     // This is the first previous version so make some space.
2599     // Start with 2 elements under the assumption that the class
2600     // won't be redefined much.
2601     _previous_versions =  new (ResourceObj::C_HEAP, mtClass)
2602                             GrowableArray<PreviousVersionNode *>(2, true);
2603   }
2604 
2605   // RC_TRACE macro has an embedded ResourceMark
2606   RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d",
2607     ikh->external_name(), _previous_versions->length(), emcp_method_count));
2608   constantPoolHandle cp_h(ikh->constants());
2609   jobject cp_ref;
2610   if (cp_h->is_shared()) {
2611     // a shared ConstantPool requires a regular reference; a weak
2612     // reference would be collectible
2613     cp_ref = JNIHandles::make_global(cp_h);
2614   } else {
2615     cp_ref = JNIHandles::make_weak_global(cp_h);
2616   }
2617   PreviousVersionNode * pv_node = NULL;
2618   objArrayOop old_methods = ikh->methods();
2619 
2620   if (emcp_method_count == 0) {
2621     // non-shared ConstantPool gets a weak reference
2622     pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), NULL);
2623     RC_TRACE(0x00000400,
2624       ("add: all methods are obsolete; flushing any EMCP weak refs"));
2625   } else {
2626     int local_count = 0;
2627     GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP, mtClass)
2628       GrowableArray<jweak>(emcp_method_count, true);
2629     for (int i = 0; i < old_methods->length(); i++) {
2630       if (emcp_methods->at(i)) {
2631         // this old method is EMCP so save a weak ref
2632         methodOop old_method = (methodOop) old_methods->obj_at(i);
2633         methodHandle old_method_h(old_method);
2634         jweak method_ref = JNIHandles::make_weak_global(old_method_h);
2635         method_refs->append(method_ref);
2636         if (++local_count >= emcp_method_count) {
2637           // no more EMCP methods so bail out now
2638           break;
2639         }
2640       }
2641     }
2642     // non-shared ConstantPool gets a weak reference
2643     pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), method_refs);
2644   }
2645 
2646   _previous_versions->append(pv_node);
2647 
2648   // Using weak references allows the interesting parts of previous
2649   // classes to be GC'ed when they are no longer needed. Since the
2650   // caller is the VMThread and we are at a safepoint, this is a good
2651   // time to clear out unused weak references.
2652 
2653   RC_TRACE(0x00000400, ("add: previous version length=%d",
2654     _previous_versions->length()));
2655 
2656   // skip the last entry since we just added it
2657   for (int i = _previous_versions->length() - 2; i >= 0; i--) {
2658     // check the previous versions array for a GC'ed weak refs
2659     pv_node = _previous_versions->at(i);
2660     cp_ref = pv_node->prev_constant_pool();
2661     assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
2662     if (cp_ref == NULL) {
2663       delete pv_node;
2664       _previous_versions->remove_at(i);
2665       // Since we are traversing the array backwards, we don't have to
2666       // do anything special with the index.
2667       continue;  // robustness
2668     }
2669 
2670     constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2671     if (cp == NULL) {
2672       // this entry has been GC'ed so remove it
2673       delete pv_node;
2674       _previous_versions->remove_at(i);
2675       // Since we are traversing the array backwards, we don't have to
2676       // do anything special with the index.
2677       continue;
2678     } else {
2679       RC_TRACE(0x00000400, ("add: previous version @%d is alive", i));
2680     }
2681 
2682     GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2683     if (method_refs != NULL) {
2684       RC_TRACE(0x00000400, ("add: previous methods length=%d",
2685         method_refs->length()));
2686       for (int j = method_refs->length() - 1; j >= 0; j--) {
2687         jweak method_ref = method_refs->at(j);
2688         assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
2689         if (method_ref == NULL) {
2690           method_refs->remove_at(j);
2691           // Since we are traversing the array backwards, we don't have to
2692           // do anything special with the index.
2693           continue;  // robustness
2694         }
2695 
2696         methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2697         if (method == NULL || emcp_method_count == 0) {
2698           // This method entry has been GC'ed or the current
2699           // RedefineClasses() call has made all methods obsolete
2700           // so remove it.
2701           JNIHandles::destroy_weak_global(method_ref);
2702           method_refs->remove_at(j);
2703         } else {
2704           // RC_TRACE macro has an embedded ResourceMark
2705           RC_TRACE(0x00000400,
2706             ("add: %s(%s): previous method @%d in version @%d is alive",
2707             method->name()->as_C_string(), method->signature()->as_C_string(),
2708             j, i));
2709         }
2710       }
2711     }
2712   }
2713 
2714   int obsolete_method_count = old_methods->length() - emcp_method_count;
2715 
2716   if (emcp_method_count != 0 && obsolete_method_count != 0 &&
2717       _previous_versions->length() > 1) {
2718     // We have a mix of obsolete and EMCP methods. If there is more
2719     // than the previous version that we just added, then we have to
2720     // clear out any matching EMCP method entries the hard way.
2721     int local_count = 0;
2722     for (int i = 0; i < old_methods->length(); i++) {
2723       if (!emcp_methods->at(i)) {
2724         // only obsolete methods are interesting
2725         methodOop old_method = (methodOop) old_methods->obj_at(i);
2726         Symbol* m_name = old_method->name();
2727         Symbol* m_signature = old_method->signature();
2728 
2729         // skip the last entry since we just added it
2730         for (int j = _previous_versions->length() - 2; j >= 0; j--) {
2731           // check the previous versions array for a GC'ed weak refs
2732           pv_node = _previous_versions->at(j);
2733           cp_ref = pv_node->prev_constant_pool();
2734           assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
2735           if (cp_ref == NULL) {
2736             delete pv_node;
2737             _previous_versions->remove_at(j);
2738             // Since we are traversing the array backwards, we don't have to
2739             // do anything special with the index.
2740             continue;  // robustness
2741           }
2742 
2743           constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2744           if (cp == NULL) {
2745             // this entry has been GC'ed so remove it
2746             delete pv_node;
2747             _previous_versions->remove_at(j);
2748             // Since we are traversing the array backwards, we don't have to
2749             // do anything special with the index.
2750             continue;
2751           }
2752 
2753           GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2754           if (method_refs == NULL) {
2755             // We have run into a PreviousVersion generation where
2756             // all methods were made obsolete during that generation's
2757             // RedefineClasses() operation. At the time of that
2758             // operation, all EMCP methods were flushed so we don't
2759             // have to go back any further.
2760             //
2761             // A NULL method_refs is different than an empty method_refs.
2762             // We cannot infer any optimizations about older generations
2763             // from an empty method_refs for the current generation.
2764             break;
2765           }
2766 
2767           for (int k = method_refs->length() - 1; k >= 0; k--) {
2768             jweak method_ref = method_refs->at(k);
2769             assert(method_ref != NULL,
2770               "weak method ref was unexpectedly cleared");
2771             if (method_ref == NULL) {
2772               method_refs->remove_at(k);
2773               // Since we are traversing the array backwards, we don't
2774               // have to do anything special with the index.
2775               continue;  // robustness
2776             }
2777 
2778             methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2779             if (method == NULL) {
2780               // this method entry has been GC'ed so skip it
2781               JNIHandles::destroy_weak_global(method_ref);
2782               method_refs->remove_at(k);
2783               continue;
2784             }
2785 
2786             if (method->name() == m_name &&
2787                 method->signature() == m_signature) {
2788               // The current RedefineClasses() call has made all EMCP
2789               // versions of this method obsolete so mark it as obsolete
2790               // and remove the weak ref.
2791               RC_TRACE(0x00000400,
2792                 ("add: %s(%s): flush obsolete method @%d in version @%d",
2793                 m_name->as_C_string(), m_signature->as_C_string(), k, j));
2794 
2795               method->set_is_obsolete();
2796               JNIHandles::destroy_weak_global(method_ref);
2797               method_refs->remove_at(k);
2798               break;
2799             }
2800           }
2801 
2802           // The previous loop may not find a matching EMCP method, but
2803           // that doesn't mean that we can optimize and not go any
2804           // further back in the PreviousVersion generations. The EMCP
2805           // method for this generation could have already been GC'ed,
2806           // but there still may be an older EMCP method that has not
2807           // been GC'ed.
2808         }
2809 
2810         if (++local_count >= obsolete_method_count) {
2811           // no more obsolete methods so bail out now
2812           break;
2813         }
2814       }
2815     }
2816   }
2817 } // end add_previous_version()
2818 
2819 
2820 // Determine if instanceKlass has a previous version.
2821 bool instanceKlass::has_previous_version() const {
2822   if (_previous_versions == NULL) {
2823     // no previous versions array so answer is easy
2824     return false;
2825   }
2826 
2827   for (int i = _previous_versions->length() - 1; i >= 0; i--) {
2828     // Check the previous versions array for an info node that hasn't
2829     // been GC'ed
2830     PreviousVersionNode * pv_node = _previous_versions->at(i);
2831 
2832     jobject cp_ref = pv_node->prev_constant_pool();
2833     assert(cp_ref != NULL, "cp reference was unexpectedly cleared");
2834     if (cp_ref == NULL) {
2835       continue;  // robustness
2836     }
2837 
2838     constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2839     if (cp != NULL) {
2840       // we have at least one previous version
2841       return true;
2842     }
2843 
2844     // We don't have to check the method refs. If the constant pool has
2845     // been GC'ed then so have the methods.
2846   }
2847 
2848   // all of the underlying nodes' info has been GC'ed
2849   return false;
2850 } // end has_previous_version()
2851 
2852 methodOop instanceKlass::method_with_idnum(int idnum) {
2853   methodOop m = NULL;
2854   if (idnum < methods()->length()) {
2855     m = (methodOop) methods()->obj_at(idnum);
2856   }
2857   if (m == NULL || m->method_idnum() != idnum) {
2858     for (int index = 0; index < methods()->length(); ++index) {
2859       m = (methodOop) methods()->obj_at(index);
2860       if (m->method_idnum() == idnum) {
2861         return m;
2862       }
2863     }
2864   }
2865   return m;
2866 }
2867 
2868 
2869 // Set the annotation at 'idnum' to 'anno'.
2870 // We don't want to create or extend the array if 'anno' is NULL, since that is the
2871 // default value.  However, if the array exists and is long enough, we must set NULL values.
2872 void instanceKlass::set_methods_annotations_of(int idnum, typeArrayOop anno, objArrayOop* md_p) {
2873   objArrayOop md = *md_p;
2874   if (md != NULL && md->length() > idnum) {
2875     md->obj_at_put(idnum, anno);
2876   } else if (anno != NULL) {
2877     // create the array
2878     int length = MAX2(idnum+1, (int)_idnum_allocated_count);
2879     md = oopFactory::new_system_objArray(length, Thread::current());
2880     if (*md_p != NULL) {
2881       // copy the existing entries
2882       for (int index = 0; index < (*md_p)->length(); index++) {
2883         md->obj_at_put(index, (*md_p)->obj_at(index));
2884       }
2885     }
2886     set_annotations(md, md_p);
2887     md->obj_at_put(idnum, anno);
2888   } // if no array and idnum isn't included there is nothing to do
2889 }
2890 
2891 // Construct a PreviousVersionNode entry for the array hung off
2892 // the instanceKlass.
2893 PreviousVersionNode::PreviousVersionNode(jobject prev_constant_pool,
2894   bool prev_cp_is_weak, GrowableArray<jweak>* prev_EMCP_methods) {
2895 
2896   _prev_constant_pool = prev_constant_pool;
2897   _prev_cp_is_weak = prev_cp_is_weak;
2898   _prev_EMCP_methods = prev_EMCP_methods;
2899 }
2900 
2901 
2902 // Destroy a PreviousVersionNode
2903 PreviousVersionNode::~PreviousVersionNode() {
2904   if (_prev_constant_pool != NULL) {
2905     if (_prev_cp_is_weak) {
2906       JNIHandles::destroy_weak_global(_prev_constant_pool);
2907     } else {
2908       JNIHandles::destroy_global(_prev_constant_pool);
2909     }
2910   }
2911 
2912   if (_prev_EMCP_methods != NULL) {
2913     for (int i = _prev_EMCP_methods->length() - 1; i >= 0; i--) {
2914       jweak method_ref = _prev_EMCP_methods->at(i);
2915       if (method_ref != NULL) {
2916         JNIHandles::destroy_weak_global(method_ref);
2917       }
2918     }
2919     delete _prev_EMCP_methods;
2920   }
2921 }
2922 
2923 
2924 // Construct a PreviousVersionInfo entry
2925 PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
2926   _prev_constant_pool_handle = constantPoolHandle();  // NULL handle
2927   _prev_EMCP_method_handles = NULL;
2928 
2929   jobject cp_ref = pv_node->prev_constant_pool();
2930   assert(cp_ref != NULL, "constant pool ref was unexpectedly cleared");
2931   if (cp_ref == NULL) {
2932     return;  // robustness
2933   }
2934 
2935   constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2936   if (cp == NULL) {
2937     // Weak reference has been GC'ed. Since the constant pool has been
2938     // GC'ed, the methods have also been GC'ed.
2939     return;
2940   }
2941 
2942   // make the constantPoolOop safe to return
2943   _prev_constant_pool_handle = constantPoolHandle(cp);
2944 
2945   GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2946   if (method_refs == NULL) {
2947     // the instanceKlass did not have any EMCP methods
2948     return;
2949   }
2950 
2951   _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);
2952 
2953   int n_methods = method_refs->length();
2954   for (int i = 0; i < n_methods; i++) {
2955     jweak method_ref = method_refs->at(i);
2956     assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
2957     if (method_ref == NULL) {
2958       continue;  // robustness
2959     }
2960 
2961     methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2962     if (method == NULL) {
2963       // this entry has been GC'ed so skip it
2964       continue;
2965     }
2966 
2967     // make the methodOop safe to return
2968     _prev_EMCP_method_handles->append(methodHandle(method));
2969   }
2970 }
2971 
2972 
2973 // Destroy a PreviousVersionInfo
2974 PreviousVersionInfo::~PreviousVersionInfo() {
2975   // Since _prev_EMCP_method_handles is not C-heap allocated, we
2976   // don't have to delete it.
2977 }
2978 
2979 
2980 // Construct a helper for walking the previous versions array
2981 PreviousVersionWalker::PreviousVersionWalker(instanceKlass *ik) {
2982   _previous_versions = ik->previous_versions();
2983   _current_index = 0;
2984   // _hm needs no initialization
2985   _current_p = NULL;
2986 }
2987 
2988 
2989 // Destroy a PreviousVersionWalker
2990 PreviousVersionWalker::~PreviousVersionWalker() {
2991   // Delete the current info just in case the caller didn't walk to
2992   // the end of the previous versions list. No harm if _current_p is
2993   // already NULL.
2994   delete _current_p;
2995 
2996   // When _hm is destroyed, all the Handles returned in
2997   // PreviousVersionInfo objects will be destroyed.
2998   // Also, after this destructor is finished it will be
2999   // safe to delete the GrowableArray allocated in the
3000   // PreviousVersionInfo objects.
3001 }
3002 
3003 
3004 // Return the interesting information for the next previous version
3005 // of the klass. Returns NULL if there are no more previous versions.
3006 PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
3007   if (_previous_versions == NULL) {
3008     // no previous versions so nothing to return
3009     return NULL;
3010   }
3011 
3012   delete _current_p;  // cleanup the previous info for the caller
3013   _current_p = NULL;  // reset to NULL so we don't delete same object twice
3014 
3015   int length = _previous_versions->length();
3016 
3017   while (_current_index < length) {
3018     PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
3019     PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP, mtClass)
3020                                           PreviousVersionInfo(pv_node);
3021 
3022     constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
3023     if (cp_h.is_null()) {
3024       delete pv_info;
3025 
3026       // The underlying node's info has been GC'ed so try the next one.
3027       // We don't have to check the methods. If the constant pool has
3028       // GC'ed then so have the methods.
3029       continue;
3030     }
3031 
3032     // Found a node with non GC'ed info so return it. The caller will
3033     // need to delete pv_info when they are done with it.
3034     _current_p = pv_info;
3035     return pv_info;
3036   }
3037 
3038   // all of the underlying nodes' info has been GC'ed
3039   return NULL;
3040 } // end next_previous_version()