/*
 * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/rewriter.hpp"
#include "memory/universe.inline.hpp"
#include "oops/cpCacheOop.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/handles.inline.hpp"


// Implementation of ConstantPoolCacheEntry

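// Quick orientation (the authoritative field declarations live in
// oops/cpCacheOop.hpp): each entry consists of four words.
//   _indices - the constant pool index in the low 16 bits (or, for a
//              secondary entry, the main entry index shifted into the high
//              16 bits), with bytecode_1 and bytecode_2 stored in bytes 2
//              and 3 of a main entry.
//   _f1      - an oop or NULL (methodOop, interface klassOop, java mirror,
//              or call site, depending on the entry kind).
//   _f2      - either an oop-sized value (a methodOop when the vfinal flag
//              is set) or a plain int (vtable/itable index, field offset,
//              or tagged bootstrap-method index).
//   _flags   - the packed flag word produced by as_flags() below, plus the
//              field index or parameter size in the low bits.
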
void ConstantPoolCacheEntry::initialize_entry(int index) {
  assert(0 < index && index < 0x10000, "sanity check");
  _indices = index;
  assert(constant_pool_index() == index, "");
}

void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
  assert(0 <= main_index && main_index < 0x10000, "sanity check");
  _indices = (main_index << 16);
  assert(main_entry_index() == main_index, "");
}

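// Pack the tos state and the boolean attributes into a single flag word.
// Reading the shifts below, the result (before callers OR in the low bits)
// is laid out roughly as
//   [ tos state | is_final | is_vfinal | is_volatile | is_methodInterface |
//     is_method | hotSwapBit low bits left zero ]
// leaving the low bits free for the field index or parameter size; see the
// flag bit constants in cpCacheOop.hpp for the exact positions.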
int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
                    bool is_vfinal, bool is_volatile,
                    bool is_method_interface, bool is_method) {
  int f = state;

  assert( state < number_of_states, "Invalid state in as_flags");

  f <<= 1;
  if (is_final) f |= 1;
  f <<= 1;
  if (is_vfinal) f |= 1;
  f <<= 1;
  if (is_volatile) f |= 1;
  f <<= 1;
  if (is_method_interface) f |= 1;
  f <<= 1;
  if (is_method) f |= 1;
  f <<= ConstantPoolCacheEntry::hotSwapBit;
  // Preserve existing flag bit values
#ifdef ASSERT
  int old_state = ((_flags >> tosBits) & 0x0F);
  assert(old_state == 0 || old_state == state,
         "inconsistent cpCache flags state");
#endif
  return (_flags | f);
}

void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 16));
}

void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 24));
}

// Atomically sets f1 if it is still NULL, otherwise it keeps the
// current value.
void ConstantPoolCacheEntry::set_f1_if_null_atomic(oop f1) {
  // Use barriers as in oop_store
  oop* f1_addr = (oop*) &_f1;
  update_barrier_set_pre(f1_addr, f1);
  void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL);
  bool success = (result == NULL);
  if (success) {
    update_barrier_set((void*) f1_addr, f1);
  }
}
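// The CAS above lets racing resolvers publish f1 without locking: the
// invokedynamic path in set_method() and set_dynamic_call() both rely on
// whichever thread wins the race leaving a fully valid value in place.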

#ifdef ASSERT
// It is possible for two different dummy methodOops to be created
// when the resolve code for invokeinterface executes concurrently.
// Hence the assertion below is weakened a bit for the invokeinterface
// case.
bool ConstantPoolCacheEntry::same_methodOop(oop cur_f1, oop f1) {
  return (cur_f1 == f1 || ((methodOop)cur_f1)->name() ==
         ((methodOop)f1)->name() || ((methodOop)cur_f1)->signature() ==
         ((methodOop)f1)->signature());
}
#endif

// Note that concurrent update of both bytecodes can leave one of them
// reset to zero.  This is harmless; the interpreter will simply re-resolve
// the damaged entry.  More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       KlassHandle field_holder,
                                       int field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile) {
  set_f1(field_holder()->java_mirror());
  set_f2(field_offset);
  assert(field_index <= field_index_mask,
         "field index does not fit in low flag bits");
  set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) |
            (field_index & field_index_mask));
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}
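
// The ordering above is the publication discipline used throughout this
// file: f1, f2 and the flags are stored first, and the bytecode(s) are
// published last via a release store, so a reader that observes a non-zero
// bytecode also observes fully initialized f1/f2/flags.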

int  ConstantPoolCacheEntry::field_index() const {
  return (_flags & field_index_mask);
}

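// Fill in a method entry.  Summarizing the switch below: invokevirtual and
// invokeinterface store either a vtable index or (when the method can be
// statically bound) the methodOop itself in f2; invokedynamic publishes the
// invoker method atomically in f1 and keeps the tagged bootstrap-method
// index in f2; invokespecial and invokestatic store the methodOop in f1.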
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {
  assert(!is_secondary_entry(), "");
  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");
  bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface);

  int byte_no = -1;
  bool needs_vfinal_flag = false;
  switch (invoke_code) {
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
        if (method->can_be_statically_bound()) {
          set_f2((intptr_t)method());
          needs_vfinal_flag = true;
        } else {
          assert(vtable_index >= 0, "valid index");
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
    }

    case Bytecodes::_invokedynamic:  // similar to _invokevirtual
      if (TraceInvokeDynamic) {
        tty->print_cr("InvokeDynamic set_method%s method="PTR_FORMAT" index=%d",
                      (is_secondary_entry() ? " secondary" : ""),
                      (intptr_t)method(), vtable_index);
        method->print();
        this->print(tty, 0);
      }
      assert(method->can_be_statically_bound(), "must be a MH invoker method");
      assert(_f2 >= constantPoolOopDesc::CPCACHE_INDEX_TAG, "BSM index initialized");
      // SystemDictionary::find_method_handle_invoke only caches
      // methods whose signature classes are on the boot classpath;
      // otherwise a newly created method is returned.  To avoid
      // races in that case we store the first one to arrive into the
      // cp-cache atomically, but only if f1 is still unset.
      set_f1_if_null_atomic(method());
      needs_vfinal_flag = false;  // _f2 is not an oop
      assert(!is_vfinal(), "f2 not an oop");
      byte_no = 1;  // coordinate this with bytecode_number & is_resolved
      break;

    case Bytecodes::_invokespecial:
      // Preserve the value of the vfinal flag on invokevirtual bytecode
      // which may be shared with this constant pool cache entry.
      needs_vfinal_flag = is_resolved(Bytecodes::_invokevirtual) && is_vfinal();
      // fall through
    case Bytecodes::_invokestatic:
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  set_flags(as_flags(as_TosState(method->result_type()),
                     method->is_final_method(),
                     needs_vfinal_flag,
                     false,
                     change_to_virtual,
                     true)|
            method()->size_of_parameters());

  // Note:  byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2)  {
    if (change_to_virtual) {
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only mark the invokeinterface case as resolved if the method is public.
      // Otherwise, the method needs to be re-resolved against the caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
      set_bytecode_2(Bytecodes::_invokevirtual);
    } else {
      set_bytecode_2(invoke_code);
    }
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}


void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
  assert(!is_secondary_entry(), "");
  klassOop interf = method->method_holder();
  assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
  set_f1(interf);
  set_f2(index);
  set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(),
                     false, false, false, true) | method()->size_of_parameters());
  set_bytecode_1(Bytecodes::_invokeinterface);
}
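
// For invokeinterface, f1 holds the interface klass and f2 the itable index;
// get_method_if_resolved() below reverses this encoding with
// klassItable::method_for_itable_index().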


void ConstantPoolCacheEntry::initialize_bootstrap_method_index_in_cache(int bsm_cache_index) {
  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
  assert(_f2 == 0, "initialize once");
  assert(bsm_cache_index == (int)(u2)bsm_cache_index, "oob");
  set_f2(bsm_cache_index + constantPoolOopDesc::CPCACHE_INDEX_TAG);
}

int ConstantPoolCacheEntry::bootstrap_method_index_in_cache() {
  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
  intptr_t bsm_cache_index = (intptr_t) _f2 - constantPoolOopDesc::CPCACHE_INDEX_TAG;
  assert(bsm_cache_index == (intptr_t)(u2)bsm_cache_index, "oob");
  return (int) bsm_cache_index;
}
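
// The CPCACHE_INDEX_TAG bias added above keeps a legitimate index 0 distinct
// from the uninitialized state (_f2 == 0); the accessor subtracts it again.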

void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, methodHandle signature_invoker) {
  assert(is_secondary_entry(), "");
  // NOTE: it is important that all other values are set before f1 is
  // set, since some users short-circuit on f1 being set (i.e. non-null)
  // and racing threads could otherwise observe uninitialized values for
  // the other fields (e.g. flags).
  int param_size = signature_invoker->size_of_parameters();
  assert(param_size >= 1, "method argument size must include MH.this");
  param_size -= 1;  // do not count MH.this; it is not stacked for invokedynamic
  bool is_final = true;
  assert(signature_invoker->is_final_method(), "is_final");
  int flags = as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size;
  assert(_flags == 0 || _flags == flags, "flags should be the same");
  set_flags(flags);
  // do not do set_bytecode on a secondary CP cache entry
  //set_bytecode_1(Bytecodes::_invokedynamic);
  set_f1_if_null_atomic(call_site());  // This must be the last one to set (see NOTE above)!
}


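// Recover the resolved methodOop for a given invoke bytecode, or NULL if the
// entry has not (yet) been resolved for it.  Secondary entries delegate to
// their main entry; the non-vfinal invokevirtual case re-derives the method
// from the holder klass's vtable using the index stored in f2.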
methodOop ConstantPoolCacheEntry::get_method_if_resolved(Bytecodes::Code invoke_code, constantPoolHandle cpool) {
  assert(invoke_code > (Bytecodes::Code)0, "bad query");
  if (is_secondary_entry()) {
    return cpool->cache()->entry_at(main_entry_index())->get_method_if_resolved(invoke_code, cpool);
  }
  // Decode the action of set_method and set_interface_call
  if (bytecode_1() == invoke_code) {
    oop f1 = _f1;
    if (f1 != NULL) {
      switch (invoke_code) {
      case Bytecodes::_invokeinterface:
        assert(f1->is_klass(), "");
        return klassItable::method_for_itable_index(klassOop(f1), (int) f2());
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokespecial:
        assert(f1->is_method(), "");
        return methodOop(f1);
      }
    }
  }
  if (bytecode_2() == invoke_code) {
    switch (invoke_code) {
    case Bytecodes::_invokevirtual:
      if (is_vfinal()) {
        // invokevirtual
        methodOop m = methodOop((intptr_t) f2());
        assert(m->is_method(), "");
        return m;
      } else {
        int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
        if (cpool->tag_at(holder_index).is_klass()) {
          klassOop klass = cpool->resolved_klass_at(holder_index);
          if (!Klass::cast(klass)->oop_is_instance())
            klass = SystemDictionary::Object_klass();
          return instanceKlass::cast(klass)->method_at_vtable((int) f2());
        }
      }
    }
  }
  return NULL;
}



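// GC support.  Only _f1 is unconditionally an oop (or NULL); _f2 holds an oop
// (a methodOop) only when the vfinal flag is set, which is why every closure
// below guards the _f2 visit with is_vfinal().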
class LocalOopClosure: public OopClosure {
 private:
  void (*_f)(oop*);

 public:
  LocalOopClosure(void f(oop*))        { _f = f; }
  virtual void do_oop(oop* o)          { _f(o); }
  virtual void do_oop(narrowOop *o)    { ShouldNotReachHere(); }
};


void ConstantPoolCacheEntry::oops_do(void f(oop*)) {
  LocalOopClosure blk(f);
  oop_iterate(&blk);
}


void ConstantPoolCacheEntry::oop_iterate(OopClosure* blk) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    blk->do_oop((oop*)&_f2);
  }
}


void ConstantPoolCacheEntry::oop_iterate_m(OopClosure* blk, MemRegion mr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  if (mr.contains((oop *)&_f1)) blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    if (mr.contains((oop *)&_f2)) blk->do_oop((oop*)&_f2);
  }
}


void ConstantPoolCacheEntry::follow_contents() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::mark_and_push((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::mark_and_push((oop*)&_f2);
  }
}

#ifndef SERIALGC
void ConstantPoolCacheEntry::follow_contents(ParCompactionManager* cm) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::mark_and_push(cm, (oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::mark_and_push(cm, (oop*)&_f2);
  }
}
#endif // SERIALGC

void ConstantPoolCacheEntry::adjust_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::adjust_pointer((oop*)&_f2);
  }
}

#ifndef SERIALGC
void ConstantPoolCacheEntry::update_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::adjust_pointer((oop*)&_f2);
  }
}
#endif // SERIALGC

// RedefineClasses() API support:
// If this constantPoolCacheEntry refers to old_method then update it
// to refer to new_method.
bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method,
       methodOop new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so f2() contains method ptr instead of vtable index
    if (f2() == (intptr_t)old_method) {
      // match old_method so need an update
      _f2 = (intptr_t)new_method;
      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
        if (!(*trace_name_printed)) {
          // RC_TRACE_MESG macro has an embedded ResourceMark
          RC_TRACE_MESG(("adjust: name=%s",
            Klass::cast(old_method->method_holder())->external_name()));
          *trace_name_printed = true;
        }
        // RC_TRACE macro has an embedded ResourceMark
        RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
          new_method->name()->as_C_string(),
          new_method->signature()->as_C_string()));
      }

      return true;
    }

    // f1() is not used with virtual entries so bail out
    return false;
  }

  if ((oop)_f1 == NULL) {
    // A NULL f1() means this is a virtual entry, so bail out.
    // We are assuming that the vtable index does not need to change.
    return false;
  }

  if ((oop)_f1 == old_method) {
    _f1 = new_method;
    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
      if (!(*trace_name_printed)) {
        // RC_TRACE_MESG macro has an embedded ResourceMark
        RC_TRACE_MESG(("adjust: name=%s",
          Klass::cast(old_method->method_holder())->external_name()));
        *trace_name_printed = true;
      }
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
        new_method->name()->as_C_string(),
        new_method->signature()->as_C_string()));
    }

    return true;
  }

  return false;
}

bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) {
  if (!is_method_entry()) {
    // not a method entry so not interesting by default
    return false;
  }

  methodOop m = NULL;
  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    m = (methodOop)_f2;
  } else if ((oop)_f1 == NULL) {
    // NULL _f1 means this is a virtual entry so also not interesting
    return false;
  } else {
    if (!((oop)_f1)->is_method()) {
      // _f1 can also contain a klassOop for an interface
      return false;
    }
    m = (methodOop)_f1;
  }

  assert(m != NULL && m->is_method(), "sanity check");
  if (m == NULL || !m->is_method() || m->method_holder() != k) {
    // robustness for above sanity checks or method is not in
    // the interesting class
    return false;
  }

  // the method is in the interesting class so the entry is interesting
  return true;
}

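// Debug printing.  Each entry prints as one header line,
//   <index>  (<this>)  [bytecode_2|bytecode_1|cp index]
// (or [main index|secondary] for a secondary entry), followed by the raw
// f1, f2 and flags words and a separator line.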
void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
  // print separator
  if (index == 0) tty->print_cr("                 -------------");
  // print entry
  tty->print("%3d  ("PTR_FORMAT")  ", index, (intptr_t)this);
  if (is_secondary_entry())
    tty->print_cr("[%5d|secondary]", main_entry_index());
  else
    tty->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
  tty->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)(oop)_f1);
  tty->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_f2);
  tty->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_flags);
  tty->print_cr("                 -------------");
}

void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}

// Implementation of ConstantPoolCache

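// Establish the identity of every entry from the inverse index map built by
// the rewriter: entries tagged with Rewriter::_secondary_entry_tag become
// secondary entries pointing back at their main entry; all others record
// their original constant pool index.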
void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
  assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
  for (int i = 0; i < length(); i++) {
    ConstantPoolCacheEntry* e = entry_at(i);
    int original_index = inverse_index_map[i];
    if ((original_index & Rewriter::_secondary_entry_tag) != 0) {
      int main_index = (original_index - Rewriter::_secondary_entry_tag);
      assert(!entry_at(main_index)->is_secondary_entry(), "valid main index");
      e->initialize_secondary_entry(main_index);
    } else {
      e->initialize_entry(original_index);
    }
    assert(entry_at(i) == e, "sanity");
  }
}

// RedefineClasses() API support:
// If any entry of this constantPoolCache points to any of
// old_methods, replace it with the corresponding new_method.
void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods,
                                                     int methods_length, bool * trace_name_printed) {

  if (methods_length == 0) {
    // nothing to do if there are no methods
    return;
  }

  // get shorthand for the interesting class
  klassOop old_holder = old_methods[0]->method_holder();

  for (int i = 0; i < length(); i++) {
    if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
      // skip uninteresting entries
      continue;
    }

    // The constantPoolCache contains entries for several different
    // things, but we only care about methods. In fact, we only care
    // about methods in the same class as the one that contains the
    // old_methods. At this point, we have an interesting entry.

    for (int j = 0; j < methods_length; j++) {
      methodOop old_method = old_methods[j];
      methodOop new_method = new_methods[j];

      if (entry_at(i)->adjust_method_entry(old_method, new_method,
          trace_name_printed)) {
        // current old_method matched this entry and we updated it, so
        // break out and get to the next interesting entry if there is one
        break;
      }
    }
  }
}