1 /*
   2  * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/shared/markSweep.inline.hpp"
  27 #include "interpreter/interpreter.hpp"
  28 #include "interpreter/rewriter.hpp"
  29 #include "memory/universe.inline.hpp"
  30 #include "oops/cpCacheOop.hpp"
  31 #include "oops/objArrayOop.hpp"
  32 #include "oops/oop.inline.hpp"
  33 #include "prims/jvmtiRedefineClassesTrace.hpp"
  34 #include "prims/methodHandles.hpp"
  35 #include "runtime/handles.inline.hpp"
  36 
  37 
// Implementation of ConstantPoolCacheEntry
  39 
// Initialize a main (non-secondary) cache entry: store the original
// constant pool index in the low bits of _indices.  The bytecode bytes
// (set later during resolution) remain zero, marking the entry unresolved.
void ConstantPoolCacheEntry::initialize_entry(int index) {
  assert(0 < index && index < 0x10000, "sanity check");  // must fit the 16-bit cp-index field; 0 is reserved
  _indices = index;
  assert(constant_pool_index() == index, "");  // round-trip through the accessor
}
  45 
// Initialize a secondary cache entry (used for invokedynamic): store the
// index of the associated main entry shifted into the upper field of
// _indices, leaving the low cp-index bits zero to mark it as secondary.
void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
  assert(0 <= main_index && main_index < 0x10000, "sanity check");  // main index must also fit in 16 bits
  _indices = (main_index << main_cp_index_bits);
  assert(main_entry_index() == main_index, "");  // round-trip through the accessor
}
  51 
// Compose a flags word from a TosState (top-of-stack type), option bits
// (is_final, is_volatile, etc.) and the low bits, which carry either a
// field index or a method parameter count.  The result is OR'ed over the
// entry's current _flags so previously-set bits are preserved.
int ConstantPoolCacheEntry::make_flags(TosState state,
                                       int option_bits,
                                       int field_index_or_method_params) {
  assert(state < number_of_states, "Invalid state in make_flags");
  int f = ((int)state << tos_state_shift) | option_bits | field_index_or_method_params;
  // Preserve existing flag bit values
  // The low bits are a field offset, or else the method parameter size.
#ifdef ASSERT
  // A previously recorded TosState (if any) must agree with the new one.
  TosState old_state = flag_state();
  assert(old_state == (TosState)0 || old_state == state,
         "inconsistent cpCache flags state");
#endif
  return (_flags | f) ;
}
  66 
// Publish the first resolved bytecode into _indices.  Writing the bytecode
// is the "resolved" signal readers test, so it must be store-released after
// all other entry fields (f1, f2, flags) have been written.
void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
  assert(!is_secondary_entry(), "must not overwrite main_entry_index");  // secondary entries reuse these bits
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  assert(c == 0 || c == code || code == 0, "update must be consistent");  // only unset->set or idempotent updates
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_1_shift));
}
  77 
// Publish the second resolved bytecode into _indices, mirroring
// set_bytecode_1: the release store orders it after all prior field writes.
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
  assert(!is_secondary_entry(), "must not overwrite main_entry_index");  // secondary entries reuse these bits
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  assert(c == 0 || c == code || code == 0, "update must be consistent");  // only unset->set or idempotent updates
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << bytecode_2_shift));
}
  88 
// Sets f1, ordering with previous writes.
// f1 is an oop, so the store must be bracketed by the GC write-barrier
// calls (pre and post), exactly as oop_store would do, with a release
// store in between so readers who see f1 also see earlier field writes.
void ConstantPoolCacheEntry::release_set_f1(oop f1) {
  // Use barriers as in oop_store
  assert(f1 != NULL, "");  // NULL f1 would look like an unresolved entry
  oop* f1_addr = (oop*) &_f1;
  update_barrier_set_pre(f1_addr, f1);
  OrderAccess::release_store_ptr((intptr_t*)f1_addr, f1);
  update_barrier_set((void*) f1_addr, f1);
}
  98 
  99 // Sets flags, but only if the value was previously zero.
 100 bool ConstantPoolCacheEntry::init_flags_atomic(intptr_t flags) {
 101   intptr_t result = Atomic::cmpxchg_ptr(flags, &_flags, 0);
 102   return (result == 0);
 103 }
 104 
 105 #ifdef ASSERT
 106 // It is possible to have two different dummy methodOops created
 107 // when the resolve code for invoke interface executes concurrently
 108 // Hence the assertion below is weakened a bit for the invokeinterface
 109 // case.
 110 bool ConstantPoolCacheEntry::same_methodOop(oop cur_f1, oop f1) {
 111   return (cur_f1 == f1 || ((methodOop)cur_f1)->name() ==
 112          ((methodOop)f1)->name() || ((methodOop)cur_f1)->signature() ==
 113          ((methodOop)f1)->signature());
 114 }
 115 #endif
 116 
// Note that concurrent update of both bytecodes can leave one of them
// reset to zero.  This is harmless; the interpreter will simply re-resolve
// the damaged entry.  More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
//
// Link this entry for a resolved field access:
//   f1 <- mirror of the holder class, f2 <- field offset,
//   flags <- field type + volatile/final bits + field index,
// and finally the get/put bytecodes, which publish the entry.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       KlassHandle field_holder,
                                       int field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile) {
  set_f1(field_holder()->java_mirror());
  set_f2(field_offset);
  assert((field_index & field_index_mask) == field_index,
         "field index does not fit in low flag bits");
  set_field_flags(field_type,
                  ((is_volatile ? 1 : 0) << is_volatile_shift) |
                  ((is_final    ? 1 : 0) << is_final_shift),
                  field_index);
  // The bytecodes must be stored last (release semantics, see above).
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}
 142 
// Record the method's parameter size in the low bits of _flags, racing
// safely against other initializers via compare-and-swap.
void ConstantPoolCacheEntry::set_parameter_size(int value) {
  // This routine is called only in corner cases where the CPCE is not yet initialized.
  // See AbstractInterpreter::deopt_continue_after_entry.
  assert(_flags == 0 || parameter_size() == 0 || parameter_size() == value,
         err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
  // Setting the parameter size by itself is only safe if the
  // current value of _flags is 0, otherwise another thread may have
  // updated it and we don't want to overwrite that value.  Don't
  // bother trying to update it once it's nonzero but always make
  // sure that the final parameter size agrees with what was passed.
  if (_flags == 0) {
    // cmpxchg only succeeds if _flags is still 0; losing the race is fine.
    Atomic::cmpxchg_ptr((value & parameter_size_mask), &_flags, 0);
  }
  guarantee(parameter_size() == value,
            err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
}
 159 
// Link this entry for invokevirtual, invokespecial, invokestatic, or the
// invokeinterface-to-Object-method corner case.  Fills in f1/f2 and flags
// according to the dispatch kind, then publishes via set_bytecode_1/2.
// vtable_index is only meaningful for non-statically-bound invokevirtual.
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {
  assert(!is_secondary_entry(), "");
  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");

  int byte_no = -1;               // which bytecode slot (1 or 2) to publish into
  bool change_to_virtual = false; // invokeinterface rerouted to virtual dispatch

  switch (invoke_code) {
    case Bytecodes::_invokeinterface:
      // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface
      // instruction somehow links to a non-interface method (in Object).
      // In that case, the method has no itable index and must be invoked as a virtual.
      // Set a flag to keep track of this corner case.
      change_to_virtual = true;

      // ...and fall through as if we were handling invokevirtual:
    case Bytecodes::_invokevirtual:
      {
        if (method->can_be_statically_bound()) {
          // Statically bound: skip the vtable and store the methodOop in f2.
          // set_f2_as_vfinal_method checks if is_vfinal flag is true.
          set_method_flags(as_TosState(method->result_type()),
                           (                             1      << is_vfinal_shift) |
                           ((method->is_final_method() ? 1 : 0) << is_final_shift)  |
                           ((change_to_virtual         ? 1 : 0) << is_forced_virtual_shift),
                           method()->size_of_parameters());
          set_f2_as_vfinal_method(method());
        } else {
          // True virtual dispatch: f2 holds the vtable index.
          assert(vtable_index >= 0, "valid index");
          assert(!method->is_final_method(), "sanity");
          set_method_flags(as_TosState(method->result_type()),
                           ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift),
                           method()->size_of_parameters());
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
      }

    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
      // Note:  Read and preserve the value of the is_vfinal flag on any
      // invokevirtual bytecode shared with this constant pool cache entry.
      // It is cheap and safe to consult is_vfinal() at all times.
      // Once is_vfinal is set, it must stay that way, lest we get a dangling oop.
      set_method_flags(as_TosState(method->result_type()),
                       ((is_vfinal()               ? 1 : 0) << is_vfinal_shift) |
                       ((method->is_final_method() ? 1 : 0) << is_final_shift),
                       method()->size_of_parameters());
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  // Note:  byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    assert(invoke_code != Bytecodes::_invokevirtual &&
           invoke_code != Bytecodes::_invokeinterface, "");
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2)  {
    if (change_to_virtual) {
      assert(invoke_code == Bytecodes::_invokeinterface, "");
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be reresolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
    } else {
      assert(invoke_code == Bytecodes::_invokevirtual, "");
    }
    // set up for invokevirtual, even if linking for invokeinterface also:
    set_bytecode_2(Bytecodes::_invokevirtual);
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}
 252 
 253 
// Link this entry for a true invokeinterface:
//   f1 <- the interface klass, f2 <- the itable index,
// then flags, and finally the bytecode, which publishes the entry.
void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
  assert(!is_secondary_entry(), "");
  klassOop interf = method->method_holder();
  assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
  assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
  set_f1(interf);
  set_f2(index);
  set_method_flags(as_TosState(method->result_type()),
                   0,  // no option bits
                   method()->size_of_parameters());
  // Store the bytecode last (release semantics) to publish the entry.
  set_bytecode_1(Bytecodes::_invokeinterface);
}
 266 
 267 
// Link this (main) entry for invokehandle; delegates to the common
// MethodHandle/invokedynamic linkage routine.
void ConstantPoolCacheEntry::set_method_handle(constantPoolHandle cpool,
                                               methodHandle adapter,
                                               Handle appendix, Handle method_type) {
  assert(!is_secondary_entry(), "");  // invokehandle uses main entries only
  set_method_handle_common(cpool, Bytecodes::_invokehandle, adapter, appendix, method_type);
}
 274 
// Link this (secondary) entry for invokedynamic; delegates to the common
// MethodHandle/invokedynamic linkage routine.
void ConstantPoolCacheEntry::set_dynamic_call(constantPoolHandle cpool,
                                              methodHandle adapter,
                                              Handle appendix, Handle method_type) {
  assert(is_secondary_entry(), "");  // invokedynamic uses secondary entries only
  set_method_handle_common(cpool, Bytecodes::_invokedynamic, adapter, appendix, method_type);
}
 281 
// Shared linkage for invokehandle and invokedynamic call sites.
// Stores flags and f2 first, then publishes f1 last with release
// semantics; is_f1_null() is the "already linked" test for readers.
void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool,
                                                      Bytecodes::Code invoke_code,
                                                      methodHandle adapter,
                                                      Handle appendix, Handle method_type) {
  // NOTE: This CPCE can be the subject of data races.
  // There are three words to update: flags, f2, f1 (in that order).
  // Writers must store all other values before f1.
  // Readers must test f1 first for non-null before reading other fields.
  // Competing writers must acquire exclusive access via a lock.
  // A losing writer waits on the lock until the winner writes f1 and leaves
  // the lock, so that when the losing writer returns, he can use the linked
  // cache entry.

  Thread* THREAD = Thread::current();
  ObjectLocker ol(cpool, THREAD);  // lock on the constant pool serializes linkers
  if (!is_f1_null()) {
    // Another thread already linked this entry while we waited for the lock.
    return;
  }

  const bool has_appendix    = appendix.not_null();
  const bool has_method_type = method_type.not_null();

  if (!has_appendix) {
    // The extra argument is not used, but we need a non-null value to signify linkage state.
    // Set it to something benign that will never leak memory.
    appendix = Universe::void_mirror();
  }

  // Write the flags.
  set_method_flags(as_TosState(adapter->result_type()),
                   ((has_appendix    ? 1 : 0) << has_appendix_shift)    |
                   ((has_method_type ? 1 : 0) << has_method_type_shift) |
                   (                   1      << is_vfinal_shift)       |
                   (                   1      << is_final_shift),
                   adapter->size_of_parameters());

  if (TraceInvokeDynamic) {
    tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ",
                  invoke_code,
                  (intptr_t)appendix(),    (has_appendix    ? "" : " (unused)"),
                  (intptr_t)method_type(), (has_method_type ? "" : " (unused)"),
                  (intptr_t)adapter());
    adapter->print();
    if (has_appendix)  appendix()->print();
  }

  // Method handle invokes and invokedynamic sites use both cp cache words.
  // f1, if not null, contains a value passed as a trailing argument to the adapter.
  // In the general case, this could be the call site's MethodType,
  // for use with java.lang.Invokers.checkExactType, or else a CallSite object.
  // f2 contains the adapter method which manages the actual call.
  // In the general case, this is a compiled LambdaForm.
  // (The Java code is free to optimize these calls by binding other
  // sorts of methods and appendices to call sites.)
  // JVM-level linking is via f2, as if for invokevfinal, and signatures are erased.
  // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits.
  // In principle this means that the method (with appendix) could take up to 256 parameter slots.
  //
  // This means that given a call site like (List)mh.invoke("foo"),
  // the f2 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;',
  // not '(Ljava/lang/String;)Ljava/util/List;'.
  // The fact that String and List are involved is encoded in the MethodType in f1.
  // This allows us to create fewer method oops, while keeping type safety.
  //

  set_f2_as_vfinal_method(adapter());

  // Store MethodType, if any.
  if (has_method_type) {
    // The MethodType lives in the paired secondary entry's f1.
    ConstantPoolCacheEntry* e2 = cpool->cache()->find_secondary_entry_for(this);

    // Write the flags.
    e2->set_method_flags(as_TosState(adapter->result_type()),
                     ((has_method_type ? 1 : 0) << has_method_type_shift) |
                     (                   1      << is_vfinal_shift)       |
                     (                   1      << is_final_shift),
                     adapter->size_of_parameters());
    e2->release_set_f1(method_type());
  }

  assert(appendix.not_null(), "needed for linkage state");
  release_set_f1(appendix());  // This must be the last one to set (see NOTE above)!

  if (!is_secondary_entry()) {
    // The interpreter assembly code does not check byte_2,
    // but it is used by is_resolved, method_if_resolved, etc.
    set_bytecode_2(invoke_code);
  }

  NOT_PRODUCT(verify(tty));
  if (TraceInvokeDynamic) {
    this->print(tty, 0);
  }
}
 376 
// Decode the entry back to the resolved methodOop, or NULL if the entry
// is not (yet) resolved.  Inverts the stores made by set_method,
// set_interface_call, and set_method_handle_common.
methodOop ConstantPoolCacheEntry::method_if_resolved(constantPoolHandle cpool) {
  if (is_secondary_entry()) {
    // invokedynamic: linked iff f1 is set; the adapter method is in f2.
    if (!is_f1_null())
      return f2_as_vfinal_method();
    return NULL;
  }
  // Decode the action of set_method and set_interface_call
  Bytecodes::Code invoke_code = bytecode_1();
  if (invoke_code != (Bytecodes::Code)0) {
    oop f1 = _f1;
    if (f1 != NULL) {
      switch (invoke_code) {
      case Bytecodes::_invokeinterface:
        // f1 is the interface klass; f2 is the itable index.
        assert(f1->is_klass(), "");
        return klassItable::method_for_itable_index(klassOop(f1), f2_as_index());
      case Bytecodes::_invokestatic:
      case Bytecodes::_invokespecial:
        // f1 is the methodOop itself.
        assert(!has_appendix(), "");
        assert(f1->is_method(), "");
        return methodOop(f1);
      }
      // Other codes fall through and try bytecode_2 below.
    }
  }
  invoke_code = bytecode_2();
  if (invoke_code != (Bytecodes::Code)0) {
    switch (invoke_code) {
    case Bytecodes::_invokevirtual:
      if (is_vfinal()) {
        // invokevirtual
        methodOop m = f2_as_vfinal_method();
        assert(m->is_method(), "");
        return m;
      } else {
        // f2 is a vtable index; look the method up in the holder's vtable.
        int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index());
        if (cpool->tag_at(holder_index).is_klass()) {
          klassOop klass = cpool->resolved_klass_at(holder_index);
          if (!Klass::cast(klass)->oop_is_instance())
            klass = SystemDictionary::Object_klass();  // e.g. array types dispatch via Object
          return instanceKlass::cast(klass)->method_at_vtable(f2_as_index());
        }
      }
      break;
    case Bytecodes::_invokehandle:
    case Bytecodes::_invokedynamic:
      return f2_as_vfinal_method();
    }
  }
  return NULL;  // unresolved, or resolution state not decodable here
}
 426 
 427 
 428 oop ConstantPoolCacheEntry::appendix_if_resolved(constantPoolHandle cpool) {
 429   if (is_f1_null() || !has_appendix())
 430     return NULL;
 431   return f1_appendix();
 432 }
 433 
 434 
 435 oop ConstantPoolCacheEntry::method_type_if_resolved(constantPoolHandle cpool) {
 436   if (is_f1_null() || !has_method_type())
 437     return NULL;
 438   return f1_as_instance();
 439 }
 440 
 441 
 442 class LocalOopClosure: public OopClosure {
 443  private:
 444   void (*_f)(oop*);
 445 
 446  public:
 447   LocalOopClosure(void f(oop*))        { _f = f; }
 448   virtual void do_oop(oop* o)          { _f(o); }
 449   virtual void do_oop(narrowOop *o)    { ShouldNotReachHere(); }
 450 };
 451 
 452 
 453 void ConstantPoolCacheEntry::oops_do(void f(oop*)) {
 454   LocalOopClosure blk(f);
 455   oop_iterate(&blk);
 456 }
 457 
 458 
// Visit each oop field of this entry.  _f1 always holds an oop (or NULL).
// _f2 holds an oop (the vfinal methodOop) only when is_vfinal() is set;
// otherwise it is an integer (vtable index or field offset) and is skipped.
void ConstantPoolCacheEntry::oop_iterate(OopClosure* blk) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    blk->do_oop((oop*)&_f2);
  }
}
 467 
 468 
 469 void ConstantPoolCacheEntry::oop_iterate_m(OopClosure* blk, MemRegion mr) {
 470   assert(in_words(size()) == 4, "check code below - may need adjustment");
 471   // field[1] is always oop or NULL
 472   if (mr.contains((oop *)&_f1)) blk->do_oop((oop*)&_f1);
 473   if (is_vfinal()) {
 474     if (mr.contains((oop *)&_f2)) blk->do_oop((oop*)&_f2);
 475   }
 476 }
 477 
 478 
// Serial mark-sweep marking: push this entry's oop fields (_f1 always;
// _f2 only when it holds a vfinal methodOop) onto the marking stack.
void ConstantPoolCacheEntry::follow_contents() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::mark_and_push((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::mark_and_push((oop*)&_f2);
  }
}
 487 
#ifndef SERIALGC
// Parallel-compact marking: same oop fields as the serial variant, but
// pushed via the per-thread compaction manager.
void ConstantPoolCacheEntry::follow_contents(ParCompactionManager* cm) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::mark_and_push(cm, (oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::mark_and_push(cm, (oop*)&_f2);
  }
}
#endif // SERIALGC
 498 
// Serial mark-sweep compaction: update this entry's oop fields (_f1 always;
// _f2 only when vfinal) to the objects' new post-compaction addresses.
void ConstantPoolCacheEntry::adjust_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::adjust_pointer((oop*)&_f2);
  }
}
 507 
#ifndef SERIALGC
// Parallel-compact pointer update: same oop fields as adjust_pointers.
void ConstantPoolCacheEntry::update_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::adjust_pointer((oop*)&_f2);
  }
}
#endif // SERIALGC
 518 
// RedefineClasses() API support:
// If this constantPoolCacheEntry refers to old_method then update it
// to refer to new_method.  Returns true iff an update was performed.
// *trace_name_printed records (across calls) whether the holder class
// name has already been emitted for tracing.
bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method,
       methodOop new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    if (f2_as_vfinal_method() == old_method) {
      // match old_method so need an update
      // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values
      _f2 = (intptr_t)new_method;
      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
        if (!(*trace_name_printed)) {
          // RC_TRACE_MESG macro has an embedded ResourceMark
          RC_TRACE_MESG(("adjust: name=%s",
            Klass::cast(old_method->method_holder())->external_name()));
          *trace_name_printed = true;
        }
        // RC_TRACE macro has an embedded ResourceMark
        RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
          new_method->name()->as_C_string(),
          new_method->signature()->as_C_string()));
      }

      return true;
    }

    // f1() is not used with virtual entries so bail out
    return false;
  }

  if ((oop)_f1 == NULL) {
    // NULL f1() means this is a virtual entry so bail out
    // We are assuming that the vtable index does not need change.
    return false;
  }

  if ((oop)_f1 == old_method) {
    // Non-virtual entry: f1 holds the methodOop directly; swap it.
    _f1 = new_method;
    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
      if (!(*trace_name_printed)) {
        // RC_TRACE_MESG macro has an embedded ResourceMark
        RC_TRACE_MESG(("adjust: name=%s",
          Klass::cast(old_method->method_holder())->external_name()));
        *trace_name_printed = true;
      }
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
        new_method->name()->as_C_string(),
        new_method->signature()->as_C_string()));
    }

    return true;
  }

  return false;
}
 577 
// RedefineClasses() helper: returns true iff this entry refers to a
// method whose holder is klass k (and is therefore a candidate for
// adjust_method_entry).
bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) {
  if (!is_method_entry()) {
    // not a method entry so not interesting by default
    return false;
  }

  methodOop m = NULL;
  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    m = f2_as_vfinal_method();
  } else if (is_f1_null()) {
    // NULL _f1 means this is a virtual entry so also not interesting
    return false;
  } else {
    oop f1 = _f1;  // _f1 is volatile
    if (!f1->is_method()) {
      // _f1 can also contain a klassOop for an interface
      return false;
    }
    m = f1_as_method();
  }

  assert(m != NULL && m->is_method(), "sanity check");
  if (m == NULL || !m->is_method() || m->method_holder() != k) {
    // robustness for above sanity checks or method is not in
    // the interesting class
    return false;
  }

  // the method is in the interesting class so the entry is interesting
  return true;
}
 610 
// Debug printing: dump the entry's index, raw words, and decoded
// bytecode/index fields.  Secondary entries print their main index instead.
void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
  // print separator
  if (index == 0) st->print_cr("                 -------------");
  // print entry
  st->print("%3d  ("PTR_FORMAT")  ", index, (intptr_t)this);
  if (is_secondary_entry())
    st->print_cr("[%5d|secondary]", main_entry_index());
  else
    st->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)(oop)_f1);
  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_f2);
  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_flags);
  st->print_cr("                 -------------");
}
 625 
// Verification hook called via NOT_PRODUCT(verify(tty)); intentionally a
// no-op for now.
void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}
 629 
 630 // Implementation of ConstantPoolCache
 631 
// Initialize every entry of the cache from the rewriter's inverse index
// map.  Entries tagged with _secondary_entry_tag become secondary entries
// pointing at their main entry; all others record their original constant
// pool index.
void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
  assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
  for (int i = 0; i < length(); i++) {
    ConstantPoolCacheEntry* e = entry_at(i);
    int original_index = inverse_index_map[i];
    if ((original_index & Rewriter::_secondary_entry_tag) != 0) {
      // Strip the tag to recover the main entry's index.
      int main_index = (original_index - Rewriter::_secondary_entry_tag);
      assert(!entry_at(main_index)->is_secondary_entry(), "valid main index");
      e->initialize_secondary_entry(main_index);
    } else {
      e->initialize_entry(original_index);
    }
    assert(entry_at(i) == e, "sanity");
  }
}
 647 
 648 // RedefineClasses() API support:
 649 // If any entry of this constantPoolCache points to any of
 650 // old_methods, replace it with the corresponding new_method.
 651 void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods,
 652                                                      int methods_length, bool * trace_name_printed) {
 653 
 654   if (methods_length == 0) {
 655     // nothing to do if there are no methods
 656     return;
 657   }
 658 
 659   // get shorthand for the interesting class
 660   klassOop old_holder = old_methods[0]->method_holder();
 661 
 662   for (int i = 0; i < length(); i++) {
 663     if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
 664       // skip uninteresting methods
 665       continue;
 666     }
 667 
 668     // The constantPoolCache contains entries for several different
 669     // things, but we only care about methods. In fact, we only care
 670     // about methods in the same class as the one that contains the
 671     // old_methods. At this point, we have an interesting entry.
 672 
 673     for (int j = 0; j < methods_length; j++) {
 674       methodOop old_method = old_methods[j];
 675       methodOop new_method = new_methods[j];
 676 
 677       if (entry_at(i)->adjust_method_entry(old_method, new_method,
 678           trace_name_printed)) {
 679         // current old_method matched this entry and we updated it so
 680         // break out and get to the next interesting entry if there one
 681         break;
 682       }
 683     }
 684   }
 685 }