/*
 * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_cpCacheOop.cpp.incl"


// Implementation of ConstantPoolCacheEntry

void ConstantPoolCacheEntry::initialize_entry(int index) {
  assert(0 < index && index < 0x10000, "sanity check");
  _indices = index;
  assert(constant_pool_index() == index, "");
}

void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
  assert(0 <= main_index && main_index < 0x10000, "sanity check");
  _indices = (main_index << 16);
  assert(main_entry_index() == main_index, "");
}
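
// Note (inferred from the two initializers above): a main entry keeps its
// original constant pool index in the low 16 bits of _indices, while a
// secondary entry keeps its main entry's index in the upper 16 bits and
// leaves the low half zero.  Since a main entry's index is asserted to be
// non-zero, a zero low half is presumably what distinguishes a secondary
// entry.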

int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
                    bool is_vfinal, bool is_volatile,
                    bool is_method_interface, bool is_method) {
  int f = state;

  assert( state < number_of_states, "Invalid state in as_flags");

  f <<= 1;
  if (is_final) f |= 1;
  f <<= 1;
  if (is_vfinal) f |= 1;
  f <<= 1;
  if (is_volatile) f |= 1;
  f <<= 1;
  if (is_method_interface) f |= 1;
  f <<= 1;
  if (is_method) f |= 1;
  f <<= ConstantPoolCacheEntry::hotSwapBit;
  // Preserve existing flag bit values
#ifdef ASSERT
  int old_state = ((_flags >> tosBits) & 0x0F);
  assert(old_state == 0 || old_state == state,
         "inconsistent cpCache flags state");
#endif
  return (_flags | f);
}
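
// Sketch of the value built above (the exact bit positions are defined by the
// enum in cpCacheOop.hpp): starting from the TosState, each shift/or appends
// one boolean, so from high to low the packed order is
//   [ state | is_final | is_vfinal | is_volatile | is_method_interface | is_method ]
// and the final shift by hotSwapBit leaves the low-order bits free for the
// field index or parameter size that callers OR in afterwards.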

void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 16));
}

void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 24));
}
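
// Taken together with initialize_entry(), _indices is laid out so that the
// low 16 bits hold the constant pool index, bits 16..23 hold bytecode_1, and
// bits 24..31 hold bytecode_2.  The release_store above publishes a bytecode
// only after the other entry fields are visible; see the comment ahead of
// set_field().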

#ifdef ASSERT
// It is possible for two different dummy methodOops to be created when the
// resolve code for invokeinterface executes concurrently.  Hence the
// assertion below is weakened a bit for the invokeinterface case.
bool ConstantPoolCacheEntry::same_methodOop(oop cur_f1, oop f1) {
  return (cur_f1 == f1 ||
          ((methodOop)cur_f1)->name()      == ((methodOop)f1)->name() ||
          ((methodOop)cur_f1)->signature() == ((methodOop)f1)->signature());
}
#endif

// Note that concurrent update of both bytecodes can leave one of them
// reset to zero.  This is harmless; the interpreter will simply re-resolve
// the damaged entry.  More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       KlassHandle field_holder,
                                       int orig_field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile) {
  set_f1(field_holder());
  set_f2(field_offset);
  // The field index is used by jvm/ti and is the index into the fields() array
  // in the holder instanceKlass.  This is scaled by instanceKlass::next_offset.
  assert((orig_field_index % instanceKlass::next_offset) == 0, "weird index");
  const int field_index = orig_field_index / instanceKlass::next_offset;
  assert(field_index <= field_index_mask,
         "field index does not fit in low flag bits");
  set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) |
            (field_index & field_index_mask));
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}

int ConstantPoolCacheEntry::field_index() const {
  return (_flags & field_index_mask) * instanceKlass::next_offset;
}
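
// field_index() above simply undoes the scaling applied in set_field(): the
// low flag bits hold orig_field_index / instanceKlass::next_offset, so
// multiplying by next_offset recovers the original offset into the fields()
// array.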

void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {
  assert(!is_secondary_entry(), "");
  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");
  bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface);

  int byte_no = -1;
  bool needs_vfinal_flag = false;
  switch (invoke_code) {
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
        if (method->can_be_statically_bound()) {
          set_f2((intptr_t)method());
          needs_vfinal_flag = true;
        } else {
          assert(vtable_index >= 0, "valid index");
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
    }

    case Bytecodes::_invokedynamic:  // similar to _invokevirtual
      if (TraceInvokeDynamic) {
        tty->print_cr("InvokeDynamic set_method%s method="PTR_FORMAT" index=%d",
                      (is_secondary_entry() ? " secondary" : ""),
                      (intptr_t)method(), vtable_index);
        method->print();
        this->print(tty, 0);
      }
      assert(method->can_be_statically_bound(), "must be a MH invoker method");
      assert(AllowTransitionalJSR292 || _f2 >= constantPoolOopDesc::CPCACHE_INDEX_TAG, "BSM index initialized");
      set_f1(method());
      needs_vfinal_flag = false;  // _f2 is not an oop
      assert(!is_vfinal(), "f2 not an oop");
      byte_no = 1;  // coordinate this with bytecode_number & is_resolved
      break;

    case Bytecodes::_invokespecial:
      // Preserve the value of the vfinal flag on invokevirtual bytecode
      // which may be shared with this constant pool cache entry.
      needs_vfinal_flag = is_resolved(Bytecodes::_invokevirtual) && is_vfinal();
      // fall through
    case Bytecodes::_invokestatic:
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  set_flags(as_flags(as_TosState(method->result_type()),
                     method->is_final_method(),
                     needs_vfinal_flag,
                     false,
                     change_to_virtual,
                     true) |
            method()->size_of_parameters());

  // Note:  byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2) {
    if (change_to_virtual) {
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if the method is public.
      // Otherwise, the method needs to be re-resolved with the caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
      set_bytecode_2(Bytecodes::_invokevirtual);
    } else {
      set_bytecode_2(invoke_code);
    }
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}


void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
  assert(!is_secondary_entry(), "");
  klassOop interf = method->method_holder();
  assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
  set_f1(interf);
  set_f2(index);
  set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(),
                     false, false, false, true) |
            method()->size_of_parameters());
  set_bytecode_1(Bytecodes::_invokeinterface);
}


void ConstantPoolCacheEntry::initialize_bootstrap_method_index_in_cache(int bsm_cache_index) {
  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
  assert(_f2 == 0, "initialize once");
  assert(bsm_cache_index == (int)(u2)bsm_cache_index, "oob");
  set_f2(bsm_cache_index + constantPoolOopDesc::CPCACHE_INDEX_TAG);
}

int ConstantPoolCacheEntry::bootstrap_method_index_in_cache() {
  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
  intptr_t bsm_cache_index = (intptr_t) _f2 - constantPoolOopDesc::CPCACHE_INDEX_TAG;
  assert(bsm_cache_index == (intptr_t)(u2)bsm_cache_index, "oob");
  return (int) bsm_cache_index;
}
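
// bootstrap_method_index_in_cache() above is the inverse of
// initialize_bootstrap_method_index_in_cache(): the CPCACHE_INDEX_TAG added
// when the index was stored into _f2 is stripped back off before the raw
// bootstrap-method cache index is returned.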

void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site,
                                              methodHandle signature_invoker) {
  assert(is_secondary_entry(), "");
  int param_size = signature_invoker->size_of_parameters();
  assert(param_size >= 1, "method argument size must include MH.this");
  param_size -= 1;              // do not count MH.this; it is not stacked for invokedynamic
  if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
    // racing threads might be trying to install their own favorites
    set_f1(call_site());
  }
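  // (Inference from the code above: the cmpxchg lets exactly one of the racing
  // threads install its call site; the winner then repeats the store through
  // set_f1() so that the regular oop-store path runs, while losers keep
  // whatever value the winner installed.)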
  bool is_final = true;
  assert(signature_invoker->is_final_method(), "is_final");
  set_flags(as_flags(as_TosState(signature_invoker->result_type()), is_final,
                     false, false, false, true) | param_size);
  // do not do set_bytecode on a secondary CP cache entry
  //set_bytecode_1(Bytecodes::_invokedynamic);
}


class LocalOopClosure: public OopClosure {
 private:
  void (*_f)(oop*);

 public:
  LocalOopClosure(void f(oop*))        { _f = f; }
  virtual void do_oop(oop* o)          { _f(o); }
  virtual void do_oop(narrowOop *o)    { ShouldNotReachHere(); }
};


void ConstantPoolCacheEntry::oops_do(void f(oop*)) {
  LocalOopClosure blk(f);
  oop_iterate(&blk);
}


void ConstantPoolCacheEntry::oop_iterate(OopClosure* blk) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    blk->do_oop((oop*)&_f2);
  }
}
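
// Note: as the setters earlier in this file show, _f2 is only an oop (the
// statically bound methodOop) when the vfinal flag is set; otherwise it holds
// a vtable index, a field offset, an interface method index, or a tagged
// bootstrap-method cache index, none of which the GC should visit.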


void ConstantPoolCacheEntry::oop_iterate_m(OopClosure* blk, MemRegion mr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  if (mr.contains((oop *)&_f1)) blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    if (mr.contains((oop *)&_f2)) blk->do_oop((oop*)&_f2);
  }
}


void ConstantPoolCacheEntry::follow_contents() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::mark_and_push((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::mark_and_push((oop*)&_f2);
  }
}

#ifndef SERIALGC
void ConstantPoolCacheEntry::follow_contents(ParCompactionManager* cm) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::mark_and_push(cm, (oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::mark_and_push(cm, (oop*)&_f2);
  }
}
#endif // SERIALGC

void ConstantPoolCacheEntry::adjust_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::adjust_pointer((oop*)&_f2);
  }
}

#ifndef SERIALGC
void ConstantPoolCacheEntry::update_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::adjust_pointer((oop*)&_f2);
  }
}

void ConstantPoolCacheEntry::update_pointers(HeapWord* beg_addr,
                                             HeapWord* end_addr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1, beg_addr, end_addr);
  if (is_vfinal()) {
    PSParallelCompact::adjust_pointer((oop*)&_f2, beg_addr, end_addr);
  }
}
#endif // SERIALGC

// RedefineClasses() API support:
// If this constantPoolCacheEntry refers to old_method then update it
// to refer to new_method.
bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method,
       methodOop new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so f2() contains method ptr instead of vtable index
    if (f2() == (intptr_t)old_method) {
      // match old_method so need an update
      _f2 = (intptr_t)new_method;
      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
        if (!(*trace_name_printed)) {
          // RC_TRACE_MESG macro has an embedded ResourceMark
          RC_TRACE_MESG(("adjust: name=%s",
            Klass::cast(old_method->method_holder())->external_name()));
          *trace_name_printed = true;
        }
        // RC_TRACE macro has an embedded ResourceMark
        RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
          new_method->name()->as_C_string(),
          new_method->signature()->as_C_string()));
      }

      return true;
    }

    // f1() is not used with virtual entries so bail out
    return false;
  }

  if ((oop)_f1 == NULL) {
    // NULL f1() means this is a virtual entry so bail out
    // We are assuming that the vtable index does not need to change.
    return false;
  }

  if ((oop)_f1 == old_method) {
    _f1 = new_method;
    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
      if (!(*trace_name_printed)) {
        // RC_TRACE_MESG macro has an embedded ResourceMark
        RC_TRACE_MESG(("adjust: name=%s",
          Klass::cast(old_method->method_holder())->external_name()));
        *trace_name_printed = true;
      }
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
        new_method->name()->as_C_string(),
        new_method->signature()->as_C_string()));
    }

    return true;
  }

  return false;
}

bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) {
  if (!is_method_entry()) {
    // not a method entry so not interesting by default
    return false;
  }

  methodOop m = NULL;
  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    m = (methodOop)_f2;
  } else if ((oop)_f1 == NULL) {
    // NULL _f1 means this is a virtual entry so also not interesting
    return false;
  } else {
    if (!((oop)_f1)->is_method()) {
      // _f1 can also contain a klassOop for an interface
      return false;
    }
    m = (methodOop)_f1;
  }

  assert(m != NULL && m->is_method(), "sanity check");
  if (m == NULL || !m->is_method() || m->method_holder() != k) {
    // robustness for above sanity checks or method is not in
    // the interesting class
    return false;
  }

  // the method is in the interesting class so the entry is interesting
  return true;
}

void ConstantPoolCacheEntry::print(outputStream* st, int index) const {
  // print separator
  if (index == 0) st->print_cr("                 -------------");
  // print entry
  st->print("%3d  ("PTR_FORMAT")  ", index, (intptr_t)this);
  if (is_secondary_entry())
    st->print_cr("[%5d|secondary]", main_entry_index());
  else
    st->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), constant_pool_index());
  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)(oop)_f1);
  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_f2);
  st->print_cr("                 [   "PTR_FORMAT"]", (intptr_t)_flags);
  st->print_cr("                 -------------");
}

void ConstantPoolCacheEntry::verify(outputStream* st) const {
  // not implemented yet
}

// Implementation of ConstantPoolCache

void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
  assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
  for (int i = 0; i < length(); i++) {
    ConstantPoolCacheEntry* e = entry_at(i);
    int original_index = inverse_index_map[i];
    if ((original_index & Rewriter::_secondary_entry_tag) != 0) {
      int main_index = (original_index - Rewriter::_secondary_entry_tag);
      assert(!entry_at(main_index)->is_secondary_entry(), "valid main index");
      e->initialize_secondary_entry(main_index);
    } else {
      e->initialize_entry(original_index);
    }
    assert(entry_at(i) == e, "sanity");
  }
}
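
// Note: inverse_index_map is produced by the Rewriter and maps each cache slot
// back to the constant pool index it was created for (or, when tagged with
// Rewriter::_secondary_entry_tag, to the main entry that a secondary entry
// hangs off of), so after the loop above every entry records where it came
// from.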

// RedefineClasses() API support:
// If any entry of this constantPoolCache points to any of
// old_methods, replace it with the corresponding new_method.
void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods,
                                                     int methods_length, bool * trace_name_printed) {

  if (methods_length == 0) {
    // nothing to do if there are no methods
    return;
  }

  // get shorthand for the interesting class
  klassOop old_holder = old_methods[0]->method_holder();

  for (int i = 0; i < length(); i++) {
    if (!entry_at(i)->is_interesting_method_entry(old_holder)) {
      // skip uninteresting methods
      continue;
    }

    // The constantPoolCache contains entries for several different
    // things, but we only care about methods. In fact, we only care
    // about methods in the same class as the one that contains the
    // old_methods. At this point, we have an interesting entry.

    for (int j = 0; j < methods_length; j++) {
      methodOop old_method = old_methods[j];
      methodOop new_method = new_methods[j];

      if (entry_at(i)->adjust_method_entry(old_method, new_method,
          trace_name_printed)) {
        // current old_method matched this entry and we updated it so
        // break out and get to the next interesting entry if there is one
        break;
      }
    }
  }
}