1 /* 2 * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/rewriter.hpp"
#include "memory/universe.inline.hpp"
#include "oops/cpCacheOop.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/handles.inline.hpp"


// Implementation of ConstantPoolCacheEntry

// Initialize a main (non-secondary) entry: the original constant pool
// index goes in the low 16 bits of _indices; the upper bytes (the two
// resolved-bytecode slots) start out zero, i.e. unresolved.
void ConstantPoolCacheEntry::initialize_entry(int index) {
  assert(0 < index && index < 0x10000, "sanity check");
  _indices = index;
  assert(constant_pool_index() == index, "");
}

// Initialize a secondary entry: the cache index of its main entry is
// stored shifted into the upper half of _indices, leaving the low
// 16 bits zero.
void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
  assert(0 <= main_index && main_index < 0x10000, "sanity check");
  _indices = (main_index << 16);
  assert(main_entry_index() == main_index, "");
}

// Pack the TosState and the five boolean attributes into the _flags
// encoding: state first, then one bit each for final, vfinal, volatile,
// interface-method and method, all shifted up past hotSwapBit.  The
// result is OR'ed with the current _flags so bits already set in the
// low part of the word (e.g. parameter size or field index) survive.
int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
                    bool is_vfinal, bool is_volatile,
                    bool is_method_interface, bool is_method) {
  int f = state;

  assert( state < number_of_states, "Invalid state in as_flags");

  f <<= 1;
  if (is_final) f |= 1;
  f <<= 1;
  if (is_vfinal) f |= 1;
  f <<= 1;
  if (is_volatile) f |= 1;
  f <<= 1;
  if (is_method_interface) f |= 1;
  f <<= 1;
  if (is_method) f |= 1;
  f <<= ConstantPoolCacheEntry::hotSwapBit;
  // Preserve existing flag bit values
#ifdef ASSERT
  int old_state = ((_flags >> tosBits) & 0x0F);
  assert(old_state == 0 || old_state == state,
         "inconsistent cpCache flags state");
#endif
  return (_flags | f) ;
}

// Publish the first resolved bytecode (bits 16..23 of _indices).
// A release store is used so that any earlier stores to f1/f2 are
// visible before another thread can observe the non-zero bytecode
// (see the "Note that concurrent update..." comment below).
void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_1();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 16));
}

// Publish the second resolved bytecode (bits 24..31 of _indices).
// Same release-ordering rationale as set_bytecode_1.
void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) {
#ifdef ASSERT
  // Read once.
  volatile Bytecodes::Code c = bytecode_2();
  assert(c == 0 || c == code || code == 0, "update must be consistent");
#endif
  // Need to flush pending stores here before bytecode is written.
  OrderAccess::release_store_ptr(&_indices, _indices | ((u_char)code << 24));
}

// Atomically sets f1 if it is still NULL, otherwise it keeps the
// current value.
void ConstantPoolCacheEntry::set_f1_if_null_atomic(oop f1) {
  // Use barriers as in oop_store
  oop* f1_addr = (oop*) &_f1;
  update_barrier_set_pre(f1_addr, f1);
  void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL);
  bool success = (result == NULL);
  if (success) {
    // CAS installed our oop; complete the oop-store protocol with the
    // post barrier.  On failure a racing thread's value stays in place.
    update_barrier_set(f1_addr, f1);
  }
}

#ifdef ASSERT
// It is possible to have two different dummy methodOops created
// when the resolve code for invoke interface executes concurrently
// Hence the assertion below is weakened a bit for the invokeinterface
// case.
// (Debug-only helper: treats two methodOops as "the same" if they are
// identical or share a name or a signature.)
bool ConstantPoolCacheEntry::same_methodOop(oop cur_f1, oop f1) {
  return (cur_f1 == f1 || ((methodOop)cur_f1)->name() ==
         ((methodOop)f1)->name() || ((methodOop)cur_f1)->signature() ==
         ((methodOop)f1)->signature());
}
#endif

// Note that concurrent update of both bytecodes can leave one of them
// reset to zero. This is harmless; the interpreter will simply re-resolve
// the damaged entry. More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
                                       Bytecodes::Code put_code,
                                       KlassHandle field_holder,
                                       int orig_field_index,
                                       int field_offset,
                                       TosState field_type,
                                       bool is_final,
                                       bool is_volatile) {
  // Resolve this entry as a field access: f1 = holder klass,
  // f2 = field offset, flags = tos state/final/volatile plus the scaled
  // field index in the low bits.  The bytecodes are published last
  // (release stores) so readers never see a resolved bytecode with
  // stale f1/f2.
  set_f1(field_holder());
  set_f2(field_offset);
  // The field index is used by jvm/ti and is the index into fields() array
  // in holder instanceKlass. This is scaled by instanceKlass::next_offset.
  assert((orig_field_index % instanceKlass::next_offset) == 0, "wierd index");
  const int field_index = orig_field_index / instanceKlass::next_offset;
  assert(field_index <= field_index_mask,
         "field index does not fit in low flag bits");
  set_flags(as_flags(field_type, is_final, false, is_volatile, false, false) |
            (field_index & field_index_mask));
  set_bytecode_1(get_code);
  set_bytecode_2(put_code);
  NOT_PRODUCT(verify(tty));
}

// Inverse of the scaling done in set_field: recover the fields()-array
// offset used by JVM/TI from the index stored in the low flag bits.
int ConstantPoolCacheEntry::field_index() const {
  return (_flags & field_index_mask) * instanceKlass::next_offset;
}

// Resolve this entry as a method call.  Depending on invoke_code the
// entry stores either a vtable index (f2) or a direct methodOop
// (f1, or f2 when statically bound / vfinal), and the corresponding
// resolved bytecode slot(s) are published last.
void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
                                        methodHandle method,
                                        int vtable_index) {
  assert(!is_secondary_entry(), "");
  assert(method->interpreter_entry() != NULL, "should have been set at this point");
  assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache");
  bool change_to_virtual = (invoke_code == Bytecodes::_invokeinterface);

  int byte_no = -1;
  bool needs_vfinal_flag = false;
  switch (invoke_code) {
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
        if (method->can_be_statically_bound()) {
          // Statically bound: store the methodOop itself in f2 and mark
          // the entry vfinal so GC knows f2 holds an oop.
          set_f2((intptr_t)method());
          needs_vfinal_flag = true;
        } else {
          assert(vtable_index >= 0, "valid index");
          set_f2(vtable_index);
        }
        byte_no = 2;
        break;
    }

    case Bytecodes::_invokedynamic:  // similar to _invokevirtual
      if (TraceInvokeDynamic) {
        tty->print_cr("InvokeDynamic set_method%s method="PTR_FORMAT" index=%d",
                      (is_secondary_entry() ? " secondary" : ""),
                      (intptr_t)method(), vtable_index);
        method->print();
        this->print(tty, 0);
      }
      assert(method->can_be_statically_bound(), "must be a MH invoker method");
      assert(AllowTransitionalJSR292 || _f2 >= constantPoolOopDesc::CPCACHE_INDEX_TAG, "BSM index initialized");
      // SystemDictionary::find_method_handle_invoke only caches
      // methods which signature classes are on the boot classpath,
      // otherwise the newly created method is returned. To avoid
      // races in that case we store the first one coming in into the
      // cp-cache atomically if it's still unset.
      set_f1_if_null_atomic(method());
      needs_vfinal_flag = false;  // _f2 is not an oop
      assert(!is_vfinal(), "f2 not an oop");
      byte_no = 1;  // coordinate this with bytecode_number & is_resolved
      break;

    case Bytecodes::_invokespecial:
      // Preserve the value of the vfinal flag on invokevirtual bytecode
      // which may be shared with this constant pool cache entry.
      needs_vfinal_flag = is_resolved(Bytecodes::_invokevirtual) && is_vfinal();
      // fall through
    case Bytecodes::_invokestatic:
      set_f1(method());
      byte_no = 1;
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  // Flags carry the result tos state, the attribute bits and the callee's
  // parameter size (used by the interpreter to pop arguments).
  set_flags(as_flags(as_TosState(method->result_type()),
                     method->is_final_method(),
                     needs_vfinal_flag,
                     false,
                     change_to_virtual,
                     true) |
            method()->size_of_parameters());

  // Note: byte_no also appears in TemplateTable::resolve.
  if (byte_no == 1) {
    set_bytecode_1(invoke_code);
  } else if (byte_no == 2) {
    if (change_to_virtual) {
      // NOTE: THIS IS A HACK - BE VERY CAREFUL!!!
      //
      // Workaround for the case where we encounter an invokeinterface, but we
      // should really have an _invokevirtual since the resolved method is a
      // virtual method in java.lang.Object. This is a corner case in the spec
      // but is presumably legal. javac does not generate this code.
      //
      // We set bytecode_1() to _invokeinterface, because that is the
      // bytecode # used by the interpreter to see if it is resolved.
      // We set bytecode_2() to _invokevirtual.
      // See also interpreterRuntime.cpp. (8/25/2000)
      // Only set resolved for the invokeinterface case if method is public.
      // Otherwise, the method needs to be reresolved with caller for each
      // interface call.
      if (method->is_public()) set_bytecode_1(invoke_code);
      set_bytecode_2(Bytecodes::_invokevirtual);
    } else {
      set_bytecode_2(invoke_code);
    }
  } else {
    ShouldNotReachHere();
  }
  NOT_PRODUCT(verify(tty));
}


// Resolve this entry as a true interface call: f1 = interface klass,
// f2 = itable index; only bytecode_1 is set.
void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
  assert(!is_secondary_entry(), "");
  klassOop interf = method->method_holder();
  assert(instanceKlass::cast(interf)->is_interface(), "must be an interface");
  set_f1(interf);
  set_f2(index);
  set_flags(as_flags(as_TosState(method->result_type()), method->is_final_method(), false, false, false, true) | method()->size_of_parameters());
  set_bytecode_1(Bytecodes::_invokeinterface);
}


// Record the bootstrap-method cache index in f2, offset by
// CPCACHE_INDEX_TAG so a resolved value is distinguishable from the
// initial zero.  Main (non-secondary) invokedynamic entries only.
void ConstantPoolCacheEntry::initialize_bootstrap_method_index_in_cache(int bsm_cache_index) {
  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
  assert(_f2 == 0, "initialize once");
  assert(bsm_cache_index == (int)(u2)bsm_cache_index, "oob");
  set_f2(bsm_cache_index + constantPoolOopDesc::CPCACHE_INDEX_TAG);
}

// Inverse of initialize_bootstrap_method_index_in_cache: strip the tag
// back off f2 to recover the bootstrap-method cache index.
int ConstantPoolCacheEntry::bootstrap_method_index_in_cache() {
  assert(!is_secondary_entry(), "only for JVM_CONSTANT_InvokeDynamic main entry");
  intptr_t bsm_cache_index = (intptr_t) _f2 - constantPoolOopDesc::CPCACHE_INDEX_TAG;
  assert(bsm_cache_index == (intptr_t)(u2)bsm_cache_index, "oob");
  return (int) bsm_cache_index;
}

// Install the resolved CallSite for an invokedynamic secondary entry.
// f1 is set at most once (CAS); flags get the invoker's tos state and
// the argument size minus the implicit MH receiver.
void
ConstantPoolCacheEntry::set_dynamic_call(Handle call_site,
                                         methodHandle signature_invoker) {
  assert(is_secondary_entry(), "");
  int param_size = signature_invoker->size_of_parameters();
  assert(param_size >= 1, "method argument size must include MH.this");
  param_size -= 1;  // do not count MH.this; it is not stacked for invokedynamic
  if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
    // racing threads might be trying to install their own favorites
    set_f1(call_site());
  }
  bool is_final = true;
  assert(signature_invoker->is_final_method(), "is_final");
  set_flags(as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size);
  // do not do set_bytecode on a secondary CP cache entry
  //set_bytecode_1(Bytecodes::_invokedynamic);
}


// Adapter wrapping a plain function pointer as an OopClosure, so
// oops_do can reuse the oop_iterate logic below.
class LocalOopClosure: public OopClosure {
 private:
  void (*_f)(oop*);

 public:
  LocalOopClosure(void f(oop*))        { _f = f; }
  virtual void do_oop(oop* o)          { _f(o); }
  virtual void do_oop(narrowOop *o)    { ShouldNotReachHere(); }
};


void ConstantPoolCacheEntry::oops_do(void f(oop*)) {
  LocalOopClosure blk(f);
  oop_iterate(&blk);
}


// GC support: visit the entry's oop fields.  _f1 is always oop or NULL;
// _f2 holds an oop only when the entry is vfinal.
void ConstantPoolCacheEntry::oop_iterate(OopClosure* blk) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    blk->do_oop((oop*)&_f2);
  }
}


// As oop_iterate, but only visits fields that lie inside MemRegion mr.
void ConstantPoolCacheEntry::oop_iterate_m(OopClosure* blk, MemRegion mr) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  if (mr.contains((oop *)&_f1)) blk->do_oop((oop*)&_f1);
  if (is_vfinal()) {
    if (mr.contains((oop *)&_f2)) blk->do_oop((oop*)&_f2);
  }
}


// Serial mark-sweep: mark and push the entry's oop fields.
void ConstantPoolCacheEntry::follow_contents() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::mark_and_push((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::mark_and_push((oop*)&_f2);
  }
}

#ifndef SERIALGC
// Parallel compact: same shape as follow_contents() above.
void ConstantPoolCacheEntry::follow_contents(ParCompactionManager* cm) {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::mark_and_push(cm, (oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::mark_and_push(cm, (oop*)&_f2);
  }
}
#endif // SERIALGC

// Serial mark-sweep: update the entry's oop fields after objects move.
void ConstantPoolCacheEntry::adjust_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  MarkSweep::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    MarkSweep::adjust_pointer((oop*)&_f2);
  }
}

#ifndef SERIALGC
// Parallel compact: same shape as adjust_pointers() above.
void ConstantPoolCacheEntry::update_pointers() {
  assert(in_words(size()) == 4, "check code below - may need adjustment");
  // field[1] is always oop or NULL
  PSParallelCompact::adjust_pointer((oop*)&_f1);
  if (is_vfinal()) {
    PSParallelCompact::adjust_pointer((oop*)&_f2);
  }
}
#endif // SERIALGC

// RedefineClasses() API support:
// If this constantPoolCacheEntry refers to old_method then update it
// to refer to new_method.
376 bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method, 377 methodOop new_method, bool * trace_name_printed) { 378 379 if (is_vfinal()) { 380 // virtual and final so f2() contains method ptr instead of vtable index 381 if (f2() == (intptr_t)old_method) { 382 // match old_method so need an update 383 _f2 = (intptr_t)new_method; 384 if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { 385 if (!(*trace_name_printed)) { 386 // RC_TRACE_MESG macro has an embedded ResourceMark 387 RC_TRACE_MESG(("adjust: name=%s", 388 Klass::cast(old_method->method_holder())->external_name())); 389 *trace_name_printed = true; 390 } 391 // RC_TRACE macro has an embedded ResourceMark 392 RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)", 393 new_method->name()->as_C_string(), 394 new_method->signature()->as_C_string())); 395 } 396 397 return true; 398 } 399 400 // f1() is not used with virtual entries so bail out 401 return false; 402 } 403 404 if ((oop)_f1 == NULL) { 405 // NULL f1() means this is a virtual entry so bail out 406 // We are assuming that the vtable index does not need change. 
407 return false; 408 } 409 410 if ((oop)_f1 == old_method) { 411 _f1 = new_method; 412 if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) { 413 if (!(*trace_name_printed)) { 414 // RC_TRACE_MESG macro has an embedded ResourceMark 415 RC_TRACE_MESG(("adjust: name=%s", 416 Klass::cast(old_method->method_holder())->external_name())); 417 *trace_name_printed = true; 418 } 419 // RC_TRACE macro has an embedded ResourceMark 420 RC_TRACE(0x00400000, ("cpc entry update: %s(%s)", 421 new_method->name()->as_C_string(), 422 new_method->signature()->as_C_string())); 423 } 424 425 return true; 426 } 427 428 return false; 429 } 430 431 bool ConstantPoolCacheEntry::is_interesting_method_entry(klassOop k) { 432 if (!is_method_entry()) { 433 // not a method entry so not interesting by default 434 return false; 435 } 436 437 methodOop m = NULL; 438 if (is_vfinal()) { 439 // virtual and final so _f2 contains method ptr instead of vtable index 440 m = (methodOop)_f2; 441 } else if ((oop)_f1 == NULL) { 442 // NULL _f1 means this is a virtual entry so also not interesting 443 return false; 444 } else { 445 if (!((oop)_f1)->is_method()) { 446 // _f1 can also contain a klassOop for an interface 447 return false; 448 } 449 m = (methodOop)_f1; 450 } 451 452 assert(m != NULL && m->is_method(), "sanity check"); 453 if (m == NULL || !m->is_method() || m->method_holder() != k) { 454 // robustness for above sanity checks or method is not in 455 // the interesting class 456 return false; 457 } 458 459 // the method is in the interesting class so the entry is interesting 460 return true; 461 } 462 463 void ConstantPoolCacheEntry::print(outputStream* st, int index) const { 464 // print separator 465 if (index == 0) tty->print_cr(" -------------"); 466 // print entry 467 tty->print("%3d ("PTR_FORMAT") ", index, (intptr_t)this); 468 if (is_secondary_entry()) 469 tty->print_cr("[%5d|secondary]", main_entry_index()); 470 else 471 tty->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), 
constant_pool_index()); 472 tty->print_cr(" [ "PTR_FORMAT"]", (intptr_t)(oop)_f1); 473 tty->print_cr(" [ "PTR_FORMAT"]", (intptr_t)_f2); 474 tty->print_cr(" [ "PTR_FORMAT"]", (intptr_t)_flags); 475 tty->print_cr(" -------------"); 476 } 477 478 void ConstantPoolCacheEntry::verify(outputStream* st) const { 479 // not implemented yet 480 } 481 482 // Implementation of ConstantPoolCache 483 484 void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) { 485 assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache"); 486 for (int i = 0; i < length(); i++) { 487 ConstantPoolCacheEntry* e = entry_at(i); 488 int original_index = inverse_index_map[i]; 489 if ((original_index & Rewriter::_secondary_entry_tag) != 0) { 490 int main_index = (original_index - Rewriter::_secondary_entry_tag); 491 assert(!entry_at(main_index)->is_secondary_entry(), "valid main index"); 492 e->initialize_secondary_entry(main_index); 493 } else { 494 e->initialize_entry(original_index); 495 } 496 assert(entry_at(i) == e, "sanity"); 497 } 498 } 499 500 // RedefineClasses() API support: 501 // If any entry of this constantPoolCache points to any of 502 // old_methods, replace it with the corresponding new_method. 503 void constantPoolCacheOopDesc::adjust_method_entries(methodOop* old_methods, methodOop* new_methods, 504 int methods_length, bool * trace_name_printed) { 505 506 if (methods_length == 0) { 507 // nothing to do if there are no methods 508 return; 509 } 510 511 // get shorthand for the interesting class 512 klassOop old_holder = old_methods[0]->method_holder(); 513 514 for (int i = 0; i < length(); i++) { 515 if (!entry_at(i)->is_interesting_method_entry(old_holder)) { 516 // skip uninteresting methods 517 continue; 518 } 519 520 // The constantPoolCache contains entries for several different 521 // things, but we only care about methods. 
In fact, we only care 522 // about methods in the same class as the one that contains the 523 // old_methods. At this point, we have an interesting entry. 524 525 for (int j = 0; j < methods_length; j++) { 526 methodOop old_method = old_methods[j]; 527 methodOop new_method = new_methods[j]; 528 529 if (entry_at(i)->adjust_method_entry(old_method, new_method, 530 trace_name_printed)) { 531 // current old_method matched this entry and we updated it so 532 // break out and get to the next interesting entry if there one 533 break; 534 } 535 } 536 } 537 }