/*
 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/resolutionErrors.hpp" 27 #include "interpreter/bytecodeStream.hpp" 28 #include "interpreter/bytecodes.hpp" 29 #include "interpreter/interpreter.hpp" 30 #include "interpreter/rewriter.hpp" 31 #include "logging/log.hpp" 32 #include "memory/metadataFactory.hpp" 33 #include "memory/metaspaceClosure.hpp" 34 #include "memory/resourceArea.hpp" 35 #include "memory/universe.inline.hpp" 36 #include "oops/access.inline.hpp" 37 #include "oops/cpCache.hpp" 38 #include "oops/objArrayOop.inline.hpp" 39 #include "oops/oop.inline.hpp" 40 #include "prims/methodHandles.hpp" 41 #include "runtime/atomic.hpp" 42 #include "runtime/handles.inline.hpp" 43 #include "runtime/orderAccess.inline.hpp" 44 #include "utilities/macros.hpp" 45 46 // Implementation of ConstantPoolCacheEntry 47 48 void ConstantPoolCacheEntry::initialize_entry(int index) { 49 assert(0 < index && index < 0x10000, "sanity check"); 50 _indices = index; 51 _f1 = NULL; 52 _f2 = _flags = 0; 53 assert(constant_pool_index() == index, ""); 54 } 55 56 void ConstantPoolCacheEntry::verify_just_initialized(bool f2_used) { 57 assert((_indices & (~cp_index_mask)) == 0, "sanity"); 58 assert(_f1 == NULL, "sanity"); 59 assert(_flags == 0, "sanity"); 60 if (!f2_used) { 61 assert(_f2 == 0, "sanity"); 62 } 63 } 64 65 void ConstantPoolCacheEntry::reinitialize(bool f2_used) { 66 _indices &= cp_index_mask; 67 _f1 = NULL; 68 _flags = 0; 69 if (!f2_used) { 70 _f2 = 0; 71 } 72 } 73 74 int ConstantPoolCacheEntry::make_flags(TosState state, 75 int option_bits, 76 int field_index_or_method_params) { 77 assert(state < number_of_states, "Invalid state in make_flags"); 78 int f = ((int)state << tos_state_shift) | option_bits | field_index_or_method_params; 79 // Preserve existing flag bit values 80 // The low bits are a field offset, or else the method parameter size. 
81 #ifdef ASSERT 82 TosState old_state = flag_state(); 83 assert(old_state == (TosState)0 || old_state == state, 84 "inconsistent cpCache flags state"); 85 #endif 86 return (_flags | f) ; 87 } 88 89 void ConstantPoolCacheEntry::set_bytecode_1(Bytecodes::Code code) { 90 #ifdef ASSERT 91 // Read once. 92 volatile Bytecodes::Code c = bytecode_1(); 93 assert(c == 0 || c == code || code == 0, "update must be consistent"); 94 #endif 95 // Need to flush pending stores here before bytecode is written. 96 OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_1_shift)); 97 } 98 99 void ConstantPoolCacheEntry::set_bytecode_2(Bytecodes::Code code) { 100 #ifdef ASSERT 101 // Read once. 102 volatile Bytecodes::Code c = bytecode_2(); 103 assert(c == 0 || c == code || code == 0, "update must be consistent"); 104 #endif 105 // Need to flush pending stores here before bytecode is written. 106 OrderAccess::release_store(&_indices, _indices | ((u_char)code << bytecode_2_shift)); 107 } 108 109 // Sets f1, ordering with previous writes. 110 void ConstantPoolCacheEntry::release_set_f1(Metadata* f1) { 111 assert(f1 != NULL, ""); 112 OrderAccess::release_store(&_f1, f1); 113 } 114 115 void ConstantPoolCacheEntry::set_indy_resolution_failed() { 116 OrderAccess::release_store(&_flags, _flags | (1 << indy_resolution_failed_shift)); 117 } 118 119 // Note that concurrent update of both bytecodes can leave one of them 120 // reset to zero. This is harmless; the interpreter will simply re-resolve 121 // the damaged entry. More seriously, the memory synchronization is needed 122 // to flush other fields (f1, f2) completely to memory before the bytecodes 123 // are updated, lest other processors see a non-zero bytecode but zero f1/f2. 
124 void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code, 125 Bytecodes::Code put_code, 126 Klass* field_holder, 127 int field_index, 128 int field_offset, 129 TosState field_type, 130 bool is_final, 131 bool is_volatile, 132 bool is_flatten, 133 Klass* root_klass) { 134 set_f1(field_holder); 135 set_f2(field_offset); 136 assert((field_index & field_index_mask) == field_index, 137 "field index does not fit in low flag bits"); 138 set_field_flags(field_type, 139 ((is_volatile ? 1 : 0) << is_volatile_shift) | 140 ((is_final ? 1 : 0) << is_final_shift) | 141 ((is_flatten ? 1 : 0) << is_flatten_field), 142 field_index); 143 set_bytecode_1(get_code); 144 set_bytecode_2(put_code); 145 NOT_PRODUCT(verify(tty)); 146 } 147 148 void ConstantPoolCacheEntry::set_parameter_size(int value) { 149 // This routine is called only in corner cases where the CPCE is not yet initialized. 150 // See AbstractInterpreter::deopt_continue_after_entry. 151 assert(_flags == 0 || parameter_size() == 0 || parameter_size() == value, 152 "size must not change: parameter_size=%d, value=%d", parameter_size(), value); 153 // Setting the parameter size by itself is only safe if the 154 // current value of _flags is 0, otherwise another thread may have 155 // updated it and we don't want to overwrite that value. Don't 156 // bother trying to update it once it's nonzero but always make 157 // sure that the final parameter size agrees with what was passed. 
158 if (_flags == 0) { 159 intx newflags = (value & parameter_size_mask); 160 Atomic::cmpxchg(newflags, &_flags, (intx)0); 161 } 162 guarantee(parameter_size() == value, 163 "size must not change: parameter_size=%d, value=%d", parameter_size(), value); 164 } 165 166 void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code, 167 const methodHandle& method, 168 int vtable_index, 169 bool sender_is_interface) { 170 bool is_vtable_call = (vtable_index >= 0); // FIXME: split this method on this boolean 171 assert(method->interpreter_entry() != NULL, "should have been set at this point"); 172 assert(!method->is_obsolete(), "attempt to write obsolete method to cpCache"); 173 174 int byte_no = -1; 175 bool change_to_virtual = false; 176 177 switch (invoke_code) { 178 case Bytecodes::_invokeinterface: 179 // We get here from InterpreterRuntime::resolve_invoke when an invokeinterface 180 // instruction somehow links to a non-interface method (in Object). 181 // In that case, the method has no itable index and must be invoked as a virtual. 182 // Set a flag to keep track of this corner case. 183 assert(method->is_public(), "Calling non-public method in Object with invokeinterface"); 184 change_to_virtual = true; 185 186 // ...and fall through as if we were handling invokevirtual: 187 case Bytecodes::_invokevirtual: 188 { 189 if (!is_vtable_call) { 190 assert(method->can_be_statically_bound(), ""); 191 // set_f2_as_vfinal_method checks if is_vfinal flag is true. 192 set_method_flags(as_TosState(method->result_type()), 193 ( 1 << is_vfinal_shift) | 194 ((method->is_final_method() ? 1 : 0) << is_final_shift) | 195 ((change_to_virtual ? 
1 : 0) << is_forced_virtual_shift), 196 method()->size_of_parameters()); 197 set_f2_as_vfinal_method(method()); 198 } else { 199 assert(!method->can_be_statically_bound(), ""); 200 assert(vtable_index >= 0, "valid index"); 201 assert(!method->is_final_method(), "sanity"); 202 set_method_flags(as_TosState(method->result_type()), 203 ((change_to_virtual ? 1 : 0) << is_forced_virtual_shift), 204 method()->size_of_parameters()); 205 set_f2(vtable_index); 206 } 207 byte_no = 2; 208 break; 209 } 210 211 case Bytecodes::_invokespecial: 212 case Bytecodes::_invokestatic: 213 assert(!is_vtable_call, ""); 214 // Note: Read and preserve the value of the is_vfinal flag on any 215 // invokevirtual bytecode shared with this constant pool cache entry. 216 // It is cheap and safe to consult is_vfinal() at all times. 217 // Once is_vfinal is set, it must stay that way, lest we get a dangling oop. 218 set_method_flags(as_TosState(method->result_type()), 219 ((is_vfinal() ? 1 : 0) << is_vfinal_shift) | 220 ((method->is_final_method() ? 1 : 0) << is_final_shift), 221 method()->size_of_parameters()); 222 set_f1(method()); 223 byte_no = 1; 224 break; 225 default: 226 ShouldNotReachHere(); 227 break; 228 } 229 230 // Note: byte_no also appears in TemplateTable::resolve. 231 if (byte_no == 1) { 232 assert(invoke_code != Bytecodes::_invokevirtual && 233 invoke_code != Bytecodes::_invokeinterface, ""); 234 // Don't mark invokespecial to method as resolved if sender is an interface. The receiver 235 // has to be checked that it is a subclass of the current class every time this bytecode 236 // is executed. 237 if (invoke_code != Bytecodes::_invokespecial || !sender_is_interface || 238 method->name() == vmSymbols::object_initializer_name()) { 239 set_bytecode_1(invoke_code); 240 } 241 } else if (byte_no == 2) { 242 if (change_to_virtual) { 243 assert(invoke_code == Bytecodes::_invokeinterface, ""); 244 // NOTE: THIS IS A HACK - BE VERY CAREFUL!!! 
245 // 246 // Workaround for the case where we encounter an invokeinterface, but we 247 // should really have an _invokevirtual since the resolved method is a 248 // virtual method in java.lang.Object. This is a corner case in the spec 249 // but is presumably legal. javac does not generate this code. 250 // 251 // We set bytecode_1() to _invokeinterface, because that is the 252 // bytecode # used by the interpreter to see if it is resolved. 253 // We set bytecode_2() to _invokevirtual. 254 // See also interpreterRuntime.cpp. (8/25/2000) 255 // Only set resolved for the invokeinterface case if method is public. 256 // Otherwise, the method needs to be reresolved with caller for each 257 // interface call. 258 if (method->is_public()) set_bytecode_1(invoke_code); 259 invoke_code = Bytecodes::_invokevirtual; 260 } else { 261 assert(invoke_code == Bytecodes::_invokevirtual, ""); 262 } 263 // set up for invokevirtual, even if linking for invokeinterface also: 264 set_bytecode_2(invoke_code); 265 } else { 266 ShouldNotReachHere(); 267 } 268 NOT_PRODUCT(verify(tty)); 269 } 270 271 void ConstantPoolCacheEntry::set_direct_call(Bytecodes::Code invoke_code, const methodHandle& method, 272 bool sender_is_interface) { 273 int index = Method::nonvirtual_vtable_index; 274 // index < 0; FIXME: inline and customize set_direct_or_vtable_call 275 set_direct_or_vtable_call(invoke_code, method, index, sender_is_interface); 276 } 277 278 void ConstantPoolCacheEntry::set_vtable_call(Bytecodes::Code invoke_code, const methodHandle& method, int index) { 279 // either the method is a miranda or its holder should accept the given index 280 assert(method->method_holder()->is_interface() || method->method_holder()->verify_vtable_index(index), ""); 281 // index >= 0; FIXME: inline and customize set_direct_or_vtable_call 282 set_direct_or_vtable_call(invoke_code, method, index, false); 283 } 284 285 void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code, 286 Klass* 
referenced_klass, 287 const methodHandle& method, int index) { 288 assert(method->method_holder()->verify_itable_index(index), ""); 289 assert(invoke_code == Bytecodes::_invokeinterface, ""); 290 InstanceKlass* interf = method->method_holder(); 291 assert(interf->is_interface(), "must be an interface"); 292 assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here"); 293 set_f1(referenced_klass); 294 set_f2((intx)method()); 295 set_method_flags(as_TosState(method->result_type()), 296 0, // no option bits 297 method()->size_of_parameters()); 298 set_bytecode_1(Bytecodes::_invokeinterface); 299 } 300 301 302 void ConstantPoolCacheEntry::set_method_handle(const constantPoolHandle& cpool, const CallInfo &call_info) { 303 set_method_handle_common(cpool, Bytecodes::_invokehandle, call_info); 304 } 305 306 void ConstantPoolCacheEntry::set_dynamic_call(const constantPoolHandle& cpool, const CallInfo &call_info) { 307 set_method_handle_common(cpool, Bytecodes::_invokedynamic, call_info); 308 } 309 310 void ConstantPoolCacheEntry::set_method_handle_common(const constantPoolHandle& cpool, 311 Bytecodes::Code invoke_code, 312 const CallInfo &call_info) { 313 // NOTE: This CPCE can be the subject of data races. 314 // There are three words to update: flags, refs[f2], f1 (in that order). 315 // Writers must store all other values before f1. 316 // Readers must test f1 first for non-null before reading other fields. 317 // Competing writers must acquire exclusive access via a lock. 318 // A losing writer waits on the lock until the winner writes f1 and leaves 319 // the lock, so that when the losing writer returns, he can use the linked 320 // cache entry. 321 322 objArrayHandle resolved_references(Thread::current(), cpool->resolved_references()); 323 // Use the resolved_references() lock for this cpCache entry. 
324 // resolved_references are created for all classes with Invokedynamic, MethodHandle 325 // or MethodType constant pool cache entries. 326 assert(resolved_references() != NULL, 327 "a resolved_references array should have been created for this class"); 328 ObjectLocker ol(resolved_references, Thread::current()); 329 if (!is_f1_null()) { 330 return; 331 } 332 333 if (indy_resolution_failed()) { 334 // Before we got here, another thread got a LinkageError exception during 335 // resolution. Ignore our success and throw their exception. 336 ConstantPoolCache* cpCache = cpool->cache(); 337 int index = -1; 338 for (int i = 0; i < cpCache->length(); i++) { 339 if (cpCache->entry_at(i) == this) { 340 index = i; 341 break; 342 } 343 } 344 guarantee(index >= 0, "Didn't find cpCache entry!"); 345 int encoded_index = ResolutionErrorTable::encode_cpcache_index( 346 ConstantPool::encode_invokedynamic_index(index)); 347 Thread* THREAD = Thread::current(); 348 ConstantPool::throw_resolution_error(cpool, encoded_index, THREAD); 349 return; 350 } 351 352 const methodHandle adapter = call_info.resolved_method(); 353 const Handle appendix = call_info.resolved_appendix(); 354 const Handle method_type = call_info.resolved_method_type(); 355 const bool has_appendix = appendix.not_null(); 356 const bool has_method_type = method_type.not_null(); 357 358 // Write the flags. 359 set_method_flags(as_TosState(adapter->result_type()), 360 ((has_appendix ? 1 : 0) << has_appendix_shift ) | 361 ((has_method_type ? 1 : 0) << has_method_type_shift) | 362 ( 1 << is_final_shift ), 363 adapter->size_of_parameters()); 364 365 if (TraceInvokeDynamic) { 366 ttyLocker ttyl; 367 tty->print_cr("set_method_handle bc=%d appendix=" PTR_FORMAT "%s method_type=" PTR_FORMAT "%s method=" PTR_FORMAT " ", 368 invoke_code, 369 p2i(appendix()), (has_appendix ? "" : " (unused)"), 370 p2i(method_type()), (has_method_type ? 
"" : " (unused)"), 371 p2i(adapter())); 372 adapter->print(); 373 if (has_appendix) appendix()->print(); 374 } 375 376 // Method handle invokes and invokedynamic sites use both cp cache words. 377 // refs[f2], if not null, contains a value passed as a trailing argument to the adapter. 378 // In the general case, this could be the call site's MethodType, 379 // for use with java.lang.Invokers.checkExactType, or else a CallSite object. 380 // f1 contains the adapter method which manages the actual call. 381 // In the general case, this is a compiled LambdaForm. 382 // (The Java code is free to optimize these calls by binding other 383 // sorts of methods and appendices to call sites.) 384 // JVM-level linking is via f1, as if for invokespecial, and signatures are erased. 385 // The appendix argument (if any) is added to the signature, and is counted in the parameter_size bits. 386 // Even with the appendix, the method will never take more than 255 parameter slots. 387 // 388 // This means that given a call site like (List)mh.invoke("foo"), 389 // the f1 method has signature '(Ljl/Object;Ljl/invoke/MethodType;)Ljl/Object;', 390 // not '(Ljava/lang/String;)Ljava/util/List;'. 391 // The fact that String and List are involved is encoded in the MethodType in refs[f2]. 392 // This allows us to create fewer Methods, while keeping type safety. 393 // 394 395 // Store appendix, if any. 396 if (has_appendix) { 397 const int appendix_index = f2_as_index() + _indy_resolved_references_appendix_offset; 398 assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob"); 399 assert(resolved_references->obj_at(appendix_index) == NULL, "init just once"); 400 resolved_references->obj_at_put(appendix_index, appendix()); 401 } 402 403 // Store MethodType, if any. 
404 if (has_method_type) { 405 const int method_type_index = f2_as_index() + _indy_resolved_references_method_type_offset; 406 assert(method_type_index >= 0 && method_type_index < resolved_references->length(), "oob"); 407 assert(resolved_references->obj_at(method_type_index) == NULL, "init just once"); 408 resolved_references->obj_at_put(method_type_index, method_type()); 409 } 410 411 release_set_f1(adapter()); // This must be the last one to set (see NOTE above)! 412 413 // The interpreter assembly code does not check byte_2, 414 // but it is used by is_resolved, method_if_resolved, etc. 415 set_bytecode_1(invoke_code); 416 NOT_PRODUCT(verify(tty)); 417 if (TraceInvokeDynamic) { 418 ttyLocker ttyl; 419 this->print(tty, 0); 420 } 421 } 422 423 bool ConstantPoolCacheEntry::save_and_throw_indy_exc( 424 const constantPoolHandle& cpool, int cpool_index, int index, constantTag tag, TRAPS) { 425 426 assert(HAS_PENDING_EXCEPTION, "No exception got thrown!"); 427 assert(PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass()), 428 "No LinkageError exception"); 429 430 // Use the resolved_references() lock for this cpCache entry. 431 // resolved_references are created for all classes with Invokedynamic, MethodHandle 432 // or MethodType constant pool cache entries. 433 objArrayHandle resolved_references(Thread::current(), cpool->resolved_references()); 434 assert(resolved_references() != NULL, 435 "a resolved_references array should have been created for this class"); 436 ObjectLocker ol(resolved_references, THREAD); 437 438 // if f1 is not null or the indy_resolution_failed flag is set then another 439 // thread either succeeded in resolving the method or got a LinkageError 440 // exception, before this thread was able to record its failure. So, clear 441 // this thread's exception and return false so caller can use the earlier 442 // thread's result. 
443 if (!is_f1_null() || indy_resolution_failed()) { 444 CLEAR_PENDING_EXCEPTION; 445 return false; 446 } 447 448 Symbol* error = PENDING_EXCEPTION->klass()->name(); 449 Symbol* message = java_lang_Throwable::detail_message(PENDING_EXCEPTION); 450 assert(message != NULL, "Missing detail message"); 451 452 SystemDictionary::add_resolution_error(cpool, index, error, message); 453 set_indy_resolution_failed(); 454 return true; 455 } 456 457 Method* ConstantPoolCacheEntry::method_if_resolved(const constantPoolHandle& cpool) { 458 // Decode the action of set_method and set_interface_call 459 Bytecodes::Code invoke_code = bytecode_1(); 460 if (invoke_code != (Bytecodes::Code)0) { 461 Metadata* f1 = f1_ord(); 462 if (f1 != NULL) { 463 switch (invoke_code) { 464 case Bytecodes::_invokeinterface: 465 assert(f1->is_klass(), ""); 466 return klassItable::method_for_itable_index((Klass*)f1, f2_as_index()); 467 case Bytecodes::_invokestatic: 468 case Bytecodes::_invokespecial: 469 assert(!has_appendix(), ""); 470 case Bytecodes::_invokehandle: 471 case Bytecodes::_invokedynamic: 472 assert(f1->is_method(), ""); 473 return (Method*)f1; 474 default: 475 break; 476 } 477 } 478 } 479 invoke_code = bytecode_2(); 480 if (invoke_code != (Bytecodes::Code)0) { 481 switch (invoke_code) { 482 case Bytecodes::_invokevirtual: 483 if (is_vfinal()) { 484 // invokevirtual 485 Method* m = f2_as_vfinal_method(); 486 assert(m->is_method(), ""); 487 return m; 488 } else { 489 int holder_index = cpool->uncached_klass_ref_index_at(constant_pool_index()); 490 if (cpool->tag_at(holder_index).is_klass()) { 491 Klass* klass = cpool->resolved_klass_at(holder_index); 492 return klass->method_at_vtable(f2_as_index()); 493 } 494 } 495 break; 496 default: 497 break; 498 } 499 } 500 return NULL; 501 } 502 503 504 oop ConstantPoolCacheEntry::appendix_if_resolved(const constantPoolHandle& cpool) { 505 if (!has_appendix()) 506 return NULL; 507 const int ref_index = f2_as_index() + 
_indy_resolved_references_appendix_offset; 508 objArrayOop resolved_references = cpool->resolved_references(); 509 return resolved_references->obj_at(ref_index); 510 } 511 512 513 oop ConstantPoolCacheEntry::method_type_if_resolved(const constantPoolHandle& cpool) { 514 if (!has_method_type()) 515 return NULL; 516 const int ref_index = f2_as_index() + _indy_resolved_references_method_type_offset; 517 objArrayOop resolved_references = cpool->resolved_references(); 518 return resolved_references->obj_at(ref_index); 519 } 520 521 522 #if INCLUDE_JVMTI 523 524 void log_adjust(const char* entry_type, Method* old_method, Method* new_method, bool* trace_name_printed) { 525 if (log_is_enabled(Info, redefine, class, update)) { 526 ResourceMark rm; 527 if (!(*trace_name_printed)) { 528 log_info(redefine, class, update)("adjust: name=%s", old_method->method_holder()->external_name()); 529 *trace_name_printed = true; 530 } 531 log_debug(redefine, class, update, constantpool) 532 ("cpc %s entry update: %s(%s)", entry_type, new_method->name()->as_C_string(), new_method->signature()->as_C_string()); 533 } 534 } 535 536 // RedefineClasses() API support: 537 // If this ConstantPoolCacheEntry refers to old_method then update it 538 // to refer to new_method. 
539 void ConstantPoolCacheEntry::adjust_method_entry(Method* old_method, 540 Method* new_method, bool * trace_name_printed) { 541 542 if (is_vfinal()) { 543 // virtual and final so _f2 contains method ptr instead of vtable index 544 if (f2_as_vfinal_method() == old_method) { 545 // match old_method so need an update 546 // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values 547 _f2 = (intptr_t)new_method; 548 log_adjust("vfinal", old_method, new_method, trace_name_printed); 549 } 550 return; 551 } 552 553 assert (_f1 != NULL, "should not call with uninteresting entry"); 554 555 if (!(_f1->is_method())) { 556 // _f1 is a Klass* for an interface, _f2 is the method 557 if (f2_as_interface_method() == old_method) { 558 _f2 = (intptr_t)new_method; 559 log_adjust("interface", old_method, new_method, trace_name_printed); 560 } 561 } else if (_f1 == old_method) { 562 _f1 = new_method; 563 log_adjust("special, static or dynamic", old_method, new_method, trace_name_printed); 564 } 565 } 566 567 // a constant pool cache entry should never contain old or obsolete methods 568 bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() { 569 Method* m = get_interesting_method_entry(NULL); 570 // return false if m refers to a non-deleted old or obsolete method 571 if (m != NULL) { 572 assert(m->is_valid() && m->is_method(), "m is a valid method"); 573 return !m->is_old() && !m->is_obsolete(); // old is always set for old and obsolete 574 } else { 575 return true; 576 } 577 } 578 579 Method* ConstantPoolCacheEntry::get_interesting_method_entry(Klass* k) { 580 if (!is_method_entry()) { 581 // not a method entry so not interesting by default 582 return NULL; 583 } 584 Method* m = NULL; 585 if (is_vfinal()) { 586 // virtual and final so _f2 contains method ptr instead of vtable index 587 m = f2_as_vfinal_method(); 588 } else if (is_f1_null()) { 589 // NULL _f1 means this is a virtual entry so also not interesting 590 return NULL; 591 } else { 592 if 
(!(_f1->is_method())) { 593 // _f1 is a Klass* for an interface 594 m = f2_as_interface_method(); 595 } else { 596 m = f1_as_method(); 597 } 598 } 599 assert(m != NULL && m->is_method(), "sanity check"); 600 if (m == NULL || !m->is_method() || (k != NULL && m->method_holder() != k)) { 601 // robustness for above sanity checks or method is not in 602 // the interesting class 603 return NULL; 604 } 605 // the method is in the interesting class so the entry is interesting 606 return m; 607 } 608 #endif // INCLUDE_JVMTI 609 610 void ConstantPoolCacheEntry::print(outputStream* st, int index) const { 611 // print separator 612 if (index == 0) st->print_cr(" -------------"); 613 // print entry 614 st->print("%3d (" PTR_FORMAT ") ", index, (intptr_t)this); 615 st->print_cr("[%02x|%02x|%5d]", bytecode_2(), bytecode_1(), 616 constant_pool_index()); 617 st->print_cr(" [ " PTR_FORMAT "]", (intptr_t)_f1); 618 st->print_cr(" [ " PTR_FORMAT "]", (intptr_t)_f2); 619 st->print_cr(" [ " PTR_FORMAT "]", (intptr_t)_flags); 620 st->print_cr(" -------------"); 621 } 622 623 void ConstantPoolCacheEntry::verify(outputStream* st) const { 624 // not implemented yet 625 } 626 627 // Implementation of ConstantPoolCache 628 629 ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data, 630 const intStack& index_map, 631 const intStack& invokedynamic_index_map, 632 const intStack& invokedynamic_map, TRAPS) { 633 634 const int length = index_map.length() + invokedynamic_index_map.length(); 635 int size = ConstantPoolCache::size(length); 636 637 return new (loader_data, size, MetaspaceObj::ConstantPoolCacheType, THREAD) 638 ConstantPoolCache(length, index_map, invokedynamic_index_map, invokedynamic_map); 639 } 640 641 void ConstantPoolCache::initialize(const intArray& inverse_index_map, 642 const intArray& invokedynamic_inverse_index_map, 643 const intArray& invokedynamic_references_map) { 644 for (int i = 0; i < inverse_index_map.length(); i++) { 645 ConstantPoolCacheEntry* e = 
entry_at(i); 646 int original_index = inverse_index_map.at(i); 647 e->initialize_entry(original_index); 648 assert(entry_at(i) == e, "sanity"); 649 } 650 651 // Append invokedynamic entries at the end 652 int invokedynamic_offset = inverse_index_map.length(); 653 for (int i = 0; i < invokedynamic_inverse_index_map.length(); i++) { 654 int offset = i + invokedynamic_offset; 655 ConstantPoolCacheEntry* e = entry_at(offset); 656 int original_index = invokedynamic_inverse_index_map.at(i); 657 e->initialize_entry(original_index); 658 assert(entry_at(offset) == e, "sanity"); 659 } 660 661 for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) { 662 const int cpci = invokedynamic_references_map.at(ref); 663 if (cpci >= 0) { 664 #ifdef ASSERT 665 // invokedynamic and invokehandle have more entries; check if they 666 // all point to the same constant pool cache entry. 667 for (int entry = 1; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) { 668 const int cpci_next = invokedynamic_references_map.at(ref + entry); 669 assert(cpci == cpci_next, "%d == %d", cpci, cpci_next); 670 } 671 #endif 672 entry_at(cpci)->initialize_resolved_reference_index(ref); 673 ref += ConstantPoolCacheEntry::_indy_resolved_references_entries - 1; // skip extra entries 674 } 675 } 676 } 677 678 void ConstantPoolCache::verify_just_initialized() { 679 DEBUG_ONLY(walk_entries_for_initialization(/*check_only = */ true)); 680 } 681 682 void ConstantPoolCache::remove_unshareable_info() { 683 walk_entries_for_initialization(/*check_only = */ false); 684 } 685 686 void ConstantPoolCache::walk_entries_for_initialization(bool check_only) { 687 assert(DumpSharedSpaces, "sanity"); 688 // When dumping the archive, we want to clean up the ConstantPoolCache 689 // to remove any effect of linking due to the execution of Java code -- 690 // each ConstantPoolCacheEntry will have the same contents as if 691 // ConstantPoolCache::initialize has just returned: 692 // 693 // - We 
keep the ConstantPoolCache::constant_pool_index() bits for all entries. 694 // - We keep the "f2" field for entries used by invokedynamic and invokehandle 695 // - All other bits in the entries are cleared to zero. 696 ResourceMark rm; 697 698 InstanceKlass* ik = constant_pool()->pool_holder(); 699 bool* f2_used = NEW_RESOURCE_ARRAY(bool, length()); 700 memset(f2_used, 0, sizeof(bool) * length()); 701 702 // Find all the slots that we need to preserve f2 703 for (int i = 0; i < ik->methods()->length(); i++) { 704 Method* m = ik->methods()->at(i); 705 RawBytecodeStream bcs(m); 706 while (!bcs.is_last_bytecode()) { 707 Bytecodes::Code opcode = bcs.raw_next(); 708 switch (opcode) { 709 case Bytecodes::_invokedynamic: { 710 int index = Bytes::get_native_u4(bcs.bcp() + 1); 711 int cp_cache_index = constant_pool()->invokedynamic_cp_cache_index(index); 712 f2_used[cp_cache_index] = 1; 713 } 714 break; 715 case Bytecodes::_invokehandle: { 716 int cp_cache_index = Bytes::get_native_u2(bcs.bcp() + 1); 717 f2_used[cp_cache_index] = 1; 718 } 719 break; 720 default: 721 break; 722 } 723 } 724 } 725 726 if (check_only) { 727 DEBUG_ONLY( 728 for (int i=0; i<length(); i++) { 729 entry_at(i)->verify_just_initialized(f2_used[i]); 730 }) 731 } else { 732 for (int i=0; i<length(); i++) { 733 entry_at(i)->reinitialize(f2_used[i]); 734 } 735 } 736 } 737 738 void ConstantPoolCache::deallocate_contents(ClassLoaderData* data) { 739 assert(!is_shared(), "shared caches are not deallocated"); 740 data->remove_handle(_resolved_references); 741 set_resolved_references(NULL); 742 MetadataFactory::free_array<u2>(data, _reference_map); 743 set_reference_map(NULL); 744 } 745 746 #if INCLUDE_CDS_JAVA_HEAP 747 oop ConstantPoolCache::archived_references() { 748 // Loading an archive root forces the oop to become strongly reachable. 
749 // For example, if it is loaded during concurrent marking in a SATB 750 // collector, it will be enqueued to the SATB queue, effectively 751 // shading the previously white object gray. 752 return RootAccess<IN_ARCHIVE_ROOT>::oop_load(&_archived_references); 753 } 754 755 void ConstantPoolCache::set_archived_references(oop o) { 756 assert(DumpSharedSpaces, "called only during runtime"); 757 RootAccess<IN_ARCHIVE_ROOT>::oop_store(&_archived_references, o); 758 } 759 #endif 760 761 #if INCLUDE_JVMTI 762 // RedefineClasses() API support: 763 // If any entry of this ConstantPoolCache points to any of 764 // old_methods, replace it with the corresponding new_method. 765 void ConstantPoolCache::adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed) { 766 for (int i = 0; i < length(); i++) { 767 ConstantPoolCacheEntry* entry = entry_at(i); 768 Method* old_method = entry->get_interesting_method_entry(holder); 769 if (old_method == NULL || !old_method->is_old()) { 770 continue; // skip uninteresting entries 771 } 772 if (old_method->is_deleted()) { 773 // clean up entries with deleted methods 774 entry->initialize_entry(entry->constant_pool_index()); 775 continue; 776 } 777 Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum()); 778 779 assert(new_method != NULL, "method_with_idnum() should not be NULL"); 780 assert(old_method != new_method, "sanity check"); 781 782 entry_at(i)->adjust_method_entry(old_method, new_method, trace_name_printed); 783 } 784 } 785 786 // the constant pool cache should never contain old or obsolete methods 787 bool ConstantPoolCache::check_no_old_or_obsolete_entries() { 788 for (int i = 1; i < length(); i++) { 789 if (entry_at(i)->get_interesting_method_entry(NULL) != NULL && 790 !entry_at(i)->check_no_old_or_obsolete_entries()) { 791 return false; 792 } 793 } 794 return true; 795 } 796 797 void ConstantPoolCache::dump_cache() { 798 for (int i = 1; i < length(); i++) { 799 if 
(entry_at(i)->get_interesting_method_entry(NULL) != NULL) { 800 entry_at(i)->print(tty, i); 801 } 802 } 803 } 804 #endif // INCLUDE_JVMTI 805 806 void ConstantPoolCache::metaspace_pointers_do(MetaspaceClosure* it) { 807 log_trace(cds)("Iter(ConstantPoolCache): %p", this); 808 it->push(&_constant_pool); 809 it->push(&_reference_map); 810 } 811 812 // Printing 813 814 void ConstantPoolCache::print_on(outputStream* st) const { 815 assert(is_constantPoolCache(), "obj must be constant pool cache"); 816 st->print_cr("%s", internal_name()); 817 // print constant pool cache entries 818 for (int i = 0; i < length(); i++) entry_at(i)->print(st, i); 819 } 820 821 void ConstantPoolCache::print_value_on(outputStream* st) const { 822 assert(is_constantPoolCache(), "obj must be constant pool cache"); 823 st->print("cache [%d]", length()); 824 print_address_on(st); 825 st->print(" for "); 826 constant_pool()->print_value_on(st); 827 } 828 829 830 // Verification 831 832 void ConstantPoolCache::verify_on(outputStream* st) { 833 guarantee(is_constantPoolCache(), "obj must be constant pool cache"); 834 // print constant pool cache entries 835 for (int i = 0; i < length(); i++) entry_at(i)->verify(st); 836 }