/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_METHODOOP_HPP
#define SHARE_VM_OOPS_METHODOOP_HPP

#include "classfile/vmSymbols.hpp"
#include "code/compressedStream.hpp"
#include "compiler/compilerDefinitions.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/invocationCounter.hpp"
#include "oops/annotations.hpp"
#include "oops/constantPool.hpp"
#include "oops/methodCounters.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.hpp"
#include "oops/typeArrayOop.hpp"
#include "utilities/accessFlags.hpp"
#include "utilities/growableArray.hpp"

// A Method represents a Java method.
//
// Note that most applications load thousands of methods, so keeping the size of this
// class small has a big impact on footprint.
//
// Note that native_function and signature_handler have to be at fixed offsets
// (required by the interpreter)
//
// Method embedded field layout (after declared fields):
//   [EMBEDDED native_function       (present only if native) ]
//   [EMBEDDED signature_handler     (present only if native) ]

class CheckedExceptionElement;
class LocalVariableTableElement;
class AdapterHandlerEntry;
class MethodData;
class MethodCounters;
class ConstMethod;
class InlineTableSizes;
class KlassSizeStats;
class CompiledMethod;

class Method : public Metadata {
 friend class VMStructs;
 friend class JVMCIVMStructs;
 private:
  ConstMethod*      _constMethod;                // Method read-only data.
  MethodData*       _method_data;
  MethodCounters*   _method_counters;
  AccessFlags       _access_flags;               // Access flags
  int               _vtable_index;               // vtable index of this method (see VtableIndexFlag)
                                                 // note: can have vtables with >2**16 elements (because of inheritance)
  u2                _intrinsic_id;               // vmSymbols::intrinsic_id (0 == _none)

  // Flags
  enum Flags {
    _jfr_towrite           = 1 << 0,
    _caller_sensitive      = 1 << 1,
    _force_inline          = 1 << 2,
    _dont_inline           = 1 << 3,
    _hidden                = 1 << 4,
    _has_injected_profile  = 1 << 5,
    _running_emcp          = 1 << 6,
    _intrinsic_candidate   = 1 << 7,
    _reserved_stack_access = 1 << 8
  };
  mutable u2 _flags;

#ifndef PRODUCT
  int               _compiled_invocation_count;  // Number of nmethod invocations so far (for perf. debugging)
#endif
  // Entry point for calling both from and to the interpreter.
  address _i2i_entry;                            // All-args-on-stack calling convention
  // Entry point for calling from compiled code, to compiled code if it exists
  // or else the interpreter.
  volatile address _from_compiled_entry;         // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
  // The entry point for calling both from and to compiled code is
  // "_code->entry_point()".  Because of tiered compilation and de-opt, this
  // field can come and go.  It can transition from NULL to not-null at any
  // time (whenever a compile completes).  It can transition from not-null to
  // NULL only at safepoints (because of a de-opt).
  CompiledMethod* volatile _code;                // Points to the corresponding piece of native code
  volatile address _from_interpreted_entry;      // Cache of _code ? _adapter->i2c_entry() : _i2i_entry

#if INCLUDE_AOT && defined(TIERED)
  CompiledMethod* _aot_code;
#endif

  // Constructor
  Method(ConstMethod* xconst, AccessFlags access_flags);
 public:

  static Method* allocate(ClassLoaderData* loader_data,
                          int byte_code_size,
                          AccessFlags access_flags,
                          InlineTableSizes* sizes,
                          ConstMethod::MethodType method_type,
                          TRAPS);

  // CDS and vtbl checking can create an empty Method to get vtbl pointer.
  Method(){}

  bool is_method() const volatile { return true; }

  void restore_unshareable_info(TRAPS);

  // accessors for instance variables

  ConstMethod* constMethod() const               { return _constMethod; }
  void set_constMethod(ConstMethod* xconst)      { _constMethod = xconst; }


  static address make_adapters(methodHandle mh, TRAPS);
  volatile address from_compiled_entry() const    { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); }
  volatile address from_compiled_entry_no_trampoline() const;
  volatile address from_interpreted_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); }

  // access flag
  AccessFlags access_flags() const               { return _access_flags;  }
  void set_access_flags(AccessFlags flags)       { _access_flags = flags; }

  // name
  Symbol* name() const                           { return constants()->symbol_at(name_index()); }
  int name_index() const                         { return constMethod()->name_index(); }
  void set_name_index(int index)                 { constMethod()->set_name_index(index); }

  // signature
  Symbol* signature() const                      { return constants()->symbol_at(signature_index()); }
  int signature_index() const                    { return constMethod()->signature_index(); }
  void set_signature_index(int index)            { constMethod()->set_signature_index(index); }

  // generics support
  Symbol* generic_signature() const              { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); }
  int generic_signature_index() const            { return constMethod()->generic_signature_index(); }
  void set_generic_signature_index(int index)    { constMethod()->set_generic_signature_index(index); }

  // annotations support
  AnnotationArray* annotations() const           {
    return constMethod()->method_annotations();
  }
  AnnotationArray* parameter_annotations() const {
    return constMethod()->parameter_annotations();
  }
  AnnotationArray* annotation_default() const    {
    return constMethod()->default_annotations();
  }
  AnnotationArray* type_annotations() const      {
    return constMethod()->type_annotations();
  }
  // Helper routine: get klass name + "." + method name + signature as
  // C string, for the purpose of providing more useful NoSuchMethodErrors
  // and fatal error handling. The string is allocated in resource
  // area if a buffer is not provided by the caller.
  char* name_and_sig_as_C_string() const;
  char* name_and_sig_as_C_string(char* buf, int size) const;

  // Static routines for the situations where we don't have a Method*
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size);

  Bytecodes::Code java_code_at(int bci) const {
    return Bytecodes::java_code_at(this, bcp_from(bci));
  }
  Bytecodes::Code code_at(int bci) const {
    return Bytecodes::code_at(this, bcp_from(bci));
  }

  // JVMTI breakpoints
#if !INCLUDE_JVMTI
  Bytecodes::Code orig_bytecode_at(int bci) const {
    ShouldNotReachHere();
    return Bytecodes::_shouldnotreachhere;
  }
  void set_orig_bytecode_at(int bci, Bytecodes::Code code) {
    ShouldNotReachHere();
  };
  u2   number_of_breakpoints() const { return 0; }
#else // !INCLUDE_JVMTI
  Bytecodes::Code orig_bytecode_at(int bci) const;
  void set_orig_bytecode_at(int bci, Bytecodes::Code code);
  void set_breakpoint(int bci);
  void clear_breakpoint(int bci);
  void clear_all_breakpoints();
  // Tracking number of breakpoints, for fullspeed debugging.
  // Only mutated by VM thread.
  u2   number_of_breakpoints() const {
    MethodCounters* mcs = method_counters();
    if (mcs == NULL) {
      return 0;
    } else {
      return mcs->number_of_breakpoints();
    }
  }
  void incr_number_of_breakpoints(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->incr_number_of_breakpoints();
    }
  }
  void decr_number_of_breakpoints(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->decr_number_of_breakpoints();
    }
  }
  // Initialization only
  void clear_number_of_breakpoints() {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->clear_number_of_breakpoints();
    }
  }
#endif // !INCLUDE_JVMTI

  // index into InstanceKlass methods() array
  // note: also used by jfr
  u2 method_idnum() const                        { return constMethod()->method_idnum(); }
  void set_method_idnum(u2 idnum)                { constMethod()->set_method_idnum(idnum); }

  u2 orig_method_idnum() const                   { return constMethod()->orig_method_idnum(); }
  void set_orig_method_idnum(u2 idnum)           { constMethod()->set_orig_method_idnum(idnum); }

  // code size
  int code_size() const                          { return constMethod()->code_size(); }

  // method size in words
  int method_size() const                        { return sizeof(Method)/wordSize + ( is_native() ? 2 : 0 ); }
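  // (The two extra words for native methods correspond to the embedded
  // native_function and signature_handler slots described in the class
  // comment above; see native_function_addr() and signature_handler_addr()
  // at the end of this class.)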

  // constant pool for Klass* holding this method
  ConstantPool* constants() const                { return constMethod()->constants(); }
  void set_constants(ConstantPool* c)            { constMethod()->set_constants(c); }

  // max stack
  // return original max stack size for method verification
  int  verifier_max_stack() const                { return constMethod()->max_stack(); }
  int  max_stack() const                         { return constMethod()->max_stack() + extra_stack_entries(); }
  void set_max_stack(int size)                   { constMethod()->set_max_stack(size); }

  // max locals
  int  max_locals() const                        { return constMethod()->max_locals(); }
  void set_max_locals(int size)                  { constMethod()->set_max_locals(size); }

  int highest_comp_level() const;
  void set_highest_comp_level(int level);
  int highest_osr_comp_level() const;
  void set_highest_osr_comp_level(int level);

#if defined(COMPILER2) || INCLUDE_JVMCI
  // Count of times method was exited via exception while interpreting
  void interpreter_throwout_increment(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->interpreter_throwout_increment();
    }
  }
#endif

  int interpreter_throwout_count() const {
    MethodCounters* mcs = method_counters();
    if (mcs == NULL) {
      return 0;
    } else {
      return mcs->interpreter_throwout_count();
    }
  }

  // size of parameters
  int  size_of_parameters() const                { return constMethod()->size_of_parameters(); }
  void set_size_of_parameters(int size)          { constMethod()->set_size_of_parameters(size); }

  bool has_stackmap_table() const {
    return constMethod()->has_stackmap_table();
  }

  Array<u1>* stackmap_data() const {
    return constMethod()->stackmap_data();
  }

  void set_stackmap_data(Array<u1>* sd) {
    constMethod()->set_stackmap_data(sd);
  }

  // exception handler table
  bool has_exception_handler() const
                             { return constMethod()->has_exception_handler(); }
  int exception_table_length() const
                             { return constMethod()->exception_table_length(); }
  ExceptionTableElement* exception_table_start() const
                             { return constMethod()->exception_table_start(); }

  // Finds the first entry point bci of an exception handler for an
  // exception of klass ex_klass thrown at throw_bci. A value of NULL
  // for ex_klass indicates that the exception klass is not known; in
  // this case it matches any constraint class. Returns -1 if the
  // exception cannot be handled in this method. The handler
  // constraint classes are loaded if necessary. Note that this may
  // throw an exception if loading of the constraint classes causes
  // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError.
  // If an exception is thrown, returns the bci of the
  // exception handler which caused the exception to be thrown, which
  // is needed for proper retries. See, for example,
  // InterpreterRuntime::exception_handler_for_exception.
  static int fast_exception_handler_bci_for(methodHandle mh, KlassHandle ex_klass, int throw_bci, TRAPS);
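
  // Typical use (illustrative sketch only):
  //   int handler_bci = Method::fast_exception_handler_bci_for(mh, ex_klass, throw_bci, THREAD);
  //   if (HAS_PENDING_EXCEPTION)  { ... }   // loading a constraint class threw; propagate
  //   else if (handler_bci == -1) { ... }   // not handled in this method; keep unwinding
  //   else                        { ... }   // dispatch to the handler at handler_bci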

  // method data access
  MethodData* method_data() const {
    return _method_data;
  }

  void set_method_data(MethodData* data) {
    // The store into method must be released. On platforms without
    // total store order (TSO) the reference may become visible before
    // the initialization of data otherwise.
    OrderAccess::release_store_ptr((volatile void *)&_method_data, data);
  }

  MethodCounters* method_counters() const {
    return _method_counters;
  }

  void clear_method_counters() {
    _method_counters = NULL;
  }

  bool init_method_counters(MethodCounters* counters) {
    // Try to install a pointer to MethodCounters, return true on success.
    return Atomic::cmpxchg_ptr(counters, (volatile void*)&_method_counters, NULL) == NULL;
  }

#ifdef TIERED
  // We are reusing interpreter_invocation_count as a holder for the previous event count!
  // We can do that since interpreter_invocation_count is not used in tiered.
  int prev_event_count() const {
    if (method_counters() == NULL) {
      return 0;
    } else {
      return method_counters()->interpreter_invocation_count();
    }
  }
  void set_prev_event_count(int count) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_interpreter_invocation_count(count);
    }
  }
  jlong prev_time() const {
    MethodCounters* mcs = method_counters();
    return mcs == NULL ? 0 : mcs->prev_time();
  }
  void set_prev_time(jlong time) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_prev_time(time);
    }
  }
  float rate() const {
    MethodCounters* mcs = method_counters();
    return mcs == NULL ? 0 : mcs->rate();
  }
  void set_rate(float rate) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_rate(rate);
    }
  }

#if INCLUDE_AOT
  void set_aot_code(CompiledMethod* aot_code) {
    _aot_code = aot_code;
  }

  CompiledMethod* aot_code() const {
    return _aot_code;
  }
#else
  CompiledMethod* aot_code() const { return NULL; }
#endif // INCLUDE_AOT
#endif // TIERED

  int nmethod_age() const {
    if (method_counters() == NULL) {
      return INT_MAX;
    } else {
      return method_counters()->nmethod_age();
    }
  }

  int invocation_count();
  int backedge_count();

  bool was_executed_more_than(int n);
  bool was_never_executed()                      { return !was_executed_more_than(0); }

  static void build_interpreter_method_data(const methodHandle& method, TRAPS);

  static MethodCounters* build_method_counters(Method* m, TRAPS);

  int interpreter_invocation_count() {
    if (TieredCompilation) {
      return invocation_count();
    } else {
      MethodCounters* mcs = method_counters();
      return (mcs == NULL) ? 0 : mcs->interpreter_invocation_count();
    }
  }
#if defined(COMPILER2) || INCLUDE_JVMCI
  int increment_interpreter_invocation_count(TRAPS) {
    if (TieredCompilation) ShouldNotReachHere();
    MethodCounters* mcs = get_method_counters(CHECK_0);
    return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count();
  }
#endif

#ifndef PRODUCT
  int  compiled_invocation_count() const         { return _compiled_invocation_count; }
  void set_compiled_invocation_count(int count)  { _compiled_invocation_count = count; }
#else
  // for PrintMethodData in a product build
  int  compiled_invocation_count() const         { return 0; }
#endif // not PRODUCT

  // Clear (non-shared space) pointers which could not be relevant
  // if this (shared) method were mapped into another JVM.
  void remove_unshareable_info();

  // nmethod/verified compiler entry
  address verified_code_entry();
  bool check_code() const;      // Not inline to avoid circular ref
  CompiledMethod* volatile code() const          { assert( check_code(), "" ); return (CompiledMethod *)OrderAccess::load_ptr_acquire(&_code); }
  void clear_code(bool acquire_lock = true);     // Clear out any compiled code
  static void set_code(methodHandle mh, CompiledMethod* code);
  void set_adapter_entry(AdapterHandlerEntry* adapter) {
    constMethod()->set_adapter_entry(adapter);
  }
  void update_adapter_trampoline(AdapterHandlerEntry* adapter) {
    constMethod()->update_adapter_trampoline(adapter);
  }

  address get_i2c_entry();
  address get_c2i_entry();
  address get_c2i_unverified_entry();
  AdapterHandlerEntry* adapter() const {
    return constMethod()->adapter();
  }
  // setup entry points
  void link_method(const methodHandle& method, TRAPS);
  // clear entry points. Used by sharing code during dump time
  void unlink_method() NOT_CDS_RETURN;

  // vtable index
  enum VtableIndexFlag {
    // Valid vtable indexes are non-negative (>= 0).
    // These few negative values are used as sentinels.
    itable_index_max        = -10, // first itable index, growing downward
    pending_itable_index    = -9,  // itable index will be assigned
    invalid_vtable_index    = -4,  // distinct from any valid vtable index
    garbage_vtable_index    = -3,  // not yet linked; no vtable layout yet
    nonvirtual_vtable_index = -2   // there is no need for vtable dispatch
    // 6330203 Note:  Do not use -1, which was overloaded with many meanings.
  };
  DEBUG_ONLY(bool valid_vtable_index() const     { return _vtable_index >= nonvirtual_vtable_index; })
  bool has_vtable_index() const                  { return _vtable_index >= 0; }
  int  vtable_index() const                      { return _vtable_index; }
  void set_vtable_index(int index);
  DEBUG_ONLY(bool valid_itable_index() const     { return _vtable_index <= pending_itable_index; })
  bool has_itable_index() const                  { return _vtable_index <= itable_index_max; }
  int  itable_index() const                      { assert(valid_itable_index(), "");
                                                   return itable_index_max - _vtable_index; }
  void set_itable_index(int index);
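  // Illustration (derived from itable_index() above): an itable index i is
  // encoded as _vtable_index == itable_index_max - i, so itable index 0 is
  // stored as -10, index 1 as -11, index 3 as -13, and so on, growing
  // downward; itable_index() recovers i as itable_index_max - _vtable_index.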

  // interpreter entry
  address interpreter_entry() const              { return _i2i_entry; }
  // Only used when first initializing the method, so we can set
  // _i2i_entry and _from_interpreted_entry.
  void set_interpreter_entry(address entry) {
    assert(!is_shared(), "shared method's interpreter entry should not be changed at run time");
    if (_i2i_entry != entry) {
      _i2i_entry = entry;
    }
    if (_from_interpreted_entry != entry) {
      _from_interpreted_entry = entry;
    }
  }

  // native function (used for native methods only)
  enum {
    native_bind_event_is_interesting = true
  };
  address native_function() const                { return *(native_function_addr()); }
  address critical_native_function();

  // Must specify a real function (not NULL).
  // Use clear_native_function() to unregister.
  void set_native_function(address function, bool post_event_flag);
  bool has_native_function() const;
  void clear_native_function();

  // signature handler (used for native methods only)
  address signature_handler() const              { return *(signature_handler_addr()); }
  void set_signature_handler(address handler);

  // Interpreter oopmap support
  void mask_for(int bci, InterpreterOopMap* mask);

  // operations on invocation counter
  void print_invocation_count();

  // byte codes
  void    set_code(address code)                 { return constMethod()->set_code(code); }
  address code_base() const                      { return constMethod()->code_base(); }
  bool    contains(address bcp) const            { return constMethod()->contains(bcp); }

  // prints byte codes
  void print_codes() const                       { print_codes_on(tty); }
  void print_codes_on(outputStream* st) const;
  void print_codes_on(int from, int to, outputStream* st) const;

  // method parameters
  bool has_method_parameters() const
                         { return constMethod()->has_method_parameters(); }
  int method_parameters_length() const
                         { return constMethod()->method_parameters_length(); }
  MethodParametersElement* method_parameters_start() const
                         { return constMethod()->method_parameters_start(); }

  // checked exceptions
  int checked_exceptions_length() const
                         { return constMethod()->checked_exceptions_length(); }
  CheckedExceptionElement* checked_exceptions_start() const
                         { return constMethod()->checked_exceptions_start(); }

  // localvariable table
  bool has_localvariable_table() const
                         { return constMethod()->has_localvariable_table(); }
  int localvariable_table_length() const
                         { return constMethod()->localvariable_table_length(); }
  LocalVariableTableElement* localvariable_table_start() const
                         { return constMethod()->localvariable_table_start(); }

  bool has_linenumber_table() const
                         { return constMethod()->has_linenumber_table(); }
  u_char* compressed_linenumber_table() const
                         { return constMethod()->compressed_linenumber_table(); }

  // method holder (the Klass* holding this method)
  InstanceKlass* method_holder() const           { return constants()->pool_holder(); }

  void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments)
  Symbol* klass_name() const;                    // returns the name of the method holder
  BasicType result_type() const;                 // type of the method result
  bool is_returning_oop() const                  { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY); }
  bool is_returning_fp() const                   { BasicType r = result_type(); return (r == T_FLOAT || r == T_DOUBLE); }

  // Checked exceptions thrown by this method (resolved to mirrors)
  objArrayHandle resolved_checked_exceptions(TRAPS) { return resolved_checked_exceptions_impl(this, THREAD); }

  // Access flags
  bool is_public() const                         { return access_flags().is_public();      }
  bool is_private() const                        { return access_flags().is_private();     }
  bool is_protected() const                      { return access_flags().is_protected();   }
  bool is_package_private() const                { return !is_public() && !is_private() && !is_protected(); }
  bool is_static() const                         { return access_flags().is_static();      }
  bool is_final() const                          { return access_flags().is_final();       }
  bool is_synchronized() const                   { return access_flags().is_synchronized();}
  bool is_native() const                         { return access_flags().is_native();      }
  bool is_abstract() const                       { return access_flags().is_abstract();    }
  bool is_strict() const                         { return access_flags().is_strict();      }
  bool is_synthetic() const                      { return access_flags().is_synthetic();   }

  // returns true if contains only return operation
  bool is_empty_method() const;

  // returns true if this is a vanilla constructor
  bool is_vanilla_constructor() const;

  // checks method and its method holder
  bool is_final_method() const;
  bool is_final_method(AccessFlags class_access_flags) const;
  // interface method declared with 'default' - excludes private interface methods
  bool is_default_method() const;

  // true if method needs no dynamic dispatch (final and/or no vtable entry)
  bool can_be_statically_bound() const;
  bool can_be_statically_bound(AccessFlags class_access_flags) const;

  // returns true if the method has any backward branches.
  bool has_loops() {
    return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag();
  };

  bool compute_has_loops_flag();

  bool has_jsrs() {
    return access_flags().has_jsrs();
  };
  void set_has_jsrs() {
    _access_flags.set_has_jsrs();
  }

  // returns true if the method has any monitors.
  bool has_monitors() const                      { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
  bool has_monitor_bytecodes() const             { return access_flags().has_monitor_bytecodes(); }

  void set_has_monitor_bytecodes()               { _access_flags.set_has_monitor_bytecodes(); }

  // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit bytecodes
  // properly nest in the method. It might return false, even though they actually nest properly, since the info
  // has not been computed yet.
  bool guaranteed_monitor_matching() const       { return access_flags().is_monitor_matching(); }
  void set_guaranteed_monitor_matching()         { _access_flags.set_monitor_matching(); }

  // returns true if the method is an accessor function (setter/getter).
  bool is_accessor() const;

  // returns true if the method is a getter
  bool is_getter() const;

  // returns true if the method is a setter
  bool is_setter() const;

  // returns true if the method does nothing but return a constant of primitive type
  bool is_constant_getter() const;

  // returns true if the method is an initializer (<init> or <clinit>).
  bool is_initializer() const;

  // returns true if the method is static OR if the classfile version < 51
  bool has_valid_initializer_flags() const;

  // returns true if the method name is <clinit> and the method has
  // valid static initializer flags.
  bool is_static_initializer() const;

  // returns true if the method name is <init>
  bool is_object_initializer() const;

  // compiled code support
  // NOTE: code() is inherently racy as deopt can be clearing code
  // simultaneously. Use with caution.
  bool has_compiled_code() const                 { return code() != NULL; }

#ifdef TIERED
  bool has_aot_code() const                      { return aot_code() != NULL; }
#endif

  // sizing
  static int header_size()                       { return sizeof(Method)/wordSize; }
  static int size(bool is_native);
  int size() const                               { return method_size(); }
#if INCLUDE_SERVICES
  void collect_statistics(KlassSizeStats *sz) const;
#endif
  void log_touched(TRAPS);
  static void print_touched_methods(outputStream* out);

  // interpreter support
  static ByteSize const_offset()                 { return byte_offset_of(Method, _constMethod ); }
  static ByteSize access_flags_offset()          { return byte_offset_of(Method, _access_flags ); }
  static ByteSize from_compiled_offset()         { return byte_offset_of(Method, _from_compiled_entry); }
  static ByteSize code_offset()                  { return byte_offset_of(Method, _code); }
  static ByteSize method_data_offset()           {
    return byte_offset_of(Method, _method_data);
  }
  static ByteSize method_counters_offset()       {
    return byte_offset_of(Method, _method_counters);
  }
#ifndef PRODUCT
  static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); }
#endif // not PRODUCT
  static ByteSize native_function_offset()       { return in_ByteSize(sizeof(Method)); }
  static ByteSize from_interpreted_offset()      { return byte_offset_of(Method, _from_interpreted_entry ); }
  static ByteSize interpreter_entry_offset()     { return byte_offset_of(Method, _i2i_entry ); }
  static ByteSize signature_handler_offset()     { return in_ByteSize(sizeof(Method) + wordSize); }

  // for code generation
  static int method_data_offset_in_bytes()       { return offset_of(Method, _method_data); }
  static int intrinsic_id_offset_in_bytes()      { return offset_of(Method, _intrinsic_id); }
  static int intrinsic_id_size_in_bytes()        { return sizeof(u2); }

  // Static methods that are used to implement member methods where an exposed this pointer
  // is needed due to possible GCs
  static objArrayHandle resolved_checked_exceptions_impl(Method* method, TRAPS);

  // Returns the byte code index from the byte code pointer
  int     bci_from(address bcp) const;
  address bcp_from(int bci) const;
  address bcp_from(address bcp) const;
  int validate_bci_from_bcp(address bcp) const;
  int validate_bci(int bci) const;

  // Returns the line number for a bci if debugging information for the method is provided,
  // -1 is returned otherwise.
  int line_number_from_bci(int bci) const;

  // Reflection support
  bool is_overridden_in(Klass* k) const;

  // Stack walking support
  bool is_ignored_by_security_stack_walk() const;

  // JSR 292 support
  bool is_method_handle_intrinsic() const;       // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
  bool is_compiled_lambda_form() const;          // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
  bool has_member_arg() const;                   // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
  static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual
                                                   Symbol* signature,    // anything at all
                                                   TRAPS);
  static Klass* check_non_bcp_klass(Klass* klass);

  enum {
    // How many extra stack entries for invokedynamic
    extra_stack_entries_for_jsr292 = 1
  };

  // this operates only on invoke methods:
  // presize interpreter frames for extra interpreter stack entries, if needed
  // Account for the extra appendix argument for invokehandle/invokedynamic
  static int extra_stack_entries() { return extra_stack_entries_for_jsr292; }
  static int extra_stack_words();  // = extra_stack_entries() * Interpreter::stackElementSize

  // RedefineClasses() support:
  bool is_old() const                            { return access_flags().is_old(); }
  void set_is_old()                              { _access_flags.set_is_old(); }
  bool is_obsolete() const                       { return access_flags().is_obsolete(); }
  void set_is_obsolete()                         { _access_flags.set_is_obsolete(); }
  bool is_deleted() const                        { return access_flags().is_deleted(); }
  void set_is_deleted()                          { _access_flags.set_is_deleted(); }

  bool is_running_emcp() const {
    // EMCP methods are old but not obsolete or deleted. Equivalent
    // Modulo Constant Pool means the method is equivalent except
    // the constant pool and instructions that access the constant
    // pool might be different.
    // If a breakpoint is set in a redefined method, its EMCP methods that are
    // still running must have a breakpoint also.
    return (_flags & _running_emcp) != 0;
  }

  void set_running_emcp(bool x) {
    _flags = x ? (_flags | _running_emcp) : (_flags & ~_running_emcp);
  }

  bool on_stack() const                          { return access_flags().on_stack(); }
  void set_on_stack(const bool value);

  // see the definition in Method*.cpp for the gory details
  bool should_not_be_cached() const;

  // JVMTI Native method prefixing support:
  bool is_prefixed_native() const                { return access_flags().is_prefixed_native(); }
  void set_is_prefixed_native()                  { _access_flags.set_is_prefixed_native(); }

  // Rewriting support
  static methodHandle clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
                                          u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS);

  // jmethodID handling
  // Because the useful life-span of a jmethodID cannot be determined,
  // once created they are never reclaimed.  The methods to which they refer,
  // however, can be GC'ed away if the class is unloaded or if the method is
  // made obsolete or deleted -- in these cases, the jmethodID
  // refers to NULL (as is the case for any weak reference).
  static jmethodID make_jmethod_id(ClassLoaderData* loader_data, Method* mh);
  static void destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID mid);

  // Ensure there is enough capacity in the internal tracking data
  // structures to hold the number of jmethodIDs you plan to generate.
  // This saves substantial time doing allocations.
  static void ensure_jmethod_ids(ClassLoaderData* loader_data, int capacity);
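
  // Typical round trip (illustrative sketch only): a jmethodID handed out for
  // a method can later be resolved back to the Method*, e.g.
  //   jmethodID jmid = m->jmethod_id();                           // allocate or look up the id
  //   Method*   m2   = Method::checked_resolve_jmethod_id(jmid);  // NULL if no longer valid
  // using the resolve functions declared below.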

  // Use resolve_jmethod_id() in situations where the caller is expected
  // to provide a valid jmethodID; the only sanity checks are in asserts;
  // result guaranteed not to be NULL.
  inline static Method* resolve_jmethod_id(jmethodID mid) {
    assert(mid != NULL, "JNI method id should not be null");
    return *((Method**)mid);
  }

  // Use checked_resolve_jmethod_id() in situations where the caller
  // should provide a valid jmethodID, but might not. NULL is returned
  // when the jmethodID does not refer to a valid method.
  static Method* checked_resolve_jmethod_id(jmethodID mid);

  static void change_method_associated_with_jmethod_id(jmethodID old_jmid_ptr, Method* new_method);
  static bool is_method_id(jmethodID mid);

  // Clear methods
  static void clear_jmethod_ids(ClassLoaderData* loader_data);
  static void print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;

  // Get this method's jmethodID -- allocate if it doesn't exist
  jmethodID jmethod_id()                         { methodHandle this_h(this);
                                                   return InstanceKlass::get_jmethod_id(method_holder(), this_h); }

  // Lookup the jmethodID for this method.  Return NULL if not found.
  // NOTE that this function can be called from a signal handler
  // (see AsyncGetCallTrace support for Forte Analyzer) and this
  // needs to be async-safe. No allocation should be done and
  // so handles are not used to avoid deadlock.
  jmethodID find_jmethod_id_or_null()            { return method_holder()->jmethod_id_or_null(this); }

  // Support for inlining of intrinsic methods
  vmIntrinsics::ID intrinsic_id() const          { return (vmIntrinsics::ID) _intrinsic_id; }
  void set_intrinsic_id(vmIntrinsics::ID id)     { _intrinsic_id = (u2) id; }

  // Helper routines for intrinsic_id() and vmIntrinsics::method().
  void init_intrinsic_id();     // updates from _none if a match
  static vmSymbols::SID klass_id_for_intrinsics(const Klass* holder);

  bool jfr_towrite() const {
    return (_flags & _jfr_towrite) != 0;
  }
  void set_jfr_towrite(bool x) const {
    _flags = x ? (_flags | _jfr_towrite) : (_flags & ~_jfr_towrite);
  }

  bool caller_sensitive() {
    return (_flags & _caller_sensitive) != 0;
  }
  void set_caller_sensitive(bool x) {
    _flags = x ? (_flags | _caller_sensitive) : (_flags & ~_caller_sensitive);
  }

  bool force_inline() {
    return (_flags & _force_inline) != 0;
  }
  void set_force_inline(bool x) {
    _flags = x ? (_flags | _force_inline) : (_flags & ~_force_inline);
  }

  bool dont_inline() {
    return (_flags & _dont_inline) != 0;
  }
  void set_dont_inline(bool x) {
    _flags = x ? (_flags | _dont_inline) : (_flags & ~_dont_inline);
  }

  bool is_hidden() {
    return (_flags & _hidden) != 0;
  }
  void set_hidden(bool x) {
    _flags = x ? (_flags | _hidden) : (_flags & ~_hidden);
  }

  bool intrinsic_candidate() {
    return (_flags & _intrinsic_candidate) != 0;
  }
  void set_intrinsic_candidate(bool x) {
    _flags = x ? (_flags | _intrinsic_candidate) : (_flags & ~_intrinsic_candidate);
  }

  bool has_injected_profile() {
    return (_flags & _has_injected_profile) != 0;
  }
  void set_has_injected_profile(bool x) {
    _flags = x ? (_flags | _has_injected_profile) : (_flags & ~_has_injected_profile);
  }

  bool has_reserved_stack_access() {
    return (_flags & _reserved_stack_access) != 0;
  }

  void set_has_reserved_stack_access(bool x) {
    _flags = x ? (_flags | _reserved_stack_access) : (_flags & ~_reserved_stack_access);
  }

  ConstMethod::MethodType method_type() const {
    return _constMethod->method_type();
  }
  bool is_overpass() const { return method_type() == ConstMethod::OVERPASS; }

  // On-stack replacement support
  bool has_osr_nmethod(int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
  }

  int mark_osr_nmethods() {
    return method_holder()->mark_osr_nmethods(this);
  }

  nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
  }

  // Inline cache support
  void cleanup_inline_caches();

  // Find if klass for method is loaded
  bool is_klass_loaded_by_klass_index(int klass_index) const;
  bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const;

  // Indicates whether compilation failed earlier for this method, or
  // whether it is not compilable for another reason like having a
  // breakpoint set in it.
  bool is_not_compilable(int comp_level = CompLevel_any) const;
  void set_not_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
  void set_not_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_compilable(comp_level, false);
  }
  bool is_not_osr_compilable(int comp_level = CompLevel_any) const;
  void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
  void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_osr_compilable(comp_level, false);
  }
  bool is_always_compilable() const;

 private:
  void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);

 public:
  MethodCounters* get_method_counters(TRAPS) {
    if (_method_counters == NULL) {
      build_method_counters(this, CHECK_AND_CLEAR_NULL);
    }
    return _method_counters;
  }

  bool   is_not_c1_compilable() const            { return access_flags().is_not_c1_compilable(); }
  void  set_not_c1_compilable()                  { _access_flags.set_not_c1_compilable(); }
  void clear_not_c1_compilable()                 { _access_flags.clear_not_c1_compilable(); }
  bool   is_not_c2_compilable() const            { return access_flags().is_not_c2_compilable(); }
  void  set_not_c2_compilable()                  { _access_flags.set_not_c2_compilable(); }
  void clear_not_c2_compilable()                 { _access_flags.clear_not_c2_compilable(); }

  bool   is_not_c1_osr_compilable() const        { return is_not_c1_compilable(); }  // don't waste an accessFlags bit
  void  set_not_c1_osr_compilable()              { set_not_c1_compilable(); }        // don't waste an accessFlags bit
  void clear_not_c1_osr_compilable()             { clear_not_c1_compilable(); }      // don't waste an accessFlags bit
  bool   is_not_c2_osr_compilable() const        { return access_flags().is_not_c2_osr_compilable(); }
  void  set_not_c2_osr_compilable()              { _access_flags.set_not_c2_osr_compilable(); }
  void clear_not_c2_osr_compilable()             { _access_flags.clear_not_c2_osr_compilable(); }

  // Background compilation support
  bool queued_for_compilation() const            { return access_flags().queued_for_compilation(); }
  void set_queued_for_compilation()              { _access_flags.set_queued_for_compilation(); }
  void clear_queued_for_compilation()            { _access_flags.clear_queued_for_compilation(); }

  // Resolve all classes in signature, return 'true' if successful
  static bool load_signature_classes(methodHandle m, TRAPS);

  // Return true if not all classes referenced in the signature, including the
  // return type, have been loaded.
  static bool has_unloaded_classes_in_signature(methodHandle m, TRAPS);

  // Printing
  void print_short_name(outputStream* st = tty); // prints as klassname::methodname; Exposed so field engineers can debug VM
#if INCLUDE_JVMTI
  void print_name(outputStream* st = tty); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses
#else
  void print_name(outputStream* st = tty) PRODUCT_RETURN; // prints as "virtual void foo(int)"
#endif

  // Helper routine used for method sorting
  static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true);

  // Deallocation function for redefine classes or if an error occurs
  void deallocate_contents(ClassLoaderData* loader_data);

  // Printing
#ifndef PRODUCT
  void print_on(outputStream* st) const;
#endif
  void print_value_on(outputStream* st) const;
  void print_linkage_flags(outputStream* st) PRODUCT_RETURN;

  const char* internal_name() const { return "{method}"; }

  // Check for valid method pointer
  static bool has_method_vptr(const void* ptr);
  bool is_valid_method() const;

  // Verify
  void verify() { verify_on(tty); }
  void verify_on(outputStream* st);

 private:

  // Inlined elements
  address* native_function_addr() const          { assert(is_native(), "must be native"); return (address*) (this+1); }
  address* signature_handler_addr() const        { return native_function_addr() + 1; }
};


// Utility class for compressing line number tables

class CompressedLineNumberWriteStream: public CompressedWriteStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberWriteStream(int initial_size) : CompressedWriteStream(initial_size), _bci(0), _line(0) {}
  CompressedLineNumberWriteStream(u_char* buffer, int initial_size) : CompressedWriteStream(buffer, initial_size), _bci(0), _line(0) {}

  // Write (bci, line number) pair to stream
  void write_pair_regular(int bci_delta, int line_delta);

  inline void write_pair_inline(int bci, int line) {
    int bci_delta = bci - _bci;
    int line_delta = line - _line;
    _bci = bci;
    _line = line;
    // Skip (0,0) deltas - they do not add information and conflict with terminator.
    if (bci_delta == 0 && line_delta == 0) return;
    // Check if bci is 5-bit and line number 3-bit unsigned.
    if (((bci_delta & ~0x1F) == 0) && ((line_delta & ~0x7) == 0)) {
      // Compress into single byte.
      jubyte value = ((jubyte) bci_delta << 3) | (jubyte) line_delta;
      // Check that value doesn't match escape character.
      if (value != 0xFF) {
        write_byte(value);
        return;
      }
    }
    write_pair_regular(bci_delta, line_delta);
  }

  // Windows AMD64 + Apr 2005 PSDK with /O2 generates bad code for write_pair.
  // Disabling optimization doesn't work for methods in header files
  // so we force it to call through the non-optimized version in the .cpp.
  // It's gross, but it's the only way we can ensure that all callers are
  // fixed.  _MSC_VER is defined by the Windows compiler.
#if defined(_M_AMD64) && _MSC_VER >= 1400
  void write_pair(int bci, int line);
#else
  void write_pair(int bci, int line) { write_pair_inline(bci, line); }
#endif

  // Write end-of-stream marker
  void write_terminator()                        { write_byte(0); }
};
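
// Worked example of the single-byte encoding above (illustrative only): a pair
// with bci_delta == 4 and line_delta == 2 fits the 5-bit/3-bit limits and is
// written as the byte (4 << 3) | 2 == 0x22.  A pair with bci_delta == 31 and
// line_delta == 7 would encode to 0xFF, which collides with the escape value,
// so it (like any deltas that do not fit, e.g. a negative line_delta) falls
// back to write_pair_regular().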

// Utility class for decompressing line number tables

class CompressedLineNumberReadStream: public CompressedReadStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberReadStream(u_char* buffer);
  // Read (bci, line number) pair from stream. Returns false at end-of-stream.
  bool read_pair();
  // Accessing bci and line number (after calling read_pair)
  int bci() const                                { return _bci; }
  int line() const                               { return _line; }
};


#if INCLUDE_JVMTI

/// Fast Breakpoints.

// If this structure gets more complicated (because bpts get numerous),
// move it into its own header.

// There is presently no provision for concurrent access
// to breakpoint lists, which is only OK for JVMTI because
// breakpoints are written only at safepoints, and are read
// concurrently only outside of safepoints.

class BreakpointInfo : public CHeapObj<mtClass> {
  friend class VMStructs;
 private:
  Bytecodes::Code  _orig_bytecode;
  int              _bci;
  u2               _name_index;       // of method
  u2               _signature_index;  // of method
  BreakpointInfo*  _next;             // simple storage allocation

 public:
  BreakpointInfo(Method* m, int bci);

  // accessors
  Bytecodes::Code orig_bytecode()                      { return _orig_bytecode; }
  void set_orig_bytecode(Bytecodes::Code code)         { _orig_bytecode = code; }
  int bci()                                            { return _bci; }

  BreakpointInfo* next() const                         { return _next; }
  void set_next(BreakpointInfo* n)                     { _next = n; }

  // helps for searchers
  bool match(const Method* m, int bci) {
    return bci == _bci && match(m);
  }

  bool match(const Method* m) {
    return _name_index == m->name_index() &&
      _signature_index == m->signature_index();
  }

  void set(Method* method);
  void clear(Method* method);
};

#endif // INCLUDE_JVMTI

// Utility class for access exception handlers
class ExceptionTable : public StackObj {
 private:
  ExceptionTableElement* _table;
  u2  _length;

 public:
  ExceptionTable(const Method* m) {
    if (m->has_exception_handler()) {
      _table = m->exception_table_start();
      _length = m->exception_table_length();
    } else {
      _table = NULL;
      _length = 0;
    }
  }

  int length() const {
    return _length;
  }

  u2 start_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].start_pc;
  }

  void set_start_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].start_pc = value;
  }

  u2 end_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].end_pc;
  }

  void set_end_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].end_pc = value;
  }

  u2 handler_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].handler_pc;
  }

  void set_handler_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].handler_pc = value;
  }

  u2 catch_type_index(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].catch_type_index;
  }

  void set_catch_type_index(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].catch_type_index = value;
  }
};

#endif // SHARE_VM_OOPS_METHODOOP_HPP