/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_METHODOOP_HPP
#define SHARE_VM_OOPS_METHODOOP_HPP

#include "classfile/vmSymbols.hpp"
#include "code/compressedStream.hpp"
#include "compiler/compilerDefinitions.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/invocationCounter.hpp"
#include "oops/annotations.hpp"
#include "oops/constantPool.hpp"
#include "oops/methodCounters.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.hpp"
#include "oops/typeArrayOop.hpp"
#include "utilities/accessFlags.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JFR
#include "jfr/support/jfrTraceIdExtension.hpp"
#endif


// A Method represents a Java method.
//
// Note that most applications load thousands of methods, so keeping the size of this
// class small has a big impact on footprint.
//
// Note that native_function and signature_handler have to be at fixed offsets
// (required by the interpreter)
//
// Method embedded field layout (after declared fields):
//   [EMBEDDED native_function   (present only if native) ]
//   [EMBEDDED signature_handler (present only if native) ]

class CheckedExceptionElement;
class LocalVariableTableElement;
class AdapterHandlerEntry;
class MethodData;
class MethodCounters;
class ConstMethod;
class InlineTableSizes;
class KlassSizeStats;
class CompiledMethod;

class Method : public Metadata {
 friend class VMStructs;
 friend class JVMCIVMStructs;
 private:
  // If you add a new field that points to any metaspace object, you
  // must add this field to Method::metaspace_pointers_do().
  ConstMethod*      _constMethod;                // Method read-only data.
  MethodData*       _method_data;
  MethodCounters*   _method_counters;
  AccessFlags       _access_flags;               // Access flags
  int               _vtable_index;               // vtable index of this method (see VtableIndexFlag)
                                                 // note: can have vtables with >2**16 elements (because of inheritance)
  u2                _intrinsic_id;               // vmSymbols::intrinsic_id (0 == _none)

  // Flags
  enum Flags {
    _caller_sensitive      = 1 << 0,
    _force_inline          = 1 << 1,
    _dont_inline           = 1 << 2,
    _hidden                = 1 << 3,
    _has_injected_profile  = 1 << 4,
    _running_emcp          = 1 << 5,
    _intrinsic_candidate   = 1 << 6,
    _reserved_stack_access = 1 << 7
  };
  mutable u2 _flags;

  JFR_ONLY(DEFINE_TRACE_FLAG;)

#ifndef PRODUCT
  int               _compiled_invocation_count;  // Number of nmethod invocations so far (for perf. debugging)
#endif
  // Entry point for calling both from and to the interpreter.
  address _i2i_entry;                  // All-args-on-stack calling convention
  // Entry point for calling from compiled code, to compiled code if it exists
  // or else the interpreter.
  volatile address _from_compiled_entry;         // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
  // The entry point for calling both from and to compiled code is
  // "_code->entry_point()". Because of tiered compilation and de-opt, this
  // field can come and go. It can transition from NULL to not-null at any
  // time (whenever a compile completes). It can transition from not-null to
  // NULL only at safepoints (because of a de-opt).
  CompiledMethod* volatile _code;                // Points to the corresponding piece of native code
  volatile address _from_interpreted_entry;      // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
  int _max_vt_buffer;                            // max number of VT buffer chunks to use before recycling

#if INCLUDE_AOT && defined(TIERED)
  CompiledMethod* _aot_code;
#endif

  // Constructor
  Method(ConstMethod* xconst, AccessFlags access_flags);
 public:

  static Method* allocate(ClassLoaderData* loader_data,
                          int byte_code_size,
                          AccessFlags access_flags,
                          InlineTableSizes* sizes,
                          ConstMethod::MethodType method_type,
                          TRAPS);

  // CDS and vtbl checking can create an empty Method to get vtbl pointer.
  Method(){}

  bool is_method() const volatile { return true; }

  void restore_unshareable_info(TRAPS);

  // accessors for instance variables

  ConstMethod* constMethod() const             { return _constMethod; }
  void set_constMethod(ConstMethod* xconst)    { _constMethod = xconst; }


  static address make_adapters(const methodHandle& mh, TRAPS);
  address from_compiled_entry() const;
  address from_compiled_entry_no_trampoline() const;
  address from_interpreted_entry() const;

  // access flag
  AccessFlags access_flags() const               { return _access_flags;  }
  void set_access_flags(AccessFlags flags)       { _access_flags = flags; }

  // name
  Symbol* name() const                           { return constants()->symbol_at(name_index()); }
  int name_index() const                         { return constMethod()->name_index(); }
  void set_name_index(int index)                 { constMethod()->set_name_index(index); }

  // signature
  Symbol* signature() const                      { return constants()->symbol_at(signature_index()); }
  int signature_index() const                    { return constMethod()->signature_index(); }
  void set_signature_index(int index)            { constMethod()->set_signature_index(index); }
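
  // Illustrative note (added by the editor, not from the original header):
  // name() and signature() return the raw constant-pool Symbols, so for a
  // Java method 'void foo(String s, int i)' name() is "foo" and signature()
  // is the JVM descriptor "(Ljava/lang/String;I)V".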
  // generics support
  Symbol* generic_signature() const              { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); }
  int generic_signature_index() const            { return constMethod()->generic_signature_index(); }
  void set_generic_signature_index(int index)    { constMethod()->set_generic_signature_index(index); }

  // annotations support
  AnnotationArray* annotations() const {
    return constMethod()->method_annotations();
  }
  AnnotationArray* parameter_annotations() const {
    return constMethod()->parameter_annotations();
  }
  AnnotationArray* annotation_default() const {
    return constMethod()->default_annotations();
  }
  AnnotationArray* type_annotations() const {
    return constMethod()->type_annotations();
  }

  // Helper routine: get klass name + "." + method name + signature as
  // C string, for the purpose of providing more useful NoSuchMethodErrors
  // and fatal error handling. The string is allocated in resource
  // area if a buffer is not provided by the caller.
  char* name_and_sig_as_C_string() const;
  char* name_and_sig_as_C_string(char* buf, int size) const;

  // Static routines for situations where we don't have a Method*
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size);

  Bytecodes::Code java_code_at(int bci) const {
    return Bytecodes::java_code_at(this, bcp_from(bci));
  }
  Bytecodes::Code code_at(int bci) const {
    return Bytecodes::code_at(this, bcp_from(bci));
  }

  // JVMTI breakpoints
#if !INCLUDE_JVMTI
  Bytecodes::Code orig_bytecode_at(int bci) const {
    ShouldNotReachHere();
    return Bytecodes::_shouldnotreachhere;
  }
  void set_orig_bytecode_at(int bci, Bytecodes::Code code) {
    ShouldNotReachHere();
  };
  u2   number_of_breakpoints() const { return 0; }
#else // !INCLUDE_JVMTI
  Bytecodes::Code orig_bytecode_at(int bci) const;
  void set_orig_bytecode_at(int bci, Bytecodes::Code code);
  void set_breakpoint(int bci);
  void clear_breakpoint(int bci);
  void clear_all_breakpoints();
  // Tracking number of breakpoints, for fullspeed debugging.
  // Only mutated by VM thread.
  u2   number_of_breakpoints() const {
    MethodCounters* mcs = method_counters();
    if (mcs == NULL) {
      return 0;
    } else {
      return mcs->number_of_breakpoints();
    }
  }
  void incr_number_of_breakpoints(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->incr_number_of_breakpoints();
    }
  }
  void decr_number_of_breakpoints(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->decr_number_of_breakpoints();
    }
  }
  // Initialization only
  void clear_number_of_breakpoints() {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->clear_number_of_breakpoints();
    }
  }
#endif // !INCLUDE_JVMTI

  // index into InstanceKlass methods() array
  // note: also used by jfr
  u2 method_idnum() const                        { return constMethod()->method_idnum(); }
  void set_method_idnum(u2 idnum)                { constMethod()->set_method_idnum(idnum); }

  u2 orig_method_idnum() const                   { return constMethod()->orig_method_idnum(); }
  void set_orig_method_idnum(u2 idnum)           { constMethod()->set_orig_method_idnum(idnum); }

  // code size
  int code_size() const                          { return constMethod()->code_size(); }

  // method size in words
  int method_size() const                        { return sizeof(Method)/wordSize + ( is_native() ? 2 : 0 ); }

  // constant pool for Klass* holding this method
  ConstantPool* constants() const                { return constMethod()->constants(); }
  void set_constants(ConstantPool* c)            { constMethod()->set_constants(c); }

  // max stack
  // return original max stack size for method verification
  int  verifier_max_stack() const                { return constMethod()->max_stack(); }
  int  max_stack() const                         { return constMethod()->max_stack() + extra_stack_entries(); }
  void set_max_stack(int size)                   { constMethod()->set_max_stack(size); }

  // max locals
  int  max_locals() const                        { return constMethod()->max_locals(); }
  void set_max_locals(int size)                  { constMethod()->set_max_locals(size); }

  // value type buffering
  void initialize_max_vt_buffer();
  int  max_vt_buffer() const                     { return _max_vt_buffer; }
  void set_max_vt_buffer(int size)               { _max_vt_buffer = size; }


  int highest_comp_level() const;
  void set_highest_comp_level(int level);
  int highest_osr_comp_level() const;
  void set_highest_osr_comp_level(int level);

#if COMPILER2_OR_JVMCI
  // Count of times method was exited via exception while interpreting
  void interpreter_throwout_increment(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->interpreter_throwout_increment();
    }
  }
#endif

  int interpreter_throwout_count() const {
    MethodCounters* mcs = method_counters();
    if (mcs == NULL) {
      return 0;
    } else {
      return mcs->interpreter_throwout_count();
    }
  }

  // size of parameters
  int  size_of_parameters() const                { return constMethod()->size_of_parameters(); }
  void set_size_of_parameters(int size)          { constMethod()->set_size_of_parameters(size); }

  bool has_stackmap_table() const {
    return constMethod()->has_stackmap_table();
  }

  Array<u1>* stackmap_data() const {
    return constMethod()->stackmap_data();
  }

  void set_stackmap_data(Array<u1>* sd) {
    constMethod()->set_stackmap_data(sd);
  }

  // exception handler table
  bool has_exception_handler() const             { return constMethod()->has_exception_handler(); }
  int exception_table_length() const             { return constMethod()->exception_table_length(); }
  ExceptionTableElement* exception_table_start() const { return constMethod()->exception_table_start(); }

  // Finds the first entry point bci of an exception handler for an
  // exception of klass ex_klass thrown at throw_bci. A value of NULL
  // for ex_klass indicates that the exception klass is not known; in
  // this case it matches any constraint class. Returns -1 if the
  // exception cannot be handled in this method. The handler
  // constraint classes are loaded if necessary. Note that this may
  // throw an exception if loading of the constraint classes causes
  // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError.
  // If an exception is thrown, returns the bci of the
  // exception handler which caused the exception to be thrown, which
  // is needed for proper retries. See, for example,
  // InterpreterRuntime::exception_handler_for_exception.
  static int fast_exception_handler_bci_for(const methodHandle& mh, Klass* ex_klass, int throw_bci, TRAPS);

  // method data access
  MethodData* method_data() const {
    return _method_data;
  }

  void set_method_data(MethodData* data);

  MethodCounters* method_counters() const {
    return _method_counters;
  }

  void clear_method_counters() {
    _method_counters = NULL;
  }

  bool init_method_counters(MethodCounters* counters);

#ifdef TIERED
  // We are reusing interpreter_invocation_count as a holder for the previous event count!
  // We can do that since interpreter_invocation_count is not used in tiered.
  int prev_event_count() const {
    if (method_counters() == NULL) {
      return 0;
    } else {
      return method_counters()->interpreter_invocation_count();
    }
  }
  void set_prev_event_count(int count) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_interpreter_invocation_count(count);
    }
  }
  jlong prev_time() const {
    MethodCounters* mcs = method_counters();
    return mcs == NULL ? 0 : mcs->prev_time();
  }
  void set_prev_time(jlong time) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_prev_time(time);
    }
  }
  float rate() const {
    MethodCounters* mcs = method_counters();
    return mcs == NULL ? 0 : mcs->rate();
  }
  void set_rate(float rate) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_rate(rate);
    }
  }

#if INCLUDE_AOT
  void set_aot_code(CompiledMethod* aot_code) {
    _aot_code = aot_code;
  }

  CompiledMethod* aot_code() const {
    return _aot_code;
  }
#else
  CompiledMethod* aot_code() const { return NULL; }
#endif // INCLUDE_AOT
#endif // TIERED

  int nmethod_age() const {
    if (method_counters() == NULL) {
      return INT_MAX;
    } else {
      return method_counters()->nmethod_age();
    }
  }

  int invocation_count();
  int backedge_count();

  bool was_executed_more_than(int n);
  bool was_never_executed()                      { return !was_executed_more_than(0); }

  static void build_interpreter_method_data(const methodHandle& method, TRAPS);

  static MethodCounters* build_method_counters(Method* m, TRAPS);

  int interpreter_invocation_count() {
    if (TieredCompilation) {
      return invocation_count();
    } else {
      MethodCounters* mcs = method_counters();
      return (mcs == NULL) ? 0 : mcs->interpreter_invocation_count();
    }
  }
#if COMPILER2_OR_JVMCI
  int increment_interpreter_invocation_count(TRAPS) {
    if (TieredCompilation) ShouldNotReachHere();
    MethodCounters* mcs = get_method_counters(CHECK_0);
    return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count();
  }
#endif

#ifndef PRODUCT
  int  compiled_invocation_count() const         { return _compiled_invocation_count; }
  void set_compiled_invocation_count(int count)  { _compiled_invocation_count = count; }
#else
  // for PrintMethodData in a product build
  int  compiled_invocation_count() const         { return 0; }
#endif // not PRODUCT

  // Clear (non-shared space) pointers which could not be relevant
  // if this (shared) method were mapped into another JVM.
  void remove_unshareable_info();

  // nmethod/verified compiler entry
  address verified_code_entry();
  bool check_code() const;                       // Not inline to avoid circular ref
  CompiledMethod* volatile code() const;
  void clear_code(bool acquire_lock = true);     // Clear out any compiled code
  static void set_code(const methodHandle& mh, CompiledMethod* code);
  void set_adapter_entry(AdapterHandlerEntry* adapter) {
    constMethod()->set_adapter_entry(adapter);
  }
  void update_adapter_trampoline(AdapterHandlerEntry* adapter) {
    constMethod()->update_adapter_trampoline(adapter);
  }

  address get_i2c_entry();
  address get_c2i_entry();
  address get_c2i_unverified_entry();
  AdapterHandlerEntry* adapter() const {
    return constMethod()->adapter();
  }
  // setup entry points
  void link_method(const methodHandle& method, TRAPS);
  // clear entry points. Used by sharing code during dump time
  void unlink_method() NOT_CDS_RETURN;

  virtual void metaspace_pointers_do(MetaspaceClosure* iter);
  virtual MetaspaceObj::Type type() const { return MethodType; }

  // vtable index
  enum VtableIndexFlag {
    // Valid vtable indexes are non-negative (>= 0).
    // These few negative values are used as sentinels.
    itable_index_max        = -10, // first itable index, growing downward
    pending_itable_index    = -9,  // itable index will be assigned
    invalid_vtable_index    = -4,  // distinct from any valid vtable index
    garbage_vtable_index    = -3,  // not yet linked; no vtable layout yet
    nonvirtual_vtable_index = -2   // there is no need for vtable dispatch
    // 6330203 Note: Do not use -1, which was overloaded with many meanings.
  };
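  // Illustrative note (added by the editor, not from the original header):
  // itable indices are folded into _vtable_index as (itable_index_max - index),
  // so itable index 0 is stored as -10 and itable index 3 as -13.
  // has_itable_index() below tests _vtable_index <= itable_index_max, and
  // itable_index() inverts the mapping by computing itable_index_max - _vtable_index.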
  DEBUG_ONLY(bool valid_vtable_index() const     { return _vtable_index >= nonvirtual_vtable_index; })
  bool has_vtable_index() const                  { return _vtable_index >= 0; }
  int  vtable_index() const                      { return _vtable_index; }
  void set_vtable_index(int index);
  DEBUG_ONLY(bool valid_itable_index() const     { return _vtable_index <= pending_itable_index; })
  bool has_itable_index() const                  { return _vtable_index <= itable_index_max; }
  int  itable_index() const                      { assert(valid_itable_index(), "");
                                                   return itable_index_max - _vtable_index; }
  void set_itable_index(int index);

  // interpreter entry
  address interpreter_entry() const              { return _i2i_entry; }
  // Only used during initialization, so we can set _i2i_entry and _from_interpreted_entry
  void set_interpreter_entry(address entry) {
    assert(!is_shared(), "shared method's interpreter entry should not be changed at run time");
    if (_i2i_entry != entry) {
      _i2i_entry = entry;
    }
    if (_from_interpreted_entry != entry) {
      _from_interpreted_entry = entry;
    }
  }

  // native function (used for native methods only)
  enum {
    native_bind_event_is_interesting = true
  };
  address native_function() const                { return *(native_function_addr()); }
  address critical_native_function();

  // Must specify a real function (not NULL).
  // Use clear_native_function() to unregister.
  void set_native_function(address function, bool post_event_flag);
  bool has_native_function() const;
  void clear_native_function();

  // signature handler (used for native methods only)
  address signature_handler() const              { return *(signature_handler_addr()); }
  void set_signature_handler(address handler);

  // Interpreter oopmap support
  void mask_for(int bci, InterpreterOopMap* mask);

  // operations on invocation counter
  void print_invocation_count();

  // byte codes
  void    set_code(address code)                 { return constMethod()->set_code(code); }
  address code_base() const                      { return constMethod()->code_base(); }
  bool    contains(address bcp) const            { return constMethod()->contains(bcp); }

  // prints byte codes
  void print_codes() const                       { print_codes_on(tty); }
  void print_codes_on(outputStream* st) const;
  void print_codes_on(int from, int to, outputStream* st) const;

  // method parameters
  bool has_method_parameters() const             { return constMethod()->has_method_parameters(); }
  int  method_parameters_length() const          { return constMethod()->method_parameters_length(); }
  MethodParametersElement* method_parameters_start() const { return constMethod()->method_parameters_start(); }

  // checked exceptions
  int checked_exceptions_length() const          { return constMethod()->checked_exceptions_length(); }
  CheckedExceptionElement* checked_exceptions_start() const { return constMethod()->checked_exceptions_start(); }

  // localvariable table
  bool has_localvariable_table() const           { return constMethod()->has_localvariable_table(); }
  int  localvariable_table_length() const        { return constMethod()->localvariable_table_length(); }
  LocalVariableTableElement* localvariable_table_start() const { return constMethod()->localvariable_table_start(); }

  bool has_linenumber_table() const              { return constMethod()->has_linenumber_table(); }
  u_char* compressed_linenumber_table() const    { return constMethod()->compressed_linenumber_table(); }
  // method holder (the Klass* holding this method)
  InstanceKlass* method_holder() const           { return constants()->pool_holder(); }

  void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments)
  Symbol* klass_name() const;                    // returns the name of the method holder
  BasicType result_type() const;                 // type of the method result
  bool may_return_oop() const                    { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY || r == T_VALUETYPE); }
  bool is_returning_vt() const                   { BasicType r = result_type(); return r == T_VALUETYPE; }
#ifdef ASSERT
  ValueKlass* returned_value_type(Thread* thread) const;
#endif

  // Checked exceptions thrown by this method (resolved to mirrors)
  objArrayHandle resolved_checked_exceptions(TRAPS) { return resolved_checked_exceptions_impl(this, THREAD); }

  // Access flags
  bool is_public() const                         { return access_flags().is_public();      }
  bool is_private() const                        { return access_flags().is_private();     }
  bool is_protected() const                      { return access_flags().is_protected();   }
  bool is_package_private() const                { return !is_public() && !is_private() && !is_protected(); }
  bool is_static() const                         { return access_flags().is_static();      }
  bool is_final() const                          { return access_flags().is_final();       }
  bool is_synchronized() const                   { return access_flags().is_synchronized();}
  bool is_native() const                         { return access_flags().is_native();      }
  bool is_abstract() const                       { return access_flags().is_abstract();    }
  bool is_strict() const                         { return access_flags().is_strict();      }
  bool is_synthetic() const                      { return access_flags().is_synthetic();   }

  // returns true if contains only return operation
  bool is_empty_method() const;

  // returns true if this is a vanilla constructor
  bool is_vanilla_constructor() const;

  // checks method and its method holder
  bool is_final_method() const;
  bool is_final_method(AccessFlags class_access_flags) const;
  // interface method declared with 'default' - excludes private interface methods
  bool is_default_method() const;

  // true if method needs no dynamic dispatch (final and/or no vtable entry)
  bool can_be_statically_bound() const;
  bool can_be_statically_bound(AccessFlags class_access_flags) const;

  // returns true if the method has any backward branches.
  bool has_loops() {
    return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag();
  };

  bool compute_has_loops_flag();

  bool has_jsrs() {
    return access_flags().has_jsrs();
  };
  void set_has_jsrs() {
    _access_flags.set_has_jsrs();
  }

  // returns true if the method has any monitors.
  bool has_monitors() const                      { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
  bool has_monitor_bytecodes() const             { return access_flags().has_monitor_bytecodes(); }

  void set_has_monitor_bytecodes()               { _access_flags.set_has_monitor_bytecodes(); }

  // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit bytecodes
  // properly nest in the method. It might return false, even though they actually nest properly, since the
  // information has not been computed yet.
  bool guaranteed_monitor_matching() const       { return access_flags().is_monitor_matching(); }
  void set_guaranteed_monitor_matching()         { _access_flags.set_monitor_matching(); }

  // returns true if the method is an accessor function (setter/getter).
  bool is_accessor() const;

  // returns true if the method is a getter
  bool is_getter() const;

  // returns true if the method is a setter
  bool is_setter() const;

  // returns true if the method does nothing but return a constant of primitive type
  bool is_constant_getter() const;

  // returns true if the method is an initializer (<init> or <clinit>).
  bool is_initializer() const;

  // returns true if the method is static OR if the classfile version < 51
  bool has_valid_initializer_flags() const;

  // returns true if the method name is <clinit> and the method has
  // valid static initializer flags.
  bool is_static_initializer() const;

  // returns true if the method name is <init>
  bool is_object_initializer() const;

  // compiled code support
  // NOTE: code() is inherently racy as deopt can be clearing code
  // simultaneously. Use with caution.
  bool has_compiled_code() const;

#ifdef TIERED
  bool has_aot_code() const                      { return aot_code() != NULL; }
#endif

  // sizing
  static int header_size() {
    return align_up((int)sizeof(Method), wordSize) / wordSize;
  }
  static int size(bool is_native);
  int size() const                               { return method_size(); }
#if INCLUDE_SERVICES
  void collect_statistics(KlassSizeStats *sz) const;
#endif
  void log_touched(TRAPS);
  static void print_touched_methods(outputStream* out);

  // interpreter support
  static ByteSize const_offset()                 { return byte_offset_of(Method, _constMethod ); }
  static ByteSize access_flags_offset()          { return byte_offset_of(Method, _access_flags ); }
  static ByteSize from_compiled_offset()         { return byte_offset_of(Method, _from_compiled_entry); }
  static ByteSize code_offset()                  { return byte_offset_of(Method, _code); }
  static ByteSize method_data_offset() {
    return byte_offset_of(Method, _method_data);
  }
  static ByteSize method_counters_offset() {
    return byte_offset_of(Method, _method_counters);
  }
#ifndef PRODUCT
  static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); }
#endif // not PRODUCT
  static ByteSize native_function_offset()       { return in_ByteSize(sizeof(Method)); }
  static ByteSize from_interpreted_offset()      { return byte_offset_of(Method, _from_interpreted_entry ); }
  static ByteSize interpreter_entry_offset()     { return byte_offset_of(Method, _i2i_entry ); }
  static ByteSize signature_handler_offset()     { return in_ByteSize(sizeof(Method) + wordSize); }
  static ByteSize itable_index_offset()          { return byte_offset_of(Method, _vtable_index ); }

  // for code generation
  static int method_data_offset_in_bytes()       { return offset_of(Method, _method_data); }
  static int intrinsic_id_offset_in_bytes()      { return offset_of(Method, _intrinsic_id); }
  static int intrinsic_id_size_in_bytes()        { return sizeof(u2); }

  static ByteSize max_vt_buffer_offset()         { return byte_offset_of(Method, _max_vt_buffer); }

  // Static methods that are used to implement member methods where an exposed this pointer
  // is needed due to possible GCs
  static objArrayHandle resolved_checked_exceptions_impl(Method* method, TRAPS);

  // Returns the byte code index from the byte code pointer
  int     bci_from(address bcp) const;
  address bcp_from(int bci) const;
  address bcp_from(address bcp) const;
  int validate_bci_from_bcp(address bcp) const;
  int validate_bci(int bci) const;
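
  // Illustrative note (an editor's assumption about the implementation, not
  // stated in this header): for a bcp inside this method, bci_from(bcp) is
  // simply the offset bcp - code_base(), and bcp_from(bci) is code_base() + bci.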
  // Returns the line number for a bci if debugging information for the method is provided,
  // -1 is returned otherwise.
  int line_number_from_bci(int bci) const;

  // Reflection support
  bool is_overridden_in(Klass* k) const;

  // Stack walking support
  bool is_ignored_by_security_stack_walk() const;

  // JSR 292 support
  bool is_method_handle_intrinsic() const;       // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
  bool is_compiled_lambda_form() const;          // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
  bool has_member_arg() const;                   // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
  static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual
                                                   Symbol* signature,    // anything at all
                                                   TRAPS);
  static Klass* check_non_bcp_klass(Klass* klass);

  enum {
    // How many extra stack entries for invokedynamic
    extra_stack_entries_for_jsr292 = 1
  };

  // this operates only on invoke methods:
  // presize interpreter frames for extra interpreter stack entries, if needed
  // Account for the extra appendix argument for invokehandle/invokedynamic
  static int extra_stack_entries() { return extra_stack_entries_for_jsr292; }
  static int extra_stack_words();  // = extra_stack_entries() * Interpreter::stackElementSize

  // RedefineClasses() support:
  bool is_old() const                            { return access_flags().is_old(); }
  void set_is_old()                              { _access_flags.set_is_old(); }
  bool is_obsolete() const                       { return access_flags().is_obsolete(); }
  void set_is_obsolete()                         { _access_flags.set_is_obsolete(); }
  bool is_deleted() const                        { return access_flags().is_deleted(); }
  void set_is_deleted()                          { _access_flags.set_is_deleted(); }

  bool is_running_emcp() const {
    // EMCP methods are old but not obsolete or deleted. Equivalent
    // Modulo Constant Pool means the method is equivalent except
    // the constant pool and instructions that access the constant
    // pool might be different.
    // If a breakpoint is set in a redefined method, its EMCP methods that are
    // still running must have a breakpoint also.
    return (_flags & _running_emcp) != 0;
  }

  void set_running_emcp(bool x) {
    _flags = x ? (_flags | _running_emcp) : (_flags & ~_running_emcp);
  }

  bool on_stack() const                          { return access_flags().on_stack(); }
  void set_on_stack(const bool value);

  // see the definition in Method*.cpp for the gory details
  bool should_not_be_cached() const;

  // JVMTI Native method prefixing support:
  bool is_prefixed_native() const                { return access_flags().is_prefixed_native(); }
  void set_is_prefixed_native()                  { _access_flags.set_is_prefixed_native(); }

  // Rewriting support
  static methodHandle clone_with_new_data(const methodHandle& m, u_char* new_code, int new_code_length,
                                          u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS);

  // jmethodID handling
  // Because the useful life-span of a jmethodID cannot be determined,
  // once created they are never reclaimed. The methods to which they refer,
  // however, can be GC'ed away if the class is unloaded or if the method is
  // made obsolete or deleted -- in these cases, the jmethodID
  // refers to NULL (as is the case for any weak reference).
  static jmethodID make_jmethod_id(ClassLoaderData* loader_data, Method* mh);
  static void destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID mid);

  // Ensure there is enough capacity in the internal tracking data
  // structures to hold the number of jmethodIDs you plan to generate.
  // This saves substantial time doing allocations.
  static void ensure_jmethod_ids(ClassLoaderData* loader_data, int capacity);

  // Use resolve_jmethod_id() in situations where the caller is expected
  // to provide a valid jmethodID; the only sanity checks are in asserts;
  // result guaranteed not to be NULL.
  inline static Method* resolve_jmethod_id(jmethodID mid) {
    assert(mid != NULL, "JNI method id should not be null");
    return *((Method**)mid);
  }

  // Use checked_resolve_jmethod_id() in situations where the caller
  // should provide a valid jmethodID, but might not. NULL is returned
  // when the jmethodID does not refer to a valid method.
  static Method* checked_resolve_jmethod_id(jmethodID mid);

  static void change_method_associated_with_jmethod_id(jmethodID old_jmid_ptr, Method* new_method);
  static bool is_method_id(jmethodID mid);

  // Clear methods
  static void clear_jmethod_ids(ClassLoaderData* loader_data);
  static void print_jmethod_ids(const ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;

  // Get this method's jmethodID -- allocate if it doesn't exist
  jmethodID jmethod_id()                         { return method_holder()->get_jmethod_id(this); }

  // Lookup the jmethodID for this method. Return NULL if not found.
  // NOTE that this function can be called from a signal handler
  // (see AsyncGetCallTrace support for Forte Analyzer) and this
  // needs to be async-safe. No allocation should be done and
  // so handles are not used to avoid deadlock.
  jmethodID find_jmethod_id_or_null()            { return method_holder()->jmethod_id_or_null(this); }

  // Support for inlining of intrinsic methods
  vmIntrinsics::ID intrinsic_id() const          { return (vmIntrinsics::ID) _intrinsic_id; }
  void set_intrinsic_id(vmIntrinsics::ID id)     { _intrinsic_id = (u2) id; }

  // Helper routines for intrinsic_id() and vmIntrinsics::method().
  void init_intrinsic_id();     // updates from _none if a match
  static vmSymbols::SID klass_id_for_intrinsics(const Klass* holder);

  bool caller_sensitive() {
    return (_flags & _caller_sensitive) != 0;
  }
  void set_caller_sensitive(bool x) {
    _flags = x ? (_flags | _caller_sensitive) : (_flags & ~_caller_sensitive);
  }

  bool force_inline() {
    return (_flags & _force_inline) != 0;
  }
  void set_force_inline(bool x) {
    _flags = x ? (_flags | _force_inline) : (_flags & ~_force_inline);
  }

  bool dont_inline() {
    return (_flags & _dont_inline) != 0;
  }
  void set_dont_inline(bool x) {
    _flags = x ? (_flags | _dont_inline) : (_flags & ~_dont_inline);
  }

  bool is_hidden() {
    return (_flags & _hidden) != 0;
  }
  void set_hidden(bool x) {
    _flags = x ? (_flags | _hidden) : (_flags & ~_hidden);
  }

  bool intrinsic_candidate() {
    return (_flags & _intrinsic_candidate) != 0;
  }
  void set_intrinsic_candidate(bool x) {
    _flags = x ? (_flags | _intrinsic_candidate) : (_flags & ~_intrinsic_candidate);
  }

  bool has_injected_profile() {
    return (_flags & _has_injected_profile) != 0;
  }
  void set_has_injected_profile(bool x) {
    _flags = x ? (_flags | _has_injected_profile) : (_flags & ~_has_injected_profile);
  }
  bool has_reserved_stack_access() {
    return (_flags & _reserved_stack_access) != 0;
  }

  void set_has_reserved_stack_access(bool x) {
    _flags = x ? (_flags | _reserved_stack_access) : (_flags & ~_reserved_stack_access);
  }

  JFR_ONLY(DEFINE_TRACE_FLAG_ACCESSOR;)

  ConstMethod::MethodType method_type() const {
    return _constMethod->method_type();
  }
  bool is_overpass() const { return method_type() == ConstMethod::OVERPASS; }

  // On-stack replacement support
  bool has_osr_nmethod(int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
  }

  int mark_osr_nmethods() {
    return method_holder()->mark_osr_nmethods(this);
  }

  nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
  }

  // Inline cache support
  void cleanup_inline_caches();

  // Find if klass for method is loaded
  bool is_klass_loaded_by_klass_index(int klass_index) const;
  bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const;

  // Indicates whether compilation failed earlier for this method, or
  // whether it is not compilable for another reason like having a
  // breakpoint set in it.
  bool is_not_compilable(int comp_level = CompLevel_any) const;
  void set_not_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
  void set_not_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_compilable(comp_level, false);
  }
  bool is_not_osr_compilable(int comp_level = CompLevel_any) const;
  void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
  void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_osr_compilable(comp_level, false);
  }
  bool is_always_compilable() const;

 private:
  void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);

 public:
  MethodCounters* get_method_counters(TRAPS) {
    if (_method_counters == NULL) {
      build_method_counters(this, CHECK_AND_CLEAR_NULL);
    }
    return _method_counters;
  }

  bool is_not_c1_compilable() const              { return access_flags().is_not_c1_compilable(); }
  void set_not_c1_compilable()                   { _access_flags.set_not_c1_compilable(); }
  void clear_not_c1_compilable()                 { _access_flags.clear_not_c1_compilable(); }
  bool is_not_c2_compilable() const              { return access_flags().is_not_c2_compilable(); }
  void set_not_c2_compilable()                   { _access_flags.set_not_c2_compilable(); }
  void clear_not_c2_compilable()                 { _access_flags.clear_not_c2_compilable(); }

  bool is_not_c1_osr_compilable() const          { return is_not_c1_compilable(); }    // don't waste an accessFlags bit
  void set_not_c1_osr_compilable()               { set_not_c1_compilable(); }          // don't waste an accessFlags bit
  void clear_not_c1_osr_compilable()             { clear_not_c1_compilable(); }        // don't waste an accessFlags bit
  bool is_not_c2_osr_compilable() const          { return access_flags().is_not_c2_osr_compilable(); }
  void set_not_c2_osr_compilable()               { _access_flags.set_not_c2_osr_compilable(); }
  void clear_not_c2_osr_compilable()             { _access_flags.clear_not_c2_osr_compilable(); }

  // Background compilation support
  bool queued_for_compilation() const            { return access_flags().queued_for_compilation(); }
  void set_queued_for_compilation()              { _access_flags.set_queued_for_compilation();     }
  void clear_queued_for_compilation()            { _access_flags.clear_queued_for_compilation();   }

  // Resolve all classes in signature, return 'true' if successful
  static bool load_signature_classes(const methodHandle& m, TRAPS);

  // Return true if not all classes referenced in the signature, including the return type, have been loaded
  static bool has_unloaded_classes_in_signature(const methodHandle& m, TRAPS);

  // Printing
  void print_short_name(outputStream* st = tty); // prints as klassname::methodname; Exposed so field engineers can debug VM
#if INCLUDE_JVMTI
  void print_name(outputStream* st = tty); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses
#else
  void print_name(outputStream* st = tty) PRODUCT_RETURN; // prints as "virtual void foo(int)"
#endif

  // Helper routine used for method sorting
  static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true);

  // Deallocation function for redefine classes or if an error occurs
  void deallocate_contents(ClassLoaderData* loader_data);

  // Printing
#ifndef PRODUCT
  void print_on(outputStream* st) const;
#endif
  void print_value_on(outputStream* st) const;
  void print_linkage_flags(outputStream* st) PRODUCT_RETURN;

  const char* internal_name() const { return "{method}"; }

  // Check for valid method pointer
  static bool has_method_vptr(const void* ptr);
  bool is_valid_method() const;

  // Verify
  void verify() { verify_on(tty); }
  void verify_on(outputStream* st);

 private:

  // Inlined elements
  address* native_function_addr() const          { assert(is_native(), "must be native"); return (address*) (this+1); }
  address* signature_handler_addr() const        { return native_function_addr() + 1; }
};


// Utility class for compressing line number tables

class CompressedLineNumberWriteStream: public CompressedWriteStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberWriteStream(int initial_size) : CompressedWriteStream(initial_size), _bci(0), _line(0) {}
  CompressedLineNumberWriteStream(u_char* buffer, int initial_size) : CompressedWriteStream(buffer, initial_size), _bci(0), _line(0) {}

  // Write (bci, line number) pair to stream
  void write_pair_regular(int bci_delta, int line_delta);

  inline void write_pair_inline(int bci, int line) {
    int bci_delta = bci - _bci;
    int line_delta = line - _line;
    _bci = bci;
    _line = line;
    // Skip (0,0) deltas - they do not add information and conflict with terminator.
    if (bci_delta == 0 && line_delta == 0) return;
    // Check if the bci delta is 5-bit and the line delta 3-bit unsigned.
    if (((bci_delta & ~0x1F) == 0) && ((line_delta & ~0x7) == 0)) {
      // Compress into single byte.
      jubyte value = ((jubyte) bci_delta << 3) | (jubyte) line_delta;
      // Check that value doesn't match escape character.
      if (value != 0xFF) {
        write_byte(value);
        return;
      }
    }
    write_pair_regular(bci_delta, line_delta);
  }
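  // Illustrative worked example (added by the editor, not from the original
  // header): a pair with bci_delta = 4 and line_delta = 2 fits in 5 + 3
  // unsigned bits, so it is stored as the single byte (4 << 3) | 2 == 0x22.
  // A pair with bci_delta = 31 and line_delta = 7 would pack to 0xFF, which
  // collides with the escape character, so it falls back to write_pair_regular().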
  // Windows AMD64 + Apr 2005 PSDK with /O2 generates bad code for write_pair.
  // Disabling optimization doesn't work for methods in header files
  // so we force it to call through the non-optimized version in the .cpp.
  // It's gross, but it's the only way we can ensure that all callers are
  // fixed. _MSC_VER is defined by the Windows compiler.
#if defined(_M_AMD64) && _MSC_VER >= 1400
  void write_pair(int bci, int line);
#else
  void write_pair(int bci, int line) { write_pair_inline(bci, line); }
#endif

  // Write end-of-stream marker
  void write_terminator()                        { write_byte(0); }
};


// Utility class for decompressing line number tables

class CompressedLineNumberReadStream: public CompressedReadStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberReadStream(u_char* buffer);
  // Read (bci, line number) pair from stream. Returns false at end-of-stream.
  bool read_pair();
  // Accessing bci and line number (after calling read_pair)
  int bci() const                                { return _bci; }
  int line() const                               { return _line; }
};


#if INCLUDE_JVMTI

/// Fast Breakpoints.

// If this structure gets more complicated (because bpts get numerous),
// move it into its own header.

// There is presently no provision for concurrent access
// to breakpoint lists, which is only OK for JVMTI because
// breakpoints are written only at safepoints, and are read
// concurrently only outside of safepoints.

class BreakpointInfo : public CHeapObj<mtClass> {
  friend class VMStructs;
 private:
  Bytecodes::Code  _orig_bytecode;
  int              _bci;
  u2               _name_index;       // of method
  u2               _signature_index;  // of method
  BreakpointInfo*  _next;             // simple storage allocation

 public:
  BreakpointInfo(Method* m, int bci);

  // accessors
  Bytecodes::Code orig_bytecode()                     { return _orig_bytecode; }
  void set_orig_bytecode(Bytecodes::Code code)        { _orig_bytecode = code; }
  int bci()                                           { return _bci; }

  BreakpointInfo* next() const                        { return _next; }
  void set_next(BreakpointInfo* n)                    { _next = n; }

  // helps for searchers
  bool match(const Method* m, int bci) {
    return bci == _bci && match(m);
  }

  bool match(const Method* m) {
    return _name_index == m->name_index() &&
      _signature_index == m->signature_index();
  }

  void set(Method* method);
  void clear(Method* method);
};

#endif // INCLUDE_JVMTI

// Utility class for accessing exception handlers
class ExceptionTable : public StackObj {
 private:
  ExceptionTableElement* _table;
  u2  _length;

 public:
  ExceptionTable(const Method* m) {
    if (m->has_exception_handler()) {
      _table = m->exception_table_start();
      _length = m->exception_table_length();
    } else {
      _table = NULL;
      _length = 0;
    }
  }

  int length() const {
    return _length;
  }

  u2 start_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].start_pc;
  }

  void set_start_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].start_pc = value;
  }

  u2 end_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].end_pc;
  }

  void set_end_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].end_pc = value;
  }
  u2 handler_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].handler_pc;
  }

  void set_handler_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].handler_pc = value;
  }

  u2 catch_type_index(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].catch_type_index;
  }

  void set_catch_type_index(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].catch_type_index = value;
  }
};

#endif // SHARE_VM_OOPS_METHODOOP_HPP