/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_METHODOOP_HPP
#define SHARE_VM_OOPS_METHODOOP_HPP

#include "classfile/vmSymbols.hpp"
#include "code/compressedStream.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/invocationCounter.hpp"
#include "oops/annotations.hpp"
#include "oops/constantPool.hpp"
#include "oops/methodCounters.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.hpp"
#include "oops/typeArrayOop.hpp"
#include "utilities/accessFlags.hpp"
#include "utilities/growableArray.hpp"

// A Method represents a Java method.
//
// Note that most applications load thousands of methods, so keeping the size of this
// class small has a big impact on footprint.
//
// Note that native_function and signature_handler have to be at fixed offsets
// (required by the interpreter)
//
// Method embedded field layout (after declared fields):
//   [EMBEDDED native_function    (present only if native) ]
//   [EMBEDDED signature_handler  (present only if native) ]

class CheckedExceptionElement;
class LocalVariableTableElement;
class AdapterHandlerEntry;
class MethodData;
class MethodCounters;
class ConstMethod;
class InlineTableSizes;
class KlassSizeStats;

class Method : public Metadata {
 friend class VMStructs;
 friend class JVMCIVMStructs;
 private:
  ConstMethod*      _constMethod;                // Method read-only data.
#if defined(COMPILER2) || INCLUDE_JVMCI
  MethodData*       _method_data;
#endif
  MethodCounters*   _method_counters;
  AccessFlags       _access_flags;               // Access flags
  int               _vtable_index;               // vtable index of this method (see VtableIndexFlag)
                                                 // note: can have vtables with >2**16 elements (because of inheritance)
#ifdef CC_INTERP
  int               _result_index;               // needed by the C++ interpreter for converting results to/from the stack
#endif
  u2                _intrinsic_id;               // vmSymbols::intrinsic_id (0 == _none)

  // Flags
  enum Flags {
    _jfr_towrite           = 1 << 0,
    _caller_sensitive      = 1 << 1,
    _force_inline          = 1 << 2,
    _dont_inline           = 1 << 3,
    _hidden                = 1 << 4,
    _has_injected_profile  = 1 << 5,
    _running_emcp          = 1 << 6,
    _intrinsic_candidate   = 1 << 7,
    _reserved_stack_access = 1 << 8
  };
  mutable u2 _flags;

#ifndef PRODUCT
  int               _compiled_invocation_count;  // Number of nmethod invocations so far (for perf. debugging)
#endif
  // Entry point for calling both from and to the interpreter.
  address _i2i_entry;           // All-args-on-stack calling convention
  // Adapter blob (i2c/c2i) for this Method*. Set once when method is linked.
  AdapterHandlerEntry* _adapter;
  // Entry point for calling from compiled code, to compiled code if it exists
  // or else the interpreter.
  volatile address _from_compiled_entry;         // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
  // The entry point for calling both from and to compiled code is
  // "_code->entry_point()".  Because of tiered compilation and de-opt, this
  // field can come and go.  It can transition from NULL to not-null at any
  // time (whenever a compile completes).  It can transition from not-null to
  // NULL only at safepoints (because of a de-opt).
  nmethod* volatile _code;                       // Points to the corresponding piece of native code
  volatile address  _from_interpreted_entry;     // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
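  // Illustrative summary (not part of the VM; it just restates the caches
  // declared above):
  //
  //   interpreted caller enters via _from_interpreted_entry
  //       == (_code != NULL) ? _adapter->i2c_entry() : _i2i_entry
  //   compiled caller enters via _from_compiled_entry
  //       == (_code != NULL) ? _code->entry_point() : _adapter->c2i_entry()
  //
  // A completing compile flips both caches toward _code; a de-opt at a
  // safepoint flips them back toward the interpreter.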
  // Constructor
  Method(ConstMethod* xconst, AccessFlags access_flags);
 public:

  static Method* allocate(ClassLoaderData* loader_data,
                          int byte_code_size,
                          AccessFlags access_flags,
                          InlineTableSizes* sizes,
                          ConstMethod::MethodType method_type,
                          TRAPS);

  // CDS and vtbl checking can create an empty Method to get vtbl pointer.
  Method() {}

  // The Method vtable is restored by this call when the Method is in the
  // shared archive.  See patch_klass_vtables() in metaspaceShared.cpp for
  // all the gory details.  SA, dtrace and pstack helpers distinguish metadata
  // by their vtable.
  void restore_vtable() { guarantee(is_method(), "vtable restored by this call"); }
  bool is_method() const volatile { return true; }

  void restore_unshareable_info(TRAPS);

  // accessors for instance variables

  ConstMethod* constMethod() const               { return _constMethod; }
  void set_constMethod(ConstMethod* xconst)      { _constMethod = xconst; }


  static address make_adapters(methodHandle mh, TRAPS);
  volatile address from_compiled_entry() const   { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); }
  volatile address from_interpreted_entry() const{ return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); }

  // access flag
  AccessFlags access_flags() const               { return _access_flags;  }
  void set_access_flags(AccessFlags flags)       { _access_flags = flags; }

  // name
  Symbol* name() const                           { return constants()->symbol_at(name_index()); }
  int name_index() const                         { return constMethod()->name_index(); }
  void set_name_index(int index)                 { constMethod()->set_name_index(index); }

  // signature
  Symbol* signature() const                      { return constants()->symbol_at(signature_index()); }
  int signature_index() const                    { return constMethod()->signature_index(); }
  void set_signature_index(int index)            { constMethod()->set_signature_index(index); }

  // generics support
  Symbol* generic_signature() const              { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); }
  int generic_signature_index() const            { return constMethod()->generic_signature_index(); }
  void set_generic_signature_index(int index)    { constMethod()->set_generic_signature_index(index); }

  // annotations support
  AnnotationArray* annotations() const           {
    return constMethod()->method_annotations();
  }
  AnnotationArray* parameter_annotations() const {
    return constMethod()->parameter_annotations();
  }
  AnnotationArray* annotation_default() const    {
    return constMethod()->default_annotations();
  }
  AnnotationArray* type_annotations() const      {
    return constMethod()->type_annotations();
  }

#ifdef CC_INTERP
  void set_result_index(BasicType type);
  int  result_index()                            { return _result_index; }
#endif

  // Helper routine: get klass name + "." + method name + signature as
  // C string, for the purpose of providing more useful NoSuchMethodErrors
  // and fatal error handling. The string is allocated in resource
  // area if a buffer is not provided by the caller.
  char* name_and_sig_as_C_string() const;
  char* name_and_sig_as_C_string(char* buf, int size) const;

  // Static routines for situations where we don't have a Method*
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size);

  Bytecodes::Code java_code_at(int bci) const {
    return Bytecodes::java_code_at(this, bcp_from(bci));
  }
  Bytecodes::Code code_at(int bci) const {
    return Bytecodes::code_at(this, bcp_from(bci));
  }

  // JVMTI breakpoints
  Bytecodes::Code orig_bytecode_at(int bci) const;
  void set_orig_bytecode_at(int bci, Bytecodes::Code code);
  void set_breakpoint(int bci);
  void clear_breakpoint(int bci);
  void clear_all_breakpoints();
  // Tracking number of breakpoints, for fullspeed debugging.
  // Only mutated by VM thread.
  u2 number_of_breakpoints() const {
    MethodCounters* mcs = method_counters();
    if (mcs == NULL) {
      return 0;
    } else {
      return mcs->number_of_breakpoints();
    }
  }
  void incr_number_of_breakpoints(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->incr_number_of_breakpoints();
    }
  }
  void decr_number_of_breakpoints(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->decr_number_of_breakpoints();
    }
  }
  // Initialization only
  void clear_number_of_breakpoints() {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->clear_number_of_breakpoints();
    }
  }

  // index into InstanceKlass methods() array
  // note: also used by jfr
  u2 method_idnum() const                        { return constMethod()->method_idnum(); }
  void set_method_idnum(u2 idnum)                { constMethod()->set_method_idnum(idnum); }

  u2 orig_method_idnum() const                   { return constMethod()->orig_method_idnum(); }
  void set_orig_method_idnum(u2 idnum)           { constMethod()->set_orig_method_idnum(idnum); }

  // code size
  int code_size() const                          { return constMethod()->code_size(); }

  // method size in words
  // (note the parentheses: without them, '+' would bind tighter than '?:')
  int method_size() const                        { return sizeof(Method)/wordSize + (is_native() ? 2 : 0); }
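  // Worked example (illustrative only): if sizeof(Method) is, say, 11 words
  // on a given platform, a non-native method occupies 11 words while a native
  // method occupies 13 -- the two extra words hold the embedded
  // native_function and signature_handler pointers declared at the end of
  // this class:
  //
  //   Method* m = ...;
  //   int words = m->method_size();   // 11 + (m->is_native() ? 2 : 0)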
  // constant pool for Klass* holding this method
  ConstantPool* constants() const                { return constMethod()->constants(); }
  void set_constants(ConstantPool* c)            { constMethod()->set_constants(c); }

  // max stack
  // return original max stack size for method verification
  int  verifier_max_stack() const                { return constMethod()->max_stack(); }
  int  max_stack() const                         { return constMethod()->max_stack() + extra_stack_entries(); }
  void set_max_stack(int size)                   { constMethod()->set_max_stack(size); }

  // max locals
  int  max_locals() const                        { return constMethod()->max_locals(); }
  void set_max_locals(int size)                  { constMethod()->set_max_locals(size); }

  int highest_comp_level() const;
  void set_highest_comp_level(int level);
  int highest_osr_comp_level() const;
  void set_highest_osr_comp_level(int level);

  // Count of times method was exited via exception while interpreting
  void interpreter_throwout_increment(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->interpreter_throwout_increment();
    }
  }

  int interpreter_throwout_count() const {
    MethodCounters* mcs = method_counters();
    if (mcs == NULL) {
      return 0;
    } else {
      return mcs->interpreter_throwout_count();
    }
  }

  // size of parameters
  int  size_of_parameters() const                { return constMethod()->size_of_parameters(); }
  void set_size_of_parameters(int size)          { constMethod()->set_size_of_parameters(size); }

  bool has_stackmap_table() const {
    return constMethod()->has_stackmap_table();
  }

  Array<u1>* stackmap_data() const {
    return constMethod()->stackmap_data();
  }

  void set_stackmap_data(Array<u1>* sd) {
    constMethod()->set_stackmap_data(sd);
  }

  // exception handler table
  bool has_exception_handler() const
                             { return constMethod()->has_exception_handler(); }
  int exception_table_length() const
                             { return constMethod()->exception_table_length(); }
  ExceptionTableElement* exception_table_start() const
                             { return constMethod()->exception_table_start(); }

  // Finds the first entry point bci of an exception handler for an
  // exception of klass ex_klass thrown at throw_bci. A value of NULL
  // for ex_klass indicates that the exception klass is not known; in
  // this case it matches any constraint class. Returns -1 if the
  // exception cannot be handled in this method. The handler
  // constraint classes are loaded if necessary. Note that this may
  // throw an exception if loading of the constraint classes causes
  // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError.
  // If an exception is thrown, returns the bci of the
  // exception handler which caused the exception to be thrown, which
  // is needed for proper retries. See, for example,
  // InterpreterRuntime::exception_handler_for_exception.
  static int fast_exception_handler_bci_for(methodHandle mh, KlassHandle ex_klass, int throw_bci, TRAPS);
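  // Usage sketch (illustrative only, simplified from the interpreter's unwind
  // path in InterpreterRuntime::exception_handler_for_exception):
  //
  //   int handler_bci =
  //       Method::fast_exception_handler_bci_for(mh, ex_klass, throw_bci, THREAD);
  //   if (HAS_PENDING_EXCEPTION) { ... }   // constraint-class loading failed
  //   else if (handler_bci == -1) { ... }  // no handler here: unwind the frame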
#if defined(COMPILER2) || INCLUDE_JVMCI
  // method data access
  MethodData* method_data() const {
    return _method_data;
  }

  void set_method_data(MethodData* data) {
    // The store into method must be released. On platforms without
    // total store order (TSO) the reference may become visible before
    // the initialization of data otherwise.
    OrderAccess::release_store_ptr((volatile void *)&_method_data, data);
  }
#else
  MethodData* method_data() const { return NULL; }
  void set_method_data(MethodData* data) { }
#endif

  MethodCounters* method_counters() const {
    return _method_counters;
  }

  void clear_method_counters() {
    _method_counters = NULL;
  }

  bool init_method_counters(MethodCounters* counters) {
    // Try to install a pointer to MethodCounters, return true on success.
    return Atomic::cmpxchg_ptr(counters, (volatile void*)&_method_counters, NULL) == NULL;
  }
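  // Install sketch (illustrative only; compare build_method_counters() in
  // method.cpp): racing threads may both allocate a MethodCounters, but only
  // one wins the cmpxchg, and the loser must free its copy:
  //
  //   MethodCounters* counters = /* allocate */;
  //   if (!m->init_method_counters(counters)) {
  //     // another thread won the race; release 'counters'
  //   }
  //   MethodCounters* winner = m->method_counters();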
#ifdef TIERED
  // We are reusing interpreter_invocation_count as a holder for the previous event count!
  // We can do that since interpreter_invocation_count is not used in tiered.
  int prev_event_count() const {
    if (method_counters() == NULL) {
      return 0;
    } else {
      return method_counters()->interpreter_invocation_count();
    }
  }
  void set_prev_event_count(int count) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_interpreter_invocation_count(count);
    }
  }
  jlong prev_time() const {
    MethodCounters* mcs = method_counters();
    return mcs == NULL ? 0 : mcs->prev_time();
  }
  void set_prev_time(jlong time) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_prev_time(time);
    }
  }
  float rate() const {
    MethodCounters* mcs = method_counters();
    return mcs == NULL ? 0 : mcs->rate();
  }
  void set_rate(float rate) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_rate(rate);
    }
  }
#endif
  int nmethod_age() const {
    if (method_counters() == NULL) {
      return INT_MAX;
    } else {
      return method_counters()->nmethod_age();
    }
  }

  int invocation_count();
  int backedge_count();

  bool was_executed_more_than(int n);
  bool was_never_executed()                      { return !was_executed_more_than(0); }

  static void build_interpreter_method_data(const methodHandle& method, TRAPS);

  static MethodCounters* build_method_counters(Method* m, TRAPS);

  int interpreter_invocation_count() {
    if (TieredCompilation) {
      return invocation_count();
    } else {
      MethodCounters* mcs = method_counters();
      return (mcs == NULL) ? 0 : mcs->interpreter_invocation_count();
    }
  }
  int increment_interpreter_invocation_count(TRAPS) {
    if (TieredCompilation) ShouldNotReachHere();
    MethodCounters* mcs = get_method_counters(CHECK_0);
    return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count();
  }

#ifndef PRODUCT
  int  compiled_invocation_count() const         { return _compiled_invocation_count; }
  void set_compiled_invocation_count(int count)  { _compiled_invocation_count = count; }
#else
  // for PrintMethodData in a product build
  int  compiled_invocation_count() const         { return 0; }
#endif // not PRODUCT

  // Clear (non-shared space) pointers which could not be relevant
  // if this (shared) method were mapped into another JVM.
  void remove_unshareable_info();

  // nmethod/verified compiler entry
  address verified_code_entry();
  bool check_code() const;      // Not inline to avoid circular ref
  nmethod* volatile code() const                 { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
  void clear_code();            // Clear out any compiled code
  static void set_code(methodHandle mh, nmethod* code);
  void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; }
  address get_i2c_entry();
  address get_c2i_entry();
  address get_c2i_unverified_entry();
  AdapterHandlerEntry* adapter() { return _adapter; }
  // setup entry points
  void link_method(const methodHandle& method, TRAPS);
  // clear entry points. Used by sharing code
  void unlink_method();

  // vtable index
  enum VtableIndexFlag {
    // Valid vtable indexes are non-negative (>= 0).
    // These few negative values are used as sentinels.
    itable_index_max        = -10, // first itable index, growing downward
    pending_itable_index    = -9,  // itable index will be assigned
    invalid_vtable_index    = -4,  // distinct from any valid vtable index
    garbage_vtable_index    = -3,  // not yet linked; no vtable layout yet
    nonvirtual_vtable_index = -2   // there is no need for vtable dispatch
    // 6330203 Note:  Do not use -1, which was overloaded with many meanings.
  };
  DEBUG_ONLY(bool valid_vtable_index() const     { return _vtable_index >= nonvirtual_vtable_index; })
  bool has_vtable_index() const                  { return _vtable_index >= 0; }
  int  vtable_index() const                      { return _vtable_index; }
  void set_vtable_index(int index)               { _vtable_index = index; }
  DEBUG_ONLY(bool valid_itable_index() const     { return _vtable_index <= pending_itable_index; })
  bool has_itable_index() const                  { return _vtable_index <= itable_index_max; }
  int  itable_index() const                      { assert(valid_itable_index(), "");
                                                   return itable_index_max - _vtable_index; }
  void set_itable_index(int index)               { _vtable_index = itable_index_max - index; assert(valid_itable_index(), ""); }
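  // Worked example (illustrative only): itable indexes are folded into
  // _vtable_index below itable_index_max, so the encoding is its own inverse:
  //
  //   set_itable_index(0);   // _vtable_index == -10 (itable_index_max)
  //   set_itable_index(5);   // _vtable_index == -15
  //   itable_index();        // itable_index_max - (-15) == 5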
  // interpreter entry
  address interpreter_entry() const              { return _i2i_entry; }
  // Only used during initial setup so we can set both _i2i_entry and _from_interpreted_entry
  void set_interpreter_entry(address entry)      { _i2i_entry = entry;  _from_interpreted_entry = entry; }

  // native function (used for native methods only)
  enum {
    native_bind_event_is_interesting = true
  };
  address native_function() const                { return *(native_function_addr()); }
  address critical_native_function();

  // Must specify a real function (not NULL).
  // Use clear_native_function() to unregister.
  void set_native_function(address function, bool post_event_flag);
  bool has_native_function() const;
  void clear_native_function();

  // signature handler (used for native methods only)
  address signature_handler() const              { return *(signature_handler_addr()); }
  void set_signature_handler(address handler);

  // Interpreter oopmap support
  void mask_for(int bci, InterpreterOopMap* mask);

  // operations on invocation counter
  void print_invocation_count();

  // byte codes
  void    set_code(address code)                 { return constMethod()->set_code(code); }
  address code_base() const                      { return constMethod()->code_base(); }
  bool    contains(address bcp) const            { return constMethod()->contains(bcp); }

  // prints byte codes
  void print_codes() const                       { print_codes_on(tty); }
  void print_codes_on(outputStream* st) const;
  void print_codes_on(int from, int to, outputStream* st) const;

  // method parameters
  bool has_method_parameters() const
                         { return constMethod()->has_method_parameters(); }
  int method_parameters_length() const
                         { return constMethod()->method_parameters_length(); }
  MethodParametersElement* method_parameters_start() const
                         { return constMethod()->method_parameters_start(); }

  // checked exceptions
  int checked_exceptions_length() const
                         { return constMethod()->checked_exceptions_length(); }
  CheckedExceptionElement* checked_exceptions_start() const
                         { return constMethod()->checked_exceptions_start(); }

  // localvariable table
  bool has_localvariable_table() const
                         { return constMethod()->has_localvariable_table(); }
  int localvariable_table_length() const
                         { return constMethod()->localvariable_table_length(); }
  LocalVariableTableElement* localvariable_table_start() const
                         { return constMethod()->localvariable_table_start(); }

  bool has_linenumber_table() const
                         { return constMethod()->has_linenumber_table(); }
  u_char* compressed_linenumber_table() const
                         { return constMethod()->compressed_linenumber_table(); }

  // method holder (the Klass* holding this method)
  InstanceKlass* method_holder() const           { return constants()->pool_holder(); }

  void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments)
  Symbol* klass_name() const;                    // returns the name of the method holder
  BasicType result_type() const;                 // type of the method result
  int result_type_index() const;                 // type index of the method result
  bool is_returning_oop() const                  { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY); }
  bool is_returning_fp() const                   { BasicType r = result_type(); return (r == T_FLOAT || r == T_DOUBLE); }

  // Checked exceptions thrown by this method (resolved to mirrors)
  objArrayHandle resolved_checked_exceptions(TRAPS) { return resolved_checked_exceptions_impl(this, THREAD); }

  // Access flags
  bool is_public() const                         { return access_flags().is_public();      }
  bool is_private() const                        { return access_flags().is_private();     }
  bool is_protected() const                      { return access_flags().is_protected();   }
  bool is_package_private() const                { return !is_public() && !is_private() && !is_protected(); }
  bool is_static() const                         { return access_flags().is_static();      }
  bool is_final() const                          { return access_flags().is_final();       }
  bool is_synchronized() const                   { return access_flags().is_synchronized();}
  bool is_native() const                         { return access_flags().is_native();      }
  bool is_abstract() const                       { return access_flags().is_abstract();    }
  bool is_strict() const                         { return access_flags().is_strict();      }
  bool is_synthetic() const                      { return access_flags().is_synthetic();   }

  // returns true if the method contains only a return operation
  bool is_empty_method() const;

  // returns true if this is a vanilla constructor
  bool is_vanilla_constructor() const;

  // checks method and its method holder
  bool is_final_method() const;
  bool is_final_method(AccessFlags class_access_flags) const;
  bool is_default_method() const;

  // true if method needs no dynamic dispatch (final and/or no vtable entry)
  bool can_be_statically_bound() const;
  bool can_be_statically_bound(AccessFlags class_access_flags) const;

  // returns true if the method has any backward branches.
  bool has_loops() {
    return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag();
  }

  bool compute_has_loops_flag();

  bool has_jsrs() {
    return access_flags().has_jsrs();
  }
  void set_has_jsrs() {
    _access_flags.set_has_jsrs();
  }

  // returns true if the method has any monitors.
  bool has_monitors() const                      { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
  bool has_monitor_bytecodes() const             { return access_flags().has_monitor_bytecodes(); }

  void set_has_monitor_bytecodes()               { _access_flags.set_has_monitor_bytecodes(); }

  // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit bytecodes
  // properly nest in the method. It might return false even though they actually nest properly, since the info
  // has not been computed yet.
  bool guaranteed_monitor_matching() const       { return access_flags().is_monitor_matching(); }
  void set_guaranteed_monitor_matching()         { _access_flags.set_monitor_matching(); }

  // returns true if the method is an accessor function (setter/getter).
  bool is_accessor() const;

  // returns true if the method is a getter
  bool is_getter() const;

  // returns true if the method is a setter
  bool is_setter() const;

  // returns true if the method does nothing but return a constant of primitive type
  bool is_constant_getter() const;

  // returns true if the method is an initializer (<init> or <clinit>).
  bool is_initializer() const;

  // returns true if the method is static OR if the classfile version < 51
  bool has_valid_initializer_flags() const;

  // returns true if the method name is <clinit> and the method has
  // valid static initializer flags.
  bool is_static_initializer() const;

  // compiled code support
  // NOTE: code() is inherently racy as deopt can be clearing code
  // simultaneously. Use with caution.
  bool has_compiled_code() const                 { return code() != NULL; }

  // sizing
  static int header_size()                       { return sizeof(Method)/wordSize; }
  static int size(bool is_native);
  int size() const                               { return method_size(); }
#if INCLUDE_SERVICES
  void collect_statistics(KlassSizeStats *sz) const;
#endif
  void log_touched(TRAPS);
  static void print_touched_methods(outputStream* out);

  // interpreter support
  static ByteSize const_offset()                 { return byte_offset_of(Method, _constMethod ); }
  static ByteSize access_flags_offset()          { return byte_offset_of(Method, _access_flags ); }
#ifdef CC_INTERP
  static ByteSize result_index_offset()          { return byte_offset_of(Method, _result_index ); }
#endif /* CC_INTERP */
  static ByteSize from_compiled_offset()         { return byte_offset_of(Method, _from_compiled_entry); }
  static ByteSize code_offset()                  { return byte_offset_of(Method, _code); }
#if defined(COMPILER2) || INCLUDE_JVMCI
  static ByteSize method_data_offset() {
    return byte_offset_of(Method, _method_data);
  }
#else
  static ByteSize method_data_offset() {
    ShouldNotReachHere();
    return in_ByteSize(0);
  }
#endif
  static ByteSize method_counters_offset() {
    return byte_offset_of(Method, _method_counters);
  }
#ifndef PRODUCT
  static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); }
#endif // not PRODUCT
  static ByteSize native_function_offset()       { return in_ByteSize(sizeof(Method)); }
  static ByteSize from_interpreted_offset()      { return byte_offset_of(Method, _from_interpreted_entry ); }
  static ByteSize interpreter_entry_offset()     { return byte_offset_of(Method, _i2i_entry ); }
  static ByteSize signature_handler_offset()     { return in_ByteSize(sizeof(Method) + wordSize); }

  // for code generation
#if defined(COMPILER2) || INCLUDE_JVMCI
  static int method_data_offset_in_bytes()       { return offset_of(Method, _method_data); }
#else
  static int method_data_offset_in_bytes()       { ShouldNotReachHere(); return 0; }
#endif
  static int intrinsic_id_offset_in_bytes()      { return offset_of(Method, _intrinsic_id); }
  static int intrinsic_id_size_in_bytes()        { return sizeof(u2); }

  // Static methods that are used to implement member methods where an exposed this pointer
  // is needed due to possible GCs
  static objArrayHandle resolved_checked_exceptions_impl(Method* method, TRAPS);

  // Returns the byte code index from the byte code pointer
  int     bci_from(address bcp) const;
  address bcp_from(int bci) const;
  address bcp_from(address bcp) const;
  int validate_bci_from_bcp(address bcp) const;
  int validate_bci(int bci) const;

  // Returns the line number for a bci if debugging information for the method is provided,
  // -1 is returned otherwise.
  int line_number_from_bci(int bci) const;

  // Reflection support
  bool is_overridden_in(Klass* k) const;

  // Stack walking support
  bool is_ignored_by_security_stack_walk() const;

  // JSR 292 support
  bool is_method_handle_intrinsic() const;       // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
  bool is_compiled_lambda_form() const;          // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
  bool has_member_arg() const;                   // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
  static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual
                                                   Symbol* signature, // anything at all
                                                   TRAPS);
  static Klass* check_non_bcp_klass(Klass* klass);

  enum {
    // How many extra stack entries for invokedynamic
    extra_stack_entries_for_jsr292 = 1
  };

  // this operates only on invoke methods:
  // presize interpreter frames for extra interpreter stack entries, if needed
  // Account for the extra appendix argument for invokehandle/invokedynamic
  static int extra_stack_entries() { return extra_stack_entries_for_jsr292; }
  static int extra_stack_words();  // = extra_stack_entries() * Interpreter::stackElementSize
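  // Worked example (illustrative only): a method whose class file declares
  // max_stack == 4 reports verifier_max_stack() == 4 but max_stack() == 5;
  // the extra slot leaves room for the appendix argument pushed by
  // invokehandle/invokedynamic call sites:
  //
  //   m->max_stack() == m->verifier_max_stack() + Method::extra_stack_entries()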
  // RedefineClasses() support:
  bool is_old() const                            { return access_flags().is_old(); }
  void set_is_old()                              { _access_flags.set_is_old(); }
  bool is_obsolete() const                       { return access_flags().is_obsolete(); }
  void set_is_obsolete()                         { _access_flags.set_is_obsolete(); }
  bool is_deleted() const                        { return access_flags().is_deleted(); }
  void set_is_deleted()                          { _access_flags.set_is_deleted(); }

  bool is_running_emcp() const {
    // EMCP methods are old but not obsolete or deleted. Equivalent
    // Modulo Constant Pool means the method is equivalent except
    // the constant pool and instructions that access the constant
    // pool might be different.
    // If a breakpoint is set in a redefined method, its EMCP methods that are
    // still running must have a breakpoint also.
    return (_flags & _running_emcp) != 0;
  }

  void set_running_emcp(bool x) {
    _flags = x ? (_flags | _running_emcp) : (_flags & ~_running_emcp);
  }

  bool on_stack() const                          { return access_flags().on_stack(); }
  void set_on_stack(const bool value);

  // see the definition in method.cpp for the gory details
  bool should_not_be_cached() const;

  // JVMTI Native method prefixing support:
  bool is_prefixed_native() const                { return access_flags().is_prefixed_native(); }
  void set_is_prefixed_native()                  { _access_flags.set_is_prefixed_native(); }

  // Rewriting support
  static methodHandle clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
                                          u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS);

  // jmethodID handling
  // Because the useful life-span of a jmethodID cannot be determined,
  // once created they are never reclaimed.  The methods to which they refer,
  // however, can be GC'ed away if the class is unloaded or if the method is
  // made obsolete or deleted -- in these cases, the jmethodID
  // refers to NULL (as is the case for any weak reference).
  static jmethodID make_jmethod_id(ClassLoaderData* loader_data, Method* mh);
  static void destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID mid);

  // Ensure there is enough capacity in the internal tracking data
  // structures to hold the number of jmethodIDs you plan to generate.
  // This saves substantial time doing allocations.
  static void ensure_jmethod_ids(ClassLoaderData* loader_data, int capacity);
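  // Usage sketch (illustrative only; 'ik' and the loop are hypothetical):
  // pre-size the tracking structures before creating ids in bulk, then let
  // each Method allocate its id lazily:
  //
  //   Method::ensure_jmethod_ids(loader_data, ik->methods()->length());
  //   for (int i = 0; i < ik->methods()->length(); i++) {
  //     jmethodID jmid = ik->methods()->at(i)->jmethod_id();
  //   }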
  // Use resolve_jmethod_id() in situations where the caller is expected
  // to provide a valid jmethodID; the only sanity checks are in asserts;
  // result guaranteed not to be NULL.
  inline static Method* resolve_jmethod_id(jmethodID mid) {
    assert(mid != NULL, "JNI method id should not be null");
    return *((Method**)mid);
  }

  // Use checked_resolve_jmethod_id() in situations where the caller
  // should provide a valid jmethodID, but might not. NULL is returned
  // when the jmethodID does not refer to a valid method.
  static Method* checked_resolve_jmethod_id(jmethodID mid);

  static void change_method_associated_with_jmethod_id(jmethodID old_jmid_ptr, Method* new_method);
  static bool is_method_id(jmethodID mid);

  // Clear methods
  static void clear_jmethod_ids(ClassLoaderData* loader_data);
  static void print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;

  // Get this method's jmethodID -- allocate if it doesn't exist
  jmethodID jmethod_id()                         { methodHandle this_h(this);
                                                   return InstanceKlass::get_jmethod_id(method_holder(), this_h); }

  // Lookup the jmethodID for this method.  Return NULL if not found.
  // NOTE that this function can be called from a signal handler
  // (see AsyncGetCallTrace support for Forte Analyzer) and this
  // needs to be async-safe. No allocation should be done and
  // so handles are not used to avoid deadlock.
  jmethodID find_jmethod_id_or_null()            { return method_holder()->jmethod_id_or_null(this); }

  // Support for inlining of intrinsic methods
  vmIntrinsics::ID intrinsic_id() const          { return (vmIntrinsics::ID) _intrinsic_id; }
  void set_intrinsic_id(vmIntrinsics::ID id)     { _intrinsic_id = (u2) id; }

  // Helper routines for intrinsic_id() and vmIntrinsics::method().
  void init_intrinsic_id();     // updates from _none if a match
  static vmSymbols::SID klass_id_for_intrinsics(const Klass* holder);

  bool jfr_towrite() const {
    return (_flags & _jfr_towrite) != 0;
  }
  void set_jfr_towrite(bool x) const {
    _flags = x ? (_flags | _jfr_towrite) : (_flags & ~_jfr_towrite);
  }

  bool caller_sensitive() {
    return (_flags & _caller_sensitive) != 0;
  }
  void set_caller_sensitive(bool x) {
    _flags = x ? (_flags | _caller_sensitive) : (_flags & ~_caller_sensitive);
  }

  bool force_inline() {
    return (_flags & _force_inline) != 0;
  }
  void set_force_inline(bool x) {
    _flags = x ? (_flags | _force_inline) : (_flags & ~_force_inline);
  }

  bool dont_inline() {
    return (_flags & _dont_inline) != 0;
  }
  void set_dont_inline(bool x) {
    _flags = x ? (_flags | _dont_inline) : (_flags & ~_dont_inline);
  }

  bool is_hidden() {
    return (_flags & _hidden) != 0;
  }
  void set_hidden(bool x) {
    _flags = x ? (_flags | _hidden) : (_flags & ~_hidden);
  }

  bool intrinsic_candidate() {
    return (_flags & _intrinsic_candidate) != 0;
  }
  void set_intrinsic_candidate(bool x) {
    _flags = x ? (_flags | _intrinsic_candidate) : (_flags & ~_intrinsic_candidate);
  }

  bool has_injected_profile() {
    return (_flags & _has_injected_profile) != 0;
  }
  void set_has_injected_profile(bool x) {
    _flags = x ? (_flags | _has_injected_profile) : (_flags & ~_has_injected_profile);
  }

  bool has_reserved_stack_access() {
    return (_flags & _reserved_stack_access) != 0;
  }

  void set_has_reserved_stack_access(bool x) {
    _flags = x ? (_flags | _reserved_stack_access) : (_flags & ~_reserved_stack_access);
  }

  ConstMethod::MethodType method_type() const {
    return _constMethod->method_type();
  }
  bool is_overpass() const { return method_type() == ConstMethod::OVERPASS; }

  // On-stack replacement support
  bool has_osr_nmethod(int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
  }

  int mark_osr_nmethods() {
    return method_holder()->mark_osr_nmethods(this);
  }

  nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
  }

  // Inline cache support
  void cleanup_inline_caches();

  // Find if klass for method is loaded
  bool is_klass_loaded_by_klass_index(int klass_index) const;
  bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const;

  // Indicates whether compilation failed earlier for this method, or
  // whether it is not compilable for another reason like having a
  // breakpoint set in it.
  bool is_not_compilable(int comp_level = CompLevel_any) const;
  void set_not_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
  void set_not_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_compilable(comp_level, false);
  }
  bool is_not_osr_compilable(int comp_level = CompLevel_any) const;
  void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
  void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_osr_compilable(comp_level, false);
  }
  bool is_always_compilable() const;

 private:
  void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);

 public:
  MethodCounters* get_method_counters(TRAPS) {
    if (_method_counters == NULL) {
      build_method_counters(this, CHECK_AND_CLEAR_NULL);
    }
    return _method_counters;
  }

  bool   is_not_c1_compilable() const            { return access_flags().is_not_c1_compilable(); }
  void  set_not_c1_compilable()                  { _access_flags.set_not_c1_compilable(); }
  void clear_not_c1_compilable()                 { _access_flags.clear_not_c1_compilable(); }
  bool   is_not_c2_compilable() const            { return access_flags().is_not_c2_compilable(); }
  void  set_not_c2_compilable()                  { _access_flags.set_not_c2_compilable(); }
  void clear_not_c2_compilable()                 { _access_flags.clear_not_c2_compilable(); }

  bool   is_not_c1_osr_compilable() const        { return is_not_c1_compilable(); }  // don't waste an accessFlags bit
  void  set_not_c1_osr_compilable()              { set_not_c1_compilable(); }        // don't waste an accessFlags bit
  void clear_not_c1_osr_compilable()             { clear_not_c1_compilable(); }      // don't waste an accessFlags bit
  bool   is_not_c2_osr_compilable() const        { return access_flags().is_not_c2_osr_compilable(); }
  void  set_not_c2_osr_compilable()              { _access_flags.set_not_c2_osr_compilable(); }
  void clear_not_c2_osr_compilable()             { _access_flags.clear_not_c2_osr_compilable(); }

  // Background compilation support
  bool queued_for_compilation() const            { return access_flags().queued_for_compilation(); }
  void set_queued_for_compilation()              { _access_flags.set_queued_for_compilation(); }
  void clear_queued_for_compilation()            { _access_flags.clear_queued_for_compilation(); }

  // Resolve all classes in the signature; return 'true' if successful
  static bool load_signature_classes(methodHandle m, TRAPS);

  // Returns true if not all classes referenced in the signature, including the return type, have been loaded
  static bool has_unloaded_classes_in_signature(methodHandle m, TRAPS);

  // Printing
  void print_short_name(outputStream* st = tty); // prints as klassname::methodname; Exposed so field engineers can debug VM
#if INCLUDE_JVMTI
  void print_name(outputStream* st = tty); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses
#else
  void print_name(outputStream* st = tty) PRODUCT_RETURN; // prints as "virtual void foo(int)"
#endif

  // Helper routine used for method sorting
  static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true);

  // Deallocation function for redefine classes or if an error occurs
  void deallocate_contents(ClassLoaderData* loader_data);

  // Printing
#ifndef PRODUCT
  void print_on(outputStream* st) const;
#endif
  void print_value_on(outputStream* st) const;
  void print_linkage_flags(outputStream* st) PRODUCT_RETURN;

  const char* internal_name() const { return "{method}"; }

  // Check for valid method pointer
  static bool has_method_vptr(const void* ptr);
  bool is_valid_method() const;

  // Verify
  void verify() { verify_on(tty); }
  void verify_on(outputStream* st);

 private:

  // Inlined elements
  address* native_function_addr() const          { assert(is_native(), "must be native"); return (address*) (this+1); }
  address* signature_handler_addr() const        { return native_function_addr() + 1; }
};


// Utility class for compressing line number tables

class CompressedLineNumberWriteStream: public CompressedWriteStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberWriteStream(int initial_size) : CompressedWriteStream(initial_size), _bci(0), _line(0) {}
  CompressedLineNumberWriteStream(u_char* buffer, int initial_size) : CompressedWriteStream(buffer, initial_size), _bci(0), _line(0) {}

  // Write (bci, line number) pair to stream
  void write_pair_regular(int bci_delta, int line_delta);

  inline void write_pair_inline(int bci, int line) {
    int bci_delta = bci - _bci;
    int line_delta = line - _line;
    _bci = bci;
    _line = line;
    // Skip (0,0) deltas - they do not add information and conflict with terminator.
    if (bci_delta == 0 && line_delta == 0) return;
    // Check if the bci delta fits in 5 bits and the line delta in 3 bits, unsigned.
    if (((bci_delta & ~0x1F) == 0) && ((line_delta & ~0x7) == 0)) {
      // Compress into single byte.
      jubyte value = ((jubyte) bci_delta << 3) | (jubyte) line_delta;
      // Check that value doesn't match escape character.
      if (value != 0xFF) {
        write_byte(value);
        return;
      }
    }
    write_pair_regular(bci_delta, line_delta);
  }

  // Windows AMD64 + Apr 2005 PSDK with /O2 generates bad code for write_pair.
  // Disabling optimization doesn't work for methods in header files
  // so we force it to call through the non-optimized version in the .cpp.
  // It's gross, but it's the only way we can ensure that all callers are
  // fixed.  _MSC_VER is defined by the windows compiler
#if defined(_M_AMD64) && _MSC_VER >= 1400
  void write_pair(int bci, int line);
#else
  void write_pair(int bci, int line) { write_pair_inline(bci, line); }
#endif

  // Write end-of-stream marker
  void write_terminator() { write_byte(0); }
};
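// Worked example (illustrative only): with _bci == _line == 0, write_pair(10, 2)
// yields bci_delta == 10 and line_delta == 2; both fit the 5-bit/3-bit limits,
// so the pair compresses into the single byte
//
//   (10 << 3) | 2 == 0x52
//
// whereas write_pair(64, 2) falls back to write_pair_regular() because a
// delta of 64 does not fit in 5 bits.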
// Utility class for decompressing line number tables

class CompressedLineNumberReadStream: public CompressedReadStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberReadStream(u_char* buffer);
  // Read (bci, line number) pair from stream. Returns false at end-of-stream.
  bool read_pair();
  // Accessing bci and line number (after calling read_pair)
  int bci() const  { return _bci; }
  int line() const { return _line; }
};


/// Fast Breakpoints.

// If this structure gets more complicated (because bpts get numerous),
// move it into its own header.

// There is presently no provision for concurrent access
// to breakpoint lists, which is only OK for JVMTI because
// breakpoints are written only at safepoints, and are read
// concurrently only outside of safepoints.

class BreakpointInfo : public CHeapObj<mtClass> {
  friend class VMStructs;
 private:
  Bytecodes::Code  _orig_bytecode;
  int              _bci;
  u2               _name_index;       // of method
  u2               _signature_index;  // of method
  BreakpointInfo*  _next;             // simple storage allocation

 public:
  BreakpointInfo(Method* m, int bci);

  // accessors
  Bytecodes::Code orig_bytecode()                     { return _orig_bytecode; }
  void        set_orig_bytecode(Bytecodes::Code code) { _orig_bytecode = code; }
  int         bci()                                   { return _bci; }

  BreakpointInfo* next() const                        { return _next; }
  void        set_next(BreakpointInfo* n)             { _next = n; }

  // helpers for searchers
  bool match(const Method* m, int bci) {
    return bci == _bci && match(m);
  }

  bool match(const Method* m) {
    return _name_index == m->name_index() &&
      _signature_index == m->signature_index();
  }

  void set(Method* method);
  void clear(Method* method);
};

// Utility class for accessing exception handlers
class ExceptionTable : public StackObj {
 private:
  ExceptionTableElement* _table;
  u2  _length;

 public:
  ExceptionTable(const Method* m) {
    if (m->has_exception_handler()) {
      _table = m->exception_table_start();
      _length = m->exception_table_length();
    } else {
      _table = NULL;
      _length = 0;
    }
  }

  int length() const {
    return _length;
  }

  u2 start_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].start_pc;
  }

  void set_start_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].start_pc = value;
  }

  u2 end_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].end_pc;
  }

  void set_end_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].end_pc = value;
  }

  u2 handler_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].handler_pc;
  }

  void set_handler_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].handler_pc = value;
  }

  u2 catch_type_index(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].catch_type_index;
  }

  void set_catch_type_index(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].catch_type_index = value;
  }
};
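// Usage sketch (illustrative only): scanning a method's handlers through the
// ExceptionTable wrapper; handler ranges are half-open, [start_pc, end_pc):
//
//   ExceptionTable table(method);
//   for (int i = 0; i < table.length(); i++) {
//     if (table.start_pc(i) <= bci && bci < table.end_pc(i)) {
//       // candidate handler at table.handler_pc(i), constrained by
//       // table.catch_type_index(i) (0 matches any exception class)
//     }
//   }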
#endif // SHARE_VM_OOPS_METHODOOP_HPP