/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_METHODOOP_HPP
#define SHARE_VM_OOPS_METHODOOP_HPP

#include "classfile/vmSymbols.hpp"
#include "code/compressedStream.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/invocationCounter.hpp"
#include "oops/annotations.hpp"
#include "oops/constantPool.hpp"
#include "oops/methodCounters.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.hpp"
#include "oops/typeArrayOop.hpp"
#include "utilities/accessFlags.hpp"
#include "utilities/growableArray.hpp"

// A Method* represents a Java method.
//
// Memory layout (each line represents a word). Note that most applications load thousands of methods,
// so keeping the size of this structure small has a big impact on footprint.
//
// We put all oops and method_size first for better gc cache locality.
//
// The actual bytecodes are inlined after the end of the Method struct.
//
// There are bits in the access_flags telling whether inlined tables are present.
// Note that accessing the line number and local variable tables is not performance critical at all.
// The checked exceptions table is used by reflection, so we put that last to make
// access to it fast.
//
// The line number table is compressed and inlined following the byte codes. It is found as the first
// byte following the byte codes. The checked exceptions table and the local variable table are inlined
// after the line number table, and indexed from the end of the method. We do not compress the checked
// exceptions table since the average length is less than 2, and do not bother to compress the local
// variable table either since it is mostly absent.
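//
// For illustration only (this sketch is an editorial addition, not part of the original
// comment): given a Method* m with a line number table, the compressed table can be
// decoded with the CompressedLineNumberReadStream declared at the end of this file,
// roughly as follows:
//
//   if (m->has_linenumber_table()) {
//     CompressedLineNumberReadStream stream(m->compressed_linenumber_table());
//     while (stream.read_pair()) {
//       // stream.bci() and stream.line() now hold one (bci, line number) pair
//     }
//   }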
//
// Note that native_function and signature_handler have to be at fixed offsets (required by the interpreter)
//
// |------------------------------------------------------|
// | header                                               |
// | klass                                                |
// |------------------------------------------------------|
// | ConstMethod*            (oop)                        |
// |------------------------------------------------------|
// | methodData              (oop)                        |
// | methodCounters                                       |
// |------------------------------------------------------|
// | access_flags                                         |
// | vtable_index                                         |
// |------------------------------------------------------|
// | result_index (C++ interpreter only)                  |
// |------------------------------------------------------|
// | method_size             | intrinsic_id |   flags     |
// |------------------------------------------------------|
// | code                    (pointer)                    |
// | i2i                     (pointer)                    |
// | adapter                 (pointer)                    |
// | from_compiled_entry     (pointer)                    |
// | from_interpreted_entry  (pointer)                    |
// |------------------------------------------------------|
// | native_function         (present only if native)     |
// | signature_handler       (present only if native)     |
// |------------------------------------------------------|


class CheckedExceptionElement;
class LocalVariableTableElement;
class AdapterHandlerEntry;
class MethodData;
class MethodCounters;
class ConstMethod;
class InlineTableSizes;
class KlassSizeStats;

class Method : public Metadata {
 friend class VMStructs;
 private:
  ConstMethod*      _constMethod;                // Method read-only data.
  MethodData*       _method_data;
  MethodCounters*   _method_counters;
  AccessFlags       _access_flags;               // Access flags
  int               _vtable_index;               // vtable index of this method (see VtableIndexFlag)
                                                 // note: can have vtables with >2**16 elements (because of inheritance)
  u2                _method_size;                // size of this object
  u1                _intrinsic_id;               // vmSymbols::intrinsic_id (0 == _none)
  u1                _jfr_towrite          : 1,   // Flags
                    _caller_sensitive     : 1,
                    _force_inline         : 1,
                    _hidden               : 1,
                    _running_emcp         : 1,
                    _dont_inline          : 1,
                    _has_injected_profile : 1,
                                          : 2;

  TRACE_DEFINE_FLAG;

#ifndef PRODUCT
  int               _compiled_invocation_count;  // Number of nmethod invocations so far (for perf. debugging)
#endif
  // Entry point for calling both from and to the interpreter.
  address _i2i_entry;           // All-args-on-stack calling convention
  // Adapter blob (i2c/c2i) for this Method*. Set once when method is linked.
  AdapterHandlerEntry* _adapter;
  // Entry point for calling from compiled code, to compiled code if it exists
  // or else the interpreter.
  volatile address _from_compiled_entry;         // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
  // The entry point for calling both from and to compiled code is
  // "_code->entry_point()". Because of tiered compilation and de-opt, this
  // field can come and go.  It can transition from NULL to not-null at any
  // time (whenever a compile completes).  It can transition from not-null to
  // NULL only at safepoints (because of a de-opt).
  nmethod* volatile _code;                       // Points to the corresponding piece of native code
  volatile address _from_interpreted_entry;      // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
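  // Illustrative summary of the entry point caches above (an editorial restatement of
  // the "Cache of" comments; not part of the original header):
  //
  //                             no nmethod (_code == NULL)   nmethod installed (_code != NULL)
  //   _from_compiled_entry      _adapter->c2i_entry()        _code->entry_point()
  //   _from_interpreted_entry   _i2i_entry                   _adapter->i2c_entry()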

  // Constructor
  Method(ConstMethod* xconst, AccessFlags access_flags, int size);
 public:

  static Method* allocate(ClassLoaderData* loader_data,
                          int byte_code_size,
                          AccessFlags access_flags,
                          InlineTableSizes* sizes,
                          ConstMethod::MethodType method_type,
                          TRAPS);

  // CDS and vtbl checking can create an empty Method to get vtbl pointer.
  Method(){}

  // The Method vtable is restored by this call when the Method is in the
  // shared archive.  See patch_klass_vtables() in metaspaceShared.cpp for
  // all the gory details.  SA, dtrace and pstack helpers distinguish metadata
  // by their vtable.
  void restore_vtable() { guarantee(is_method(), "vtable restored by this call"); }
  bool is_method() const volatile { return true; }

  void restore_unshareable_info(TRAPS);

  // accessors for instance variables

  ConstMethod* constMethod() const             { return _constMethod; }
  void set_constMethod(ConstMethod* xconst)    { _constMethod = xconst; }


  static address make_adapters(methodHandle mh, TRAPS);
  volatile address from_compiled_entry() const    { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); }
  volatile address from_interpreted_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); }

  // access flag
  AccessFlags access_flags() const               { return _access_flags;  }
  void set_access_flags(AccessFlags flags)       { _access_flags = flags; }

  // name
  Symbol* name() const                           { return constants()->symbol_at(name_index()); }
  int name_index() const                         { return constMethod()->name_index(); }
  void set_name_index(int index)                 { constMethod()->set_name_index(index); }

  // signature
  Symbol* signature() const                      { return constants()->symbol_at(signature_index()); }
  int signature_index() const                    { return constMethod()->signature_index(); }
  void set_signature_index(int index)            { constMethod()->set_signature_index(index); }

  // generics support
  Symbol* generic_signature() const              { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); }
  int generic_signature_index() const            { return constMethod()->generic_signature_index(); }
  void set_generic_signature_index(int index)    { constMethod()->set_generic_signature_index(index); }

  // annotations support
  AnnotationArray* annotations() const           {
    return constMethod()->method_annotations();
  }
  AnnotationArray* parameter_annotations() const {
    return constMethod()->parameter_annotations();
  }
  AnnotationArray* annotation_default() const    {
    return constMethod()->default_annotations();
  }
  AnnotationArray* type_annotations() const      {
    return constMethod()->type_annotations();
  }

  // Helper routine: get klass name + "." + method name + signature as
  // C string, for the purpose of providing more useful NoSuchMethodErrors
  // and fatal error handling. The string is allocated in resource
  // area if a buffer is not provided by the caller.
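  // (Illustrative example, added editorially: the result looks like
  // "java.lang.Object.wait(J)V".)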
  char* name_and_sig_as_C_string() const;
  char* name_and_sig_as_C_string(char* buf, int size) const;

  // Static routines for the situations where we don't have a Method*
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size);

  Bytecodes::Code java_code_at(int bci) const {
    return Bytecodes::java_code_at(this, bcp_from(bci));
  }
  Bytecodes::Code code_at(int bci) const {
    return Bytecodes::code_at(this, bcp_from(bci));
  }

  // JVMTI breakpoints
  Bytecodes::Code orig_bytecode_at(int bci) const;
  void set_orig_bytecode_at(int bci, Bytecodes::Code code);
  void set_breakpoint(int bci);
  void clear_breakpoint(int bci);
  void clear_all_breakpoints();
  // Tracking number of breakpoints, for fullspeed debugging.
  // Only mutated by VM thread.
  u2   number_of_breakpoints() const {
    MethodCounters* mcs = method_counters();
    if (mcs == NULL) {
      return 0;
    } else {
      return mcs->number_of_breakpoints();
    }
  }
  void incr_number_of_breakpoints(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->incr_number_of_breakpoints();
    }
  }
  void decr_number_of_breakpoints(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->decr_number_of_breakpoints();
    }
  }
  // Initialization only
  void clear_number_of_breakpoints() {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->clear_number_of_breakpoints();
    }
  }

  // index into InstanceKlass methods() array
  // note: also used by jfr
  u2 method_idnum() const           { return constMethod()->method_idnum(); }
  void set_method_idnum(u2 idnum)   { constMethod()->set_method_idnum(idnum); }

  u2 orig_method_idnum() const           { return constMethod()->orig_method_idnum(); }
  void set_orig_method_idnum(u2 idnum)   { constMethod()->set_orig_method_idnum(idnum); }

  // code size
  int code_size() const                  { return constMethod()->code_size(); }

  // method size
  int method_size() const                { return _method_size; }
  void set_method_size(int size) {
    assert(0 <= size && size < (1 << 16), "invalid method size");
    _method_size = size;
  }

  // constant pool for Klass* holding this method
  ConstantPool* constants() const        { return constMethod()->constants(); }
  void set_constants(ConstantPool* c)    { constMethod()->set_constants(c); }

  // max stack
  // return original max stack size for method verification
  int  verifier_max_stack() const        { return constMethod()->max_stack(); }
  int           max_stack() const        { return constMethod()->max_stack() + extra_stack_entries(); }
  void      set_max_stack(int size)      { constMethod()->set_max_stack(size); }

  // max locals
  int  max_locals() const                { return constMethod()->max_locals(); }
  void set_max_locals(int size)          { constMethod()->set_max_locals(size); }

  int highest_comp_level() const;
  void set_highest_comp_level(int level);
  int highest_osr_comp_level() const;
  void set_highest_osr_comp_level(int level);

  // Count of times method was exited via exception while interpreting
  void interpreter_throwout_increment(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->interpreter_throwout_increment();
    }
  }

  int interpreter_throwout_count() const {
    MethodCounters* mcs = method_counters();
    if (mcs == NULL) {
      return 0;
    } else {
      return mcs->interpreter_throwout_count();
    }
  }

  // size of parameters
  int size_of_parameters() const         { return constMethod()->size_of_parameters(); }
  void set_size_of_parameters(int size)  { constMethod()->set_size_of_parameters(size); }

  bool has_stackmap_table() const {
    return constMethod()->has_stackmap_table();
  }

  Array<u1>* stackmap_data() const {
    return constMethod()->stackmap_data();
  }

  void set_stackmap_data(Array<u1>* sd) {
    constMethod()->set_stackmap_data(sd);
  }

  // exception handler table
  bool has_exception_handler() const
                             { return constMethod()->has_exception_handler(); }
  int exception_table_length() const
                             { return constMethod()->exception_table_length(); }
  ExceptionTableElement* exception_table_start() const
                             { return constMethod()->exception_table_start(); }

  // Finds the first entry point bci of an exception handler for an
  // exception of klass ex_klass thrown at throw_bci. A value of NULL
  // for ex_klass indicates that the exception klass is not known; in
  // this case it matches any constraint class. Returns -1 if the
  // exception cannot be handled in this method. The handler
  // constraint classes are loaded if necessary. Note that this may
  // throw an exception if loading of the constraint classes causes
  // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError.
  // If an exception is thrown, returns the bci of the
  // exception handler which caused the exception to be thrown, which
  // is needed for proper retries. See, for example,
  // InterpreterRuntime::exception_handler_for_exception.
  static int fast_exception_handler_bci_for(methodHandle mh, KlassHandle ex_klass, int throw_bci, TRAPS);

  // method data access
  MethodData* method_data() const {
    return _method_data;
  }

  void set_method_data(MethodData* data) {
    // The store into method must be released. On platforms without
    // total store order (TSO) the reference may become visible before
    // the initialization of data otherwise.
    OrderAccess::release_store_ptr((volatile void *)&_method_data, data);
  }

  MethodCounters* method_counters() const {
    return _method_counters;
  }

  void clear_method_counters() {
    _method_counters = NULL;
  }

  bool init_method_counters(MethodCounters* counters) {
    // Try to install a pointer to MethodCounters, return true on success.
    return Atomic::cmpxchg_ptr(counters, (volatile void*)&_method_counters, NULL) == NULL;
  }

#ifdef TIERED
  // We are reusing interpreter_invocation_count as a holder for the previous event count!
  // We can do that since interpreter_invocation_count is not used in tiered.
  int prev_event_count() const {
    if (method_counters() == NULL) {
      return 0;
    } else {
      return method_counters()->interpreter_invocation_count();
    }
  }
  void set_prev_event_count(int count) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_interpreter_invocation_count(count);
    }
  }
  jlong prev_time() const {
    MethodCounters* mcs = method_counters();
    return mcs == NULL ? 0 : mcs->prev_time();
  }
  void set_prev_time(jlong time) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_prev_time(time);
    }
  }
  float rate() const {
    MethodCounters* mcs = method_counters();
    return mcs == NULL ? 0 : mcs->rate();
  }
  void set_rate(float rate) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_rate(rate);
    }
  }
#endif

  int invocation_count();
  int backedge_count();

  bool was_executed_more_than(int n);
  bool was_never_executed()              { return !was_executed_more_than(0); }

  static void build_interpreter_method_data(methodHandle method, TRAPS);

  static MethodCounters* build_method_counters(Method* m, TRAPS);

  int interpreter_invocation_count() {
    if (TieredCompilation) {
      return invocation_count();
    } else {
      MethodCounters* mcs = method_counters();
      return (mcs == NULL) ? 0 : mcs->interpreter_invocation_count();
    }
  }
  int increment_interpreter_invocation_count(TRAPS) {
    if (TieredCompilation) ShouldNotReachHere();
    MethodCounters* mcs = get_method_counters(CHECK_0);
    return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count();
  }

#ifndef PRODUCT
  int  compiled_invocation_count() const         { return _compiled_invocation_count;  }
  void set_compiled_invocation_count(int count)  { _compiled_invocation_count = count; }
#endif // not PRODUCT

  // Clear (non-shared space) pointers which could not be relevant
  // if this (shared) method were mapped into another JVM.
  void remove_unshareable_info();

  // nmethod/verified compiler entry
  address verified_code_entry();
  bool check_code() const;      // Not inline to avoid circular ref
  nmethod* volatile code() const                 { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
  void clear_code(bool acquire_lock = true);     // Clear out any compiled code
  static void set_code(methodHandle mh, nmethod* code);
  void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; }
  address get_i2c_entry();
  address get_c2i_entry();
  address get_c2i_unverified_entry();
  AdapterHandlerEntry* adapter() { return _adapter; }
  // setup entry points
  void link_method(methodHandle method, TRAPS);
  // clear entry points. Used by sharing code
  void unlink_method();

  // vtable index
  enum VtableIndexFlag {
    // Valid vtable indexes are non-negative (>= 0).
    // These few negative values are used as sentinels.
    itable_index_max        = -10, // first itable index, growing downward
    pending_itable_index    = -9,  // itable index will be assigned
    invalid_vtable_index    = -4,  // distinct from any valid vtable index
    garbage_vtable_index    = -3,  // not yet linked; no vtable layout yet
    nonvirtual_vtable_index = -2   // there is no need for vtable dispatch
    // 6330203 Note:  Do not use -1, which was overloaded with many meanings.
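    // Illustrative note (an editorial addition, not from the original header): an itable
    // slot i is stored as _vtable_index == itable_index_max - i, so slot 0 is kept as -10,
    // slot 1 as -11, and so on; itable_index() below recovers i as itable_index_max - _vtable_index.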
  };
  DEBUG_ONLY(bool valid_vtable_index() const     { return _vtable_index >= nonvirtual_vtable_index; })
  bool has_vtable_index() const                  { return _vtable_index >= 0; }
  int  vtable_index() const                      { return _vtable_index; }
  void set_vtable_index(int index);
  DEBUG_ONLY(bool valid_itable_index() const     { return _vtable_index <= pending_itable_index; })
  bool has_itable_index() const                  { return _vtable_index <= itable_index_max; }
  int  itable_index() const                      { assert(valid_itable_index(), "");
                                                   return itable_index_max - _vtable_index; }
  void set_itable_index(int index);

  // interpreter entry
  address interpreter_entry() const              { return _i2i_entry; }
  // Only used when first initialized so we can set _i2i_entry and _from_interpreted_entry
  void set_interpreter_entry(address entry)      { _i2i_entry = entry;  _from_interpreted_entry = entry; }

  // native function (used for native methods only)
  enum {
    native_bind_event_is_interesting = true
  };
  address native_function() const                { return *(native_function_addr()); }
  address critical_native_function();

  // Must specify a real function (not NULL).
  // Use clear_native_function() to unregister.
  void set_native_function(address function, bool post_event_flag);
  bool has_native_function() const;
  void clear_native_function();

  // signature handler (used for native methods only)
  address signature_handler() const              { return *(signature_handler_addr()); }
  void set_signature_handler(address handler);

  // Interpreter oopmap support
  void mask_for(int bci, InterpreterOopMap* mask);

#ifndef PRODUCT
  // operations on invocation counter
  void print_invocation_count();
#endif

  // byte codes
  void    set_code(address code)      { return constMethod()->set_code(code); }
  address code_base() const           { return constMethod()->code_base(); }
  bool    contains(address bcp) const { return constMethod()->contains(bcp); }

  // prints byte codes
  void print_codes() const            { print_codes_on(tty); }
  void print_codes_on(outputStream* st) const                   PRODUCT_RETURN;
  void print_codes_on(int from, int to, outputStream* st) const PRODUCT_RETURN;

  // method parameters
  bool has_method_parameters() const
                         { return constMethod()->has_method_parameters(); }
  int method_parameters_length() const
                         { return constMethod()->method_parameters_length(); }
  MethodParametersElement* method_parameters_start() const
                         { return constMethod()->method_parameters_start(); }

  // checked exceptions
  int checked_exceptions_length() const
                         { return constMethod()->checked_exceptions_length(); }
  CheckedExceptionElement* checked_exceptions_start() const
                         { return constMethod()->checked_exceptions_start(); }

  // localvariable table
  bool has_localvariable_table() const
                         { return constMethod()->has_localvariable_table(); }
  int localvariable_table_length() const
                         { return constMethod()->localvariable_table_length(); }
  LocalVariableTableElement* localvariable_table_start() const
                         { return constMethod()->localvariable_table_start(); }

  bool has_linenumber_table() const
                         { return constMethod()->has_linenumber_table(); }
  u_char* compressed_linenumber_table() const
                         { return constMethod()->compressed_linenumber_table(); }

  // method holder (the Klass* holding this method)
  InstanceKlass* method_holder() const           { return constants()->pool_holder(); }

  void compute_size_of_parameters(Thread* thread); // word size of parameters (receiver if any + arguments)
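  // (Illustrative example, added editorially: for a virtual method with descriptor
  // (Ljava/lang/String;IJ)V the parameter word size is 1 for the receiver + 1 + 1 + 2 = 5,
  // since long and double arguments occupy two words each.)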
  Symbol* klass_name() const;                    // returns the name of the method holder
  BasicType result_type() const;                 // type of the method result
  bool is_returning_oop() const                  { BasicType r = result_type(); return (r == T_OBJECT ||  r == T_ARRAY); }
  bool is_returning_fp() const                   { BasicType r = result_type(); return (r == T_FLOAT || r == T_DOUBLE); }

  // Checked exceptions thrown by this method (resolved to mirrors)
  objArrayHandle resolved_checked_exceptions(TRAPS) { return resolved_checked_exceptions_impl(this, THREAD); }

  // Access flags
  bool is_public() const                         { return access_flags().is_public();      }
  bool is_private() const                        { return access_flags().is_private();     }
  bool is_protected() const                      { return access_flags().is_protected();   }
  bool is_package_private() const                { return !is_public() && !is_private() && !is_protected(); }
  bool is_static() const                         { return access_flags().is_static();      }
  bool is_final() const                          { return access_flags().is_final();       }
  bool is_synchronized() const                   { return access_flags().is_synchronized();}
  bool is_native() const                         { return access_flags().is_native();      }
  bool is_abstract() const                       { return access_flags().is_abstract();    }
  bool is_strict() const                         { return access_flags().is_strict();      }
  bool is_synthetic() const                      { return access_flags().is_synthetic();   }

  // returns true if the method contains only a return operation
  bool is_empty_method() const;

  // returns true if this is a vanilla constructor
  bool is_vanilla_constructor() const;

  // checks method and its method holder
  bool is_final_method() const;
  bool is_final_method(AccessFlags class_access_flags) const;
  bool is_default_method() const;

  // true if method needs no dynamic dispatch (final and/or no vtable entry)
  bool can_be_statically_bound() const;
  bool can_be_statically_bound(AccessFlags class_access_flags) const;

  // returns true if the method has any backward branches.
  bool has_loops() {
    return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag();
  };

  bool compute_has_loops_flag();

  bool has_jsrs() {
    return access_flags().has_jsrs();
  };
  void set_has_jsrs() {
    _access_flags.set_has_jsrs();
  }

  // returns true if the method has any monitors.
  bool has_monitors() const                      { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
  bool has_monitor_bytecodes() const             { return access_flags().has_monitor_bytecodes(); }

  void set_has_monitor_bytecodes()               { _access_flags.set_has_monitor_bytecodes(); }

  // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit bytecodes
  // properly nest in the method. It might return false, even though they actually nest properly, since the
  // information has not been computed yet.
  bool guaranteed_monitor_matching() const       { return access_flags().is_monitor_matching(); }
  void set_guaranteed_monitor_matching()         { _access_flags.set_monitor_matching(); }

  // returns true if the method is an accessor function (setter/getter).
  bool is_accessor() const;

  // returns true if the method does nothing but return a constant of primitive type
  bool is_constant_getter() const;

  // returns true if the method is an initializer (<init> or <clinit>).
  bool is_initializer() const;

  // returns true if the method is static OR if the classfile version < 51
  bool has_valid_initializer_flags() const;

  // returns true if the method name is <clinit> and the method has
  // valid static initializer flags.
  bool is_static_initializer() const;

  // returns true if the method name is <init>
  bool is_object_initializer() const;

  // compiled code support
  // NOTE: code() is inherently racy as deopt can be clearing code
  // simultaneously. Use with caution.
  bool has_compiled_code() const                 { return code() != NULL; }

  // sizing
  static int header_size()                       { return sizeof(Method)/HeapWordSize; }
  static int size(bool is_native);
  int size() const                               { return method_size(); }
#if INCLUDE_SERVICES
  void collect_statistics(KlassSizeStats *sz) const;
#endif

  // interpreter support
  static ByteSize const_offset()                 { return byte_offset_of(Method, _constMethod ); }
  static ByteSize access_flags_offset()          { return byte_offset_of(Method, _access_flags ); }
  static ByteSize from_compiled_offset()         { return byte_offset_of(Method, _from_compiled_entry); }
  static ByteSize code_offset()                  { return byte_offset_of(Method, _code); }
  static ByteSize method_data_offset()           {
    return byte_offset_of(Method, _method_data);
  }
  static ByteSize method_counters_offset()       {
    return byte_offset_of(Method, _method_counters);
  }
#ifndef PRODUCT
  static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); }
#endif // not PRODUCT
  static ByteSize native_function_offset()       { return in_ByteSize(sizeof(Method));            }
  static ByteSize from_interpreted_offset()      { return byte_offset_of(Method, _from_interpreted_entry ); }
  static ByteSize interpreter_entry_offset()     { return byte_offset_of(Method, _i2i_entry ); }
  static ByteSize signature_handler_offset()     { return in_ByteSize(sizeof(Method) + wordSize); }
  static ByteSize itable_index_offset()          { return byte_offset_of(Method, _vtable_index ); }

  // for code generation
  static int method_data_offset_in_bytes()       { return offset_of(Method, _method_data); }
  static int intrinsic_id_offset_in_bytes()      { return offset_of(Method, _intrinsic_id); }
  static int intrinsic_id_size_in_bytes()        { return sizeof(u1); }

  // Static methods that are used to implement member methods where an exposed this pointer
  // is needed due to possible GCs
  static objArrayHandle resolved_checked_exceptions_impl(Method* this_oop, TRAPS);

  // Returns the byte code index from the byte code pointer
  int     bci_from(address bcp) const;
  address bcp_from(int bci) const;
  int validate_bci_from_bcx(intptr_t bcx) const;

  // Returns the line number for a bci if debugging information for the method is provided;
  // -1 is returned otherwise.
  int line_number_from_bci(int bci) const;

  // Reflection support
  bool is_overridden_in(Klass* k) const;

  // Stack walking support
  bool is_ignored_by_security_stack_walk() const;

  // JSR 292 support
  bool is_method_handle_intrinsic() const;          // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
  bool is_compiled_lambda_form() const;             // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
  bool has_member_arg() const;                      // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
  static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual
                                                   Symbol* signature, //anything at all
                                                   TRAPS);
  static Klass* check_non_bcp_klass(Klass* klass);

  // How many extra stack entries for invokedynamic when it's enabled
  static const int extra_stack_entries_for_jsr292 = 1;

  // this operates only on invoke methods:
  // presize interpreter frames for extra interpreter stack entries, if needed
  // Account for the extra appendix argument for invokehandle/invokedynamic
  static int extra_stack_entries() { return EnableInvokeDynamic ? extra_stack_entries_for_jsr292 : 0; }
  static int extra_stack_words();  // = extra_stack_entries() * Interpreter::stackElementSize

  // RedefineClasses() support:
  bool is_old() const                               { return access_flags().is_old(); }
  void set_is_old()                                 { _access_flags.set_is_old(); }
  bool is_obsolete() const                          { return access_flags().is_obsolete(); }
  void set_is_obsolete()                            { _access_flags.set_is_obsolete(); }
  bool is_deleted() const                           { return access_flags().is_deleted(); }
  void set_is_deleted()                             { _access_flags.set_is_deleted(); }

  bool is_running_emcp() const {
    // EMCP methods are old but not obsolete or deleted. Equivalent
    // Modulo Constant Pool means the method is equivalent except
    // the constant pool and instructions that access the constant
    // pool might be different.
    // If a breakpoint is set in a redefined method, its EMCP methods that are
    // still running must have a breakpoint also.
    return _running_emcp;
  }

  void set_running_emcp(bool x) {
    _running_emcp = x;
  }

  bool on_stack() const                             { return access_flags().on_stack(); }
  void set_on_stack(const bool value);

  // see the definition in Method*.cpp for the gory details
  bool should_not_be_cached() const;

  // JVMTI Native method prefixing support:
  bool is_prefixed_native() const                   { return access_flags().is_prefixed_native(); }
  void set_is_prefixed_native()                     { _access_flags.set_is_prefixed_native(); }

  // Rewriting support
  static methodHandle clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
                                          u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS);

  // jmethodID handling
  // Because the useful life-span of a jmethodID cannot be determined,
  // once created they are never reclaimed.  The methods to which they refer,
  // however, can be GC'ed away if the class is unloaded or if the method is
  // made obsolete or deleted -- in these cases, the jmethodID
  // refers to NULL (as is the case for any weak reference).
  static jmethodID make_jmethod_id(ClassLoaderData* loader_data, Method* mh);
  static void destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID mid);

  // Use resolve_jmethod_id() in situations where the caller is expected
  // to provide a valid jmethodID; the only sanity checks are in asserts;
  // result guaranteed not to be NULL.
  inline static Method* resolve_jmethod_id(jmethodID mid) {
    assert(mid != NULL, "JNI method id should not be null");
    return *((Method**)mid);
  }

  // Use checked_resolve_jmethod_id() in situations where the caller
  // should provide a valid jmethodID, but might not. NULL is returned
  // when the jmethodID does not refer to a valid method.
  static Method* checked_resolve_jmethod_id(jmethodID mid);

  static void change_method_associated_with_jmethod_id(jmethodID old_jmid_ptr, Method* new_method);
  static bool is_method_id(jmethodID mid);

  // Clear methods
  static void clear_jmethod_ids(ClassLoaderData* loader_data);
  static void print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;

  // Get this method's jmethodID -- allocate if it doesn't exist
  jmethodID jmethod_id()                            { methodHandle this_h(this);
                                                      return InstanceKlass::get_jmethod_id(method_holder(), this_h); }

  // Lookup the jmethodID for this method.  Return NULL if not found.
  // NOTE that this function can be called from a signal handler
  // (see AsyncGetCallTrace support for Forte Analyzer) and this
  // needs to be async-safe. No allocation should be done and
  // so handles are not used to avoid deadlock.
  jmethodID find_jmethod_id_or_null()               { return method_holder()->jmethod_id_or_null(this); }

  // Support for inlining of intrinsic methods
  vmIntrinsics::ID intrinsic_id() const             { return (vmIntrinsics::ID) _intrinsic_id;           }
  void     set_intrinsic_id(vmIntrinsics::ID id)    {                           _intrinsic_id = (u1) id; }

  // Helper routines for intrinsic_id() and vmIntrinsics::method().
  void init_intrinsic_id();     // updates from _none if a match
  void clear_jmethod_id(ClassLoaderData* loader_data);

  static vmSymbols::SID klass_id_for_intrinsics(Klass* holder);

  bool jfr_towrite()                    { return _jfr_towrite;              }
  void set_jfr_towrite(bool x)          {        _jfr_towrite = x;          }
  bool caller_sensitive()               { return _caller_sensitive;         }
  void set_caller_sensitive(bool x)     {        _caller_sensitive = x;     }
  bool force_inline()                   { return _force_inline;             }
  void set_force_inline(bool x)         {        _force_inline = x;         }
  bool dont_inline()                    { return _dont_inline;              }
  void set_dont_inline(bool x)          {        _dont_inline = x;          }
  bool is_hidden()                      { return _hidden;                   }
  void set_hidden(bool x)               {        _hidden = x;               }
  bool has_injected_profile()           { return _has_injected_profile;     }
  void set_has_injected_profile(bool x) {        _has_injected_profile = x; }

  TRACE_DEFINE_FLAG_ACCESSOR;

  ConstMethod::MethodType method_type() const {
      return _constMethod->method_type();
  }
  bool is_overpass() const { return method_type() == ConstMethod::OVERPASS; }

  // On-stack replacement support
  bool has_osr_nmethod(int level, bool match_level) {
   return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
  }

  int mark_osr_nmethods() {
    return method_holder()->mark_osr_nmethods(this);
  }

  nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
  }

  // Inline cache support
  void cleanup_inline_caches();

  // Find if klass for method is loaded
  bool is_klass_loaded_by_klass_index(int klass_index) const;
  bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const;

  // Indicates whether compilation failed earlier for this method, or
  // whether it is not compilable for another reason like having a
  // breakpoint set in it.
  bool is_not_compilable(int comp_level = CompLevel_any) const;
  void set_not_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
  void set_not_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_compilable(comp_level, false);
  }
  bool is_not_osr_compilable(int comp_level = CompLevel_any) const;
  void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
  void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_osr_compilable(comp_level, false);
  }
  bool is_always_compilable() const;

 private:
  void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);

 public:
  MethodCounters* get_method_counters(TRAPS) {
    if (_method_counters == NULL) {
      build_method_counters(this, CHECK_AND_CLEAR_NULL);
    }
    return _method_counters;
  }

  bool   is_not_c1_compilable() const         { return access_flags().is_not_c1_compilable();  }
  void  set_not_c1_compilable()               {       _access_flags.set_not_c1_compilable();   }
  void clear_not_c1_compilable()              {       _access_flags.clear_not_c1_compilable(); }
  bool   is_not_c2_compilable() const         { return access_flags().is_not_c2_compilable();  }
  void  set_not_c2_compilable()               {       _access_flags.set_not_c2_compilable();   }
  void clear_not_c2_compilable()              {       _access_flags.clear_not_c2_compilable(); }

  bool    is_not_c1_osr_compilable() const    { return is_not_c1_compilable(); }  // don't waste an accessFlags bit
  void   set_not_c1_osr_compilable()          {       set_not_c1_compilable(); }  // don't waste an accessFlags bit
  void clear_not_c1_osr_compilable()          {     clear_not_c1_compilable(); }  // don't waste an accessFlags bit
  bool   is_not_c2_osr_compilable() const     { return access_flags().is_not_c2_osr_compilable();  }
  void  set_not_c2_osr_compilable()           {       _access_flags.set_not_c2_osr_compilable();   }
  void clear_not_c2_osr_compilable()          {       _access_flags.clear_not_c2_osr_compilable(); }

  // Background compilation support
  bool queued_for_compilation() const  { return access_flags().queued_for_compilation(); }
  void set_queued_for_compilation()    { _access_flags.set_queued_for_compilation();     }
  void clear_queued_for_compilation()  { _access_flags.clear_queued_for_compilation();   }

  // Resolve all classes in signature, return 'true' if successful
  static bool load_signature_classes(methodHandle m, TRAPS);

  // Returns true if not all classes referenced in the signature, including the return type,
  // have been loaded
  static bool has_unloaded_classes_in_signature(methodHandle m, TRAPS);

  // Printing
  void print_short_name(outputStream* st = tty); // prints as klassname::methodname; Exposed so field engineers can debug VM
#if INCLUDE_JVMTI
  void print_name(outputStream* st = tty); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses
#else
  void print_name(outputStream* st = tty)        PRODUCT_RETURN; // prints as "virtual void foo(int)"
#endif

  // Helper routine used for method sorting
  static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true);

  // Deallocation function for redefine classes or if an error occurs
  void deallocate_contents(ClassLoaderData* loader_data);

  // Printing
#ifndef PRODUCT
  void print_on(outputStream* st) const;
#endif
  void print_value_on(outputStream* st) const;

  const char* internal_name() const { return "{method}"; }

  // Check for valid method pointer
  static bool has_method_vptr(const void* ptr);
  bool is_valid_method() const;

  // Verify
  void verify() { verify_on(tty); }
  void verify_on(outputStream* st);

 private:

  // Inlined elements
  address* native_function_addr() const          { assert(is_native(), "must be native"); return (address*) (this+1); }
  address* signature_handler_addr() const        { return native_function_addr() + 1; }
};


// Utility class for compressing line number tables

class CompressedLineNumberWriteStream: public CompressedWriteStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberWriteStream(int initial_size) : CompressedWriteStream(initial_size), _bci(0), _line(0) {}
  CompressedLineNumberWriteStream(u_char* buffer, int initial_size) : CompressedWriteStream(buffer, initial_size), _bci(0), _line(0) {}

  // Write (bci, line number) pair to stream
  void write_pair_regular(int bci_delta, int line_delta);

  inline void write_pair_inline(int bci, int line) {
    int bci_delta = bci - _bci;
    int line_delta = line - _line;
    _bci = bci;
    _line = line;
    // Skip (0,0) deltas - they do not add information and conflict with terminator.
    if (bci_delta == 0 && line_delta == 0) return;
    // Check if bci is 5-bit and line number 3-bit unsigned.
    if (((bci_delta & ~0x1F) == 0) && ((line_delta & ~0x7) == 0)) {
      // Compress into single byte.
      jubyte value = ((jubyte) bci_delta << 3) | (jubyte) line_delta;
      // Check that value doesn't match escape character.
      if (value != 0xFF) {
        write_byte(value);
        return;
      }
    }
    write_pair_regular(bci_delta, line_delta);
  }

  // Windows AMD64 + Apr 2005 PSDK with /O2 generates bad code for write_pair.
  // Disabling optimization doesn't work for methods in header files
  // so we force it to call through the non-optimized version in the .cpp.
  // It's gross, but it's the only way we can ensure that all callers are
  // fixed.  _MSC_VER is defined by the windows compiler
#if defined(_M_AMD64) && _MSC_VER >= 1400
  void write_pair(int bci, int line);
#else
  void write_pair(int bci, int line) { write_pair_inline(bci, line); }
#endif

  // Write end-of-stream marker
  void write_terminator()                        { write_byte(0); }
};


// Utility class for decompressing line number tables

class CompressedLineNumberReadStream: public CompressedReadStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberReadStream(u_char* buffer);
  // Read (bci, line number) pair from stream. Returns false at end-of-stream.
  bool read_pair();
  // Accessing bci and line number (after calling read_pair)
  int bci() const  { return _bci; }
  int line() const { return _line; }
};


/// Fast Breakpoints.

// If this structure gets more complicated (because bpts get numerous),
// move it into its own header.

// There is presently no provision for concurrent access
// to breakpoint lists, which is only OK for JVMTI because
// breakpoints are written only at safepoints, and are read
// concurrently only outside of safepoints.

class BreakpointInfo : public CHeapObj<mtClass> {
  friend class VMStructs;
 private:
  Bytecodes::Code  _orig_bytecode;
  int              _bci;
  u2               _name_index;       // of method
  u2               _signature_index;  // of method
  BreakpointInfo*  _next;             // simple storage allocation

 public:
  BreakpointInfo(Method* m, int bci);

  // accessors
  Bytecodes::Code orig_bytecode()                     { return _orig_bytecode; }
  void        set_orig_bytecode(Bytecodes::Code code) { _orig_bytecode = code; }
  int         bci()                                   { return _bci; }

  BreakpointInfo*          next() const               { return _next; }
  void                 set_next(BreakpointInfo* n)    { _next = n; }

  // helpers for searchers
  bool match(const Method* m, int bci) {
    return bci == _bci && match(m);
  }

  bool match(const Method* m) {
    return _name_index == m->name_index() &&
      _signature_index == m->signature_index();
  }

  void set(Method* method);
  void clear(Method* method);
};

// Utility class for accessing exception handlers
class ExceptionTable : public StackObj {
 private:
  ExceptionTableElement* _table;
  u2  _length;

 public:
  ExceptionTable(const Method* m) {
    if (m->has_exception_handler()) {
      _table = m->exception_table_start();
      _length = m->exception_table_length();
    } else {
      _table = NULL;
      _length = 0;
    }
  }

  int length() const {
    return _length;
  }

  u2 start_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].start_pc;
  }

  void set_start_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].start_pc = value;
  }

  u2 end_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].end_pc;
  }

  void set_end_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].end_pc = value;
  }

  u2 handler_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].handler_pc;
  }

  void set_handler_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].handler_pc = value;
  }

  u2 catch_type_index(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].catch_type_index;
  }

  void set_catch_type_index(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].catch_type_index = value;
  }
};

#endif // SHARE_VM_OOPS_METHODOOP_HPP