1 /* 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef SHARE_VM_OOPS_METHODOOP_HPP 26 #define SHARE_VM_OOPS_METHODOOP_HPP 27 28 #include "classfile/vmSymbols.hpp" 29 #include "code/compressedStream.hpp" 30 #include "compiler/oopMap.hpp" 31 #include "interpreter/invocationCounter.hpp" 32 #include "oops/annotations.hpp" 33 #include "oops/constantPool.hpp" 34 #include "oops/methodCounters.hpp" 35 #include "oops/instanceKlass.hpp" 36 #include "oops/oop.hpp" 37 #include "oops/typeArrayOop.hpp" 38 #include "utilities/accessFlags.hpp" 39 #include "utilities/growableArray.hpp" 40 41 // A Method represents a Java method. 42 // 43 // Memory layout (each line represents a word). Note that most applications load thousands of methods, 44 // so keeping the size of this structure small has a big impact on footprint. 45 // 46 // The actual bytecodes are inlined after the end of the Method struct. 47 // 48 // There are bits in the access_flags telling whether inlined tables are present. 49 // Note that accessing the line number and local variable tables is not performance critical at all. 50 // Accessing the checked exceptions table is used by reflection, so we put that last to make access 51 // to it fast. 52 // 53 // The line number table is compressed and inlined following the byte codes. It is found as the first 54 // byte following the byte codes. The checked exceptions table and the local variable table are inlined 55 // after the line number table, and indexed from the end of the method. We do not compress the checked 56 // exceptions table since the average length is less than 2, and do not bother to compress the local 57 // variable table either since it is mostly absent. 58 // 59 // Note that native_function and signature_handler has to be at fixed offsets (required by the interpreter) 60 // 61 // |------------------------------------------------------| 62 // | header | 63 // | klass | 64 // |------------------------------------------------------| 65 // | ConstMethod* (metadata) | 66 // |------------------------------------------------------| 67 // | MethodData* (metadata) | 68 // | MethodCounters | 69 // |------------------------------------------------------| 70 // | access_flags | 71 // | vtable_index | 72 // |------------------------------------------------------| 73 // | result_index (C++ interpreter only) | 74 // |------------------------------------------------------| 75 // | method_size | intrinsic_id | flags | 76 // |------------------------------------------------------| 77 // | code (pointer) | 78 // | i2i (pointer) | 79 // | adapter (pointer) | 80 // | from_compiled_entry (pointer) | 81 // | from_interpreted_entry (pointer) | 82 // |------------------------------------------------------| 83 // | native_function (present only if native) | 84 // | signature_handler (present only if native) | 85 // |------------------------------------------------------| 86 87 88 class CheckedExceptionElement; 89 class LocalVariableTableElement; 90 class AdapterHandlerEntry; 91 class MethodData; 92 class MethodCounters; 93 class ConstMethod; 94 class InlineTableSizes; 95 class KlassSizeStats; 96 97 class Method : public Metadata { 98 friend class VMStructs; 99 private: 100 ConstMethod* _constMethod; // Method read-only data. 101 MethodData* _method_data; 102 MethodCounters* _method_counters; 103 AccessFlags _access_flags; // Access flags 104 int _vtable_index; // vtable index of this method (see VtableIndexFlag) 105 // note: can have vtables with >2**16 elements (because of inheritance) 106 #ifdef CC_INTERP 107 int _result_index; // C++ interpreter needs for converting results to/from stack 108 #endif 109 u2 _method_size; // size of this object 110 u1 _intrinsic_id; // vmSymbols::intrinsic_id (0 == _none) 111 u1 _jfr_towrite : 1, // Flags 112 _caller_sensitive : 1, 113 _force_inline : 1, 114 _hidden : 1, 115 _dont_inline : 1, 116 : 3; 117 118 #ifndef PRODUCT 119 int _compiled_invocation_count; // Number of nmethod invocations so far (for perf. debugging) 120 #endif 121 // Entry point for calling both from and to the interpreter. 122 address _i2i_entry; // All-args-on-stack calling convention 123 // Adapter blob (i2c/c2i) for this Method*. Set once when method is linked. 124 AdapterHandlerEntry* _adapter; 125 // Entry point for calling from compiled code, to compiled code if it exists 126 // or else the interpreter. 127 volatile address _from_compiled_entry; // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry() 128 // The entry point for calling both from and to compiled code is 129 // "_code->entry_point()". Because of tiered compilation and de-opt, this 130 // field can come and go. It can transition from NULL to not-null at any 131 // time (whenever a compile completes). It can transition from not-null to 132 // NULL only at safepoints (because of a de-opt). 133 nmethod* volatile _code; // Points to the corresponding piece of native code 134 volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry 135 136 // Constructor 137 Method(ConstMethod* xconst, AccessFlags access_flags, int size); 138 public: 139 140 static Method* allocate(ClassLoaderData* loader_data, 141 int byte_code_size, 142 AccessFlags access_flags, 143 InlineTableSizes* sizes, 144 ConstMethod::MethodType method_type, 145 TRAPS); 146 147 // CDS and vtbl checking can create an empty Method to get vtbl pointer. 148 Method(){} 149 150 // The Method vtable is restored by this call when the Method is in the 151 // shared archive. See patch_klass_vtables() in metaspaceShared.cpp for 152 // all the gory details. SA, dtrace and pstack helpers distinguish metadata 153 // by their vtable. 154 void restore_vtable() { guarantee(is_method(), "vtable restored by this call"); } 155 bool is_method() const volatile { return true; } 156 157 // accessors for instance variables 158 159 ConstMethod* constMethod() const { return _constMethod; } 160 void set_constMethod(ConstMethod* xconst) { _constMethod = xconst; } 161 162 163 static address make_adapters(methodHandle mh, TRAPS); 164 volatile address from_compiled_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); } 165 volatile address from_interpreted_entry() const{ return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); } 166 167 // access flag 168 AccessFlags access_flags() const { return _access_flags; } 169 void set_access_flags(AccessFlags flags) { _access_flags = flags; } 170 171 // name 172 Symbol* name() const { return constants()->symbol_at(name_index()); } 173 int name_index() const { return constMethod()->name_index(); } 174 void set_name_index(int index) { constMethod()->set_name_index(index); } 175 176 // signature 177 Symbol* signature() const { return constants()->symbol_at(signature_index()); } 178 int signature_index() const { return constMethod()->signature_index(); } 179 void set_signature_index(int index) { constMethod()->set_signature_index(index); } 180 181 // generics support 182 Symbol* generic_signature() const { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); } 183 int generic_signature_index() const { return constMethod()->generic_signature_index(); } 184 void set_generic_signature_index(int index) { constMethod()->set_generic_signature_index(index); } 185 186 // annotations support 187 AnnotationArray* annotations() const { 188 return constMethod()->method_annotations(); 189 } 190 AnnotationArray* parameter_annotations() const { 191 return constMethod()->parameter_annotations(); 192 } 193 AnnotationArray* annotation_default() const { 194 return constMethod()->default_annotations(); 195 } 196 AnnotationArray* type_annotations() const { 197 return constMethod()->type_annotations(); 198 } 199 200 #ifdef CC_INTERP 201 void set_result_index(BasicType type); 202 int result_index() { return _result_index; } 203 #endif 204 205 // Helper routine: get klass name + "." + method name + signature as 206 // C string, for the purpose of providing more useful NoSuchMethodErrors 207 // and fatal error handling. The string is allocated in resource 208 // area if a buffer is not provided by the caller. 209 char* name_and_sig_as_C_string() const; 210 char* name_and_sig_as_C_string(char* buf, int size) const; 211 212 // Static routine in the situations we don't have a Method* 213 static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature); 214 static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size); 215 216 Bytecodes::Code java_code_at(int bci) const { 217 return Bytecodes::java_code_at(this, bcp_from(bci)); 218 } 219 Bytecodes::Code code_at(int bci) const { 220 return Bytecodes::code_at(this, bcp_from(bci)); 221 } 222 223 // JVMTI breakpoints 224 Bytecodes::Code orig_bytecode_at(int bci) const; 225 void set_orig_bytecode_at(int bci, Bytecodes::Code code); 226 void set_breakpoint(int bci); 227 void clear_breakpoint(int bci); 228 void clear_all_breakpoints(); 229 // Tracking number of breakpoints, for fullspeed debugging. 230 // Only mutated by VM thread. 231 u2 number_of_breakpoints() const { 232 if (method_counters() == NULL) { 233 return 0; 234 } else { 235 return method_counters()->number_of_breakpoints(); 236 } 237 } 238 void incr_number_of_breakpoints(TRAPS) { 239 MethodCounters* mcs = get_method_counters(CHECK); 240 if (mcs != NULL) { 241 mcs->incr_number_of_breakpoints(); 242 } 243 } 244 void decr_number_of_breakpoints(TRAPS) { 245 MethodCounters* mcs = get_method_counters(CHECK); 246 if (mcs != NULL) { 247 mcs->decr_number_of_breakpoints(); 248 } 249 } 250 // Initialization only 251 void clear_number_of_breakpoints() { 252 if (method_counters() != NULL) { 253 method_counters()->clear_number_of_breakpoints(); 254 } 255 } 256 257 // index into InstanceKlass methods() array 258 // note: also used by jfr 259 u2 method_idnum() const { return constMethod()->method_idnum(); } 260 void set_method_idnum(u2 idnum) { constMethod()->set_method_idnum(idnum); } 261 262 // code size 263 int code_size() const { return constMethod()->code_size(); } 264 265 // method size 266 int method_size() const { return _method_size; } 267 void set_method_size(int size) { 268 assert(0 <= size && size < (1 << 16), "invalid method size"); 269 _method_size = size; 270 } 271 272 // constant pool for Klass* holding this method 273 ConstantPool* constants() const { return constMethod()->constants(); } 274 void set_constants(ConstantPool* c) { constMethod()->set_constants(c); } 275 276 // max stack 277 // return original max stack size for method verification 278 int verifier_max_stack() const { return constMethod()->max_stack(); } 279 int max_stack() const { return constMethod()->max_stack() + extra_stack_entries(); } 280 void set_max_stack(int size) { constMethod()->set_max_stack(size); } 281 282 // max locals 283 int max_locals() const { return constMethod()->max_locals(); } 284 void set_max_locals(int size) { constMethod()->set_max_locals(size); } 285 286 int highest_comp_level() const; 287 void set_highest_comp_level(int level); 288 int highest_osr_comp_level() const; 289 void set_highest_osr_comp_level(int level); 290 291 // Count of times method was exited via exception while interpreting 292 void interpreter_throwout_increment(TRAPS) { 293 MethodCounters* mcs = get_method_counters(CHECK); 294 if (mcs != NULL) { 295 mcs->interpreter_throwout_increment(); 296 } 297 } 298 299 int interpreter_throwout_count() const { 300 if (method_counters() == NULL) { 301 return 0; 302 } else { 303 return method_counters()->interpreter_throwout_count(); 304 } 305 } 306 307 // size of parameters 308 int size_of_parameters() const { return constMethod()->size_of_parameters(); } 309 void set_size_of_parameters(int size) { constMethod()->set_size_of_parameters(size); } 310 311 bool has_stackmap_table() const { 312 return constMethod()->has_stackmap_table(); 313 } 314 315 Array<u1>* stackmap_data() const { 316 return constMethod()->stackmap_data(); 317 } 318 319 void set_stackmap_data(Array<u1>* sd) { 320 constMethod()->set_stackmap_data(sd); 321 } 322 323 // exception handler table 324 bool has_exception_handler() const 325 { return constMethod()->has_exception_handler(); } 326 int exception_table_length() const 327 { return constMethod()->exception_table_length(); } 328 ExceptionTableElement* exception_table_start() const 329 { return constMethod()->exception_table_start(); } 330 331 // Finds the first entry point bci of an exception handler for an 332 // exception of klass ex_klass thrown at throw_bci. A value of NULL 333 // for ex_klass indicates that the exception klass is not known; in 334 // this case it matches any constraint class. Returns -1 if the 335 // exception cannot be handled in this method. The handler 336 // constraint classes are loaded if necessary. Note that this may 337 // throw an exception if loading of the constraint classes causes 338 // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError. 339 // If an exception is thrown, returns the bci of the 340 // exception handler which caused the exception to be thrown, which 341 // is needed for proper retries. See, for example, 342 // InterpreterRuntime::exception_handler_for_exception. 343 static int fast_exception_handler_bci_for(methodHandle mh, KlassHandle ex_klass, int throw_bci, TRAPS); 344 345 // method data access 346 MethodData* method_data() const { 347 return _method_data; 348 } 349 350 void set_method_data(MethodData* data) { 351 // The store into method must be released. On platforms without 352 // total store order (TSO) the reference may become visible before 353 // the initialization of data otherwise. 354 OrderAccess::release_store_ptr((volatile void *)&_method_data, data); 355 } 356 357 MethodCounters* method_counters() const { 358 return _method_counters; 359 } 360 361 void set_method_counters(MethodCounters* counters) { 362 // The store into method must be released. On platforms without 363 // total store order (TSO) the reference may become visible before 364 // the initialization of data otherwise. 365 OrderAccess::release_store_ptr((volatile void *)&_method_counters, counters); 366 } 367 368 #ifdef TIERED 369 // We are reusing interpreter_invocation_count as a holder for the previous event count! 370 // We can do that since interpreter_invocation_count is not used in tiered. 371 int prev_event_count() const { 372 if (method_counters() == NULL) { 373 return 0; 374 } else { 375 return method_counters()->interpreter_invocation_count(); 376 } 377 } 378 void set_prev_event_count(int count, TRAPS) { 379 MethodCounters* mcs = get_method_counters(CHECK); 380 if (mcs != NULL) { 381 mcs->set_interpreter_invocation_count(count); 382 } 383 } 384 jlong prev_time() const { 385 return method_counters() == NULL ? 0 : method_counters()->prev_time(); 386 } 387 void set_prev_time(jlong time, TRAPS) { 388 MethodCounters* mcs = get_method_counters(CHECK); 389 if (mcs != NULL) { 390 mcs->set_prev_time(time); 391 } 392 } 393 float rate() const { 394 return method_counters() == NULL ? 0 : method_counters()->rate(); 395 } 396 void set_rate(float rate, TRAPS) { 397 MethodCounters* mcs = get_method_counters(CHECK); 398 if (mcs != NULL) { 399 mcs->set_rate(rate); 400 } 401 } 402 #endif 403 404 int invocation_count(); 405 int backedge_count(); 406 407 bool was_executed_more_than(int n); 408 bool was_never_executed() { return !was_executed_more_than(0); } 409 410 static void build_interpreter_method_data(methodHandle method, TRAPS); 411 412 static MethodCounters* build_method_counters(Method* m, TRAPS); 413 414 int interpreter_invocation_count() { 415 if (TieredCompilation) return invocation_count(); 416 else return (method_counters() == NULL) ? 0 : 417 method_counters()->interpreter_invocation_count(); 418 } 419 int increment_interpreter_invocation_count(TRAPS) { 420 if (TieredCompilation) ShouldNotReachHere(); 421 MethodCounters* mcs = get_method_counters(CHECK_0); 422 return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count(); 423 } 424 425 #ifndef PRODUCT 426 int compiled_invocation_count() const { return _compiled_invocation_count; } 427 void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; } 428 #endif // not PRODUCT 429 430 // Clear (non-shared space) pointers which could not be relevant 431 // if this (shared) method were mapped into another JVM. 432 void remove_unshareable_info(); 433 434 // nmethod/verified compiler entry 435 address verified_code_entry(); 436 bool check_code() const; // Not inline to avoid circular ref 437 nmethod* volatile code() const { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); } 438 void clear_code(); // Clear out any compiled code 439 static void set_code(methodHandle mh, nmethod* code); 440 void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; } 441 address get_i2c_entry(); 442 address get_c2i_entry(); 443 address get_c2i_unverified_entry(); 444 AdapterHandlerEntry* adapter() { return _adapter; } 445 // setup entry points 446 void link_method(methodHandle method, TRAPS); 447 // clear entry points. Used by sharing code 448 void unlink_method(); 449 450 // vtable index 451 enum VtableIndexFlag { 452 // Valid vtable indexes are non-negative (>= 0). 453 // These few negative values are used as sentinels. 454 itable_index_max = -10, // first itable index, growing downward 455 pending_itable_index = -9, // itable index will be assigned 456 invalid_vtable_index = -4, // distinct from any valid vtable index 457 garbage_vtable_index = -3, // not yet linked; no vtable layout yet 458 nonvirtual_vtable_index = -2 // there is no need for vtable dispatch 459 // 6330203 Note: Do not use -1, which was overloaded with many meanings. 460 }; 461 DEBUG_ONLY(bool valid_vtable_index() const { return _vtable_index >= nonvirtual_vtable_index; }) 462 bool has_vtable_index() const { return _vtable_index >= 0; } 463 int vtable_index() const { return _vtable_index; } 464 void set_vtable_index(int index) { _vtable_index = index; } 465 DEBUG_ONLY(bool valid_itable_index() const { return _vtable_index <= pending_itable_index; }) 466 bool has_itable_index() const { return _vtable_index <= itable_index_max; } 467 int itable_index() const { assert(valid_itable_index(), ""); 468 return itable_index_max - _vtable_index; } 469 void set_itable_index(int index) { _vtable_index = itable_index_max - index; assert(valid_itable_index(), ""); } 470 471 // interpreter entry 472 address interpreter_entry() const { return _i2i_entry; } 473 // Only used when first initialize so we can set _i2i_entry and _from_interpreted_entry 474 void set_interpreter_entry(address entry) { _i2i_entry = entry; _from_interpreted_entry = entry; } 475 476 // native function (used for native methods only) 477 enum { 478 native_bind_event_is_interesting = true 479 }; 480 address native_function() const { return *(native_function_addr()); } 481 address critical_native_function(); 482 483 // Must specify a real function (not NULL). 484 // Use clear_native_function() to unregister. 485 void set_native_function(address function, bool post_event_flag); 486 bool has_native_function() const; 487 void clear_native_function(); 488 489 // signature handler (used for native methods only) 490 address signature_handler() const { return *(signature_handler_addr()); } 491 void set_signature_handler(address handler); 492 493 // Interpreter oopmap support 494 void mask_for(int bci, InterpreterOopMap* mask); 495 496 #ifndef PRODUCT 497 // operations on invocation counter 498 void print_invocation_count(); 499 #endif 500 501 // byte codes 502 void set_code(address code) { return constMethod()->set_code(code); } 503 address code_base() const { return constMethod()->code_base(); } 504 bool contains(address bcp) const { return constMethod()->contains(bcp); } 505 506 // prints byte codes 507 void print_codes() const { print_codes_on(tty); } 508 void print_codes_on(outputStream* st) const PRODUCT_RETURN; 509 void print_codes_on(int from, int to, outputStream* st) const PRODUCT_RETURN; 510 511 // method parameters 512 bool has_method_parameters() const 513 { return constMethod()->has_method_parameters(); } 514 int method_parameters_length() const 515 { return constMethod()->method_parameters_length(); } 516 MethodParametersElement* method_parameters_start() const 517 { return constMethod()->method_parameters_start(); } 518 519 // checked exceptions 520 int checked_exceptions_length() const 521 { return constMethod()->checked_exceptions_length(); } 522 CheckedExceptionElement* checked_exceptions_start() const 523 { return constMethod()->checked_exceptions_start(); } 524 525 // localvariable table 526 bool has_localvariable_table() const 527 { return constMethod()->has_localvariable_table(); } 528 int localvariable_table_length() const 529 { return constMethod()->localvariable_table_length(); } 530 LocalVariableTableElement* localvariable_table_start() const 531 { return constMethod()->localvariable_table_start(); } 532 533 bool has_linenumber_table() const 534 { return constMethod()->has_linenumber_table(); } 535 u_char* compressed_linenumber_table() const 536 { return constMethod()->compressed_linenumber_table(); } 537 538 // method holder (the Klass* holding this method) 539 InstanceKlass* method_holder() const { return constants()->pool_holder(); } 540 541 void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments) 542 Symbol* klass_name() const; // returns the name of the method holder 543 BasicType result_type() const; // type of the method result 544 int result_type_index() const; // type index of the method result 545 bool is_returning_oop() const { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY); } 546 bool is_returning_fp() const { BasicType r = result_type(); return (r == T_FLOAT || r == T_DOUBLE); } 547 548 // Checked exceptions thrown by this method (resolved to mirrors) 549 objArrayHandle resolved_checked_exceptions(TRAPS) { return resolved_checked_exceptions_impl(this, THREAD); } 550 551 // Access flags 552 bool is_public() const { return access_flags().is_public(); } 553 bool is_private() const { return access_flags().is_private(); } 554 bool is_protected() const { return access_flags().is_protected(); } 555 bool is_package_private() const { return !is_public() && !is_private() && !is_protected(); } 556 bool is_static() const { return access_flags().is_static(); } 557 bool is_final() const { return access_flags().is_final(); } 558 bool is_synchronized() const { return access_flags().is_synchronized();} 559 bool is_native() const { return access_flags().is_native(); } 560 bool is_abstract() const { return access_flags().is_abstract(); } 561 bool is_strict() const { return access_flags().is_strict(); } 562 bool is_synthetic() const { return access_flags().is_synthetic(); } 563 564 // returns true if contains only return operation 565 bool is_empty_method() const; 566 567 // returns true if this is a vanilla constructor 568 bool is_vanilla_constructor() const; 569 570 // checks method and its method holder 571 bool is_final_method() const; 572 bool is_final_method(AccessFlags class_access_flags) const; 573 bool is_default_method() const; 574 575 // true if method needs no dynamic dispatch (final and/or no vtable entry) 576 bool can_be_statically_bound() const; 577 bool can_be_statically_bound(AccessFlags class_access_flags) const; 578 579 // returns true if the method has any backward branches. 580 bool has_loops() { 581 return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag(); 582 }; 583 584 bool compute_has_loops_flag(); 585 586 bool has_jsrs() { 587 return access_flags().has_jsrs(); 588 }; 589 void set_has_jsrs() { 590 _access_flags.set_has_jsrs(); 591 } 592 593 // returns true if the method has any monitors. 594 bool has_monitors() const { return is_synchronized() || access_flags().has_monitor_bytecodes(); } 595 bool has_monitor_bytecodes() const { return access_flags().has_monitor_bytecodes(); } 596 597 void set_has_monitor_bytecodes() { _access_flags.set_has_monitor_bytecodes(); } 598 599 // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit bytecodes 600 // propererly nest in the method. It might return false, even though they actually nest properly, since the info. 601 // has not been computed yet. 602 bool guaranteed_monitor_matching() const { return access_flags().is_monitor_matching(); } 603 void set_guaranteed_monitor_matching() { _access_flags.set_monitor_matching(); } 604 605 // returns true if the method is an accessor function (setter/getter). 606 bool is_accessor() const; 607 608 // returns true if the method is an initializer (<init> or <clinit>). 609 bool is_initializer() const; 610 611 // returns true if the method is static OR if the classfile version < 51 612 bool has_valid_initializer_flags() const; 613 614 // returns true if the method name is <clinit> and the method has 615 // valid static initializer flags. 616 bool is_static_initializer() const; 617 618 // compiled code support 619 // NOTE: code() is inherently racy as deopt can be clearing code 620 // simultaneously. Use with caution. 621 bool has_compiled_code() const { return code() != NULL; } 622 623 // sizing 624 static int header_size() { return sizeof(Method)/HeapWordSize; } 625 static int size(bool is_native); 626 int size() const { return method_size(); } 627 #if INCLUDE_SERVICES 628 void collect_statistics(KlassSizeStats *sz) const; 629 #endif 630 631 // interpreter support 632 static ByteSize const_offset() { return byte_offset_of(Method, _constMethod ); } 633 static ByteSize access_flags_offset() { return byte_offset_of(Method, _access_flags ); } 634 #ifdef CC_INTERP 635 static ByteSize result_index_offset() { return byte_offset_of(Method, _result_index ); } 636 #endif /* CC_INTERP */ 637 static ByteSize from_compiled_offset() { return byte_offset_of(Method, _from_compiled_entry); } 638 static ByteSize code_offset() { return byte_offset_of(Method, _code); } 639 static ByteSize method_data_offset() { 640 return byte_offset_of(Method, _method_data); 641 } 642 static ByteSize method_counters_offset() { 643 return byte_offset_of(Method, _method_counters); 644 } 645 #ifndef PRODUCT 646 static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); } 647 #endif // not PRODUCT 648 static ByteSize native_function_offset() { return in_ByteSize(sizeof(Method)); } 649 static ByteSize from_interpreted_offset() { return byte_offset_of(Method, _from_interpreted_entry ); } 650 static ByteSize interpreter_entry_offset() { return byte_offset_of(Method, _i2i_entry ); } 651 static ByteSize signature_handler_offset() { return in_ByteSize(sizeof(Method) + wordSize); } 652 653 // for code generation 654 static int method_data_offset_in_bytes() { return offset_of(Method, _method_data); } 655 static int intrinsic_id_offset_in_bytes() { return offset_of(Method, _intrinsic_id); } 656 static int intrinsic_id_size_in_bytes() { return sizeof(u1); } 657 658 // Static methods that are used to implement member methods where an exposed this pointer 659 // is needed due to possible GCs 660 static objArrayHandle resolved_checked_exceptions_impl(Method* this_oop, TRAPS); 661 662 // Returns the byte code index from the byte code pointer 663 int bci_from(address bcp) const; 664 address bcp_from(int bci) const; 665 int validate_bci_from_bcx(intptr_t bcx) const; 666 667 // Returns the line number for a bci if debugging information for the method is prowided, 668 // -1 is returned otherwise. 669 int line_number_from_bci(int bci) const; 670 671 // Reflection support 672 bool is_overridden_in(Klass* k) const; 673 674 // Stack walking support 675 bool is_ignored_by_security_stack_walk() const; 676 677 // JSR 292 support 678 bool is_method_handle_intrinsic() const; // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id) 679 bool is_compiled_lambda_form() const; // intrinsic_id() == vmIntrinsics::_compiledLambdaForm 680 bool has_member_arg() const; // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc. 681 static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual 682 Symbol* signature, //anything at all 683 TRAPS); 684 static Klass* check_non_bcp_klass(Klass* klass); 685 686 // How many extra stack entries for invokedynamic when it's enabled 687 static const int extra_stack_entries_for_jsr292 = 1; 688 689 // this operates only on invoke methods: 690 // presize interpreter frames for extra interpreter stack entries, if needed 691 // Account for the extra appendix argument for invokehandle/invokedynamic 692 static int extra_stack_entries() { return EnableInvokeDynamic ? extra_stack_entries_for_jsr292 : 0; } 693 static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize 694 695 // RedefineClasses() support: 696 bool is_old() const { return access_flags().is_old(); } 697 void set_is_old() { _access_flags.set_is_old(); } 698 bool is_obsolete() const { return access_flags().is_obsolete(); } 699 void set_is_obsolete() { _access_flags.set_is_obsolete(); } 700 bool on_stack() const { return access_flags().on_stack(); } 701 void set_on_stack(const bool value); 702 703 // see the definition in Method*.cpp for the gory details 704 bool should_not_be_cached() const; 705 706 // JVMTI Native method prefixing support: 707 bool is_prefixed_native() const { return access_flags().is_prefixed_native(); } 708 void set_is_prefixed_native() { _access_flags.set_is_prefixed_native(); } 709 710 // Rewriting support 711 static methodHandle clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length, 712 u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS); 713 714 // jmethodID handling 715 // Because the useful life-span of a jmethodID cannot be determined, 716 // once created they are never reclaimed. The methods to which they refer, 717 // however, can be GC'ed away if the class is unloaded or if the method is 718 // made obsolete or deleted -- in these cases, the jmethodID 719 // refers to NULL (as is the case for any weak reference). 720 static jmethodID make_jmethod_id(ClassLoaderData* loader_data, Method* mh); 721 static void destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID mid); 722 723 // Use resolve_jmethod_id() in situations where the caller is expected 724 // to provide a valid jmethodID; the only sanity checks are in asserts; 725 // result guaranteed not to be NULL. 726 inline static Method* resolve_jmethod_id(jmethodID mid) { 727 assert(mid != NULL, "JNI method id should not be null"); 728 return *((Method**)mid); 729 } 730 731 // Use checked_resolve_jmethod_id() in situations where the caller 732 // should provide a valid jmethodID, but might not. NULL is returned 733 // when the jmethodID does not refer to a valid method. 734 static Method* checked_resolve_jmethod_id(jmethodID mid); 735 736 static void change_method_associated_with_jmethod_id(jmethodID old_jmid_ptr, Method* new_method); 737 static bool is_method_id(jmethodID mid); 738 739 // Clear methods 740 static void clear_jmethod_ids(ClassLoaderData* loader_data); 741 static void print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN; 742 743 // Get this method's jmethodID -- allocate if it doesn't exist 744 jmethodID jmethod_id() { methodHandle this_h(this); 745 return InstanceKlass::get_jmethod_id(method_holder(), this_h); } 746 747 // Lookup the jmethodID for this method. Return NULL if not found. 748 // NOTE that this function can be called from a signal handler 749 // (see AsyncGetCallTrace support for Forte Analyzer) and this 750 // needs to be async-safe. No allocation should be done and 751 // so handles are not used to avoid deadlock. 752 jmethodID find_jmethod_id_or_null() { return method_holder()->jmethod_id_or_null(this); } 753 754 // Support for inlining of intrinsic methods 755 vmIntrinsics::ID intrinsic_id() const { return (vmIntrinsics::ID) _intrinsic_id; } 756 void set_intrinsic_id(vmIntrinsics::ID id) { _intrinsic_id = (u1) id; } 757 758 // Helper routines for intrinsic_id() and vmIntrinsics::method(). 759 void init_intrinsic_id(); // updates from _none if a match 760 static vmSymbols::SID klass_id_for_intrinsics(Klass* holder); 761 762 bool jfr_towrite() { return _jfr_towrite; } 763 void set_jfr_towrite(bool x) { _jfr_towrite = x; } 764 bool caller_sensitive() { return _caller_sensitive; } 765 void set_caller_sensitive(bool x) { _caller_sensitive = x; } 766 bool force_inline() { return _force_inline; } 767 void set_force_inline(bool x) { _force_inline = x; } 768 bool dont_inline() { return _dont_inline; } 769 void set_dont_inline(bool x) { _dont_inline = x; } 770 bool is_hidden() { return _hidden; } 771 void set_hidden(bool x) { _hidden = x; } 772 ConstMethod::MethodType method_type() const { 773 return _constMethod->method_type(); 774 } 775 bool is_overpass() const { return method_type() == ConstMethod::OVERPASS; } 776 777 // On-stack replacement support 778 bool has_osr_nmethod(int level, bool match_level) { 779 return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL; 780 } 781 782 nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) { 783 return method_holder()->lookup_osr_nmethod(this, bci, level, match_level); 784 } 785 786 // Inline cache support 787 void cleanup_inline_caches(); 788 789 // Find if klass for method is loaded 790 bool is_klass_loaded_by_klass_index(int klass_index) const; 791 bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const; 792 793 // Indicates whether compilation failed earlier for this method, or 794 // whether it is not compilable for another reason like having a 795 // breakpoint set in it. 796 bool is_not_compilable(int comp_level = CompLevel_any) const; 797 void set_not_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL); 798 void set_not_compilable_quietly(int comp_level = CompLevel_all) { 799 set_not_compilable(comp_level, false); 800 } 801 bool is_not_osr_compilable(int comp_level = CompLevel_any) const; 802 void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL); 803 void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) { 804 set_not_osr_compilable(comp_level, false); 805 } 806 bool is_always_compilable() const; 807 808 private: 809 void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason); 810 811 public: 812 MethodCounters* get_method_counters(TRAPS) { 813 if (_method_counters == NULL) { 814 build_method_counters(this, CHECK_AND_CLEAR_NULL); 815 } 816 return _method_counters; 817 } 818 819 bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); } 820 void set_not_c1_compilable() { _access_flags.set_not_c1_compilable(); } 821 void clear_not_c1_compilable() { _access_flags.clear_not_c1_compilable(); } 822 bool is_not_c2_compilable() const { return access_flags().is_not_c2_compilable(); } 823 void set_not_c2_compilable() { _access_flags.set_not_c2_compilable(); } 824 void clear_not_c2_compilable() { _access_flags.clear_not_c2_compilable(); } 825 826 bool is_not_c1_osr_compilable() const { return is_not_c1_compilable(); } // don't waste an accessFlags bit 827 void set_not_c1_osr_compilable() { set_not_c1_compilable(); } // don't waste an accessFlags bit 828 void clear_not_c1_osr_compilable() { clear_not_c1_compilable(); } // don't waste an accessFlags bit 829 bool is_not_c2_osr_compilable() const { return access_flags().is_not_c2_osr_compilable(); } 830 void set_not_c2_osr_compilable() { _access_flags.set_not_c2_osr_compilable(); } 831 void clear_not_c2_osr_compilable() { _access_flags.clear_not_c2_osr_compilable(); } 832 833 // Background compilation support 834 bool queued_for_compilation() const { return access_flags().queued_for_compilation(); } 835 void set_queued_for_compilation() { _access_flags.set_queued_for_compilation(); } 836 void clear_queued_for_compilation() { _access_flags.clear_queued_for_compilation(); } 837 838 // Resolve all classes in signature, return 'true' if successful 839 static bool load_signature_classes(methodHandle m, TRAPS); 840 841 // Return if true if not all classes references in signature, including return type, has been loaded 842 static bool has_unloaded_classes_in_signature(methodHandle m, TRAPS); 843 844 // Printing 845 void print_short_name(outputStream* st = tty); // prints as klassname::methodname; Exposed so field engineers can debug VM 846 #if INCLUDE_JVMTI 847 void print_name(outputStream* st = tty); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses 848 #else 849 void print_name(outputStream* st = tty) PRODUCT_RETURN; // prints as "virtual void foo(int)" 850 #endif 851 852 // Helper routine used for method sorting 853 static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true); 854 855 // Deallocation function for redefine classes or if an error occurs 856 void deallocate_contents(ClassLoaderData* loader_data); 857 858 // Printing 859 #ifndef PRODUCT 860 void print_on(outputStream* st) const; 861 #endif 862 void print_value_on(outputStream* st) const; 863 864 const char* internal_name() const { return "{method}"; } 865 866 // Check for valid method pointer 867 bool is_valid_method() const; 868 869 // Verify 870 void verify() { verify_on(tty); } 871 void verify_on(outputStream* st); 872 873 private: 874 875 // Inlined elements 876 address* native_function_addr() const { assert(is_native(), "must be native"); return (address*) (this+1); } 877 address* signature_handler_addr() const { return native_function_addr() + 1; } 878 }; 879 880 881 // Utility class for compressing line number tables 882 883 class CompressedLineNumberWriteStream: public CompressedWriteStream { 884 private: 885 int _bci; 886 int _line; 887 public: 888 // Constructor 889 CompressedLineNumberWriteStream(int initial_size) : CompressedWriteStream(initial_size), _bci(0), _line(0) {} 890 CompressedLineNumberWriteStream(u_char* buffer, int initial_size) : CompressedWriteStream(buffer, initial_size), _bci(0), _line(0) {} 891 892 // Write (bci, line number) pair to stream 893 void write_pair_regular(int bci_delta, int line_delta); 894 895 inline void write_pair_inline(int bci, int line) { 896 int bci_delta = bci - _bci; 897 int line_delta = line - _line; 898 _bci = bci; 899 _line = line; 900 // Skip (0,0) deltas - they do not add information and conflict with terminator. 901 if (bci_delta == 0 && line_delta == 0) return; 902 // Check if bci is 5-bit and line number 3-bit unsigned. 903 if (((bci_delta & ~0x1F) == 0) && ((line_delta & ~0x7) == 0)) { 904 // Compress into single byte. 905 jubyte value = ((jubyte) bci_delta << 3) | (jubyte) line_delta; 906 // Check that value doesn't match escape character. 907 if (value != 0xFF) { 908 write_byte(value); 909 return; 910 } 911 } 912 write_pair_regular(bci_delta, line_delta); 913 } 914 915 // Windows AMD64 + Apr 2005 PSDK with /O2 generates bad code for write_pair. 916 // Disabling optimization doesn't work for methods in header files 917 // so we force it to call through the non-optimized version in the .cpp. 918 // It's gross, but it's the only way we can ensure that all callers are 919 // fixed. _MSC_VER is defined by the windows compiler 920 #if defined(_M_AMD64) && _MSC_VER >= 1400 921 void write_pair(int bci, int line); 922 #else 923 void write_pair(int bci, int line) { write_pair_inline(bci, line); } 924 #endif 925 926 // Write end-of-stream marker 927 void write_terminator() { write_byte(0); } 928 }; 929 930 931 // Utility class for decompressing line number tables 932 933 class CompressedLineNumberReadStream: public CompressedReadStream { 934 private: 935 int _bci; 936 int _line; 937 public: 938 // Constructor 939 CompressedLineNumberReadStream(u_char* buffer); 940 // Read (bci, line number) pair from stream. Returns false at end-of-stream. 941 bool read_pair(); 942 // Accessing bci and line number (after calling read_pair) 943 int bci() const { return _bci; } 944 int line() const { return _line; } 945 }; 946 947 948 /// Fast Breakpoints. 949 950 // If this structure gets more complicated (because bpts get numerous), 951 // move it into its own header. 952 953 // There is presently no provision for concurrent access 954 // to breakpoint lists, which is only OK for JVMTI because 955 // breakpoints are written only at safepoints, and are read 956 // concurrently only outside of safepoints. 957 958 class BreakpointInfo : public CHeapObj<mtClass> { 959 friend class VMStructs; 960 private: 961 Bytecodes::Code _orig_bytecode; 962 int _bci; 963 u2 _name_index; // of method 964 u2 _signature_index; // of method 965 BreakpointInfo* _next; // simple storage allocation 966 967 public: 968 BreakpointInfo(Method* m, int bci); 969 970 // accessors 971 Bytecodes::Code orig_bytecode() { return _orig_bytecode; } 972 void set_orig_bytecode(Bytecodes::Code code) { _orig_bytecode = code; } 973 int bci() { return _bci; } 974 975 BreakpointInfo* next() const { return _next; } 976 void set_next(BreakpointInfo* n) { _next = n; } 977 978 // helps for searchers 979 bool match(const Method* m, int bci) { 980 return bci == _bci && match(m); 981 } 982 983 bool match(const Method* m) { 984 return _name_index == m->name_index() && 985 _signature_index == m->signature_index(); 986 } 987 988 void set(Method* method); 989 void clear(Method* method); 990 }; 991 992 // Utility class for access exception handlers 993 class ExceptionTable : public StackObj { 994 private: 995 ExceptionTableElement* _table; 996 u2 _length; 997 998 public: 999 ExceptionTable(const Method* m) { 1000 if (m->has_exception_handler()) { 1001 _table = m->exception_table_start(); 1002 _length = m->exception_table_length(); 1003 } else { 1004 _table = NULL; 1005 _length = 0; 1006 } 1007 } 1008 1009 int length() const { 1010 return _length; 1011 } 1012 1013 u2 start_pc(int idx) const { 1014 assert(idx < _length, "out of bounds"); 1015 return _table[idx].start_pc; 1016 } 1017 1018 void set_start_pc(int idx, u2 value) { 1019 assert(idx < _length, "out of bounds"); 1020 _table[idx].start_pc = value; 1021 } 1022 1023 u2 end_pc(int idx) const { 1024 assert(idx < _length, "out of bounds"); 1025 return _table[idx].end_pc; 1026 } 1027 1028 void set_end_pc(int idx, u2 value) { 1029 assert(idx < _length, "out of bounds"); 1030 _table[idx].end_pc = value; 1031 } 1032 1033 u2 handler_pc(int idx) const { 1034 assert(idx < _length, "out of bounds"); 1035 return _table[idx].handler_pc; 1036 } 1037 1038 void set_handler_pc(int idx, u2 value) { 1039 assert(idx < _length, "out of bounds"); 1040 _table[idx].handler_pc = value; 1041 } 1042 1043 u2 catch_type_index(int idx) const { 1044 assert(idx < _length, "out of bounds"); 1045 return _table[idx].catch_type_index; 1046 } 1047 1048 void set_catch_type_index(int idx, u2 value) { 1049 assert(idx < _length, "out of bounds"); 1050 _table[idx].catch_type_index = value; 1051 } 1052 }; 1053 1054 #endif // SHARE_VM_OOPS_METHODOOP_HPP