/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_METHODOOP_HPP
#define SHARE_VM_OOPS_METHODOOP_HPP

#include "classfile/vmSymbols.hpp"
#include "code/compressedStream.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/invocationCounter.hpp"
#include "oops/annotations.hpp"
#include "oops/constantPool.hpp"
#include "oops/methodCounters.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.hpp"
#include "oops/typeArrayOop.hpp"
#include "utilities/accessFlags.hpp"
#include "utilities/growableArray.hpp"

// A Method represents a Java method.
//
// Note that most applications load thousands of methods, so keeping the size of this
// class small has a big impact on footprint.
//
// Note that native_function and signature_handler have to be at fixed offsets
// (required by the interpreter)
//
// Method embedded field layout (after declared fields):
// [EMBEDDED native_function    (present only if native) ]
// [EMBEDDED signature_handler  (present only if native) ]

class CheckedExceptionElement;
class LocalVariableTableElement;
class AdapterHandlerEntry;
class MethodData;
class MethodCounters;
class ConstMethod;
class InlineTableSizes;
class KlassSizeStats;

class Method : public Metadata {
 friend class VMStructs;
 private:
  ConstMethod*    _constMethod;     // Method read-only data.
  MethodData*     _method_data;
  MethodCounters* _method_counters;
  AccessFlags     _access_flags;    // Access flags
  int             _vtable_index;    // vtable index of this method (see VtableIndexFlag)
                                    // note: can have vtables with >2**16 elements (because of inheritance)
#ifdef CC_INTERP
  int             _result_index;    // C++ interpreter needs for converting results to/from stack
#endif
  u2              _method_size;     // size of this object
  u1              _intrinsic_id;    // vmSymbols::intrinsic_id (0 == _none)

  // Flags
  enum Flags {
    _jfr_towrite      = 1 << 0,
    _caller_sensitive = 1 << 1,
    _force_inline     = 1 << 2,
    _dont_inline      = 1 << 3,
    _hidden           = 1 << 4
  };
  u1 _flags;

#ifndef PRODUCT
  int _compiled_invocation_count;   // Number of nmethod invocations so far (for perf. debugging)
#endif
  // Entry point for calling both from and to the interpreter.
  address _i2i_entry;               // All-args-on-stack calling convention
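  // Overview of the entry point fields: _i2i_entry and the four fields below
  // form the method's entry point cache. Interpreted callers jump through
  // _from_interpreted_entry, compiled callers through _from_compiled_entry,
  // and both caches are re-pointed as _code appears (a compile finishes) or
  // disappears (deoptimization); see the per-field comments below.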
  // Adapter blob (i2c/c2i) for this Method*. Set once when method is linked.
  AdapterHandlerEntry* _adapter;
  // Entry point for calling from compiled code, to compiled code if it exists
  // or else the interpreter.
  volatile address _from_compiled_entry;    // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
  // The entry point for calling both from and to compiled code is
  // "_code->entry_point()". Because of tiered compilation and de-opt, this
  // field can come and go. It can transition from NULL to not-null at any
  // time (whenever a compile completes). It can transition from not-null to
  // NULL only at safepoints (because of a de-opt).
  nmethod* volatile _code;                   // Points to the corresponding piece of native code
  volatile address _from_interpreted_entry;  // Cache of _code ? _adapter->i2c_entry() : _i2i_entry

  // Constructor
  Method(ConstMethod* xconst, AccessFlags access_flags, int size);
 public:

  static Method* allocate(ClassLoaderData* loader_data,
                          int byte_code_size,
                          AccessFlags access_flags,
                          InlineTableSizes* sizes,
                          ConstMethod::MethodType method_type,
                          TRAPS);

  // CDS and vtbl checking can create an empty Method to get vtbl pointer.
  Method(){}

  // The Method vtable is restored by this call when the Method is in the
  // shared archive. See patch_klass_vtables() in metaspaceShared.cpp for
  // all the gory details. SA, dtrace and pstack helpers distinguish metadata
  // by their vtable.
  void restore_vtable() { guarantee(is_method(), "vtable restored by this call"); }
  bool is_method() const volatile { return true; }

  void restore_unshareable_info(TRAPS);

  // accessors for instance variables

  ConstMethod* constMethod() const { return _constMethod; }
  void set_constMethod(ConstMethod* xconst) { _constMethod = xconst; }

  static address make_adapters(methodHandle mh, TRAPS);
  volatile address from_compiled_entry() const    { return (address)OrderAccess::load_ptr_acquire(&_from_compiled_entry); }
  volatile address from_interpreted_entry() const { return (address)OrderAccess::load_ptr_acquire(&_from_interpreted_entry); }

  // access flag
  AccessFlags access_flags() const         { return _access_flags; }
  void set_access_flags(AccessFlags flags) { _access_flags = flags; }

  // name
  Symbol* name() const           { return constants()->symbol_at(name_index()); }
  int name_index() const         { return constMethod()->name_index(); }
  void set_name_index(int index) { constMethod()->set_name_index(index); }

  // signature
  Symbol* signature() const           { return constants()->symbol_at(signature_index()); }
  int signature_index() const         { return constMethod()->signature_index(); }
  void set_signature_index(int index) { constMethod()->set_signature_index(index); }
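  // Note: name() and signature() are Symbols interned in the constant pool;
  // signature() is the raw JVM descriptor, e.g. a method declared as
  // `int foo(String s, long x)` has the signature "(Ljava/lang/String;J)I".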
  // generics support
  Symbol* generic_signature() const { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); }
  int generic_signature_index() const { return constMethod()->generic_signature_index(); }
  void set_generic_signature_index(int index) { constMethod()->set_generic_signature_index(index); }

  // annotations support
  AnnotationArray* annotations() const {
    return constMethod()->method_annotations();
  }
  AnnotationArray* parameter_annotations() const {
    return constMethod()->parameter_annotations();
  }
  AnnotationArray* annotation_default() const {
    return constMethod()->default_annotations();
  }
  AnnotationArray* type_annotations() const {
    return constMethod()->type_annotations();
  }

#ifdef CC_INTERP
  void set_result_index(BasicType type);
  int result_index() { return _result_index; }
#endif

  // Helper routine: get klass name + "." + method name + signature as
  // C string, for the purpose of providing more useful NoSuchMethodErrors
  // and fatal error handling. The string is allocated in resource
  // area if a buffer is not provided by the caller.
  char* name_and_sig_as_C_string() const;
  char* name_and_sig_as_C_string(char* buf, int size) const;

  // Static routines for situations where we don't have a Method*
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size);

  Bytecodes::Code java_code_at(int bci) const {
    return Bytecodes::java_code_at(this, bcp_from(bci));
  }
  Bytecodes::Code code_at(int bci) const {
    return Bytecodes::code_at(this, bcp_from(bci));
  }

  // JVMTI breakpoints
  Bytecodes::Code orig_bytecode_at(int bci) const;
  void set_orig_bytecode_at(int bci, Bytecodes::Code code);
  void set_breakpoint(int bci);
  void clear_breakpoint(int bci);
  void clear_all_breakpoints();
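  // The JVMTI breakpoint support above works with the BreakpointInfo list
  // declared at the end of this file: set_breakpoint(bci) installs
  // Bytecodes::_breakpoint at the given bci, and the original bytecode is kept
  // in a BreakpointInfo entry, which orig_bytecode_at(bci) consults so the
  // interpreter can still execute the instruction that was overwritten.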
  // Tracking number of breakpoints, for fullspeed debugging.
  // Only mutated by VM thread.
  u2 number_of_breakpoints() const {
    MethodCounters* mcs = method_counters();
    if (mcs == NULL) {
      return 0;
    } else {
      return mcs->number_of_breakpoints();
    }
  }
  void incr_number_of_breakpoints(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->incr_number_of_breakpoints();
    }
  }
  void decr_number_of_breakpoints(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->decr_number_of_breakpoints();
    }
  }
  // Initialization only
  void clear_number_of_breakpoints() {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->clear_number_of_breakpoints();
    }
  }

  // index into InstanceKlass methods() array
  // note: also used by jfr
  u2 method_idnum() const         { return constMethod()->method_idnum(); }
  void set_method_idnum(u2 idnum) { constMethod()->set_method_idnum(idnum); }

  // code size
  int code_size() const { return constMethod()->code_size(); }

  // method size
  int method_size() const { return _method_size; }
  void set_method_size(int size) {
    assert(0 <= size && size < (1 << 16), "invalid method size");
    _method_size = size;
  }

  // constant pool for Klass* holding this method
  ConstantPool* constants() const     { return constMethod()->constants(); }
  void set_constants(ConstantPool* c) { constMethod()->set_constants(c); }

  // max stack
  // return original max stack size for method verification
  int verifier_max_stack() const { return constMethod()->max_stack(); }
  int max_stack() const          { return constMethod()->max_stack() + extra_stack_entries(); }
  void set_max_stack(int size)   { constMethod()->set_max_stack(size); }

  // max locals
  int max_locals() const        { return constMethod()->max_locals(); }
  void set_max_locals(int size) { constMethod()->set_max_locals(size); }

  int highest_comp_level() const;
  void set_highest_comp_level(int level);
  int highest_osr_comp_level() const;
  void set_highest_osr_comp_level(int level);

  // Count of times method was exited via exception while interpreting
  void interpreter_throwout_increment(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->interpreter_throwout_increment();
    }
  }

  int interpreter_throwout_count() const {
    MethodCounters* mcs = method_counters();
    if (mcs == NULL) {
      return 0;
    } else {
      return mcs->interpreter_throwout_count();
    }
  }

  // size of parameters
  int size_of_parameters() const        { return constMethod()->size_of_parameters(); }
  void set_size_of_parameters(int size) { constMethod()->set_size_of_parameters(size); }

  bool has_stackmap_table() const {
    return constMethod()->has_stackmap_table();
  }

  Array<u1>* stackmap_data() const {
    return constMethod()->stackmap_data();
  }

  void set_stackmap_data(Array<u1>* sd) {
    constMethod()->set_stackmap_data(sd);
  }

  // exception handler table
  bool has_exception_handler() const  { return constMethod()->has_exception_handler(); }
  int exception_table_length() const  { return constMethod()->exception_table_length(); }
  ExceptionTableElement* exception_table_start() const
                                      { return constMethod()->exception_table_start(); }
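  // Illustrative use of the exception table (a sketch, using the ExceptionTable
  // wrapper declared at the bottom of this file), e.g. for a Method* m:
  //
  //   ExceptionTable table(m);
  //   for (int i = 0; i < table.length(); i++) {
  //     // each handler covers bcis in [start_pc, end_pc); catch_type_index == 0
  //     // means a catch-all entry (e.g. a finally block)
  //     tty->print_cr("handler at %d covers [%d, %d)",
  //                   table.handler_pc(i), table.start_pc(i), table.end_pc(i));
  //   }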
  // Finds the first entry point bci of an exception handler for an
  // exception of klass ex_klass thrown at throw_bci. A value of NULL
  // for ex_klass indicates that the exception klass is not known; in
  // this case it matches any constraint class. Returns -1 if the
  // exception cannot be handled in this method. The handler
  // constraint classes are loaded if necessary. Note that this may
  // throw an exception if loading of the constraint classes causes
  // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError.
  // If an exception is thrown, returns the bci of the
  // exception handler which caused the exception to be thrown, which
  // is needed for proper retries. See, for example,
  // InterpreterRuntime::exception_handler_for_exception.
  static int fast_exception_handler_bci_for(methodHandle mh, KlassHandle ex_klass, int throw_bci, TRAPS);

  // method data access
  MethodData* method_data() const {
    return _method_data;
  }

  void set_method_data(MethodData* data) {
    // The store into method must be released. On platforms without
    // total store order (TSO) the reference may become visible before
    // the initialization of data otherwise.
    OrderAccess::release_store_ptr((volatile void *)&_method_data, data);
  }

  MethodCounters* method_counters() const {
    return _method_counters;
  }

  void set_method_counters(MethodCounters* counters) {
    // The store into method must be released. On platforms without
    // total store order (TSO) the reference may become visible before
    // the initialization of data otherwise.
    OrderAccess::release_store_ptr((volatile void *)&_method_counters, counters);
  }

#ifdef TIERED
  // We are reusing interpreter_invocation_count as a holder for the previous event count!
  // We can do that since interpreter_invocation_count is not used in tiered.
  int prev_event_count() const {
    if (method_counters() == NULL) {
      return 0;
    } else {
      return method_counters()->interpreter_invocation_count();
    }
  }
  void set_prev_event_count(int count) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_interpreter_invocation_count(count);
    }
  }
  jlong prev_time() const {
    MethodCounters* mcs = method_counters();
    return mcs == NULL ? 0 : mcs->prev_time();
  }
  void set_prev_time(jlong time) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_prev_time(time);
    }
  }
  float rate() const {
    MethodCounters* mcs = method_counters();
    return mcs == NULL ? 0 : mcs->rate();
  }
  void set_rate(float rate) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_rate(rate);
    }
  }
#endif

  int nmethod_age() const {
    if (method_counters() == NULL) {
      return INT_MAX;
    } else {
      return method_counters()->nmethod_age();
    }
  }

  int invocation_count();
  int backedge_count();

  bool was_executed_more_than(int n);
  bool was_never_executed() { return !was_executed_more_than(0); }

  static void build_interpreter_method_data(methodHandle method, TRAPS);

  static MethodCounters* build_method_counters(Method* m, TRAPS);
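  // MethodCounters are allocated lazily by build_method_counters(); callers
  // that may trigger allocation go through get_method_counters(CHECK), declared
  // further down, and must tolerate a NULL result if the allocation fails.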
  int interpreter_invocation_count() {
    if (TieredCompilation) {
      return invocation_count();
    } else {
      MethodCounters* mcs = method_counters();
      return (mcs == NULL) ? 0 : mcs->interpreter_invocation_count();
    }
  }
  int increment_interpreter_invocation_count(TRAPS) {
    if (TieredCompilation) ShouldNotReachHere();
    MethodCounters* mcs = get_method_counters(CHECK_0);
    return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count();
  }

#ifndef PRODUCT
  int compiled_invocation_count() const         { return _compiled_invocation_count; }
  void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; }
#else
  // for PrintMethodData in a product build
  int compiled_invocation_count() const         { return 0; }
#endif // not PRODUCT

  // Clear (non-shared space) pointers which could not be relevant
  // if this (shared) method were mapped into another JVM.
  void remove_unshareable_info();

  // nmethod/verified compiler entry
  address verified_code_entry();
  bool check_code() const;  // Not inline to avoid circular ref
  nmethod* volatile code() const { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
  void clear_code();        // Clear out any compiled code
  static void set_code(methodHandle mh, nmethod* code);
  void set_adapter_entry(AdapterHandlerEntry* adapter) { _adapter = adapter; }
  address get_i2c_entry();
  address get_c2i_entry();
  address get_c2i_unverified_entry();
  AdapterHandlerEntry* adapter() { return _adapter; }
  // setup entry points
  void link_method(methodHandle method, TRAPS);
  // clear entry points. Used by sharing code
  void unlink_method();

  // vtable index
  enum VtableIndexFlag {
    // Valid vtable indexes are non-negative (>= 0).
    // These few negative values are used as sentinels.
    itable_index_max        = -10, // first itable index, growing downward
    pending_itable_index    = -9,  // itable index will be assigned
    invalid_vtable_index    = -4,  // distinct from any valid vtable index
    garbage_vtable_index    = -3,  // not yet linked; no vtable layout yet
    nonvirtual_vtable_index = -2   // there is no need for vtable dispatch
    // 6330203 Note: Do not use -1, which was overloaded with many meanings.
  };
  DEBUG_ONLY(bool valid_vtable_index() const { return _vtable_index >= nonvirtual_vtable_index; })
  bool has_vtable_index() const    { return _vtable_index >= 0; }
  int vtable_index() const         { return _vtable_index; }
  void set_vtable_index(int index) { _vtable_index = index; }
  DEBUG_ONLY(bool valid_itable_index() const { return _vtable_index <= pending_itable_index; })
  bool has_itable_index() const    { return _vtable_index <= itable_index_max; }
  int itable_index() const         { assert(valid_itable_index(), ""); return itable_index_max - _vtable_index; }
  void set_itable_index(int index) { _vtable_index = itable_index_max - index; assert(valid_itable_index(), ""); }

  // interpreter entry
  address interpreter_entry() const { return _i2i_entry; }
  // Only used during initialization so we can set both _i2i_entry and _from_interpreted_entry
  void set_interpreter_entry(address entry) { _i2i_entry = entry; _from_interpreted_entry = entry; }

  // native function (used for native methods only)
  enum {
    native_bind_event_is_interesting = true
  };
  address native_function() const { return *(native_function_addr()); }
  address critical_native_function();
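  // For native methods the function pointer and the signature handler are not
  // declared fields: they live in the two embedded words appended after the
  // Method object itself (see native_function_addr() at the end of this class
  // and the native_function_offset()/signature_handler_offset() helpers below),
  // which is why the interpreter can address them at fixed offsets.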
  // Must specify a real function (not NULL).
  // Use clear_native_function() to unregister.
  void set_native_function(address function, bool post_event_flag);
  bool has_native_function() const;
  void clear_native_function();

  // signature handler (used for native methods only)
  address signature_handler() const { return *(signature_handler_addr()); }
  void set_signature_handler(address handler);

  // Interpreter oopmap support
  void mask_for(int bci, InterpreterOopMap* mask);

  // operations on invocation counter
  void print_invocation_count();

  // byte codes
  void set_code(address code)      { return constMethod()->set_code(code); }
  address code_base() const        { return constMethod()->code_base(); }
  bool contains(address bcp) const { return constMethod()->contains(bcp); }

  // prints byte codes
  void print_codes() const { print_codes_on(tty); }
  void print_codes_on(outputStream* st) const;
  void print_codes_on(int from, int to, outputStream* st) const;

  // method parameters
  bool has_method_parameters() const   { return constMethod()->has_method_parameters(); }
  int method_parameters_length() const { return constMethod()->method_parameters_length(); }
  MethodParametersElement* method_parameters_start() const
                                       { return constMethod()->method_parameters_start(); }

  // checked exceptions
  int checked_exceptions_length() const { return constMethod()->checked_exceptions_length(); }
  CheckedExceptionElement* checked_exceptions_start() const
                                        { return constMethod()->checked_exceptions_start(); }

  // localvariable table
  bool has_localvariable_table() const   { return constMethod()->has_localvariable_table(); }
  int localvariable_table_length() const { return constMethod()->localvariable_table_length(); }
  LocalVariableTableElement* localvariable_table_start() const
                                         { return constMethod()->localvariable_table_start(); }

  bool has_linenumber_table() const           { return constMethod()->has_linenumber_table(); }
  u_char* compressed_linenumber_table() const { return constMethod()->compressed_linenumber_table(); }

  // method holder (the Klass* holding this method)
  InstanceKlass* method_holder() const { return constants()->pool_holder(); }

  void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments)
  Symbol* klass_name() const;     // returns the name of the method holder
  BasicType result_type() const;  // type of the method result
  int result_type_index() const;  // type index of the method result
  bool is_returning_oop() const { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY); }
  bool is_returning_fp() const  { BasicType r = result_type(); return (r == T_FLOAT || r == T_DOUBLE); }

  // Checked exceptions thrown by this method (resolved to mirrors)
  objArrayHandle resolved_checked_exceptions(TRAPS) { return resolved_checked_exceptions_impl(this, THREAD); }

  // Access flags
  bool is_public() const          { return access_flags().is_public(); }
  bool is_private() const         { return access_flags().is_private(); }
  bool is_protected() const       { return access_flags().is_protected(); }
  bool is_package_private() const { return !is_public() && !is_private() && !is_protected(); }
  bool is_static() const          { return access_flags().is_static(); }
  bool is_final() const           { return access_flags().is_final(); }
  bool is_synchronized() const    { return access_flags().is_synchronized(); }
  bool is_native() const          { return access_flags().is_native(); }
  bool is_abstract() const        { return access_flags().is_abstract(); }
  bool is_strict() const          { return access_flags().is_strict(); }
  bool is_synthetic() const       { return access_flags().is_synthetic(); }

  // returns true if contains only return operation
  bool is_empty_method() const;

  // returns true if this is a vanilla constructor
  bool is_vanilla_constructor() const;

  // checks method and its method holder
  bool is_final_method() const;
  bool is_final_method(AccessFlags class_access_flags) const;
  bool is_default_method() const;

  // true if method needs no dynamic dispatch (final and/or no vtable entry)
  bool can_be_statically_bound() const;
  bool can_be_statically_bound(AccessFlags class_access_flags) const;

  // returns true if the method has any backward branches.
  bool has_loops() {
    return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag();
  };

  bool compute_has_loops_flag();

  bool has_jsrs() {
    return access_flags().has_jsrs();
  };
  void set_has_jsrs() {
    _access_flags.set_has_jsrs();
  }

  // returns true if the method has any monitors.
  bool has_monitors() const         { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
  bool has_monitor_bytecodes() const { return access_flags().has_monitor_bytecodes(); }

  void set_has_monitor_bytecodes() { _access_flags.set_has_monitor_bytecodes(); }

  // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit bytecodes
  // properly nest in the method. It might return false, even though they actually nest properly, since the
  // information has not been computed yet.
  bool guaranteed_monitor_matching() const { return access_flags().is_monitor_matching(); }
  void set_guaranteed_monitor_matching()   { _access_flags.set_monitor_matching(); }

  // returns true if the method is an accessor function (setter/getter).
  bool is_accessor() const;

  // returns true if the method is an initializer (<init> or <clinit>).
  bool is_initializer() const;

  // returns true if the method is static OR if the classfile version < 51
  bool has_valid_initializer_flags() const;

  // returns true if the method name is <clinit> and the method has
  // valid static initializer flags.
  bool is_static_initializer() const;

  // compiled code support
  // NOTE: code() is inherently racy as deopt can be clearing code
  // simultaneously. Use with caution.
  bool has_compiled_code() const { return code() != NULL; }

  // sizing
  static int header_size() { return sizeof(Method)/HeapWordSize; }
  static int size(bool is_native);
  int size() const { return method_size(); }
#if INCLUDE_SERVICES
  void collect_statistics(KlassSizeStats *sz) const;
#endif

  // interpreter support
  static ByteSize const_offset()          { return byte_offset_of(Method, _constMethod); }
  static ByteSize access_flags_offset()   { return byte_offset_of(Method, _access_flags); }
#ifdef CC_INTERP
  static ByteSize result_index_offset()   { return byte_offset_of(Method, _result_index); }
#endif /* CC_INTERP */
  static ByteSize from_compiled_offset()  { return byte_offset_of(Method, _from_compiled_entry); }
  static ByteSize code_offset()           { return byte_offset_of(Method, _code); }
  static ByteSize method_data_offset() {
    return byte_offset_of(Method, _method_data);
  }
  static ByteSize method_counters_offset() {
    return byte_offset_of(Method, _method_counters);
  }
#ifndef PRODUCT
  static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); }
#endif // not PRODUCT
  static ByteSize native_function_offset()   { return in_ByteSize(sizeof(Method)); }
  static ByteSize from_interpreted_offset()  { return byte_offset_of(Method, _from_interpreted_entry); }
  static ByteSize interpreter_entry_offset() { return byte_offset_of(Method, _i2i_entry); }
  static ByteSize signature_handler_offset() { return in_ByteSize(sizeof(Method) + wordSize); }

  // for code generation
  static int method_data_offset_in_bytes()  { return offset_of(Method, _method_data); }
  static int intrinsic_id_offset_in_bytes() { return offset_of(Method, _intrinsic_id); }
  static int intrinsic_id_size_in_bytes()   { return sizeof(u1); }

  // Static methods that are used to implement member methods where an exposed this pointer
  // is needed due to possible GCs
  static objArrayHandle resolved_checked_exceptions_impl(Method* method, TRAPS);

  // Returns the byte code index from the byte code pointer
  int bci_from(address bcp) const;
  address bcp_from(int bci) const;
  address bcp_from(address bcp) const;
  int validate_bci_from_bcp(address bcp) const;
  int validate_bci(int bci) const;

  // Returns the line number for a bci if debugging information for the method is provided,
  // -1 is returned otherwise.
  int line_number_from_bci(int bci) const;

  // Reflection support
  bool is_overridden_in(Klass* k) const;

  // Stack walking support
  bool is_ignored_by_security_stack_walk() const;

  // JSR 292 support
  bool is_method_handle_intrinsic() const;  // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
  bool is_compiled_lambda_form() const;     // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
  bool has_member_arg() const;              // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
  static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual
                                                   Symbol* signature,    // anything at all
                                                   TRAPS);
  static Klass* check_non_bcp_klass(Klass* klass);

  // How many extra stack entries for invokedynamic when it's enabled
  static const int extra_stack_entries_for_jsr292 = 1;

  // this operates only on invoke methods:
  // presize interpreter frames for extra interpreter stack entries, if needed
  // Account for the extra appendix argument for invokehandle/invokedynamic
  static int extra_stack_entries() { return extra_stack_entries_for_jsr292; }
  static int extra_stack_words();  // = extra_stack_entries() * Interpreter::stackElementSize

  // RedefineClasses() support:
  bool is_old() const      { return access_flags().is_old(); }
  void set_is_old()        { _access_flags.set_is_old(); }
  bool is_obsolete() const { return access_flags().is_obsolete(); }
  void set_is_obsolete()   { _access_flags.set_is_obsolete(); }
  bool is_deleted() const  { return access_flags().is_deleted(); }
  void set_is_deleted()    { _access_flags.set_is_deleted(); }

  bool is_emcp() const {
    // emcp methods (equivalent methods except for the constant pool) are methods
    // that are old but not obsolete or deleted.
    return is_old() && !(is_obsolete() || is_deleted());
  }

  bool on_stack() const { return access_flags().on_stack(); }
  void set_on_stack(const bool value);

  // see the definition in Method*.cpp for the gory details
  bool should_not_be_cached() const;

  // JVMTI Native method prefixing support:
  bool is_prefixed_native() const { return access_flags().is_prefixed_native(); }
  void set_is_prefixed_native()   { _access_flags.set_is_prefixed_native(); }

  // Rewriting support
  static methodHandle clone_with_new_data(methodHandle m, u_char* new_code, int new_code_length,
                                          u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS);

  // jmethodID handling
  // Because the useful life-span of a jmethodID cannot be determined,
  // once created they are never reclaimed. The methods to which they refer,
  // however, can be GC'ed away if the class is unloaded or if the method is
  // made obsolete or deleted -- in these cases, the jmethodID
  // refers to NULL (as is the case for any weak reference).
  static jmethodID make_jmethod_id(ClassLoaderData* loader_data, Method* mh);
  static void destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID mid);

  // Use resolve_jmethod_id() in situations where the caller is expected
  // to provide a valid jmethodID; the only sanity checks are in asserts;
  // result guaranteed not to be NULL.
  inline static Method* resolve_jmethod_id(jmethodID mid) {
    assert(mid != NULL, "JNI method id should not be null");
    return *((Method**)mid);
  }
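  // Note: a jmethodID is a pointer to a Method* slot rather than to the Method
  // itself (resolve_jmethod_id() above simply dereferences it). The extra
  // indirection is what allows change_method_associated_with_jmethod_id() to
  // re-point an existing ID after RedefineClasses, and allows the slot to be
  // cleared to NULL when the method is deleted, as described above.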
  // Use checked_resolve_jmethod_id() in situations where the caller
  // should provide a valid jmethodID, but might not. NULL is returned
  // when the jmethodID does not refer to a valid method.
  static Method* checked_resolve_jmethod_id(jmethodID mid);

  static void change_method_associated_with_jmethod_id(jmethodID old_jmid_ptr, Method* new_method);
  static bool is_method_id(jmethodID mid);

  // Clear methods
  static void clear_jmethod_ids(ClassLoaderData* loader_data);
  static void print_jmethod_ids(ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;

  // Get this method's jmethodID -- allocate if it doesn't exist
  jmethodID jmethod_id() { methodHandle this_h(this);
                           return InstanceKlass::get_jmethod_id(method_holder(), this_h); }

  // Lookup the jmethodID for this method. Return NULL if not found.
  // NOTE that this function can be called from a signal handler
  // (see AsyncGetCallTrace support for Forte Analyzer) and this
  // needs to be async-safe. No allocation should be done and
  // so handles are not used to avoid deadlock.
  jmethodID find_jmethod_id_or_null() { return method_holder()->jmethod_id_or_null(this); }

  // Support for inlining of intrinsic methods
  vmIntrinsics::ID intrinsic_id() const      { return (vmIntrinsics::ID) _intrinsic_id; }
  void set_intrinsic_id(vmIntrinsics::ID id) { _intrinsic_id = (u1) id; }

  // Helper routines for intrinsic_id() and vmIntrinsics::method().
  void init_intrinsic_id();  // updates from _none if a match
  static vmSymbols::SID klass_id_for_intrinsics(Klass* holder);

  bool jfr_towrite() {
    return (_flags & _jfr_towrite) != 0;
  }
  void set_jfr_towrite(bool x) {
    _flags = x ? (_flags | _jfr_towrite) : (_flags & ~_jfr_towrite);
  }

  bool caller_sensitive() {
    return (_flags & _caller_sensitive) != 0;
  }
  void set_caller_sensitive(bool x) {
    _flags = x ? (_flags | _caller_sensitive) : (_flags & ~_caller_sensitive);
  }

  bool force_inline() {
    return (_flags & _force_inline) != 0;
  }
  void set_force_inline(bool x) {
    _flags = x ? (_flags | _force_inline) : (_flags & ~_force_inline);
  }

  bool dont_inline() {
    return (_flags & _dont_inline) != 0;
  }
  void set_dont_inline(bool x) {
    _flags = x ? (_flags | _dont_inline) : (_flags & ~_dont_inline);
  }

  bool is_hidden() {
    return (_flags & _hidden) != 0;
  }
  void set_hidden(bool x) {
    _flags = x ? (_flags | _hidden) : (_flags & ~_hidden);
  }

  ConstMethod::MethodType method_type() const {
    return _constMethod->method_type();
  }
  bool is_overpass() const { return method_type() == ConstMethod::OVERPASS; }

  // On-stack replacement support
  bool has_osr_nmethod(int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
  }

  nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
  }

  // Inline cache support
  void cleanup_inline_caches();

  // Find if klass for method is loaded
  bool is_klass_loaded_by_klass_index(int klass_index) const;
  bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const;

  // Indicates whether compilation failed earlier for this method, or
  // whether it is not compilable for another reason like having a
  // breakpoint set in it.
  bool is_not_compilable(int comp_level = CompLevel_any) const;
  void set_not_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
  void set_not_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_compilable(comp_level, false);
  }
  bool is_not_osr_compilable(int comp_level = CompLevel_any) const;
  void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
  void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_osr_compilable(comp_level, false);
  }
  bool is_always_compilable() const;

 private:
  void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);

 public:
  MethodCounters* get_method_counters(TRAPS) {
    if (_method_counters == NULL) {
      build_method_counters(this, CHECK_AND_CLEAR_NULL);
    }
    return _method_counters;
  }

  bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); }
  void set_not_c1_compilable()      { _access_flags.set_not_c1_compilable(); }
  void clear_not_c1_compilable()    { _access_flags.clear_not_c1_compilable(); }
  bool is_not_c2_compilable() const { return access_flags().is_not_c2_compilable(); }
  void set_not_c2_compilable()      { _access_flags.set_not_c2_compilable(); }
  void clear_not_c2_compilable()    { _access_flags.clear_not_c2_compilable(); }

  bool is_not_c1_osr_compilable() const { return is_not_c1_compilable(); }  // don't waste an accessFlags bit
  void set_not_c1_osr_compilable()      { set_not_c1_compilable(); }        // don't waste an accessFlags bit
  void clear_not_c1_osr_compilable()    { clear_not_c1_compilable(); }      // don't waste an accessFlags bit
  bool is_not_c2_osr_compilable() const { return access_flags().is_not_c2_osr_compilable(); }
  void set_not_c2_osr_compilable()      { _access_flags.set_not_c2_osr_compilable(); }
  void clear_not_c2_osr_compilable()    { _access_flags.clear_not_c2_osr_compilable(); }

  // Background compilation support
  bool queued_for_compilation() const { return access_flags().queued_for_compilation(); }
  void set_queued_for_compilation()   { _access_flags.set_queued_for_compilation(); }
  void clear_queued_for_compilation() { _access_flags.clear_queued_for_compilation(); }

  // Resolve all classes in signature, return 'true' if successful
  static bool load_signature_classes(methodHandle m, TRAPS);

  // Return true if not all classes referenced in the signature, including the
  // return type, have been loaded.
  static bool has_unloaded_classes_in_signature(methodHandle m, TRAPS);

  // Printing
  void print_short_name(outputStream* st = tty); // prints as klassname::methodname; Exposed so field engineers can debug VM
#if INCLUDE_JVMTI
  void print_name(outputStream* st = tty); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses
#else
  void print_name(outputStream* st = tty) PRODUCT_RETURN; // prints as "virtual void foo(int)"
#endif

  // Helper routine used for method sorting
  static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true);

  // Deallocation function for redefine classes or if an error occurs
  void deallocate_contents(ClassLoaderData* loader_data);

  // Printing
#ifndef PRODUCT
  void print_on(outputStream* st) const;
#endif
  void print_value_on(outputStream* st) const;

  const char* internal_name() const { return "{method}"; }

  // Check for valid method pointer
  static bool has_method_vptr(const void* ptr);
  bool is_valid_method() const;

  // Verify
  void verify() { verify_on(tty); }
  void verify_on(outputStream* st);

 private:

  // Inlined elements
  address* native_function_addr() const  { assert(is_native(), "must be native"); return (address*) (this+1); }
  address* signature_handler_addr() const { return native_function_addr() + 1; }
};


// Utility class for compressing line number tables

class CompressedLineNumberWriteStream: public CompressedWriteStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberWriteStream(int initial_size) : CompressedWriteStream(initial_size), _bci(0), _line(0) {}
  CompressedLineNumberWriteStream(u_char* buffer, int initial_size) : CompressedWriteStream(buffer, initial_size), _bci(0), _line(0) {}

  // Write (bci, line number) pair to stream
  void write_pair_regular(int bci_delta, int line_delta);

  inline void write_pair_inline(int bci, int line) {
    int bci_delta = bci - _bci;
    int line_delta = line - _line;
    _bci = bci;
    _line = line;
    // Skip (0,0) deltas - they do not add information and conflict with terminator.
    if (bci_delta == 0 && line_delta == 0) return;
    // Check if bci is 5-bit and line number 3-bit unsigned.
    if (((bci_delta & ~0x1F) == 0) && ((line_delta & ~0x7) == 0)) {
      // Compress into single byte.
      jubyte value = ((jubyte) bci_delta << 3) | (jubyte) line_delta;
      // Check that value doesn't match escape character.
      if (value != 0xFF) {
        write_byte(value);
        return;
      }
    }
    write_pair_regular(bci_delta, line_delta);
  }

  // Windows AMD64 + Apr 2005 PSDK with /O2 generates bad code for write_pair.
  // Disabling optimization doesn't work for methods in header files
  // so we force it to call through the non-optimized version in the .cpp.
  // It's gross, but it's the only way we can ensure that all callers are
  // fixed.  _MSC_VER is defined by the windows compiler
#if defined(_M_AMD64) && _MSC_VER >= 1400
  void write_pair(int bci, int line);
#else
  void write_pair(int bci, int line) { write_pair_inline(bci, line); }
#endif

  // Write end-of-stream marker
  void write_terminator() { write_byte(0); }
};


// Utility class for decompressing line number tables

class CompressedLineNumberReadStream: public CompressedReadStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberReadStream(u_char* buffer);
  // Read (bci, line number) pair from stream. Returns false at end-of-stream.
  bool read_pair();
  // Accessing bci and line number (after calling read_pair)
  int bci() const  { return _bci; }
  int line() const { return _line; }
};
/// Fast Breakpoints.

// If this structure gets more complicated (because bpts get numerous),
// move it into its own header.

// There is presently no provision for concurrent access
// to breakpoint lists, which is only OK for JVMTI because
// breakpoints are written only at safepoints, and are read
// concurrently only outside of safepoints.

class BreakpointInfo : public CHeapObj<mtClass> {
  friend class VMStructs;
 private:
  Bytecodes::Code  _orig_bytecode;
  int              _bci;
  u2               _name_index;       // of method
  u2               _signature_index;  // of method
  BreakpointInfo*  _next;             // simple storage allocation

 public:
  BreakpointInfo(Method* m, int bci);

  // accessors
  Bytecodes::Code orig_bytecode()               { return _orig_bytecode; }
  void set_orig_bytecode(Bytecodes::Code code)  { _orig_bytecode = code; }
  int bci()                                     { return _bci; }

  BreakpointInfo* next() const        { return _next; }
  void set_next(BreakpointInfo* n)    { _next = n; }

  // helps for searchers
  bool match(const Method* m, int bci) {
    return bci == _bci && match(m);
  }

  bool match(const Method* m) {
    return _name_index == m->name_index() &&
      _signature_index == m->signature_index();
  }

  void set(Method* method);
  void clear(Method* method);
};

// Utility class for accessing exception handlers
class ExceptionTable : public StackObj {
 private:
  ExceptionTableElement* _table;
  u2 _length;

 public:
  ExceptionTable(const Method* m) {
    if (m->has_exception_handler()) {
      _table = m->exception_table_start();
      _length = m->exception_table_length();
    } else {
      _table = NULL;
      _length = 0;
    }
  }

  int length() const {
    return _length;
  }

  u2 start_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].start_pc;
  }

  void set_start_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].start_pc = value;
  }

  u2 end_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].end_pc;
  }

  void set_end_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].end_pc = value;
  }

  u2 handler_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].handler_pc;
  }

  void set_handler_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].handler_pc = value;
  }

  u2 catch_type_index(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].catch_type_index;
  }

  void set_catch_type_index(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].catch_type_index = value;
  }
};

#endif // SHARE_VM_OOPS_METHODOOP_HPP