/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_METHODOOP_HPP
#define SHARE_VM_OOPS_METHODOOP_HPP

#include "classfile/vmSymbols.hpp"
#include "code/compressedStream.hpp"
#include "compiler/compilerDefinitions.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/invocationCounter.hpp"
#include "oops/annotations.hpp"
#include "oops/constantPool.hpp"
#include "oops/methodCounters.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.hpp"
#include "oops/typeArrayOop.hpp"
#include "utilities/accessFlags.hpp"
#include "utilities/align.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_JFR
#include "jfr/support/jfrTraceIdExtension.hpp"
#endif


// A Method represents a Java method.
//
// Note that most applications load thousands of methods, so keeping the size of this
// class small has a big impact on footprint.
//
// Note that native_function and signature_handler have to be at fixed offsets
// (required by the interpreter)
//
//  Method embedded field layout (after declared fields):
//   [EMBEDDED native_function    (present only if native) ]
//   [EMBEDDED signature_handler  (present only if native) ]

class CheckedExceptionElement;
class LocalVariableTableElement;
class AdapterHandlerEntry;
class MethodData;
class MethodCounters;
class ConstMethod;
class InlineTableSizes;
class KlassSizeStats;
class CompiledMethod;

class Method : public Metadata {
 friend class VMStructs;
 friend class JVMCIVMStructs;
 private:
  // If you add a new field that points to any metaspace object, you
  // must add this field to Method::metaspace_pointers_do().
  ConstMethod*      _constMethod;       // Method read-only data.
  MethodData*       _method_data;
  MethodCounters*   _method_counters;
  AccessFlags       _access_flags;      // Access flags
  int               _vtable_index;      // vtable index of this method (see VtableIndexFlag)
                                        // note: can have vtables with >2**16 elements (because of inheritance)
  u2                _intrinsic_id;      // vmSymbols::intrinsic_id (0 == _none)

  // Flags
  enum Flags {
    _caller_sensitive       = 1 << 0,
    _force_inline           = 1 << 1,
    _dont_inline            = 1 << 2,
    _hidden                 = 1 << 3,
    _has_injected_profile   = 1 << 4,
    _running_emcp           = 1 << 5,
    _intrinsic_candidate    = 1 << 6,
    _reserved_stack_access  = 1 << 7,
    _known_not_returning_vt = 1 << 8,   // <- See byte_value_for_known_not_returning_vt()
    _known_returning_vt     = 1 << 9,   // <- for these 2 bits.
    _unused_bits_mask       = 0xfc00
  };
  mutable u2 _flags;

  JFR_ONLY(DEFINE_TRACE_FLAG;)

#ifndef PRODUCT
  int               _compiled_invocation_count;  // Number of nmethod invocations so far (for perf. debugging)
#endif
  // Entry point for calling both from and to the interpreter.
  address _i2i_entry;           // All-args-on-stack calling convention
  // Entry point for calling from compiled code, to compiled code if it exists
  // or else the interpreter.
  volatile address _from_compiled_entry;     // Cache of: _code ? _code->entry_point() : _adapter->c2i_entry()
  // The entry point for calling both from and to compiled code is
  // "_code->entry_point()".  Because of tiered compilation and de-opt, this
  // field can come and go.  It can transition from NULL to not-null at any
  // time (whenever a compile completes).  It can transition from not-null to
  // NULL only at safepoints (because of a de-opt).
  CompiledMethod* volatile _code;            // Points to the corresponding piece of native code
  volatile address _from_interpreted_entry;  // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
  int _max_vt_buffer;                        // max number of VT buffer chunks to use before recycling

#if INCLUDE_AOT && defined(TIERED)
  CompiledMethod* _aot_code;
#endif

  // Constructor
  Method(ConstMethod* xconst, AccessFlags access_flags);
 public:

  static Method* allocate(ClassLoaderData* loader_data,
                          int byte_code_size,
                          AccessFlags access_flags,
                          InlineTableSizes* sizes,
                          ConstMethod::MethodType method_type,
                          TRAPS);

  // CDS and vtbl checking can create an empty Method to get vtbl pointer.
  Method(){}

  bool is_method() const volatile { return true; }

  void restore_unshareable_info(TRAPS);

  // accessors for instance variables

  ConstMethod* constMethod() const          { return _constMethod; }
  void set_constMethod(ConstMethod* xconst) { _constMethod = xconst; }


  static address make_adapters(const methodHandle& mh, TRAPS);
  address from_compiled_entry() const;
  address from_compiled_entry_no_trampoline() const;
  address from_interpreted_entry() const;

  // access flag
  AccessFlags access_flags() const          { return _access_flags;  }
  void set_access_flags(AccessFlags flags)  { _access_flags = flags; }

  // name
  Symbol* name() const                      { return constants()->symbol_at(name_index()); }
  int name_index() const                    { return constMethod()->name_index(); }
  void set_name_index(int index)            { constMethod()->set_name_index(index); }

  // signature
  Symbol* signature() const                 { return constants()->symbol_at(signature_index()); }
  int signature_index() const               { return constMethod()->signature_index(); }
  void set_signature_index(int index)       { constMethod()->set_signature_index(index); }

  // generics support
  Symbol* generic_signature() const         { int idx = generic_signature_index(); return ((idx != 0) ? constants()->symbol_at(idx) : (Symbol*)NULL); }
  int generic_signature_index() const       { return constMethod()->generic_signature_index(); }
  void set_generic_signature_index(int index) { constMethod()->set_generic_signature_index(index); }

  // annotations support
  AnnotationArray* annotations() const {
    return constMethod()->method_annotations();
  }
  AnnotationArray* parameter_annotations() const {
    return constMethod()->parameter_annotations();
  }
  AnnotationArray* annotation_default() const {
    return constMethod()->default_annotations();
  }
  AnnotationArray* type_annotations() const {
    return constMethod()->type_annotations();
  }

  // Helper routine: get klass name + "." + method name + signature as
  // C string, for the purpose of providing more useful NoSuchMethodErrors
  // and fatal error handling. The string is allocated in resource
  // area if a buffer is not provided by the caller.
  char* name_and_sig_as_C_string() const;
  char* name_and_sig_as_C_string(char* buf, int size) const;

  // Static routines for situations where we don't have a Method*
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature);
  static char* name_and_sig_as_C_string(Klass* klass, Symbol* method_name, Symbol* signature, char* buf, int size);

  Bytecodes::Code java_code_at(int bci) const {
    return Bytecodes::java_code_at(this, bcp_from(bci));
  }
  Bytecodes::Code code_at(int bci) const {
    return Bytecodes::code_at(this, bcp_from(bci));
  }

  // JVMTI breakpoints
#if !INCLUDE_JVMTI
  Bytecodes::Code orig_bytecode_at(int bci) const {
    ShouldNotReachHere();
    return Bytecodes::_shouldnotreachhere;
  }
  void set_orig_bytecode_at(int bci, Bytecodes::Code code) {
    ShouldNotReachHere();
  };
  u2   number_of_breakpoints() const { return 0; }
#else // !INCLUDE_JVMTI
  Bytecodes::Code orig_bytecode_at(int bci) const;
  void set_orig_bytecode_at(int bci, Bytecodes::Code code);
  void set_breakpoint(int bci);
  void clear_breakpoint(int bci);
  void clear_all_breakpoints();
  // Tracking number of breakpoints, for fullspeed debugging.
  // Only mutated by VM thread.
  u2 number_of_breakpoints() const {
    MethodCounters* mcs = method_counters();
    if (mcs == NULL) {
      return 0;
    } else {
      return mcs->number_of_breakpoints();
    }
  }
  void incr_number_of_breakpoints(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->incr_number_of_breakpoints();
    }
  }
  void decr_number_of_breakpoints(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->decr_number_of_breakpoints();
    }
  }
  // Initialization only
  void clear_number_of_breakpoints() {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->clear_number_of_breakpoints();
    }
  }
#endif // !INCLUDE_JVMTI

  // index into InstanceKlass methods() array
  // note: also used by jfr
  u2 method_idnum() const                { return constMethod()->method_idnum(); }
  void set_method_idnum(u2 idnum)        { constMethod()->set_method_idnum(idnum); }

  u2 orig_method_idnum() const           { return constMethod()->orig_method_idnum(); }
  void set_orig_method_idnum(u2 idnum)   { constMethod()->set_orig_method_idnum(idnum); }

  // code size
  int code_size() const                  { return constMethod()->code_size(); }

  // method size in words
  int method_size() const                { return sizeof(Method)/wordSize + ( is_native() ? 2 : 0 ); }

  // constant pool for Klass* holding this method
  ConstantPool* constants() const        { return constMethod()->constants(); }
  void set_constants(ConstantPool* c)    { constMethod()->set_constants(c); }

  // max stack
  // return original max stack size for method verification
  int verifier_max_stack() const         { return constMethod()->max_stack(); }
  int max_stack() const                  { return constMethod()->max_stack() + extra_stack_entries(); }
  void set_max_stack(int size)           { constMethod()->set_max_stack(size); }

  // max locals
  int max_locals() const                 { return constMethod()->max_locals(); }
  void set_max_locals(int size)          { constMethod()->set_max_locals(size); }

  // value type buffering
  void initialize_max_vt_buffer();
  int max_vt_buffer() const              { return _max_vt_buffer; }
  void set_max_vt_buffer(int size)       { _max_vt_buffer = size; }


  int highest_comp_level() const;
  void set_highest_comp_level(int level);
  int highest_osr_comp_level() const;
  void set_highest_osr_comp_level(int level);

#if COMPILER2_OR_JVMCI
  // Count of times method was exited via exception while interpreting
  void interpreter_throwout_increment(TRAPS) {
    MethodCounters* mcs = get_method_counters(CHECK);
    if (mcs != NULL) {
      mcs->interpreter_throwout_increment();
    }
  }
#endif

  int interpreter_throwout_count() const {
    MethodCounters* mcs = method_counters();
    if (mcs == NULL) {
      return 0;
    } else {
      return mcs->interpreter_throwout_count();
    }
  }

  // size of parameters
  int size_of_parameters() const         { return constMethod()->size_of_parameters(); }
  void set_size_of_parameters(int size)  { constMethod()->set_size_of_parameters(size); }

  bool has_stackmap_table() const {
    return constMethod()->has_stackmap_table();
  }

  Array<u1>* stackmap_data() const {
    return constMethod()->stackmap_data();
  }

  void set_stackmap_data(Array<u1>* sd) {
    constMethod()->set_stackmap_data(sd);
  }

  // exception handler table
  bool has_exception_handler() const
                        { return constMethod()->has_exception_handler(); }
  int exception_table_length() const
                        { return constMethod()->exception_table_length(); }
  ExceptionTableElement* exception_table_start() const
                        { return constMethod()->exception_table_start(); }

  // Finds the first entry point bci of an exception handler for an
  // exception of klass ex_klass thrown at throw_bci. A value of NULL
  // for ex_klass indicates that the exception klass is not known; in
  // this case it matches any constraint class. Returns -1 if the
  // exception cannot be handled in this method. The handler
  // constraint classes are loaded if necessary. Note that this may
  // throw an exception if loading of the constraint classes causes
  // an IllegalAccessError (bugid 4307310) or an OutOfMemoryError.
  // If an exception is thrown, returns the bci of the
  // exception handler which caused the exception to be thrown, which
  // is needed for proper retries. See, for example,
  // InterpreterRuntime::exception_handler_for_exception.
  static int fast_exception_handler_bci_for(const methodHandle& mh, Klass* ex_klass, int throw_bci, TRAPS);
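
  // A minimal usage sketch (illustrative only, not used by the VM; the local
  // names below are hypothetical): the raw exception table accessors above are
  // typically consumed through the ExceptionTable helper declared at the end
  // of this file.
  //
  //   ExceptionTable table(method);            // method is a const Method*
  //   for (int i = 0; i < table.length(); i++) {
  //     if (throw_bci >= table.start_pc(i) && throw_bci < table.end_pc(i)) {
  //       // catch_type_index(i) == 0 means a catch-all handler; otherwise it
  //       // is a constant-pool index naming the handler's exception class.
  //       int handler_bci = table.handler_pc(i);
  //       // ... resolve the catch type and dispatch; see fast_exception_handler_bci_for()
  //     }
  //   }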

  // method data access
  MethodData* method_data() const {
    return _method_data;
  }

  void set_method_data(MethodData* data);

  MethodCounters* method_counters() const {
    return _method_counters;
  }

  void clear_method_counters() {
    _method_counters = NULL;
  }

  bool init_method_counters(MethodCounters* counters);

#ifdef TIERED
  // We are reusing interpreter_invocation_count as a holder for the previous event count!
  // We can do that since interpreter_invocation_count is not used in tiered.
  int prev_event_count() const {
    if (method_counters() == NULL) {
      return 0;
    } else {
      return method_counters()->interpreter_invocation_count();
    }
  }
  void set_prev_event_count(int count) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_interpreter_invocation_count(count);
    }
  }
  jlong prev_time() const {
    MethodCounters* mcs = method_counters();
    return mcs == NULL ? 0 : mcs->prev_time();
  }
  void set_prev_time(jlong time) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_prev_time(time);
    }
  }
  float rate() const {
    MethodCounters* mcs = method_counters();
    return mcs == NULL ? 0 : mcs->rate();
  }
  void set_rate(float rate) {
    MethodCounters* mcs = method_counters();
    if (mcs != NULL) {
      mcs->set_rate(rate);
    }
  }

#if INCLUDE_AOT
  void set_aot_code(CompiledMethod* aot_code) {
    _aot_code = aot_code;
  }

  CompiledMethod* aot_code() const {
    return _aot_code;
  }
#else
  CompiledMethod* aot_code() const { return NULL; }
#endif // INCLUDE_AOT
#endif // TIERED

  int nmethod_age() const {
    if (method_counters() == NULL) {
      return INT_MAX;
    } else {
      return method_counters()->nmethod_age();
    }
  }

  int invocation_count();
  int backedge_count();

  bool was_executed_more_than(int n);
  bool was_never_executed()   { return !was_executed_more_than(0); }

  static void build_interpreter_method_data(const methodHandle& method, TRAPS);

  static MethodCounters* build_method_counters(Method* m, TRAPS);

  int interpreter_invocation_count() {
    if (TieredCompilation) {
      return invocation_count();
    } else {
      MethodCounters* mcs = method_counters();
      return (mcs == NULL) ? 0 : mcs->interpreter_invocation_count();
    }
  }
#if COMPILER2_OR_JVMCI
  int increment_interpreter_invocation_count(TRAPS) {
    if (TieredCompilation) ShouldNotReachHere();
    MethodCounters* mcs = get_method_counters(CHECK_0);
    return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count();
  }
#endif

#ifndef PRODUCT
  int compiled_invocation_count() const          { return _compiled_invocation_count; }
  void set_compiled_invocation_count(int count)  { _compiled_invocation_count = count; }
#else
  // for PrintMethodData in a product build
  int compiled_invocation_count() const          { return 0; }
#endif // not PRODUCT

  // Clear (non-shared space) pointers which would not be relevant
  // if this (shared) method were mapped into another JVM.
  void remove_unshareable_info();

  // nmethod/verified compiler entry
  address verified_code_entry();
  bool check_code() const;              // Not inline to avoid circular ref
  CompiledMethod* volatile code() const;
  void clear_code(bool acquire_lock = true);  // Clear out any compiled code
  static void set_code(const methodHandle& mh, CompiledMethod* code);
  void set_adapter_entry(AdapterHandlerEntry* adapter) {
    constMethod()->set_adapter_entry(adapter);
  }
  void update_adapter_trampoline(AdapterHandlerEntry* adapter) {
    constMethod()->update_adapter_trampoline(adapter);
  }

  address get_i2c_entry();
  address get_c2i_entry();
  address get_c2i_unverified_entry();
  AdapterHandlerEntry* adapter() const {
    return constMethod()->adapter();
  }
  // setup entry points
  void link_method(const methodHandle& method, TRAPS);
  // clear entry points. Used by sharing code during dump time
  void unlink_method() NOT_CDS_RETURN;

  virtual void metaspace_pointers_do(MetaspaceClosure* iter);
  virtual MetaspaceObj::Type type() const { return MethodType; }

  // vtable index
  enum VtableIndexFlag {
    // Valid vtable indexes are non-negative (>= 0).
    // These few negative values are used as sentinels.
    itable_index_max        = -10, // first itable index, growing downward
    pending_itable_index    = -9,  // itable index will be assigned
    invalid_vtable_index    = -4,  // distinct from any valid vtable index
    garbage_vtable_index    = -3,  // not yet linked; no vtable layout yet
    nonvirtual_vtable_index = -2   // there is no need for vtable dispatch
    // 6330203 Note:  Do not use -1, which was overloaded with many meanings.
  };
  DEBUG_ONLY(bool valid_vtable_index() const  { return _vtable_index >= nonvirtual_vtable_index; })
  bool has_vtable_index() const               { return _vtable_index >= 0; }
  int vtable_index() const                    { return _vtable_index; }
  void set_vtable_index(int index);
  DEBUG_ONLY(bool valid_itable_index() const  { return _vtable_index <= pending_itable_index; })
  bool has_itable_index() const               { return _vtable_index <= itable_index_max; }
  int itable_index() const                    { assert(valid_itable_index(), "");
                                                return itable_index_max - _vtable_index; }
  void set_itable_index(int index);
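
  // Worked example of the sentinel encoding above (illustrative): an interface
  // method with itable index 0 stores _vtable_index == itable_index_max (-10),
  // itable index 1 stores -11, and so on ("growing downward"), so itable_index()
  // recovers the index as itable_index_max - _vtable_index, while ordinary
  // vtable indexes remain non-negative and are returned by vtable_index() as-is.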

  // interpreter entry
  address interpreter_entry() const  { return _i2i_entry; }
  // Only used during initial setup, so we can set _i2i_entry and
  // _from_interpreted_entry together.
  void set_interpreter_entry(address entry) {
    assert(!is_shared(), "shared method's interpreter entry should not be changed at run time");
    if (_i2i_entry != entry) {
      _i2i_entry = entry;
    }
    if (_from_interpreted_entry != entry) {
      _from_interpreted_entry = entry;
    }
  }

  // native function (used for native methods only)
  enum {
    native_bind_event_is_interesting = true
  };
  address native_function() const    { return *(native_function_addr()); }
  address critical_native_function();

  // Must specify a real function (not NULL).
  // Use clear_native_function() to unregister.
  void set_native_function(address function, bool post_event_flag);
  bool has_native_function() const;
  void clear_native_function();

  // signature handler (used for native methods only)
  address signature_handler() const  { return *(signature_handler_addr()); }
  void set_signature_handler(address handler);

  // Interpreter oopmap support
  void mask_for(int bci, InterpreterOopMap* mask);

  // operations on invocation counter
  void print_invocation_count();

  // byte codes
  void set_code(address code)         { return constMethod()->set_code(code); }
  address code_base() const           { return constMethod()->code_base(); }
  bool contains(address bcp) const    { return constMethod()->contains(bcp); }

  // prints byte codes
  void print_codes() const            { print_codes_on(tty); }
  void print_codes_on(outputStream* st) const;
  void print_codes_on(int from, int to, outputStream* st) const;

  // method parameters
  bool has_method_parameters() const
                        { return constMethod()->has_method_parameters(); }
  int method_parameters_length() const
                        { return constMethod()->method_parameters_length(); }
  MethodParametersElement* method_parameters_start() const
                        { return constMethod()->method_parameters_start(); }

  // checked exceptions
  int checked_exceptions_length() const
                        { return constMethod()->checked_exceptions_length(); }
  CheckedExceptionElement* checked_exceptions_start() const
                        { return constMethod()->checked_exceptions_start(); }

  // localvariable table
  bool has_localvariable_table() const
                        { return constMethod()->has_localvariable_table(); }
  int localvariable_table_length() const
                        { return constMethod()->localvariable_table_length(); }
  LocalVariableTableElement* localvariable_table_start() const
                        { return constMethod()->localvariable_table_start(); }

  bool has_linenumber_table() const
                        { return constMethod()->has_linenumber_table(); }
  u_char* compressed_linenumber_table() const
                        { return constMethod()->compressed_linenumber_table(); }
  // method holder (the Klass* holding this method)
  InstanceKlass* method_holder() const  { return constants()->pool_holder(); }

  void compute_size_of_parameters(Thread *thread);  // word size of parameters (receiver if any + arguments)
  Symbol* klass_name() const;                       // returns the name of the method holder
  BasicType result_type() const;                    // type of the method result
  bool may_return_oop() const       { BasicType r = result_type(); return (r == T_OBJECT || r == T_ARRAY || r == T_VALUETYPE); }
  bool is_returning_vt() const      { BasicType r = result_type(); return r == T_VALUETYPE; }
#ifdef ASSERT
  ValueKlass* returned_value_type(Thread* thread) const;
#endif

  // Checked exceptions thrown by this method (resolved to mirrors)
  objArrayHandle resolved_checked_exceptions(TRAPS) { return resolved_checked_exceptions_impl(this, THREAD); }

  // Access flags
  bool is_public() const            { return access_flags().is_public();       }
  bool is_private() const           { return access_flags().is_private();      }
  bool is_protected() const         { return access_flags().is_protected();    }
  bool is_package_private() const   { return !is_public() && !is_private() && !is_protected(); }
  bool is_static() const            { return access_flags().is_static();       }
  bool is_final() const             { return access_flags().is_final();        }
  bool is_synchronized() const      { return access_flags().is_synchronized(); }
  bool is_native() const            { return access_flags().is_native();       }
  bool is_abstract() const          { return access_flags().is_abstract();     }
  bool is_strict() const            { return access_flags().is_strict();       }
  bool is_synthetic() const         { return access_flags().is_synthetic();    }

  // returns true if contains only return operation
  bool is_empty_method() const;

  // returns true if this is a vanilla constructor
  bool is_vanilla_constructor() const;

  // checks method and its method holder
  bool is_final_method() const;
  bool is_final_method(AccessFlags class_access_flags) const;
  // interface method declared with 'default' - excludes private interface methods
  bool is_default_method() const;

  // true if method needs no dynamic dispatch (final and/or no vtable entry)
  bool can_be_statically_bound() const;
  bool can_be_statically_bound(AccessFlags class_access_flags) const;

  // returns true if the method has any backward branches.
  bool has_loops() {
    return access_flags().loops_flag_init() ? access_flags().has_loops() : compute_has_loops_flag();
  };

  bool compute_has_loops_flag();

  bool has_jsrs() {
    return access_flags().has_jsrs();
  };
  void set_has_jsrs() {
    _access_flags.set_has_jsrs();
  }

  // returns true if the method has any monitors.
  bool has_monitors() const           { return is_synchronized() || access_flags().has_monitor_bytecodes(); }
  bool has_monitor_bytecodes() const  { return access_flags().has_monitor_bytecodes(); }

  void set_has_monitor_bytecodes()    { _access_flags.set_has_monitor_bytecodes(); }

  // monitor matching. This returns a conservative estimate of whether the monitorenter/monitorexit bytecodes
  // properly nest in the method. It might return false, even though they actually nest properly, since the
  // information has not been computed yet.
  bool guaranteed_monitor_matching() const  { return access_flags().is_monitor_matching(); }
  void set_guaranteed_monitor_matching()    { _access_flags.set_monitor_matching(); }

  // returns true if the method is an accessor function (setter/getter).
  bool is_accessor() const;

  // returns true if the method is a getter
  bool is_getter() const;

  // returns true if the method is a setter
  bool is_setter() const;

  // returns true if the method does nothing but return a constant of primitive type
  bool is_constant_getter() const;

  // returns true if the method is an initializer (<init> or <clinit>).
  bool is_initializer() const;

  // returns true if the method is static OR if the classfile version < 51
  bool has_valid_initializer_flags() const;

  // returns true if the method name is <clinit> and the method has
  // valid static initializer flags.
  bool is_static_initializer() const;

  // returns true if the method name is <init>
  bool is_object_initializer() const;

  // compiled code support
  // NOTE: code() is inherently racy as deopt can be clearing code
  // simultaneously. Use with caution.
  bool has_compiled_code() const;

#ifdef TIERED
  bool has_aot_code() const  { return aot_code() != NULL; }
#endif

  // sizing
  static int header_size() {
    return align_up((int)sizeof(Method), wordSize) / wordSize;
  }
  static int size(bool is_native);
  int size() const           { return method_size(); }
#if INCLUDE_SERVICES
  void collect_statistics(KlassSizeStats *sz) const;
#endif
  void log_touched(TRAPS);
  static void print_touched_methods(outputStream* out);

  // interpreter support
  static ByteSize const_offset()             { return byte_offset_of(Method, _constMethod ); }
  static ByteSize access_flags_offset()      { return byte_offset_of(Method, _access_flags ); }
  static ByteSize from_compiled_offset()     { return byte_offset_of(Method, _from_compiled_entry); }
  static ByteSize code_offset()              { return byte_offset_of(Method, _code); }
  static ByteSize flags_offset()             { return byte_offset_of(Method, _flags); }
  static ByteSize method_data_offset()       {
    return byte_offset_of(Method, _method_data);
  }
  static ByteSize method_counters_offset()   {
    return byte_offset_of(Method, _method_counters);
  }
#ifndef PRODUCT
  static ByteSize compiled_invocation_counter_offset() { return byte_offset_of(Method, _compiled_invocation_count); }
#endif // not PRODUCT
  static ByteSize native_function_offset()   { return in_ByteSize(sizeof(Method)); }
  static ByteSize from_interpreted_offset()  { return byte_offset_of(Method, _from_interpreted_entry ); }
  static ByteSize interpreter_entry_offset() { return byte_offset_of(Method, _i2i_entry ); }
  static ByteSize signature_handler_offset() { return in_ByteSize(sizeof(Method) + wordSize); }
  static ByteSize itable_index_offset()      { return byte_offset_of(Method, _vtable_index ); }

  // for code generation
  static int method_data_offset_in_bytes()   { return offset_of(Method, _method_data); }
  static int intrinsic_id_offset_in_bytes()  { return offset_of(Method, _intrinsic_id); }
  static int intrinsic_id_size_in_bytes()    { return sizeof(u2); }

  static ByteSize max_vt_buffer_offset()     { return byte_offset_of(Method, _max_vt_buffer); }

  // Static methods that are used to implement member methods where an exposed this pointer
  // is needed due to possible GCs
  static objArrayHandle resolved_checked_exceptions_impl(Method* method, TRAPS);

  // Returns the byte code index from the byte code pointer
  int bci_from(address bcp) const;
  address bcp_from(int bci) const;
  address bcp_from(address bcp) const;
  int validate_bci_from_bcp(address bcp) const;
  int validate_bci(int bci) const;

  // Returns the line number for a bci if debugging information for the method is provided,
  // -1 is returned otherwise.
  int line_number_from_bci(int bci) const;

  // Reflection support
  bool is_overridden_in(Klass* k) const;

  // Stack walking support
  bool is_ignored_by_security_stack_walk() const;

  // JSR 292 support
  bool is_method_handle_intrinsic() const;   // MethodHandles::is_signature_polymorphic_intrinsic(intrinsic_id)
  bool is_compiled_lambda_form() const;      // intrinsic_id() == vmIntrinsics::_compiledLambdaForm
  bool has_member_arg() const;               // intrinsic_id() == vmIntrinsics::_linkToSpecial, etc.
  static methodHandle make_method_handle_intrinsic(vmIntrinsics::ID iid, // _invokeBasic, _linkToVirtual
                                                   Symbol* signature, //anything at all
                                                   TRAPS);
  static Klass* check_non_bcp_klass(Klass* klass);

  enum {
    // How many extra stack entries for invokedynamic
    extra_stack_entries_for_jsr292 = 1
  };

  // this operates only on invoke methods:
  // presize interpreter frames for extra interpreter stack entries, if needed
  // Account for the extra appendix argument for invokehandle/invokedynamic
  static int extra_stack_entries() { return extra_stack_entries_for_jsr292; }
  static int extra_stack_words();  // = extra_stack_entries() * Interpreter::stackElementSize

  // RedefineClasses() support:
  bool is_old() const        { return access_flags().is_old(); }
  void set_is_old()          { _access_flags.set_is_old(); }
  bool is_obsolete() const   { return access_flags().is_obsolete(); }
  void set_is_obsolete()     { _access_flags.set_is_obsolete(); }
  bool is_deleted() const    { return access_flags().is_deleted(); }
  void set_is_deleted()      { _access_flags.set_is_deleted(); }

  bool is_running_emcp() const {
    // EMCP methods are old but not obsolete or deleted. Equivalent
    // Modulo Constant Pool means the method is equivalent except
    // the constant pool and instructions that access the constant
    // pool might be different.
    // If a breakpoint is set in a redefined method, its EMCP methods that are
    // still running must have a breakpoint also.
    return (_flags & _running_emcp) != 0;
  }

  void set_running_emcp(bool x) {
    _flags = x ? (_flags | _running_emcp) : (_flags & ~_running_emcp);
  }

  bool on_stack() const      { return access_flags().on_stack(); }
  void set_on_stack(const bool value);

  // see the definition in Method*.cpp for the gory details
  bool should_not_be_cached() const;

  // JVMTI Native method prefixing support:
  bool is_prefixed_native() const  { return access_flags().is_prefixed_native(); }
  void set_is_prefixed_native()    { _access_flags.set_is_prefixed_native(); }

  // Rewriting support
  static methodHandle clone_with_new_data(const methodHandle& m, u_char* new_code, int new_code_length,
                                          u_char* new_compressed_linenumber_table, int new_compressed_linenumber_size, TRAPS);

  // jmethodID handling
  // Because the useful life-span of a jmethodID cannot be determined,
  // once created they are never reclaimed. The methods to which they refer,
  // however, can be GC'ed away if the class is unloaded or if the method is
  // made obsolete or deleted -- in these cases, the jmethodID
  // refers to NULL (as is the case for any weak reference).
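  //
  // A minimal usage sketch of the jmethodID API declared below (illustrative
  // only; the local names are hypothetical):
  //
  //   Method* m = ...;                                        // some resolved method
  //   ClassLoaderData* cld = m->method_holder()->class_loader_data();
  //   Method::ensure_jmethod_ids(cld, 1);                     // optional pre-sizing
  //   jmethodID mid = m->jmethod_id();                        // allocate or look up
  //   Method* back = Method::checked_resolve_jmethod_id(mid); // NULL if no longer valid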
  static jmethodID make_jmethod_id(ClassLoaderData* loader_data, Method* mh);
  static void destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID mid);

  // Ensure there is enough capacity in the internal tracking data
  // structures to hold the number of jmethodIDs you plan to generate.
  // This saves substantial time doing allocations.
  static void ensure_jmethod_ids(ClassLoaderData* loader_data, int capacity);

  // Use resolve_jmethod_id() in situations where the caller is expected
  // to provide a valid jmethodID; the only sanity checks are in asserts;
  // result guaranteed not to be NULL.
  inline static Method* resolve_jmethod_id(jmethodID mid) {
    assert(mid != NULL, "JNI method id should not be null");
    return *((Method**)mid);
  }

  // Use checked_resolve_jmethod_id() in situations where the caller
  // should provide a valid jmethodID, but might not. NULL is returned
  // when the jmethodID does not refer to a valid method.
  static Method* checked_resolve_jmethod_id(jmethodID mid);

  static void change_method_associated_with_jmethod_id(jmethodID old_jmid_ptr, Method* new_method);
  static bool is_method_id(jmethodID mid);

  // Clear methods
  static void clear_jmethod_ids(ClassLoaderData* loader_data);
  static void print_jmethod_ids(const ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;

  // Get this method's jmethodID -- allocate if it doesn't exist
  jmethodID jmethod_id()               { return method_holder()->get_jmethod_id(this); }

  // Lookup the jmethodID for this method.  Return NULL if not found.
  // NOTE that this function can be called from a signal handler
  // (see AsyncGetCallTrace support for Forte Analyzer) and this
  // needs to be async-safe. No allocation should be done and
  // so handles are not used to avoid deadlock.
  jmethodID find_jmethod_id_or_null()  { return method_holder()->jmethod_id_or_null(this); }

  // Support for inlining of intrinsic methods
  vmIntrinsics::ID intrinsic_id() const        { return (vmIntrinsics::ID) _intrinsic_id; }
  void set_intrinsic_id(vmIntrinsics::ID id)   { _intrinsic_id = (u2) id; }

  // Helper routines for intrinsic_id() and vmIntrinsics::method().
  void init_intrinsic_id();     // updates from _none if a match
  static vmSymbols::SID klass_id_for_intrinsics(const Klass* holder);

  bool caller_sensitive() {
    return (_flags & _caller_sensitive) != 0;
  }
  void set_caller_sensitive(bool x) {
    _flags = x ? (_flags | _caller_sensitive) : (_flags & ~_caller_sensitive);
  }

  bool force_inline() {
    return (_flags & _force_inline) != 0;
  }
  void set_force_inline(bool x) {
    _flags = x ? (_flags | _force_inline) : (_flags & ~_force_inline);
  }

  bool dont_inline() {
    return (_flags & _dont_inline) != 0;
  }
  void set_dont_inline(bool x) {
    _flags = x ? (_flags | _dont_inline) : (_flags & ~_dont_inline);
  }

  bool is_hidden() {
    return (_flags & _hidden) != 0;
  }
  void set_hidden(bool x) {
    _flags = x ? (_flags | _hidden) : (_flags & ~_hidden);
  }

  bool intrinsic_candidate() {
    return (_flags & _intrinsic_candidate) != 0;
  }
  void set_intrinsic_candidate(bool x) {
    _flags = x ? (_flags | _intrinsic_candidate) : (_flags & ~_intrinsic_candidate);
  }

  bool has_injected_profile() {
    return (_flags & _has_injected_profile) != 0;
  }
  void set_has_injected_profile(bool x) {
    _flags = x ? (_flags | _has_injected_profile) : (_flags & ~_has_injected_profile);
  }
  bool has_reserved_stack_access() {
    return (_flags & _reserved_stack_access) != 0;
  }

  void set_has_reserved_stack_access(bool x) {
    _flags = x ? (_flags | _reserved_stack_access) : (_flags & ~_reserved_stack_access);
  }

  static int byte_value_for_known_not_returning_vt() {
    // The high byte of Method::_flags has only the
    // _known_not_returning_vt and _known_returning_vt bits, and all other bits
    // are zero, so we can test for is_known_not_returning_vt() in the interpreter
    // by essentially comparing (_flags >> 8) == Method::byte_value_for_known_not_returning_vt()
    assert(_unused_bits_mask == 0xfc00, "must be");
    return (_known_not_returning_vt >> 8);
  }

  bool is_known_not_returning_vt() {
    return (_flags & _known_not_returning_vt) != 0;
  }

  void set_known_not_returning_vt() {
    _flags |= _known_not_returning_vt;
  }

  bool is_known_returning_vt() {
    return (_flags & _known_returning_vt) != 0;
  }

  void set_known_returning_vt() {
    _flags |= _known_returning_vt;
  }

  void check_returning_vt(TRAPS);
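
  // Worked example of the high-byte test above (illustrative):
  // _known_not_returning_vt is 1 << 8 == 0x100, so
  // byte_value_for_known_not_returning_vt() returns 1. Once
  // set_known_not_returning_vt() has been called (and _known_returning_vt is
  // clear), the high byte of _flags is exactly 0x01, so the interpreter can
  // load that single byte and compare it against 1 instead of masking the
  // full flag word.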

  JFR_ONLY(DEFINE_TRACE_FLAG_ACCESSOR;)

  ConstMethod::MethodType method_type() const {
    return _constMethod->method_type();
  }
  bool is_overpass() const { return method_type() == ConstMethod::OVERPASS; }

  // On-stack replacement support
  bool has_osr_nmethod(int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
  }

  int mark_osr_nmethods() {
    return method_holder()->mark_osr_nmethods(this);
  }

  nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
    return method_holder()->lookup_osr_nmethod(this, bci, level, match_level);
  }

  // Inline cache support
  void cleanup_inline_caches();

  // Find if klass for method is loaded
  bool is_klass_loaded_by_klass_index(int klass_index) const;
  bool is_klass_loaded(int refinfo_index, bool must_be_resolved = false) const;

  // Indicates whether compilation failed earlier for this method, or
  // whether it is not compilable for another reason like having a
  // breakpoint set in it.
  bool is_not_compilable(int comp_level = CompLevel_any) const;
  void set_not_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
  void set_not_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_compilable(comp_level, false);
  }
  bool is_not_osr_compilable(int comp_level = CompLevel_any) const;
  void set_not_osr_compilable(int comp_level = CompLevel_all, bool report = true, const char* reason = NULL);
  void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
    set_not_osr_compilable(comp_level, false);
  }
  bool is_always_compilable() const;

 private:
  void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);

 public:
  MethodCounters* get_method_counters(TRAPS) {
    if (_method_counters == NULL) {
      build_method_counters(this, CHECK_AND_CLEAR_NULL);
    }
    return _method_counters;
  }

  bool is_not_c1_compilable() const   { return access_flags().is_not_c1_compilable(); }
  void set_not_c1_compilable()        { _access_flags.set_not_c1_compilable(); }
  void clear_not_c1_compilable()      { _access_flags.clear_not_c1_compilable(); }
  bool is_not_c2_compilable() const   { return access_flags().is_not_c2_compilable(); }
  void set_not_c2_compilable()        { _access_flags.set_not_c2_compilable(); }
  void clear_not_c2_compilable()      { _access_flags.clear_not_c2_compilable(); }

  bool is_not_c1_osr_compilable() const  { return is_not_c1_compilable(); }   // don't waste an accessFlags bit
  void set_not_c1_osr_compilable()       { set_not_c1_compilable(); }         // don't waste an accessFlags bit
  void clear_not_c1_osr_compilable()     { clear_not_c1_compilable(); }       // don't waste an accessFlags bit
  bool is_not_c2_osr_compilable() const  { return access_flags().is_not_c2_osr_compilable(); }
  void set_not_c2_osr_compilable()       { _access_flags.set_not_c2_osr_compilable(); }
  void clear_not_c2_osr_compilable()     { _access_flags.clear_not_c2_osr_compilable(); }

  // Background compilation support
  bool queued_for_compilation() const    { return access_flags().queued_for_compilation(); }
  void set_queued_for_compilation()      { _access_flags.set_queued_for_compilation(); }
  void clear_queued_for_compilation()    { _access_flags.clear_queued_for_compilation(); }

  // Resolve all classes in signature, return 'true' if successful
  static bool load_signature_classes(const methodHandle& m, TRAPS);

  // Return true if not all classes referenced in the signature, including the return type, have been loaded
  static bool has_unloaded_classes_in_signature(const methodHandle& m, TRAPS);

  // Printing
  void print_short_name(outputStream* st = tty); // prints as klassname::methodname; Exposed so field engineers can debug VM
#if INCLUDE_JVMTI
  void print_name(outputStream* st = tty); // prints as "virtual void foo(int)"; exposed for TraceRedefineClasses
#else
  void print_name(outputStream* st = tty) PRODUCT_RETURN; // prints as "virtual void foo(int)"
#endif

  // Helper routine used for method sorting
  static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true);

  // Deallocation function for redefine classes or if an error occurs
  void deallocate_contents(ClassLoaderData* loader_data);

  // Printing
#ifndef PRODUCT
  void print_on(outputStream* st) const;
#endif
  void print_value_on(outputStream* st) const;
  void print_linkage_flags(outputStream* st) PRODUCT_RETURN;

  const char* internal_name() const { return "{method}"; }

  // Check for valid method pointer
  static bool has_method_vptr(const void* ptr);
  bool is_valid_method() const;

  // Verify
  void verify() { verify_on(tty); }
  void verify_on(outputStream* st);

 private:

  // Inlined elements
  address* native_function_addr() const   { assert(is_native(), "must be native"); return (address*) (this+1); }
  address* signature_handler_addr() const { return native_function_addr() + 1; }
};


// Utility class for compressing line number tables

class CompressedLineNumberWriteStream: public CompressedWriteStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberWriteStream(int initial_size) : CompressedWriteStream(initial_size), _bci(0), _line(0) {}
  CompressedLineNumberWriteStream(u_char* buffer, int initial_size) : CompressedWriteStream(buffer, initial_size), _bci(0), _line(0) {}

  // Write (bci, line number) pair to stream
  void write_pair_regular(int bci_delta, int line_delta);

  inline void write_pair_inline(int bci, int line) {
    int bci_delta = bci - _bci;
    int line_delta = line - _line;
    _bci = bci;
    _line = line;
    // Skip (0,0) deltas - they do not add information and conflict with terminator.
    if (bci_delta == 0 && line_delta == 0) return;
    // Check if bci is 5-bit and line number 3-bit unsigned.
    if (((bci_delta & ~0x1F) == 0) && ((line_delta & ~0x7) == 0)) {
      // Compress into single byte.
      jubyte value = ((jubyte) bci_delta << 3) | (jubyte) line_delta;
      // Check that value doesn't match escape character.
      if (value != 0xFF) {
        write_byte(value);
        return;
      }
    }
    write_pair_regular(bci_delta, line_delta);
  }

  // Windows AMD64 + Apr 2005 PSDK with /O2 generates bad code for write_pair.
  // Disabling optimization doesn't work for methods in header files
  // so we force it to call through the non-optimized version in the .cpp.
  // It's gross, but it's the only way we can ensure that all callers are
  // fixed. _MSC_VER is defined by the windows compiler
#if defined(_M_AMD64) && _MSC_VER >= 1400
  void write_pair(int bci, int line);
#else
  void write_pair(int bci, int line) { write_pair_inline(bci, line); }
#endif

  // Write end-of-stream marker
  void write_terminator() { write_byte(0); }
};


// Utility class for decompressing line number tables

class CompressedLineNumberReadStream: public CompressedReadStream {
 private:
  int _bci;
  int _line;
 public:
  // Constructor
  CompressedLineNumberReadStream(u_char* buffer);
  // Read (bci, line number) pair from stream. Returns false at end-of-stream.
  bool read_pair();
  // Accessing bci and line number (after calling read_pair)
  int bci() const  { return _bci; }
  int line() const { return _line; }
};
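
// A minimal round-trip sketch of the two stream classes above (illustrative
// only; assumes a ResourceMark is active so the write stream's resource-area
// buffer stays valid, and uses buffer() from the CompressedStream base class):
//
//   CompressedLineNumberWriteStream w(64);
//   w.write_pair(0, 1);       // deltas (0,1) fit the one-byte (5-bit, 3-bit) form: 0x01
//   w.write_pair(7, 3);       // deltas (7,2) also fit: (7 << 3) | 2 == 0x3A
//   w.write_terminator();
//   CompressedLineNumberReadStream r(w.buffer());
//   while (r.read_pair()) {
//     tty->print_cr("bci %d maps to line %d", r.bci(), r.line());
//   }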

#if INCLUDE_JVMTI

/// Fast Breakpoints.

// If this structure gets more complicated (because bpts get numerous),
// move it into its own header.

// There is presently no provision for concurrent access
// to breakpoint lists, which is only OK for JVMTI because
// breakpoints are written only at safepoints, and are read
// concurrently only outside of safepoints.

class BreakpointInfo : public CHeapObj<mtClass> {
  friend class VMStructs;
 private:
  Bytecodes::Code  _orig_bytecode;
  int              _bci;
  u2               _name_index;       // of method
  u2               _signature_index;  // of method
  BreakpointInfo*  _next;             // simple storage allocation

 public:
  BreakpointInfo(Method* m, int bci);

  // accessors
  Bytecodes::Code orig_bytecode()                      { return _orig_bytecode; }
  void set_orig_bytecode(Bytecodes::Code code)         { _orig_bytecode = code; }
  int bci()                                            { return _bci; }

  BreakpointInfo* next() const                         { return _next; }
  void set_next(BreakpointInfo* n)                     { _next = n; }

  // helpers for searchers
  bool match(const Method* m, int bci) {
    return bci == _bci && match(m);
  }

  bool match(const Method* m) {
    return _name_index == m->name_index() &&
      _signature_index == m->signature_index();
  }

  void set(Method* method);
  void clear(Method* method);
};

#endif // INCLUDE_JVMTI

// Utility class for accessing exception handlers
class ExceptionTable : public StackObj {
 private:
  ExceptionTableElement* _table;
  u2 _length;

 public:
  ExceptionTable(const Method* m) {
    if (m->has_exception_handler()) {
      _table = m->exception_table_start();
      _length = m->exception_table_length();
    } else {
      _table = NULL;
      _length = 0;
    }
  }

  int length() const {
    return _length;
  }

  u2 start_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].start_pc;
  }

  void set_start_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].start_pc = value;
  }

  u2 end_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].end_pc;
  }

  void set_end_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].end_pc = value;
  }

  u2 handler_pc(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].handler_pc;
  }

  void set_handler_pc(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].handler_pc = value;
  }

  u2 catch_type_index(int idx) const {
    assert(idx < _length, "out of bounds");
    return _table[idx].catch_type_index;
  }

  void set_catch_type_index(int idx, u2 value) {
    assert(idx < _length, "out of bounds");
    _table[idx].catch_type_index = value;
  }
};

#endif // SHARE_VM_OOPS_METHODOOP_HPP