/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_NMETHOD_HPP
#define SHARE_VM_CODE_NMETHOD_HPP

#include "code/compiledMethod.hpp"

class DepChange;
class DirectiveSet;

// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array
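//
// The boundaries between these parts are recorded as byte offsets from
// header_begin(); see the _consts_offset .. _nmethod_end_offset fields and
// the corresponding *_begin()/*_end() accessors below.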

class nmethod : public CompiledMethod {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // scavengable oops
 private:

  // Shared fields for all nmethod's
  int       _entry_bci;      // != InvocationEntryBci if this nmethod is an on-stack replacement method
  jmethodID _jmethod_id;     // Cache of method()->jmethod_id()

#if INCLUDE_JVMCI
  // Needed to keep nmethods alive that are not the default nmethod for the associated Method.
  oop       _jvmci_installed_code;
  oop       _speculation_log;
#endif

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;           // from InstanceKlass::osr_nmethods_head
  nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods (see the accessors below)

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  AbstractCompiler* _compiler; // The compiler which compiled this nmethod

  // offsets for entry points
  address _entry_point;          // entry point with class check
  address _verified_entry_point; // entry point without class check
  address _osr_entry_point;      // entry point for on stack replacement

  // Offsets for different nmethod parts
  int _exception_offset;
  // Offset of the unwind handler if it exists
  int _unwind_handler_offset;

  int _consts_offset;
  int _stub_offset;
  int _oops_offset;         // offset to where embedded oop table begins (inside data)
  int _metadata_offset;     // embedded meta data table
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  int code_offset() const { return (address) code_begin() - header_begin(); }

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;  // which compilation made this nmethod
  int _comp_level;  // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  // Protected by Patching_lock
  volatile unsigned char _state; // {in_use, not_entrant, zombie, unloaded}

#ifdef ASSERT
  bool _oops_are_stale;  // indicates that it's no longer safe to access the oops section
#endif

  jbyte _scavenge_root_state;

#if INCLUDE_RTM_OPT
  // RTM state at compile time. Used during deoptimization to decide
  // whether to restart collecting RTM locking abort statistics again.
  RTMState _rtm_state;
#endif

  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
  // and is not made into a zombie. However, once the nmethod is made into
  // a zombie, it will be locked one final time if CompiledMethodUnload
  // event processing needs to be done.
  volatile jint _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if it is seen on the
  // stack. A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // the current sweep traversal index.
  long _stack_traversal_mark;

  // The _hotness_counter indicates the hotness of a method. The higher
  // the value the hotter the method. The hotness counter of an nmethod is
  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
  // is active while stack scanning (mark_active_nmethods()). The hotness
  // counter is decreased (by 1) while sweeping.
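  // For example, with ReservedCodeCacheSize = 240M the counter is reset to
  // (240M / 1M) * 2 = 480 on every stack scan that finds the method active,
  // so roughly 480 sweep cycles without any activity must elapse before the
  // method reads as completely cold.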
  int _hotness_counter;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
  // for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(Method* method,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

  // Creation support
  nmethod(Method* method,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder* recorder,
          Dependencies* dependencies,
          CodeBuffer* code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level
#if INCLUDE_JVMCI
          , Handle installed_code,
          Handle speculation_log
#endif
          );

  // helper methods
  void* operator new(size_t size, int nmethod_size, int comp_level) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);

  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(unsigned int state);
  bool make_entrant() { Unimplemented(); return false; }
  void inc_decompile_count();

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

  // Offsets
  int content_offset() const { return content_begin() - header_begin(); }
  int data_offset() const    { return _data_offset; }

  address header_end() const { return (address) header_begin() + header_size(); }

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(const methodHandle& method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer* code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level
#if INCLUDE_JVMCI
                              , Handle installed_code = Handle(),
                              Handle speculation_log = Handle()
#endif
                              );

  static nmethod* new_native_nmethod(const methodHandle& method,
                                     int compile_id,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

  // accessors
  AbstractCompiler* compiler() const { return _compiler; }

  // type info
  bool is_nmethod() const    { return true; }
  bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }

  bool is_compiled_by_c1() const;
  bool is_compiled_by_jvmci() const;
  bool is_compiled_by_c2() const;
  bool is_compiled_by_shark() const;

  // boundaries for different parts
  address consts_begin        () const { return header_begin() + _consts_offset; }
  address consts_end          () const { return code_begin(); }
  address stub_begin          () const { return header_begin() + _stub_offset; }
  address stub_end            () const { return header_begin() + _oops_offset; }
  address exception_begin     () const { return header_begin() + _exception_offset; }
  address unwind_handler_begin() const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin          () const { return (oop*) (header_begin() + _oops_offset); }
  oop*    oops_end            () const { return (oop*) (header_begin() + _metadata_offset); }

  Metadata** metadata_begin   () const { return (Metadata**) (header_begin() + _metadata_offset); }
  Metadata** metadata_end     () const { return (Metadata**) _scopes_data_begin; }

  address scopes_data_end     () const { return header_begin() + _scopes_pcs_offset; }
  PcDesc* scopes_pcs_begin    () const { return (PcDesc*) (header_begin() + _scopes_pcs_offset); }
  PcDesc* scopes_pcs_end      () const { return (PcDesc*) (header_begin() + _dependencies_offset); }
  address dependencies_begin  () const { return header_begin() + _dependencies_offset; }
  address dependencies_end    () const { return header_begin() + _handler_table_offset; }
  address handler_table_begin () const { return header_begin() + _handler_table_offset; }
  address handler_table_end   () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_end   () const { return header_begin() + _nmethod_end_offset; }
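
  // These sections tile the nmethod contiguously: consts_end() == code_begin(),
  // stub_end() == oops_begin(), oops_end() == metadata_begin(), and so on, with
  // nul_chk_table_end() (header_begin() + _nmethod_end_offset) marking the end
  // of the whole nmethod.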

  // Sizes
  int oops_size        () const { return (address) oops_end()     - (address) oops_begin(); }
  int metadata_size    () const { return (address) metadata_end() - (address) metadata_begin(); }
  int dependencies_size() const { return dependencies_end()       - dependencies_begin(); }

  int oops_count()     const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; }
  int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }

  int total_size() const;

  void dec_hotness_counter()        { _hotness_counter--; }
  void set_hotness_counter(int val) { _hotness_counter = val; }
  int  hotness_counter() const      { return _hotness_counter; }

  // Containment
  bool oops_contains        (oop* addr)       const { return oops_begin()        <= addr && addr < oops_end(); }
  bool metadata_contains    (Metadata** addr) const { return metadata_begin()    <= addr && addr < metadata_end(); }
  bool scopes_data_contains (address addr)    const { return scopes_data_begin() <= addr && addr < scopes_data_end(); }
  bool scopes_pcs_contains  (PcDesc* addr)    const { return scopes_pcs_begin()  <= addr && addr < scopes_pcs_end(); }

  // entry points
  address entry_point() const          { return _entry_point; }          // normal entry point
  address verified_entry_point() const { return _verified_entry_point; } // if klass is correct

  enum { in_use       = 0,   // executable nmethod
         not_entrant  = 1,   // marked for deoptimization but activations may still exist,
                             // will be transformed to zombie when all activations are gone
         zombie       = 2,   // no activations exist, nmethod is ready for purge
         unloaded     = 3 }; // there should be no activations, should not be called,
                             // will be transformed to zombie immediately

  // flag accessing and manipulation
  bool is_in_use() const      { return _state == in_use; }
  bool is_alive() const       { unsigned char s = _state; return s < zombie; }
  bool is_not_entrant() const { return _state == not_entrant; }
  bool is_zombie() const      { return _state == zombie; }
  bool is_unloaded() const    { return _state == unloaded; }
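
  // A sketch of the transitions implied by the comments above (the actual
  // state changes go through make_not_entrant_or_zombie() and make_unloaded()):
  //   in_use      -> not_entrant  (deoptimization, e.g. after an uncommon trap)
  //   not_entrant -> zombie       (once no activations remain on the stack)
  //   in_use      -> unloaded     (a root oop died during GC)
  //   unloaded    -> zombie       (immediately, see above)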

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
  RTMState rtm_state() const         { return _rtm_state; }
  void set_rtm_state(RTMState state) { _rtm_state = state; }
#endif

  // Make the nmethod non-entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool make_not_entrant() {
    assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
    return make_not_entrant_or_zombie(not_entrant);
  }
  bool make_not_used() { return make_not_entrant(); }
  bool make_zombie()   { return make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool unload_reported()     { return _unload_reported; }
  void set_unload_reported() { _unload_reported = true; }

  int get_state() const {
    return _state;
  }

  void make_unloaded(BoolObjectClosure* is_alive, oop cause);

  bool has_dependencies()         { return dependencies_size() != 0; }
  void flush_dependencies(BoolObjectClosure* is_alive);
  bool has_flushed_dependencies() { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies() {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  int comp_level() const { return _comp_level; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop  oop_at(int index) const { return index == 0 ? (oop) NULL : *oop_addr_at(index); }
  oop* oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*  metadata_at(int index) const { return index == 0 ? NULL : *metadata_addr_at(index); }
  Metadata** metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }
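
  // Example of the biasing: oop_at(1) reads oops_begin()[0], the first slot of
  // the embedded oop table; index 0 never touches the table and always decodes
  // to NULL.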

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);

  // Relocation support
 private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

 public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }

  // Scavengable oop support
  bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
 protected:
  enum { sl_on_list = 0x01, sl_marked = 0x10 };
  void set_on_scavenge_root_list()   { _scavenge_root_state = sl_on_list; }
  void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT
  void set_scavenge_root_marked()   { _scavenge_root_state |= sl_marked; }
  void clear_scavenge_root_marked() { _scavenge_root_state &= ~sl_marked; }
  bool scavenge_root_not_marked()   { return (_scavenge_root_state &~ sl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif //PRODUCT
  nmethod* scavenge_root_link() const     { return _scavenge_root_link; }
  void set_scavenge_root_link(nmethod* n) { _scavenge_root_link = n; }

 public:

  // Sweeper support
  long stack_traversal_mark()           { return _stack_traversal_mark; }
  void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int osr_entry_bci() const     { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address osr_entry() const     { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void invalidate_osr_method();
  nmethod* osr_link() const     { return _osr_link; }
  void set_osr_link(nmethod* n) { _osr_link = n; }

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();

  // Unlink and deallocate this nmethod. Only the NMethodSweeper class is
  // expected to use this; it is not expected to use any other private
  // methods/data in this class.

 protected:
  void flush();

 public:
  // When true is returned, it is unsafe to remove this nmethod even if
  // it is a zombie, since the VM or the ServiceThread might still be
  // using it.
  bool is_locked_by_vm() const { return _lock_count > 0; }

  // See comment at definition of _stack_traversal_mark
  void mark_as_seen_on_stack();
  bool can_convert_to_zombie();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

#if INCLUDE_JVMCI
  oop jvmci_installed_code() { return _jvmci_installed_code; }
  char* jvmci_installed_code_name(char* buf, size_t buflen);

  // Update the state of any InstalledCode instance associated with
  // this nmethod based on the current value of _state.
  void maybe_invalidate_installed_code();

  // Helper function to invalidate InstalledCode instances
  static void invalidate_installed_code(Handle installed_code, TRAPS);

  oop speculation_log() { return _speculation_log; }

 private:
  void clear_jvmci_installed_code();

 public:
#endif

 protected:
  virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred);
#if INCLUDE_JVMCI
  virtual bool do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred);
#endif

 private:
  bool do_unloading_scopes(BoolObjectClosure* is_alive, bool unloading_occurred);
  // Unload an nmethod if the *root object is dead.
  bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
  bool unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);

 public:
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_zombie);
  bool detect_scavenge_root_oops();
  void verify_scavenge_root_oops() PRODUCT_RETURN;

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }
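
  // Typical use during a GC root scan (a sketch, not the only pattern):
  //   nmethod::oops_do_marking_prologue();
  //   ... visit code roots, using test_set_oops_do_mark() to claim each
  //       nmethod at most once so parallel workers do not scan it twice ...
  //   nmethod::oops_do_marking_epilogue();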

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr) { return (address*) ((address) fr->unextended_sp() + _orig_pc_offset); }

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Accessor/mutator for the original pc of a frame before a frame was deopted.
  address get_original_pc(const frame* fr)          { return *orig_pc_addr(fr); }
  void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  // jvmti support:
  void post_compiled_method_load_event();
  jmethodID get_and_cache_jmethod_id();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print() const;
  void print_relocations()                    PRODUCT_RETURN;
  void print_pcs()                            PRODUCT_RETURN;
  void print_scopes()                         PRODUCT_RETURN;
  void print_dependencies()                   PRODUCT_RETURN;
  void print_value_on(outputStream* st) const PRODUCT_RETURN;
  void print_calls(outputStream* st)          PRODUCT_RETURN;
  void print_handler_table()                  PRODUCT_RETURN;
  void print_nul_chk_table()                  PRODUCT_RETURN;
  void print_recorded_oops()                  PRODUCT_RETURN;
  void print_recorded_metadata()              PRODUCT_RETURN;

  void maybe_print_nmethod(DirectiveSet* directive);
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) const {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin) const;

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics() PRODUCT_RETURN;

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  virtual int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  static void check_all_dependencies(DepChange& changes);

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // the methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(Klass* dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()      { return offset_of(nmethod, _osr_entry_point); }
  static int state_offset()                { return offset_of(nmethod, _state); }

  virtual void metadata_do(void f(Metadata*));
};

// Locks an nmethod so its code will not get removed and it will not
// be made into a zombie, even if it is a not_entrant method. After the
// nmethod becomes a zombie, if CompiledMethodUnload event processing
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
class nmethodLocker : public StackObj {
  CompiledMethod* _nm;

 public:

  // note: nm can be NULL
  // Only JvmtiDeferredEvent::compiled_method_unload_event()
  // should pass zombie_ok == true.
  static void lock_nmethod(CompiledMethod* nm, bool zombie_ok = false);
  static void unlock_nmethod(CompiledMethod* nm); // (ditto)

  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod* nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker(CompiledMethod* nm) {
    _nm = nm;
    lock(_nm);
  }

  static void lock(CompiledMethod* method) {
    if (method == NULL) return;
    lock_nmethod(method);
  }

  static void unlock(CompiledMethod* method) {
    if (method == NULL) return;
    unlock_nmethod(method);
  }

  nmethodLocker() { _nm = NULL; }
  ~nmethodLocker() {
    unlock(_nm);
  }

  CompiledMethod* code() { return _nm; }
  void set_code(CompiledMethod* new_nm) {
    unlock(_nm);  // note: This works even if _nm==new_nm.
    _nm = new_nm;
    lock(_nm);
  }
};
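
// Typical use is as a stack-allocated guard (a sketch):
//   {
//     nmethodLocker nml(pc);  // derives the CompiledMethod from a pc inside it
//     ... use nml.code() while the code is guaranteed not to be flushed ...
//   }                         // destructor unlocks again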

#endif // SHARE_VM_CODE_NMETHOD_HPP