/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CODE_NMETHOD_HPP
#define SHARE_CODE_NMETHOD_HPP

#include "code/compiledMethod.hpp"

class DepChange;
class DirectiveSet;
class DebugInformationRecorder;

// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array
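//
// These sections are laid out contiguously and are delimited by the
// *_offset fields declared below, expressed as byte offsets from
// header_begin(). For example (matching the boundary accessors further
// down in this class):
//
//   consts_begin()      == header_begin() + _consts_offset
//   stub_begin()        == header_begin() + _stub_offset
//   nul_chk_table_end() == header_begin() + _nmethod_end_offset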
class nmethod : public CompiledMethod {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // scavengable oops
 private:

  // Shared fields for all nmethods
  int       _entry_bci;      // != InvocationEntryBci if this nmethod is an on-stack replacement method
  jmethodID _jmethod_id;     // Cache of method()->jmethod_id()

#if INCLUDE_JVMCI
  // A weak reference to an InstalledCode object associated with
  // this nmethod.
  jweak     _jvmci_installed_code;

  // A weak reference to a SpeculationLog object associated with
  // this nmethod.
  jweak     _speculation_log;

  // Determines whether this nmethod is unloaded when the
  // referent in _jvmci_installed_code is cleared. This
  // will be false if the referent is initialized to a
  // HotSpotNMethod object whose isDefault field is true.
  // That is, installed code other than a "default"
  // HotSpotNMethod causes nmethod unloading.
  // This field is ignored once _jvmci_installed_code is NULL.
  bool _jvmci_installed_code_triggers_invalidation;
#endif

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;       // from InstanceKlass::osr_nmethods_head

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  // offsets for entry points
  address _entry_point;             // entry point with class check
  address _verified_entry_point;    // entry point without class check
  address _osr_entry_point;         // entry point for on stack replacement

  // Offsets for different nmethod parts
  int _exception_offset;
  // Offset of the unwind handler if it exists
  int _unwind_handler_offset;

  int _consts_offset;
  int _stub_offset;
  int _oops_offset;                 // offset to where embedded oop table begins (inside data)
  int _metadata_offset;             // embedded meta data table
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
  int _nmethod_end_offset;

  int code_offset() const { return (address) code_begin() - header_begin(); }

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;                  // which compilation made this nmethod
  int _comp_level;                  // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;   // Used for maintenance of dependencies (CodeCache_lock)

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  // Protected by Patching_lock
  volatile signed char _state;      // {not_installed, in_use, not_entrant, zombie, unloaded}

#ifdef ASSERT
  bool _oops_are_stale;             // indicates that it's no longer safe to access the oops section
#endif

#if INCLUDE_RTM_OPT
  // RTM state at compile time. Used during deoptimization to decide
  // whether to restart collecting RTM locking abort statistics again.
  RTMState _rtm_state;
#endif

  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
  // and is not made into a zombie. However, once the nmethod is made into
  // a zombie, it will be locked one final time if CompiledMethodUnload
  // event processing needs to be done.
  volatile jint _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if it is seen on the
  // stack. A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // the current sweep traversal index.
  volatile long _stack_traversal_mark;

  // The _hotness_counter indicates the hotness of a method. The higher
  // the value the hotter the method. The hotness counter of an nmethod is
  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
  // is active while stack scanning (mark_active_nmethods()). The hotness
  // counter is decreased (by 1) while sweeping.
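  // Illustrative arithmetic (the cache size below is a hypothetical
  // setting, not a guaranteed default): with -XX:ReservedCodeCacheSize=240m,
  // every stack scan that sees the method active resets the counter to
  // (240M / 1M) * 2 == 480; one is subtracted per sweeper pass, so the
  // counter only drains if the method stays inactive for hundreds of
  // consecutive sweeps.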
  int _hotness_counter;

  // Local state used to keep track of whether unloading is happening or not
  volatile uint8_t _is_unloading_state;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
  // for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

  // Creation support
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder *recorder,
          Dependencies* dependencies,
          CodeBuffer *code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level
#if INCLUDE_JVMCI
          , jweak installed_code,
          jweak speculation_log
#endif
          );

  // helper methods
  void* operator new(size_t size, int nmethod_size, int comp_level) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(int state);
  bool make_entrant() { Unimplemented(); return false; }
  void inc_decompile_count();

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

  // Offsets
  int content_offset() const { return content_begin() - header_begin(); }
  int data_offset() const    { return _data_offset; }

  address header_end() const { return (address) header_begin() + header_size(); }

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(const methodHandle& method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer *code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level
#if INCLUDE_JVMCI
                              , jweak installed_code = NULL,
                              jweak speculation_log = NULL
#endif
  );
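  // A compiler back end hands its finished buffers to this factory. A
  // minimal sketch of the call shape (all argument names here are
  // hypothetical locals of the caller; a NULL result means the code
  // cache could not accommodate the nmethod):
  //
  //   nmethod* nm = nmethod::new_nmethod(mh, compile_id, entry_bci,
  //                                      &offsets, orig_pc_offset, recorder,
  //                                      deps, &code_buffer, frame_size,
  //                                      oop_maps, &handler_table,
  //                                      &nul_chk_table, compiler, comp_level);
  //   if (nm == NULL) { /* allocation failed; drop this compilation */ }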
  // Only used for unit tests.
  nmethod()
    : CompiledMethod(),
      _is_unloading_state(0),
      _native_receiver_sp_offset(in_ByteSize(-1)),
      _native_basic_lock_sp_offset(in_ByteSize(-1)) {}

  static nmethod* new_native_nmethod(const methodHandle& method,
                                     int compile_id,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

  // type info
  bool is_nmethod() const    { return true; }
  bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }

  // boundaries for different parts
  address consts_begin        () const { return header_begin() + _consts_offset; }
  address consts_end          () const { return code_begin(); }
  address stub_begin          () const { return header_begin() + _stub_offset; }
  address stub_end            () const { return header_begin() + _oops_offset; }
  address exception_begin     () const { return header_begin() + _exception_offset; }
  address unwind_handler_begin() const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin          () const { return (oop*) (header_begin() + _oops_offset); }
  oop*    oops_end            () const { return (oop*) (header_begin() + _metadata_offset); }

  Metadata** metadata_begin   () const { return (Metadata**) (header_begin() + _metadata_offset); }
  Metadata** metadata_end     () const { return (Metadata**) _scopes_data_begin; }

  address scopes_data_end     () const { return header_begin() + _scopes_pcs_offset; }
  PcDesc* scopes_pcs_begin    () const { return (PcDesc*) (header_begin() + _scopes_pcs_offset); }
  PcDesc* scopes_pcs_end      () const { return (PcDesc*) (header_begin() + _dependencies_offset); }
  address dependencies_begin  () const { return header_begin() + _dependencies_offset; }
  address dependencies_end    () const { return header_begin() + _handler_table_offset; }
  address handler_table_begin () const { return header_begin() + _handler_table_offset; }
  address handler_table_end   () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_end   () const { return header_begin() + _nmethod_end_offset; }

  // Sizes
  int oops_size        () const { return (address) oops_end() - (address) oops_begin(); }
  int metadata_size    () const { return (address) metadata_end() - (address) metadata_begin(); }
  int dependencies_size() const { return dependencies_end() - dependencies_begin(); }

  int oops_count()     const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; }
  int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }

  int total_size() const;

  void dec_hotness_counter()        { _hotness_counter--; }
  void set_hotness_counter(int val) { _hotness_counter = val; }
  int  hotness_counter() const      { return _hotness_counter; }

  // Containment
  bool oops_contains        (oop*       addr) const { return oops_begin()        <= addr && addr < oops_end(); }
  bool metadata_contains    (Metadata** addr) const { return metadata_begin()    <= addr && addr < metadata_end(); }
  bool scopes_data_contains (address    addr) const { return scopes_data_begin() <= addr && addr < scopes_data_end(); }
  bool scopes_pcs_contains  (PcDesc*    addr) const { return scopes_pcs_begin()  <= addr && addr < scopes_pcs_end(); }

  // entry points
  address entry_point() const          { return _entry_point;          } // normal entry point
  address verified_entry_point() const { return _verified_entry_point; } // if klass is correct

  // flag accessing and manipulation
  bool is_not_installed() const { return _state == not_installed; }
  bool is_in_use() const        { return _state <= in_use; }
  bool is_alive() const         { return _state < zombie; }
  bool is_not_entrant() const   { return _state == not_entrant; }
  bool is_zombie() const        { return _state == zombie; }
  bool is_unloaded() const      { return _state == unloaded; }
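  // Note: the range predicates above rely on the declaration order of the
  // states (not_installed, in_use, not_entrant, zombie, unloaded) also
  // being their ascending numeric order, so e.g. is_alive() covers every
  // state before zombie, and is_in_use() accepts both not_installed and
  // in_use.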
  void clear_unloading_state();
  virtual bool is_unloading();
  virtual void do_unloading(bool unloading_occurred);

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
  RTMState rtm_state() const         { return _rtm_state; }
  void set_rtm_state(RTMState state) { _rtm_state = state; }
#endif

  void make_in_use() { _state = in_use; }
  // Make the nmethod non-entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool make_not_entrant() {
    assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
    return make_not_entrant_or_zombie(not_entrant);
  }
  bool make_not_used() { return make_not_entrant(); }
  bool make_zombie()   { return make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool unload_reported()     { return _unload_reported; }
  void set_unload_reported() { _unload_reported = true; }

  int get_state() const {
    return _state;
  }

  void make_unloaded();

  bool has_dependencies()         { return dependencies_size() != 0; }
  void flush_dependencies(bool delete_immediately);
  bool has_flushed_dependencies() { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies() {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  int comp_level() const { return _comp_level; }

  void unlink_from_method(bool acquire_lock);

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop  oop_at(int index) const;
  oop* oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*  metadata_at(int index) const { return index == 0 ? NULL : *metadata_addr_at(index); }
  Metadata** metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }
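  // Illustrative example of the 1-biased indexing used by both tables:
  // with three embedded Metadata* entries, valid indices are 1..3, and
  // metadata_at(2) reads metadata_begin()[1]; index 0 always denotes
  // NULL and never touches the table. oop_at()/oop_addr_at() above use
  // the same scheme.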
  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);

  // Relocation support
 private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

 public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }

  // Sweeper support
  long stack_traversal_mark()           { return _stack_traversal_mark; }
  void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int      osr_entry_bci() const    { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address  osr_entry() const        { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void     invalidate_osr_method();
  nmethod* osr_link() const         { return _osr_link; }
  void     set_osr_link(nmethod *n) { _osr_link = n; }

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();

  // unlink and deallocate this nmethod
  // Only the NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // When true is returned, it is unsafe to remove this nmethod even if
  // it is a zombie, since the VM or the ServiceThread might still be
  // using it.
  bool is_locked_by_vm() const { return _lock_count > 0; }

  // See comment at definition of _stack_traversal_mark
  void mark_as_seen_on_stack();
  bool can_convert_to_zombie();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

#if INCLUDE_JVMCI
  // Gets the InstalledCode object associated with this nmethod
  // which may be NULL if this nmethod was not compiled by JVMCI
  // or the weak reference has been cleared.
  oop jvmci_installed_code();

  // Copies the value of the name field in the InstalledCode
  // object (if any) associated with this nmethod into buf.
  // Returns the value of buf if it was updated, otherwise NULL.
  char* jvmci_installed_code_name(char* buf, size_t buflen) const;

  // Updates the state of the InstalledCode (if any) associated with
  // this nmethod based on the current value of _state.
  void maybe_invalidate_installed_code();

  // Deoptimizes the nmethod (if any) in the address field of a given
  // InstalledCode object. The address field is zeroed upon return.
  static void invalidate_installed_code(Handle installed_code, TRAPS);

  // Gets the SpeculationLog object associated with this nmethod
  // which may be NULL if this nmethod was not compiled by JVMCI
  // or the weak reference has been cleared.
  oop speculation_log();

 private:
  // Deletes the weak reference (if any) to the InstalledCode object
  // associated with this nmethod.
  void clear_jvmci_installed_code();

  // Deletes the weak reference (if any) to the SpeculationLog object
  // associated with this nmethod.
  void clear_speculation_log();

 public:
#endif

 public:
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_zombie);

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }
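  // A minimal sketch of the parallel marking handshake (the closure name
  // is hypothetical, and this assumes the usual test-and-set convention:
  // the previous mark state is returned, so false means this thread was
  // the first claimant):
  //
  //   nmethod::oops_do_marking_prologue();
  //   // on whichever GC worker encounters nm:
  //   if (!nm->test_set_oops_do_mark()) {
  //     nm->oops_do(&my_oop_closure);   // visited at most once per cycle
  //   }
  //   nmethod::oops_do_marking_epilogue();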
 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr);

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Accessor/mutator for the original pc of a frame before a frame was deopted.
  address get_original_pc(const frame* fr)             { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  // jvmti support:
  void post_compiled_method_load_event();
  jmethodID get_and_cache_jmethod_id();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print() const;
  void print_relocations()                    PRODUCT_RETURN;
  void print_pcs()                            PRODUCT_RETURN;
  void print_scopes()                         PRODUCT_RETURN;
  void print_dependencies()                   PRODUCT_RETURN;
  void print_value_on(outputStream* st) const PRODUCT_RETURN;
  void print_calls(outputStream* st)          PRODUCT_RETURN;
  void print_handler_table()                  PRODUCT_RETURN;
  void print_nul_chk_table()                  PRODUCT_RETURN;
  void print_recorded_oops()                  PRODUCT_RETURN;
  void print_recorded_metadata()              PRODUCT_RETURN;

  void maybe_print_nmethod(DirectiveSet* directive);
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) const {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin) const;

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics() PRODUCT_RETURN;

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  virtual int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  static void check_all_dependencies(DepChange& changes);

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // the redefined methods, such that if m() is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent();

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()      { return offset_of(nmethod, _osr_entry_point); }
  static int state_offset()                { return offset_of(nmethod, _state); }

  virtual void metadata_do(void f(Metadata*));

  NativeCallWrapper* call_wrapper_at(address call) const;
  NativeCallWrapper* call_wrapper_before(address return_pc) const;
  address call_instruction_address(address pc) const;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const;
};

// Locks an nmethod so its code will not get removed and it will not
// be made into a zombie, even if it is a not_entrant method. After the
// nmethod becomes a zombie, if CompiledMethodUnload event processing
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
class nmethodLocker : public StackObj {
  CompiledMethod* _nm;

 public:

  // note: nm can be NULL
  // Only JvmtiDeferredEvent::compiled_method_unload_event()
  // should pass zombie_ok == true.
  static void lock_nmethod(CompiledMethod* nm, bool zombie_ok = false);
  static void unlock_nmethod(CompiledMethod* nm); // (ditto)

  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker(CompiledMethod *nm) {
    _nm = nm;
    lock(_nm);
  }

  static void lock(CompiledMethod* method) {
    if (method == NULL) return;
    lock_nmethod(method);
  }

  static void unlock(CompiledMethod* method) {
    if (method == NULL) return;
    unlock_nmethod(method);
  }

  nmethodLocker() { _nm = NULL; }
  ~nmethodLocker() {
    unlock(_nm);
  }

  CompiledMethod* code() { return _nm; }
  void set_code(CompiledMethod* new_nm) {
    unlock(_nm);   // note: This works even if _nm==new_nm.
    _nm = new_nm;
    lock(_nm);
  }
};
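// A minimal usage sketch (nm is a hypothetical nmethod*): constructing a
// locker pins the code for the enclosing scope, so it cannot be flushed
// or zombified while it is being examined:
//
//   {
//     nmethodLocker locker(nm);
//     // ... nm's code and metadata remain valid here ...
//   }   // destructor releases the lock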
#endif // SHARE_CODE_NMETHOD_HPP