/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CODE_NMETHOD_HPP
#define SHARE_CODE_NMETHOD_HPP

#include "code/compiledMethod.hpp"
#include "compiler/compilerDefinitions.hpp"

class DepChange;
class DirectiveSet;
class DebugInformationRecorder;

// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array
//  [Speculations]
//  - encoded speculations array
//  [JVMCINMethodData]
//  - meta data for JVMCI compiled nmethod
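//
// All of these parts are laid out contiguously after the header, so each
// part is located by a byte offset from header_begin(). As an illustration
// of the scheme (mirroring the *_begin()/*_end() accessors declared below):
//
//   oop* embedded_oops = (oop*)(header_begin() + _oops_offset);
//
// The end of one part is simply the begin offset of the next one.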

#if INCLUDE_JVMCI
class FailedSpeculation;
class JVMCINMethodData;
#endif

class nmethod : public CompiledMethod {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // scavengable oops
  friend class JVMCINMethodData;
 private:

  // Shared fields for all nmethods
  int       _entry_bci;      // != InvocationEntryBci if this nmethod is an on-stack replacement method
  jmethodID _jmethod_id;     // Cache of method()->jmethod_id()

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;       // from InstanceKlass::osr_nmethods_head

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  // offsets for entry points
  address _entry_point;                   // entry point with class check
  address _verified_entry_point;          // entry point without class check
  address _verified_value_entry_point;    // value type entry point (unpack all value args) without class check
  address _verified_value_ro_entry_point; // value type entry point (unpack receiver only) without class check
  address _osr_entry_point;               // entry point for on stack replacement

  // Offsets for different nmethod parts
  int  _exception_offset;
  // Offset of the unwind handler if it exists
  int  _unwind_handler_offset;

  int _consts_offset;
  int _stub_offset;
  int _oops_offset;                       // offset to where embedded oop table begins (inside data)
  int _metadata_offset;                   // embedded meta data table
  int _scopes_data_offset;
  int _scopes_pcs_offset;
  int _dependencies_offset;
  int _handler_table_offset;
  int _nul_chk_table_offset;
#if INCLUDE_JVMCI
  int _speculations_offset;
  int _jvmci_data_offset;
#endif
  int _nmethod_end_offset;

  int code_offset() const { return (address) code_begin() - header_begin(); }

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;                 // which compilation made this nmethod
  int _comp_level;                 // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;  // Used for maintenance of dependencies (CodeCache_lock)

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  // Protected by Patching_lock
  volatile signed char _state;     // {not_installed, in_use, not_entrant, zombie, unloaded}

#ifdef ASSERT
  bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
#endif

#if INCLUDE_RTM_OPT
  // RTM state at compile time. Used during deoptimization to decide
  // whether to restart collecting RTM locking abort statistics again.
  RTMState _rtm_state;
#endif

  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
  // and is not made into a zombie. However, once the nmethod is made into
  // a zombie, it will be locked one final time if CompiledMethodUnload
  // event processing needs to be done.
  volatile jint _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if it is seen on the
  // stack. A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // the current sweep traversal index.
  volatile long _stack_traversal_mark;

  // The _hotness_counter indicates the hotness of a method. The higher
  // the value the hotter the method. The hotness counter of an nmethod is
  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
  // is active while stack scanning (mark_active_nmethods()). The hotness
  // counter is decreased (by 1) while sweeping.
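  // An illustrative example, assuming the typical default of
  // ReservedCodeCacheSize=240M with tiered compilation: a method seen on the
  // stack during scanning gets its counter reset to (240M / 1M) * 2 = 480,
  // and then loses one point per sweeper pass until it is seen again.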
  int _hotness_counter;

  // Local state used to keep track of whether unloading is happening or not
  volatile uint8_t _is_unloading_state;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
  // for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer* code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

  // Creation support
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder* recorder,
          Dependencies* dependencies,
          CodeBuffer* code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level
#if INCLUDE_JVMCI
          , char* speculations,
          int speculations_len,
          int jvmci_data_size
#endif
          );
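
  // Note: clients never construct nmethods directly. The two constructors
  // above are private and are reached through the public new_nmethod() and
  // new_native_nmethod() factories declared below, which size the nmethod
  // and allocate it in the CodeCache via the class-specific operator new.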

  // helper methods
  void* operator new(size_t size, int nmethod_size, int comp_level) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(int state);
  bool make_entrant() { Unimplemented(); return false; }
  void inc_decompile_count();

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

  // Offsets
  int content_offset() const { return content_begin() - header_begin(); }
  int data_offset() const    { return _data_offset; }

  address header_end() const { return (address) header_begin() + header_size(); }

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(const methodHandle& method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer* code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level
#if INCLUDE_JVMCI
                              , char* speculations = NULL,
                              int speculations_len = 0,
                              int nmethod_mirror_index = -1,
                              const char* nmethod_mirror_name = NULL,
                              FailedSpeculation** failed_speculations = NULL
#endif
                              );

  // Only used for unit tests.
  nmethod()
    : CompiledMethod(),
      _is_unloading_state(0),
      _native_receiver_sp_offset(in_ByteSize(-1)),
      _native_basic_lock_sp_offset(in_ByteSize(-1)) {}


  static nmethod* new_native_nmethod(const methodHandle& method,
                                     int compile_id,
                                     CodeBuffer* code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);
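
  // Illustrative sketch of how a JIT compiler registers a finished compile
  // (the real call site is ciEnv::register_method(), which also handles
  // installation and bailout; the local names here are hypothetical):
  //
  //   nmethod* nm = nmethod::new_nmethod(method, compile_id, entry_bci,
  //                                      offsets, orig_pc_offset, recorder,
  //                                      deps, code_buffer, frame_size,
  //                                      oop_maps, handler_table,
  //                                      nul_chk_table, compiler, comp_level);
  //   if (nm != NULL) {
  //     // ... install via Method::set_code() or, for OSR,
  //     // InstanceKlass::add_osr_nmethod() ...
  //   }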

  // type info
  bool is_nmethod() const    { return true; }
  bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }

  // boundaries for different parts
  address consts_begin        () const { return header_begin() + _consts_offset; }
  address consts_end          () const { return code_begin(); }
  address stub_begin          () const { return header_begin() + _stub_offset; }
  address stub_end            () const { return header_begin() + _oops_offset; }
  address exception_begin     () const { return header_begin() + _exception_offset; }
  address unwind_handler_begin() const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin          () const { return (oop*) (header_begin() + _oops_offset); }
  oop*    oops_end            () const { return (oop*) (header_begin() + _metadata_offset); }

  Metadata** metadata_begin   () const { return (Metadata**) (header_begin() + _metadata_offset); }
  Metadata** metadata_end     () const { return (Metadata**) _scopes_data_begin; }

  address scopes_data_end     () const { return header_begin() + _scopes_pcs_offset; }
  PcDesc* scopes_pcs_begin    () const { return (PcDesc*) (header_begin() + _scopes_pcs_offset); }
  PcDesc* scopes_pcs_end      () const { return (PcDesc*) (header_begin() + _dependencies_offset); }
  address dependencies_begin  () const { return header_begin() + _dependencies_offset; }
  address dependencies_end    () const { return header_begin() + _handler_table_offset; }
  address handler_table_begin () const { return header_begin() + _handler_table_offset; }
  address handler_table_end   () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset; }
#if INCLUDE_JVMCI
  address nul_chk_table_end   () const { return header_begin() + _speculations_offset; }
  address speculations_begin  () const { return header_begin() + _speculations_offset; }
  address speculations_end    () const { return header_begin() + _jvmci_data_offset; }
  address jvmci_data_begin    () const { return header_begin() + _jvmci_data_offset; }
  address jvmci_data_end      () const { return header_begin() + _nmethod_end_offset; }
#else
  address nul_chk_table_end   () const { return header_begin() + _nmethod_end_offset; }
#endif

  // Sizes
  int oops_size        () const { return (address) oops_end() - (address) oops_begin(); }
  int metadata_size    () const { return (address) metadata_end() - (address) metadata_begin(); }
  int dependencies_size() const { return dependencies_end() - dependencies_begin(); }
#if INCLUDE_JVMCI
  int speculations_size() const { return speculations_end() - speculations_begin(); }
  int jvmci_data_size  () const { return jvmci_data_end() - jvmci_data_begin(); }
#endif

  int oops_count()     const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; }
  int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }

  int total_size() const;

  void dec_hotness_counter()        { _hotness_counter--; }
  void set_hotness_counter(int val) { _hotness_counter = val; }
  int  hotness_counter() const      { return _hotness_counter; }

  // Containment
  bool oops_contains       (oop*       addr) const { return oops_begin()        <= addr && addr < oops_end(); }
  bool metadata_contains   (Metadata** addr) const { return metadata_begin()    <= addr && addr < metadata_end(); }
  bool scopes_data_contains(address    addr) const { return scopes_data_begin() <= addr && addr < scopes_data_end(); }
  bool scopes_pcs_contains (PcDesc*    addr) const { return scopes_pcs_begin()  <= addr && addr < scopes_pcs_end(); }

  // entry points
  address entry_point() const                   { return _entry_point;                   } // normal entry point
  address verified_entry_point() const          { return _verified_entry_point;          } // normal entry point without class check
  address verified_value_entry_point() const    { return _verified_value_entry_point;    } // value type entry point (unpack all value args) without class check
  address verified_value_ro_entry_point() const { return _verified_value_ro_entry_point; } // value type entry point (only unpack receiver) without class check

  // flag accessing and manipulation
  bool is_not_installed() const { return _state == not_installed; }
  bool is_in_use() const        { return _state <= in_use; }
  bool is_alive() const         { return _state < zombie; }
  bool is_not_entrant() const   { return _state == not_entrant; }
  bool is_zombie() const        { return _state == zombie; }
  bool is_unloaded() const      { return _state == unloaded; }
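  // The range checks above rely on the states (declared in CompiledMethod)
  // being ordered: not_installed < in_use < not_entrant < zombie < unloaded.
  // That ordering is what lets is_in_use() be written as (_state <= in_use)
  // and is_alive() as (_state < zombie).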

  void clear_unloading_state();
  virtual bool is_unloading();
  virtual void do_unloading(bool unloading_occurred);

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
  RTMState rtm_state() const         { return _rtm_state; }
  void set_rtm_state(RTMState state) { _rtm_state = state; }
#endif

  void make_in_use() { _state = in_use; }
  // Make the nmethod non entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool make_not_entrant() {
    assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
    return make_not_entrant_or_zombie(not_entrant);
  }
  bool make_not_used() { return make_not_entrant(); }
  bool make_zombie()   { return make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool unload_reported()     { return _unload_reported; }
  void set_unload_reported() { _unload_reported = true; }

  int get_state() const {
    return _state;
  }

  void make_unloaded();

  bool has_dependencies()         { return dependencies_size() != 0; }
  void flush_dependencies(bool delete_immediately);
  bool has_flushed_dependencies() { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies() {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  int comp_level() const { return _comp_level; }

  void unlink_from_method(bool acquire_lock);

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop  oop_at(int index) const;
  oop* oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*  metadata_at(int index) const { return index == 0 ? NULL : *metadata_addr_at(index); }
  Metadata** metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }
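
  // Illustration of the 1-biased indexing used by both tables: a relocation
  // that recorded oop index 1 refers to the first slot of the embedded oop
  // table, i.e. oop_addr_at(1) == &oops_begin()[0], while index 0 always
  // denotes NULL and has no slot of its own.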

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);

  // Relocation support
private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }

  // Sweeper support
  long stack_traversal_mark()           { return _stack_traversal_mark; }
  void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address osr_entry() const { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void invalidate_osr_method();
  nmethod* osr_link() const     { return _osr_link; }
  void set_osr_link(nmethod* n) { _osr_link = n; }

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();

  // unlink and deallocate this nmethod
  // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // When true is returned, it is unsafe to remove this nmethod even if
  // it is a zombie, since the VM or the ServiceThread might still be
  // using it.
  bool is_locked_by_vm() const { return _lock_count > 0; }

  // See comment at definition of _last_seen_on_stack
  void mark_as_seen_on_stack();
  bool can_convert_to_zombie();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

#if INCLUDE_JVMCI
  // Gets the JVMCI name of this nmethod.
  const char* jvmci_name();

  // Records the pending failed speculation in the
  // JVMCI speculation log associated with this nmethod.
  void update_speculation(JavaThread* thread);

  // Gets the data specific to a JVMCI compiled method.
  // This returns a non-NULL value iff this nmethod was
  // compiled by the JVMCI compiler.
  JVMCINMethodData* jvmci_nmethod_data() const {
    return jvmci_data_size() == 0 ? NULL : (JVMCINMethodData*) jvmci_data_begin();
  }
#endif

 public:
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_zombie);

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr);

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Accessor/mutator for the original pc of a frame before a frame was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  // jvmti support:
  void post_compiled_method_load_event();
  jmethodID get_and_cache_jmethod_id();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print() const;
  void print_relocations()  PRODUCT_RETURN;
  void print_pcs()          PRODUCT_RETURN;
  void print_scopes()       PRODUCT_RETURN;
  void print_dependencies() PRODUCT_RETURN;
  void print_value_on(outputStream* st) const PRODUCT_RETURN;
  void print_calls(outputStream* st)          PRODUCT_RETURN;
  void print_handler_table()                  PRODUCT_RETURN;
  void print_nul_chk_table()                  PRODUCT_RETURN;
  void print_recorded_oops()                  PRODUCT_RETURN;
  void print_recorded_metadata()              PRODUCT_RETURN;

  void maybe_print_nmethod(DirectiveSet* directive);
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) const {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin) const;
  void print_entry_parameters(outputStream* stream, address block_begin) const;

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics() PRODUCT_RETURN;

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  virtual int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  static void check_all_dependencies(DepChange& changes);

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee);

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()      { return offset_of(nmethod, _osr_entry_point); }
  static int state_offset()                { return offset_of(nmethod, _state); }

  virtual void metadata_do(MetadataClosure* f);

  NativeCallWrapper* call_wrapper_at(address call) const;
  NativeCallWrapper* call_wrapper_before(address return_pc) const;
  address call_instruction_address(address pc) const;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const;
};
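
// Typical (illustrative) use of the nmethodLocker defined below: pin a
// compiled method for the duration of a scope so that it cannot be flushed
// or turned into a zombie while it is being inspected:
//
//   {
//     nmethodLocker nml(nm);         // locks nm (a NULL nm is tolerated)
//     // ... safely examine nm's code, oops, debug info ...
//   }                                // destructor unlocks nm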

// Locks an nmethod so its code will not get removed and it will not
// be made into a zombie, even if it is a not_entrant method. After the
// nmethod becomes a zombie, if CompiledMethodUnload event processing
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
class nmethodLocker : public StackObj {
  CompiledMethod* _nm;

 public:

  // note: nm can be NULL
  // Only JvmtiDeferredEvent::compiled_method_unload_event()
  // should pass zombie_ok == true.
  static void lock_nmethod(CompiledMethod* nm, bool zombie_ok = false);
  static void unlock_nmethod(CompiledMethod* nm);  // (ditto)

  nmethodLocker(address pc);  // derive nm from pc
  nmethodLocker(nmethod* nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker(CompiledMethod* nm) {
    _nm = nm;
    lock(_nm);
  }

  static void lock(CompiledMethod* method) {
    if (method == NULL) return;
    lock_nmethod(method);
  }

  static void unlock(CompiledMethod* method) {
    if (method == NULL) return;
    unlock_nmethod(method);
  }

  nmethodLocker() { _nm = NULL; }
  ~nmethodLocker() {
    unlock(_nm);
  }

  CompiledMethod* code() { return _nm; }
  void set_code(CompiledMethod* new_nm) {
    unlock(_nm);  // note: This works even if _nm==new_nm.
    _nm = new_nm;
    lock(_nm);
  }
};

#endif // SHARE_CODE_NMETHOD_HPP