/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_NMETHOD_HPP
#define SHARE_VM_CODE_NMETHOD_HPP

#include "code/compiledMethod.hpp"

class DepChange;
class DirectiveSet;

// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
//  - header                 (the nmethod structure)
//  [Relocation]
//  - relocation information
//  - constant part          (doubles, longs and floats used in nmethod)
//  - oop table
//  [Code]
//  - code body
//  - exception handler
//  - stub code
//  [Debugging information]
//  - oop array
//  - data array
//  - pcs
//  [Exception handler table]
//  - handler entry point array
//  [Implicit Null Pointer exception table]
//  - implicit null table array

class nmethod : public CompiledMethod {
  friend class VMStructs;
  friend class JVMCIVMStructs;
  friend class NMethodSweeper;
  friend class CodeCache;  // scavengable oops
 private:

  // Shared fields for all nmethods
  int       _entry_bci;      // != InvocationEntryBci if this nmethod is an on-stack replacement method
  jmethodID _jmethod_id;     // Cache of method()->jmethod_id()

#if INCLUDE_JVMCI
  // A weak reference to an InstalledCode object associated with
  // this nmethod.
  jweak     _jvmci_installed_code;

  // A weak reference to a SpeculationLog object associated with
  // this nmethod.
  jweak     _speculation_log;

  // Determines whether this nmethod is unloaded when the
  // referent in _jvmci_installed_code is cleared. This
  // will be false if the referent is initialized to a
  // HotSpotNMethod object whose isDefault field is true.
  // That is, installed code other than a "default"
  // HotSpotNMethod causes nmethod unloading.
  // This field is ignored once _jvmci_installed_code is NULL.
  bool _jvmci_installed_code_triggers_invalidation;
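
  // Illustrative sketch only (the real check happens during unloading and
  // differs in detail): the flag is consulted roughly like
  //
  //   if (/* referent of _jvmci_installed_code has been cleared */ &&
  //       _jvmci_installed_code_triggers_invalidation) {
  //     // invalidate/unload this nmethod
  //   }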
#endif

  // To support simple linked-list chaining of nmethods:
  nmethod*  _osr_link;       // from InstanceKlass::osr_nmethods_head

  static nmethod* volatile _oops_do_mark_nmethods;
  nmethod*        volatile _oops_do_mark_link;

  // offsets for entry points
  address _entry_point;                 // entry point with class check
  address _verified_entry_point;        // entry point without class check
  address _osr_entry_point;             // entry point for on stack replacement

  // Offsets for different nmethod parts
  int  _exception_offset;
  // Offset of the unwind handler if it exists
  int  _unwind_handler_offset;

  int  _consts_offset;
  int  _stub_offset;
  int  _oops_offset;                    // offset to where embedded oop table begins (inside data)
  int  _metadata_offset;                // embedded meta data table
  int  _scopes_data_offset;
  int  _scopes_pcs_offset;
  int  _dependencies_offset;
  int  _handler_table_offset;
  int  _nul_chk_table_offset;
  int  _nmethod_end_offset;

  int  code_offset() const { return (address) code_begin() - header_begin(); }

  // location in frame (offset for sp) that deopt can store the original
  // pc during a deopt.
  int _orig_pc_offset;

  int _compile_id;                      // which compilation made this nmethod
  int _comp_level;                      // compilation level

  // protected by CodeCache_lock
  bool _has_flushed_dependencies;       // Used for maintenance of dependencies (CodeCache_lock)

  // used by jvmti to track if an unload event has been posted for this nmethod.
  bool _unload_reported;

  // Protected by Patching_lock
  volatile signed char _state;          // {not_installed, in_use, not_entrant, zombie, unloaded}

#ifdef ASSERT
  bool _oops_are_stale;  // indicates that it's no longer safe to access the oops section
#endif

  jbyte _scavenge_root_state;

#if INCLUDE_RTM_OPT
  // RTM state at compile time. Used during deoptimization to decide
  // whether to restart collecting RTM locking abort statistics again.
  RTMState _rtm_state;
#endif

  // Nmethod Flushing lock. If non-zero, then the nmethod is not removed
  // and is not made into a zombie. However, once the nmethod is made into
  // a zombie, it will be locked one final time if CompiledMethodUnload
  // event processing needs to be done.
  volatile jint _lock_count;

  // not_entrant method removal. Each mark_sweep pass will update
  // this mark to the current sweep invocation count if it is seen on the
  // stack. A not_entrant method can be removed when there are no
  // more activations, i.e., when the _stack_traversal_mark is less than
  // the current sweep traversal index.
  volatile long _stack_traversal_mark;

  // The _hotness_counter indicates the hotness of a method. The higher
  // the value the hotter the method. The hotness counter of an nmethod is
  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
  // is active while stack scanning (mark_active_nmethods()). The hotness
  // counter is decreased (by 1) while sweeping.
  int _hotness_counter;
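
  // Worked example (illustrative; assumes the default ReservedCodeCacheSize
  // of 240M): each stack scan that finds the method active resets the
  // counter to (240M / 1M) * 2 = 480, so roughly 480 sweeps with no
  // activations must pass before the counter drains to zero.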

  // Local state used to keep track of whether unloading is happening or not
  volatile uint8_t _is_unloading_state;

  // These are used for compiled synchronized native methods to
  // locate the owner and stack slot for the BasicLock so that we can
  // properly revoke the bias of the owner if necessary. They are
  // needed because there is no debug information for compiled native
  // wrappers and the oop maps are insufficient to allow
  // frame::retrieve_receiver() to work. Currently they are expected
  // to be byte offsets from the Java stack pointer for maximum code
  // sharing between platforms. Note that currently biased locking
  // will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the receiver
  // for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;

  friend class nmethodLocker;

  // For native wrappers
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          CodeOffsets* offsets,
          CodeBuffer *code_buffer,
          int frame_size,
          ByteSize basic_lock_owner_sp_offset, /* synchronized natives only */
          ByteSize basic_lock_sp_offset,       /* synchronized natives only */
          OopMapSet* oop_maps);

  // Creation support
  nmethod(Method* method,
          CompilerType type,
          int nmethod_size,
          int compile_id,
          int entry_bci,
          CodeOffsets* offsets,
          int orig_pc_offset,
          DebugInformationRecorder *recorder,
          Dependencies* dependencies,
          CodeBuffer *code_buffer,
          int frame_size,
          OopMapSet* oop_maps,
          ExceptionHandlerTable* handler_table,
          ImplicitExceptionTable* nul_chk_table,
          AbstractCompiler* compiler,
          int comp_level
#if INCLUDE_JVMCI
          , jweak installed_code,
          jweak speculation_log
#endif
          );

  // helper methods
  void* operator new(size_t size, int nmethod_size, int comp_level) throw();

  const char* reloc_string_for(u_char* begin, u_char* end);
  // Returns true if this thread changed the state of the nmethod or
  // false if another thread performed the transition.
  bool make_not_entrant_or_zombie(int state);
  bool make_entrant() { Unimplemented(); return false; }
  void inc_decompile_count();

  // Inform external interfaces that a compiled method has been unloaded
  void post_compiled_method_unload();

  // Initialize fields to their default values
  void init_defaults();

  // Offsets
  int content_offset() const { return content_begin() - header_begin(); }
  int data_offset() const    { return _data_offset; }

  address header_end() const { return (address) header_begin() + header_size(); }

 public:
  // create nmethod with entry_bci
  static nmethod* new_nmethod(const methodHandle& method,
                              int compile_id,
                              int entry_bci,
                              CodeOffsets* offsets,
                              int orig_pc_offset,
                              DebugInformationRecorder* recorder,
                              Dependencies* dependencies,
                              CodeBuffer *code_buffer,
                              int frame_size,
                              OopMapSet* oop_maps,
                              ExceptionHandlerTable* handler_table,
                              ImplicitExceptionTable* nul_chk_table,
                              AbstractCompiler* compiler,
                              int comp_level
#if INCLUDE_JVMCI
                              , jweak installed_code = NULL,
                              jweak speculation_log = NULL
#endif
                              );
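
  // Typical creation (sketch; the argument values are illustrative and come
  // from the compiler's CodeBuffer, DebugInformationRecorder, etc.):
  //
  //   nmethod* nm = nmethod::new_nmethod(method, compile_id, entry_bci,
  //                                      offsets, orig_pc_offset, recorder,
  //                                      dependencies, code_buffer, frame_size,
  //                                      oop_maps, handler_table, nul_chk_table,
  //                                      compiler, comp_level);
  //   // nm == NULL means the code cache could not accommodate the method.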

  // Only used for unit tests.
  nmethod()
    : CompiledMethod(),
      _is_unloading_state(0),
      _native_receiver_sp_offset(in_ByteSize(-1)),
      _native_basic_lock_sp_offset(in_ByteSize(-1)) {}


  static nmethod* new_native_nmethod(const methodHandle& method,
                                     int compile_id,
                                     CodeBuffer *code_buffer,
                                     int vep_offset,
                                     int frame_complete,
                                     int frame_size,
                                     ByteSize receiver_sp_offset,
                                     ByteSize basic_lock_sp_offset,
                                     OopMapSet* oop_maps);

  // type info
  bool is_nmethod() const    { return true; }
  bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }

  // boundaries for different parts
  address consts_begin        () const { return header_begin() + _consts_offset; }
  address consts_end          () const { return code_begin(); }
  address stub_begin          () const { return header_begin() + _stub_offset; }
  address stub_end            () const { return header_begin() + _oops_offset; }
  address exception_begin     () const { return header_begin() + _exception_offset; }
  address unwind_handler_begin() const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
  oop*    oops_begin          () const { return (oop*) (header_begin() + _oops_offset); }
  oop*    oops_end            () const { return (oop*) (header_begin() + _metadata_offset); }

  Metadata** metadata_begin   () const { return (Metadata**) (header_begin() + _metadata_offset); }
  Metadata** metadata_end     () const { return (Metadata**) _scopes_data_begin; }

  address scopes_data_end     () const { return header_begin() + _scopes_pcs_offset; }
  PcDesc* scopes_pcs_begin    () const { return (PcDesc*) (header_begin() + _scopes_pcs_offset); }
  PcDesc* scopes_pcs_end      () const { return (PcDesc*) (header_begin() + _dependencies_offset); }
  address dependencies_begin  () const { return header_begin() + _dependencies_offset; }
  address dependencies_end    () const { return header_begin() + _handler_table_offset; }
  address handler_table_begin () const { return header_begin() + _handler_table_offset; }
  address handler_table_end   () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_begin () const { return header_begin() + _nul_chk_table_offset; }
  address nul_chk_table_end   () const { return header_begin() + _nmethod_end_offset; }
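
  // Layout note (informal): the parts above are laid out back to back, so
  // one part's end is the next part's begin, e.g. stub_end() == (address)
  // oops_begin() and handler_table_end() == nul_chk_table_begin(); the size
  // accessors below are simple differences of these boundaries.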

  // Sizes
  int oops_size        () const { return (address) oops_end() - (address) oops_begin(); }
  int metadata_size    () const { return (address) metadata_end() - (address) metadata_begin(); }
  int dependencies_size() const { return dependencies_end() - dependencies_begin(); }

  int oops_count() const     { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; }
  int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }

  int total_size() const;

  void dec_hotness_counter()        { _hotness_counter--; }
  void set_hotness_counter(int val) { _hotness_counter = val; }
  int  hotness_counter() const      { return _hotness_counter; }

  // Containment
  bool oops_contains        (oop*       addr) const { return oops_begin()        <= addr && addr < oops_end(); }
  bool metadata_contains    (Metadata** addr) const { return metadata_begin()    <= addr && addr < metadata_end(); }
  bool scopes_data_contains (address    addr) const { return scopes_data_begin() <= addr && addr < scopes_data_end(); }
  bool scopes_pcs_contains  (PcDesc*    addr) const { return scopes_pcs_begin()  <= addr && addr < scopes_pcs_end(); }

  // entry points
  address entry_point() const          { return _entry_point; }          // normal entry point
  address verified_entry_point() const { return _verified_entry_point; } // if klass is correct

  // flag accessing and manipulation
  bool is_not_installed() const { return _state == not_installed; }
  bool is_in_use() const        { return _state <= in_use; }
  bool is_alive() const         { return _state < zombie; }
  bool is_not_entrant() const   { return _state == not_entrant; }
  bool is_zombie() const        { return _state == zombie; }
  bool is_unloaded() const      { return _state == unloaded; }

  void clear_unloading_state();
  virtual bool is_unloading();
  virtual void do_unloading(bool unloading_occurred);

#if INCLUDE_RTM_OPT
  // rtm state accessing and manipulating
  RTMState rtm_state() const         { return _rtm_state; }
  void set_rtm_state(RTMState state) { _rtm_state = state; }
#endif

  void make_in_use() { _state = in_use; }
  // Make the nmethod non-entrant. The nmethod will continue to be
  // alive. It is used when an uncommon trap happens. Returns true
  // if this thread changed the state of the nmethod or false if
  // another thread performed the transition.
  bool make_not_entrant() {
    assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
    return make_not_entrant_or_zombie(not_entrant);
  }
  bool make_not_used() { return make_not_entrant(); }
  bool make_zombie()   { return make_not_entrant_or_zombie(zombie); }

  // used by jvmti to track if the unload event has been reported
  bool unload_reported()     { return _unload_reported; }
  void set_unload_reported() { _unload_reported = true; }

  int get_state() const {
    return _state;
  }

  void make_unloaded();

  bool has_dependencies() { return dependencies_size() != 0; }
  void flush_dependencies(bool delete_immediately);
  bool has_flushed_dependencies() { return _has_flushed_dependencies; }
  void set_has_flushed_dependencies() {
    assert(!has_flushed_dependencies(), "should only happen once");
    _has_flushed_dependencies = 1;
  }

  int comp_level() const { return _comp_level; }

  // Support for oops in scopes and relocs:
  // Note: index 0 is reserved for null.
  oop  oop_at(int index) const;
  oop* oop_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= oops_count(), "must be a valid non-zero index");
    assert(!_oops_are_stale, "oops are stale");
    return &oops_begin()[index - 1];
  }
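
  // Example (illustrative): oop_at(1) designates oops_begin()[0], the first
  // slot of the embedded oop table; index 0 is never stored and always
  // denotes NULL.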

  // Support for meta data in scopes and relocs:
  // Note: index 0 is reserved for null.
  Metadata*  metadata_at(int index) const { return index == 0 ? NULL : *metadata_addr_at(index); }
  Metadata** metadata_addr_at(int index) const {  // for GC
    // relocation indexes are biased by 1 (because 0 is reserved)
    assert(index > 0 && index <= metadata_count(), "must be a valid non-zero index");
    return &metadata_begin()[index - 1];
  }

  void copy_values(GrowableArray<jobject>* oops);
  void copy_values(GrowableArray<Metadata*>* metadata);

  // Relocation support
 private:
  void fix_oop_relocations(address begin, address end, bool initialize_immediates);
  inline void initialize_immediate_oop(oop* dest, jobject handle);

 public:
  void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
  void fix_oop_relocations()                           { fix_oop_relocations(NULL, NULL, false); }

  // Scavengable oop support
  bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
 protected:
  enum { sl_on_list = 0x01, sl_marked = 0x10 };
  void set_on_scavenge_root_list()   { _scavenge_root_state = sl_on_list; }
  void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }
  // assertion-checking and pruning logic uses the bits of _scavenge_root_state
#ifndef PRODUCT
  void set_scavenge_root_marked()   { _scavenge_root_state |= sl_marked; }
  void clear_scavenge_root_marked() { _scavenge_root_state &= ~sl_marked; }
  bool scavenge_root_not_marked()   { return (_scavenge_root_state & ~sl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif //PRODUCT
  nmethod* scavenge_root_link() const         { return _scavenge_root_link; }
  void     set_scavenge_root_link(nmethod *n) { _scavenge_root_link = n; }

 public:

  // Sweeper support
  long stack_traversal_mark()           { return _stack_traversal_mark; }
  void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }

  // implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int     osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address osr_entry() const     { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void invalidate_osr_method();
  nmethod* osr_link() const     { return _osr_link; }
  void set_osr_link(nmethod *n) { _osr_link = n; }
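
  // Sketch of a walk over the per-klass OSR list that this link threads
  // (illustrative; the real lookup in InstanceKlass also filters on
  // comp_level and takes the appropriate lock):
  //
  //   for (nmethod* n = ik->osr_nmethods_head(); n != NULL; n = n->osr_link()) {
  //     if (n->osr_entry_bci() == bci) return n;
  //   }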

  // Verify calls to dead methods have been cleaned.
  void verify_clean_inline_caches();

  // unlink and deallocate this nmethod
  // Only NMethodSweeper class is expected to use this. NMethodSweeper is not
  // expected to use any other private methods/data in this class.

 protected:
  void flush();

 public:
  // When true is returned, it is unsafe to remove this nmethod even if
  // it is a zombie, since the VM or the ServiceThread might still be
  // using it.
  bool is_locked_by_vm() const { return _lock_count > 0; }

  // See comment at definition of _last_seen_on_stack
  void mark_as_seen_on_stack();
  bool can_convert_to_zombie();

  // Evolution support. We make old (discarded) compiled methods point to new Method*s.
  void set_method(Method* method) { _method = method; }

#if INCLUDE_JVMCI
  // Gets the InstalledCode object associated with this nmethod
  // which may be NULL if this nmethod was not compiled by JVMCI
  // or the weak reference has been cleared.
  oop jvmci_installed_code();

  // Copies the value of the name field in the InstalledCode
  // object (if any) associated with this nmethod into buf.
  // Returns the value of buf if it was updated otherwise NULL.
  char* jvmci_installed_code_name(char* buf, size_t buflen) const;

  // Updates the state of the InstalledCode (if any) associated with
  // this nmethod based on the current value of _state.
  void maybe_invalidate_installed_code();

  // Deoptimizes the nmethod (if any) in the address field of a given
  // InstalledCode object. The address field is zeroed upon return.
  static void invalidate_installed_code(Handle installed_code, TRAPS);

  // Gets the SpeculationLog object associated with this nmethod
  // which may be NULL if this nmethod was not compiled by JVMCI
  // or the weak reference has been cleared.
  oop speculation_log();

 private:
  // Deletes the weak reference (if any) to the InstalledCode object
  // associated with this nmethod.
  void clear_jvmci_installed_code();

  // Deletes the weak reference (if any) to the SpeculationLog object
  // associated with this nmethod.
  void clear_speculation_log();

 public:
#endif

 public:
  void oops_do(OopClosure* f) { oops_do(f, false); }
  void oops_do(OopClosure* f, bool allow_zombie);
  bool detect_scavenge_root_oops();
  void verify_scavenge_root_oops() PRODUCT_RETURN;

  bool test_set_oops_do_mark();
  static void oops_do_marking_prologue();
  static void oops_do_marking_epilogue();
  static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
  bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }
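
  // Typical use during a GC pause (sketch; the closure and the nmethod
  // discovery loop are illustrative):
  //
  //   nmethod::oops_do_marking_prologue();
  //   // for each nmethod nm found on the thread stacks:
  //   //   nm->oops_do(&closure);
  //   nmethod::oops_do_marking_epilogue();
  //
  // The mark link set via test_set_oops_do_mark() ensures each nmethod is
  // visited at most once per prologue/epilogue epoch.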

 private:
  ScopeDesc* scope_desc_in(address begin, address end);

  address* orig_pc_addr(const frame* fr);

 public:
  // copying of debugging information
  void copy_scopes_pcs(PcDesc* pcs, int count);
  void copy_scopes_data(address buffer, int size);

  // Accessor/mutator for the original pc of a frame before a frame was deopted.
  address get_original_pc(const frame* fr)             { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  // jvmti support:
  void post_compiled_method_load_event();
  jmethodID get_and_cache_jmethod_id();

  // verify operations
  void verify();
  void verify_scopes();
  void verify_interrupt_point(address interrupt_point);

  // printing support
  void print() const;
  void print_relocations()                    PRODUCT_RETURN;
  void print_pcs()                            PRODUCT_RETURN;
  void print_scopes()                         PRODUCT_RETURN;
  void print_dependencies()                   PRODUCT_RETURN;
  void print_value_on(outputStream* st) const PRODUCT_RETURN;
  void print_calls(outputStream* st)          PRODUCT_RETURN;
  void print_handler_table()                  PRODUCT_RETURN;
  void print_nul_chk_table()                  PRODUCT_RETURN;
  void print_recorded_oops()                  PRODUCT_RETURN;
  void print_recorded_metadata()              PRODUCT_RETURN;

  void maybe_print_nmethod(DirectiveSet* directive);
  void print_nmethod(bool print_code);

  // need to re-define this from CodeBlob else the overload hides it
  virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
  void print_on(outputStream* st, const char* msg) const;

  // Logging
  void log_identity(xmlStream* log) const;
  void log_new_nmethod() const;
  void log_state_change() const;

  // Prints block-level comments, including nmethod specific block labels:
  virtual void print_block_comment(outputStream* stream, address block_begin) const {
    print_nmethod_labels(stream, block_begin);
    CodeBlob::print_block_comment(stream, block_begin);
  }
  void print_nmethod_labels(outputStream* stream, address block_begin) const;

  // Prints a comment for one native instruction (reloc info, pc desc)
  void print_code_comment_on(outputStream* st, int column, address begin, address end);
  static void print_statistics() PRODUCT_RETURN;

  // Compiler task identification. Note that all OSR methods
  // are numbered in an independent sequence if CICountOSR is true,
  // and native method wrappers are also numbered independently if
  // CICountNative is true.
  virtual int compile_id() const { return _compile_id; }
  const char* compile_kind() const;

  // tells if any of this method's dependencies have been invalidated
  // (this is expensive!)
  static void check_all_dependencies(DepChange& changes);

  // tells if this compiled method is dependent on the given changes,
  // and the changes have invalidated it
  bool check_dependency_on(DepChange& changes);

  // Evolution support. Tells if this compiled method is dependent on any of
  // methods m() of class dependee, such that if m() in dependee is replaced,
  // this compiled method will have to be deoptimized.
  bool is_evol_dependent_on(Klass* dependee);

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee);
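
  // How the dependency checks are typically used when the class hierarchy
  // changes (sketch; the enclosing iteration and locking are illustrative,
  // and mark_for_deoptimization() is inherited from CompiledMethod):
  //
  //   // under CodeCache_lock, for each live nmethod nm:
  //   if (nm->check_dependency_on(changes)) {
  //     nm->mark_for_deoptimization();
  //   }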

  // is it ok to patch at address?
  bool is_patchable_at(address instr_address);

  // UseBiasedLocking support
  ByteSize native_receiver_sp_offset() {
    return _native_receiver_sp_offset;
  }
  ByteSize native_basic_lock_sp_offset() {
    return _native_basic_lock_sp_offset;
  }

  // support for code generation
  static int verified_entry_point_offset() { return offset_of(nmethod, _verified_entry_point); }
  static int osr_entry_point_offset()      { return offset_of(nmethod, _osr_entry_point); }
  static int state_offset()                { return offset_of(nmethod, _state); }

  virtual void metadata_do(void f(Metadata*));

  NativeCallWrapper* call_wrapper_at(address call) const;
  NativeCallWrapper* call_wrapper_before(address return_pc) const;
  address call_instruction_address(address pc) const;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const;
};

// Locks an nmethod so its code will not get removed and it will not
// be made into a zombie, even if it is a not_entrant method. After the
// nmethod becomes a zombie, if CompiledMethodUnload event processing
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
class nmethodLocker : public StackObj {
  CompiledMethod* _nm;

 public:

  // note: nm can be NULL
  // Only JvmtiDeferredEvent::compiled_method_unload_event()
  // should pass zombie_ok == true.
  static void lock_nmethod(CompiledMethod* nm, bool zombie_ok = false);
  static void unlock_nmethod(CompiledMethod* nm); // (ditto)

  nmethodLocker(address pc); // derive nm from pc
  nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
  nmethodLocker(CompiledMethod *nm) {
    _nm = nm;
    lock(_nm);
  }

  static void lock(CompiledMethod* method) {
    if (method == NULL) return;
    lock_nmethod(method);
  }

  static void unlock(CompiledMethod* method) {
    if (method == NULL) return;
    unlock_nmethod(method);
  }

  nmethodLocker() { _nm = NULL; }
  ~nmethodLocker() {
    unlock(_nm);
  }

  CompiledMethod* code() { return _nm; }
  void set_code(CompiledMethod* new_nm) {
    unlock(_nm); // note: This works even if _nm==new_nm.
    _nm = new_nm;
    lock(_nm);
  }
};

#endif // SHARE_VM_CODE_NMETHOD_HPP