#ifndef SHARE_VM_CODE_NMETHOD_HPP
#define SHARE_VM_CODE_NMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"
#include "runtime/orderAccess.hpp"
class DirectiveSet;

// This class is used internally by nmethods to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  // _count is written under a lock but read by lock-free readers, so the
  // accesses that publish or consume entries use release/acquire semantics.
  volatile int _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(), ""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(), ""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _handler[index] = a; }
  // The acquire pairs with the release in increment_count(): a reader that
  // observes a count of n is guaranteed to see entries [0, n) fully written.
  int     count()                              { return OrderAccess::load_acquire(&_count); }
  // Only called under a lock, but there may be concurrent lock-free readers.
  void    increment_count()                    { OrderAccess::release_store(&_count, _count + 1); }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass*  exception_type()             { return _exception_type; }
  ExceptionCache* next()               { return _next; }
  void    set_next(ExceptionCache* ec) { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};
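
// A minimal sketch of the intended ExceptionCache access pattern
// (illustration only; the real logic lives in nmethod.cpp):
//
//   // Lock-free reader:
//   address handler = ec->test_address(pc);  // scans entries [0, count())
//
//   // Writer, under a lock:
//   //   fill _pc[n] / _handler[n] for the new slot n == count(), then
//   //   increment_count() release-publishes the slot to readers.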

// Cache of PcDescs found in earlier inquiries.
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
  // ... (elided; the lines below are members of class nmethod) ...

  // nmethod flushing lock. If non-zero, the nmethod is not removed and is
  // not made into a zombie. However, once the nmethod is made into a
  // zombie, it is locked one final time if CompiledMethodUnload event
  // processing needs to be done.
  volatile jint _lock_count;
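
  // A sketch of how nmethodLocker is expected to use this lock
  // (illustration; see nmethodLocker in nmethod.cpp/.hpp):
  //
  //   Atomic::inc(&nm->_lock_count);  // pin: the nmethod cannot be flushed
  //   ... use the nmethod safely ...
  //   Atomic::dec(&nm->_lock_count);  // unpin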

  // not_entrant method removal. Each mark_sweep pass updates this mark to
  // the current sweep invocation count if the method is seen on the stack.
  // A not_entrant method can be removed when there are no more activations,
  // i.e., when _stack_traversal_mark is less than the current sweep
  // traversal index.
  long _stack_traversal_mark;
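
  // For example (hypothetical sweeper-side check):
  //   if (nm->stack_traversal_mark() < current_traversal_index) {
  //     // no activations were seen this pass; nm may be removed
  //   }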

  // The _hotness_counter indicates the hotness of a method. The higher the
  // value, the hotter the method. The hotness counter of an nmethod is set
  // to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
  // is found active during stack scanning (mark_active_nmethods()). The
  // hotness counter is decreased (by 1) during sweeping.
  int _hotness_counter;
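
  // Worked example: with a 240M ReservedCodeCacheSize the counter is reset
  // to (240 * 2) = 480 on each stack scan, so a method can survive roughly
  // 480 sweep passes without being seen on a stack before it looks cold.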

  // Head of a singly linked list of ExceptionCache entries, read by
  // lock-free readers; writers publish a new head with a release store
  // (see release_set_exception_cache() below).
  ExceptionCache* volatile _exception_cache;
  PcDescCache _pc_desc_cache;

  // These are used for compiled synchronized native methods to locate the
  // owner and stack slot for the BasicLock so that we can properly revoke
  // the bias of the owner if necessary. They are needed because there is
  // no debug information for compiled native wrappers and the oop maps are
  // insufficient to allow frame::retrieve_receiver() to work. Currently
  // they are expected to be byte offsets from the Java stack pointer for
  // maximum code sharing between platforms. Note that currently biased
  // locking will never cause Class instances to be biased but this code
  // handles the static synchronized case as well.
  // JVMTI's GetLocalInstance() also uses these offsets to find the
  // receiver for non-static native wrapper frames.
  ByteSize _native_receiver_sp_offset;
  ByteSize _native_basic_lock_sp_offset;
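
  // A sketch of how the receiver might be recovered from a native wrapper
  // frame using these offsets (illustrative; the accessor name
  // native_receiver_sp_offset() is assumed, and the real code is in
  // frame.cpp):
  //
  //   oop* addr = (oop*)((address)fr.unextended_sp() +
  //                      in_bytes(nm->native_receiver_sp_offset()));
  //   oop receiver = *addr;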

  friend class nmethodLocker;

  // For native wrappers
  // ... (elided) ...

  bool oops_contains         (oop* addr)       const { return oops_begin()          <= addr && addr < oops_end(); }
  bool metadata_contains     (Metadata** addr) const { return metadata_begin()      <= addr && addr < metadata_end(); }
  bool scopes_data_contains  (address addr)    const { return scopes_data_begin()   <= addr && addr < scopes_data_end(); }
  bool scopes_pcs_contains   (PcDesc* addr)    const { return scopes_pcs_begin()    <= addr && addr < scopes_pcs_end(); }
  bool handler_table_contains(address addr)    const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  bool nul_chk_table_contains(address addr)    const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }

  // Entry points
  address entry_point() const          { return _entry_point;          } // normal entry point
  address verified_entry_point() const { return _verified_entry_point; } // if klass is correct

  enum { in_use      = 0,   // executable nmethod
         not_entrant = 1,   // marked for deoptimization but activations may still exist,
                            // will be transformed to zombie when all activations are gone
         zombie      = 2,   // no activations exist, nmethod is ready for purge
         unloaded    = 3 }; // there should be no activations, should not be called,
                            // will be transformed to zombie immediately
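
  // Rough sketch of the transitions implied by the comments above
  // (not an exhaustive diagram):
  //
  //   in_use --> not_entrant --> zombie --> (flushed)
  //      \                         ^
  //       `--> unloaded -----------'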

  // flag accessing and manipulation
  bool is_in_use() const      { return _state == in_use; }
  // Read _state only once so the result is consistent even if the state
  // changes concurrently between the two comparisons.
  bool is_alive() const       { unsigned char s = _state; return s == in_use || s == not_entrant; }
  bool is_not_entrant() const { return _state == not_entrant; }
  bool is_zombie() const      { return _state == zombie; }
  bool is_unloaded() const    { return _state == unloaded; }

  // Returns a string version of the nmethod state.
  const char* state() const {
    switch(_state) {
      case in_use:      return "in use";
      case not_entrant: return "not_entrant";
      case zombie:      return "zombie";
      case unloaded:    return "unloaded";
      default:
        fatal("unexpected nmethod state: %d", _state);
        return NULL;
    }
  }

#if INCLUDE_RTM_OPT
  // RTM state accessing and manipulating
  RTMState rtm_state() const { return _rtm_state; }
  // ... (elided) ...

  void clear_on_scavenge_root_list() { _scavenge_root_state = 0; }

  // Assertion-checking and pruning logic uses the bits of _scavenge_root_state.
#ifndef PRODUCT
  void set_scavenge_root_marked()   { _scavenge_root_state |=  sl_marked; }
  void clear_scavenge_root_marked() { _scavenge_root_state &= ~sl_marked; }
  bool scavenge_root_not_marked()   { return (_scavenge_root_state & ~sl_on_list) == 0; }
  // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
#endif //PRODUCT
  nmethod* scavenge_root_link() const     { return _scavenge_root_link; }
  void set_scavenge_root_link(nmethod* n) { _scavenge_root_link = n; }

 public:

  // Sweeper support
  long stack_traversal_mark()           { return _stack_traversal_mark; }
  void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }

  // Exception cache support
  // Note: _exception_cache may be read concurrently by lock-free readers;
  // a new cache head must be published with release_set_exception_cache().
  ExceptionCache* exception_cache() const      { return _exception_cache; }
  void set_exception_cache(ExceptionCache* ec) { _exception_cache = ec; }
  void release_set_exception_cache(ExceptionCache* ec) { OrderAccess::release_store_ptr(&_exception_cache, ec); }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache(BoolObjectClosure* is_alive);
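
  // Expected lookup-then-fill pattern at exception dispatch time (a sketch;
  // the real callers live in the runtime's exception handling code):
  //
  //   address handler = nm->handler_for_exception_and_pc(exception, pc);
  //   if (handler == NULL) {
  //     handler = ...;  // compute via the nmethod's exception handler table
  //     nm->add_handler_for_exception_and_pc(exception, pc, handler);
  //   }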

  // Implicit exceptions support
  address continuation_for_implicit_exception(address pc);

  // On-stack replacement support
  int      osr_entry_bci() const { assert(is_osr_method(), "wrong kind of nmethod"); return _entry_bci; }
  address  osr_entry() const     { assert(is_osr_method(), "wrong kind of nmethod"); return _osr_entry_point; }
  void     invalidate_osr_method();
  nmethod* osr_link() const      { return _osr_link; }
  void     set_osr_link(nmethod* n) { _osr_link = n; }

  // Tells whether frames described by this nmethod can be deoptimized.
  // Note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  // Inline cache support
  void clear_inline_caches();