597 void flush();
598
599 public:
600 // When true is returned, it is unsafe to remove this nmethod even if
601 // it is a zombie, since the VM or the ServiceThread might still be
602 // using it.
603 bool is_locked_by_vm() const { return _lock_count >0; }
604
605 // See comment at definition of _last_seen_on_stack
606 void mark_as_seen_on_stack();
607 bool can_not_entrant_be_converted();
608
609 // Evolution support. We make old (discarded) compiled methods point to new Method*s.
610 void set_method(Method* method) { _method = method; }
611
612 // GC support
613 void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
614 // The parallel versions are used by G1.
615 bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
616 void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
617 // Unload an nmethod if the *root object is dead.
618 bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
619
620 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
621 OopClosure* f);
622 void oops_do(OopClosure* f) { oops_do(f, false); }
623 void oops_do(OopClosure* f, bool allow_zombie);
624 bool detect_scavenge_root_oops();
625 void verify_scavenge_root_oops() PRODUCT_RETURN;
626
627 bool test_set_oops_do_mark();
628 static void oops_do_marking_prologue();
629 static void oops_do_marking_epilogue();
630 static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
631 bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }
632
633 // ScopeDesc for an instruction
634 ScopeDesc* scope_desc_at(address pc);
635
636 private:
637 ScopeDesc* scope_desc_in(address begin, address end);
638
639 address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); } // slot holding the original PC, at _orig_pc_offset from the frame's unextended SP
|
597 void flush();
598
599 public:
600 // When true is returned, it is unsafe to remove this nmethod even if
601 // it is a zombie, since the VM or the ServiceThread might still be
602 // using it.
603 bool is_locked_by_vm() const { return _lock_count >0; }
604
605 // See comment at definition of _last_seen_on_stack
606 void mark_as_seen_on_stack();
607 bool can_not_entrant_be_converted();
608
609 // Evolution support. We make old (discarded) compiled methods point to new Method*s.
610 void set_method(Method* method) { _method = method; }
611
612 // GC support
613 void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
614 // The parallel versions are used by G1.
615 bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
616 void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
617
618 private:
619 // Unload an nmethod if the *root object is dead.
620 bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
621 bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
622
623 void mark_metadata_on_stack_at(RelocIterator* iter_at_metadata);
624 void mark_metadata_on_stack_non_relocs();
625
626 public:
627 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
628 OopClosure* f);
629 void oops_do(OopClosure* f) { oops_do(f, false); }
630 void oops_do(OopClosure* f, bool allow_zombie);
631 bool detect_scavenge_root_oops();
632 void verify_scavenge_root_oops() PRODUCT_RETURN;
633
634 bool test_set_oops_do_mark();
635 static void oops_do_marking_prologue();
636 static void oops_do_marking_epilogue();
637 static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
638 bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }
639
640 // ScopeDesc for an instruction
641 ScopeDesc* scope_desc_at(address pc);
642
643 private:
644 ScopeDesc* scope_desc_in(address begin, address end);
645
646 address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); } // slot holding the original PC, at _orig_pc_offset from the frame's unextended SP
|