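  // Walk the relocation entries in [begin, end).  For every oop relocation,
  // optionally (re)initialize immediate oops in place, then refresh the
  // instruction bits that encode the oop.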
  RelocIterator iter(this, begin, end);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (initialize_immediates && reloc->oop_is_immediate()) {
        oop* dest = reloc->oop_addr();
        initialize_immediate_oop(dest, (jobject) *dest);
      }
      // Refresh the oop-related bits of this instruction.
      reloc->fix_oop_relocation();
    }

    // There must not be any interfering patches or breakpoints.
    assert(!(iter.type() == relocInfo::breakpoint_type
             && iter.breakpoint_reloc()->active()),
           "no active breakpoint");
  }
}

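// Check that every non-immediate oop relocation still agrees with the oop it
// refers to.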
void nmethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values.
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}

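// Build the ScopeDesc for the given pc; a matching PcDesc must be present.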
ScopeDesc* nmethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(),
                       pd->return_oop());
}

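// Clear every inline cache in this nmethod.  Only legal at a safepoint (see
// the assert below); zombie nmethods are skipped.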
void nmethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

  // On fall through, another racing thread marked this nmethod before we did.
  return true;
}

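// Start a round of oops_do marking by installing the list-head sentinel.
// The head must still be NULL from the previous round.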
void nmethod::oops_do_marking_prologue() {
  NOT_PRODUCT(if (TraceScavenge) tty->print_cr("[oops_do_marking_prologue"));
  assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
  // We use cmpxchg_ptr instead of regular assignment here because the user
  // may fork a bunch of threads, and we need them all to see the same state.
  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
  guarantee(observed == NULL, "no races in this sequential code");
}

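// Finish a round of oops_do marking: unlink every nmethod gathered on the
// mark list, refresh its oop relocations, and reset the list head so the
// next prologue starts from NULL again.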
void nmethod::oops_do_marking_epilogue() {
  assert(_oops_do_mark_nmethods != NULL, "must not call oops_do_marking_epilogue twice in a row");
  nmethod* cur = _oops_do_mark_nmethods;
  while (cur != NMETHOD_SENTINEL) {
    assert(cur != NULL, "not NULL-terminated");
    nmethod* next = cur->_oops_do_mark_link;
    cur->_oops_do_mark_link = NULL;
    cur->fix_oop_relocations();
    NOT_PRODUCT(if (TraceScavenge) cur->print_on(tty, "oops_do, unmark\n"));
    cur = next;
  }
  void* required = _oops_do_mark_nmethods;
  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
  guarantee(observed == required, "no races in this sequential code");
  NOT_PRODUCT(if (TraceScavenge) tty->print_cr("oops_do_marking_epilogue]"));
}

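// A GC pass is expected to bracket its nmethod iteration roughly like this
// (illustrative sketch only; the real call sites are outside this excerpt):
//
//   nmethod::oops_do_marking_prologue();
//   // ... apply an OopClosure to each nmethod of interest via its oops_do ...
//   nmethod::oops_do_marking_epilogue();

// OopClosure that records whether any visited oop refers to a scavengable
// object.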
class DetectScavengeRoot: public OopClosure {
  bool _detected_scavenge_root;
public:
  DetectScavengeRoot() : _detected_scavenge_root(false)
  { NOT_PRODUCT(_print_nm = NULL); }
  bool detected_scavenge_root() { return _detected_scavenge_root; }
  virtual void do_oop(oop* p) {
    if ((*p) != NULL && (*p)->is_scavengable()) {
      NOT_PRODUCT(maybe_print(p));
      _detected_scavenge_root = true;
    }