  return Atomic::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  assert(new_entry != NULL,"Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

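  // Inserters are serialized by the ExceptionCache_lock (asserted above), but
  // the GC may concurrently unlink the head entry, so every head update below
  // is a CAS that retries on failure.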
  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches that are not removed by
        // concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(&_exception_cache, ec, next) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(&_exception_cache, ec, new_entry) == ec) {
      return;
    }
  }
}

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at a time, whether called during STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems, as unlinking and deletion are separated by a global
  // handshake operation.
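  // Single-pass sweep: unlink every entry whose exception Klass* is dead. The
  // head is detached with a CAS because inserters contend for it; interior
  // nodes are unlinked with a plain store, since this is the only thread that
  // writes next pointers.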
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean head; this is contended by concurrent inserts, which
        // both lazily clean the head and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(&_exception_cache, curr, next) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // It is impossible during cleanup to connect the next pointer to
        // an ExceptionCache that has not been published before a safepoint
        // prior to the cleanup. Therefore, release is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// ...

      // that violates the invariant that all metadata relocations have an oop
      // in the compiled method (due to deferred resolution and code patching).

      // This causes dead metadata to remain in compiled methods that are not
      // unloading. Unless these slippery metadata relocations of the static
      // stubs are at least cleared, subsequent class redefinition operations
      // will access potentially free memory, and JavaThread execution
      // concurrent to class unloading may call c2i adapters with dead methods.
      if (!is_in_static_stub) {
        // The first metadata relocation after a static stub relocation is the
        // metadata relocation of the static stub used to pass the Method* to
        // c2i adapters.
        continue;
      }
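      // This is that static stub metadata relocation: inspect it and clear
      // the Method* if its holder's class loader has died.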
      is_in_static_stub = false;
      metadata_Relocation* r = iter.metadata_reloc();
      Metadata* md = r->metadata_value();
      if (md != NULL && md->is_method()) {
        Method* method = static_cast<Method*>(md);
        if (!method->method_holder()->is_loader_alive()) {
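          // Clear the stale Method* with an atomic store so that threads
          // concurrently executing through the stub observe either the old
          // value or NULL, never a torn pointer.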
          Atomic::store(r->metadata_addr(), (Method*)NULL);

          if (!r->metadata_is_immediate()) {
            r->fix_metadata_relocation();
          }
        }
      }
      break;
    }

    default:
      break;
    }
  }

  return true;
}

// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun) was found
// to not be inherently safe. There is a chance that fields are seen which are not properly
// initialized. This happens despite the fact that nmethods_do() asserts the CodeCache_lock