/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _is_unloading_state(0),
    _method(method)
{
  init_defaults();
  clear_unloading_state();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _is_unloading_state(0),
    _method(method)
{
  init_defaults();
  clear_unloading_state();
}

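// Initialize the flag fields of CompiledMethod to their default (cleared) values.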
void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
}

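// Returns true if return_pc is the return address of a call site that was
// compiled as a method handle invoke, according to the recorded PcDesc.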
bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

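// Load the head of the exception cache list with acquire semantics, so that
// a concurrently published entry is observed with fully initialized fields.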
ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return OrderAccess::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  assert(new_entry != NULL,"Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches that are not removed due
        // to concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
      return;
    }
  }
}

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at a time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems as unlinking and deletion is separated by a global
  // handshake operation.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean the head; this is contended by concurrent inserts, which
        // both lazily clean the head and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // It is impossible during cleanup to connect the next pointer to
        // an ExceptionCache that has not been published before a safepoint
        // prior to the cleanup. Therefore, release is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception,pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
    target_entry = new ExceptionCache(exception,pc,handler);
    add_exception_cache_entry(target_entry);
  }
}

// This method is private and is used to manipulate the exception cache
// directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

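// Returns true if a poll-return relocation is recorded at pc, i.e. pc is a
// safepoint poll at a method return.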
bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


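// Reconstruct the debug information (ScopeDesc) for the given pc; a PcDesc
// must have been recorded at exactly this pc.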
ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

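// Like scope_desc_at(), but tolerates a pc that does not exactly match a
// recorded PcDesc and uses the nearest one instead.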
ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers if their
  // location depends on whether the nmethod is entrant or not.
  assert(BarrierSet::barrier_set()->barrier_set_nmethod() == NULL, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

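// Count the virtual call sites whose inline cache currently goes through a
// CompiledICHolder (and print them when TraceCompiledIC is enabled).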
int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at a call. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT compilers should be used, if present.
    // Bytecode can be inaccurate in such a case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

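// Returns the Method* that the compiler attached to the call site at
// call_instr, or NULL if no method was attached.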
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch(iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;
  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT
// Check that the class loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
  Klass* klass = NULL;
  if (md->is_klass()) {
    klass = ((Klass*)md);
  } else if (md->is_method()) {
    klass = ((Method*)md)->method_holder();
  } else if (md->is_methodData()) {
    klass = ((MethodData*)md)->method()->method_holder();
  } else {
    md->print();
    ShouldNotReachHere();
  }
  assert(klass->is_loader_alive(), "must be alive");
}
#endif // ASSERT


void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata which may
    // yet be marked below. (We check this further below).
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

// static_stub_Relocations may have dangling references to
// nmethods so trim them out here.  Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
void CompiledMethod::clean_ic_stubs() {
#ifdef ASSERT
  address low_boundary = oops_reloc_begin();
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
#endif
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static void clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool clean_all) {
  // OK to look up references to zombies here.
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
      ic->set_to_clean(from->is_alive());
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }
}

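// Overloads that extract the appropriate call destination for inline caches
// and static calls before delegating to the template above.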
static void clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool clean_all) {
  clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}

static void clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool clean_all) {
  clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel (currently by G1) or after all
// nmethods are unloaded.
void CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // The exception cache only needs to be cleaned if unloading occurred.
  if (unloading_occurred) {
    clean_exception_cache();
  }

  cleanup_inline_caches_impl(unloading_occurred, false);

  // All static stubs need to be cleaned.
  clean_ic_stubs();

  // Check that the metadata embedded in the nmethod is alive
  DEBUG_ONLY(metadata_do(check_class));
}

// The _is_unloading_state encodes a tuple comprising the unloading cycle
// and the result of IsUnloadingBehaviour::is_unloading() for that cycle.
// This is the bit layout of the _is_unloading_state byte: 00000CCU
// CC refers to the cycle, which has 2 bits, and U refers to the result of
// IsUnloadingBehaviour::is_unloading() for that unloading cycle.

class IsUnloadingState: public AllStatic {
  static const uint8_t _is_unloading_mask = 1;
  static const uint8_t _is_unloading_shift = 0;
  static const uint8_t _unloading_cycle_mask = 6;
  static const uint8_t _unloading_cycle_shift = 1;

  static uint8_t set_is_unloading(uint8_t state, bool value) {
    state &= ~_is_unloading_mask;
    if (value) {
      state |= 1 << _is_unloading_shift;
    }
    assert(is_unloading(state) == value, "unexpected unloading cycle overflow");
    return state;
  }

  static uint8_t set_unloading_cycle(uint8_t state, uint8_t value) {
    state &= ~_unloading_cycle_mask;
    state |= value << _unloading_cycle_shift;
    assert(unloading_cycle(state) == value, "unexpected unloading cycle overflow");
    return state;
  }

public:
  static bool is_unloading(uint8_t state) { return (state & _is_unloading_mask) >> _is_unloading_shift == 1; }
  static uint8_t unloading_cycle(uint8_t state) { return (state & _unloading_cycle_mask) >> _unloading_cycle_shift; }

  static uint8_t create(bool is_unloading, uint8_t unloading_cycle) {
    uint8_t state = 0;
    state = set_is_unloading(state, is_unloading);
    state = set_unloading_cycle(state, unloading_cycle);
    return state;
  }
};

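// Returns whether this CompiledMethod refers to dead oops and is therefore
// unloading. The answer is computed at most once per unloading cycle and
// cached in _is_unloading_state.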
bool CompiledMethod::is_unloading() {
  uint8_t state = RawAccess<MO_RELAXED>::load(&_is_unloading_state);
  bool state_is_unloading = IsUnloadingState::is_unloading(state);
  uint8_t state_unloading_cycle = IsUnloadingState::unloading_cycle(state);
  if (state_is_unloading) {
    return true;
  }
  if (state_unloading_cycle == CodeCache::unloading_cycle()) {
    return false;
  }

  // The IsUnloadingBehaviour is responsible for checking if there are any dead
  // oops in the CompiledMethod, by calling oops_do on it.
  state_unloading_cycle = CodeCache::unloading_cycle();
  state_is_unloading = IsUnloadingBehaviour::current()->is_unloading(this);

  state = IsUnloadingState::create(state_is_unloading, state_unloading_cycle);

  RawAccess<MO_RELAXED>::store(&_is_unloading_state, state);

  return state_is_unloading;
}

void CompiledMethod::clear_unloading_state() {
  uint8_t state = IsUnloadingState::create(false, CodeCache::unloading_cycle());
  RawAccess<MO_RELAXED>::store(&_is_unloading_state, state);
}

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
void CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
      }

      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all);
      break;

    case relocInfo::oop_type:
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.

    default:
      break;
    }
  }
}

// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun),
// was found not to be inherently safe. There is a chance that fields are seen
// which are not properly initialized. This happens despite the fact that
// nmethods_do() asserts that the CodeCache_lock is held.
// To bundle knowledge about the necessary checks in one place, this function was
// introduced. It is not claimed that these checks are sufficient, but they were
// found to be necessary.
bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
  Method* method = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
  return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
         !nm->is_zombie() && !nm->is_not_installed() &&
         os::is_readable_pointer(method) &&
         os::is_readable_pointer(method->constants()) &&
         os::is_readable_pointer(method->signature());
}