/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _mark_for_deoptimization_status(not_marked), _method(method) {
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _mark_for_deoptimization_status(not_marked), _method(method) {
  init_defaults();
}

void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _unloading_clock            = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return OrderAccess::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches that are not removed by
        // concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
      return;
    }
  }
}

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems, as unlinking and deletion are separated by a global
  // handshake operation.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean the head; this is contended by concurrent inserts, which
        // both lazily clean the head and insert entries at it. If the CAS
        // fails, the operation is restarted.
        if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // It is impossible during cleanup to connect the next pointer to an
        // ExceptionCache that has not already been published before a safepoint
        // prior to the cleanup. Therefore, a release is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}
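
// A minimal usage sketch (illustrative only; the actual caller lives in the
// exception handling runtime, not in this file, and "compute_handler" is a
// hypothetical helper): the lock-free lookup is tried first, and only on a
// miss is the handler computed and published under the lock.
//
//   address handler = cm->handler_for_exception_and_pc(exception, ret_pc);
//   if (handler == NULL) {
//     handler = compute_handler(exception, ret_pc);  // hypothetical helper
//     cm->add_handler_for_exception_and_pc(exception, ret_pc, handler);
//   }
//
// Because reads are unlocked, two threads may race to insert the same entry;
// add_handler_for_exception_and_pc tolerates that by re-checking under the lock.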

// These methods are private, and are used to manipulate the exception cache
// directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// A method that knows how to preserve outgoing arguments at a call. It must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT compilers should be used, if present;
    // the bytecode can be inaccurate in that case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch(iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  ResourceMark rm;
  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT
// Check class_loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
   Klass* klass = NULL;
   if (md->is_klass()) {
     klass = ((Klass*)md);
   } else if (md->is_method()) {
     klass = ((Method*)md)->method_holder();
   } else if (md->is_methodData()) {
     klass = ((MethodData*)md)->method()->method_holder();
   } else {
     md->print();
     ShouldNotReachHere();
   }
   assert(klass->is_loader_alive(), "must be alive");
}
#endif // ASSERT


void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata which may
    // yet be marked below. (We check this further below).
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

unsigned char CompiledMethod::_global_unloading_clock = 0;

void CompiledMethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store(&_unloading_clock, unloading_clock);
}

unsigned char CompiledMethod::unloading_clock() {
  return OrderAccess::load_acquire(&_unloading_clock);
}
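
// A minimal sketch of the intended clock protocol (illustrative; the real
// driver is the GC's unloading walk, and "gc_visit" below is a hypothetical
// name): the global clock is bumped once per unloading cycle, each visited
// nmethod stamps itself with the current value, and a mismatch therefore
// means "not yet processed in this cycle" (see clean_if_nmethod_is_unloaded).
//
//   CompiledMethod::increase_unloading_clock();   // start of a cycle
//   ...
//   void gc_visit(CompiledMethod* cm) {           // hypothetical visitor
//     cm->do_unloading_parallel(is_alive, unloading_occurred);
//     cm->set_unloading_clock(CompiledMethod::global_unloading_clock());
//   }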


// static_stub_Relocations may have dangling references to
// nmethods so trim them out here.  Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
void CompiledMethod::clean_ic_stubs() {
#ifdef ASSERT
  address low_boundary = oops_reloc_begin();
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
#endif
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.

void CompiledMethod::do_unloading(BoolObjectClosure* is_alive) {
  // Make sure the oops are ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  address low_boundary = oops_reloc_begin();

  if (do_unloading_oops(low_boundary, is_alive)) {
    return;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci()) {
    return;
  }
#endif

  // Cleanup of the exception cache and inline caches happens
  // after all the unloaded methods are found.
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool parallel, bool clean_all) {
  // OK to look up references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    if (parallel && nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean(from->is_alive());
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }

  return false;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool parallel, bool clean_all = false) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, parallel, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool parallel, bool clean_all = false) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, parallel, clean_all);
}

bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  address low_boundary = oops_reloc_begin();

  if (do_unloading_oops(low_boundary, is_alive)) {
    return false;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci()) {
    return false;
  }
#endif

  return unload_nmethod_caches(/*parallel*/true, unloading_occurred);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called in parallel (currently only by G1), or serially after all
// nmethods have been unloaded. Returns postponed=true in the parallel case
// for inline caches found that point to nmethods that have not yet been
// visited during the do_unloading walk.
bool CompiledMethod::unload_nmethod_caches(bool parallel, bool unloading_occurred) {

  // The exception cache only needs to be cleaned if unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  bool postponed = cleanup_inline_caches_impl(parallel, unloading_occurred, /*clean_all*/false);

  // All static stubs need to be cleaned.
  clean_ic_stubs();

  // Check that the metadata embedded in the nmethod is alive
  DEBUG_ONLY(metadata_do(check_class));

  return postponed;
}

// Called to clean up after class unloading for live nmethods, and from the
// sweeper for all methods.
bool CompiledMethod::cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all) {
  assert_locked_or_safepoint(CompiledIC_lock);
  bool postponed = false;
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, parallel, clean_all);
      break;

    case relocInfo::oop_type:
      // handled by do_unloading_oops already
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.

    default:
      break;
    }
  }

  return postponed;
}

void CompiledMethod::do_unloading_parallel_postponed() {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  RelocIterator iter(this, oops_reloc_begin());
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, true);
      break;

    default:
      break;
    }
  }
}

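// A sketch of the two-phase flow implied by the functions above (illustrative;
// the real driver is GC code outside this file, and the iteration shape here
// is assumed): each worker first walks its share of nmethods with
// do_unloading_parallel(), which postpones inline caches whose target nmethod
// has not been visited yet; once every nmethod has been visited, the postponed
// inline caches are cleaned with do_unloading_parallel_postponed().
//
//   bool postponed = false;
//   for (each CompiledMethod* cm in this worker's share)   // hypothetical loop
//     postponed |= cm->do_unloading_parallel(is_alive, unloading_occurred);
//   ...                                                    // wait for all workers
//   if (postponed)
//     for (each CompiledMethod* cm in this worker's share)
//       cm->do_unloading_parallel_postponed();
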
// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun),
// was found not to be inherently safe. There is a chance that fields are seen
// that are not properly initialized. This happens despite the fact that
// nmethods_do() asserts that the CodeCache_lock is held.
// To bundle knowledge about the necessary checks in one place, this function
// was introduced. It is not claimed that these checks are sufficient, but they
// were found to be necessary.
bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
  Method* method = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
  return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
         !nm->is_zombie() && !nm->is_not_installed() &&
         os::is_readable_pointer(method) &&
         os::is_readable_pointer(method->constants()) &&
         os::is_readable_pointer(method->signature());
}
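
// A minimal usage sketch (hypothetical caller; "print_nmethod_info" is an
// assumed name): code that walks nmethods guards every access with this
// predicate before dereferencing any fields.
//
//   void print_nmethod_info(nmethod* nm) {
//     if (!CompiledMethod::nmethod_access_is_safe(nm)) {
//       return;  // skip: nm may be partially initialized or a zombie
//     }
//     tty->print_cr("%s", nm->method()->name_and_sig_as_C_string());
//   }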