/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _mark_for_deoptimization_status(not_marked), _method(method) {
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _mark_for_deoptimization_status(not_marked), _method(method) {
  init_defaults();
}

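// Initialize the per-method flag bits and the unloading clock to their default
// (zero) values.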
void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _unloading_clock            = 0;
}

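// Returns true if return_pc is the return address of a call made through a
// MethodHandle invoke in this method, as recorded in the PcDesc for that PC.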
bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

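// Load the head of the exception cache list with acquire semantics, so that the
// fields of an entry published by a concurrent insert (see
// add_exception_cache_entry) are visible before the list is walked.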
ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return OrderAccess::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  assert(new_entry != NULL,"Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that new
        // next pointers always point at live ExceptionCaches that are not removed
        // by concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
      return;
    }
  }
}

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems, as unlinking and deletion are separated by a global
  // handshake operation.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean the head; this is contended by concurrent inserts that
        // both lazily clean the head and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // During cleanup it is impossible to connect the next pointer to an
        // ExceptionCache that was not already published before a safepoint
        // prior to the cleanup. Therefore, a release store is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception,pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
    target_entry = new ExceptionCache(exception,pc,handler);
    add_exception_cache_entry(target_entry);
  }
}

// These are the private methods used to manipulate the exception cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

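// Safepoint poll queries: scan the relocation info covering exactly the given pc
// to decide whether it is a poll-return site (or, below, any kind of poll site).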
bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


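// Build a ScopeDesc from the debug information recorded for pc. scope_desc_at
// requires an exact PcDesc match for pc, while scope_desc_near accepts the
// nearest PcDesc when there is no exact match.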
ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

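// Count (and, with -XX:+TraceCompiledIC, print) the CompiledICHolders referenced
// from virtual call sites in this method; the count is returned for verification
// by the caller.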
int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at a call. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in such a case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

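// Look up the Method* that the JIT attached to the call site at call_instr, as
// recorded in the call relocation; returns NULL if no method is attached.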
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch(iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

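// Reset every inline cache in this method to its initial (clean) state.
// Only done at a safepoint; zombie methods are skipped.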
void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  ResourceMark rm;
  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT
// Check that the class loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
   Klass* klass = NULL;
   if (md->is_klass()) {
     klass = ((Klass*)md);
   } else if (md->is_method()) {
     klass = ((Method*)md)->method_holder();
   } else if (md->is_methodData()) {
     klass = ((MethodData*)md)->method()->method_holder();
   } else {
     md->print();
     ShouldNotReachHere();
   }
   assert(klass->is_loader_alive(), "must be alive");
}
#endif // ASSERT


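// Clean an inline cache if the metadata it caches (a CompiledICHolder, Klass or
// Method) belongs to a class loader that is no longer alive; otherwise leave it
// untouched.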
void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata which may
    // yet be marked below. (We check this further below).
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

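// The global unloading clock identifies the current class-unloading pass. nmethods
// are stamped (via set_unloading_clock) with the clock value of the pass that
// processed them; a stamp that differs from the global clock means the nmethod has
// not been visited yet in the current pass (see clean_if_nmethod_is_unloaded below).
// The value 0 is reserved for freshly allocated nmethods.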
unsigned char CompiledMethod::_global_unloading_clock = 0;

void CompiledMethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store(&_unloading_clock, unloading_clock);
}

unsigned char CompiledMethod::unloading_clock() {
  return OrderAccess::load_acquire(&_unloading_clock);
}


// static_stub_Relocations may have dangling references to
// nmethods so trim them out here.  Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
void CompiledMethod::clean_ic_stubs() {
#ifdef ASSERT
  address low_boundary = oops_reloc_begin();
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
#endif
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.

void CompiledMethod::do_unloading(BoolObjectClosure* is_alive) {
  // Make sure the oop's ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  address low_boundary = oops_reloc_begin();

  if (do_unloading_oops(low_boundary, is_alive)) {
    return;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci()) {
    return;
  }
#endif

  // Cleanup of the exception cache and inline caches happens
  // after all the unloaded methods have been found.
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool parallel, bool clean_all) {
  // OK to look up references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    if (parallel && nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean(from->is_alive());
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }

  return false;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool parallel, bool clean_all = false) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, parallel, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool parallel, bool clean_all = false) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, parallel, clean_all);
}

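// First pass of parallel class unloading: unload oops (and, when JVMCI is included,
// JVMCI state) and then clean this nmethod's caches. Returns true if some inline
// caches had to be postponed because their target nmethod has not been visited yet
// in this pass.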
bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oop's ready to receive visitors
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  address low_boundary = oops_reloc_begin();

  if (do_unloading_oops(low_boundary, is_alive)) {
    return false;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci()) {
    return false;
  }
#endif

  return unload_nmethod_caches(/*parallel*/true, unloading_occurred);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded.  Returns postponed=true in the parallel case for
// inline caches found that point to nmethods that have not yet been visited
// during the do_unloading walk.
bool CompiledMethod::unload_nmethod_caches(bool parallel, bool unloading_occurred) {

  // The exception cache only needs to be cleaned if unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  bool postponed = cleanup_inline_caches_impl(parallel, unloading_occurred, /*clean_all*/false);

  // All static stubs need to be cleaned.
  clean_ic_stubs();

  // Check that the metadata embedded in the nmethod is alive
  DEBUG_ONLY(metadata_do(check_class));

  return postponed;
}

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
bool CompiledMethod::cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all) {
  assert_locked_or_safepoint(CompiledIC_lock);
  bool postponed = false;
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // refers to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, parallel, clean_all);
      break;

    case relocInfo::oop_type:
      // handled by do_unloading_oops already
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.

    default:
      break;
    }
  }

  return postponed;
}

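// Second pass of parallel class unloading: revisit the call sites whose cleaning was
// postponed above. By the time this runs, the target nmethods are expected to have
// been stamped with the current unloading clock, so the cleaning can complete.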
void CompiledMethod::do_unloading_parallel_postponed() {
  ResourceMark rm;

  // Make sure the oop's ready to receive visitors
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  RelocIterator iter(this, oops_reloc_begin());
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, true);
      break;

    default:
      break;
    }
  }
}

// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun),
// was found not to be inherently safe. There is a chance that fields are seen
// which are not properly initialized. This happens despite the fact that
// nmethods_do() asserts the CodeCache_lock to be held.
// To bundle knowledge about the necessary checks in one place, this function was
// introduced. It is not claimed that these checks are sufficient, but they were
// found to be necessary.
bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
  Method* method = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
  return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
         !nm->is_zombie() && !nm->is_not_installed() &&
         os::is_readable_pointer(method) &&
         os::is_readable_pointer(method->constants()) &&
         os::is_readable_pointer(method->signature());
}