0 /*
1  * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
2  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3  *
4  * This code is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License version 2 only, as
6  * published by the Free Software Foundation.
7  *
8  * This code is distributed in the hope that it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
11  * version 2 for more details (a copy is included in the LICENSE file that
12  * accompanied this code).
13  *
14  * You should have received a copy of the GNU General Public License version
15  * 2 along with this work; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17  *
18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19  * or visit www.oracle.com if you need additional information or have any
20  * questions.
21  *
22  */
23 
24 #include "precompiled.hpp"
25 #include "code/compiledIC.hpp"
26 #include "code/compiledMethod.inline.hpp"
27 #include "code/scopeDesc.hpp"
28 #include "code/codeCache.hpp"
29 #include "code/icBuffer.hpp"
30 #include "gc/shared/barrierSet.hpp"
31 #include "gc/shared/gcBehaviours.hpp"
32 #include "interpreter/bytecode.inline.hpp"
33 #include "logging/log.hpp"
34 #include "logging/logTag.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "oops/methodData.hpp"
37 #include "oops/method.inline.hpp"
38 #include "prims/methodHandles.hpp"
39 #include "runtime/handles.inline.hpp"
40 #include "runtime/mutexLocker.hpp"
41 
// Constructor used when the caller has already computed a CodeBlobLayout
// (no CodeBuffer involved). Deoptimization status starts as not_marked and
// GC-private data starts empty; flag bits are cleared in init_defaults().
CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}
52 
// Constructor used when the blob is laid out from a CodeBuffer; the layout is
// derived from this object's own address, the total size, the header size and
// the buffer contents. Otherwise identical to the layout-based constructor.
CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}
64 
65 void CompiledMethod::init_defaults() {
66   _has_unsafe_access          = 0;
67   _has_method_handle_invokes  = 0;
68   _lazy_critical_native       = 0;
69   _has_wide_vectors           = 0;
70 }
71 
72 bool CompiledMethod::is_method_handle_return(address return_pc) {
73   if (!has_method_handle_invokes())  return false;
74   PcDesc* pd = pc_desc_at(return_pc);
75   if (pd == NULL)
76     return false;
77   return pd->is_method_handle_invoke();
78 }
79 
80 // Returns a string version of the method state.
81 const char* CompiledMethod::state() const {
82   int state = get_state();
83   switch (state) {
84   case not_installed:
85     return "not installed";
86   case in_use:
87     return "in use";
88   case not_used:
89     return "not_used";
90   case not_entrant:
91     return "not_entrant";
92   case zombie:
93     return "zombie";
94   case unloaded:
95     return "unloaded";
96   default:
97     fatal("unexpected method state: %d", state);
98     return NULL;
99   }
100 }
101 
102 //-----------------------------------------------------------------------------
103 
// Acquire-load of the exception cache head. Pairs with the CAS-based
// publication in add_exception_cache_entry() so that an observed entry's
// fields (including its next pointer) are fully initialized when read.
ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return OrderAccess::load_acquire(&_exception_cache);
}
107 
// Insert new_entry at the head of the exception cache list. Insertion is
// serialized by ExceptionCache_lock, but the head may be CAS'ed concurrently
// by GC cleaning (clean_exception_cache), hence the retry loop.
void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  assert(new_entry != NULL,"Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches, that are not removed due
        // to concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
          // We won the race to unlink the dead head; hand it to the purge list.
          CodeCache::release_exception_cache(ec);
        }
        // Whether or not we won, re-examine the (possibly updated) head.
        continue;
      }
      // Head is live: link the new entry in front of it.
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    // Publish the new entry as the head; retry if a concurrent cleaner
    // moved the head since we read it.
    if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
      return;
    }
  }
}
140 
// Unlink and release every ExceptionCache entry whose Klass is no longer
// alive. Runs lock-free against concurrent inserts (which only touch the
// head), see the protocol description below.
void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems as unlinking and deletion is separated by a global
  // handshake operation.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean head; this is contended by concurrent inserts, that
        // both lazily clean the head, and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // It is impossible to during cleanup connect the next pointer to
        // an ExceptionCache that has not been published before a safepoint
        // prior to the cleanup. Therefore, release is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      // Defer actual deletion to the purge list (freed after a handshake).
      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}
189 
// Public access methods for the exception cache.
192 address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
193   // We never grab a lock to read the exception cache, so we may
194   // have false negatives. This is okay, as it can only happen during
195   // the first few exception lookups for a given nmethod.
196   ExceptionCache* ec = exception_cache_acquire();
197   while (ec != NULL) {
198     address ret_val;
199     if ((ret_val = ec->match(exception,pc)) != NULL) {
200       return ret_val;
201     }
202     ec = ec->next();
203   }
204   return NULL;
205 }
206 
207 void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
208   // There are potential race conditions during exception cache updates, so we
209   // must own the ExceptionCache_lock before doing ANY modifications. Because
210   // we don't lock during reads, it is possible to have several threads attempt
211   // to update the cache with the same data. We need to check for already inserted
212   // copies of the current data before adding it.
213 
214   MutexLocker ml(ExceptionCache_lock);
215   ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
216 
217   if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
218     target_entry = new ExceptionCache(exception,pc,handler);
219     add_exception_cache_entry(target_entry);
220   }
221 }
222 
// Private methods used to manipulate the exception cache directly.
226 ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
227   ExceptionCache* ec = exception_cache_acquire();
228   while (ec != NULL) {
229     if (ec->match_exception_with_space(exception)) {
230       return ec;
231     }
232     ec = ec->next();
233   }
234   return NULL;
235 }
236 
237 //-------------end of code for ExceptionCache--------------
238 
239 bool CompiledMethod::is_at_poll_return(address pc) {
240   RelocIterator iter(this, pc, pc+1);
241   while (iter.next()) {
242     if (iter.type() == relocInfo::poll_return_type)
243       return true;
244   }
245   return false;
246 }
247 
248 
249 bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
250   RelocIterator iter(this, pc, pc+1);
251   while (iter.next()) {
252     relocInfo::relocType t = iter.type();
253     if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
254       return true;
255   }
256   return false;
257 }
258 
259 void CompiledMethod::verify_oop_relocations() {
260   // Ensure sure that the code matches the current oop values
261   RelocIterator iter(this, NULL, NULL);
262   while (iter.next()) {
263     if (iter.type() == relocInfo::oop_type) {
264       oop_Relocation* reloc = iter.oop_reloc();
265       if (!reloc->oop_is_immediate()) {
266         reloc->verify_oop_relocation();
267       }
268     }
269   }
270 }
271 
272 
273 ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
274   PcDesc* pd = pc_desc_at(pc);
275   guarantee(pd != NULL, "scope must be present");
276   return new ScopeDesc(this, pd->scope_decode_offset(),
277                        pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
278                        pd->return_oop());
279 }
280 
281 ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
282   PcDesc* pd = pc_desc_near(pc);
283   guarantee(pd != NULL, "scope must be present");
284   return new ScopeDesc(this, pd->scope_decode_offset(),
285                        pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
286                        pd->return_oop());
287 }
288 
// Returns the lowest address at which oop relocations may be scanned.
// Oops before this boundary may have been overwritten by the not-entrant
// jump patch and must be skipped.
address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers, if their
  // location depend on whether the nmethod is entrant or not.
  assert(BarrierSet::barrier_set()->barrier_set_nmethod() == NULL, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    // Skip the patched jump at the verified entry of a not-in-use nmethod.
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}
318 
319 int CompiledMethod::verify_icholder_relocations() {
320   ResourceMark rm;
321   int count = 0;
322 
323   RelocIterator iter(this);
324   while(iter.next()) {
325     if (iter.type() == relocInfo::virtual_call_type) {
326       if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
327         CompiledIC *ic = CompiledIC_at(&iter);
328         if (TraceCompiledIC) {
329           tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
330           ic->print();
331         }
332         assert(ic->cached_icholder() != NULL, "must be non-NULL");
333         count++;
334       }
335     }
336   }
337 
338   return count;
339 }
340 
341 // Method that knows how to preserve outgoing arguments at call. This method must be
342 // called with a frame corresponding to a Java invoke
343 void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
344   if (method() != NULL && !method()->is_native()) {
345     address pc = fr.pc();
346     SimpleScopeDesc ssd(this, pc);
347     Bytecode_invoke call(ssd.method(), ssd.bci());
348     bool has_receiver = call.has_receiver();
349     bool has_appendix = call.has_appendix();
350     Symbol* signature = call.signature();
351 
352     // The method attached by JIT-compilers should be used, if present.
353     // Bytecode can be inaccurate in such case.
354     Method* callee = attached_method_before_pc(pc);
355     if (callee != NULL) {
356       has_receiver = !(callee->access_flags().is_static());
357       has_appendix = false;
358       signature = callee->signature();
359     }
360 
361     fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
362   }
363 }
364 
365 Method* CompiledMethod::attached_method(address call_instr) {
366   assert(code_contains(call_instr), "not part of the nmethod");
367   RelocIterator iter(this, call_instr, call_instr + 1);
368   while (iter.next()) {
369     if (iter.addr() == call_instr) {
370       switch(iter.type()) {
371         case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
372         case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
373         case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
374         default:                               break;
375       }
376     }
377   }
378   return NULL; // not found
379 }
380 
381 Method* CompiledMethod::attached_method_before_pc(address pc) {
382   if (NativeCall::is_call_before(pc)) {
383     NativeCall* ncall = nativeCall_before(pc);
384     return attached_method(ncall->instruction_address());
385   }
386   return NULL; // not a call
387 }
388 
389 void CompiledMethod::clear_inline_caches() {
390   assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
391   if (is_zombie()) {
392     return;
393   }
394 
395   RelocIterator iter(this);
396   while (iter.next()) {
397     iter.reloc()->clear_inline_cache();
398   }
399 }
400 
401 // Clear ICStubs of all compiled ICs
402 void CompiledMethod::clear_ic_stubs() {
403   assert(CompiledICLocker::is_safe(this), "mt unsafe call");
404   ResourceMark rm;
405   RelocIterator iter(this);
406   while(iter.next()) {
407     if (iter.type() == relocInfo::virtual_call_type) {
408       CompiledIC* ic = CompiledIC_at(&iter);
409       ic->clear_ic_stub();
410     }
411   }
412 }
413 
414 #ifdef ASSERT
415 // Check class_loader is alive for this bit of metadata.
416 static void check_class(Metadata* md) {
417    Klass* klass = NULL;
418    if (md->is_klass()) {
419      klass = ((Klass*)md);
420    } else if (md->is_method()) {
421      klass = ((Method*)md)->method_holder();
422    } else if (md->is_methodData()) {
423      klass = ((MethodData*)md)->method()->method_holder();
424    } else {
425      md->print();
426      ShouldNotReachHere();
427    }
428    assert(klass->is_loader_alive(), "must be alive");
429 }
430 #endif // ASSERT
431 
432 
433 bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
434   if (ic->is_clean()) {
435     return true;
436   }
437   if (ic->is_icholder_call()) {
438     // The only exception is compiledICHolder metdata which may
439     // yet be marked below. (We check this further below).
440     CompiledICHolder* cichk_metdata = ic->cached_icholder();
441 
442     if (cichk_metdata->is_loader_alive()) {
443       return true;
444     }
445   } else {
446     Metadata* ic_metdata = ic->cached_metadata();
447     if (ic_metdata != NULL) {
448       if (ic_metdata->is_klass()) {
449         if (((Klass*)ic_metdata)->is_loader_alive()) {
450           return true;
451         }
452       } else if (ic_metdata->is_method()) {
453         Method* method = (Method*)ic_metdata;
454         assert(!method->is_old(), "old method should have been cleaned");
455         if (method->method_holder()->is_loader_alive()) {
456           return true;
457         }
458       } else {
459         ShouldNotReachHere();
460       }
461     }
462   }
463 
464   return ic->set_to_clean();
465 }
466 
// static_stub_Relocations may have dangling references to
// nmethods so trim them out here.  Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
void CompiledMethod::clean_ic_stubs() {
#ifdef ASSERT
  // NOTE(review): the entire body is compiled only in ASSERT (debug) builds,
  // so in product builds this function is a no-op despite the comment above.
  // Confirm this is intentional.
  address low_boundary = oops_reloc_begin();
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    // Find call sites (opt_virtual or static) that no longer call into the
    // interpreter; their associated static stubs are the clearing candidates.
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      // Second pass: clear every static stub that belongs to this call site.
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
#endif
}
499 
500 // Clean references to unloaded nmethods at addr from this one, which is not unloaded.
501 template <class CompiledICorStaticCall>
502 static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
503                                          bool clean_all) {
504   // Ok, to lookup references to zombies here
505   CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
506   CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
507   if (nm != NULL) {
508     // Clean inline caches pointing to both zombie and not_entrant methods
509     if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
510       if (!ic->set_to_clean(from->is_alive())) {
511         return false;
512       }
513       assert(ic->is_clean(), "nmethod " PTR_FORMAT "not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
514     }
515   }
516   return true;
517 }
518 
// Overload for (opt-)virtual inline caches: the address to inspect is the
// IC's current call destination.
static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}
523 
// Overload for static calls: the address to inspect is the call's current
// destination.
static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}
528 
// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded. Returns false if inline-cache cleaning could not
// complete (set_to_clean() failed, e.g. out of transitional IC stubs) and
// the caller must refill and retry; returns true on success.
bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // Exception cache only needs to be called if unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  // Clean inline caches; propagate the must-retry condition to the caller.
  if (!cleanup_inline_caches_impl(unloading_occurred, false)) {
    return false;
  }

  // All static stubs need to be cleaned.
  clean_ic_stubs();

  // Check that the metadata embedded in the nmethod is alive
  DEBUG_ONLY(metadata_do(check_class));
  return true;
}
555 
// Clean this method's inline caches, retrying until cleaning succeeds.
// cleanup_inline_caches_impl() returns false when the InlineCacheBuffer has
// run out of transitional stubs; refill and try again. Note that the refill
// runs with the CompiledICLocker already released (its scope ends above).
void CompiledMethod::cleanup_inline_caches(bool clean_all) {
  for (;;) {
    ICRefillVerifier ic_refill_verifier;
    { CompiledICLocker ic_locker(this);
      if (cleanup_inline_caches_impl(false, clean_all)) {
        return;
      }
    }
    InlineCacheBuffer::refill_ic_stubs();
  }
}
567 
// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods. Returns false if any IC could not be cleaned (set_to_clean()
// failed, e.g. out of transitional IC stubs); the caller must then refill the
// InlineCacheBuffer and retry.
bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        if (!clean_ic_if_metadata_is_dead(CompiledIC_at(&iter))) {
          return false;
        }
      }

      // Then clean ICs whose destination nmethod is unloading/not in use.
      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::opt_virtual_call_type:
      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_call_type:
      if (!clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all)) {
        return false;
      }
      break;

    default:
      // Other relocation types carry no inline cache to clean.
      break;
    }
  }

  return true;
}
614 
615 // Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun) was found
616 // to not be inherently safe. There is a chance that fields are seen which are not properly
617 // initialized. This happens despite the fact that nmethods_do() asserts the CodeCache_lock
618 // to be held.
619 // To bundle knowledge about necessary checks in one place, this function was introduced.
620 // It is not claimed that these checks are sufficient, but they were found to be necessary.
// Best-effort check that nm can be inspected without crashing; see the
// commentary above. The && short-circuit order is the safety mechanism here.
bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
  Method* method = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
  // NOTE(review): method->signature() is dereferenced before the
  // os::is_readable_pointer(method) check further down this expression;
  // confirm that a non-NULL-but-invalid Method* cannot fault on that call.
  return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
         !nm->is_zombie() && !nm->is_not_installed() &&
         os::is_readable_pointer(method) &&
         os::is_readable_pointer(method->constants()) &&
         os::is_readable_pointer(method->signature());
}
--- EOF ---