/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _mark_for_deoptimization_status(not_marked), _method(method) {
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _mark_for_deoptimization_status(not_marked), _method(method) {
  init_defaults();
}

void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _unloading_clock            = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  ExceptionCache *ec = exception_cache();
  if (ec != NULL) {
    new_entry->set_next(ec);
  }
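  // Publish the new entry with a release store so that lock-free readers in
  // handler_for_exception_and_pc() always observe a fully initialized entry.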
  release_set_exception_cache(new_entry);
}

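// Unlink and delete entries whose exception Klass belongs to a class loader
// that is no longer alive. This runs as part of class unloading, while Java
// threads are stopped, so the unlinking cannot race with lock-free readers.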
void CompiledMethod::clean_exception_cache() {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive()) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// Public accessors for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}
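
// Illustrative sketch (not the actual runtime code) of how the two methods
// above cooperate: callers first probe the cache lock-free, and only on a
// miss compute the handler and publish it under the ExceptionCache_lock.
//
//   address handler = cm->handler_for_exception_and_pc(exception, pc);
//   if (handler == NULL) {
//     handler = /* compute from the method's exception handler table */;
//     cm->add_handler_for_exception_and_pc(exception, pc, handler);
//   }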

//-------------end of code for ExceptionCache--------------

// Private method used to manipulate the exception cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at a call. This method
// must be called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // Prefer the method attached by the JIT compiler, if present; the
    // bytecode-level information can be inaccurate in that case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

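// Treats pc as a return address: finds the call instruction that ends at pc
// and returns the Method* that the compiler attached to that call site's
// relocation, if any.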
Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  ResourceMark rm;
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT
// Check that the class loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
  Klass* klass = NULL;
  if (md->is_klass()) {
    klass = ((Klass*)md);
  } else if (md->is_method()) {
    klass = ((Method*)md)->method_holder();
  } else if (md->is_methodData()) {
    klass = ((MethodData*)md)->method()->method_holder();
  } else {
    md->print();
    ShouldNotReachHere();
  }
  assert(klass->is_loader_alive(), "must be alive");
}
#endif // ASSERT


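// Clear the inline cache if the metadata it caches (a CompiledICHolder, a
// Klass, or a Method) belongs to a class loader that is no longer alive.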
void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_icholder_call()) {
    // The only exception is CompiledICHolder metadata, which may
    // yet be marked below.
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

unsigned char CompiledMethod::_global_unloading_clock = 0;

void CompiledMethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

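// The setter and getter below form a release/acquire pair: a thread that
// observes an up-to-date clock value is guaranteed to also observe all
// cleaning performed on this nmethod before the clock was published.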
void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store(&_unloading_clock, unloading_clock);
}

unsigned char CompiledMethod::unloading_clock() {
  return OrderAccess::load_acquire(&_unloading_clock);
}


// static_stub_Relocations may have dangling references to
// nmethods so trim them out here.  Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
void CompiledMethod::clean_ic_stubs() {
#ifdef ASSERT
  address low_boundary = oops_reloc_begin();
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
#endif
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.

void CompiledMethod::do_unloading(BoolObjectClosure* is_alive) {
  // Make sure the nmethod's oops are ready to receive visitors.
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  address low_boundary = oops_reloc_begin();

  if (do_unloading_oops(low_boundary, is_alive)) {
    return;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci()) {
    return;
  }
#endif

  // Cleaning of the exception cache and the inline caches happens
  // after all the unloaded methods have been found.
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
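// Returns true when cleaning had to be postponed: in the parallel case the
// target nmethod may not have been processed in this unloading cycle yet,
// which is detected by comparing its clock against the global one.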
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool parallel, bool clean_all) {
  // OK to look up references to zombies here.
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    if (parallel && nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean(from->is_alive());
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }

  return false;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool parallel, bool clean_all = false) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, parallel, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool parallel, bool clean_all = false) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, parallel, clean_all);
}

bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the nmethod's oops are ready to receive visitors.
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  address low_boundary = oops_reloc_begin();

  if (do_unloading_oops(low_boundary, is_alive)) {
    return false;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci()) {
    return false;
  }
#endif

  return unload_nmethod_caches(/*parallel*/true, unloading_occurred);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded.  Returns postponed=true in the parallel case for
// inline caches found that point to nmethods that are not yet visited during
// the do_unloading walk.
bool CompiledMethod::unload_nmethod_caches(bool parallel, bool unloading_occurred) {

  // The exception cache only needs to be cleaned if class unloading occurred.
  if (unloading_occurred) {
    clean_exception_cache();
  }

  bool postponed = cleanup_inline_caches_impl(parallel, unloading_occurred, /*clean_all*/false);

  // All static stubs need to be cleaned.
  clean_ic_stubs();

  // Check that the metadata embedded in the nmethod is alive.
  DEBUG_ONLY(metadata_do(check_class));

  return postponed;
}

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
bool CompiledMethod::cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all) {
  assert_locked_or_safepoint(CompiledIC_lock);
  bool postponed = false;
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, parallel, clean_all);
      break;

    case relocInfo::oop_type:
      // handled by do_unloading_oops already
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.

    default:
      break;
    }
  }

  return postponed;
}

void CompiledMethod::do_unloading_parallel_postponed() {
  ResourceMark rm;

  // Make sure the nmethod's oops are ready to receive visitors.
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  RelocIterator iter(this, oops_reloc_begin());
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, true);
      break;

    default:
      break;
    }
  }
}

// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun),
// has been found not to be inherently safe: there is a chance that fields are
// seen which are not properly initialized. This happens despite the fact that
// nmethods_do() asserts that the CodeCache_lock is held.
// This function bundles the knowledge about the necessary checks in one place.
// The checks are not claimed to be sufficient, but they were found to be necessary.
bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
  Method* method = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
  return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
         !nm->is_zombie() && !nm->is_not_installed() &&
         os::is_readable_pointer(method) &&
         os::is_readable_pointer(method->constants()) &&
         os::is_readable_pointer(method->signature());
}
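
// Illustrative sketch of a hypothetical caller (not code from this file):
// guard any speculative access to a possibly half-initialized nmethod behind
// the check above, e.g.
//
//   if (CompiledMethod::nmethod_access_is_safe(nm)) {
//     tty->print_cr("%s", nm->method()->name_and_sig_as_C_string());
//   }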