/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _mark_for_deoptimization_status(not_marked), _method(method) {
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
  _mark_for_deoptimization_status(not_marked), _method(method) {
  init_defaults();
}

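// Reset the flag and clock fields that are not set up by the CodeBlob
// constructor to their default (unset) values.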
void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
  _unloading_clock            = 0;
}

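// Returns true if return_pc is the return address of a method handle
// invoke call site in this method. Stack walking code uses this to
// recognize frames that need method handle specific fixups.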
bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "must be non-NULL");
  assert(new_entry->next() == NULL, "next must be NULL");

  ExceptionCache *ec = exception_cache();
  if (ec != NULL) {
    new_entry->set_next(ec);
  }
  release_set_exception_cache(new_entry);
}

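// Unlink and delete exception cache entries whose exception Klass comes
// from a class loader that is no longer alive. Called during class
// unloading (see unload_nmethod_caches() below).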
void CompiledMethod::clean_exception_cache() {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive()) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}
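
// A minimal sketch of the intended lookup/insert pattern (illustrative
// only; the real caller is the exception handling runtime):
//
//   address handler = cm->handler_for_exception_and_pc(exception, pc);
//   if (handler == NULL) {
//     handler = ...;  // compute the handler the slow way
//     cm->add_handler_for_exception_and_pc(exception, pc, handler);
//   }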

//-------------end of code for ExceptionCache--------------

// These methods are private and manipulate the exception cache
// directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

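// The following two queries answer whether pc sits at a safepoint poll
// (at a return, respectively at any poll) by scanning the relocation
// entries that cover pc.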
bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values.
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


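// scope_desc_at() requires a PcDesc that matches pc exactly, while
// scope_desc_near() also accepts a nearby PcDesc; both functions
// guarantee() that a matching scope exists.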
ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not_entrant or zombie then a JMP is plastered over the
  // first few bytes.  Therefore, we do not allow an oop in the first
  // NativeJump::instruction_size bytes after the verified entry point. Since
  // the frame is being built in this path, that guarantees such oops cannot
  // exist until the frame is completed, which should be strictly later.
  return MAX2(code_begin() + frame_complete_offset(),
              verified_entry_point() + NativeJump::instruction_size);
}

int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at a call. This method
// must be called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by the JIT compiler should be used, if present,
    // since the bytecode can be inaccurate in that case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

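// Returns the Method* that the compiler attached to the call site at
// call_instr (via the call relocation), or NULL if no method was attached.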
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

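// Reset all inline caches in this method to their initial (clean) state.
// This transitions ICs that other threads may be using concurrently, so
// it is only safe at a safepoint.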
void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of ICs only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear the ICStubs of all compiled ICs.
void CompiledMethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  ResourceMark rm;
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT
// Check that the class loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
  Klass* klass = NULL;
  if (md->is_klass()) {
    klass = ((Klass*)md);
  } else if (md->is_method()) {
    klass = ((Method*)md)->method_holder();
  } else if (md->is_methodData()) {
    klass = ((MethodData*)md)->method()->method_holder();
  } else {
    md->print();
    ShouldNotReachHere();
  }
  assert(klass->is_loader_alive(), "must be alive");
}
#endif // ASSERT


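// Clean an inline cache if the metadata it caches refers to a class
// loader that is no longer alive; otherwise leave it alone.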
void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_icholder_call()) {
    // The cached CompiledICHolder metadata is an exception: it may
    // still get marked below, so we only check its loader here.
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

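// The global unloading clock ties each nmethod to a round of (parallel)
// unloading: an nmethod whose _unloading_clock differs from
// _global_unloading_clock has not been processed in the current round.
// Sketch of the intended protocol (illustrative only):
//
//   CompiledMethod::increase_unloading_clock();   // start a new round
//   ...                                           // visit an nmethod nm
//   nm->set_unloading_clock(CompiledMethod::global_unloading_clock());
//   // later: nm->unloading_clock() != global_unloading_clock()
//   //        means nm has not yet been visited in this round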
unsigned char CompiledMethod::_global_unloading_clock = 0;

void CompiledMethod::increase_unloading_clock() {
  _global_unloading_clock++;
  if (_global_unloading_clock == 0) {
    // nmethods are allocated with _unloading_clock == 0,
    // so 0 is never used as a clock value.
    _global_unloading_clock = 1;
  }
}

void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
  OrderAccess::release_store(&_unloading_clock, unloading_clock);
}

unsigned char CompiledMethod::unloading_clock() {
  return OrderAccess::load_acquire(&_unloading_clock);
}


// static_stub_Relocations may have dangling references to
// nmethods, so trim them out here. Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
// Note that this cleaning pass only runs in debug (ASSERT) builds.
void CompiledMethod::clean_ic_stubs() {
#ifdef ASSERT
  address low_boundary = oops_reloc_begin();
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      // Find the static stubs belonging to this call site and clear them.
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
#endif
}

// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.

void CompiledMethod::do_unloading(BoolObjectClosure* is_alive) {
  // Make sure the oops are ready to receive visitors.
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  address low_boundary = oops_reloc_begin();

  if (do_unloading_oops(low_boundary, is_alive)) {
    return;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci()) {
    return;
  }
#endif

  // Cleaning of the exception cache and inline caches happens
  // after all the unloaded methods have been found.
}

// Clean references to unloaded nmethods at addr from this one, which is not
// unloaded. Returns true if cleaning was postponed because the target nmethod
// has not yet been visited in the current round of parallel unloading.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool parallel, bool clean_all) {
  // It is OK to look up references to zombies here.
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    if (parallel && nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
      // The nmethod has not been processed yet.
      return true;
    }

    // Clean inline caches pointing to both zombie and not_entrant methods.
    if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) {
      ic->set_to_clean(from->is_alive());
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean: %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }

  return false;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool parallel, bool clean_all = false) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, parallel, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool parallel, bool clean_all = false) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, parallel, clean_all);
}

bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors.
  assert(!is_zombie() && !is_unloaded(),
         "should not call follow on zombie or unloaded nmethod");

  address low_boundary = oops_reloc_begin();

  if (do_unloading_oops(low_boundary, is_alive)) {
    return false;
  }

#if INCLUDE_JVMCI
  if (do_unloading_jvmci()) {
    return false;
  }
#endif

  return unload_nmethod_caches(/*parallel*/true, unloading_occurred);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded. Returns postponed=true in the parallel case for
// inline caches found that point to nmethods that are not yet visited during
// the do_unloading walk.
bool CompiledMethod::unload_nmethod_caches(bool parallel, bool unloading_occurred) {

  // The exception cache only needs to be cleaned if class unloading occurred.
  if (unloading_occurred) {
    clean_exception_cache();
  }

  bool postponed = cleanup_inline_caches_impl(parallel, unloading_occurred, /*clean_all*/false);

  // All static stubs need to be cleaned.
  clean_ic_stubs();

  // Check that the metadata embedded in the nmethod is alive.
  DEBUG_ONLY(metadata_do(check_class));

  return postponed;
}

// Called to clean up after class unloading for live nmethods and from the
// sweeper for all methods.
bool CompiledMethod::cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all) {
  assert_locked_or_safepoint(CompiledIC_lock);
  bool postponed = false;
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to
  // non-entrant, zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  while (iter.next()) {
    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
      }

      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
      break;

    case relocInfo::opt_virtual_call_type:
      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
      break;

    case relocInfo::static_call_type:
      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, parallel, clean_all);
      break;

    case relocInfo::oop_type:
      // Already handled by do_unloading_oops.
      break;

    case relocInfo::metadata_type:
      break; // nothing to do

    default:
      break;
    }
  }

  return postponed;
}

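// Second pass of parallel unloading: revisit the inline caches whose
// cleaning was postponed above because their target nmethod had not yet
// been visited, and clean them now.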
void CompiledMethod::do_unloading_parallel_postponed() {
  ResourceMark rm;

  // Make sure the oops are ready to receive visitors.
  assert(!is_zombie(),
         "should not call follow on zombie nmethod");

  RelocIterator iter(this, oops_reloc_begin());
  while (iter.next()) {
    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, true);
      break;

    default:
      break;
    }
  }
}

// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun),
// has been found not to be inherently safe: there is a chance that fields are
// seen which are not properly initialized. This happens despite the fact that
// nmethods_do() asserts the CodeCache_lock to be held.
// This function bundles the knowledge about the necessary checks in one place.
// The checks are not claimed to be sufficient, but they were found to be
// necessary.
bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
  Method* method = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
  return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
         !nm->is_zombie() && !nm->is_not_installed() &&
         os::is_readable_pointer(method) &&
         os::is_readable_pointer(method->constants()) &&
         os::is_readable_pointer(method->signature());
}