/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"

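// CompiledMethod has two constructors: the first takes a precomputed
// CodeBlobLayout (for callers that lay out the blob themselves), the second
// derives the layout from a CodeBuffer. Both delegate the shared flag
// initialization to init_defaults().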
CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _is_unloading_state(0),
    _method(method)
{
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _is_unloading_state(0),
    _method(method)
{
  init_defaults();
}

void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
}

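// Returns true iff return_pc is the return address of a method handle
// invoke call site in this method, as recorded in the PcDesc for that pc.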
bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non-null");
  assert(new_entry->next() == NULL, "Must be null");

  ExceptionCache *ec = exception_cache();
  if (ec != NULL) {
    new_entry->set_next(ec);
  }
  release_set_exception_cache(new_entry);
}

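// Unlink, and delete, any ExceptionCache entries whose exception Klass was
// loaded by a class loader that is no longer alive. Called as part of
// unload_nmethod_caches() when class unloading has occurred.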
void CompiledMethod::clean_exception_cache() {
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    Klass* ex_klass = curr->exception_type();
    if (ex_klass != NULL && !ex_klass->is_loader_alive()) {
      if (prev == NULL) {
        set_exception_cache(next);
      } else {
        prev->set_next(next);
      }
      delete curr;
      // prev stays the same.
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}
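
// Illustrative call pattern (a sketch, not an actual call site): the runtime's
// exception-dispatch code first consults the cache and, on a miss, computes the
// handler from the exception handler table and records it for next time:
//
//   address handler = cm->handler_for_exception_and_pc(exception, pc);
//   if (handler == NULL) {
//     handler = /* compute from the exception handler table */;
//     cm->add_handler_for_exception_and_pc(exception, pc, handler);
//   }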

//-------------end of code for ExceptionCache--------------

// These are the private methods used to manipulate the exception
// cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

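// Same as scope_desc_at(), but uses pc_desc_near(), which returns the
// first PcDesc at or after the given pc rather than requiring an exact match.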
ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

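// Count the virtual call sites in this method whose inline cache still holds
// a CompiledICHolder. The count is used by debug-only consistency checks
// that icholders are not leaked.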
int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at a call. This method
// must be called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT compilers should be used, if present,
    // since the bytecode can be inaccurate in that case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

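// Return the callee Method* that the compiler attached to the call
// instruction at call_instr (recorded in the call's relocation entry),
// or NULL if no method was attached.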
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear the ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert_locked_or_safepoint(CompiledIC_lock);
  ResourceMark rm;
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT
// Check that the class loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
  Klass* klass = NULL;
  if (md->is_klass()) {
    klass = ((Klass*)md);
  } else if (md->is_method()) {
    klass = ((Method*)md)->method_holder();
  } else if (md->is_methodData()) {
    klass = ((MethodData*)md)->method()->method_holder();
  } else {
    md->print();
    ShouldNotReachHere();
  }
  assert(klass->is_loader_alive(), "must be alive");
}
#endif // ASSERT


void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata which may
    // yet be marked below. (We check this further below.)
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}

// static_stub_Relocations may have dangling references to
// nmethods, so trim them out here.  Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
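// Note that the body below is compiled only in debug builds (#ifdef ASSERT),
// so in product builds this is a no-op; presumably the stale stubs are
// otherwise harmless and only need clearing so the debug-only metadata
// liveness checks do not see dangling references.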
void CompiledMethod::clean_ic_stubs() {
#ifdef ASSERT
  address low_boundary = oops_reloc_begin();
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
#endif
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static void clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool clean_all) {
  // OK to look up references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
      ic->set_to_clean(from->is_alive());
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }
}

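// Overloads that extract the current call destination from an inline cache
// or a static call and delegate to the template above.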
static void clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool clean_all) {
  clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}

static void clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool clean_all) {
  clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded.
void CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // The exception cache only needs to be cleaned if class unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  cleanup_inline_caches_impl(unloading_occurred, false);

  // All static stubs need to be cleaned.
  clean_ic_stubs();

  // Check that the metadata embedded in the nmethod is alive
  DEBUG_ONLY(metadata_do(check_class));
}

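// Reset the cached is_unloading() decision: mark this method as not
// unloading as of the current code cache unloading cycle, using a relaxed
// store of the packed state word.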
void CompiledMethod::clear_unloading_state() {
  NMethodIsUnloadingUnion state;
  state._inflated._unloading_cycle = CodeCache::unloading_cycle();
  state._inflated._is_unloading = 0;
  RawAccess<MO_RELAXED>::store(&_is_unloading_state, state._value);
}

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
void CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
#ifdef ASSERT
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint() ||
           nm->is_safe_for_ic_patching(), "mt unsafe call");
  } else {
    assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  }
#endif
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
      }

      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all);
      break;

    case relocInfo::oop_type:
      // handled by do_unloading_oops already
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.

    default:
      break;
    }
  }
}