/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
}

bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}
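
// Illustrative sketch (comment only, not compiled): the string returned by
// state() is intended for diagnostics, e.g. a hypothetical trace line such as
//
//   tty->print_cr("nmethod " INTPTR_FORMAT " is %s", p2i(cm), cm->state());
//
// The caller and output format above are assumptions, not code from this file.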

//-----------------------------------------------------------------------------

ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return OrderAccess::load_acquire(&_exception_cache);
}

void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  assert(new_entry != NULL,"Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches that are not removed due
        // to concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
      return;
    }
  }
}

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems, as unlinking and deletion are separated by a global
  // handshake operation.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean the head; this is contended by concurrent inserts, which
        // both lazily clean the head and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // It is impossible, during cleanup, to connect the next pointer to
        // an ExceptionCache that was not published before a safepoint
        // prior to the cleanup. Therefore, a release is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception,pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
    target_entry = new ExceptionCache(exception,pc,handler);
    add_exception_cache_entry(target_entry);
  }
}
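
// Illustrative sketch (comment only, not compiled): the intended calling
// pattern for the two methods above is a lock-free lookup followed by a
// locked insert on a miss. The surrounding exception-dispatch code and the
// compute_handler() step are hypothetical placeholders.
//
//   address handler = cm->handler_for_exception_and_pc(exception, ret_pc);
//   if (handler == NULL) {
//     handler = compute_handler(cm, exception, ret_pc);   // hypothetical slow path
//     cm->add_handler_for_exception_and_pc(exception, ret_pc, handler);
//   }
//
// A racing thread may try to insert an entry for the same (exception, pc);
// add_handler_for_exception_and_pc() tolerates that by re-checking under the
// ExceptionCache_lock.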

// These methods are private, and used to manipulate the exception cache
// directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers, if their
  // location depends on whether the nmethod is entrant or not.
  assert(BarrierSet::barrier_set()->barrier_set_nmethod() == NULL, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}
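
// Illustrative sketch (comment only, not compiled): oops_reloc_begin() is
// meant to serve as the lower bound of a relocation walk, so that the bytes
// overwritten by the not-entrant jump are never interpreted as embedded oops.
// The visitor step below is a hypothetical placeholder; only the iteration
// pattern is implied by this file (see cleanup_inline_caches_impl()).
//
//   RelocIterator iter(this, oops_reloc_begin());
//   while (iter.next()) {
//     if (iter.type() == relocInfo::oop_type) {
//       oop_Relocation* reloc = iter.oop_reloc();
//       if (!reloc->oop_is_immediate()) {
//         // visit the oop location described by reloc (hypothetical step)
//       }
//     }
//   }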

int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in such case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch(iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}
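
// Illustrative sketch (comment only, not compiled): callers that need the
// exact callee at a call site prefer the attached method over the invoke
// bytecode, as preserve_callee_argument_oops() does above. The variable
// names below are hypothetical.
//
//   Method* callee = cm->attached_method_before_pc(return_pc);
//   if (callee == NULL) {
//     // fall back to decoding the Bytecode_invoke at the caller's bci
//   }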

void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;
  RelocIterator iter(this);
  while(iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT
// Check class_loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
   Klass* klass = NULL;
   if (md->is_klass()) {
     klass = ((Klass*)md);
   } else if (md->is_method()) {
     klass = ((Method*)md)->method_holder();
   } else if (md->is_methodData()) {
     klass = ((MethodData*)md)->method()->method_holder();
   } else {
     md->print();
     ShouldNotReachHere();
   }
   assert(klass->is_loader_alive(), "must be alive");
}
#endif // ASSERT


void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata which may
    // yet be marked below. (We check this further below).
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  ic->set_to_clean();
}
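
// Summary of the decision above (descriptive comment, not code): an inline
// cache is left intact only when its cached metadata is still reachable from
// a live class loader, and is cleaned otherwise:
//
//   icholder call      : keep iff cached_icholder()->is_loader_alive()
//   cached Klass*      : keep iff ((Klass*)md)->is_loader_alive()
//   cached Method*     : keep iff md->method_holder()->is_loader_alive()
//   no cached metadata : falls through to ic->set_to_clean()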

// static_stub_Relocations may have dangling references to
// nmethods so trim them out here.  Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
void CompiledMethod::clean_ic_stubs() {
#ifdef ASSERT
  address low_boundary = oops_reloc_begin();
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
#endif
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static void clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool clean_all) {
  // OK to look up references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
      ic->set_to_clean(from->is_alive());
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }
}

static void clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool clean_all) {
  clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}

static void clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool clean_all) {
  clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded.  Return postponed=true in the parallel case for
// inline caches found that point to nmethods that are not yet visited during
// the do_unloading walk.
void CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // The exception cache only needs to be cleaned if unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  cleanup_inline_caches_impl(unloading_occurred, false);

  // All static stubs need to be cleaned.
  clean_ic_stubs();

  // Check that the metadata embedded in the nmethod is alive
  DEBUG_ONLY(metadata_do(check_class));
}

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
void CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  while(iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
      }

      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
      break;

    case relocInfo::opt_virtual_call_type:
      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all);
      break;

    case relocInfo::static_call_type:
      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all);
      break;

    case relocInfo::oop_type:
      break;

    case relocInfo::metadata_type:
      break; // nothing to do.

    default:
      break;
    }
  }
}

// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun), was found
// not to be inherently safe. There is a chance that fields are seen which are not properly
// initialized. This happens despite the fact that nmethods_do() asserts that the CodeCache_lock
// is held.
// To bundle knowledge about the necessary checks in one place, this function was introduced.
// It is not claimed that these checks are sufficient, but they were found to be necessary.
bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
  Method* method = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
  return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
         !nm->is_zombie() && !nm->is_not_installed() &&
         os::is_readable_pointer(method) &&
         os::is_readable_pointer(method->constants()) &&
         os::is_readable_pointer(method->signature());
}
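
// Illustrative sketch (comment only, not compiled): a diagnostic walk over
// nmethods should guard field accesses with the check above before touching
// the Method*. The printing statement below is hypothetical; only
// nmethod_access_is_safe() is defined here.
//
//   if (CompiledMethod::nmethod_access_is_safe(nm)) {
//     ResourceMark rm;
//     tty->print_cr("%s", nm->method()->name_and_sig_as_C_string());
//   }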