/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

void CompiledMethod::init_defaults() {
  _has_unsafe_access          = 0;
  _has_method_handle_invokes  = 0;
  _lazy_critical_native       = 0;
  _has_wide_vectors           = 0;
}

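// Returns true if return_pc is the return address of a call site that was
// compiled as a method handle invoke in this method.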
bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes())  return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

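// The exception cache list is read lock-free; the head is loaded with acquire
// semantics so that a concurrently published entry is observed fully initialized.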
ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return OrderAccess::load_acquire(&_exception_cache);
}

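// Inserts new_entry at the head of the exception cache list. Head entries whose
// Klass* is no longer alive are unlinked first, so that the new entry never
// points at an entry that concurrent cleanup may release.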
void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
  assert(new_entry != NULL,"Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches, which are not removed
        // by concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
      return;
    }
  }
}

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems as unlinking and deletion are separated by a global
  // handshake operation.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean head; this is contended by concurrent inserts, which
        // both lazily clean the head and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // During cleanup it is impossible to connect the next pointer to an
        // ExceptionCache that was not already published before a safepoint
        // preceding the cleanup. Therefore, release is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception,pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
    target_entry = new ExceptionCache(exception,pc,handler);
    add_exception_cache_entry(target_entry);
  }
}

// These methods are private and manipulate the exception cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

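// Returns true if pc is the site of a safepoint poll emitted for a method
// return (a poll_return relocation).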
bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


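// Returns the ScopeDesc for the PcDesc recorded exactly at pc; the PcDesc
// must be present.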
ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

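// Like scope_desc_at(), but uses pc_desc_near() and therefore tolerates a pc
// that does not exactly match a recorded PcDesc.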
ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes.  If an oop in the old code was there, that oop
  // should not get GC'd.  Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers if their
  // location depends on whether the nmethod is entrant or not.
  assert(BarrierSet::barrier_set()->barrier_set_nmethod() == NULL, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

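// Counts the virtual call sites whose inline cache currently holds a
// CompiledICHolder.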
int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at a call. This method
// must be called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // Bytecode can be inaccurate in such a case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

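// Returns the Method* recorded by the compiler for the call instruction at
// call_instr (via its call relocation), or NULL if none was attached.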
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
        case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
        case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
        case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
        default:                               break;
      }
    }
  }
  return NULL; // not found
}

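// Returns the attached Method* for the call that ends immediately before pc,
// or NULL if pc is not preceded by a call.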
Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

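// Clears all inline caches in this method by walking its relocations. Must be
// called at a safepoint; zombie methods are skipped.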
void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->clear_ic_stub();
    }
  }
}

#ifdef ASSERT
// Check that the class loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
  Klass* klass = NULL;
  if (md->is_klass()) {
    klass = ((Klass*)md);
  } else if (md->is_method()) {
    klass = ((Method*)md)->method_holder();
  } else if (md->is_methodData()) {
    klass = ((MethodData*)md)->method()->method_holder();
  } else {
    md->print();
    ShouldNotReachHere();
  }
  assert(klass->is_loader_alive(), "must be alive");
}
#endif // ASSERT


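// Returns true if the inline cache is already clean or still caches metadata
// whose class loader is alive; otherwise the IC is set to clean. A false
// return means the IC could not be set to clean (typically because ICStub
// space ran out), and the caller is expected to retry after refilling ICStubs.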
bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_clean()) {
    return true;
  }
  if (ic->is_icholder_call()) {
    // The only exception is compiledICHolder metadata which may
    // yet be marked below. (We check this further below).
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return true;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return true;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return true;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  return ic->set_to_clean();
}

// static_stub_Relocations may have dangling references to
// nmethods so trim them out here.  Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
void CompiledMethod::clean_ic_stubs() {
#ifdef ASSERT
  address low_boundary = oops_reloc_begin();
  RelocIterator iter(this, low_boundary);
  while (iter.next()) {
    address static_call_addr = NULL;
    if (iter.type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* cic = CompiledIC_at(&iter);
      if (!cic->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    } else if (iter.type() == relocInfo::static_call_type) {
      CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
      if (!csc->is_call_to_interpreted()) {
        static_call_addr = iter.addr();
      }
    }
    if (static_call_addr != NULL) {
      RelocIterator sciter(this, low_boundary);
      while (sciter.next()) {
        if (sciter.type() == relocInfo::static_stub_type &&
            sciter.static_stub_reloc()->static_call() == static_call_addr) {
          sciter.static_stub_reloc()->clear_inline_cache();
        }
      }
    }
  }
#endif
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool clean_all) {
  // It is OK to look up references to zombies here.
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
      if (!ic->set_to_clean(from->is_alive())) {
        return false;
      }
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }
  return true;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded. Returns false if inline cache cleaning could not be
// completed (for example because ICStub space was exhausted), in which case
// the caller is expected to retry.
bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // The exception cache only needs to be cleaned if unloading occurred.
  if (unloading_occurred) {
    clean_exception_cache();
  }

  if (!cleanup_inline_caches_impl(unloading_occurred, false)) {
    return false;
  }

  // All static stubs need to be cleaned.
  clean_ic_stubs();

  // Check that the metadata embedded in the nmethod is alive
  DEBUG_ONLY(metadata_do(check_class));
  return true;
}

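// Cleans inline caches, retrying after refilling ICStubs whenever cleaning
// could not be completed under the CompiledICLocker.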
void CompiledMethod::cleanup_inline_caches(bool clean_all) {
  for (;;) {
    { CompiledICLocker ic_locker(this);
      if (cleanup_inline_caches_impl(false, clean_all)) {
        return;
      }
    }
    InlineCacheBuffer::refill_ic_stubs();
  }
}

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        if (!clean_ic_if_metadata_is_dead(CompiledIC_at(&iter))) {
          return false;
        }
      }

      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::opt_virtual_call_type:
      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_call_type:
      if (!clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all)) {
        return false;
      }
      break;

    default:
      break;
    }
  }

  return true;
}

// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun),
// was found not to be inherently safe. There is a chance that fields are seen
// which are not properly initialized. This happens despite the fact that
// nmethods_do() asserts the CodeCache_lock to be held.
// To bundle knowledge about the necessary checks in one place, this function
// was introduced. It is not claimed that these checks are sufficient, but they
// were found to be necessary.
bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
  Method* method = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
  return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
         !nm->is_zombie() && !nm->is_not_installed() &&
         os::is_readable_pointer(method) &&
         os::is_readable_pointer(method->constants()) &&
         os::is_readable_pointer(method->signature());
}