/*
 * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_COMPILEDMETHOD_HPP
#define SHARE_VM_CODE_COMPILEDMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;
class CompiledStaticCall;
class NativeCallWrapper;

// This class is used internally by nmethods to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  volatile int _count;
  ExceptionCache* _next;

  inline address pc_at(int index);
  void set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }

  inline address handler_at(int index);
  void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _handler[index] = a; }

  inline int count();
  // increment_count is only called under lock, but there may be concurrent readers.
  void increment_count();

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass*    exception_type()                { return _exception_type; }
  ExceptionCache* next()                    { return _next; }
  void      set_next(ExceptionCache *ec)    { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};
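
// A minimal usage sketch (illustrative only; the real entry points are
// handler_for_exception_and_pc and add_handler_for_exception_and_pc on
// CompiledMethod, declared below). Each cache node covers one exception
// type and holds up to cache_size (pc, handler) pairs; nodes for other
// exception types are chained through _next.
//
//   address handler = cm->handler_for_exception_and_pc(exception, pc);
//   if (handler == NULL) {
//     // Slow path: compute the handler, then cache it for next time.
//     cm->add_handler_for_exception_and_pc(exception, pc, handler);
//   }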

class nmethod;

// Cache of PcDescs found in earlier inquiries.
class PcDescCache {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently. Without volatile, a C++
  // compiler (notably xlC12) may duplicate field accesses, which has
  // caused find_pc_desc_internal to return wrong results.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_pc_descs[0] = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
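
// A sketch of the hazard the volatile qualifier above guards against
// (hypothetical code, for illustration). A reader must load a slot into a
// local exactly once and then use only the local copy:
//
//   PcDesc* res = _pc_descs[i];   // single load of the volatile slot
//   if (res != NULL && res->pc_offset() == pc_offset) {
//     return res;                 // use the local copy, not the slot
//   }
//
// Without volatile, the compiler may re-read _pc_descs[i] between the
// check and the use, and a concurrent add_pc_desc can change the slot
// in between.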

class PcDescSearch {
private:
  address _code_begin;
  PcDesc* _lower;
  PcDesc* _upper;
public:
  PcDescSearch(address code, PcDesc* lower, PcDesc* upper) :
    _code_begin(code), _lower(lower), _upper(upper)
  {
  }

  address code_begin() const { return _code_begin; }
  PcDesc* scopes_pcs_begin() const { return _lower; }
  PcDesc* scopes_pcs_end() const { return _upper; }
};

class PcDescContainer {
private:
  PcDescCache _pc_desc_cache;
public:
  PcDescContainer() {}

  PcDesc* find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search);
  void    reset_to(PcDesc* initial_pc_desc) { _pc_desc_cache.reset_to(initial_pc_desc); }

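  // Fast path: if the most recently found PcDesc still matches the
  // requested pc exactly, return it without searching; otherwise fall
  // back to the search in find_pc_desc_internal.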
  PcDesc* find_pc_desc(address pc, bool approximate, const PcDescSearch& search) {
    address base_address = search.code_begin();
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - base_address) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate, search);
  }
};


class CompiledMethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;

  void init_defaults();
protected:
  enum MarkForDeoptimizationStatus {
    not_marked,
    deoptimize,
    deoptimize_noupdate
  };

  MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization

  bool _is_far_code; // Code is far from the CodeCache.
                     // Far call instructions must be used to call it from code in the CodeCache.
  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Does this method have MethodHandle invokes?
  unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
  unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints

  Method*   _method;
  address _scopes_data_begin;
  // All deoptees will resume execution at the location described by
  // this address.
  address _deopt_handler_begin;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this address.
  address _deopt_mh_handler_begin;

  PcDescContainer _pc_desc_container;
  ExceptionCache * volatile _exception_cache;

  virtual void flush() = 0;
protected:
  CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
  CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);

public:
  virtual bool is_compiled() const                { return true; }

  bool  has_unsafe_access() const                 { return _has_unsafe_access; }
  void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }

  bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
  void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }

  bool  is_lazy_critical_native() const           { return _lazy_critical_native; }
  void  set_lazy_critical_native(bool z)          { _lazy_critical_native = z; }

  bool  has_wide_vectors() const                  { return _has_wide_vectors; }
  void  set_has_wide_vectors(bool z)              { _has_wide_vectors = z; }

  enum { not_installed = -1, // under construction; only the owner doing the
                             // construction is allowed to advance state
         in_use        = 0,  // executable nmethod
         not_used      = 1,  // not entrant, but revivable
         not_entrant   = 2,  // marked for deoptimization, but activations may still exist;
                             // will be transformed to zombie when all activations are gone
         zombie        = 3,  // no activations exist; nmethod is ready for purge
         unloaded      = 4   // there should be no activations; should not be called;
                             // will be transformed to zombie immediately
  };
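
  // A rough sketch of the lifecycle, inferred from the comments above
  // (the authoritative transitions live in the make_* methods below):
  //
  //   not_installed -> in_use -> not_entrant -> zombie -> purged
  //   in_use <-> not_used               (revivable via make_entrant)
  //   unloaded -> zombie                (immediately)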

  virtual bool  is_in_use() const = 0;
  virtual int   comp_level() const = 0;
  virtual int   compile_id() const = 0;

  virtual address verified_entry_point() const = 0;
  virtual void log_identity(xmlStream* log) const = 0;
  virtual void log_state_change(oop cause = NULL) const = 0;
  virtual bool make_not_used() = 0;
  virtual bool make_not_entrant() = 0;
  virtual bool make_entrant() = 0;
  virtual address entry_point() const = 0;
  virtual bool make_zombie() = 0;
  virtual bool is_osr_method() const = 0;
  virtual int osr_entry_bci() const = 0;
  Method* method() const                          { return _method; }
  virtual void print_pcs() = 0;
  bool is_native_method() const { return _method != NULL && _method->is_native(); }
  bool is_java_method() const { return _method != NULL && !_method->is_native(); }

  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);
  ScopeDesc* scope_desc_near(address pc);

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  bool  is_marked_for_deoptimization() const      { return _mark_for_deoptimization_status != not_marked; }
  void  mark_for_deoptimization(bool inc_recompile_counts = true) {
    _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
  }
  bool update_recompile_counts() const {
    // Update recompile counts when either the update is explicitly requested (deoptimize)
    // or the nmethod is not marked for deoptimization at all (not_marked).
    // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
    return _mark_for_deoptimization_status != deoptimize_noupdate;
  }
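
  // For reference, the decision per status implied by the test above:
  //   not_marked          -> update recompile counts
  //   deoptimize          -> update recompile counts
  //   deoptimize_noupdate -> do not update recompile counts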

  static bool nmethod_access_is_safe(nmethod* nm);

  // Tells whether frames described by this nmethod can be deoptimized.
  // Note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  virtual oop oop_at(int index) const = 0;
  virtual Metadata* metadata_at(int index) const = 0;

  address scopes_data_begin() const { return _scopes_data_begin; }
  virtual address scopes_data_end() const = 0;
  int scopes_data_size() const { return scopes_data_end() - scopes_data_begin(); }

  virtual PcDesc* scopes_pcs_begin() const = 0;
  virtual PcDesc* scopes_pcs_end() const = 0;
  int scopes_pcs_size() const { return (intptr_t) scopes_pcs_end() - (intptr_t) scopes_pcs_begin(); }

  address insts_begin() const { return code_begin(); }
  address insts_end() const { return stub_begin(); }
  // Returns true if a given address is in the 'insts' section. The method
  // insts_contains_inclusive() is end-inclusive.
  bool insts_contains(address addr) const { return insts_begin() <= addr && addr < insts_end(); }
  bool insts_contains_inclusive(address addr) const { return insts_begin() <= addr && addr <= insts_end(); }

  int insts_size() const { return insts_end() - insts_begin(); }

  virtual address consts_begin() const = 0;
  virtual address consts_end() const = 0;
  bool consts_contains(address addr) const { return consts_begin() <= addr && addr < consts_end(); }
  int consts_size() const { return consts_end() - consts_begin(); }

  virtual address stub_begin() const = 0;
  virtual address stub_end() const = 0;
  bool stub_contains(address addr) const { return stub_begin() <= addr && addr < stub_end(); }
  int stub_size() const { return stub_end() - stub_begin(); }

  virtual address handler_table_begin() const = 0;
  virtual address handler_table_end() const = 0;
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  int handler_table_size() const { return handler_table_end() - handler_table_begin(); }

  virtual address exception_begin() const = 0;

  virtual address nul_chk_table_begin() const = 0;
  virtual address nul_chk_table_end() const = 0;
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
  int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }

  virtual oop* oop_addr_at(int index) const = 0;
  virtual Metadata** metadata_addr_at(int index) const = 0;
  virtual void    set_original_pc(const frame* fr, address pc) = 0;

  // Exception cache support
  // Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
  ExceptionCache* exception_cache() const         { return _exception_cache; }
  void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
  void release_set_exception_cache(ExceptionCache *ec);
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache();

  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);
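
  // Lock-free read pattern (a sketch; writers publish new list heads with
  // a release store via release_set_exception_cache, and readers rely on
  // the consume ordering noted above while walking the list):
  //
  //   for (ExceptionCache* ec = exception_cache(); ec != NULL; ec = ec->next()) {
  //     if (ec->exception_type() == exception->klass()) { ... }
  //   }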

  // MethodHandle
  bool is_method_handle_return(address return_pc);
  address deopt_mh_handler_begin() const  { return _deopt_mh_handler_begin; }

  address deopt_handler_begin() const { return _deopt_handler_begin; }
  virtual address get_original_pc(const frame* fr) = 0;
  // Deopt
  // Returns true if the PC is one we would expect if the frame is being deopted.
  inline bool is_deopt_pc(address pc);
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  inline bool is_deopt_entry(address pc);

  virtual bool can_convert_to_zombie() = 0;
  virtual const char* compile_kind() const = 0;
  virtual int get_state() const = 0;

  const char* state() const;

  bool is_far_code() const { return _is_far_code; }

  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);

  // implicit exceptions support
  virtual address continuation_for_implicit_exception(address pc) { return NULL; }

  static address get_deopt_original_pc(const frame* fr);

  // GC unloading support
  // Cleans unloaded klasses and unloaded nmethods in inline caches
  bool unload_nmethod_caches(bool parallel, bool class_unloading_occurred);

  // Inline cache support for class unloading and nmethod unloading
 private:
  bool cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all);
 public:
  bool cleanup_inline_caches(bool clean_all = false) {
    // Serial version used by sweeper and whitebox test
    return cleanup_inline_caches_impl(false, false, clean_all);
  }

  virtual void clear_inline_caches();
  void clear_ic_stubs();

  // Verify and count cached icholder relocations.
  int  verify_icholder_relocations();
  void verify_oop_relocations();

  virtual bool is_evol_dependent_on(Klass* dependee) = 0;
  // Fast breakpoint support. Tells whether this compiled method is
  // dependent on the given method. Also returns true if this nmethod
  // corresponds to the given method itself.
  virtual bool is_dependent_on_method(Method* dependee) = 0;

  virtual NativeCallWrapper* call_wrapper_at(address call) const = 0;
  virtual NativeCallWrapper* call_wrapper_before(address return_pc) const = 0;
  virtual address call_instruction_address(address pc) const = 0;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const = 0;

  Method* attached_method(address call_pc);
  Method* attached_method_before_pc(address pc);

  virtual void metadata_do(void f(Metadata*)) = 0;

  // GC support

  void set_unloading_next(CompiledMethod* next) { _unloading_next = next; }
  CompiledMethod* unloading_next()              { return _unloading_next; }

 protected:
  address oops_reloc_begin() const;
 private:
  static void clean_ic_if_metadata_is_dead(CompiledIC *ic);

  void clean_ic_stubs();

 public:
  virtual void do_unloading(BoolObjectClosure* is_alive);
  // The parallel versions are used by G1.
  virtual bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
  virtual void do_unloading_parallel_postponed();

  static unsigned char global_unloading_clock()   { return _global_unloading_clock; }
  static void increase_unloading_clock();

  void set_unloading_clock(unsigned char unloading_clock);
  unsigned char unloading_clock();

protected:
  virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive) = 0;
#if INCLUDE_JVMCI
  virtual bool do_unloading_jvmci() = 0;
#endif

private:
  // GC support to help figure out if an nmethod has been
  // cleaned/unloaded by the current GC.
  static unsigned char _global_unloading_clock;

  volatile unsigned char _unloading_clock;   // Incremented after GC unloaded/cleaned the nmethod
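
  // How the clocks relate (a sketch, inferred from the comments above):
  // a GC unloading pass first bumps _global_unloading_clock; an nmethod
  // whose unloading_clock() already equals global_unloading_clock() has
  // been cleaned in the current pass and can be skipped, otherwise it
  // still needs processing and is stamped once it is done.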

  PcDesc* find_pc_desc(address pc, bool approximate) {
    return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
  }

protected:
  union {
    // Used by G1 to chain nmethods.
    CompiledMethod* _unloading_next;
    // Used by non-G1 GCs to chain nmethods.
    nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
  };
};

#endif //SHARE_VM_CODE_COMPILEDMETHOD_HPP