/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CODE_COMPILEDMETHOD_HPP
#define SHARE_CODE_COMPILEDMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;
class CompiledStaticCall;
class NativeCallWrapper;
class ScopeDesc;
class CompiledIC;
class MetadataClosure;

// This class is used internally by nmethods to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  volatile int _count;
  ExceptionCache* volatile _next;
  ExceptionCache* _purge_list_next;

  inline address pc_at(int index);
  void set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }

  inline address handler_at(int index);
  void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _handler[index] = a; }

  inline int count();
  // increment_count is only called under lock, but there may be concurrent readers.
  void increment_count();

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass* exception_type()                           { return _exception_type; }
  ExceptionCache* next();
  void   set_next(ExceptionCache *ec);
  ExceptionCache* purge_list_next()                 { return _purge_list_next; }
  void   set_purge_list_next(ExceptionCache *ec)    { _purge_list_next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};

class nmethod;

// Cache of PcDescs found in earlier inquiries.
class PcDescCache {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently; without volatile, some C++
  // compilers (namely xlC12) may duplicate the field accesses, which has
  // caused find_pc_desc_internal to return wrong results.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_pc_descs[0] = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};

class PcDescSearch {
 private:
  address _code_begin;
  PcDesc* _lower;
  PcDesc* _upper;
 public:
  PcDescSearch(address code, PcDesc* lower, PcDesc* upper) :
    _code_begin(code), _lower(lower), _upper(upper)
  {
  }

  address code_begin() const       { return _code_begin; }
  PcDesc* scopes_pcs_begin() const { return _lower; }
  PcDesc* scopes_pcs_end() const   { return _upper; }
};

class PcDescContainer {
 private:
  PcDescCache _pc_desc_cache;
 public:
  PcDescContainer() {}

  PcDesc* find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search);
  void    reset_to(PcDesc* initial_pc_desc) { _pc_desc_cache.reset_to(initial_pc_desc); }

  PcDesc* find_pc_desc(address pc, bool approximate, const PcDescSearch& search) {
    address base_address = search.code_begin();
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - base_address) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate, search);
  }
};


class CompiledMethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;

  void init_defaults();
 protected:
  enum MarkForDeoptimizationStatus {
    not_marked,
    deoptimize,
    deoptimize_noupdate
  };

  MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization

  bool _is_far_code; // Code is far from CodeCache.
                     // Have to use far call instructions to call it from code in CodeCache.

  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
  unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
  unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints

  Method* _method;
  address _scopes_data_begin;
  // All deoptees will resume execution at the location described by this address.
  address _deopt_handler_begin;
  // All deoptees at a MethodHandle call site will resume execution at the
  // location described by this address.
  address _deopt_mh_handler_begin;

  PcDescContainer _pc_desc_container;
  ExceptionCache* volatile _exception_cache;

  void* _gc_data;

  virtual void flush() = 0;
 protected:
  CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
  CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);

 public:
  // Only used by unit test.
  CompiledMethod() {}

  virtual bool is_compiled() const { return true; }

  template<typename T>
  T* gc_data() const           { return reinterpret_cast<T*>(_gc_data); }
  template<typename T>
  void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }

  bool has_unsafe_access() const             { return _has_unsafe_access; }
  void set_has_unsafe_access(bool z)         { _has_unsafe_access = z; }

  bool has_method_handle_invokes() const     { return _has_method_handle_invokes; }
  void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  bool is_lazy_critical_native() const       { return _lazy_critical_native; }
  void set_lazy_critical_native(bool z)      { _lazy_critical_native = z; }

  bool has_wide_vectors() const              { return _has_wide_vectors; }
  void set_has_wide_vectors(bool z)          { _has_wide_vectors = z; }

  enum { not_installed = -1, // in construction; only the owner doing the construction is
                             // allowed to advance state
         in_use        = 0,  // executable nmethod
         not_used      = 1,  // not entrant, but revivable
         not_entrant   = 2,  // marked for deoptimization but activations may still exist;
                             // will be transformed to zombie when all activations are gone
         unloaded      = 3,  // there should be no activations, should not be called; will be
                             // transformed to zombie by the sweeper, when not "locked in vm"
         zombie        = 4   // no activations exist, nmethod is ready for purge
  };

  virtual bool is_in_use() const = 0;
  virtual int  comp_level() const = 0;
  virtual int  compile_id() const = 0;

  virtual address verified_entry_point() const = 0;
  virtual void log_identity(xmlStream* log) const = 0;
  virtual void log_state_change() const = 0;
  virtual bool make_not_used() = 0;
  virtual bool make_not_entrant() = 0;
  virtual bool make_entrant() = 0;
  virtual address entry_point() const = 0;
  virtual bool make_zombie() = 0;
  virtual bool is_osr_method() const = 0;
  virtual int  osr_entry_bci() const = 0;
  Method* method() const { return _method; }
  virtual void print_pcs() = 0;
  bool is_native_method() const { return _method != NULL && _method->is_native(); }
  bool is_java_method() const   { return _method != NULL && !_method->is_native(); }

  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);
  ScopeDesc* scope_desc_near(address pc);

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  bool is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; }
  void mark_for_deoptimization(bool inc_recompile_counts = true);

  bool update_recompile_counts() const {
    // Update recompile counts when either the update is explicitly requested (deoptimize)
    // or the nmethod is not marked for deoptimization at all (not_marked).
    // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
    return _mark_for_deoptimization_status != deoptimize_noupdate;
  }

  static bool nmethod_access_is_safe(nmethod* nm);

  // Tells whether frames described by this nmethod can be deoptimized.
  // Note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  virtual oop oop_at(int index) const = 0;
  virtual Metadata* metadata_at(int index) const = 0;

  address scopes_data_begin() const { return _scopes_data_begin; }
  virtual address scopes_data_end() const = 0;
  int scopes_data_size() const { return scopes_data_end() - scopes_data_begin(); }

  virtual PcDesc* scopes_pcs_begin() const = 0;
  virtual PcDesc* scopes_pcs_end() const = 0;
  int scopes_pcs_size() const { return (intptr_t) scopes_pcs_end() - (intptr_t) scopes_pcs_begin(); }

  address insts_begin() const { return code_begin(); }
  address insts_end() const   { return stub_begin(); }
  // Returns true if a given address is in the 'insts' section. The method
  // insts_contains_inclusive() is end-inclusive.
  bool insts_contains(address addr) const           { return insts_begin() <= addr && addr < insts_end(); }
  bool insts_contains_inclusive(address addr) const { return insts_begin() <= addr && addr <= insts_end(); }

  int insts_size() const { return insts_end() - insts_begin(); }

  virtual address consts_begin() const = 0;
  virtual address consts_end() const = 0;
  bool consts_contains(address addr) const { return consts_begin() <= addr && addr < consts_end(); }
  int consts_size() const { return consts_end() - consts_begin(); }

  virtual address stub_begin() const = 0;
  virtual address stub_end() const = 0;
  bool stub_contains(address addr) const { return stub_begin() <= addr && addr < stub_end(); }
  int stub_size() const { return stub_end() - stub_begin(); }

  virtual address handler_table_begin() const = 0;
  virtual address handler_table_end() const = 0;
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  int handler_table_size() const { return handler_table_end() - handler_table_begin(); }

  virtual address exception_begin() const = 0;

  virtual address nul_chk_table_begin() const = 0;
  virtual address nul_chk_table_end() const = 0;
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
  int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }

  virtual oop* oop_addr_at(int index) const = 0;
  virtual Metadata** metadata_addr_at(int index) const = 0;
  virtual void set_original_pc(const frame* fr, address pc) = 0;

 protected:
  // Exception cache support
  // Note: _exception_cache may be read and cleaned concurrently.
  ExceptionCache* exception_cache() const      { return _exception_cache; }
  ExceptionCache* exception_cache_acquire() const;
  void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }

 public:
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache();

  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);

  // MethodHandle
  bool is_method_handle_return(address return_pc);
  address deopt_mh_handler_begin() const { return _deopt_mh_handler_begin; }

  address deopt_handler_begin() const { return _deopt_handler_begin; }
  virtual address get_original_pc(const frame* fr) = 0;
  // Deopt
  // Return true if the PC is one we would expect if the frame is being deopted.
  inline bool is_deopt_pc(address pc);
  inline bool is_deopt_mh_entry(address pc);
  inline bool is_deopt_entry(address pc);

  virtual bool can_convert_to_zombie() = 0;
  virtual const char* compile_kind() const = 0;
  virtual int get_state() const = 0;

  const char* state() const;

  bool is_far_code() const { return _is_far_code; }

  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);

  // implicit exceptions support
  address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
  address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }

  static address get_deopt_original_pc(const frame* fr);

  // Inline cache support for class unloading and nmethod unloading
 private:
  bool cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);

  address continuation_for_implicit_exception(address pc, bool for_div0_check);

 public:
  // Serial version used by sweeper and whitebox test
  void cleanup_inline_caches(bool clean_all);

  virtual void clear_inline_caches();
  void clear_ic_callsites();

  // Execute nmethod barrier code, as if entering through nmethod call.
  void run_nmethod_entry_barrier();

  // Verify and count cached icholder relocations.
  int  verify_icholder_relocations();
  void verify_oop_relocations();

  bool has_evol_metadata();

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee) = 0;

  virtual NativeCallWrapper* call_wrapper_at(address call) const = 0;
  virtual NativeCallWrapper* call_wrapper_before(address return_pc) const = 0;
  virtual address call_instruction_address(address pc) const = 0;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const = 0;

  Method* attached_method(address call_pc);
  Method* attached_method_before_pc(address pc);

  virtual void metadata_do(MetadataClosure* f) = 0;

  // GC support
 protected:
  address oops_reloc_begin() const;

 private:
  static bool clean_ic_if_metadata_is_dead(CompiledIC *ic);

 public:
  // GC unloading support
  // Cleans unloaded klasses and unloaded nmethods in inline caches

  virtual bool is_unloading() = 0;

  bool unload_nmethod_caches(bool class_unloading_occurred);
  virtual void do_unloading(bool unloading_occurred) = 0;

 private:
  PcDesc* find_pc_desc(address pc, bool approximate) {
    return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
  }
};

#endif // SHARE_CODE_COMPILEDMETHOD_HPP