/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CODE_COMPILEDMETHOD_HPP
#define SHARE_CODE_COMPILEDMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;
class CompiledStaticCall;
class NativeCallWrapper;
class ScopeDesc;
class CompiledIC;
class MetadataClosure;

// This class is used internally by nmethods to cache
// exception/pc/handler information. (A usage sketch follows the class.)

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass* _exception_type;
  address _pc[cache_size];
  address _handler[cache_size];
  volatile int _count;
  ExceptionCache* volatile _next;
  ExceptionCache* _purge_list_next;

  inline address pc_at(int index);
  void set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }

  inline address handler_at(int index);
  void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _handler[index] = a; }

  inline int count();
  // increment_count is only called under lock, but there may be concurrent readers.
  void increment_count();

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass* exception_type()                           { return _exception_type; }
  ExceptionCache* next();
  void set_next(ExceptionCache *ec);
  ExceptionCache* purge_list_next()                 { return _purge_list_next; }
  void set_purge_list_next(ExceptionCache *ec)      { _purge_list_next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};
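
// Illustrative sketch only (not part of this header): probing an entry whose
// exception_type() already matched first tests the cached pc/handler pairs,
// then tries to append a new pair on a miss. The helper name
// "my_lookup_or_add" is hypothetical.
//
//   address my_lookup_or_add(ExceptionCache* ec, address pc, address handler) {
//     address cached = ec->test_address(pc);     // non-NULL on a cache hit
//     if (cached == NULL && ec->add_address_and_handler(pc, handler)) {
//       cached = handler;                        // newly cached pair
//     }
//     return cached;
//   }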

class nmethod;

// cache pc descs found in earlier inquiries
class PcDescCache {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently. Without volatile, a C++
  // compiler (namely xlC12) may duplicate the field accesses, which
  // has caused find_pc_desc_internal to return wrong results.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_pc_descs[0] = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};

class PcDescSearch {
 private:
  address _code_begin;
  PcDesc* _lower;
  PcDesc* _upper;
 public:
  PcDescSearch(address code, PcDesc* lower, PcDesc* upper) :
    _code_begin(code), _lower(lower), _upper(upper)
  {
  }

  address code_begin() const { return _code_begin; }
  PcDesc* scopes_pcs_begin() const { return _lower; }
  PcDesc* scopes_pcs_end() const { return _upper; }
};

class PcDescContainer {
 private:
  PcDescCache _pc_desc_cache;
 public:
  PcDescContainer() {}

  PcDesc* find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search);
  void    reset_to(PcDesc* initial_pc_desc) { _pc_desc_cache.reset_to(initial_pc_desc); }

  PcDesc* find_pc_desc(address pc, bool approximate, const PcDescSearch& search) {
    address base_address = search.code_begin();
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - base_address) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate, search);
  }
};


class CompiledMethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;

  void init_defaults();
 protected:
  enum MarkForDeoptimizationStatus {
    not_marked,
    deoptimize,
    deoptimize_noupdate
  };

  MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization

  bool _is_far_code; // Code is far from the CodeCache.
                     // Far call instructions are needed to call it from code in the CodeCache.

  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Does this method have MethodHandle invokes?
  unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
  unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints

  Method* _method;
  address _scopes_data_begin;
  // All deoptees will resume execution at the location described by
  // this address.
  address _deopt_handler_begin;
  // All deoptees at a MethodHandle call site will resume execution
  // at the location described by this address.
  address _deopt_mh_handler_begin;

  PcDescContainer _pc_desc_container;
  ExceptionCache* volatile _exception_cache;

  void* _gc_data;

  virtual void flush() = 0;
 protected:
  CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
  CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);

 public:
  // Only used by unit test.
  CompiledMethod() {}

  virtual bool is_compiled() const { return true; }

  template<typename T>
  T* gc_data() const           { return reinterpret_cast<T*>(_gc_data); }
  template<typename T>
  void set_gc_data(T* gc_data) { _gc_data = reinterpret_cast<void*>(gc_data); }
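
  // Illustrative sketch only: a GC can hang its per-nmethod bookkeeping off
  // the typed accessors above. "MyGCData" is a hypothetical type.
  //
  //   cm->set_gc_data<MyGCData>(new MyGCData());
  //   MyGCData* d = cm->gc_data<MyGCData>();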

  bool has_unsafe_access() const             { return _has_unsafe_access; }
  void set_has_unsafe_access(bool z)         { _has_unsafe_access = z; }

  bool has_method_handle_invokes() const     { return _has_method_handle_invokes; }
  void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  bool is_lazy_critical_native() const       { return _lazy_critical_native; }
  void set_lazy_critical_native(bool z)      { _lazy_critical_native = z; }

  bool has_wide_vectors() const              { return _has_wide_vectors; }
  void set_has_wide_vectors(bool z)          { _has_wide_vectors = z; }

  enum { not_installed = -1, // in construction, only the owner doing the construction is
                             // allowed to advance state
         in_use        = 0,  // executable nmethod
         not_used      = 1,  // not entrant, but revivable
         not_entrant   = 2,  // marked for deoptimization but activations may still exist,
                             // will be transformed to zombie when all activations are gone
         zombie        = 3,  // no activations exist, nmethod is ready for purge
         unloaded      = 4   // there should be no activations, should not be called,
                             // will be transformed to zombie immediately
  };
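
  // Informal lifecycle sketch, derived from the enum comments above
  // (transitions are driven by the make_*() methods declared below):
  //
  //   not_installed -> in_use -> not_entrant -> zombie   (then purged)
  //   in_use -> not_used -> in_use                       (revived via make_entrant)
  //   unloaded -> zombie                                 (immediately; no activations)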

  virtual bool is_in_use() const = 0;
  virtual bool is_not_installed() const = 0;
  virtual int  comp_level() const = 0;
  virtual int  compile_id() const = 0;

  virtual address verified_entry_point() const = 0;
  virtual void log_identity(xmlStream* log) const = 0;
  virtual void log_state_change() const = 0;
  virtual bool make_not_used() = 0;
  virtual bool make_not_entrant() = 0;
  virtual bool make_entrant() = 0;
  virtual address entry_point() const = 0;
  virtual bool make_zombie() = 0;
  virtual bool is_osr_method() const = 0;
  virtual int  osr_entry_bci() const = 0;
  Method* method() const { return _method; }
  virtual void print_pcs() = 0;
  bool is_native_method() const { return _method != NULL && _method->is_native(); }
  bool is_java_method() const   { return _method != NULL && !_method->is_native(); }

  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);
  ScopeDesc* scope_desc_near(address pc);
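
  // Illustrative sketch only: mapping a pc in this method back to debug info.
  // pc_desc_at() requires an exact pc match; pc_desc_near() also accepts the
  // first PcDesc after the pc.
  //
  //   PcDesc* pd = cm->pc_desc_at(pc);        // NULL if no exact match
  //   ScopeDesc* sd = cm->scope_desc_at(pc);  // scope describing that pc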

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  bool is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; }
  void mark_for_deoptimization(bool inc_recompile_counts = true) {
    _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
  }
  bool update_recompile_counts() const {
    // Update recompile counts when either the update is explicitly requested (deoptimize)
    // or the nmethod is not marked for deoptimization at all (not_marked).
    // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
    return _mark_for_deoptimization_status != deoptimize_noupdate;
  }
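
  // Illustrative sketch only: marking a method so its active frames get
  // deoptimized without bumping recompile counts (e.g. when the code itself
  // is not at fault):
  //
  //   cm->mark_for_deoptimization(false /* inc_recompile_counts */);
  //   assert(cm->is_marked_for_deoptimization(), "must be marked");
  //   assert(!cm->update_recompile_counts(), "counts must not be updated");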

  static bool nmethod_access_is_safe(nmethod* nm);

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  virtual oop oop_at(int index) const = 0;
  virtual Metadata* metadata_at(int index) const = 0;

  address scopes_data_begin() const { return _scopes_data_begin; }
  virtual address scopes_data_end() const = 0;
  int scopes_data_size() const { return scopes_data_end() - scopes_data_begin(); }

  virtual PcDesc* scopes_pcs_begin() const = 0;
  virtual PcDesc* scopes_pcs_end() const = 0;
  int scopes_pcs_size() const { return (intptr_t) scopes_pcs_end() - (intptr_t) scopes_pcs_begin(); }

  address insts_begin() const { return code_begin(); }
  address insts_end() const   { return stub_begin(); }
  // Returns true if a given address is in the 'insts' section. The method
  // insts_contains_inclusive() is end-inclusive.
  bool insts_contains(address addr) const           { return insts_begin() <= addr && addr < insts_end(); }
  bool insts_contains_inclusive(address addr) const { return insts_begin() <= addr && addr <= insts_end(); }

  int insts_size() const { return insts_end() - insts_begin(); }

  virtual address consts_begin() const = 0;
  virtual address consts_end() const = 0;
  bool consts_contains(address addr) const { return consts_begin() <= addr && addr < consts_end(); }
  int consts_size() const { return consts_end() - consts_begin(); }

  virtual address stub_begin() const = 0;
  virtual address stub_end() const = 0;
  bool stub_contains(address addr) const { return stub_begin() <= addr && addr < stub_end(); }
  int stub_size() const { return stub_end() - stub_begin(); }

  virtual address handler_table_begin() const = 0;
  virtual address handler_table_end() const = 0;
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  int handler_table_size() const { return handler_table_end() - handler_table_begin(); }

  virtual address exception_begin() const = 0;

  virtual address nul_chk_table_begin() const = 0;
  virtual address nul_chk_table_end() const = 0;
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
  int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }

  virtual oop* oop_addr_at(int index) const = 0;
  virtual Metadata** metadata_addr_at(int index) const = 0;
  virtual void set_original_pc(const frame* fr, address pc) = 0;

 protected:
  // Exception cache support
  // Note: _exception_cache may be read and cleaned concurrently.
  ExceptionCache* exception_cache() const      { return _exception_cache; }
  ExceptionCache* exception_cache_acquire() const;
  void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }

 public:
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache();

  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);
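
  // Illustrative sketch only: the intended fast-path/slow-path pattern for
  // this cache. "compute_handler" stands in for the real lookup and is
  // hypothetical.
  //
  //   address handler = cm->handler_for_exception_and_pc(exception, pc);
  //   if (handler == NULL) {
  //     handler = compute_handler(exception, pc);  // slow path
  //     cm->add_handler_for_exception_and_pc(exception, pc, handler);
  //   }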

  // MethodHandle
  bool is_method_handle_return(address return_pc);
  address deopt_mh_handler_begin() const { return _deopt_mh_handler_begin; }

  address deopt_handler_begin() const { return _deopt_handler_begin; }
  virtual address get_original_pc(const frame* fr) = 0;
  // Deopt
  // Return true if the PC is one we would expect if the frame is being deopted.
  inline bool is_deopt_pc(address pc);
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  inline bool is_deopt_entry(address pc);

  virtual bool can_convert_to_zombie() = 0;
  virtual const char* compile_kind() const = 0;
  virtual int get_state() const = 0;

  const char* state() const;

  bool is_far_code() const { return _is_far_code; }

  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);

  // implicit exceptions support
  virtual address continuation_for_implicit_exception(address pc) { return NULL; }

  static address get_deopt_original_pc(const frame* fr);

  // Inline cache support for class unloading and nmethod unloading
 private:
  bool cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);

 public:
  // Serial version used by sweeper and whitebox test
  void cleanup_inline_caches(bool clean_all);

  virtual void clear_inline_caches();
  void clear_ic_callsites();

  // Verify and count cached icholder relocations.
  int  verify_icholder_relocations();
  void verify_oop_relocations();

  bool has_evol_metadata();

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee) = 0;

  virtual NativeCallWrapper* call_wrapper_at(address call) const = 0;
  virtual NativeCallWrapper* call_wrapper_before(address return_pc) const = 0;
  virtual address call_instruction_address(address pc) const = 0;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const = 0;

  Method* attached_method(address call_pc);
  Method* attached_method_before_pc(address pc);

  virtual void metadata_do(MetadataClosure* f) = 0;

  // GC support
 protected:
  address oops_reloc_begin() const;

 private:
  static bool clean_ic_if_metadata_is_dead(CompiledIC *ic);

 public:
  // GC unloading support
  // Cleans unloaded klasses and unloaded nmethods in inline caches

  virtual bool is_unloading() = 0;

  bool unload_nmethod_caches(bool class_unloading_occurred);
  virtual void do_unloading(bool unloading_occurred) = 0;

 private:
  PcDesc* find_pc_desc(address pc, bool approximate) {
    return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
  }
};

#endif // SHARE_CODE_COMPILEDMETHOD_HPP