/*
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_COMPILEDMETHOD_HPP
#define SHARE_VM_CODE_COMPILEDMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;
class CompiledStaticCall;
class NativeCallWrapper;

// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  volatile int _count;
  ExceptionCache* _next;

  address pc_at(int index)                     { assert(index >= 0 && index < count(),""); return _pc[index]; }
  void    set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
  address handler_at(int index)                { assert(index >= 0 && index < count(),""); return _handler[index]; }
  void    set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
  int     count()                              { return OrderAccess::load_acquire(&_count); }
  // increment_count is only called under lock, but there may be concurrent readers.
  void    increment_count()                    { OrderAccess::release_store(&_count, _count + 1); }

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass*  exception_type()                  { return _exception_type; }
  ExceptionCache* next()                    { return _next; }
  void    set_next(ExceptionCache *ec)      { _next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};
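
// Informal note, derived from the accessors above rather than from any spec:
// a pc/handler pair is stored into its slot first and only published afterwards
// through increment_count(), which is a release store; count() is a load-acquire,
// so a reader that observes the new count also observes the matching _pc/_handler
// entries. That is what allows lock-free readers alongside the single writer that
// holds the lock.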

class nmethod;

// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently; find_pc_desc_internal has been
  // observed to return wrong results otherwise, because a C++ compiler
  // (namely xlC12) may duplicate field accesses when the elements are not
  // volatile.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_pc_descs[0] = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
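
// Informally, judging from last_pc_desc() and its use in PcDescContainer::find_pc_desc
// below: slot 0 holds the most recently recorded descriptor and serves as the fast-path
// probe, while the remaining slots retain other recently found descriptors (see the
// "last cache_size pc_descs found" note above).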

class PcDescSearch {
 private:
  address _code_begin;
  PcDesc* _lower;
  PcDesc* _upper;
 public:
  PcDescSearch(address code, PcDesc* lower, PcDesc* upper) :
    _code_begin(code), _lower(lower), _upper(upper)
  {
  }

  address code_begin() const       { return _code_begin; }
  PcDesc* scopes_pcs_begin() const { return _lower; }
  PcDesc* scopes_pcs_end() const   { return _upper; }
};

class PcDescContainer VALUE_OBJ_CLASS_SPEC {
 private:
  PcDescCache _pc_desc_cache;
 public:
  PcDescContainer() {}

  PcDesc* find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search);
  void    reset_to(PcDesc* initial_pc_desc) { _pc_desc_cache.reset_to(initial_pc_desc); }

  PcDesc* find_pc_desc(address pc, bool approximate, const PcDescSearch& search) {
    address base_address = search.code_begin();
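    // Fast path: probe the most recently recorded PcDesc (PcDescCache slot 0,
    // via last_pc_desc()) before falling back to find_pc_desc_internal().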
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - base_address) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate, search);
  }
};


class CompiledMethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;

  void init_defaults();
 protected:
  enum MarkForDeoptimizationStatus {
    not_marked,
    deoptimize,
    deoptimize_noupdate
  };

  MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization

  bool _is_far_code; // Code is far from CodeCache.
                     // Have to use far call instructions to call it from code in CodeCache.
  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Does this method have MethodHandle invokes?
  unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
  unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints

  Method* _method;
  address _scopes_data_begin;
  // All deoptees will resume execution at the location described by this address.
  address _deopt_handler_begin;
  // All deoptees at a MethodHandle call site will resume execution at the
  // location described by this address.
  address _deopt_mh_handler_begin;

  PcDescContainer _pc_desc_container;
  ExceptionCache * volatile _exception_cache;

  virtual void flush() = 0;
 protected:
  CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
  CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);

 public:
  virtual bool is_compiled() const                { return true; }

  bool  has_unsafe_access() const                 { return _has_unsafe_access; }
  void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }

  bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
  void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }

  bool  is_lazy_critical_native() const           { return _lazy_critical_native; }
  void  set_lazy_critical_native(bool z)          { _lazy_critical_native = z; }

  bool  has_wide_vectors() const                  { return _has_wide_vectors; }
  void  set_has_wide_vectors(bool z)              { _has_wide_vectors = z; }

  enum { in_use      = 0,  // executable nmethod
         not_used    = 1,  // not entrant, but revivable
         not_entrant = 2,  // marked for deoptimization but activations may still exist,
                           // will be transformed to zombie when all activations are gone
         zombie      = 3,  // no activations exist, nmethod is ready for purge
         unloaded    = 4   // there should be no activations, should not be called,
                           // will be transformed to zombie immediately
  };
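
  // Informal lifecycle summary, pieced together from the per-state comments above
  // (not an authoritative state machine): an in_use or not_used nmethod becomes
  // not_entrant when marked for deoptimization, turns into a zombie once no
  // activations remain, and a zombie is then eligible for purging; an unloaded
  // nmethod is transformed to zombie immediately.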

  virtual bool is_in_use() const = 0;
  virtual int  comp_level() const = 0;
  virtual int  compile_id() const = 0;

  virtual address verified_entry_point() const = 0;
  virtual void log_identity(xmlStream* log) const = 0;
  virtual void log_state_change() const = 0;
  virtual bool make_not_used() = 0;
  virtual bool make_not_entrant() = 0;
  virtual bool make_entrant() = 0;
  virtual address entry_point() const = 0;
  virtual bool make_zombie() = 0;
  virtual bool is_osr_method() const = 0;
  virtual int  osr_entry_bci() const = 0;
  Method* method() const                          { return _method; }
  virtual void print_pcs() = 0;
  bool is_native_method() const { return _method != NULL && _method->is_native(); }
  bool is_java_method() const   { return _method != NULL && !_method->is_native(); }

  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);
  ScopeDesc* scope_desc_near(address pc);

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  bool is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; }
  void mark_for_deoptimization(bool inc_recompile_counts = true) {
    _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
  }
  bool update_recompile_counts() const {
    // Update recompile counts when either the update is explicitly requested (deoptimize)
    // or the nmethod is not marked for deoptimization at all (not_marked).
    // The latter happens during uncommon traps when deoptimized nmethod is made not entrant.
    return _mark_for_deoptimization_status != deoptimize_noupdate;
  }

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  virtual oop oop_at(int index) const = 0;
  virtual Metadata* metadata_at(int index) const = 0;

  address scopes_data_begin() const { return _scopes_data_begin; }
  virtual address scopes_data_end() const = 0;
  int scopes_data_size() const { return scopes_data_end() - scopes_data_begin(); }

  virtual PcDesc* scopes_pcs_begin() const = 0;
  virtual PcDesc* scopes_pcs_end() const = 0;
  int scopes_pcs_size() const { return (intptr_t) scopes_pcs_end() - (intptr_t) scopes_pcs_begin(); }

  address insts_begin() const { return code_begin(); }
  address insts_end() const   { return stub_begin(); }
  // Returns true if a given address is in the 'insts' section. The method
  // insts_contains_inclusive() is end-inclusive.
  bool insts_contains(address addr) const { return insts_begin() <= addr && addr < insts_end(); }
  bool insts_contains_inclusive(address addr) const { return insts_begin() <= addr && addr <= insts_end(); }

  int insts_size() const { return insts_end() - insts_begin(); }

  virtual address consts_begin() const = 0;
  virtual address consts_end() const = 0;
  bool consts_contains(address addr) const { return consts_begin() <= addr && addr < consts_end(); }
  int consts_size() const { return consts_end() - consts_begin(); }

  virtual address stub_begin() const = 0;
  virtual address stub_end() const = 0;
  bool stub_contains(address addr) const { return stub_begin() <= addr && addr < stub_end(); }
  int stub_size() const { return stub_end() - stub_begin(); }

  virtual address handler_table_begin() const = 0;
  virtual address handler_table_end() const = 0;
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  int handler_table_size() const { return handler_table_end() - handler_table_begin(); }

  virtual address exception_begin() const = 0;

  virtual address nul_chk_table_begin() const = 0;
  virtual address nul_chk_table_end() const = 0;
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
  int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }

  virtual oop* oop_addr_at(int index) const = 0;
  virtual Metadata** metadata_addr_at(int index) const = 0;
  virtual void set_original_pc(const frame* fr, address pc) = 0;

  // Exception cache support
  // Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
  ExceptionCache* exception_cache() const         { return _exception_cache; }
  void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
  void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store(&_exception_cache, ec); }
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache(BoolObjectClosure* is_alive);

  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);
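
  // A rough sketch of how a caller might pair the two lookup/insert entry points
  // above; the actual call sites live in the exception-dispatch runtime, not in
  // this header, and compute_handler_from_tables() is a placeholder name, not a
  // real function:
  //
  //   address handler = cm->handler_for_exception_and_pc(exception, ret_pc);
  //   if (handler == NULL) {
  //     handler = compute_handler_from_tables(...);   // hypothetical helper
  //     cm->add_handler_for_exception_and_pc(exception, ret_pc, handler);
  //   }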

  // MethodHandle
  bool is_method_handle_return(address return_pc);
  address deopt_mh_handler_begin() const { return _deopt_mh_handler_begin; }

  address deopt_handler_begin() const { return _deopt_handler_begin; }
  virtual address get_original_pc(const frame* fr) = 0;
  // Deopt
  // Returns true if the PC is one we would expect if the frame is being deopted.
  bool is_deopt_pc      (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  bool is_deopt_entry(address pc);

  virtual bool can_convert_to_zombie() = 0;
  virtual const char* compile_kind() const = 0;
  virtual int get_state() const = 0;

  const char* state() const;

  bool is_far_code() const { return _is_far_code; }

  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);

  // implicit exceptions support
  virtual address continuation_for_implicit_exception(address pc) { return NULL; }

  static address get_deopt_original_pc(const frame* fr);

  // Inline cache support
  void cleanup_inline_caches(bool clean_all = false);
  virtual void clear_inline_caches();
  void clear_ic_stubs();

  // Verify and count cached icholder relocations.
  int  verify_icholder_relocations();
  void verify_oop_relocations();

  virtual bool is_evol_dependent_on(Klass* dependee) = 0;
  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee) = 0;

  virtual NativeCallWrapper* call_wrapper_at(address call) const = 0;
  virtual NativeCallWrapper* call_wrapper_before(address return_pc) const = 0;
  virtual address call_instruction_address(address pc) const = 0;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const = 0;

  Method* attached_method(address call_pc);
  Method* attached_method_before_pc(address pc);

  virtual void metadata_do(void f(Metadata*)) = 0;

  // GC support

  void set_unloading_next(CompiledMethod* next) { _unloading_next = next; }
  CompiledMethod* unloading_next()              { return _unloading_next; }

  static void clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive);

  // Check that all metadata is still alive
  void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);

  virtual void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  // The parallel versions are used by G1.
  virtual bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
  virtual void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);

  static unsigned char global_unloading_clock()   { return _global_unloading_clock; }
  static void increase_unloading_clock();

  void set_unloading_clock(unsigned char unloading_clock);
  unsigned char unloading_clock();

 protected:
  virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) = 0;
#if INCLUDE_JVMCI
  virtual bool do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) = 0;
#endif

 private:
  // GC support to help figure out if an nmethod has been
  // cleaned/unloaded by the current GC.
  static unsigned char _global_unloading_clock;

  volatile unsigned char _unloading_clock;   // Incremented after GC unloaded/cleaned the nmethod

  PcDesc* find_pc_desc(address pc, bool approximate) {
    return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
  }

 protected:
  union {
    // Used by G1 to chain nmethods.
    CompiledMethod* _unloading_next;
    // Used by non-G1 GCs to chain nmethods.
    nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
  };
};

#endif //SHARE_VM_CODE_COMPILEDMETHOD_HPP