16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25 # include "incls/_precompiled.incl"
26 # include "incls/_sweeper.cpp.incl"
27
28 long NMethodSweeper::_traversals = 0; // No. of stack traversals performed
29 CodeBlob* NMethodSweeper::_current = NULL; // Current nmethod
30 int NMethodSweeper::_seen = 0 ; // No. of blobs we have currently processed in current pass of CodeCache
31 int NMethodSweeper::_invocations = 0; // No. of invocations left until we are done with this pass
32
33 jint NMethodSweeper::_locked_seen = 0;
34 jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
35 bool NMethodSweeper::_rescan = false;
36
37 class MarkActivationClosure: public CodeBlobClosure {
38 public:
39 virtual void do_code_blob(CodeBlob* cb) {
40 // If we see an activation belonging to a non_entrant nmethod, we mark it.
41 if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
42 ((nmethod*)cb)->mark_as_seen_on_stack();
43 }
44 }
45 };
46 static MarkActivationClosure mark_activation_closure;
47
48 void NMethodSweeper::sweep() {
49 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
50 if (!MethodFlushing) return;
51
52 // No need to synchronize access, since this is always executed at a
53 // safepoint. If we aren't in the middle of a scan and a rescan
54 // hasn't been requested, just return.
55 if (_current == NULL && !_rescan) return;
97 _current = next;
98 }
99 // Because we could stop on a codeBlob other than an nmethod we skip forward
100 // to the next nmethod (if any). codeBlobs other than nmethods can be freed
101 // async to us and make _current invalid while we sleep.
102 while (_current != NULL && !_current->is_nmethod()) {
103 _current = CodeCache::next(_current);
104 }
105
106 if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
107 // we've completed a scan without making progress but there were
108 // nmethods we were unable to process either because they were
109 // locked or were still on stack. We don't have to aggressively
110 // clean them up so just stop scanning. We could scan once more
111 // but that complicates the control logic and it's unlikely to
112 // matter much.
113 if (PrintMethodFlushing) {
114 tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
115 }
116 }
117 }
118
119
120 void NMethodSweeper::process_nmethod(nmethod *nm) {
121 // Skip methods that are currently referenced by the VM
122 if (nm->is_locked_by_vm()) {
123 // But still remember to clean up inline caches for alive nmethods
124 if (nm->is_alive()) {
125 // Clean up all inline caches that point to zombie/non-entrant methods
126 nm->cleanup_inline_caches();
127 } else {
128 _locked_seen++;
129 }
130 return;
131 }
132
133 if (nm->is_zombie()) {
134 // The first time we see an nmethod we mark it; otherwise we reclaim it.
135 // Once we have seen a zombie method twice, we know that there are no
136 // inline caches left that refer to it.
160 // Still alive, clean up its inline caches
161 nm->cleanup_inline_caches();
162 // We couldn't transition this nmethod, so don't immediately request a
163 // rescan. If this method stays on the stack for a long time we don't
164 // want to keep rescanning it at every safepoint.
165 _not_entrant_seen_on_stack++;
166 }
167 } else if (nm->is_unloaded()) {
168 // Unloaded code, just make it a zombie
169 if (PrintMethodFlushing && Verbose)
170 tty->print_cr("### Nmethod " PTR_FORMAT " (unloaded) being made zombie", nm);
171 if (nm->is_osr_method()) {
172 // No inline caches will ever point to osr methods, so we can just remove it
173 nm->flush();
174 } else {
175 nm->make_zombie();
176 _rescan = true;
177 }
178 } else {
179 assert(nm->is_alive(), "should be alive");
180 // Clean-up all inline caches that points to zombie/non-reentrant methods
181 nm->cleanup_inline_caches();
182 }
183 }
|
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
24
25 # include "incls/_precompiled.incl"
26 # include "incls/_sweeper.cpp.incl"
27
28 long NMethodSweeper::_traversals = 0; // No. of stack traversals performed
29 CodeBlob* NMethodSweeper::_current = NULL; // Current nmethod
30 int NMethodSweeper::_seen = 0 ; // No. of blobs we have currently processed in current pass of CodeCache
31 int NMethodSweeper::_invocations = 0; // No. of invocations left until we are done with this pass
32
33 jint NMethodSweeper::_locked_seen = 0;
34 jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
35 bool NMethodSweeper::_rescan = false;
36 bool NMethodSweeper::_was_full = false;
37 jint NMethodSweeper::_advise_to_sweep = 0;
38 jlong NMethodSweeper::_last_was_full = 0;
39 uint NMethodSweeper::_highest_marked = 0;
40 long NMethodSweeper::_was_full_traversal = 0;
41
42 class MarkActivationClosure: public CodeBlobClosure {
43 public:
44 virtual void do_code_blob(CodeBlob* cb) {
45 // If we see an activation belonging to a non_entrant nmethod, we mark it.
46 if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
47 ((nmethod*)cb)->mark_as_seen_on_stack();
48 }
49 }
50 };
51 static MarkActivationClosure mark_activation_closure;
52
53 void NMethodSweeper::sweep() {
54 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
55 if (!MethodFlushing) return;
56
57 // No need to synchronize access, since this is always executed at a
58 // safepoint. If we aren't in the middle of a scan and a rescan
59 // hasn't been requested, just return.
60 if (_current == NULL && !_rescan) return;
102 _current = next;
103 }
104 // Because we could stop on a codeBlob other than an nmethod we skip forward
105 // to the next nmethod (if any). codeBlobs other than nmethods can be freed
106 // async to us and make _current invalid while we sleep.
107 while (_current != NULL && !_current->is_nmethod()) {
108 _current = CodeCache::next(_current);
109 }
110
111 if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
112 // we've completed a scan without making progress but there were
113 // nmethods we were unable to process either because they were
114 // locked or were still on stack. We don't have to aggressively
115 // clean them up so just stop scanning. We could scan once more
116 // but that complicates the control logic and it's unlikely to
117 // matter much.
118 if (PrintMethodFlushing) {
119 tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
120 }
121 }
122
123 if (UseCodeCacheFlushing) {
124 if (CodeCache::unallocated_capacity() > CodeCacheFlushingMinimumFreeSpace) {
125 // In a safepoint, no race with setters
126 _advise_to_sweep = 0;
127 }
128
129 if (was_full()) {
130 // There was some progress so attempt to restart the compiler
131 jlong now = os::javaTimeMillis();
132 jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
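      // MinCodeCacheFlushingInterval is expressed in seconds (hence the * 1000
      // above), while os::javaTimeMillis() and _last_was_full are milliseconds.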
133 jlong curr_interval = now - _last_was_full;
134 if ((CodeCache::unallocated_capacity() > CodeCacheFlushingMinimumFreeSpace) &&
135 (curr_interval > max_interval)){
136 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
137 set_was_full(false);
138
139 // Update the _last_was_full time so we can tell how fast the
140 // code cache is filling up
141 _last_was_full = os::javaTimeMillis();
142
143 if (PrintMethodFlushing) {
144 tty->print_cr("### sweeper: " UINT32_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT " restarting compiler",
145 CodeCache::nof_blobs(), CodeCache::unallocated_capacity(), CodeCache::max_capacity());
146 }
147 }
148 }
149 }
150 }
151
152
153 void NMethodSweeper::process_nmethod(nmethod *nm) {
154 // Skip methods that are currently referenced by the VM
155 if (nm->is_locked_by_vm()) {
156 // But still remember to clean up inline caches for alive nmethods
157 if (nm->is_alive()) {
158 // Clean up all inline caches that point to zombie/non-entrant methods
159 nm->cleanup_inline_caches();
160 } else {
161 _locked_seen++;
162 }
163 return;
164 }
165
166 if (nm->is_zombie()) {
167 // The first time we see an nmethod we mark it; otherwise we reclaim it.
168 // Once we have seen a zombie method twice, we know that there are no
169 // inline caches left that refer to it.
193 // Still alive, clean up its inline caches
194 nm->cleanup_inline_caches();
195 // We couldn't transition this nmethod, so don't immediately request a
196 // rescan. If this method stays on the stack for a long time we don't
197 // want to keep rescanning it at every safepoint.
198 _not_entrant_seen_on_stack++;
199 }
200 } else if (nm->is_unloaded()) {
201 // Unloaded code, just make it a zombie
202 if (PrintMethodFlushing && Verbose)
203 tty->print_cr("### Nmethod " PTR_FORMAT " (unloaded) being made zombie", nm);
204 if (nm->is_osr_method()) {
205 // No inline caches will ever point to osr methods, so we can just remove it
206 nm->flush();
207 } else {
208 nm->make_zombie();
209 _rescan = true;
210 }
211 } else {
212 assert(nm->is_alive(), "should be alive");
213
214 if (UseCodeCacheFlushing) {
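      // Informal reading of the test below: the methodOop no longer points at this
      // nmethod (it was speculatively disconnected), a couple of traversals have
      // passed since the forced cleanup, its compile id lies in the range that was
      // marked for flushing, and the code cache is still short on free space;
      // only then do we finally give up on the nmethod.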
215 if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
216 (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
217 (CodeCache::unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace)) {
218 // This method has not been called since the forced cleanup happened
219 nm->make_not_entrant();
220 nm->method()->set_saved_code(NULL);
221 }
222 }
223
224 // Clean up all inline caches that point to zombie/non-entrant methods
225 nm->cleanup_inline_caches();
226 }
227 }
228
229 // Code cache unloading: when compilers notice the code cache is getting full,
230 // they will call a vm op that comes here. This code attempts to speculatively
231 // unload the oldest half of the nmethods (based on the compile job id) by
232 // hiding the methodOop's ref to the nmethod in the _saved_code field. Then
233 // execution resumes. If a method so marked is not called by the second
234 // safepoint from the current one, the nmethod will be marked not entrant and
235 // reclaimed by normal sweeping. If the method is called, the methodOop's
236 // _code field is restored from the _saved_code field and the methodOop/nmethod
237 // go back to their normal state.
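// Rough lifecycle, for illustration: a compiler thread notices the code cache is
// full and calls handle_full_code_cache(true); compilation is stopped and a
// VM_HandleFullCodeCache operation is posted; speculative_disconnect_nmethods()
// then hides the methodOop's nmethod reference in _saved_code for the oldest half
// of the compiled methods; methods that are called again are restored from
// _saved_code, while the rest are made not entrant by process_nmethod() a couple
// of sweeps later and are flushed like any other not-entrant nmethod.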
238 void NMethodSweeper::handle_full_code_cache(bool is_full) {
239 // Only the first one to notice can advise us to start early cleaning
240 if (!is_full){
241 jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
242 if (old != 0) {
243 return;
244 }
245 }
246
247 if (is_full) {
248 // Since code cache is full, immediately stop new compiles
249 bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
250 if (!did_set) {
251 // only the first to notice can start the cleaning,
252 // others will go back and block
253 return;
254 }
255 set_was_full(true);
256
257 // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
258 jlong now = os::javaTimeMillis();
259 jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
260 jlong curr_interval = now - _last_was_full;
261 if (curr_interval < max_interval) {
262 _rescan = true;
263 if (PrintMethodFlushing) {
264 tty->print_cr("### handle full too often, turning off compiler");
265 }
266 return;
267 }
268 }
269
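  // The VM operation below runs at a safepoint and is expected to end up in
  // speculative_disconnect_nmethods() (see VM_HandleFullCodeCache::doit()).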
270 VM_HandleFullCodeCache op(is_full);
271 VMThread::execute(&op);
272
273 // rescan again as soon as possible
274 _rescan = true;
275 }
276
277 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
278 // If there was a race in detecting full code cache, only run
279 // one vm op for it or keep the compiler shut off
280
281 debug_only(jlong start = os::javaTimeMillis();)
282
283 if ((!was_full()) && (is_full)) {
284 if (CodeCache::unallocated_capacity() > CodeCacheFlushingMinimumFreeSpace) {
285 if (PrintMethodFlushing) {
286 tty->print_cr("### sweeper: " UINT32_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT " restarting compiler",
287 CodeCache::nof_blobs(), CodeCache::unallocated_capacity(), CodeCache::max_capacity());
288 }
289 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
290 return;
291 }
292 }
293
294 // Traverse the code cache trying to dump the oldest nmethods
295 uint curr_max_comp_id = CompileBroker::get_compilation_id();
296 uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
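  // Worked example (round numbers for illustration): with curr_max_comp_id == 10000
  // and _highest_marked == 4000, flush_target == 4000 + (10000 - 4000) / 2 == 7000,
  // i.e. the older half of the compile ids issued since the last mark become
  // candidates for disconnection.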
297 if (PrintMethodFlushing) {
298 tty->print_cr("### Cleaning code cache: " UINT32_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
299 CodeCache::nof_blobs(), CodeCache::unallocated_capacity(), CodeCache::max_capacity());
300 }
301
302 nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
303
304 while ((nm != NULL)){
305 uint curr_comp_id = nm->compile_id();
306
307 // OSR methods cannot be flushed like this. Also, don't flush native methods
308 // since they are part of the JDK in most cases
309 if(nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
310 (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {
311
312 if ((nm->method()->code() == nm)) {
313 // This method has not been previously considered for
314 // unloading or it was restored already
315 nm->method()->clear_code();
316 } else if (nm->method()->saved_code() == nm) {
317 // This method was previously considered for preemptive unloading and was not called since then
318 nm->method()->set_saved_code(NULL);
319 nm->method()->invocation_counter()->decay();
320 nm->method()->backedge_counter()->decay();
321 nm->make_not_entrant();
322 }
323
324 if (curr_comp_id > _highest_marked) {
325 _highest_marked = curr_comp_id;
326 }
327 }
328 nm = CodeCache::alive_nmethod(CodeCache::next(nm));
329 }
330
331 // Shut off the compiler. The sweeper will run as we exit this safepoint
332 // and turn compilation back on if it clears enough space.
333 if (was_full()) {
334 _last_was_full = os::javaTimeMillis();
335 CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
336 }
337
338 // After two more traversals the sweeper will get rid of unrestored nmethods
339 _was_full_traversal = _traversals;
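  // (process_nmethod() checks _traversals > _was_full_traversal + 2 before making
  // a disconnected nmethod not entrant.)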
340 debug_only(jlong end = os::javaTimeMillis(); if(PrintMethodFlushing) tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);)
341 }
|