#else
#define SWEEP(nm)
#endif
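// Note: in the #ifdef branch above (elided here) SWEEP(nm) presumably records
// each sweep action for debugging; in this #else branch it expands to nothing,
// so all SWEEP(nm) calls below compile away.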

long NMethodSweeper::_traversals = 0;     // No. of stack traversals performed
nmethod* NMethodSweeper::_current = NULL; // Current nmethod
int NMethodSweeper::_seen = 0;            // No. of nmethods processed so far in the current pass of the CodeCache
int NMethodSweeper::_flushed_count = 0;   // No. of nmethods flushed in current sweep
int NMethodSweeper::_zombified_count = 0; // No. of nmethods made zombie in current sweep
int NMethodSweeper::_marked_count = 0;    // No. of nmethods marked for reclaim in current sweep

volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until this pass of the CodeCache is complete
volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.

jint NMethodSweeper::_locked_seen = 0;
jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
bool NMethodSweeper::_resweep = false;
jint NMethodSweeper::_flush_token = 0;
jlong NMethodSweeper::_last_full_flush_time = 0;
int NMethodSweeper::_highest_marked = 0;
int NMethodSweeper::_dead_compile_ids = 0;
long NMethodSweeper::_last_flush_traversal_id = 0;

int NMethodSweeper::_number_of_flushes = 0; // Total number of full traversals caused by a full code cache
int NMethodSweeper::_total_nof_methods_reclaimed = 0;
jlong NMethodSweeper::_total_time_sweeping = 0;
jlong NMethodSweeper::_total_time_this_sweep = 0;
jlong NMethodSweeper::_peak_sweep_time = 0;
jlong NMethodSweeper::_peak_sweep_fraction_time = 0;
jlong NMethodSweeper::_total_disconnect_time = 0;
jlong NMethodSweeper::_peak_disconnect_time = 0;

class MarkActivationClosure: public CodeBlobClosure {
public:
  virtual void do_code_blob(CodeBlob* cb) {
    // If we see an activation belonging to a non_entrant nmethod, we mark it.
    if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
      ((nmethod*)cb)->mark_as_seen_on_stack();
    }
  }
};
static MarkActivationClosure mark_activation_closure;
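// Stateless closure, so a single static instance suffices; it is presumably
// applied to every code blob found while walking thread stacks during the
// safepoint in scan_stacks() below.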

bool NMethodSweeper::sweep_in_progress() {
  return (_current != NULL);
}

void NMethodSweeper::scan_stacks() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
  if (!MethodFlushing) return;

  // No need to synchronize access, since this is always executed at a
  // safepoint.

  // Make sure CompiledIC_lock is unlocked, since we might update some
  // inline caches. If it is locked, we just bail out and try again later.
  if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;

// [... intervening code elided ...]

      // Since we will give up the CodeCache_lock, always skip ahead
      // to the next nmethod. Other blobs can be deleted by other
      // threads but nmethods are only reclaimed by the sweeper.
      nmethod* next = CodeCache::next_nmethod(_current);

      // Now ready to process nmethod and give up CodeCache_lock
      {
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        process_nmethod(_current);
      }
      _seen++;
      _current = next;
    }
  }

  assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");

  if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
    // We've completed a scan without making progress but there were
    // nmethods we were unable to process either because they were
    // locked or were still on stack. We don't have to aggressively
    // clean them up so just stop scanning. We could scan once more
    // but that complicates the control logic and it's unlikely to
    // matter much.
    if (PrintMethodFlushing) {
      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
    }
  }

  jlong sweep_end_counter = os::elapsed_counter();
  jlong sweep_time = sweep_end_counter - sweep_start_counter;
  _total_time_sweeping += sweep_time;
  _total_time_this_sweep += sweep_time;
  _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
  _total_nof_methods_reclaimed += _flushed_count;

  EventSweepCodeCache event(UNTIMED);
  if (event.should_commit()) {
    event.set_starttime(sweep_start_counter);
    event.set_endtime(sweep_end_counter);
    event.set_sweepIndex(_traversals);

// [... intervening code elided ...]

void NMethodSweeper::release_nmethod(nmethod *nm) {
  // Clean up any CompiledICHolders
  {
    ResourceMark rm;
    MutexLocker ml_patch(CompiledIC_lock);
    RelocIterator iter(nm);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
      }
    }
  }

  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
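  // flush() returns the nmethod's storage to the code cache's free space;
  // the CodeCache_lock acquired above must be held across the call.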
  nm->flush();
}

void NMethodSweeper::process_nmethod(nmethod *nm) {
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  // Make sure this nmethod doesn't get unloaded during the scan,
  // since the locks acquired below might safepoint.
  NMethodMarker nmm(nm);

  SWEEP(nm);

  // Skip methods that are currently referenced by the VM
  if (nm->is_locked_by_vm()) {
    // But still remember to clean-up inline caches for alive nmethods
    if (nm->is_alive()) {
      // Clean-up all inline caches that point to zombie/non-reentrant methods
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      SWEEP(nm);
    } else {
      _locked_seen++;
      SWEEP(nm);
    }
    return;
  }

  if (nm->is_zombie()) {
    // The first time we see a zombie nmethod we mark it; the second time we
    // reclaim it. Once we have seen a zombie method twice, we know that
    // there are no inline caches that refer to it.
    if (nm->is_marked_for_reclamation()) {
      assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
      }
      release_nmethod(nm);
      _flushed_count++;
    } else {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
      }
      nm->mark_for_reclamation();
      _resweep = true;
      _marked_count++;
      SWEEP(nm);
    }
  } else if (nm->is_not_entrant()) {
    // If there are no current activations of this method on the
    // stack we can safely convert it to a zombie method
    if (nm->can_not_entrant_be_converted()) {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
      }
      nm->make_zombie();
      _resweep = true;
      _zombified_count++;
      SWEEP(nm);
    } else {
      // Still alive, clean up its inline caches
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      // We couldn't transition this nmethod, so don't immediately
      // request a rescan. If this method stays on the stack for a
      // long time we don't want to keep rescanning the code cache.
      _not_entrant_seen_on_stack++;
      SWEEP(nm);
    }
  } else if (nm->is_unloaded()) {
    // Unloaded code, just make it a zombie
    if (PrintMethodFlushing && Verbose)
      tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);

    if (nm->is_osr_method()) {
      SWEEP(nm);
      // No inline caches will ever point to osr methods, so we can just remove it
      release_nmethod(nm);
      _flushed_count++;
    } else {
      nm->make_zombie();
      _resweep = true;
      _zombified_count++;
      SWEEP(nm);
    }
  } else {
    assert(nm->is_alive(), "should be alive");

    if (UseCodeCacheFlushing) {
      if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
          (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
        // This method has not been called since the forced cleanup happened
        nm->make_not_entrant();
      }
    }

    // Clean-up all inline caches that point to zombie/non-reentrant methods
    MutexLocker cl(CompiledIC_lock);
    nm->cleanup_inline_caches();
    SWEEP(nm);
  }
}

// Code cache unloading: when compilers notice the code cache is getting full,
// they will call a vm op that comes here. This code attempts to speculatively
// unload the oldest half of the nmethods (based on compile job id) by saving
// the old code in a list in the CodeCache. Then execution resumes. If a method
// so marked is not called during the second sweeper stack traversal after the
// current one, the nmethod will be made non-entrant and removed by normal
// sweeping. If the method is called, the Method*'s _code field is restored
// and the Method*/nmethod pair go back to their normal state.
void NMethodSweeper::handle_full_code_cache(bool is_full) {

  if (is_full) {
    // Since code cache is full, immediately stop new compiles
    if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
      log_sweep("disable_compiler");
    }
  }

  // Make sure only one thread can flush.
  // The token is reset after CodeCacheMinimumFlushInterval in scan_stacks(),
  // so there is no need to check the timeout here.
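  // Only the thread whose compare-and-swap flips _flush_token from 0 to 1
  // proceeds to issue the VM operation; losers of the race return immediately.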
  jint old = Atomic::cmpxchg(1, &_flush_token, 0);
  if (old != 0) {
    return;
  }

  VM_HandleFullCodeCache op(is_full);
  VMThread::execute(&op);

  // Resweep again as soon as possible
  _resweep = true;
}

void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
  // If there was a race in detecting full code cache, only run
  // one vm op for it or keep the compiler shut off

  jlong disconnect_start_counter = os::elapsed_counter();

  // Traverse the code cache trying to dump the oldest nmethods
  int curr_max_comp_id = CompileBroker::get_compilation_id();
  int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
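  // _dead_compile_ids approximates how many old compile ids no longer have an
  // nmethod, so (curr_max_comp_id - _dead_compile_ids) is roughly the number
  // of live ids; flush_target rebases the oldest 1/CodeCacheFlushingFraction
  // of that range back into the live id space.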

  log_sweep("start_cleaning");

  nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
  jint disconnected = 0;
  jint made_not_entrant = 0;
  jint nmethod_count = 0;

  while (nm != NULL) {
    int curr_comp_id = nm->compile_id();

    // OSR methods cannot be flushed like this. Also, don't flush native methods
    // since they are part of the JDK in most cases
    if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {

      // Only count methods that can be speculatively disconnected
      nmethod_count++;

      if (nm->is_in_use() && (curr_comp_id < flush_target)) {
        if (nm->method()->code() == nm) {
          // This method has not been previously considered for
          // unloading, or it was restored already
          CodeCache::speculatively_disconnect(nm);
          disconnected++;
        } else if (nm->is_speculatively_disconnected()) {
          // This method was previously considered for preemptive unloading
          // and has not been called since then
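          // Delay recompilation of the method so it does not immediately
          // win back the space that is being reclaimed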
          CompilationPolicy::policy()->delay_compilation(nm->method());
          nm->make_not_entrant();
          made_not_entrant++;
        }

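        // Track the newest compile id considered in this pass; nmethods with
        // smaller ids predate this cleanup, which process_nmethod() uses when
        // deciding whether a disconnected method can be made not-entrant.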
        if (curr_comp_id > _highest_marked) {
          _highest_marked = curr_comp_id;
        }
      }
    }
    nm = CodeCache::alive_nmethod(CodeCache::next(nm));
  }

  // Remember how many compile ids weren't seen during the last flush.
  _dead_compile_ids = curr_max_comp_id - nmethod_count;

  log_sweep("stop_cleaning",
            "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
            disconnected, made_not_entrant);

  // Shut off compiler. Sweeper will start over with a new stack scan and
  // traversal cycle and turn it back on if it clears enough space.
  if (is_full) {
    _last_full_flush_time = os::javaTimeMillis();
  }

  jlong disconnect_end_counter = os::elapsed_counter();
  jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
  _total_disconnect_time += disconnect_time;
  _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);

  EventCleanCodeCache event(UNTIMED);
  if (event.should_commit()) {
    event.set_starttime(disconnect_start_counter);
    event.set_endtime(disconnect_end_counter);
// [... remainder elided ...]


// ============================================================================
// Second revision of the sweeper: nmethod hotness counters replace the
// compile-id-based aging above.
// ============================================================================

#else
#define SWEEP(nm)
#endif

long NMethodSweeper::_traversals = 0;     // No. of stack traversals performed
nmethod* NMethodSweeper::_current = NULL; // Current nmethod
int NMethodSweeper::_seen = 0;            // No. of nmethods processed so far in the current pass of the CodeCache
int NMethodSweeper::_flushed_count = 0;   // No. of nmethods flushed in current sweep
int NMethodSweeper::_zombified_count = 0; // No. of nmethods made zombie in current sweep
int NMethodSweeper::_marked_count = 0;    // No. of nmethods marked for reclaim in current sweep

volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until this pass of the CodeCache is complete
volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.

jint NMethodSweeper::_locked_seen = 0;
jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
bool NMethodSweeper::_resweep = false;
jint NMethodSweeper::_flush_token = 0;
jlong NMethodSweeper::_last_full_flush_time = 0;
long NMethodSweeper::_last_flush_traversal_id = 0;

int NMethodSweeper::_number_of_flushes = 0; // Total number of full traversals caused by a full code cache
int NMethodSweeper::_total_nof_methods_reclaimed = 0;
jlong NMethodSweeper::_total_time_sweeping = 0;
jlong NMethodSweeper::_total_time_this_sweep = 0;
jlong NMethodSweeper::_peak_sweep_time = 0;
jlong NMethodSweeper::_peak_sweep_fraction_time = 0;
jlong NMethodSweeper::_total_disconnect_time = 0;
jlong NMethodSweeper::_peak_disconnect_time = 0;

class MarkActivationClosure: public CodeBlobClosure {
public:
  virtual void do_code_blob(CodeBlob* cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
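      // An nmethod found on a thread stack is demonstrably in use, so reset
      // its hotness counter; process_nmethod() decrements the counter on
      // every sweep, making methods that stay off-stack grow colder.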
      nm->set_hotness_counter(NMethodSweeper::hc_reset_value);
      // If we see an activation belonging to a non_entrant nmethod, we mark it.
      if (nm->is_not_entrant()) {
        nm->mark_as_seen_on_stack();
      }
    }
  }
};
static MarkActivationClosure mark_activation_closure;

bool NMethodSweeper::sweep_in_progress() {
  return (_current != NULL);
}

void NMethodSweeper::scan_stacks() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
  if (!MethodFlushing) return;

  // No need to synchronize access, since this is always executed at a
  // safepoint.

  // Make sure CompiledIC_lock is unlocked, since we might update some
  // inline caches. If it is locked, we just bail out and try again later.
  if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;

// [... intervening code elided ...]

      // Since we will give up the CodeCache_lock, always skip ahead
      // to the next nmethod. Other blobs can be deleted by other
      // threads but nmethods are only reclaimed by the sweeper.
      nmethod* next = CodeCache::next_nmethod(_current);

      // Now ready to process nmethod and give up CodeCache_lock
      {
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        process_nmethod(_current);
      }
      _seen++;
      _current = next;
    }
  }

  assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");

  if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
    // We've completed a scan without making progress but there were
    // nmethods we were unable to process either because they were
    // locked or were still on stack. We don't have to aggressively
    // clean them up so just stop scanning. We could scan once more
    // but that complicates the control logic and it's unlikely to
    // matter much.
    if (PrintMethodFlushing) {
      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
    }
  }

  jlong sweep_end_counter = os::elapsed_counter();
  jlong sweep_time = sweep_end_counter - sweep_start_counter;
  _total_time_sweeping += sweep_time;
  _total_time_this_sweep += sweep_time;
  _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
  _total_nof_methods_reclaimed += _flushed_count;

  EventSweepCodeCache event(UNTIMED);
  if (event.should_commit()) {
    event.set_starttime(sweep_start_counter);
    event.set_endtime(sweep_end_counter);
    event.set_sweepIndex(_traversals);

// [... intervening code elided ...]

void NMethodSweeper::release_nmethod(nmethod *nm) {
  // Clean up any CompiledICHolders
  {
    ResourceMark rm;
    MutexLocker ml_patch(CompiledIC_lock);
    RelocIterator iter(nm);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
      }
    }
  }

  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nm->flush();
}

void NMethodSweeper::process_nmethod(nmethod *nm) {
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  // Make sure this nmethod doesn't get unloaded during the scan,
  // since the locks acquired below might safepoint.
  NMethodMarker nmm(nm);
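  // Each sweeper visit cools the method down a bit; being seen on a stack
  // during scan_stacks() resets it to full hotness (see MarkActivationClosure).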
  nm->dec_hotness_counter(NMethodSweeper::hc_dec_value);

  SWEEP(nm);

  // Skip methods that are currently referenced by the VM
  if (nm->is_locked_by_vm()) {
    // But still remember to clean-up inline caches for alive nmethods
    if (nm->is_alive()) {
      // Clean-up all inline caches that point to zombie/non-reentrant methods
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      SWEEP(nm);
    } else {
      _locked_seen++;
      SWEEP(nm);
    }
    return;
  }

  if (nm->is_zombie()) {
    // The first time we see a zombie nmethod we mark it; the second time we
    // reclaim it. Once we have seen a zombie method twice, we know that
    // there are no inline caches that refer to it.
    if (nm->is_marked_for_reclamation()) {
      assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
      }
      release_nmethod(nm);
      _flushed_count++;
    } else {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
      }
      nm->mark_for_reclamation();
      _resweep = true;
      _marked_count++;
      SWEEP(nm);
    }
  } else if (nm->is_not_entrant()) {
    // If there are no current activations of this method on the
    // stack we can safely convert it to a zombie method
    if (nm->can_not_entrant_be_converted()) {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
      }
      nm->make_zombie();
      _resweep = true;
      _zombified_count++;
      SWEEP(nm);
    } else {
      // Still alive, clean up its inline caches
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      // We couldn't transition this nmethod, so don't immediately
      // request a rescan. If this method stays on the stack for a
      // long time we don't want to keep rescanning the code cache.
      _not_entrant_seen_on_stack++;
      SWEEP(nm);
    }
  } else if (nm->is_unloaded()) {
    // Unloaded code, just make it a zombie
    if (PrintMethodFlushing && Verbose)
      tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);

    if (nm->is_osr_method()) {
      SWEEP(nm);
      // No inline caches will ever point to osr methods, so we can just remove it
      release_nmethod(nm);
      _flushed_count++;
    } else {
      nm->make_zombie();
      _resweep = true;
      _zombified_count++;
      SWEEP(nm);
    }
  } else {
    assert(nm->is_alive(), "should be alive");

    if (UseCodeCacheFlushing) {
      if (!nm->is_locked_by_vm() && !nm->is_osr_method()) {
        if (!nm->is_speculatively_disconnected()) {
          // This method is cold and the code cache is filling up => get rid of it.
          double threshold = -100 + (CodeCache::reverse_free_ratio() * NmethodSweepActivity);
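          // The fuller the code cache, the larger reverse_free_ratio() and
          // hence the threshold, so more methods qualify as cold;
          // NmethodSweepActivity scales how aggressive this is.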
          if (nm->get_hotness_counter() < threshold) {
            nm->make_not_entrant();
          }
        } else if (nm->is_speculatively_disconnected() && (_traversals > _last_flush_traversal_id + 2)) {
          // This method has not been called since the forced cleanup happened
          nm->make_not_entrant();
        }
      }
    }

    // Clean-up all inline caches that point to zombie/non-reentrant methods
    MutexLocker cl(CompiledIC_lock);
    nm->cleanup_inline_caches();
    SWEEP(nm);
  }
}

// Code cache unloading: when compilers notice the code cache is getting full,
// they will call a vm op that comes here. This code attempts to speculatively
// unload the coldest fraction of the nmethods (as determined by
// CodeCacheFlushingFraction) by saving the cold code in a list in the
// CodeCache. Then execution resumes. If a method so marked is not called
// during the second sweeper stack traversal after the current one, the
// nmethod will be made non-entrant and removed by normal sweeping. If the
// method is called, the Method*'s _code field is restored and the
// Method*/nmethod pair go back to their normal state.
void NMethodSweeper::handle_full_code_cache(bool is_full) {

  if (is_full) {
    // Since code cache is full, immediately stop new compiles
    if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
      log_sweep("disable_compiler");
    }
  }

  // Make sure only one thread can flush.
  // The token is reset after CodeCacheMinimumFlushInterval in scan_stacks(),
  // so there is no need to check the timeout here.
  jint old = Atomic::cmpxchg(1, &_flush_token, 0);
  if (old != 0) {
    return;
  }

  VM_HandleFullCodeCache op(is_full);
  VMThread::execute(&op);

  // Resweep again as soon as possible
  _resweep = true;
}

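// Comparator for GrowableArray::sort(): yields ascending hotness order, so
// the coldest nmethods (lowest hotness counter) come first.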
int NMethodSweeper::sort_nmentod_by_hotness(nmethod** nm1, nmethod** nm2) {
  return (*nm1)->get_hotness_counter() - (*nm2)->get_hotness_counter();
}

void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
  // If there was a race in detecting full code cache, only run
  // one vm op for it or keep the compiler shut off

  jlong disconnect_start_counter = os::elapsed_counter();

  jint disconnected = 0;
  jint made_not_entrant = 0;

  log_sweep("start_cleaning");

  {
    ResourceMark rm;
    GrowableArray<nmethod*>* live_methods = new GrowableArray<nmethod*>();
    nmethod* nm = CodeCache::next_nmethod(CodeCache::first());

    size_t methods_to_flush = CodeCache::nof_nmethods() / CodeCacheFlushingFraction;
    size_t methods_will_be_flushed = 0;

    // See how many methods are already 'in flight' to be flushed
    while ((nm != NULL) && (methods_will_be_flushed < methods_to_flush)) {
      // OSR methods cannot be flushed like this. Also, don't flush native methods
      // since they are part of the JDK in most cases
      if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method() && nm->is_alive()) {
        if (nm->is_in_use() && (nm->method()->code() == nm)) {
          live_methods->append(nm);
        } else if (nm->is_speculatively_disconnected()) {
          // This method was previously considered for preemptive unloading
          // and has not been called since then
          CompilationPolicy::policy()->delay_compilation(nm->method());
          nm->make_not_entrant();
          made_not_entrant++;
          methods_will_be_flushed++;
        } else {
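          // Alive, but either not entrant or its Method* already points at
          // newer code, so it is already on its way out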
          methods_will_be_flushed++;
        }
      }
      nm = CodeCache::next_nmethod(nm);
    }

    // Speculatively disconnect methods until we reach 'methods_to_flush'
    if (methods_will_be_flushed < methods_to_flush) {
      live_methods->sort(sort_nmentod_by_hotness);
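      // After the sort, live_methods is ordered coldest-first (lowest hotness
      // counter first), so the least recently active code is disconnected
      // ahead of hotter code.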
      // Iterate over the sorted array and speculatively disconnect these nmethods
      for (int i = 0; i < live_methods->length(); i++) {
        nm = live_methods->at(i);
        if (methods_will_be_flushed < methods_to_flush) {
          // Method was not previously disconnected
          if (nm->method()->code() == nm) {
            CodeCache::speculatively_disconnect(nm);
            disconnected++;
            methods_will_be_flushed++;
          }
        } else {
          // The requested number of nmethods is scheduled for flushing
          break;
        }
      }
    }
  }

  log_sweep("stop_cleaning",
            "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
            disconnected, made_not_entrant);

  // Shut off compiler. Sweeper will start over with a new stack scan and
  // traversal cycle and turn it back on if it clears enough space.
  if (is_full) {
    _last_full_flush_time = os::javaTimeMillis();
  }

  jlong disconnect_end_counter = os::elapsed_counter();
  jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
  _total_disconnect_time += disconnect_time;
  _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);

  EventCleanCodeCache event(UNTIMED);
  if (event.should_commit()) {
    event.set_starttime(disconnect_start_counter);
    event.set_endtime(disconnect_end_counter);
// [... remainder elided ...]