372
373 _flushed_count = 0;
374 _zombified_count = 0;
375 _marked_for_reclamation_count = 0;
376
377 if (PrintMethodFlushing && Verbose) {
378 tty->print_cr("### Sweep at %d out of %d", _seen, CodeCache::nof_nmethods());
379 }
380
381 int swept_count = 0;
382 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
383 assert(!CodeCache_lock->owned_by_self(), "just checking");
384
385 int freed_memory = 0;
386 {
387 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
388
389 // The last invocation iterates until there are no more nmethods
390 while (!_current.end()) {
391 swept_count++;
392 handle_safepoint_request();
393 // Since we will give up the CodeCache_lock, always skip ahead
394 // to the next nmethod. Other blobs can be deleted by other
395 // threads but nmethods are only reclaimed by the sweeper.
396 nmethod* nm = _current.method();
397 _current.next();
398
399 // Now ready to process nmethod and give up CodeCache_lock
400 {
401 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
402 freed_memory += process_nmethod(nm);
403 }
404 _seen++;
405 }
406 }
407
408 assert(_current.end(), "must have scanned the whole cache");
409
410 const Ticks sweep_end_counter = Ticks::now();
411 const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
412 _total_time_sweeping += sweep_time;
413 _total_time_this_sweep += sweep_time;
414 _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
415 _total_flushed_size += freed_memory;
416 _total_nof_methods_reclaimed += _flushed_count;
417
418 EventSweepCodeCache event(UNTIMED);
419 if (event.should_commit()) {
420 event.set_starttime(sweep_start_counter);
421 event.set_endtime(sweep_end_counter);
422 event.set_sweepIndex(_traversals);
423 event.set_sweptCount(swept_count);
424 event.set_flushedCount(_flushed_count);
495 };
496
// Frees an nmethod that the sweeper has decided to reclaim: first tears down
// the inline-cache metadata referenced from its call sites, then returns its
// storage to the code cache. Caller must not hold CompiledIC_lock or
// CodeCache_lock (both are acquired below).
497 void NMethodSweeper::release_nmethod(nmethod* nm) {
498   // Clean up any CompiledICHolders
499   {
500     ResourceMark rm;
501     MutexLocker ml_patch(CompiledIC_lock);  // inline-cache call sites may only be touched under CompiledIC_lock
502     RelocIterator iter(nm);
503     while (iter.next()) {  // walk all relocation entries of this nmethod
504       if (iter.type() == relocInfo::virtual_call_type) {
505         CompiledIC::cleanup_call_site(iter.virtual_call_reloc());  // releases the call site's CompiledICHolder, if any
506       }
507     }
508   }
509
510   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);  // taken without safepoint check — flush() must not block for a safepoint
511   nm->flush();  // presumably unlinks nm and frees its code-cache storage — confirm against nmethod::flush()
512 }
513
514 int NMethodSweeper::process_nmethod(nmethod* nm) {
515 assert(!CodeCache_lock->owned_by_self(), "just checking");
516
517 int freed_memory = 0;
518 // Make sure this nmethod doesn't get unloaded during the scan,
519 // since safepoints may happen during acquired below locks.
520 NMethodMarker nmm(nm);
521 SWEEP(nm);
522
523 // Skip methods that are currently referenced by the VM
524 if (nm->is_locked_by_vm()) {
525 // But still remember to clean-up inline caches for alive nmethods
526 if (nm->is_alive()) {
527 // Clean inline caches that point to zombie/non-entrant methods
528 MutexLocker cl(CompiledIC_lock);
529 nm->cleanup_inline_caches();
530 SWEEP(nm);
531 }
532 return freed_memory;
533 }
534
|
372
373 _flushed_count = 0;
374 _zombified_count = 0;
375 _marked_for_reclamation_count = 0;
376
377 if (PrintMethodFlushing && Verbose) {
378 tty->print_cr("### Sweep at %d out of %d", _seen, CodeCache::nof_nmethods());
379 }
380
381 int swept_count = 0;
382 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
383 assert(!CodeCache_lock->owned_by_self(), "just checking");
384
385 int freed_memory = 0;
386 {
387 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
388
389 // The last invocation iterates until there are no more nmethods
390 while (!_current.end()) {
391 swept_count++;
392 // Since we will give up the CodeCache_lock, always skip ahead
393 // to the next nmethod. Other blobs can be deleted by other
394 // threads but nmethods are only reclaimed by the sweeper.
395 nmethod* nm = _current.method();
396 _current.next();
397
398 // Now ready to process nmethod and give up CodeCache_lock
399 {
400 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
401 freed_memory += process_nmethod(nm);
402 }
403 _seen++;
404 handle_safepoint_request();
405 }
406 }
407
408 assert(_current.end(), "must have scanned the whole cache");
409
410 const Ticks sweep_end_counter = Ticks::now();
411 const Tickspan sweep_time = sweep_end_counter - sweep_start_counter;
412 _total_time_sweeping += sweep_time;
413 _total_time_this_sweep += sweep_time;
414 _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
415 _total_flushed_size += freed_memory;
416 _total_nof_methods_reclaimed += _flushed_count;
417
418 EventSweepCodeCache event(UNTIMED);
419 if (event.should_commit()) {
420 event.set_starttime(sweep_start_counter);
421 event.set_endtime(sweep_end_counter);
422 event.set_sweepIndex(_traversals);
423 event.set_sweptCount(swept_count);
424 event.set_flushedCount(_flushed_count);
495 };
496
// Frees an nmethod selected for reclamation: cleans up the inline-cache
// metadata hanging off its virtual call sites, then hands its storage back to
// the code cache. Acquires CompiledIC_lock and then CodeCache_lock, so the
// caller must hold neither.
497 void NMethodSweeper::release_nmethod(nmethod* nm) {
498   // Clean up any CompiledICHolders
499   {
500     ResourceMark rm;
501     MutexLocker ml_patch(CompiledIC_lock);  // call-site patching/cleanup requires CompiledIC_lock
502     RelocIterator iter(nm);
503     while (iter.next()) {  // iterate every relocation record in nm
504       if (iter.type() == relocInfo::virtual_call_type) {
505         CompiledIC::cleanup_call_site(iter.virtual_call_reloc());  // drops the CompiledICHolder attached to this virtual call site
506       }
507     }
508   }
509
510   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);  // no safepoint check: nothing below may block for a safepoint
511   nm->flush();  // presumably removes nm from the code cache and frees it — verify against nmethod::flush()
512 }
513
514 int NMethodSweeper::process_nmethod(nmethod* nm) {
515 assert(nm != NULL, "sanity");
516 assert(!CodeCache_lock->owned_by_self(), "just checking");
517
518 int freed_memory = 0;
519 // Make sure this nmethod doesn't get unloaded during the scan,
520 // since safepoints may happen during acquired below locks.
521 NMethodMarker nmm(nm);
522 SWEEP(nm);
523
524 // Skip methods that are currently referenced by the VM
525 if (nm->is_locked_by_vm()) {
526 // But still remember to clean-up inline caches for alive nmethods
527 if (nm->is_alive()) {
528 // Clean inline caches that point to zombie/non-entrant methods
529 MutexLocker cl(CompiledIC_lock);
530 nm->cleanup_inline_caches();
531 SWEEP(nm);
532 }
533 return freed_memory;
534 }
535
|