// NOTE(review): fragment of CMSCollector::acquire_control_and_collect -- the
// enclosing function begins before and continues past this chunk. The VM
// thread announces a foreground collection, releases its locks, and waits on
// CGC_lock until the background (CMS) collector cedes control.
1396 CollectorState first_state = _collectorState;
1397
1398 // Signal to a possibly ongoing concurrent collection that
1399 // we want to do a foreground collection.
1400 _foregroundGCIsActive = true;
1401
1402 // release locks and wait for a notify from the background collector
1403 // releasing the locks is only necessary for phases which
1404 // do yields to improve the granularity of the collection.
1405 assert_lock_strong(bitMapLock());
1406 // We need to lock the Free list lock for the space that we are
1407 // currently collecting.
1408 assert(haveFreelistLocks(), "Must be holding free list locks");
1409 bitMapLock()->unlock();
1410 releaseFreelistLocks();
1411 {
1412 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1413 if (_foregroundGCShouldWait) {
1414 // We are going to be waiting for action from the CMS thread;
1415 // it had better not be gone (for instance at shutdown)!
// NOTE(review): a non-NULL cmst() does not by itself prove the thread has
// not already terminated; consider also asserting !cmst()->has_terminated().
1416 assert(ConcurrentMarkSweepThread::cmst() != NULL,
1417 "CMS thread must be running");
1418 // Wait here until the background collector gives us the go-ahead
1419 ConcurrentMarkSweepThread::clear_CMS_flag(
1420 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1421 // Get a possibly blocked CMS thread going:
1422 // Note that we set _foregroundGCIsActive true above,
1423 // without protection of the CGC_lock.
1424 CGC_lock->notify();
1425 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1426 "Possible deadlock");
// Loop (not just a single wait) because CGC_lock->notify() may wake us
// before the background collector has actually cleared the flag.
1427 while (_foregroundGCShouldWait) {
1428 // wait for notification
1429 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1430 // Possibility of delay/starvation here, since CMS token does
1431 // not know to give priority to VM thread? Actually, i think
1432 // there wouldn't be any delay/starvation, but the proof of
1433 // that "fact" (?) appears non-trivial. XXX 20011219YSR
1434 }
// Re-acquire the CMS token now that the background collector has ceded
// control to the foreground (VM) thread.
1435 ConcurrentMarkSweepThread::set_CMS_flag(
1436 ConcurrentMarkSweepThread::CMS_vm_has_token);
// NOTE(review): fragment of the CMS abortable-preclean phase -- the
// enclosing function begins before and continues past this chunk. It
// repeatedly precleans (dirty cards, ref lists, survivors) until an abort
// is requested, the CMS thread is asked to terminate, a loop-count cap is
// hit, or a wall-clock budget is exhausted.
3633 // past the next scavenge in an effort to
3634 // schedule the pause as described above. By choosing
3635 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3636 // we will never do an actual abortable preclean cycle.
3637 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3638 GCTraceCPUTime tcpu;
3639 CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean");
3640 // We need more smarts in the abortable preclean
3641 // loop below to deal with cases where allocation
3642 // in young gen is very very slow, and our precleaning
3643 // is running a losing race against a horde of
3644 // mutators intent on flooding us with CMS updates
3645 // (dirty cards).
3646 // One, admittedly dumb, strategy is to give up
3647 // after a certain number of abortable precleaning loops
3648 // or after a certain maximum time. We want to make
3649 // this smarter in the next iteration.
3650 // XXX FIX ME!!! YSR
// loops: iterations completed; workdone: cards precleaned this iteration;
// cumworkdone: running total; waited: sleep count (used past this chunk).
3651 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3652 while (!(should_abort_preclean() ||
3653 ConcurrentMarkSweepThread::should_terminate())) {
3654 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3655 cumworkdone += workdone;
3656 loops++;
3657 // Voluntarily terminate abortable preclean phase if we have
3658 // been at it for too long.
// CMSMaxAbortablePrecleanLoops == 0 means "no loop-count limit".
3659 if ((CMSMaxAbortablePrecleanLoops != 0) &&
3660 loops >= CMSMaxAbortablePrecleanLoops) {
3661 log_debug(gc)(" CMS: abort preclean due to loops ");
3662 break;
3663 }
3664 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3665 log_debug(gc)(" CMS: abort preclean due to time ");
3666 break;
3667 }
3668 // If we are doing little work each iteration, we should
3669 // take a short break.
3670 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3671 // Sleep for some time, waiting for work to accumulate
// Stop the phase timer while sleeping so the idle wait is not charged
// to precleaning work.
3672 stopTimer();
3673 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
|
// NOTE(review): fragment of CMSCollector::acquire_control_and_collect -- the
// enclosing function begins before and continues past this chunk. The VM
// thread announces a foreground collection, releases its locks, and waits on
// CGC_lock until the background (CMS) collector cedes control.
1396 CollectorState first_state = _collectorState;
1397
1398 // Signal to a possibly ongoing concurrent collection that
1399 // we want to do a foreground collection.
1400 _foregroundGCIsActive = true;
1401
1402 // release locks and wait for a notify from the background collector
1403 // releasing the locks is only necessary for phases which
1404 // do yields to improve the granularity of the collection.
1405 assert_lock_strong(bitMapLock());
1406 // We need to lock the Free list lock for the space that we are
1407 // currently collecting.
1408 assert(haveFreelistLocks(), "Must be holding free list locks");
1409 bitMapLock()->unlock();
1410 releaseFreelistLocks();
1411 {
1412 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1413 if (_foregroundGCShouldWait) {
1414 // We are going to be waiting for action from the CMS thread;
1415 // it had better not be gone (for instance at shutdown)!
// Check both that the CMS thread object exists AND that it has not already
// run to completion -- a non-NULL cmst() alone would not rule out a
// terminated thread at shutdown.
1416 assert(ConcurrentMarkSweepThread::cmst() != NULL && !ConcurrentMarkSweepThread::cmst()->has_terminated(),
1417 "CMS thread must be running");
1418 // Wait here until the background collector gives us the go-ahead
1419 ConcurrentMarkSweepThread::clear_CMS_flag(
1420 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1421 // Get a possibly blocked CMS thread going:
1422 // Note that we set _foregroundGCIsActive true above,
1423 // without protection of the CGC_lock.
1424 CGC_lock->notify();
1425 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1426 "Possible deadlock");
// Loop (not just a single wait) because CGC_lock->notify() may wake us
// before the background collector has actually cleared the flag.
1427 while (_foregroundGCShouldWait) {
1428 // wait for notification
1429 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1430 // Possibility of delay/starvation here, since CMS token does
1431 // not know to give priority to VM thread? Actually, i think
1432 // there wouldn't be any delay/starvation, but the proof of
1433 // that "fact" (?) appears non-trivial. XXX 20011219YSR
1434 }
// Re-acquire the CMS token now that the background collector has ceded
// control to the foreground (VM) thread.
1435 ConcurrentMarkSweepThread::set_CMS_flag(
1436 ConcurrentMarkSweepThread::CMS_vm_has_token);
// NOTE(review): fragment of the CMS abortable-preclean phase -- the
// enclosing function begins before and continues past this chunk. It
// repeatedly precleans (dirty cards, ref lists, survivors) until an abort
// is requested, the CMS thread is asked to terminate, a loop-count cap is
// hit, or a wall-clock budget is exhausted.
3633 // past the next scavenge in an effort to
3634 // schedule the pause as described above. By choosing
3635 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3636 // we will never do an actual abortable preclean cycle.
3637 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3638 GCTraceCPUTime tcpu;
3639 CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean");
3640 // We need more smarts in the abortable preclean
3641 // loop below to deal with cases where allocation
3642 // in young gen is very very slow, and our precleaning
3643 // is running a losing race against a horde of
3644 // mutators intent on flooding us with CMS updates
3645 // (dirty cards).
3646 // One, admittedly dumb, strategy is to give up
3647 // after a certain number of abortable precleaning loops
3648 // or after a certain maximum time. We want to make
3649 // this smarter in the next iteration.
3650 // XXX FIX ME!!! YSR
// loops: iterations completed; workdone: cards precleaned this iteration;
// cumworkdone: running total; waited: sleep count (used past this chunk).
3651 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
// NOTE(review): cmst() is dereferenced here without a NULL check --
// presumably the CMS thread object is guaranteed to exist while this phase
// runs on the CMS thread itself; confirm against the thread's lifecycle.
3652 while (!(should_abort_preclean() ||
3653 ConcurrentMarkSweepThread::cmst()->should_terminate())) {
3654 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3655 cumworkdone += workdone;
3656 loops++;
3657 // Voluntarily terminate abortable preclean phase if we have
3658 // been at it for too long.
// CMSMaxAbortablePrecleanLoops == 0 means "no loop-count limit".
3659 if ((CMSMaxAbortablePrecleanLoops != 0) &&
3660 loops >= CMSMaxAbortablePrecleanLoops) {
3661 log_debug(gc)(" CMS: abort preclean due to loops ");
3662 break;
3663 }
3664 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3665 log_debug(gc)(" CMS: abort preclean due to time ");
3666 break;
3667 }
3668 // If we are doing little work each iteration, we should
3669 // take a short break.
3670 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3671 // Sleep for some time, waiting for work to accumulate
// Stop the phase timer while sleeping so the idle wait is not charged
// to precleaning work.
3672 stopTimer();
3673 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
|