src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

Print this page




1672     // A consistency test for GC_locker
1673     assert(GC_locker::needs_gc(), "Should have been set already");
1674     // Skip this foreground collection, instead
1675     // expanding the heap if necessary.
1676     // Need the free list locks for the call to free() in compute_new_size()
1677     compute_new_size();
1678     return;
1679   }
1680   acquire_control_and_collect(full, clear_all_soft_refs);
1681   _full_gcs_since_conc_gc++;
1682 
1683 }
1684 
// Posts a request for a foreground (full) collection to the CMS thread.
// full_gc_count is the total_full_collections() value sampled by the
// requester: the request is only posted if that count is still current
// (gc_count == full_gc_count), i.e. no full collection has completed in
// the meantime; otherwise the request is silently dropped — presumably
// already satisfied by an intervening collection (NOTE(review): the
// counter relationship is not checked in this revision; confirm that
// gc_count can never be less than full_gc_count).
1685 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
1686   GenCollectedHeap* gch = GenCollectedHeap::heap();
1687   unsigned int gc_count = gch->total_full_collections();
1688   if (gc_count == full_gc_count) {
       // _no_safepoint_check_flag: this lock is taken without a safepoint
       // check, so the requester cannot block at a safepoint here.
1689     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1690     _full_gc_requested = true;
1691     CGC_lock->notify();   // nudge CMS thread


1692   }
1693 }
1694 
1695 
1696 // The foreground and background collectors need to coordinate in order
1697 // to make sure that they do not mutually interfere with CMS collections.
1698 // When a background collection is active,
1699 // the foreground collector may need to take over (preempt) and
1700 // synchronously complete an ongoing collection. Depending on the
1701 // frequency of the background collections and the heap usage
1702 // of the application, this preemption can be seldom or frequent.
1703 // There are only certain
1704 // points in the background collection that the "collection-baton"
1705 // can be passed to the foreground collector.
1706 //
1707 // The foreground collector will wait for the baton before
1708 // starting any part of the collection.  The foreground collector
1709 // will only wait at one location.
1710 //
1711 // The background collector will yield the baton before starting a new




1672     // A consistency test for GC_locker
1673     assert(GC_locker::needs_gc(), "Should have been set already");
1674     // Skip this foreground collection, instead
1675     // expanding the heap if necessary.
1676     // Need the free list locks for the call to free() in compute_new_size()
1677     compute_new_size();
1678     return;
1679   }
1680   acquire_control_and_collect(full, clear_all_soft_refs);
1681   _full_gcs_since_conc_gc++;
1682 
1683 }
1684 
// Posts a request for a foreground (full) collection to the CMS thread.
// full_gc_count is the total_full_collections() value sampled by the
// requester: the request is only posted if that count is still current
// (gc_count == full_gc_count), i.e. no full collection has completed in
// the meantime; otherwise the request is dropped — presumably already
// satisfied by an intervening collection (TODO confirm against callers).
1685 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
1686   GenCollectedHeap* gch = GenCollectedHeap::heap();
1687   unsigned int gc_count = gch->total_full_collections();
1688   if (gc_count == full_gc_count) {
       // _no_safepoint_check_flag: this lock is taken without a safepoint
       // check, so the requester cannot block at a safepoint here.
1689     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1690     _full_gc_requested = true;
1691     CGC_lock->notify();   // nudge CMS thread
1692   } else {
       // The collection counter only moves forward; seeing a stale sample
       // that is *ahead* of the live counter would imply a causal loop.
1693     assert(gc_count > full_gc_count, "Error: causal loop");
1694   }
1695 }
1696 
1697 
1698 // The foreground and background collectors need to coordinate in order
1699 // to make sure that they do not mutually interfere with CMS collections.
1700 // When a background collection is active,
1701 // the foreground collector may need to take over (preempt) and
1702 // synchronously complete an ongoing collection. Depending on the
1703 // frequency of the background collections and the heap usage
1704 // of the application, this preemption can be seldom or frequent.
1705 // There are only certain
1706 // points in the background collection that the "collection-baton"
1707 // can be passed to the foreground collector.
1708 //
1709 // The foreground collector will wait for the baton before
1710 // starting any part of the collection.  The foreground collector
1711 // will only wait at one location.
1712 //
1713 // The background collector will yield the baton before starting a new