
src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

rev 9088 : 8139040: Fix initializations before ShouldNotReachHere() etc. and enable -Wuninitialized on linux.
Reviewed-by: stuefe, coleenp
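The warning class this change targets is easy to reproduce outside HotSpot. Below is a minimal sketch (my own example, not HotSpot code): a local is assigned and read under two textually identical guards, and because the guard calls an opaque function, gcc's -Wuninitialized/-Wmaybe-uninitialized analysis cannot prove the second guard implies the first, so it may report the read as possibly uninitialized. Giving the variable a harmless initial value, as the patch does for prev_used and coalesce below, silences the warning without changing behavior.

    // sketch.cpp -- assumed standalone example, not HotSpot code.
    // Compile with: g++ -O2 -Wall -Wuninitialized -c sketch.cpp
    #include <cstddef>
    #include <cstdio>

    extern bool verbose();          // opaque to the optimizer
    extern size_t current_used();

    void report() {
      size_t prev;                  // no initializer, like the old prev_used
      if (verbose()) {
        prev = current_used();
      }
      // ... other work ...
      if (verbose()) {              // gcc cannot prove this matches the first guard
        std::printf("used: %zu\n", prev);  // may warn: 'prev' used uninitialized
      }
    }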

Old version (prev_used and coalesce declared without initializers):
1778       register_gc_start(cause);
1779       // Reset the expansion cause, now that we are about to begin
1780       // a new cycle.
1781       clear_expansion_cause();
1782 
1783       // Clear the MetaspaceGC flag since a concurrent collection
1784       // is starting but also clear it after the collection.
1785       MetaspaceGC::set_should_concurrent_collect(false);
1786     }
1787     // Decide if we want to enable class unloading as part of the
1788     // ensuing concurrent GC cycle.
1789     update_should_unload_classes();
1790     _full_gc_requested = false;           // acks all outstanding full gc requests
1791     _full_gc_cause = GCCause::_no_gc;
1792     // Signal that we are about to start a collection
1793     gch->increment_total_full_collections();  // ... starting a collection cycle
1794     _collection_count_start = gch->total_full_collections();
1795   }
1796 
1797   // Used for PrintGC
1798   size_t prev_used;
1799   if (PrintGC && Verbose) {
1800     prev_used = _cmsGen->used();
1801   }
1802 
1803   // The change of the collection state is normally done at this level;
1804   // the exceptions are phases that are executed while the world is
1805   // stopped.  For those phases the change of state is done while the
1806   // world is stopped.  For baton passing purposes this allows the
1807   // background collector to finish the phase and change state atomically.
1808   // The foreground collector cannot wait on a phase that is done
1809   // while the world is stopped because the foreground collector already
1810   // has the world stopped and would deadlock.
1811   while (_collectorState != Idling) {
1812     if (TraceCMSState) {
1813       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
1814         p2i(Thread::current()), _collectorState);
1815     }
1816     // The foreground collector
1817     //   holds the Heap_lock throughout its collection.
1818     //   holds the CMS token (but not the lock)


7711   return size;
7712 }
7713 
7714 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7715                                                  size_t chunkSize) {
7716   // do_post_free_or_garbage_chunk() should only be called in the case
7717   // of the adaptive free list allocator.
7718   const bool fcInFreeLists = fc->is_free();
7719   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
7720   assert((HeapWord*)fc <= _limit, "sweep invariant");
7721   if (CMSTestInFreeList && fcInFreeLists) {
7722     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7723   }
7724 
7725   if (CMSTraceSweeper) {
7726     gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7727   }
7728 
7729   HeapWord* const fc_addr = (HeapWord*) fc;
7730 
7731   bool coalesce;
7732   const size_t left  = pointer_delta(fc_addr, freeFinger());
7733   const size_t right = chunkSize;
7734   switch (FLSCoalescePolicy) {
7735     // numeric value forms a coalition aggressiveness metric
7736     case 0:  { // never coalesce
7737       coalesce = false;
7738       break;
7739     }
7740     case 1: { // coalesce if left & right chunks on overpopulated lists
7741       coalesce = _sp->coalOverPopulated(left) &&
7742                  _sp->coalOverPopulated(right);
7743       break;
7744     }
7745     case 2: { // coalesce if left chunk on overpopulated list (default)
7746       coalesce = _sp->coalOverPopulated(left);
7747       break;
7748     }
7749     case 3: { // coalesce if left OR right chunk on overpopulated list
7750       coalesce = _sp->coalOverPopulated(left) ||
7751                  _sp->coalOverPopulated(right);


New version (prev_used and coalesce now initialized):

1778       register_gc_start(cause);
1779       // Reset the expansion cause, now that we are about to begin
1780       // a new cycle.
1781       clear_expansion_cause();
1782 
1783       // Clear the MetaspaceGC flag since a concurrent collection
1784       // is starting but also clear it after the collection.
1785       MetaspaceGC::set_should_concurrent_collect(false);
1786     }
1787     // Decide if we want to enable class unloading as part of the
1788     // ensuing concurrent GC cycle.
1789     update_should_unload_classes();
1790     _full_gc_requested = false;           // acks all outstanding full gc requests
1791     _full_gc_cause = GCCause::_no_gc;
1792     // Signal that we are about to start a collection
1793     gch->increment_total_full_collections();  // ... starting a collection cycle
1794     _collection_count_start = gch->total_full_collections();
1795   }
1796 
1797   // Used for PrintGC
1798   size_t prev_used = 0;
1799   if (PrintGC && Verbose) {
1800     prev_used = _cmsGen->used();
1801   }
1802 
1803   // The change of the collection state is normally done at this level;
1804   // the exceptions are phases that are executed while the world is
1805   // stopped.  For those phases the change of state is done while the
1806   // world is stopped.  For baton passing purposes this allows the
1807   // background collector to finish the phase and change state atomically.
1808   // The foreground collector cannot wait on a phase that is done
1809   // while the world is stopped because the foreground collector already
1810   // has the world stopped and would deadlock.
1811   while (_collectorState != Idling) {
1812     if (TraceCMSState) {
1813       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
1814         p2i(Thread::current()), _collectorState);
1815     }
1816     // The foreground collector
1817     //   holds the Heap_lock throughout its collection.
1818     //   holds the CMS token (but not the lock)
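The bug title also mentions initializations before ShouldNotReachHere(). In HotSpot that macro reports a fatal error and should never return, but depending on the build configuration gcc cannot always see it as noreturn, so a path that falls through it looks, to the analysis, like it reaches the use of an unset local. A hedged sketch of that shape (names are my own, not HotSpot's):

    extern void should_not_reach_here();  // stand-in; assume gcc has no
                                          // noreturn attribute to rely on

    int size_for_kind(int kind) {
      int size = 0;                // initialization added, as in this patch
      switch (kind) {
        case 0: size = 8;  break;
        case 1: size = 16; break;
        default:
          should_not_reach_here(); // if gcc thinks this can return, 'size'
                                   // would otherwise be read uninitialized
      }
      return size;
    }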


7711   return size;
7712 }
7713 
7714 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7715                                                  size_t chunkSize) {
7716   // do_post_free_or_garbage_chunk() should only be called in the case
7717   // of the adaptive free list allocator.
7718   const bool fcInFreeLists = fc->is_free();
7719   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
7720   assert((HeapWord*)fc <= _limit, "sweep invariant");
7721   if (CMSTestInFreeList && fcInFreeLists) {
7722     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7723   }
7724 
7725   if (CMSTraceSweeper) {
7726     gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7727   }
7728 
7729   HeapWord* const fc_addr = (HeapWord*) fc;
7730 
7731   bool coalesce = false;
7732   const size_t left  = pointer_delta(fc_addr, freeFinger());
7733   const size_t right = chunkSize;
7734   switch (FLSCoalescePolicy) {
7735     // numeric value forms a coalition aggressiveness metric
7736     case 0:  { // never coalesce
7737       coalesce = false;
7738       break;
7739     }
7740     case 1: { // coalesce if left & right chunks on overpopulated lists
7741       coalesce = _sp->coalOverPopulated(left) &&
7742                  _sp->coalOverPopulated(right);
7743       break;
7744     }
7745     case 2: { // coalesce if left chunk on overpopulated list (default)
7746       coalesce = _sp->coalOverPopulated(left);
7747       break;
7748     }
7749     case 3: { // coalesce if left OR right chunk on overpopulated list
7750       coalesce = _sp->coalOverPopulated(left) ||
7751                  _sp->coalOverPopulated(right);

