src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp

rev 8031 : imported patch cms

  48 #include "memory/gcLocker.inline.hpp"
  49 #include "memory/genCollectedHeap.hpp"
  50 #include "memory/genMarkSweep.hpp"
  51 #include "memory/genOopClosures.inline.hpp"
  52 #include "memory/iterator.inline.hpp"
  53 #include "memory/padded.hpp"
  54 #include "memory/referencePolicy.hpp"
  55 #include "memory/resourceArea.hpp"
  56 #include "memory/tenuredGeneration.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "prims/jvmtiExport.hpp"
  59 #include "runtime/atomic.inline.hpp"
  60 #include "runtime/globals_extension.hpp"
  61 #include "runtime/handles.inline.hpp"
  62 #include "runtime/java.hpp"
  63 #include "runtime/orderAccess.inline.hpp"
  64 #include "runtime/vmThread.hpp"
  65 #include "services/memoryService.hpp"
  66 #include "services/runtimeService.hpp"
  67 
  68 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  69 
  70 // statics
  71 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
  72 bool CMSCollector::_full_gc_requested = false;
  73 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
  74 
  75 //////////////////////////////////////////////////////////////////
  76 // In support of CMS/VM thread synchronization
  77 //////////////////////////////////////////////////////////////////
  78 // We split use of the CGC_lock into 2 "levels".
  79 // The low-level locking is of the usual CGC_lock monitor. We introduce
  80 // a higher level "token" (hereafter "CMS token") built on top of the
  81 // low level monitor (hereafter "CGC lock").
  82 // The token-passing protocol gives priority to the VM thread. The
  83 // CGC lock doesn't provide any fairness guarantees, but clients
  84 // should ensure that it is only held for very short, bounded
  85 // durations.
  86 //
  87 // When either of the CMS thread or the VM thread is involved in
  88 // collection operations during which it does not want the other
  89 // thread to interfere, it obtains the CMS token.
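
As a minimal illustration of the two-level scheme described above, the
following self-contained sketch models a priority token layered on a plain
monitor. It uses std::mutex/std::condition_variable rather than HotSpot's
Monitor, and every name in it is hypothetical; the real protocol lives in
ConcurrentMarkSweepThread::synchronize() and the CMSTokenSync helpers.

    #include <mutex>
    #include <condition_variable>

    // Hypothetical model: the low-level monitor plays the role of the
    // CGC lock; the boolean flags play the role of the CMS-token state.
    class TwoLevelToken {
      std::mutex              _low;      // the "CGC lock"
      std::condition_variable _cv;
      bool _vm_wants  = false;           // VM thread announces priority
      bool _vm_holds  = false;
      bool _cms_holds = false;
     public:
      void vm_acquire() {
        std::unique_lock<std::mutex> l(_low);
        _vm_wants = true;                // block new CMS acquisitions
        _cv.wait(l, [this] { return !_cms_holds; });
        _vm_wants = false;
        _vm_holds = true;                // VM thread now holds the token
      }
      void cms_acquire() {
        std::unique_lock<std::mutex> l(_low);
        // Defer to the VM thread if it holds, or is waiting for, the token.
        _cv.wait(l, [this] { return !_vm_holds && !_vm_wants; });
        _cms_holds = true;
      }
      void vm_release() {
        { std::lock_guard<std::mutex> l(_low); _vm_holds = false; }
        _cv.notify_all();
      }
      void cms_release() {
        { std::lock_guard<std::mutex> l(_low); _cms_holds = false; }
        _cv.notify_all();
      }
    };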


1576              "Possible deadlock");
1577       while (_foregroundGCShouldWait) {
1578         // wait for notification
1579         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1580         // Possibility of delay/starvation here, since CMS token does
1581         // not know to give priority to VM thread? Actually, I think
1582         // there wouldn't be any delay/starvation, but the proof of
1583         // that "fact" (?) appears non-trivial. XXX 20011219YSR
1584       }
1585       ConcurrentMarkSweepThread::set_CMS_flag(
1586         ConcurrentMarkSweepThread::CMS_vm_has_token);
1587     }
1588   }
1589   // The CMS_token is already held.  Get back the other locks.
1590   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1591          "VM thread should have CMS token");
1592   getFreelistLocks();
1593   bitMapLock()->lock_without_safepoint_check();
1594   if (TraceCMSState) {
1595     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1596       INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1597     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
1598   }
1599 
1600   // Inform cms gen if this was due to partial collection failing.
1601   // The CMS gen may use this fact to determine its expansion policy.
1602   GenCollectedHeap* gch = GenCollectedHeap::heap();
1603   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1604     assert(!_cmsGen->incremental_collection_failed(),
1605            "Should have been noticed, reacted to and cleared");
1606     _cmsGen->set_incremental_collection_failed();
1607   }
1608 
1609   if (first_state > Idling) {
1610     report_concurrent_mode_interruption();
1611   }
1612 
1613   set_did_compact(true);
1614 
1615   // If the collection is being acquired from the background
1616   // collector, there may be references on the discovered


1746   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1747   // Restart the "inter sweep timer" for the next epoch.
1748   _inter_sweep_timer.reset();
1749   _inter_sweep_timer.start();
1750 
1751   gc_timer->register_gc_end();
1752 
1753   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1754 
1755   // For a mark-sweep-compact, compute_new_size() will be called
1756   // in the heap's do_collection() method.
1757 }
1758 
1759 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1760   ContiguousSpace* eden_space = _young_gen->eden();
1761   ContiguousSpace* from_space = _young_gen->from();
1762   ContiguousSpace* to_space   = _young_gen->to();
1763   // Eden
1764   if (_eden_chunk_array != NULL) {
1765     gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1766                            eden_space->bottom(), eden_space->top(),
1767                            eden_space->end(), eden_space->capacity());
1768     gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
1769                            "_eden_chunk_capacity=" SIZE_FORMAT,
1770                            _eden_chunk_index, _eden_chunk_capacity);
1771     for (size_t i = 0; i < _eden_chunk_index; i++) {
1772       gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1773                              i, _eden_chunk_array[i]);
1774     }
1775   }
1776   // Survivor
1777   if (_survivor_chunk_array != NULL) {
1778     gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1779                            from_space->bottom(), from_space->top(),
1780                            from_space->end(), from_space->capacity());
1781     gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
1782                            "_survivor_chunk_capacity=" SIZE_FORMAT,
1783                            _survivor_chunk_index, _survivor_chunk_capacity);
1784     for (size_t i = 0; i < _survivor_chunk_index; i++) {
1785       gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1786                              i, _survivor_chunk_array[i]);
1787     }
1788   }
1789 }
1790 
1791 void CMSCollector::getFreelistLocks() const {
1792   // Get locks for all free lists in all generations that this
1793   // collector is responsible for
1794   _cmsGen->freelistLock()->lock_without_safepoint_check();
1795 }
1796 
1797 void CMSCollector::releaseFreelistLocks() const {
1798   // Release locks for all free lists in all generations that this
1799   // collector is responsible for
1800   _cmsGen->freelistLock()->unlock();
1801 }
1802 
1803 bool CMSCollector::haveFreelistLocks() const {
1804   // Check locks for all free lists in all generations that this
1805   // collector is responsible for
1806   assert_lock_strong(_cmsGen->freelistLock());
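
The three functions above are natural candidates for an RAII wrapper, so
that an early return cannot leak the free-list locks. A minimal sketch in
the spirit of HotSpot's stack-object lockers (the class name here is
hypothetical):

    // Hypothetical RAII pairing of getFreelistLocks() with
    // releaseFreelistLocks(): the destructor runs on every exit path.
    class FreelistLocksScope {
      CMSCollector* const _collector;
     public:
      explicit FreelistLocksScope(CMSCollector* c) : _collector(c) {
        _collector->getFreelistLocks();
      }
      ~FreelistLocksScope() {
        _collector->releaseFreelistLocks();
      }
    };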


1873     _collection_count_start = gch->total_full_collections();
1874   }
1875 
1876   // Used for PrintGC
1877   size_t prev_used;
1878   if (PrintGC && Verbose) {
1879     prev_used = _cmsGen->used();
1880   }
1881 
1882   // The change of the collection state is normally done at this level;
1883   // the exceptions are phases that are executed while the world is
1884   // stopped.  For those phases the change of state is done while the
1885   // world is stopped.  For baton passing purposes this allows the
1886   // background collector to finish the phase and change state atomically.
1887   // The foreground collector cannot wait on a phase that is done
1888   // while the world is stopped because the foreground collector already
1889   // has the world stopped and would deadlock.
1890   while (_collectorState != Idling) {
1891     if (TraceCMSState) {
1892       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
1893         Thread::current(), _collectorState);
1894     }
1895     // The foreground collector
1896     //   holds the Heap_lock throughout its collection.
1897     //   holds the CMS token (but not the lock)
1898     //     except while it is waiting for the background collector to yield.
1899     //
1900     // The foreground collector should be blocked (not for long)
1901     //   if the background collector is about to start a phase
1902     //   executed with world stopped.  If the background
1903     //   collector has already started such a phase, the
1904     //   foreground collector is blocked waiting for the
1905     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1906     //   are executed in the VM thread.
1907     //
1908     // The locking order is
1909     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1910     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1911     //   CMS token  (claimed in
1912     //                stop_world_and_do() -->
1913     //                  safepoint_synchronize() -->
1914     //                    CMSThread::synchronize())
1915 
1916     {
1917       // Check if the FG collector wants us to yield.
1918       CMSTokenSync x(true); // is cms thread
1919       if (waitForForegroundGC()) {
1920         // We yielded to a foreground GC, nothing more to be
1921         // done this round.
1922         assert(_foregroundGCShouldWait == false, "We set it to false in "
1923                "waitForForegroundGC()");
1924         if (TraceCMSState) {
1925           gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1926             " exiting collection CMS state %d",
1927             Thread::current(), _collectorState);
1928         }
1929         return;
1930       } else {
1931         // The background collector can run but check to see if the
1932         // foreground collector has done a collection while the
1933         // background collector was waiting to get the CGC_lock
1934         // above.  If yes, break so that _foregroundGCShouldWait
1935         // is cleared before returning.
1936         if (_collectorState == Idling) {
1937           break;
1938         }
1939       }
1940     }
1941 
1942     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1943       "should be waiting");
1944 
1945     switch (_collectorState) {
1946       case InitialMarking:
1947         {


2014       case Resetting:
2015         // CMS heap resizing has been completed
2016         reset(true);
2017         assert(_collectorState == Idling, "Collector state should "
2018           "have changed");
2019 
2020         MetaspaceGC::set_should_concurrent_collect(false);
2021 
2022         stats().record_cms_end();
2023         // Don't move the concurrent_phases_end() and compute_new_size()
2024         // calls to here because a preempted background collection
2025         // has its state set to "Resetting".
2026         break;
2027       case Idling:
2028       default:
2029         ShouldNotReachHere();
2030         break;
2031     }
2032     if (TraceCMSState) {
2033       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2034         Thread::current(), _collectorState);
2035     }
2036     assert(_foregroundGCShouldWait, "block post-condition");
2037   }
2038 
2039   // Should this be in gc_epilogue?
2040   collector_policy()->counters()->update_counters();
2041 
2042   {
2043     // Clear _foregroundGCShouldWait and, in the event that the
2044     // foreground collector is waiting, notify it, before
2045     // returning.
2046     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2047     _foregroundGCShouldWait = false;
2048     if (_foregroundGCIsActive) {
2049       CGC_lock->notify();
2050     }
2051     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2052            "Possible deadlock");
2053   }
2054   if (TraceCMSState) {
2055     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2056       " exiting collection CMS state %d",
2057       Thread::current(), _collectorState);
2058   }
2059   if (PrintGC && Verbose) {
2060     _cmsGen->print_heap_change(prev_used);
2061   }
2062 }
2063 
2064 void CMSCollector::register_gc_start(GCCause::Cause cause) {
2065   _cms_start_registered = true;
2066   _gc_timer_cm->register_gc_start();
2067   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2068 }
2069 
2070 void CMSCollector::register_gc_end() {
2071   if (_cms_start_registered) {
2072     report_heap_summary(GCWhen::AfterGC);
2073 
2074     _gc_timer_cm->register_gc_end();
2075     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2076     _cms_start_registered = false;
2077   }


2095   // Block the foreground collector until the
2096   // background collector decides whether to
2097   // yield.
2098   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2099   _foregroundGCShouldWait = true;
2100   if (_foregroundGCIsActive) {
2101     // The background collector yields to the
2102     // foreground collector and returns a value
2103     // indicating that it has yielded.  The foreground
2104     // collector can proceed.
2105     res = true;
2106     _foregroundGCShouldWait = false;
2107     ConcurrentMarkSweepThread::clear_CMS_flag(
2108       ConcurrentMarkSweepThread::CMS_cms_has_token);
2109     ConcurrentMarkSweepThread::set_CMS_flag(
2110       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2111     // Get a possibly blocked foreground thread going
2112     CGC_lock->notify();
2113     if (TraceCMSState) {
2114       gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2115         Thread::current(), _collectorState);
2116     }
2117     while (_foregroundGCIsActive) {
2118       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2119     }
2120     ConcurrentMarkSweepThread::set_CMS_flag(
2121       ConcurrentMarkSweepThread::CMS_cms_has_token);
2122     ConcurrentMarkSweepThread::clear_CMS_flag(
2123       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2124   }
2125   if (TraceCMSState) {
2126     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2127       Thread::current(), _collectorState);
2128   }
2129   return res;
2130 }
2131 
2132 // Because of the need to lock the free lists and other structures in
2133 // the collector, common to all the generations that the collector is
2134 // collecting, we need the gc_prologues of individual CMS generations
2135 // to delegate to their collector. It may have been simpler had the
2136 // current infrastructure allowed one to call a prologue on a
2137 // collector. In the absence of that we have the generation's
2138 // prologue delegate to the collector, which delegates back
2139 // some "local" work to a worker method in the individual generations
2140 // that it's responsible for collecting, while itself doing any
2141 // work common to all generations it's responsible for. A similar
2142 // comment applies to the gc_epilogue()s.
2143 // The role of the variable _between_prologue_and_epilogue is to
2144 // enforce the invocation protocol.
2145 void CMSCollector::gc_prologue(bool full) {
2146   // Call gc_prologue_work() for the CMSGen
2147   // we are responsible for.


2339 #endif
2340 
2341 // Check reachability of the given heap address in CMS generation,
2342 // treating all other generations as roots.
2343 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2344 // We could "guarantee" below, rather than assert, but I'll
2345 // leave these as "asserts" so that an adventurous debugger
2346 // could try this in the product build, provided some subset of
2347 // the conditions were met, they were interested in the results,
2348 // and they knew that the computation below wouldn't interfere
2349 // with other concurrent computations mutating the structures
2350 // being read or written.
2351   assert(SafepointSynchronize::is_at_safepoint(),
2352          "Else mutations in object graph will make answer suspect");
2353   assert(have_cms_token(), "Should hold cms token");
2354   assert(haveFreelistLocks(), "must hold free list locks");
2355   assert_lock_strong(bitMapLock());
2356 
2357   // Clear the marking bit map array before starting, but, just
2358   // for kicks, first report if the given address is already marked
2359   gclog_or_tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", addr,
2360                 _markBitMap.isMarked(addr) ? "" : " not");
2361 
2362   if (verify_after_remark()) {
2363     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2364     bool result = verification_mark_bm()->isMarked(addr);
2365     gclog_or_tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", addr,
2366                            result ? "IS" : "is NOT");
2367     return result;
2368   } else {
2369     gclog_or_tty->print_cr("Could not compute result");
2370     return false;
2371   }
2372 }
2373 
2374 
2375 void
2376 CMSCollector::print_on_error(outputStream* st) {
2377   CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2378   if (collector != NULL) {
2379     CMSBitMap* bitmap = &collector->_markBitMap;
2380     st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, bitmap);
2381     bitmap->print_on_error(st, " Bits: ");
2382 
2383     st->cr();
2384 
2385     CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2386     st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, mut_bitmap);
2387     mut_bitmap->print_on_error(st, " Bits: ");
2388   }
2389 }
2390 
2391 ////////////////////////////////////////////////////////
2392 // CMS Verification Support
2393 ////////////////////////////////////////////////////////
2394 // After the remark phase, the following invariant
2395 // should hold -- each object in the CMS heap which is
2396 // marked in the verification_mark_bm() should also be marked in markBitMap().
2397 
2398 class VerifyMarkedClosure: public BitMapClosure {
2399   CMSBitMap* _marks;
2400   bool       _failed;
2401 
2402  public:
2403   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2404 
2405   bool do_bit(size_t offset) {
2406     HeapWord* addr = _marks->offsetToHeapWord(offset);
2407     if (!_marks->isMarked(addr)) {
2408       oop(addr)->print_on(gclog_or_tty);
2409       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
2410       _failed = true;
2411     }
2412     return true;
2413   }
2414 
2415   bool failed() { return _failed; }
2416 };
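
In use, the closure is constructed over the CMS marks and driven across
the verification bit map, so each verification mark is checked for
membership in the CMS marks. Roughly (cf. verify_after_remark_work_1()
further down):

    VerifyMarkedClosure vcl(markBitMap());   // marks that must be present
    verification_mark_bm()->iterate(&vcl);   // walk the recomputed marks
    if (vcl.failed()) {
      // Each offending object was already printed by do_bit() above;
      // now fail loudly.
      fatal("CMS marking verification failed");
    }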
2417 
2418 bool CMSCollector::verify_after_remark(bool silent) {
2419   if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2420   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2421   static bool init = false;
2422 
2423   assert(SafepointSynchronize::is_at_safepoint(),
2424          "Else mutations in object graph will make answer suspect");
2425   assert(have_cms_token(),
2426          "Else there may be mutual interference in use of "
2427          "verification data structures");
2428   assert(_collectorState > Marking && _collectorState <= Sweeping,
2429          "Else marking info checked here may be obsolete");


2457   verify_work_stacks_empty();
2458 
2459   GenCollectedHeap* gch = GenCollectedHeap::heap();
2460   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2461   // Update the saved marks which may affect the root scans.
2462   gch->save_marks();
2463 
2464   if (CMSRemarkVerifyVariant == 1) {
2465     // In this first variant of verification, we complete
2466     // all marking, then check if the new marks-vector is
2467     // a subset of the CMS marks-vector.
2468     verify_after_remark_work_1();
2469   } else if (CMSRemarkVerifyVariant == 2) {
2470     // In this second variant of verification, we flag an error
2471     // (i.e. an object reachable in the new marks-vector not reachable
2472     // in the CMS marks-vector) immediately, also indicating the
2473 // identity of an object (A) that references the unmarked object (B) --
2474     // presumably, a mutation to A failed to be picked up by preclean/remark?
2475     verify_after_remark_work_2();
2476   } else {
2477     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2478             CMSRemarkVerifyVariant);
2479   }
2480   if (!silent) gclog_or_tty->print(" done] ");
2481   return true;
2482 }
2483 
2484 void CMSCollector::verify_after_remark_work_1() {
2485   ResourceMark rm;
2486   HandleMark  hm;
2487   GenCollectedHeap* gch = GenCollectedHeap::heap();
2488 
2489   // Get a clear set of claim bits for the roots processing to work with.
2490   ClassLoaderDataGraph::clear_claimed_marks();
2491 
2492   // Mark from roots one level into CMS
2493   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2494   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2495 
2496   gch->gen_process_roots(_cmsGen->level(),
2497                          true,   // younger gens are roots


5039     _cursor[min_tid]++;
5040   }
5041   // We are all done; record the size of the _survivor_chunk_array
5042   _survivor_chunk_index = i; // exclusive: [0, i)
5043   if (PrintCMSStatistics > 0) {
5044     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
5045   }
5046   // Verify that we used up all the recorded entries
5047   #ifdef ASSERT
5048     size_t total = 0;
5049     for (int j = 0; j < no_of_gc_threads; j++) {
5050       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5051       total += _cursor[j];
5052     }
5053     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5054     // Check that the merged array is in sorted order
5055     if (total > 0) {
5056       for (size_t i = 0; i < total - 1; i++) {
5057         if (PrintCMSStatistics > 0) {
5058           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5059                               i, _survivor_chunk_array[i]);
5060         }
5061         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5062                "Not sorted");
5063       }
5064     }
5065   #endif // ASSERT
5066 }
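
The merge whose tail is visible above (note the _cursor[min_tid]++ step)
is a plain k-way merge over the per-thread survivor PLAB arrays: keep one
cursor per thread and repeatedly take the smallest head. A self-contained
sketch of the idea, in isolation from the CMS data structures:

    #include <cstddef>
    #include <vector>

    // Merge k sorted per-thread arrays by always advancing the cursor
    // with the smallest current element, yielding one sorted array; the
    // ASSERT block above checks exactly these post-conditions.
    std::vector<size_t> merge_sorted(
        const std::vector<std::vector<size_t> >& per_thread) {
      std::vector<size_t> cursor(per_thread.size(), 0);
      std::vector<size_t> merged;
      for (;;) {
        int min_tid = -1;
        for (size_t t = 0; t < per_thread.size(); t++) {
          if (cursor[t] < per_thread[t].size() &&
              (min_tid < 0 ||
               per_thread[t][cursor[t]] <
               per_thread[min_tid][cursor[min_tid]])) {
            min_tid = (int)t;
          }
        }
        if (min_tid < 0) break;          // all cursors exhausted
        merged.push_back(per_thread[min_tid][cursor[min_tid]]);
        cursor[min_tid]++;               // cf. _cursor[min_tid]++ above
      }
      return merged;
    }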
5067 
5068 // Set up the space's par_seq_tasks structure for work claiming
5069 // for parallel initial scan and rescan of young gen.
5070 // See ParRescanTask where this is currently used.
5071 void
5072 CMSCollector::
5073 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5074   assert(n_threads > 0, "Unexpected n_threads argument");
5075 
5076   // Eden space
5077   if (!_young_gen->eden()->is_empty()) {
5078     SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
5079     assert(!pst->valid(), "Clobbering existing data?");


5683 
5684 // FIX ME!!! Looks like this belongs in CFLSpace, with
5685 // CMSGen merely delegating to it.
5686 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5687   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5688   HeapWord*  minAddr        = _cmsSpace->bottom();
5689   HeapWord*  largestAddr    =
5690     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5691   if (largestAddr == NULL) {
5692     // The dictionary appears to be empty.  In this case
5693     // try to coalesce at the end of the heap.
5694     largestAddr = _cmsSpace->end();
5695   }
5696   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5697   size_t nearLargestOffset =
5698     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5699   if (PrintFLSStatistics != 0) {
5700     gclog_or_tty->print_cr(
5701       "CMS: Large Block: " PTR_FORMAT ";"
5702       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5703       largestAddr,
5704       _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
5705   }
5706   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5707 }
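
In isolation, the proximity computation above looks as follows. This is a
sketch: the 0.99 proximity shown as an example is an assumption about
FLSLargestBlockCoalesceProximity, and pointer_delta() measures HeapWords.

    #include <cstddef>

    // Aim just short of the largest block, so that sweeping coalesces
    // free space toward it rather than fragmenting in front of it.
    size_t near_largest_offset(size_t largest_offset_words,
                               double proximity,
                               size_t min_chunk_size_words) {
      return (size_t)((double)largest_offset_words * proximity)
             - min_chunk_size_words;
    }
    // e.g. near_largest_offset(1000000, 0.5, 4) == 499996: the marker
    // lands 499996 words past bottom(), below the largest block's start.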
5708 
5709 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5710   return addr >= _cmsSpace->nearLargestChunk();
5711 }
5712 
5713 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5714   return _cmsSpace->find_chunk_at_end();
5715 }
5716 
5717 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
5718                                                     bool full) {
5719   // The next lower level has been collected.  Gather any statistics
5720   // that are of interest at this point.
5721   if (!full && (current_level + 1) == level()) {
5722     // Gather statistics on the young generation collection.
5723     collector()->stats().record_gc0_end(used());
5724   }


6167 
6168 // A variant of the above, used for CMS marking verification.
6169 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6170   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6171     _span(span),
6172     _verification_bm(verification_bm),
6173     _cms_bm(cms_bm)
6174 {
6175     assert(_ref_processor == NULL, "deliberately left NULL");
6176     assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6177 }
6178 
6179 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6180   // if p points into _span, then mark corresponding bit in _markBitMap
6181   assert(obj->is_oop(), "expected an oop");
6182   HeapWord* addr = (HeapWord*)obj;
6183   if (_span.contains(addr)) {
6184     _verification_bm->mark(addr);
6185     if (!_cms_bm->isMarked(addr)) {
6186       oop(addr)->print();
6187       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6188       fatal("... aborting");
6189     }
6190   }
6191 }
6192 
6193 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6194 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6195 
6196 //////////////////////////////////////////////////
6197 // MarkRefsIntoAndScanClosure
6198 //////////////////////////////////////////////////
6199 
6200 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6201                                                        ReferenceProcessor* rp,
6202                                                        CMSBitMap* bit_map,
6203                                                        CMSBitMap* mod_union_table,
6204                                                        CMSMarkStack*  mark_stack,
6205                                                        CMSCollector* collector,
6206                                                        bool should_yield,
6207                                                        bool concurrent_precleaning):


6962 // Upon stack overflow, we discard (part of) the stack,
6963 // remembering the least address amongst those discarded
6964 // in CMSCollector's _restart_address.
6965 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6966   // Remember the least grey address discarded
6967   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6968   _collector->lower_restart_addr(ra);
6969   _mark_stack->reset();  // discard stack contents
6970   _mark_stack->expand(); // expand the stack if possible
6971 }
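
The recovery above hinges on one monotonically decreasing restart
address: remembering the least grey address ever discarded guarantees
that a single rescan pass from that point re-finds everything that
overflowed off the stack. A standalone sketch of that bookkeeping
(hypothetical stand-in types):

    typedef unsigned char HeapWordSketch;   // stand-in for HeapWord

    struct RestartState {
      HeapWordSketch* _restart_addr;        // cf. CMSCollector's _restart_address
      RestartState() : _restart_addr(0) {}
      void lower_restart_addr(HeapWordSketch* low) {
        if (_restart_addr == 0 || low < _restart_addr) {
          _restart_addr = low;              // keep the minimum seen so far
        }
      }
    };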
6972 
6973 void PushAndMarkVerifyClosure::do_oop(oop obj) {
6974   assert(obj->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
6975   HeapWord* addr = (HeapWord*)obj;
6976   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6977     // Oop lies in _span and isn't yet grey or black
6978     _verification_bm->mark(addr);            // now grey
6979     if (!_cms_bm->isMarked(addr)) {
6980       oop(addr)->print();
6981       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
6982                              addr);
6983       fatal("... aborting");
6984     }
6985 
6986     if (!_mark_stack->push(obj)) { // stack overflow
6987       if (PrintCMSStatistics != 0) {
6988         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
6989                                SIZE_FORMAT, _mark_stack->capacity());
6990       }
6991       assert(_mark_stack->isFull(), "Else push should have succeeded");
6992       handle_stack_overflow(addr);
6993     }
6994     // anything including and to the right of _finger
6995     // will be scanned as we iterate over the remainder of the
6996     // bit map
6997   }
6998 }
6999 
7000 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7001                      MemRegion span,
7002                      CMSBitMap* bitMap, CMSMarkStack*  markStack,


7358   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7359   _lastFreeRangeCoalesced(false),
7360   _freeFinger(g->used_region().start())
7361 {
7362   NOT_PRODUCT(
7363     _numObjectsFreed = 0;
7364     _numWordsFreed   = 0;
7365     _numObjectsLive = 0;
7366     _numWordsLive = 0;
7367     _numObjectsAlreadyFree = 0;
7368     _numWordsAlreadyFree = 0;
7369     _last_fc = NULL;
7370 
7371     _sp->initializeIndexedFreeListArrayReturnedBytes();
7372     _sp->dictionary()->initialize_dict_returned_bytes();
7373   )
7374   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7375          "sweep _limit out of bounds");
7376   if (CMSTraceSweeper) {
7377     gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
7378                         _limit);
7379   }
7380 }
7381 
7382 void SweepClosure::print_on(outputStream* st) const {
7383   st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7384                _sp->bottom(), _sp->end());
7385   st->print_cr("_limit = " PTR_FORMAT, _limit);
7386   st->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
7387   NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
7388   st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7389                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7390 }
7391 
7392 #ifndef PRODUCT
7393 // Assertion checking only:  no useful work in product mode --
7394 // however, if any of the flags below become product flags,
7395 // you may need to review this code to see if it needs to be
7396 // enabled in product mode.
7397 SweepClosure::~SweepClosure() {
7398   assert_lock_strong(_freelistLock);
7399   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7400          "sweep _limit out of bounds");
7401   if (inFreeRange()) {
7402     warning("inFreeRange() should have been reset; dumping state of SweepClosure");
7403     print();
7404     ShouldNotReachHere();
7405   }
7406   if (Verbose && PrintGC) {
7407     gclog_or_tty->print("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",


7411       "Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7412       _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7413       _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7414     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
7415                         * sizeof(HeapWord);
7416     gclog_or_tty->print_cr("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7417 
7418     if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7419       size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7420       size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7421       size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7422       gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returned_bytes);
7423       gclog_or_tty->print("   Indexed List Returned " SIZE_FORMAT " bytes",
7424         indexListReturnedBytes);
7425       gclog_or_tty->print_cr("        Dictionary Returned " SIZE_FORMAT " bytes",
7426         dict_returned_bytes);
7427     }
7428   }
7429   if (CMSTraceSweeper) {
7430     gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
7431                            _limit);
7432   }
7433 }
7434 #endif  // PRODUCT
7435 
7436 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7437     bool freeRangeInFreeLists) {
7438   if (CMSTraceSweeper) {
7439     gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n",
7440                freeFinger, freeRangeInFreeLists);
7441   }
7442   assert(!inFreeRange(), "Trampling existing free range");
7443   set_inFreeRange(true);
7444   set_lastFreeRangeCoalesced(false);
7445 
7446   set_freeFinger(freeFinger);
7447   set_freeRangeInFreeLists(freeRangeInFreeLists);
7448   if (CMSTestInFreeList) {
7449     if (freeRangeInFreeLists) {
7450       FreeChunk* fc = (FreeChunk*) freeFinger;
7451       assert(fc->is_free(), "A chunk on the free list should be free.");
7452       assert(fc->size() > 0, "Free range should have a size");
7453       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7454     }
7455   }
7456 }
7457 
7458 // Note that the sweeper runs concurrently with mutators. Thus,
7459 // it is possible for direct allocation in this generation to happen
7460 // in the middle of the sweep. Note that the sweeper also coalesces


7484 
7485 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7486   FreeChunk* fc = (FreeChunk*)addr;
7487   size_t res;
7488 
7489   // Check if we are done sweeping. Below we check "addr >= _limit" rather
7490   // than "addr == _limit" because although _limit was a block boundary when
7491   // we started the sweep, it may no longer be one because heap expansion
7492   // may have caused us to coalesce the block ending at the address _limit
7493   // with a newly expanded chunk (this happens when _limit was set to the
7494   // previous _end of the space), so we may have stepped past _limit:
7495   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7496   if (addr >= _limit) { // we have swept up to or past the limit: finish up
7497     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7498            "sweep _limit out of bounds");
7499     assert(addr < _sp->end(), "addr out of bounds");
7500     // Flush any free range we might be holding as a single
7501     // coalesced chunk to the appropriate free list.
7502     if (inFreeRange()) {
7503       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7504              err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
7505       flush_cur_free_chunk(freeFinger(),
7506                            pointer_delta(addr, freeFinger()));
7507       if (CMSTraceSweeper) {
7508         gclog_or_tty->print("Sweep: last chunk: ");
7509         gclog_or_tty->print("put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") "
7510                    "[coalesced:%d]\n",
7511                    freeFinger(), pointer_delta(addr, freeFinger()),
7512                    lastFreeRangeCoalesced() ? 1 : 0);
7513       }
7514     }
7515 
7516     // help the iterator loop finish
7517     return pointer_delta(_sp->end(), addr);
7518   }
7519 
7520   assert(addr < _limit, "sweep invariant");
7521   // check if we should yield
7522   do_yield_check(addr);
7523   if (fc->is_free()) {
7524     // Chunk that is already free
7525     res = fc->size();
7526     do_already_free_chunk(fc);
7527     debug_only(_sp->verifyFreeLists());
7528     // If we flush the chunk at hand in lookahead_and_flush()
7529     // and it's coalesced with a preceding chunk, then the
7530     // process of "mangling" the payload of the coalesced block
7531     // will cause erasure of the size information from the


7635         assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
7636         if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
7637             nextChunk->is_free()               &&     // ... which is free...
7638             nextChunk->cantCoalesce()) {             // ... but can't be coalesced
7639           // nothing to do
7640         } else {
7641           // Potentially the start of a new free range:
7642           // Don't eagerly remove it from the free lists.
7643           // No need to remove it if it will just be put
7644           // back again.  (Also from a pragmatic point of view
7645           // if it is a free block in a region that is beyond
7646           // any allocated blocks, an assertion will fail)
7647           // Remember the start of a free run.
7648           initialize_free_range(addr, true);
7649           // end - can coalesce with next chunk
7650         }
7651       } else {
7652         // the midst of a free range, we are coalescing
7653         print_free_block_coalesced(fc);
7654         if (CMSTraceSweeper) {
7655           gclog_or_tty->print("  -- pick up free block " PTR_FORMAT " (" SIZE_FORMAT ")\n", fc, size);
7656         }
7657         // remove it from the free lists
7658         _sp->removeFreeChunkFromFreeLists(fc);
7659         set_lastFreeRangeCoalesced(true);
7660         // If the chunk is being coalesced and the current free range is
7661         // in the free lists, remove the current free range so that it
7662         // will be returned to the free lists in its entirety - all
7663         // the coalesced pieces included.
7664         if (freeRangeInFreeLists()) {
7665           FreeChunk* ffc = (FreeChunk*) freeFinger();
7666           assert(ffc->size() == pointer_delta(addr, freeFinger()),
7667             "Size of free range is inconsistent with chunk size.");
7668           if (CMSTestInFreeList) {
7669             assert(_sp->verify_chunk_in_free_list(ffc),
7670               "free range is not in free lists");
7671           }
7672           _sp->removeFreeChunkFromFreeLists(ffc);
7673           set_freeRangeInFreeLists(false);
7674         }
7675       }


7697   // Add it to a free list or let it possibly be coalesced into
7698   // a larger chunk.
7699   HeapWord* const addr = (HeapWord*) fc;
7700   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7701 
7702   if (_sp->adaptive_freelists()) {
7703     // Verify that the bit map has no bits marked between
7704     // addr and purported end of just dead object.
7705     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7706 
7707     do_post_free_or_garbage_chunk(fc, size);
7708   } else {
7709     if (!inFreeRange()) {
7710       // start of a new free range
7711       assert(size > 0, "A free range should have a size");
7712       initialize_free_range(addr, false);
7713     } else {
7714       // this will be swept up when we hit the end of the
7715       // free range
7716       if (CMSTraceSweeper) {
7717         gclog_or_tty->print("  -- pick up garbage " PTR_FORMAT " (" SIZE_FORMAT ")\n", fc, size);
7718       }
7719       // If the chunk is being coalesced and the current free range is
7720       // in the free lists, remove the current free range so that it
7721       // will be returned to the free lists in its entirety - all
7722       // the coalesced pieces included.
7723       if (freeRangeInFreeLists()) {
7724         FreeChunk* ffc = (FreeChunk*)freeFinger();
7725         assert(ffc->size() == pointer_delta(addr, freeFinger()),
7726           "Size of free range is inconsistent with chunk size.");
7727         if (CMSTestInFreeList) {
7728           assert(_sp->verify_chunk_in_free_list(ffc),
7729             "free range is not in free lists");
7730         }
7731         _sp->removeFreeChunkFromFreeLists(ffc);
7732         set_freeRangeInFreeLists(false);
7733       }
7734       set_lastFreeRangeCoalesced(true);
7735     }
7736     // this will be swept up when we hit the end of the free range
7737 


7790     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7791     assert(size >= 3, "Necessary for Printezis marks to work");
7792     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7793     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7794   }
7795   return size;
7796 }
7797 
7798 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7799                                                  size_t chunkSize) {
7800   // do_post_free_or_garbage_chunk() should only be called in the case
7801   // of the adaptive free list allocator.
7802   const bool fcInFreeLists = fc->is_free();
7803   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
7804   assert((HeapWord*)fc <= _limit, "sweep invariant");
7805   if (CMSTestInFreeList && fcInFreeLists) {
7806     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7807   }
7808 
7809   if (CMSTraceSweeper) {
7810     gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", fc, chunkSize);
7811   }
7812 
7813   HeapWord* const fc_addr = (HeapWord*) fc;
7814 
7815   bool coalesce;
7816   const size_t left  = pointer_delta(fc_addr, freeFinger());
7817   const size_t right = chunkSize;
7818   switch (FLSCoalescePolicy) {
7819     // numeric value forms a coalescing aggressiveness metric
7820     case 0:  { // never coalesce
7821       coalesce = false;
7822       break;
7823     }
7824     case 1: { // coalesce if left & right chunks on overpopulated lists
7825       coalesce = _sp->coalOverPopulated(left) &&
7826                  _sp->coalOverPopulated(right);
7827       break;
7828     }
7829     case 2: { // coalesce if left chunk on overpopulated list (default)
7830       coalesce = _sp->coalOverPopulated(left);


7889 
7890 // Lookahead flush:
7891 // If we are tracking a free range, and this is the last chunk that
7892 // we'll look at because its end crosses past _limit, we'll preemptively
7893 // flush it along with any free range we may be holding on to. Note that
7894 // this can be the case only for an already free or freshly garbage
7895 // chunk. If this block is an object, it can never straddle
7896 // over _limit. The "straddling" occurs when _limit is set at
7897 // the previous end of the space when this cycle started, and
7898 // a subsequent heap expansion caused the previously co-terminal
7899 // free block to be coalesced with the newly expanded portion,
7900 // thus rendering _limit a non-block-boundary making it dangerous
7901 // for the sweeper to step over and examine.
7902 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7903   assert(inFreeRange(), "Should only be called if currently in a free range.");
7904   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7905   assert(_sp->used_region().contains(eob - 1),
7906          err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7907                  " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7908                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7909                  eob, eob-1, _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
7910   if (eob >= _limit) {
7911     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7912     if (CMSTraceSweeper) {
7913       gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
7914                              "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7915                              "[" PTR_FORMAT "," PTR_FORMAT ")",
7916                              _limit, fc, eob, _sp->bottom(), _sp->end());
7917     }
7918     // Return the storage we are tracking back into the free lists.
7919     if (CMSTraceSweeper) {
7920       gclog_or_tty->print_cr("Flushing ... ");
7921     }
7922     assert(freeFinger() < eob, "Error");
7923     flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
7924   }
7925 }
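
The guard at the top of lookahead_and_flush() is just an end-of-block
computation against _limit; in isolation (a sketch, reusing this file's
HeapWord type):

    // Flush when the chunk's end-of-block reaches or crosses the sweep
    // limit; per the comment above, only a free or garbage chunk can
    // straddle _limit, and only after a heap expansion.
    inline bool crosses_limit(HeapWord* chunk, size_t chunk_size_words,
                              HeapWord* limit) {
      HeapWord* const eob = chunk + chunk_size_words;  // one past the chunk
      return eob >= limit;
    }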
7926 
7927 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7928   assert(inFreeRange(), "Should only be called if currently in a free range.");
7929   assert(size > 0,
7930     "A zero sized chunk cannot be added to the free lists.");
7931   if (!freeRangeInFreeLists()) {
7932     if (CMSTestInFreeList) {
7933       FreeChunk* fc = (FreeChunk*) chunk;
7934       fc->set_size(size);
7935       assert(!_sp->verify_chunk_in_free_list(fc),
7936         "chunk should not be in free lists yet");
7937     }
7938     if (CMSTraceSweeper) {
7939       gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
7940                     chunk, size);
7941     }
7942     // A new free range is going to be starting.  The current
7943     // free range has not been added to the free lists yet, or
7944     // was removed, so add it back.
7945     // If the current free range was coalesced, then the death
7946     // of the free range was recorded.  Record a birth now.
7947     if (lastFreeRangeCoalesced()) {
7948       _sp->coalBirth(size);
7949     }
7950     _sp->addChunkAndRepairOffsetTable(chunk, size,
7951             lastFreeRangeCoalesced());
7952   } else if (CMSTraceSweeper) {
7953     gclog_or_tty->print_cr("Already in free list: nothing to flush");
7954   }
7955   set_inFreeRange(false);
7956   set_freeRangeInFreeLists(false);
7957 }
7958 
7959 // We take a break if we've been at this for a while,
7960 // so as to avoid monopolizing the locks involved.


7993   }
7994 
7995   ConcurrentMarkSweepThread::synchronize(true);
7996   _freelistLock->lock();
7997   _bitMap->lock()->lock_without_safepoint_check();
7998   _collector->startTimer();
7999 }
8000 
8001 #ifndef PRODUCT
8002 // This is actually very useful in a product build if it can
8003 // be called from the debugger.  Compile it into the product
8004 // as needed.
8005 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8006   return debug_cms_space->verify_chunk_in_free_list(fc);
8007 }
8008 #endif
8009 
8010 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8011   if (CMSTraceSweeper) {
8012     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
8013                            fc, fc->size());
8014   }
8015 }
8016 
8017 // CMSIsAliveClosure
8018 bool CMSIsAliveClosure::do_object_b(oop obj) {
8019   HeapWord* addr = (HeapWord*)obj;
8020   return addr != NULL &&
8021          (!_span.contains(addr) || _bit_map->isMarked(addr));
8022 }
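
An object outside _span is treated as live by fiat; inside _span,
liveness is exactly the mark bit. A usage sketch of the predicate during
reference processing (a hypothetical driver, not the ReferenceProcessor
API):

    // A referent the predicate reports dead may have its Reference
    // cleared or enqueued; a live referent keeps the Reference intact.
    bool should_clear_referent(CMSIsAliveClosure* is_alive, oop referent) {
      return referent != NULL && !is_alive->do_object_b(referent);
    }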
8023 
8024 
8025 CMSKeepAliveClosure::CMSKeepAliveClosure(CMSCollector* collector,
8026                       MemRegion span,
8027                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8028                       bool cpc):
8029   _collector(collector),
8030   _span(span),
8031   _bit_map(bit_map),
8032   _mark_stack(mark_stack),
8033   _concurrent_precleaning(cpc) {




  48 #include "memory/gcLocker.inline.hpp"
  49 #include "memory/genCollectedHeap.hpp"
  50 #include "memory/genMarkSweep.hpp"
  51 #include "memory/genOopClosures.inline.hpp"
  52 #include "memory/iterator.inline.hpp"
  53 #include "memory/padded.hpp"
  54 #include "memory/referencePolicy.hpp"
  55 #include "memory/resourceArea.hpp"
  56 #include "memory/tenuredGeneration.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "prims/jvmtiExport.hpp"
  59 #include "runtime/atomic.inline.hpp"
  60 #include "runtime/globals_extension.hpp"
  61 #include "runtime/handles.inline.hpp"
  62 #include "runtime/java.hpp"
  63 #include "runtime/orderAccess.inline.hpp"
  64 #include "runtime/vmThread.hpp"
  65 #include "services/memoryService.hpp"
  66 #include "services/runtimeService.hpp"
  67 


  68 // statics
  69 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
  70 bool CMSCollector::_full_gc_requested = false;
  71 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
  72 
  73 //////////////////////////////////////////////////////////////////
  74 // In support of CMS/VM thread synchronization
  75 //////////////////////////////////////////////////////////////////
  76 // We split use of the CGC_lock into 2 "levels".
  77 // The low-level locking is of the usual CGC_lock monitor. We introduce
  78 // a higher level "token" (hereafter "CMS token") built on top of the
  79 // low level monitor (hereafter "CGC lock").
  80 // The token-passing protocol gives priority to the VM thread. The
  81 // CGC lock doesn't provide any fairness guarantees, but clients
  82 // should ensure that it is only held for very short, bounded
  83 // durations.
  84 //
  85 // When either of the CMS thread or the VM thread is involved in
  86 // collection operations during which it does not want the other
  87 // thread to interfere, it obtains the CMS token.


1574              "Possible deadlock");
1575       while (_foregroundGCShouldWait) {
1576         // wait for notification
1577         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1578         // Possibility of delay/starvation here, since CMS token does
1579         // not know to give priority to VM thread? Actually, I think
1580         // there wouldn't be any delay/starvation, but the proof of
1581         // that "fact" (?) appears non-trivial. XXX 20011219YSR
1582       }
1583       ConcurrentMarkSweepThread::set_CMS_flag(
1584         ConcurrentMarkSweepThread::CMS_vm_has_token);
1585     }
1586   }
1587   // The CMS_token is already held.  Get back the other locks.
1588   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1589          "VM thread should have CMS token");
1590   getFreelistLocks();
1591   bitMapLock()->lock_without_safepoint_check();
1592   if (TraceCMSState) {
1593     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1594       INTPTR_FORMAT " with first state %d", p2i(Thread::current()), first_state);
1595     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
1596   }
1597 
1598   // Inform cms gen if this was due to partial collection failing.
1599   // The CMS gen may use this fact to determine its expansion policy.
1600   GenCollectedHeap* gch = GenCollectedHeap::heap();
1601   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1602     assert(!_cmsGen->incremental_collection_failed(),
1603            "Should have been noticed, reacted to and cleared");
1604     _cmsGen->set_incremental_collection_failed();
1605   }
1606 
1607   if (first_state > Idling) {
1608     report_concurrent_mode_interruption();
1609   }
1610 
1611   set_did_compact(true);
1612 
1613   // If the collection is being acquired from the background
1614   // collector, there may be references on the discovered


1744   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1745   // Restart the "inter sweep timer" for the next epoch.
1746   _inter_sweep_timer.reset();
1747   _inter_sweep_timer.start();
1748 
1749   gc_timer->register_gc_end();
1750 
1751   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1752 
1753   // For a mark-sweep-compact, compute_new_size() will be called
1754   // in the heap's do_collection() method.
1755 }
1756 
1757 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1758   ContiguousSpace* eden_space = _young_gen->eden();
1759   ContiguousSpace* from_space = _young_gen->from();
1760   ContiguousSpace* to_space   = _young_gen->to();
1761   // Eden
1762   if (_eden_chunk_array != NULL) {
1763     gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1764                            p2i(eden_space->bottom()), p2i(eden_space->top()),
1765                            p2i(eden_space->end()), eden_space->capacity());
1766     gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
1767                            "_eden_chunk_capacity=" SIZE_FORMAT,
1768                            _eden_chunk_index, _eden_chunk_capacity);
1769     for (size_t i = 0; i < _eden_chunk_index; i++) {
1770       gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1771                              i, p2i(_eden_chunk_array[i]));
1772     }
1773   }
1774   // Survivor
1775   if (_survivor_chunk_array != NULL) {
1776     gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1777                            p2i(from_space->bottom()), p2i(from_space->top()),
1778                            p2i(from_space->end()), from_space->capacity());
1779     gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
1780                            "_survivor_chunk_capacity=" SIZE_FORMAT,
1781                            _survivor_chunk_index, _survivor_chunk_capacity);
1782     for (size_t i = 0; i < _survivor_chunk_index; i++) {
1783       gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1784                              i, p2i(_survivor_chunk_array[i]));
1785     }
1786   }
1787 }
1788 
1789 void CMSCollector::getFreelistLocks() const {
1790   // Get locks for all free lists in all generations that this
1791   // collector is responsible for
1792   _cmsGen->freelistLock()->lock_without_safepoint_check();
1793 }
1794 
1795 void CMSCollector::releaseFreelistLocks() const {
1796   // Release locks for all free lists in all generations that this
1797   // collector is responsible for
1798   _cmsGen->freelistLock()->unlock();
1799 }
1800 
1801 bool CMSCollector::haveFreelistLocks() const {
1802   // Check locks for all free lists in all generations that this
1803   // collector is responsible for
1804   assert_lock_strong(_cmsGen->freelistLock());


1871     _collection_count_start = gch->total_full_collections();
1872   }
1873 
1874   // Used for PrintGC
1875   size_t prev_used;
1876   if (PrintGC && Verbose) {
1877     prev_used = _cmsGen->used();
1878   }
1879 
1880   // The change of the collection state is normally done at this level;
1881   // the exceptions are phases that are executed while the world is
1882   // stopped.  For those phases the change of state is done while the
1883   // world is stopped.  For baton passing purposes this allows the
1884   // background collector to finish the phase and change state atomically.
1885   // The foreground collector cannot wait on a phase that is done
1886   // while the world is stopped because the foreground collector already
1887   // has the world stopped and would deadlock.
1888   while (_collectorState != Idling) {
1889     if (TraceCMSState) {
1890       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
1891         p2i(Thread::current()), _collectorState);
1892     }
1893     // The foreground collector
1894     //   holds the Heap_lock throughout its collection.
1895     //   holds the CMS token (but not the lock)
1896     //     except while it is waiting for the background collector to yield.
1897     //
1898     // The foreground collector should be blocked (not for long)
1899     //   if the background collector is about to start a phase
1900     //   executed with world stopped.  If the background
1901     //   collector has already started such a phase, the
1902     //   foreground collector is blocked waiting for the
1903     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1904     //   are executed in the VM thread.
1905     //
1906     // The locking order is
1907     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1908     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1909     //   CMS token  (claimed in
1910     //                stop_world_and_do() -->
1911     //                  safepoint_synchronize() -->
1912     //                    CMSThread::synchronize())
1913 
1914     {
1915       // Check if the FG collector wants us to yield.
1916       CMSTokenSync x(true); // is cms thread
1917       if (waitForForegroundGC()) {
1918         // We yielded to a foreground GC, nothing more to be
1919         // done this round.
1920         assert(_foregroundGCShouldWait == false, "We set it to false in "
1921                "waitForForegroundGC()");
1922         if (TraceCMSState) {
1923           gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1924             " exiting collection CMS state %d",
1925             p2i(Thread::current()), _collectorState);
1926         }
1927         return;
1928       } else {
1929         // The background collector can run but check to see if the
1930         // foreground collector has done a collection while the
1931         // background collector was waiting to get the CGC_lock
1932         // above.  If yes, break so that _foregroundGCShouldWait
1933         // is cleared before returning.
1934         if (_collectorState == Idling) {
1935           break;
1936         }
1937       }
1938     }
1939 
1940     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1941       "should be waiting");
1942 
1943     switch (_collectorState) {
1944       case InitialMarking:
1945         {


2012       case Resetting:
2013         // CMS heap resizing has been completed
2014         reset(true);
2015         assert(_collectorState == Idling, "Collector state should "
2016           "have changed");
2017 
2018         MetaspaceGC::set_should_concurrent_collect(false);
2019 
2020         stats().record_cms_end();
2021         // Don't move the concurrent_phases_end() and compute_new_size()
2022         // calls to here because a preempted background collection
2023       // has its state set to "Resetting".
2024         break;
2025       case Idling:
2026       default:
2027         ShouldNotReachHere();
2028         break;
2029     }
2030     if (TraceCMSState) {
2031       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2032         p2i(Thread::current()), _collectorState);
2033     }
2034     assert(_foregroundGCShouldWait, "block post-condition");
2035   }
2036 
2037   // Should this be in gc_epilogue?
2038   collector_policy()->counters()->update_counters();
2039 
2040   {
2041     // Clear _foregroundGCShouldWait and, in the event that the
2042     // foreground collector is waiting, notify it, before
2043     // returning.
2044     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2045     _foregroundGCShouldWait = false;
2046     if (_foregroundGCIsActive) {
2047       CGC_lock->notify();
2048     }
2049     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2050            "Possible deadlock");
2051   }
2052   if (TraceCMSState) {
2053     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2054       " exiting collection CMS state %d",
2055           p2i(Thread::current()), _collectorState);
2056   }
2057   if (PrintGC && Verbose) {
2058     _cmsGen->print_heap_change(prev_used);
2059   }
2060 }
2061 
2062 void CMSCollector::register_gc_start(GCCause::Cause cause) {
2063   _cms_start_registered = true;
2064   _gc_timer_cm->register_gc_start();
2065   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2066 }
2067 
2068 void CMSCollector::register_gc_end() {
2069   if (_cms_start_registered) {
2070     report_heap_summary(GCWhen::AfterGC);
2071 
2072     _gc_timer_cm->register_gc_end();
2073     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2074     _cms_start_registered = false;
2075   }


2093   // Block the foreground collector until the
2094   // background collector decides whether to
2095   // yield.
2096   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2097   _foregroundGCShouldWait = true;
2098   if (_foregroundGCIsActive) {
2099     // The background collector yields to the
2100     // foreground collector and returns a value
2101     // indicating that it has yielded.  The foreground
2102     // collector can proceed.
2103     res = true;
2104     _foregroundGCShouldWait = false;
2105     ConcurrentMarkSweepThread::clear_CMS_flag(
2106       ConcurrentMarkSweepThread::CMS_cms_has_token);
2107     ConcurrentMarkSweepThread::set_CMS_flag(
2108       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2109     // Get a possibly blocked foreground thread going
2110     CGC_lock->notify();
2111     if (TraceCMSState) {
2112       gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2113         p2i(Thread::current()), _collectorState);
2114     }
2115     while (_foregroundGCIsActive) {
2116       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2117     }
2118     ConcurrentMarkSweepThread::set_CMS_flag(
2119       ConcurrentMarkSweepThread::CMS_cms_has_token);
2120     ConcurrentMarkSweepThread::clear_CMS_flag(
2121       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2122   }
2123   if (TraceCMSState) {
2124     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2125       p2i(Thread::current()), _collectorState);
2126   }
2127   return res;
2128 }
2129 
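// An illustrative stand-alone model of the hand-off protocol above, using
// std::mutex/std::condition_variable in place of the CGC_lock monitor and
// plain bools in place of the CMS-flag bits. All names below are
// hypothetical; this is a sketch of the protocol, not part of this file:
//
//   #include <mutex>
//   #include <condition_variable>
//
//   std::mutex cgc_lock;
//   std::condition_variable cgc_cv;
//   bool fg_is_active   = false;   // models _foregroundGCIsActive
//   bool fg_should_wait = false;   // models _foregroundGCShouldWait
//
//   // Background collector: offer to yield; return true if we did.
//   bool wait_for_foreground_gc() {
//     std::unique_lock<std::mutex> x(cgc_lock);
//     fg_should_wait = true;
//     bool yielded = false;
//     if (fg_is_active) {
//       yielded = true;
//       fg_should_wait = false;
//       cgc_cv.notify_all();       // get a possibly blocked FG thread going
//       cgc_cv.wait(x, []{ return !fg_is_active; });
//     }
//     return yielded;
//   }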
2130 // Because of the need to lock the free lists and other structures in
2131 // the collector, common to all the generations that the collector is
2132 // collecting, we need the gc_prologues of individual CMS generations
2133 // to delegate to their collector. It may have been simpler had the
2134 // current infrastructure allowed one to call a prologue on a
2135 // collector. In the absence of that we have the generation's
2136 // prologue delegate to the collector, which delegates back
2137 // some "local" work to a worker method in the individual generations
2138 // that it's responsible for collecting, while itself doing any
2139 // work common to all generations it's responsible for. A similar
2140 // comment applies to the gc_epilogue()s.
2141 // The role of the variable _between_prologue_and_epilogue is to
2142 // enforce the invocation protocol.
2143 void CMSCollector::gc_prologue(bool full) {
2144   // Call gc_prologue_work() for the CMSGen
2145   // we are responsible for.


2337 #endif
2338 
2339 // Check reachability of the given heap address in CMS generation,
2340 // treating all other generations as roots.
2341 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2342   // We could "guarantee" below, rather than assert, but I'll
2343   // leave these as "asserts" so that an adventurous debugger
2344   // could try this in the product build, provided some subset of
2345   // the conditions were met, they were interested in the
2346   // results, and they knew that the computation below wouldn't interfere
2347   // with other concurrent computations mutating the structures
2348   // being read or written.
2349   assert(SafepointSynchronize::is_at_safepoint(),
2350          "Else mutations in object graph will make answer suspect");
2351   assert(have_cms_token(), "Should hold cms token");
2352   assert(haveFreelistLocks(), "must hold free list locks");
2353   assert_lock_strong(bitMapLock());
2354 
2355   // Clear the marking bit map array before starting, but, just
2356   // for kicks, first report if the given address is already marked
2357   gclog_or_tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
2358                 _markBitMap.isMarked(addr) ? "" : " not");
2359 
2360   if (verify_after_remark()) {
2361     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2362     bool result = verification_mark_bm()->isMarked(addr);
2363     gclog_or_tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
2364                            result ? "IS" : "is NOT");
2365     return result;
2366   } else {
2367     gclog_or_tty->print_cr("Could not compute result");
2368     return false;
2369   }
2370 }
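// A hedged usage sketch: in a debug build, with the VM at a safepoint and
// the asserted locks held, this can be invoked from a debugger, e.g. (the
// address is a placeholder):
//
//   (gdb) call ConcurrentMarkSweepGeneration::_collector->is_cms_reachable((HeapWord*)0x...)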
2371 
2372 
2373 void
2374 CMSCollector::print_on_error(outputStream* st) {
2375   CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2376   if (collector != NULL) {
2377     CMSBitMap* bitmap = &collector->_markBitMap;
2378     st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2379     bitmap->print_on_error(st, " Bits: ");
2380 
2381     st->cr();
2382 
2383     CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2384     st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2385     mut_bitmap->print_on_error(st, " Bits: ");
2386   }
2387 }
2388 
2389 ////////////////////////////////////////////////////////
2390 // CMS Verification Support
2391 ////////////////////////////////////////////////////////
2392 // Following the remark phase, the following invariant
2393 // should hold -- each object in the CMS heap which is marked
2394 // in the verification_mark_bm() should also be marked in markBitMap().
2395 
2396 class VerifyMarkedClosure: public BitMapClosure {
2397   CMSBitMap* _marks;
2398   bool       _failed;
2399 
2400  public:
2401   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2402 
2403   bool do_bit(size_t offset) {
2404     HeapWord* addr = _marks->offsetToHeapWord(offset);
2405     if (!_marks->isMarked(addr)) {
2406       oop(addr)->print_on(gclog_or_tty);
2407       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2408       _failed = true;
2409     }
2410     return true;
2411   }
2412 
2413   bool failed() { return _failed; }
2414 };
2415 
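// A minimal sketch of how this closure is driven (assuming the bitmap
// iteration API used elsewhere in this file; not a verbatim excerpt):
//
//   VerifyMarkedClosure vcl(markBitMap());
//   verification_mark_bm()->iterate(&vcl);   // walk the verification marks
//   if (vcl.failed()) {
//     ... dump state and report the verification failure ...
//   }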
2416 bool CMSCollector::verify_after_remark(bool silent) {
2417   if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2418   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2419   static bool init = false;
2420 
2421   assert(SafepointSynchronize::is_at_safepoint(),
2422          "Else mutations in object graph will make answer suspect");
2423   assert(have_cms_token(),
2424          "Else there may be mutual interference in use of "
2425          "verification data structures");
2426   assert(_collectorState > Marking && _collectorState <= Sweeping,
2427          "Else marking info checked here may be obsolete");


2455   verify_work_stacks_empty();
2456 
2457   GenCollectedHeap* gch = GenCollectedHeap::heap();
2458   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2459   // Update the saved marks which may affect the root scans.
2460   gch->save_marks();
2461 
2462   if (CMSRemarkVerifyVariant == 1) {
2463     // In this first variant of verification, we complete
2464     // all marking, then check if the new marks-vector is
2465     // a subset of the CMS marks-vector.
2466     verify_after_remark_work_1();
2467   } else if (CMSRemarkVerifyVariant == 2) {
2468     // In this second variant of verification, we flag an error
2469     // (i.e. an object reachable in the new marks-vector not reachable
2470     // in the CMS marks-vector) immediately, also indicating the
2471     // identity of an object (A) that references the unmarked object (B) --
2472     // presumably, a mutation to A failed to be picked up by preclean/remark?
2473     verify_after_remark_work_2();
2474   } else {
2475     warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant",
2476             CMSRemarkVerifyVariant);
2477   }
2478   if (!silent) gclog_or_tty->print(" done] ");
2479   return true;
2480 }
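// A hedged usage note: the variant is selected with the
// CMSRemarkVerifyVariant flag consulted above, e.g. (possibly together
// with -XX:+UnlockDiagnosticVMOptions, depending on the flag's kind in
// the build at hand):
//
//   -XX:CMSRemarkVerifyVariant=2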
2481 
2482 void CMSCollector::verify_after_remark_work_1() {
2483   ResourceMark rm;
2484   HandleMark  hm;
2485   GenCollectedHeap* gch = GenCollectedHeap::heap();
2486 
2487   // Get a clear set of claim bits for the roots processing to work with.
2488   ClassLoaderDataGraph::clear_claimed_marks();
2489 
2490   // Mark from roots one level into CMS
2491   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2492   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2493 
2494   gch->gen_process_roots(_cmsGen->level(),
2495                          true,   // younger gens are roots


5037     _cursor[min_tid]++;
5038   }
5039   // We are all done; record the size of the _survivor_chunk_array
5040   _survivor_chunk_index = i; // exclusive: [0, i)
5041   if (PrintCMSStatistics > 0) {
5042     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
5043   }
5044   // Verify that we used up all the recorded entries
5045   #ifdef ASSERT
5046     size_t total = 0;
5047     for (int j = 0; j < no_of_gc_threads; j++) {
5048       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5049       total += _cursor[j];
5050     }
5051     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5052     // Check that the merged array is in sorted order
5053     if (total > 0) {
5054       for (size_t i = 0; i < total - 1; i++) {
5055         if (PrintCMSStatistics > 0) {
5056           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5057                               i, p2i(_survivor_chunk_array[i]));
5058         }
5059         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5060                "Not sorted");
5061       }
5062     }
5063   #endif // ASSERT
5064 }
5065 
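// For context, the code above is the tail of a cursor-based k-way merge of
// the per-thread (individually sorted) survivor PLAB chunk arrays. A
// stand-alone sketch of that merge, with simplified, hypothetical types:
//
//   #include <stddef.h>
//   #include <stdint.h>
//   #include <vector>
//
//   // Merge k individually sorted arrays into one sorted array.
//   std::vector<uintptr_t>
//   merge_sorted(const std::vector<std::vector<uintptr_t> >& plab) {
//     std::vector<size_t> cursor(plab.size(), 0);
//     std::vector<uintptr_t> merged;
//     for (;;) {
//       int min_tid = -1;   // thread whose cursor points at the minimum
//       for (size_t j = 0; j < plab.size(); j++) {
//         if (cursor[j] < plab[j].size() &&
//             (min_tid < 0 ||
//              plab[j][cursor[j]] < plab[min_tid][cursor[min_tid]])) {
//           min_tid = (int) j;
//         }
//       }
//       if (min_tid < 0) break;                    // every cursor exhausted
//       merged.push_back(plab[min_tid][cursor[min_tid]]);
//       cursor[min_tid]++;                         // advance the winner
//     }
//     return merged;
//   }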
5066 // Set up the space's par_seq_tasks structure for work claiming
5067 // for parallel initial scan and rescan of young gen.
5068 // See ParRescanTask where this is currently used.
5069 void
5070 CMSCollector::
5071 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5072   assert(n_threads > 0, "Unexpected n_threads argument");
5073 
5074   // Eden space
5075   if (!_young_gen->eden()->is_empty()) {
5076     SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
5077     assert(!pst->valid(), "Clobbering existing data?");


5681 
5682 // FIX ME!!! Looks like this belongs in CFLSpace, with
5683 // CMSGen merely delegating to it.
5684 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5685   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5686   HeapWord*  minAddr        = _cmsSpace->bottom();
5687   HeapWord*  largestAddr    =
5688     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5689   if (largestAddr == NULL) {
5690     // The dictionary appears to be empty.  In this case
5691     // try to coalesce at the end of the heap.
5692     largestAddr = _cmsSpace->end();
5693   }
5694   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5695   size_t nearLargestOffset =
5696     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5697   if (PrintFLSStatistics != 0) {
5698     gclog_or_tty->print_cr(
5699       "CMS: Large Block: " PTR_FORMAT ";"
5700       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5701       p2i(largestAddr),
5702       p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5703   }
5704   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5705 }
5706 
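// A worked example of the computation above (all numbers hypothetical):
// with the largest dictionary block 1,000,000 words past bottom() and a
// proximity factor of 0.99, nearLargestOffset is
// (size_t)(0.99 * 1,000,000) - MinChunkSize = 990,000 - MinChunkSize
// words. isNearLargestChunk() below then answers true only in roughly the
// last 1% of the span leading up to (and beyond) the largest block, which
// is where coalescing toward that block is worth favoring.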
5707 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5708   return addr >= _cmsSpace->nearLargestChunk();
5709 }
5710 
5711 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5712   return _cmsSpace->find_chunk_at_end();
5713 }
5714 
5715 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
5716                                                     bool full) {
5717   // The next lower level has been collected.  Gather any statistics
5718   // that are of interest at this point.
5719   if (!full && (current_level + 1) == level()) {
5720     // Gather statistics on the young generation collection.
5721     collector()->stats().record_gc0_end(used());
5722   }


6165 
6166 // A variant of the above, used for CMS marking verification.
6167 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6168   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6169     _span(span),
6170     _verification_bm(verification_bm),
6171     _cms_bm(cms_bm)
6172 {
6173     assert(_ref_processor == NULL, "deliberately left NULL");
6174     assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6175 }
6176 
6177 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6178   // if p points into _span, then mark corresponding bit in _markBitMap
6179   assert(obj->is_oop(), "expected an oop");
6180   HeapWord* addr = (HeapWord*)obj;
6181   if (_span.contains(addr)) {
6182     _verification_bm->mark(addr);
6183     if (!_cms_bm->isMarked(addr)) {
6184       oop(addr)->print();
6185       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6186       fatal("... aborting");
6187     }
6188   }
6189 }
6190 
6191 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6192 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6193 
6194 //////////////////////////////////////////////////
6195 // MarkRefsIntoAndScanClosure
6196 //////////////////////////////////////////////////
6197 
6198 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6199                                                        ReferenceProcessor* rp,
6200                                                        CMSBitMap* bit_map,
6201                                                        CMSBitMap* mod_union_table,
6202                                                        CMSMarkStack*  mark_stack,
6203                                                        CMSCollector* collector,
6204                                                        bool should_yield,
6205                                                        bool concurrent_precleaning):


6960 // Upon stack overflow, we discard (part of) the stack,
6961 // remembering the least address amongst those discarded
6962 // in CMSCollector's _restart_address.
6963 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6964   // Remember the least grey address discarded
6965   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6966   _collector->lower_restart_addr(ra);
6967   _mark_stack->reset();  // discard stack contents
6968   _mark_stack->expand(); // expand the stack if possible
6969 }
6970 
6971 void PushAndMarkVerifyClosure::do_oop(oop obj) {
6972   assert(obj->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
6973   HeapWord* addr = (HeapWord*)obj;
6974   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6975     // Oop lies in _span and isn't yet grey or black
6976     _verification_bm->mark(addr);            // now grey
6977     if (!_cms_bm->isMarked(addr)) {
6978       oop(addr)->print();
6979       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
6980                              p2i(addr));
6981       fatal("... aborting");
6982     }
6983 
6984     if (!_mark_stack->push(obj)) { // stack overflow
6985       if (PrintCMSStatistics != 0) {
6986         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
6987                                SIZE_FORMAT, _mark_stack->capacity());
6988       }
6989       assert(_mark_stack->isFull(), "Else push should have succeeded");
6990       handle_stack_overflow(addr);
6991     }
6992     // anything including and to the right of _finger
6993     // will be scanned as we iterate over the remainder of the
6994     // bit map
6995   }
6996 }
6997 
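// A minimal model of the overflow recovery above (hypothetical names; a
// sketch of the idea, not this file's mark stack):
//
//   struct MarkStackModel {
//     oop*   _base;                      // backing array
//     size_t _cap, _top;
//     HeapWord* least_value(HeapWord* lost) const {
//       HeapWord* least = lost;          // the push that failed
//       for (size_t i = 0; i < _top; i++) {
//         HeapWord* a = (HeapWord*)_base[i];
//         if (a < least) least = a;      // track the lowest grey address
//       }
//       return least;
//     }
//   };
//
// The caller records that least address via lower_restart_addr(), empties
// and (if possible) grows the stack, and a later pass rescans from the
// recorded restart address so no grey object is lost.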
6998 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6999                      MemRegion span,
7000                      CMSBitMap* bitMap, CMSMarkStack*  markStack,


7356   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7357   _lastFreeRangeCoalesced(false),
7358   _freeFinger(g->used_region().start())
7359 {
7360   NOT_PRODUCT(
7361     _numObjectsFreed = 0;
7362     _numWordsFreed   = 0;
7363     _numObjectsLive = 0;
7364     _numWordsLive = 0;
7365     _numObjectsAlreadyFree = 0;
7366     _numWordsAlreadyFree = 0;
7367     _last_fc = NULL;
7368 
7369     _sp->initializeIndexedFreeListArrayReturnedBytes();
7370     _sp->dictionary()->initialize_dict_returned_bytes();
7371   )
7372   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7373          "sweep _limit out of bounds");
7374   if (CMSTraceSweeper) {
7375     gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
7376                         p2i(_limit));
7377   }
7378 }
7379 
7380 void SweepClosure::print_on(outputStream* st) const {
7381   st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7382                p2i(_sp->bottom()), p2i(_sp->end()));
7383   st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7384   st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7385   NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7386   st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7387                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7388 }
7389 
7390 #ifndef PRODUCT
7391 // Assertion checking only:  no useful work in product mode --
7392 // however, if any of the flags below become product flags,
7393 // you may need to review this code to see if it needs to be
7394 // enabled in product mode.
7395 SweepClosure::~SweepClosure() {
7396   assert_lock_strong(_freelistLock);
7397   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7398          "sweep _limit out of bounds");
7399   if (inFreeRange()) {
7400     warning("inFreeRange() should have been reset; dumping state of SweepClosure");
7401     print();
7402     ShouldNotReachHere();
7403   }
7404   if (Verbose && PrintGC) {
7405     gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",


7409       "Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7410       _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7411       _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7412     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
7413                         * sizeof(HeapWord);
7414     gclog_or_tty->print_cr("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7415 
7416     if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7417       size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7418       size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7419       size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7420       gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returned_bytes);
7421       gclog_or_tty->print("   Indexed List Returned " SIZE_FORMAT " bytes",
7422         indexListReturnedBytes);
7423       gclog_or_tty->print_cr("        Dictionary Returned " SIZE_FORMAT " bytes",
7424         dict_returned_bytes);
7425     }
7426   }
7427   if (CMSTraceSweeper) {
7428     gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
7429                            p2i(_limit));
7430   }
7431 }
7432 #endif  // PRODUCT
7433 
7434 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7435     bool freeRangeInFreeLists) {
7436   if (CMSTraceSweeper) {
7437     gclog_or_tty->print_cr("---- Start free range at " PTR_FORMAT " with free block (%d)",
7438                p2i(freeFinger), freeRangeInFreeLists);
7439   }
7440   assert(!inFreeRange(), "Trampling existing free range");
7441   set_inFreeRange(true);
7442   set_lastFreeRangeCoalesced(false);
7443 
7444   set_freeFinger(freeFinger);
7445   set_freeRangeInFreeLists(freeRangeInFreeLists);
7446   if (CMSTestInFreeList) {
7447     if (freeRangeInFreeLists) {
7448       FreeChunk* fc = (FreeChunk*) freeFinger;
7449       assert(fc->is_free(), "A chunk on the free list should be free.");
7450       assert(fc->size() > 0, "Free range should have a size");
7451       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7452     }
7453   }
7454 }
7455 
7456 // Note that the sweeper runs concurrently with mutators. Thus,
7457 // it is possible for direct allocation in this generation to happen
7458 // in the middle of the sweep. Note that the sweeper also coalesces


7482 
7483 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7484   FreeChunk* fc = (FreeChunk*)addr;
7485   size_t res;
7486 
7487   // Check if we are done sweeping. Below we check "addr >= _limit" rather
7488   // than "addr == _limit" because although _limit was a block boundary when
7489   // we started the sweep, it may no longer be one because heap expansion
7490   // may have caused us to coalesce the block ending at the address _limit
7491   // with a newly expanded chunk (this happens when _limit was set to the
7492   // previous _end of the space), so we may have stepped past _limit:
7493   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7494   if (addr >= _limit) { // we have swept up to or past the limit: finish up
7495     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7496            "sweep _limit out of bounds");
7497     assert(addr < _sp->end(), "addr out of bounds");
7498     // Flush any free range we might be holding as a single
7499     // coalesced chunk to the appropriate free list.
7500     if (inFreeRange()) {
7501       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7502              err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", p2i(freeFinger())));
7503       flush_cur_free_chunk(freeFinger(),
7504                            pointer_delta(addr, freeFinger()));
7505       if (CMSTraceSweeper) {
7506         gclog_or_tty->print("Sweep: last chunk: ");
7507         gclog_or_tty->print("put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") "
7508                    "[coalesced:%d]\n",
7509                    p2i(freeFinger()), pointer_delta(addr, freeFinger()),
7510                    lastFreeRangeCoalesced() ? 1 : 0);
7511       }
7512     }
7513 
7514     // help the iterator loop finish
7515     return pointer_delta(_sp->end(), addr);
7516   }
7517 
7518   assert(addr < _limit, "sweep invariant");
7519   // check if we should yield
7520   do_yield_check(addr);
7521   if (fc->is_free()) {
7522     // Chunk that is already free
7523     res = fc->size();
7524     do_already_free_chunk(fc);
7525     debug_only(_sp->verifyFreeLists());
7526     // If we flush the chunk at hand in lookahead_and_flush()
7527     // and it's coalesced with a preceding chunk, then the
7528     // process of "mangling" the payload of the coalesced block
7529     // will cause erasure of the size information from the


7633         assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
7634         if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
7635             nextChunk->is_free()               &&     // ... which is free...
7636             nextChunk->cantCoalesce()) {             // ... but can't be coalesced
7637           // nothing to do
7638         } else {
7639           // Potentially the start of a new free range:
7640           // Don't eagerly remove it from the free lists.
7641           // No need to remove it if it will just be put
7642           // back again.  (Also from a pragmatic point of view
7643           // if it is a free block in a region that is beyond
7644           // any allocated blocks, an assertion will fail)
7645           // Remember the start of a free run.
7646           initialize_free_range(addr, true);
7647           // end - can coalesce with next chunk
7648         }
7649       } else {
7650         // the midst of a free range, we are coalescing
7651         print_free_block_coalesced(fc);
7652         if (CMSTraceSweeper) {
7653           gclog_or_tty->print_cr("  -- pick up free block " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), size);
7654         }
7655         // remove it from the free lists
7656         _sp->removeFreeChunkFromFreeLists(fc);
7657         set_lastFreeRangeCoalesced(true);
7658         // If the chunk is being coalesced and the current free range is
7659         // in the free lists, remove the current free range so that it
7660         // will be returned to the free lists in its entirety - all
7661         // the coalesced pieces included.
7662         if (freeRangeInFreeLists()) {
7663           FreeChunk* ffc = (FreeChunk*) freeFinger();
7664           assert(ffc->size() == pointer_delta(addr, freeFinger()),
7665             "Size of free range is inconsistent with chunk size.");
7666           if (CMSTestInFreeList) {
7667             assert(_sp->verify_chunk_in_free_list(ffc),
7668               "free range is not in free lists");
7669           }
7670           _sp->removeFreeChunkFromFreeLists(ffc);
7671           set_freeRangeInFreeLists(false);
7672         }
7673       }


7695   // Add it to a free list or let it possibly be coalesced into
7696   // a larger chunk.
7697   HeapWord* const addr = (HeapWord*) fc;
7698   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7699 
7700   if (_sp->adaptive_freelists()) {
7701     // Verify that the bit map has no bits marked between
7702     // addr and purported end of just dead object.
7703     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7704 
7705     do_post_free_or_garbage_chunk(fc, size);
7706   } else {
7707     if (!inFreeRange()) {
7708       // start of a new free range
7709       assert(size > 0, "A free range should have a size");
7710       initialize_free_range(addr, false);
7711     } else {
7712       // this will be swept up when we hit the end of the
7713       // free range
7714       if (CMSTraceSweeper) {
7715         gclog_or_tty->print_cr("  -- pick up garbage " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), size);
7716       }
7717       // If the chunk is being coalesced and the current free range is
7718       // in the free lists, remove the current free range so that it
7719       // will be returned to the free lists in its entirety - all
7720       // the coalesced pieces included.
7721       if (freeRangeInFreeLists()) {
7722         FreeChunk* ffc = (FreeChunk*)freeFinger();
7723         assert(ffc->size() == pointer_delta(addr, freeFinger()),
7724           "Size of free range is inconsistent with chunk size.");
7725         if (CMSTestInFreeList) {
7726           assert(_sp->verify_chunk_in_free_list(ffc),
7727             "free range is not in free lists");
7728         }
7729         _sp->removeFreeChunkFromFreeLists(ffc);
7730         set_freeRangeInFreeLists(false);
7731       }
7732       set_lastFreeRangeCoalesced(true);
7733     }
7734     // this will be swept up when we hit the end of the free range
7735 


7788     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7789     assert(size >= 3, "Necessary for Printezis marks to work");
7790     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7791     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7792   }
7793   return size;
7794 }
7795 
7796 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7797                                                  size_t chunkSize) {
7798   // do_post_free_or_garbage_chunk() should only be called in the case
7799   // of the adaptive free list allocator.
7800   const bool fcInFreeLists = fc->is_free();
7801   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
7802   assert((HeapWord*)fc <= _limit, "sweep invariant");
7803   if (CMSTestInFreeList && fcInFreeLists) {
7804     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7805   }
7806 
7807   if (CMSTraceSweeper) {
7808     gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7809   }
7810 
7811   HeapWord* const fc_addr = (HeapWord*) fc;
7812 
7813   bool coalesce;
7814   const size_t left  = pointer_delta(fc_addr, freeFinger());
7815   const size_t right = chunkSize;
7816   switch (FLSCoalescePolicy) {
7817     // numeric value forms a coalescing aggressiveness metric
7818     case 0:  { // never coalesce
7819       coalesce = false;
7820       break;
7821     }
7822     case 1: { // coalesce if left & right chunks on overpopulated lists
7823       coalesce = _sp->coalOverPopulated(left) &&
7824                  _sp->coalOverPopulated(right);
7825       break;
7826     }
7827     case 2: { // coalesce if left chunk on overpopulated list (default)
7828       coalesce = _sp->coalOverPopulated(left);


7887 
7888 // Lookahead flush:
7889 // If we are tracking a free range, and this is the last chunk that
7890 // we'll look at because its end crosses past _limit, we'll preemptively
7891 // flush it along with any free range we may be holding on to. Note that
7892 // this can be the case only for an already free or freshly garbage
7893 // chunk. If this block is an object, it can never straddle
7894 // over _limit. The "straddling" occurs when _limit is set at
7895 // the previous end of the space when this cycle started, and
7896 // a subsequent heap expansion caused the previously co-terminal
7897 // free block to be coalesced with the newly expanded portion,
7898 // thus rendering _limit a non-block-boundary making it dangerous
7899 // for the sweeper to step over and examine.
7900 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7901   assert(inFreeRange(), "Should only be called if currently in a free range.");
7902   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7903   assert(_sp->used_region().contains(eob - 1),
7904          err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7905                  " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7906                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7907                  p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size));
7908   if (eob >= _limit) {
7909     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7910     if (CMSTraceSweeper) {
7911       gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
7912                              "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7913                              "[" PTR_FORMAT "," PTR_FORMAT ")",
7914                              p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7915     }
7916     // Return the storage we are tracking back into the free lists.
7917     if (CMSTraceSweeper) {
7918       gclog_or_tty->print_cr("Flushing ... ");
7919     }
7920     assert(freeFinger() < eob, "Error");
7921     flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
7922   }
7923 }
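// Illustration of the straddling case handled above (addresses are
// hypothetical):
//
//              bottom()              old end() == _limit          new end()
//   before:      |...live objects...|<-- co-terminal free -->|
//   expanded:    |...live objects...|<------- one coalesced free chunk ------->|
//
// After expansion the previously co-terminal free block has been coalesced
// with the newly added space, so the chunk begins below _limit and ends
// past it; the sweep must flush eagerly here rather than step over the now
// non-block-boundary _limit.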
7924 
7925 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7926   assert(inFreeRange(), "Should only be called if currently in a free range.");
7927   assert(size > 0,
7928     "A zero sized chunk cannot be added to the free lists.");
7929   if (!freeRangeInFreeLists()) {
7930     if (CMSTestInFreeList) {
7931       FreeChunk* fc = (FreeChunk*) chunk;
7932       fc->set_size(size);
7933       assert(!_sp->verify_chunk_in_free_list(fc),
7934         "chunk should not be in free lists yet");
7935     }
7936     if (CMSTraceSweeper) {
7937       gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
7938                     p2i(chunk), size);
7939     }
7940     // A new free range is going to be starting.  The current
7941     // free range has not been added to the free lists yet or
7942     // was removed so add it back.
7943     // If the current free range was coalesced, then the death
7944     // of the free range was recorded.  Record a birth now.
7945     if (lastFreeRangeCoalesced()) {
7946       _sp->coalBirth(size);
7947     }
7948     _sp->addChunkAndRepairOffsetTable(chunk, size,
7949             lastFreeRangeCoalesced());
7950   } else if (CMSTraceSweeper) {
7951     gclog_or_tty->print_cr("Already in free list: nothing to flush");
7952   }
7953   set_inFreeRange(false);
7954   set_freeRangeInFreeLists(false);
7955 }
7956 
7957 // We take a break if we've been at this for a while,
7958 // so as to avoid monopolizing the locks involved.


7991   }
7992 
7993   ConcurrentMarkSweepThread::synchronize(true);
7994   _freelistLock->lock();
7995   _bitMap->lock()->lock_without_safepoint_check();
7996   _collector->startTimer();
7997 }
7998 
7999 #ifndef PRODUCT
8000 // This is actually very useful in a product build if it can
8001 // be called from the debugger.  Compile it into the product
8002 // as needed.
8003 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8004   return debug_cms_space->verify_chunk_in_free_list(fc);
8005 }
8006 #endif
8007 
8008 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8009   if (CMSTraceSweeper) {
8010     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
8011                            p2i(fc), fc->size());
8012   }
8013 }
8014 
8015 // CMSIsAliveClosure
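// An object outside _span is treated as unconditionally alive here (it is
// outside this collector's jurisdiction); inside _span, liveness is
// exactly the mark bit.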
8016 bool CMSIsAliveClosure::do_object_b(oop obj) {
8017   HeapWord* addr = (HeapWord*)obj;
8018   return addr != NULL &&
8019          (!_span.contains(addr) || _bit_map->isMarked(addr));
8020 }
8021 
8022 
8023 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8024                       MemRegion span,
8025                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8026                       bool cpc):
8027   _collector(collector),
8028   _span(span),
8029   _bit_map(bit_map),
8030   _mark_stack(mark_stack),
8031   _concurrent_precleaning(cpc) {

