src/share/vm/gc/g1/concurrentMark.cpp

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/g1/concurrentMark.inline.hpp"
  30 #include "gc/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc/g1/g1CollectedHeap.inline.hpp"
  32 #include "gc/g1/g1CollectorPolicy.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"
  34 #include "gc/g1/g1ErgoVerbose.hpp"
  35 #include "gc/g1/g1Log.hpp"
  36 #include "gc/g1/g1OopClosures.inline.hpp"
  37 #include "gc/g1/g1RemSet.hpp"
  38 #include "gc/g1/g1StringDedup.hpp"
  39 #include "gc/g1/heapRegion.inline.hpp"
  40 #include "gc/g1/heapRegionManager.inline.hpp"
  41 #include "gc/g1/heapRegionRemSet.hpp"
  42 #include "gc/g1/heapRegionSet.inline.hpp"
  43 #include "gc/g1/suspendibleThreadSet.hpp"
  44 #include "gc/shared/gcId.hpp"
  45 #include "gc/shared/gcTimer.hpp"
  46 #include "gc/shared/gcTrace.hpp"
  47 #include "gc/shared/gcTraceTime.hpp"
  48 #include "gc/shared/genOopClosures.inline.hpp"
  49 #include "gc/shared/referencePolicy.hpp"
  50 #include "gc/shared/strongRootsScope.hpp"
  51 #include "gc/shared/taskqueue.inline.hpp"
  52 #include "gc/shared/vmGCOperations.hpp"

  53 #include "memory/allocation.hpp"
  54 #include "memory/resourceArea.hpp"
  55 #include "oops/oop.inline.hpp"
  56 #include "runtime/atomic.inline.hpp"
  57 #include "runtime/handles.inline.hpp"
  58 #include "runtime/java.hpp"
  59 #include "runtime/prefetch.inline.hpp"
  60 #include "services/memTracker.hpp"
  61 
  62 // Concurrent marking bit map wrapper
  63 
  64 CMBitMapRO::CMBitMapRO(int shifter) :
  65   _bm(),
  66   _shifter(shifter) {
  67   _bmStartWord = 0;
  68   _bmWordSize = 0;
  69 }
  70 
  71 HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
  72                                                const HeapWord* limit) const {


 215     rs.release();
 216     return false;
 217   }
 218   assert(_virtual_space.committed_size() == rs.size(),
 219          "Didn't reserve backing store for all of ConcurrentMark stack?");
 220   _base = (oop*) _virtual_space.low();
 221   setEmpty();
 222   _capacity = (jint) capacity;
 223   _saved_index = -1;
 224   _should_expand = false;
 225   return true;
 226 }
 227 
 228 void CMMarkStack::expand() {
 229   // Called, during remark, if we've overflown the marking stack during marking.
 230   assert(isEmpty(), "stack should have been emptied while handling overflow");
 231   assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
 232   // Clear expansion flag
 233   _should_expand = false;
 234   if (_capacity == (jint) MarkStackSizeMax) {
 235     if (PrintGCDetails && Verbose) {
 236       gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
 237     }
 238     return;
 239   }
 240   // Double capacity if possible
 241   jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
 242   // Do not give up existing stack until we have managed to
 243   // get the double capacity that we desired.
 244   ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
 245                                                            sizeof(oop)));
 246   if (rs.is_reserved()) {
 247     // Release the backing store associated with old stack
 248     _virtual_space.release();
 249     // Reinitialize virtual space for new stack
 250     if (!_virtual_space.initialize(rs, rs.size())) {
 251       fatal("Not enough swap for expanded marking stack capacity");
 252     }
 253     _base = (oop*)(_virtual_space.low());
 254     _index = 0;
 255     _capacity = new_capacity;
 256   } else {
 257     if (PrintGCDetails && Verbose) {
 258       // Failed to double capacity; continue.
 259       gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
 260                           SIZE_FORMAT "K to " SIZE_FORMAT "K",
 261                           _capacity / K, new_capacity / K);
 262     }
 263   }
 264 }
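
The ordering in expand() is deliberate: the larger backing store is reserved first, and the old one is released only after the new reservation has succeeded, so a failed expansion leaves the original stack intact. A minimal stand-alone sketch of that pattern, using plain malloc/free instead of the ReservedSpace/VirtualSpace machinery above:

    #include <cstdlib>
    #include <cstddef>

    // Sketch only: grow a buffer without losing the old backing store on failure,
    // mirroring the reserve-new-then-release-old order in CMMarkStack::expand().
    struct Stack {
      void*  base;
      size_t capacity;   // in bytes
      size_t index;
    };

    bool grow(Stack& s, size_t new_capacity) {
      void* fresh = std::malloc(new_capacity);  // try the larger allocation first
      if (fresh == NULL) {
        return false;                           // old storage untouched and still usable
      }
      std::free(s.base);                        // only now release the old store
      s.base = fresh;                           // the stack is known to be empty here
      s.capacity = new_capacity;
      s.index = 0;
      return true;
    }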
 265 
 266 void CMMarkStack::set_should_expand() {
 267   // If we're resetting the marking state because of a
 268   // marking stack overflow, record that we should, if
 269   // possible, expand the stack.
 270   _should_expand = _cm->has_overflown();
 271 }
 272 
 273 CMMarkStack::~CMMarkStack() {
 274   if (_base != NULL) {
 275     _base = NULL;
 276     _virtual_space.release();
 277   }
 278 }
 279 
 280 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
 281   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 282   jint start = _index;
 283   jint next_index = start + n;


 831     // just abort the whole marking phase as quickly as possible.
 832     return;
 833   }
 834 
 835   // If we're executing the concurrent phase of marking, reset the marking
 836   // state; otherwise the marking state is reset after reference processing,
 837   // during the remark pause.
 838   // If we reset here as a result of an overflow during the remark we will
 839   // see assertion failures from any subsequent set_concurrency_and_phase()
 840   // calls.
 841   if (concurrent()) {
 842     // let the task associated with worker 0 do this
 843     if (worker_id == 0) {
 844       // task 0 is responsible for clearing the global data structures
 845       // We should be here because of an overflow. During STW we should
 846       // not clear the overflow flag since we rely on it being true when
 847       // we exit this method to abort the pause and restart concurrent
 848       // marking.
 849       reset_marking_state(true /* clear_overflow */);
 850 
 851       if (G1Log::fine()) {
 852         gclog_or_tty->gclog_stamp();
 853         gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
 854       }
 855     }
 856   }
 857 
 858   // after this, each task should reset its own data structures and
 859   // then go into the second barrier
 860 }
 861 
 862 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
 863   SuspendibleThreadSetLeaver sts_leave(concurrent());
 864   _second_overflow_barrier_sync.enter();
 865 
 866   // at this point everything should be re-initialized and ready to go
 867 }
 868 
 869 class CMConcurrentMarkingTask: public AbstractGangTask {
 870 private:
 871   ConcurrentMark*       _cm;
 872   ConcurrentMarkThread* _cmt;
 873 
 874 public:


 970   ConcurrentMark* _cm;
 971 
 972 public:
 973   CMRootRegionScanTask(ConcurrentMark* cm) :
 974     AbstractGangTask("Root Region Scan"), _cm(cm) { }
 975 
 976   void work(uint worker_id) {
 977     assert(Thread::current()->is_ConcurrentGC_thread(),
 978            "this should only be done by a conc GC thread");
 979 
 980     CMRootRegions* root_regions = _cm->root_regions();
 981     HeapRegion* hr = root_regions->claim_next();
 982     while (hr != NULL) {
 983       _cm->scanRootRegion(hr, worker_id);
 984       hr = root_regions->claim_next();
 985     }
 986   }
 987 };
 988 
 989 void ConcurrentMark::scanRootRegions() {
 990   double scan_start = os::elapsedTime();
 991 
 992   // Start of concurrent marking.
 993   ClassLoaderDataGraph::clear_claimed_marks();
 994 
 995   // scan_in_progress() will have been set to true only if there was
 996   // at least one root region to scan. So, if it's false, we
 997   // should not attempt to do any further work.
 998   if (root_regions()->scan_in_progress()) {
 999     if (G1Log::fine()) {
1000       gclog_or_tty->gclog_stamp();
1001       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
1002     }
1003 
1004     _parallel_marking_threads = calc_parallel_marking_threads();
1005     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1006            "Maximum number of marking threads exceeded");
1007     uint active_workers = MAX2(1U, parallel_marking_threads());
1008 
1009     CMRootRegionScanTask task(this);
1010     _parallel_workers->set_active_workers(active_workers);
1011     _parallel_workers->run_task(&task);
1012 
1013     if (G1Log::fine()) {
1014       gclog_or_tty->gclog_stamp();
1015       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
1016     }
1017 
1018     // It's possible that has_aborted() is true here without actually
1019     // aborting the survivor scan earlier. This is OK as it's
1020     // mainly used for sanity checking.
1021     root_regions()->scan_finished();
1022   }
1023 }
1024 
1025 void ConcurrentMark::markFromRoots() {
1026   // we might be tempted to assert that:
1027   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1028   //        "inconsistent argument?");
1029   // However that wouldn't be right, because it's possible that
1030   // a safepoint is indeed in progress as a younger generation
1031   // stop-the-world GC happens even as we mark in this generation.
1032 
1033   _restart_for_overflow = false;
1034 
1035   // _g1h has _n_par_threads
1036   _parallel_marking_threads = calc_parallel_marking_threads();
1037   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1038     "Maximum number of marking threads exceeded");
1039 
1040   uint active_workers = MAX2(1U, parallel_marking_threads());
1041   assert(active_workers > 0, "Should have been set");
1042 
1043   // Parallel task terminator is set in "set_concurrency_and_phase()"
1044   set_concurrency_and_phase(active_workers, true /* concurrent */);
1045 
1046   CMConcurrentMarkingTask markingTask(this, cmThread());
1047   _parallel_workers->set_active_workers(active_workers);
1048   _parallel_workers->run_task(&markingTask);
1049   print_stats();
1050 }
1051 
1052 // Helper class to get rid of some boilerplate code.
1053 class G1CMTraceTime : public StackObj {
1054   GCTraceTimeImpl _gc_trace_time;
1055   static bool doit_and_prepend(bool doit) {
1056     if (doit) {
1057       gclog_or_tty->put(' ');
1058     }
1059     return doit;
1060   }
1061 
1062  public:
1063   G1CMTraceTime(const char* title, bool doit)
1064     : _gc_trace_time(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm()) {
1065   }
1066 };
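
The helper is meant to be instantiated as a scoped stack object, so the timed interval is simply the enclosing block; the call sites later in this file use it like this (copied from the aggregation step below):

    {
      G1CMTraceTime trace("GC aggregate-data", G1Log::finer());
      // Aggregate the per-task counting data accumulated while marking;
      // the time spent here is attributed to the "GC aggregate-data" label.
      aggregate_count_data();
    }

In the new version of the file this helper is gone and the same sites construct GCTraceTime(Debug, gc) directly.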
1067 
1068 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1069   // world is stopped at this checkpoint
1070   assert(SafepointSynchronize::is_at_safepoint(),
1071          "world should be stopped");
1072 
1073   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1074 
1075   // If a full collection has happened, we shouldn't do this.
1076   if (has_aborted()) {
1077     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1078     return;
1079   }
1080 
1081   SvcGCMarker sgcm(SvcGCMarker::OTHER);
1082 
1083   if (VerifyDuringGC) {
1084     HandleMark hm;  // handle scope
1085     g1h->prepare_for_verify();
1086     Universe::verify(VerifyOption_G1UsePrevMarking,
1087                      " VerifyDuringGC:(before)");
1088   }
1089   g1h->check_bitmaps("Remark Start");
1090 
1091   G1CollectorPolicy* g1p = g1h->g1_policy();
1092   g1p->record_concurrent_mark_remark_start();
1093 
1094   double start = os::elapsedTime();
1095 
1096   checkpointRootsFinalWork();
1097 
1098   double mark_work_end = os::elapsedTime();
1099 
1100   weakRefsWork(clear_all_soft_refs);
1101 
1102   if (has_overflown()) {
1103     // Oops.  We overflowed.  Restart concurrent marking.
1104     _restart_for_overflow = true;
1105     if (G1TraceMarkStackOverflow) {
1106       gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
1107     }
1108 
1109     // Verify the heap w.r.t. the previous marking bitmap.
1110     if (VerifyDuringGC) {
1111       HandleMark hm;  // handle scope
1112       g1h->prepare_for_verify();
1113       Universe::verify(VerifyOption_G1UsePrevMarking,
1114                        " VerifyDuringGC:(overflow)");
1115     }
1116 
1117     // Clear the marking state because we will be restarting
1118     // marking due to overflowing the global mark stack.
1119     reset_marking_state();
1120   } else {
1121     {
1122       G1CMTraceTime trace("GC aggregate-data", G1Log::finer());
1123 
1124       // Aggregate the per-task counting data that we have accumulated
1125       // while marking.
1126       aggregate_count_data();
1127     }
1128 
1129     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1130     // We're done with marking.
1131     // This is the end of the marking cycle; we expect all
1132     // threads to have SATB queues with active set to true.
1133     satb_mq_set.set_active_all_threads(false, /* new active value */
1134                                        true /* expected_active */);
1135 
1136     if (VerifyDuringGC) {
1137       HandleMark hm;  // handle scope
1138       g1h->prepare_for_verify();
1139       Universe::verify(VerifyOption_G1UseNextMarking,
1140                        " VerifyDuringGC:(after)");
1141     }
1142     g1h->check_bitmaps("Remark End");
1143     assert(!restart_for_overflow(), "sanity");
1144     // Completely reset the marking state since marking completed
1145     set_non_marking_state();
1146   }
1147 
1148   // Expand the marking stack, if we have to and if we can.
1149   if (_markStack.should_expand()) {
1150     _markStack.expand();
1151   }
1152 
1153   // Statistics
1154   double now = os::elapsedTime();
1155   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1156   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1157   _remark_times.add((now - start) * 1000.0);
1158 
1159   g1p->record_concurrent_mark_remark_end();
1160 


1639 
1640 };
1641 
1642 void ConcurrentMark::cleanup() {
1643   // world is stopped at this checkpoint
1644   assert(SafepointSynchronize::is_at_safepoint(),
1645          "world should be stopped");
1646   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1647 
1648   // If a full collection has happened, we shouldn't do this.
1649   if (has_aborted()) {
1650     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1651     return;
1652   }
1653 
1654   g1h->verify_region_sets_optional();
1655 
1656   if (VerifyDuringGC) {
1657     HandleMark hm;  // handle scope
1658     g1h->prepare_for_verify();
1659     Universe::verify(VerifyOption_G1UsePrevMarking,
1660                      " VerifyDuringGC:(before)");
1661   }
1662   g1h->check_bitmaps("Cleanup Start");
1663 
1664   G1CollectorPolicy* g1p = g1h->g1_policy();
1665   g1p->record_concurrent_mark_cleanup_start();
1666 
1667   double start = os::elapsedTime();
1668 
1669   HeapRegionRemSet::reset_for_cleanup_tasks();
1670 
1671   // Do counting once more with the world stopped for good measure.
1672   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1673 
1674   g1h->workers()->run_task(&g1_par_count_task);
1675 
1676   if (VerifyDuringGC) {
1677     // Verify that the counting data accumulated during marking matches
1678     // that calculated by walking the marking bitmap.
1679 
1680     // Bitmaps to hold expected values


1682     BitMap expected_card_bm(_card_bm.size(), true);
1683 
1684     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
1685                                                  &_region_bm,
1686                                                  &_card_bm,
1687                                                  &expected_region_bm,
1688                                                  &expected_card_bm);
1689 
1690     g1h->workers()->run_task(&g1_par_verify_task);
1691 
1692     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
1693   }
1694 
1695   size_t start_used_bytes = g1h->used();
1696   g1h->collector_state()->set_mark_in_progress(false);
1697 
1698   double count_end = os::elapsedTime();
1699   double this_final_counting_time = (count_end - start);
1700   _total_counting_time += this_final_counting_time;
1701 
1702   if (G1PrintRegionLivenessInfo) {
1703     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
1704     _g1h->heap_region_iterate(&cl);
1705   }
1706 
1707   // Install newly created mark bitMap as "prev".
1708   swapMarkBitMaps();
1709 
1710   g1h->reset_gc_time_stamp();
1711 
1712   uint n_workers = _g1h->workers()->active_workers();
1713 
1714   // Note end of marking in all heap regions.
1715   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
1716   g1h->workers()->run_task(&g1_par_note_end_task);
1717   g1h->check_gc_time_stamps();
1718 
1719   if (!cleanup_list_is_empty()) {
1720     // The cleanup list is not empty, so we'll have to process it
1721     // concurrently. Notify anyone else that might be wanting free
1722     // regions that there will be more free regions coming soon.
1723     g1h->set_free_regions_coming();


1726   // call below, since it affects the metric by which we sort the heap
1727   // regions.
1728   if (G1ScrubRemSets) {
1729     double rs_scrub_start = os::elapsedTime();
1730     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
1731     g1h->workers()->run_task(&g1_par_scrub_rs_task);
1732 
1733     double rs_scrub_end = os::elapsedTime();
1734     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
1735     _total_rs_scrub_time += this_rs_scrub_time;
1736   }
1737 
1738   // this will also free any regions totally full of garbage objects,
1739   // and sort the regions.
1740   g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1741 
1742   // Statistics.
1743   double end = os::elapsedTime();
1744   _cleanup_times.add((end - start) * 1000.0);
1745 
1746   if (G1Log::fine()) {
1747     g1h->g1_policy()->print_heap_transition(start_used_bytes);
1748   }
1749 
1750   // Clean up will have freed any regions completely full of garbage.
1751   // Update the soft reference policy with the new heap occupancy.
1752   Universe::update_heap_info_at_gc();
1753 
1754   if (VerifyDuringGC) {
1755     HandleMark hm;  // handle scope
1756     g1h->prepare_for_verify();
1757     Universe::verify(VerifyOption_G1UsePrevMarking,
1758                      " VerifyDuringGC:(after)");
1759   }
1760 
1761   g1h->check_bitmaps("Cleanup End");
1762 
1763   g1h->verify_region_sets_optional();
1764 
1765   // We need to make this be a "collection" so any collection pause that
1766   // races with it goes around and waits for completeCleanup to finish.
1767   g1h->increment_total_collections();
1768 
1769   // Clean out dead classes and update Metaspace sizes.
1770   if (ClassUnloadingWithConcurrentMark) {
1771     ClassLoaderDataGraph::purge();
1772   }
1773   MetaspaceGC::compute_new_size();
1774 
1775   // We reclaimed old regions so we should calculate the sizes to make
1776   // sure we update the old gen/space data.
1777   g1h->g1mm()->update_sizes();
1778   g1h->allocation_context_stats().update_after_mark();
1779 
1780   g1h->trace_heap_after_concurrent_cycle();
1781 }
1782 
1783 void ConcurrentMark::completeCleanup() {
1784   if (has_aborted()) return;
1785 
1786   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1787 
1788   _cleanup_list.verify_optional();
1789   FreeRegionList tmp_free_list("Tmp Free List");
1790 
1791   if (G1ConcRegionFreeingVerbose) {
1792     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
1793                            "cleanup list has %u entries",
1794                            _cleanup_list.length());
1795   }
1796 
1797   // No one else should be accessing the _cleanup_list at this point,
1798   // so it is not necessary to take any locks
1799   while (!_cleanup_list.is_empty()) {
1800     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
1801     assert(hr != NULL, "Got NULL from a non-empty list");
1802     hr->par_clear();
1803     tmp_free_list.add_ordered(hr);
1804 
1805     // Instead of adding one region at a time to the secondary_free_list,
1806     // we accumulate them in the local list and move them a few at a
1807     // time. This also cuts down on the number of notify_all() calls
1808     // we do during this process. We'll also append the local list when
1809     // _cleanup_list is empty (which means we just removed the last
1810     // region from the _cleanup_list).
1811     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
1812         _cleanup_list.is_empty()) {
1813       if (G1ConcRegionFreeingVerbose) {
1814         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
1815                                "appending %u entries to the secondary_free_list, "
1816                                "cleanup list still has %u entries",
1817                                tmp_free_list.length(),
1818                                _cleanup_list.length());
1819       }
1820 
1821       {
1822         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1823         g1h->secondary_free_list_add(&tmp_free_list);
1824         SecondaryFreeList_lock->notify_all();
1825       }
1826 #ifndef PRODUCT
1827       if (G1StressConcRegionFreeing) {
1828         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
1829           os::sleep(Thread::current(), (jlong) 1, false);
1830         }
1831       }
1832 #endif
1833     }
1834   }
1835   assert(tmp_free_list.is_empty(), "post-condition");
1836 }
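
The batching spelled out in the comments above (collect regions on a thread-local list, then append to the shared secondary_free_list every G1SecondaryFreeListAppendLength regions) is a general trick for cutting lock traffic and notify_all() calls. A minimal stand-alone sketch of the idea with ordinary standard-library types, not the G1 free-list classes:

    #include <mutex>
    #include <condition_variable>
    #include <vector>

    // Sketch only: drain a private worklist into a shared list in batches so the
    // lock is taken and waiters are notified once per batch, not once per element.
    // Assumes batch_len > 0.
    static std::mutex              shared_lock;
    static std::condition_variable shared_cv;
    static std::vector<int>        shared_list;   // stands in for secondary_free_list

    void drain_in_batches(std::vector<int>& work, size_t batch_len) {
      std::vector<int> local;                      // stands in for tmp_free_list
      while (!work.empty()) {
        local.push_back(work.back());
        work.pop_back();
        if (local.size() % batch_len == 0 || work.empty()) {
          std::lock_guard<std::mutex> x(shared_lock);
          shared_list.insert(shared_list.end(), local.begin(), local.end());
          local.clear();
          shared_cv.notify_all();                  // one wakeup per batch
        }
      }
    }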
1837 
1838 // Supporting Object and Oop closures for reference discovery
1839 // and processing during marking


2056     // Skip processing the discovered references if we have
2057     // overflown the global marking stack. Reference objects
2058     // only get discovered once so it is OK to not
2059     // de-populate the discovered reference lists. We could have,
2060     // but the only benefit would be that, when marking restarts,
2061     // less reference objects are discovered.
2062     return;
2063   }
2064 
2065   ResourceMark rm;
2066   HandleMark   hm;
2067 
2068   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2069 
2070   // Is alive closure.
2071   G1CMIsAliveClosure g1_is_alive(g1h);
2072 
2073   // Inner scope to exclude the cleaning of the string and symbol
2074   // tables from the displayed time.
2075   {
2076     G1CMTraceTime t("GC ref-proc", G1Log::finer());
2077 
2078     ReferenceProcessor* rp = g1h->ref_processor_cm();
2079 
2080     // See the comment in G1CollectedHeap::ref_processing_init()
2081     // about how reference processing currently works in G1.
2082 
2083     // Set the soft reference policy
2084     rp->setup_policy(clear_all_soft_refs);
2085     assert(_markStack.isEmpty(), "mark stack should be empty");
2086 
2087     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2088     // in serial reference processing. Note these closures are also
2089     // used for serially processing (by the current thread) the
2090     // JNI references during parallel reference processing.
2091     //
2092     // These closures do not need to synchronize with the worker
2093     // threads involved in parallel reference processing as these
2094     // instances are executed serially by the current thread (e.g.
2095     // reference processing is not multi-threaded and is thus
2096     // performed by the current thread instead of a gang worker).


2146       set_has_overflown();
2147     }
2148 
2149     assert(rp->num_q() == active_workers, "why not");
2150 
2151     rp->enqueue_discovered_references(executor);
2152 
2153     rp->verify_no_references_recorded();
2154     assert(!rp->discovery_enabled(), "Post condition");
2155   }
2156 
2157   if (has_overflown()) {
2158     // We cannot trust g1_is_alive if the marking stack overflowed
2159     return;
2160   }
2161 
2162   assert(_markStack.isEmpty(), "Marking should have completed");
2163 
2164   // Unload Klasses, String, Symbols, Code Cache, etc.
2165   {
2166     G1CMTraceTime trace("Unloading", G1Log::finer());
2167 
2168     if (ClassUnloadingWithConcurrentMark) {
2169       bool purged_classes;
2170 
2171       {
2172         G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
2173         purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
2174       }
2175 
2176       {
2177         G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
2178         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2179       }
2180     }
2181 
2182     if (G1StringDedup::is_enabled()) {
2183       G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
2184       G1StringDedup::unlink(&g1_is_alive);
2185     }
2186   }
2187 }
2188 
2189 void ConcurrentMark::swapMarkBitMaps() {
2190   CMBitMapRO* temp = _prevMarkBitMap;
2191   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2192   _nextMarkBitMap  = (CMBitMap*)  temp;
2193 }
2194 
2195 // Closure for marking entries in SATB buffers.
2196 class CMSATBBufferClosure : public SATBBufferClosure {
2197 private:
2198   CMTask* _task;
2199   G1CollectedHeap* _g1h;
2200 
2201   // This is very similar to CMTask::deal_with_reference, but with
2202   // more relaxed requirements for the argument, so this must be more
2203   // circumspect about treating the argument as an object.


2284                               true         /* do_termination       */,
2285                               false        /* is_serial            */);
2286       } while (task->has_aborted() && !_cm->has_overflown());
2287       // If we overflow, then we do not want to restart. We instead
2288       // want to abort remark and do concurrent marking again.
2289       task->record_end_time();
2290     }
2291   }
2292 
2293   CMRemarkTask(ConcurrentMark* cm, uint active_workers) :
2294     AbstractGangTask("Par Remark"), _cm(cm) {
2295     _cm->terminator()->reset_for_reuse(active_workers);
2296   }
2297 };
2298 
2299 void ConcurrentMark::checkpointRootsFinalWork() {
2300   ResourceMark rm;
2301   HandleMark   hm;
2302   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2303 
2304   G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2305 
2306   g1h->ensure_parsability(false);
2307 
2308   // this is remark, so we'll use up all active threads
2309   uint active_workers = g1h->workers()->active_workers();
2310   set_concurrency_and_phase(active_workers, false /* concurrent */);
2311   // Leave _parallel_marking_threads at its
2312   // value originally calculated in the ConcurrentMark
2313   // constructor and pass values of the active workers
2314   // through the gang in the task.
2315 
2316   {
2317     StrongRootsScope srs(active_workers);
2318 
2319     CMRemarkTask remarkTask(this, active_workers);
2320     // We will start all available threads, even if we decide that the
2321     // active_workers will be fewer. The extra ones will just bail out
2322     // immediately.
2323     g1h->workers()->run_task(&remarkTask);
2324   }


2597   // Clear the global region bitmap - it will be filled as part
2598   // of the final counting task.
2599   _region_bm.clear();
2600 
2601   uint max_regions = _g1h->max_regions();
2602   assert(_max_worker_id > 0, "uninitialized");
2603 
2604   for (uint i = 0; i < _max_worker_id; i += 1) {
2605     BitMap* task_card_bm = count_card_bitmap_for(i);
2606     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
2607 
2608     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
2609     assert(marked_bytes_array != NULL, "uninitialized");
2610 
2611     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
2612     task_card_bm->clear();
2613   }
2614 }
2615 
2616 void ConcurrentMark::print_stats() {
2617   if (G1MarkingVerboseLevel > 0) {
2618     gclog_or_tty->print_cr("---------------------------------------------------------------------");


2619     for (size_t i = 0; i < _active_tasks; ++i) {
2620       _tasks[i]->print_stats();
2621       gclog_or_tty->print_cr("---------------------------------------------------------------------");
2622     }
2623   }
2624 }
2625 
2626 // abandon current marking iteration due to a Full GC
2627 void ConcurrentMark::abort() {
2628   if (!cmThread()->during_cycle() || _has_aborted) {
2629     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2630     return;
2631   }
2632 
2633   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2634   // concurrent bitmap clearing.
2635   _nextMarkBitMap->clearAll();
2636 
2637   // Note we cannot clear the previous marking bitmap here
2638   // since VerifyDuringGC verifies the objects marked during
2639   // a full GC against the previous bitmap.
2640 
2641   // Clear the liveness counting data
2642   clear_all_count_data();


2646     _tasks[i]->clear_region_fields();
2647   }
2648   _first_overflow_barrier_sync.abort();
2649   _second_overflow_barrier_sync.abort();
2650   _has_aborted = true;
2651 
2652   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2653   satb_mq_set.abandon_partial_marking();
2654   // This can be called either during or outside marking; we'll read
2655   // the expected_active value from the SATB queue set.
2656   satb_mq_set.set_active_all_threads(
2657                                  false, /* new active value */
2658                                  satb_mq_set.is_active() /* expected_active */);
2659 
2660   _g1h->trace_heap_after_concurrent_cycle();
2661   _g1h->register_concurrent_cycle_end();
2662 }
2663 
2664 static void print_ms_time_info(const char* prefix, const char* name,
2665                                NumberSeq& ns) {
2666   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2667                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2668   if (ns.num() > 0) {
2669     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2670                            prefix, ns.sd(), ns.maximum());
2671   }
2672 }
2673 
2674 void ConcurrentMark::print_summary_info() {
2675   gclog_or_tty->print_cr(" Concurrent marking:");





2676   print_ms_time_info("  ", "init marks", _init_times);
2677   print_ms_time_info("  ", "remarks", _remark_times);
2678   {
2679     print_ms_time_info("     ", "final marks", _remark_mark_times);
2680     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
2681 
2682   }
2683   print_ms_time_info("  ", "cleanups", _cleanup_times);
2684   gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
2685                          _total_counting_time,
2686                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
2687                           (double)_cleanup_times.num()
2688                          : 0.0));
2689   if (G1ScrubRemSets) {
2690     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
2691                            _total_rs_scrub_time,
2692                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
2693                             (double)_cleanup_times.num()
2694                            : 0.0));
2695   }
2696   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
2697                          (_init_times.sum() + _remark_times.sum() +
2698                           _cleanup_times.sum())/1000.0);
2699   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
2700                 "(%8.2f s marking).",
2701                 cmThread()->vtime_accum(),
2702                 cmThread()->vtime_mark_accum());
2703 }
2704 
2705 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2706   _parallel_workers->print_worker_threads_on(st);
2707 }
2708 
2709 void ConcurrentMark::print_on_error(outputStream* st) const {
2710   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2711       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
2712   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
2713   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
2714 }
2715 
2716 // We take a break if someone is trying to stop the world.
2717 bool ConcurrentMark::do_yield_check(uint worker_id) {
2718   if (SuspendibleThreadSet::should_yield()) {
2719     if (worker_id == 0) {
2720       _g1h->g1_policy()->record_concurrent_pause();
2721     }
2722     SuspendibleThreadSet::yield();


3062 
3063   // This keeps claiming and applying the closure to completed buffers
3064   // until we run out of buffers or we need to abort.
3065   while (!has_aborted() &&
3066          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
3067     regular_clock_call();
3068   }
3069 
3070   _draining_satb_buffers = false;
3071 
3072   assert(has_aborted() ||
3073          concurrent() ||
3074          satb_mq_set.completed_buffers_num() == 0, "invariant");
3075 
3076   // again, this was a potentially expensive operation, decrease the
3077   // limits to get the regular clock call early
3078   decrease_limits();
3079 }
3080 
3081 void CMTask::print_stats() {
3082   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3083                          _worker_id, _calls);
3084   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3085                          _elapsed_time_ms, _termination_time_ms);
3086   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3087                          _step_times_ms.num(), _step_times_ms.avg(),
3088                          _step_times_ms.sd());
3089   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
3090                          _step_times_ms.maximum(), _step_times_ms.sum());
3091 }
3092 
3093 bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
3094   return _task_queues->steal(worker_id, hash_seed, obj);
3095 }
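
try_stealing() simply forwards to the shared task queue set. Conceptually, each marking task drains its own queue first and only then loops trying to steal from the other tasks' queues, which is the shape the do_marking_step() commentary below describes. A rough, hedged sketch of that drain-then-steal loop (generic code, not the actual do_marking_step body):

    #include <deque>
    #include <functional>

    // Sketch only: drain-local-then-steal loop typical of work-stealing marking.
    // 'steal' and 'process' are hypothetical stand-ins for the task-queue calls.
    void drain_and_steal(std::deque<int>& local,
                         const std::function<bool(int&)>& steal,
                         const std::function<void(int)>& process) {
      int item;
      for (;;) {
        while (!local.empty()) {            // 1. empty the task's own queue first
          item = local.front();
          local.pop_front();
          process(item);                    //    processing may push more work
        }
        if (!steal(item)) {                 // 2. local queue dry: try to steal
          break;                            // 3. nothing anywhere: go to termination
        }
        process(item);
      }
    }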
3096 
3097 /*****************************************************************************
3098 
3099     The do_marking_step(time_target_ms, ...) method is the building
3100     block of the parallel marking framework. It can be called in parallel
3101     with other invocations of do_marking_step() on different tasks
3102     (but only one per task, obviously) and concurrently with the
3103     mutator threads, or during remark, hence it eliminates the need
3104     for two versions of the code. When called during remark, it will
3105     pick up from where the task left off during the concurrent marking
3106     phase. Interestingly, tasks are also claimable during evacuation
3107     pauses too, since do_marking_step() ensures that it aborts before
3108     it needs to yield.
3109 


3570 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
3571 #else // _LP64
3572 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
3573 #endif // _LP64
3574 
3575 // For per-region info
3576 #define G1PPRL_TYPE_FORMAT            "   %-4s"
3577 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
3578 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
3579 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
3580 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
3581 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
3582 
3583 // For summary info
3584 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
3585 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
3586 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
3587 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
3588 
3589 G1PrintRegionLivenessInfoClosure::
3590 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
3591   : _out(out),
3592     _total_used_bytes(0), _total_capacity_bytes(0),
3593     _total_prev_live_bytes(0), _total_next_live_bytes(0),
3594     _hum_used_bytes(0), _hum_capacity_bytes(0),
3595     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
3596     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
3597   G1CollectedHeap* g1h = G1CollectedHeap::heap();
3598   MemRegion g1_reserved = g1h->g1_reserved();
3599   double now = os::elapsedTime();
3600 
3601   // Print the header of the output.
3602   _out->cr();
3603   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
3604   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
3605                  G1PPRL_SUM_ADDR_FORMAT("reserved")
3606                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
3607                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
3608                  HeapRegion::GrainBytes);
3609   _out->print_cr(G1PPRL_LINE_PREFIX);
3610   _out->print_cr(G1PPRL_LINE_PREFIX
3611                 G1PPRL_TYPE_H_FORMAT
3612                 G1PPRL_ADDR_BASE_H_FORMAT
3613                 G1PPRL_BYTE_H_FORMAT
3614                 G1PPRL_BYTE_H_FORMAT
3615                 G1PPRL_BYTE_H_FORMAT
3616                 G1PPRL_DOUBLE_H_FORMAT
3617                 G1PPRL_BYTE_H_FORMAT
3618                 G1PPRL_BYTE_H_FORMAT,
3619                 "type", "address-range",
3620                 "used", "prev-live", "next-live", "gc-eff",
3621                 "remset", "code-roots");
3622   _out->print_cr(G1PPRL_LINE_PREFIX
3623                 G1PPRL_TYPE_H_FORMAT
3624                 G1PPRL_ADDR_BASE_H_FORMAT
3625                 G1PPRL_BYTE_H_FORMAT
3626                 G1PPRL_BYTE_H_FORMAT
3627                 G1PPRL_BYTE_H_FORMAT
3628                 G1PPRL_DOUBLE_H_FORMAT
3629                 G1PPRL_BYTE_H_FORMAT
3630                 G1PPRL_BYTE_H_FORMAT,
3631                 "", "",
3632                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
3633                 "(bytes)", "(bytes)");
3634 }
3635 
3636 // It takes as a parameter a reference to one of the _hum_* fields; it
3637 // deduces the corresponding value for a region in a humongous region
3638 // series (either the region size, or what's left if the _hum_* field
3639 // is < the region size), and updates the _hum_* field accordingly.
3640 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
3641   size_t bytes = 0;
3642   // The > 0 check is to deal with the prev and next live bytes which


3684     _hum_used_bytes      = used_bytes;
3685     _hum_prev_live_bytes = prev_live_bytes;
3686     _hum_next_live_bytes = next_live_bytes;
3687     get_hum_bytes(&used_bytes, &capacity_bytes,
3688                   &prev_live_bytes, &next_live_bytes);
3689     end = bottom + HeapRegion::GrainWords;
3690   } else if (r->is_continues_humongous()) {
3691     get_hum_bytes(&used_bytes, &capacity_bytes,
3692                   &prev_live_bytes, &next_live_bytes);
3693     assert(end == bottom + HeapRegion::GrainWords, "invariant");
3694   }
3695 
3696   _total_used_bytes      += used_bytes;
3697   _total_capacity_bytes  += capacity_bytes;
3698   _total_prev_live_bytes += prev_live_bytes;
3699   _total_next_live_bytes += next_live_bytes;
3700   _total_remset_bytes    += remset_bytes;
3701   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3702 
3703   // Print a line for this particular region.
3704   _out->print_cr(G1PPRL_LINE_PREFIX
3705                  G1PPRL_TYPE_FORMAT
3706                  G1PPRL_ADDR_BASE_FORMAT
3707                  G1PPRL_BYTE_FORMAT
3708                  G1PPRL_BYTE_FORMAT
3709                  G1PPRL_BYTE_FORMAT
3710                  G1PPRL_DOUBLE_FORMAT
3711                  G1PPRL_BYTE_FORMAT
3712                  G1PPRL_BYTE_FORMAT,
3713                  type, p2i(bottom), p2i(end),
3714                  used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3715                  remset_bytes, strong_code_roots_bytes);
3716 
3717   return false;
3718 }
3719 
3720 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3721   // add static memory usages to remembered set sizes
3722   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3723   // Print the footer of the output.
3724   _out->print_cr(G1PPRL_LINE_PREFIX);
3725   _out->print_cr(G1PPRL_LINE_PREFIX
3726                  " SUMMARY"
3727                  G1PPRL_SUM_MB_FORMAT("capacity")
3728                  G1PPRL_SUM_MB_PERC_FORMAT("used")
3729                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3730                  G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3731                  G1PPRL_SUM_MB_FORMAT("remset")
3732                  G1PPRL_SUM_MB_FORMAT("code-roots"),
3733                  bytes_to_mb(_total_capacity_bytes),
3734                  bytes_to_mb(_total_used_bytes),
3735                  perc(_total_used_bytes, _total_capacity_bytes),
3736                  bytes_to_mb(_total_prev_live_bytes),
3737                  perc(_total_prev_live_bytes, _total_capacity_bytes),
3738                  bytes_to_mb(_total_next_live_bytes),
3739                  perc(_total_next_live_bytes, _total_capacity_bytes),
3740                  bytes_to_mb(_total_remset_bytes),
3741                  bytes_to_mb(_total_strong_code_roots_bytes));
3742   _out->cr();
3743 }


  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/g1/concurrentMark.inline.hpp"
  30 #include "gc/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc/g1/g1CollectedHeap.inline.hpp"
  32 #include "gc/g1/g1CollectorPolicy.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"


  34 #include "gc/g1/g1OopClosures.inline.hpp"
  35 #include "gc/g1/g1RemSet.hpp"
  36 #include "gc/g1/g1StringDedup.hpp"
  37 #include "gc/g1/heapRegion.inline.hpp"
  38 #include "gc/g1/heapRegionManager.inline.hpp"
  39 #include "gc/g1/heapRegionRemSet.hpp"
  40 #include "gc/g1/heapRegionSet.inline.hpp"
  41 #include "gc/g1/suspendibleThreadSet.hpp"
  42 #include "gc/shared/gcId.hpp"
  43 #include "gc/shared/gcTimer.hpp"
  44 #include "gc/shared/gcTrace.hpp"
  45 #include "gc/shared/gcTraceTime.inline.hpp"
  46 #include "gc/shared/genOopClosures.inline.hpp"
  47 #include "gc/shared/referencePolicy.hpp"
  48 #include "gc/shared/strongRootsScope.hpp"
  49 #include "gc/shared/taskqueue.inline.hpp"
  50 #include "gc/shared/vmGCOperations.hpp"
  51 #include "logging/log.hpp"
  52 #include "memory/allocation.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "oops/oop.inline.hpp"
  55 #include "runtime/atomic.inline.hpp"
  56 #include "runtime/handles.inline.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/prefetch.inline.hpp"
  59 #include "services/memTracker.hpp"
  60 
  61 // Concurrent marking bit map wrapper
  62 
  63 CMBitMapRO::CMBitMapRO(int shifter) :
  64   _bm(),
  65   _shifter(shifter) {
  66   _bmStartWord = 0;
  67   _bmWordSize = 0;
  68 }
  69 
  70 HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
  71                                                const HeapWord* limit) const {


 214     rs.release();
 215     return false;
 216   }
 217   assert(_virtual_space.committed_size() == rs.size(),
 218          "Didn't reserve backing store for all of ConcurrentMark stack?");
 219   _base = (oop*) _virtual_space.low();
 220   setEmpty();
 221   _capacity = (jint) capacity;
 222   _saved_index = -1;
 223   _should_expand = false;
 224   return true;
 225 }
 226 
 227 void CMMarkStack::expand() {
 228   // Called, during remark, if we've overflown the marking stack during marking.
 229   assert(isEmpty(), "stack should have been emptied while handling overflow");
 230   assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
 231   // Clear expansion flag
 232   _should_expand = false;
 233   if (_capacity == (jint) MarkStackSizeMax) {
 234     log_trace(gc)("(benign) Can't expand marking stack capacity, at max size limit");


 235     return;
 236   }
 237   // Double capacity if possible
 238   jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
 239   // Do not give up existing stack until we have managed to
 240   // get the double capacity that we desired.
 241   ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
 242                                                            sizeof(oop)));
 243   if (rs.is_reserved()) {
 244     // Release the backing store associated with old stack
 245     _virtual_space.release();
 246     // Reinitialize virtual space for new stack
 247     if (!_virtual_space.initialize(rs, rs.size())) {
 248       fatal("Not enough swap for expanded marking stack capacity");
 249     }
 250     _base = (oop*)(_virtual_space.low());
 251     _index = 0;
 252     _capacity = new_capacity;
 253   } else {

 254     // Failed to double capacity; continue.
 255     log_trace(gc)("(benign) Failed to expand marking stack capacity from " SIZE_FORMAT "K to " SIZE_FORMAT "K",

 256                   _capacity / K, new_capacity / K);
 257   }

 258 }
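
This hunk shows the conversion pattern that repeats through the rest of the patch: a print guarded by explicit flag checks on gclog_or_tty becomes a unified-logging call, with the level test folded into the macro. Side by side, using the lines visible in this change:

    // Before: explicit flag check plus direct stream write.
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
    }

    // After: the level/tag check is implicit in the unified-logging macro.
    log_trace(gc)("(benign) Can't expand marking stack capacity, at max size limit");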
 259 
 260 void CMMarkStack::set_should_expand() {
 261   // If we're resetting the marking state because of a
 262   // marking stack overflow, record that we should, if
 263   // possible, expand the stack.
 264   _should_expand = _cm->has_overflown();
 265 }
 266 
 267 CMMarkStack::~CMMarkStack() {
 268   if (_base != NULL) {
 269     _base = NULL;
 270     _virtual_space.release();
 271   }
 272 }
 273 
 274 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
 275   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 276   jint start = _index;
 277   jint next_index = start + n;


 825     // just abort the whole marking phase as quickly as possible.
 826     return;
 827   }
 828 
 829   // If we're executing the concurrent phase of marking, reset the marking
 830   // state; otherwise the marking state is reset after reference processing,
 831   // during the remark pause.
 832   // If we reset here as a result of an overflow during the remark we will
 833   // see assertion failures from any subsequent set_concurrency_and_phase()
 834   // calls.
 835   if (concurrent()) {
 836     // let the task associated with worker 0 do this
 837     if (worker_id == 0) {
 838       // task 0 is responsible for clearing the global data structures
 839       // We should be here because of an overflow. During STW we should
 840       // not clear the overflow flag since we rely on it being true when
 841       // we exit this method to abort the pause and restart concurrent
 842       // marking.
 843       reset_marking_state(true /* clear_overflow */);
 844 
 845       log_info(gc)("Concurrent Mark reset for overflow");



 846     }
 847   }
 848 
 849   // after this, each task should reset its own data structures and
 850   // then go into the second barrier
 851 }
 852 
 853 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
 854   SuspendibleThreadSetLeaver sts_leave(concurrent());
 855   _second_overflow_barrier_sync.enter();
 856 
 857   // at this point everything should be re-initialized and ready to go
 858 }
 859 
 860 class CMConcurrentMarkingTask: public AbstractGangTask {
 861 private:
 862   ConcurrentMark*       _cm;
 863   ConcurrentMarkThread* _cmt;
 864 
 865 public:


 961   ConcurrentMark* _cm;
 962 
 963 public:
 964   CMRootRegionScanTask(ConcurrentMark* cm) :
 965     AbstractGangTask("Root Region Scan"), _cm(cm) { }
 966 
 967   void work(uint worker_id) {
 968     assert(Thread::current()->is_ConcurrentGC_thread(),
 969            "this should only be done by a conc GC thread");
 970 
 971     CMRootRegions* root_regions = _cm->root_regions();
 972     HeapRegion* hr = root_regions->claim_next();
 973     while (hr != NULL) {
 974       _cm->scanRootRegion(hr, worker_id);
 975       hr = root_regions->claim_next();
 976     }
 977   }
 978 };
 979 
 980 void ConcurrentMark::scanRootRegions() {


 981   // Start of concurrent marking.
 982   ClassLoaderDataGraph::clear_claimed_marks();
 983 
 984   // scan_in_progress() will have been set to true only if there was
 985   // at least one root region to scan. So, if it's false, we
 986   // should not attempt to do any further work.
 987   if (root_regions()->scan_in_progress()) {
 988     GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan");



 989 
 990     _parallel_marking_threads = calc_parallel_marking_threads();
 991     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
 992            "Maximum number of marking threads exceeded");
 993     uint active_workers = MAX2(1U, parallel_marking_threads());
 994 
 995     CMRootRegionScanTask task(this);
 996     _parallel_workers->set_active_workers(active_workers);
 997     _parallel_workers->run_task(&task);
 998 





 999     // It's possible that has_aborted() is true here without actually
1000     // aborting the survivor scan earlier. This is OK as it's
1001     // mainly used for sanity checking.
1002     root_regions()->scan_finished();
1003   }
1004 }
1005 
1006 void ConcurrentMark::markFromRoots() {
1007   // we might be tempted to assert that:
1008   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1009   //        "inconsistent argument?");
1010   // However that wouldn't be right, because it's possible that
1011   // a safepoint is indeed in progress as a younger generation
1012   // stop-the-world GC happens even as we mark in this generation.
1013 
1014   _restart_for_overflow = false;
1015 
1016   // _g1h has _n_par_threads
1017   _parallel_marking_threads = calc_parallel_marking_threads();
1018   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1019     "Maximum number of marking threads exceeded");
1020 
1021   uint active_workers = MAX2(1U, parallel_marking_threads());
1022   assert(active_workers > 0, "Should have been set");
1023 
1024   // Parallel task terminator is set in "set_concurrency_and_phase()"
1025   set_concurrency_and_phase(active_workers, true /* concurrent */);
1026 
1027   CMConcurrentMarkingTask markingTask(this, cmThread());
1028   _parallel_workers->set_active_workers(active_workers);
1029   _parallel_workers->run_task(&markingTask);
1030   print_stats();
1031 }
1032 
















1033 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1034   // world is stopped at this checkpoint
1035   assert(SafepointSynchronize::is_at_safepoint(),
1036          "world should be stopped");
1037 
1038   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1039 
1040   // If a full collection has happened, we shouldn't do this.
1041   if (has_aborted()) {
1042     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1043     return;
1044   }
1045 
1046   SvcGCMarker sgcm(SvcGCMarker::OTHER);
1047 
1048   if (VerifyDuringGC) {
1049     HandleMark hm;  // handle scope
1050     g1h->prepare_for_verify();
1051     Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");

1052   }
1053   g1h->check_bitmaps("Remark Start");
1054 
1055   G1CollectorPolicy* g1p = g1h->g1_policy();
1056   g1p->record_concurrent_mark_remark_start();
1057 
1058   double start = os::elapsedTime();
1059 
1060   checkpointRootsFinalWork();
1061 
1062   double mark_work_end = os::elapsedTime();
1063 
1064   weakRefsWork(clear_all_soft_refs);
1065 
1066   if (has_overflown()) {
1067     // Oops.  We overflowed.  Restart concurrent marking.
1068     _restart_for_overflow = true;
1069     log_develop_trace(gc)("Remark led to restart for overflow.");


1070 
1071     // Verify the heap w.r.t. the previous marking bitmap.
1072     if (VerifyDuringGC) {
1073       HandleMark hm;  // handle scope
1074       g1h->prepare_for_verify();
1075       Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");

1076     }
1077 
1078     // Clear the marking state because we will be restarting
1079     // marking due to overflowing the global mark stack.
1080     reset_marking_state();
1081   } else {
1082     {
1083       GCTraceTime(Debug, gc) trace("GC Aggregate Data", g1h->gc_timer_cm());
1084 
1085       // Aggregate the per-task counting data that we have accumulated
1086       // while marking.
1087       aggregate_count_data();
1088     }
1089 
1090     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1091     // We're done with marking.
1092     // This is the end of the marking cycle; we expect all
1093     // threads to have SATB queues with active set to true.
1094     satb_mq_set.set_active_all_threads(false, /* new active value */
1095                                        true /* expected_active */);
1096 
1097     if (VerifyDuringGC) {
1098       HandleMark hm;  // handle scope
1099       g1h->prepare_for_verify();
1100       Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");

1101     }
1102     g1h->check_bitmaps("Remark End");
1103     assert(!restart_for_overflow(), "sanity");
1104     // Completely reset the marking state since marking completed
1105     set_non_marking_state();
1106   }
1107 
1108   // Expand the marking stack, if we have to and if we can.
1109   if (_markStack.should_expand()) {
1110     _markStack.expand();
1111   }
1112 
1113   // Statistics
1114   double now = os::elapsedTime();
1115   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1116   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1117   _remark_times.add((now - start) * 1000.0);
1118 
1119   g1p->record_concurrent_mark_remark_end();
1120 


1599 
1600 };
1601 
1602 void ConcurrentMark::cleanup() {
1603   // world is stopped at this checkpoint
1604   assert(SafepointSynchronize::is_at_safepoint(),
1605          "world should be stopped");
1606   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1607 
1608   // If a full collection has happened, we shouldn't do this.
1609   if (has_aborted()) {
1610     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1611     return;
1612   }
1613 
1614   g1h->verify_region_sets_optional();
1615 
1616   if (VerifyDuringGC) {
1617     HandleMark hm;  // handle scope
1618     g1h->prepare_for_verify();
1619     Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");

1620   }
1621   g1h->check_bitmaps("Cleanup Start");
1622 
1623   G1CollectorPolicy* g1p = g1h->g1_policy();
1624   g1p->record_concurrent_mark_cleanup_start();
1625 
1626   double start = os::elapsedTime();
1627 
1628   HeapRegionRemSet::reset_for_cleanup_tasks();
1629 
1630   // Do counting once more with the world stopped for good measure.
1631   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1632 
1633   g1h->workers()->run_task(&g1_par_count_task);
1634 
1635   if (VerifyDuringGC) {
1636     // Verify that the counting data accumulated during marking matches
1637     // that calculated by walking the marking bitmap.
1638 
1639     // Bitmaps to hold expected values


1641     BitMap expected_card_bm(_card_bm.size(), true);
1642 
1643     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
1644                                                  &_region_bm,
1645                                                  &_card_bm,
1646                                                  &expected_region_bm,
1647                                                  &expected_card_bm);
1648 
1649     g1h->workers()->run_task(&g1_par_verify_task);
1650 
1651     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
1652   }
1653 
1654   size_t start_used_bytes = g1h->used();
1655   g1h->collector_state()->set_mark_in_progress(false);
1656 
1657   double count_end = os::elapsedTime();
1658   double this_final_counting_time = (count_end - start);
1659   _total_counting_time += this_final_counting_time;
1660 
1661   if (log_is_enabled(Trace, gc, liveness)) {
1662     G1PrintRegionLivenessInfoClosure cl("Post-Marking");
1663     _g1h->heap_region_iterate(&cl);
1664   }
1665 
1666   // Install newly created mark bitMap as "prev".
1667   swapMarkBitMaps();
1668 
1669   g1h->reset_gc_time_stamp();
1670 
1671   uint n_workers = _g1h->workers()->active_workers();
1672 
1673   // Note end of marking in all heap regions.
1674   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
1675   g1h->workers()->run_task(&g1_par_note_end_task);
1676   g1h->check_gc_time_stamps();
1677 
1678   if (!cleanup_list_is_empty()) {
1679     // The cleanup list is not empty, so we'll have to process it
1680     // concurrently. Notify anyone else that might be wanting free
1681     // regions that there will be more free regions coming soon.
1682     g1h->set_free_regions_coming();


1685   // call below, since it affects the metric by which we sort the heap
1686   // regions.
1687   if (G1ScrubRemSets) {
1688     double rs_scrub_start = os::elapsedTime();
1689     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
1690     g1h->workers()->run_task(&g1_par_scrub_rs_task);
1691 
1692     double rs_scrub_end = os::elapsedTime();
1693     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
1694     _total_rs_scrub_time += this_rs_scrub_time;
1695   }
1696 
1697   // this will also free any regions totally full of garbage objects,
1698   // and sort the regions.
1699   g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1700 
1701   // Statistics.
1702   double end = os::elapsedTime();
1703   _cleanup_times.add((end - start) * 1000.0);
1704 




1705   // Cleanup will have freed any regions completely full of garbage.
1706   // Update the soft reference policy with the new heap occupancy.
1707   Universe::update_heap_info_at_gc();
1708 
1709   if (VerifyDuringGC) {
1710     HandleMark hm;  // handle scope
1711     g1h->prepare_for_verify();
1712     Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");

1713   }
1714 
1715   g1h->check_bitmaps("Cleanup End");
1716 
1717   g1h->verify_region_sets_optional();
1718 
1719   // We need to make this a "collection" so any collection pause that
1720   // races with it goes around and waits for completeCleanup to finish.
1721   g1h->increment_total_collections();
1722 
1723   // Clean out dead classes and update Metaspace sizes.
1724   if (ClassUnloadingWithConcurrentMark) {
1725     ClassLoaderDataGraph::purge();
1726   }
1727   MetaspaceGC::compute_new_size();
1728 
1729   // We reclaimed old regions so we should calculate the sizes to make
1730   // sure we update the old gen/space data.
1731   g1h->g1mm()->update_sizes();
1732   g1h->allocation_context_stats().update_after_mark();
1733 
1734   g1h->trace_heap_after_concurrent_cycle();
1735 }
1736 
1737 void ConcurrentMark::completeCleanup() {
1738   if (has_aborted()) return;
1739 
1740   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1741 
1742   _cleanup_list.verify_optional();
1743   FreeRegionList tmp_free_list("Tmp Free List");
1744 
1745   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "

1746                                   "cleanup list has %u entries",
1747                                   _cleanup_list.length());

1748 
1749   // No one else should be accessing the _cleanup_list at this point,
1750   // so it is not necessary to take any locks
1751   while (!_cleanup_list.is_empty()) {
1752     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
1753     assert(hr != NULL, "Got NULL from a non-empty list");
1754     hr->par_clear();
1755     tmp_free_list.add_ordered(hr);
1756 
1757     // Instead of adding one region at a time to the secondary_free_list,
1758     // we accumulate them in the local list and move them a few at a
1759     // time. This also cuts down on the number of notify_all() calls
1760     // we do during this process. We'll also append the local list when
1761     // _cleanup_list is empty (which means we just removed the last
1762     // region from the _cleanup_list).
1763     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
1764         _cleanup_list.is_empty()) {
1765       log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "

1766                                       "appending %u entries to the secondary_free_list, "
1767                                       "cleanup list still has %u entries",
1768                                       tmp_free_list.length(),
1769                                       _cleanup_list.length());

1770 
1771       {
1772         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1773         g1h->secondary_free_list_add(&tmp_free_list);
1774         SecondaryFreeList_lock->notify_all();
1775       }
1776 #ifndef PRODUCT
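           // Develop-time stress hook: sleep for one millisecond,
           // G1StressConcRegionFreeingDelayMillis times, to artificially slow
           // down concurrent region freeing.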
1777       if (G1StressConcRegionFreeing) {
1778         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
1779           os::sleep(Thread::current(), (jlong) 1, false);
1780         }
1781       }
1782 #endif
1783     }
1784   }
1785   assert(tmp_free_list.is_empty(), "post-condition");
1786 }
1787 
1788 // Supporting Object and Oop closures for reference discovery
1789 // and processing during marking


2006     // Skip processing the discovered references if we have
2007     // overflown the global marking stack. Reference objects
2008     // only get discovered once so it is OK to not
2009     // de-populate the discovered reference lists. We could have done so,
2010     // but the only benefit would be that, when marking restarts,
2011     // fewer reference objects are discovered.
2012     return;
2013   }
2014 
2015   ResourceMark rm;
2016   HandleMark   hm;
2017 
2018   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2019 
2020   // Is alive closure.
2021   G1CMIsAliveClosure g1_is_alive(g1h);
2022 
2023   // Inner scope to exclude the cleaning of the string and symbol
2024   // tables from the displayed time.
2025   {
2026     GCTraceTime(Debug, gc) trace("GC Ref Proc", g1h->gc_timer_cm());
2027 
2028     ReferenceProcessor* rp = g1h->ref_processor_cm();
2029 
2030     // See the comment in G1CollectedHeap::ref_processing_init()
2031     // about how reference processing currently works in G1.
2032 
2033     // Set the soft reference policy
2034     rp->setup_policy(clear_all_soft_refs);
2035     assert(_markStack.isEmpty(), "mark stack should be empty");
2036 
2037     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2038     // in serial reference processing. Note these closures are also
2039     // used for serially processing (by the current thread) the
2040     // JNI references during parallel reference processing.
2041     //
2042     // These closures do not need to synchronize with the worker
2043     // threads involved in parallel reference processing as these
2044     // instances are executed serially by the current thread (i.e.
2045     // reference processing is not multi-threaded and is thus
2046     // performed by the current thread instead of a gang worker).


2096       set_has_overflown();
2097     }
2098 
2099     assert(rp->num_q() == active_workers, "Each active worker should have its own discovered-reference queue");
2100 
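         // Move the discovered references over to the pending list; the Java
         // reference handler thread will take it from there.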
2101     rp->enqueue_discovered_references(executor);
2102 
2103     rp->verify_no_references_recorded();
2104     assert(!rp->discovery_enabled(), "Post condition");
2105   }
2106 
2107   if (has_overflown()) {
2108     // We cannot trust g1_is_alive if the marking stack overflowed
2109     return;
2110   }
2111 
2112   assert(_markStack.isEmpty(), "Marking should have completed");
2113 
2114   // Unload Klasses, String, Symbols, Code Cache, etc.
2115   {
2116     GCTraceTime(Debug, gc) trace("Unloading", g1h->gc_timer_cm());
2117 
2118     if (ClassUnloadingWithConcurrentMark) {
2119       bool purged_classes;
2120 
2121       {
2122         GCTraceTime(Trace, gc) trace("System Dictionary Unloading", g1h->gc_timer_cm());
2123         purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
2124       }
2125 
2126       {
2127         GCTraceTime(Trace, gc) trace("Parallel Unloading", g1h->gc_timer_cm());
2128         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2129       }
2130     }
2131 
2132     if (G1StringDedup::is_enabled()) {
2133       GCTraceTime(Trace, gc) trace("String Deduplication Unlink", g1h->gc_timer_cm());
2134       G1StringDedup::unlink(&g1_is_alive);
2135     }
2136   }
2137 }
2138 
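     // The "next" bitmap now holds the liveness information computed by the
     // marking cycle that just completed; swapping the two pointers makes it
     // the new "prev" bitmap and recycles the old one for the next cycle.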
2139 void ConcurrentMark::swapMarkBitMaps() {
2140   CMBitMapRO* temp = _prevMarkBitMap;
2141   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2142   _nextMarkBitMap  = (CMBitMap*)  temp;
2143 }
2144 
2145 // Closure for marking entries in SATB buffers.
2146 class CMSATBBufferClosure : public SATBBufferClosure {
2147 private:
2148   CMTask* _task;
2149   G1CollectedHeap* _g1h;
2150 
2151   // This is very similar to CMTask::deal_with_reference, but with
2152   // more relaxed requirements for the argument, so this must be more
2153   // circumspect about treating the argument as an object.


2234                               true         /* do_termination       */,
2235                               false        /* is_serial            */);
2236       } while (task->has_aborted() && !_cm->has_overflown());
2237       // If we overflow, then we do not want to restart. We instead
2238       // want to abort remark and do concurrent marking again.
2239       task->record_end_time();
2240     }
2241   }
2242 
2243   CMRemarkTask(ConcurrentMark* cm, uint active_workers) :
2244     AbstractGangTask("Par Remark"), _cm(cm) {
2245     _cm->terminator()->reset_for_reuse(active_workers);
2246   }
2247 };
2248 
2249 void ConcurrentMark::checkpointRootsFinalWork() {
2250   ResourceMark rm;
2251   HandleMark   hm;
2252   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2253 
2254   GCTraceTime(Debug, gc) trace("Finalize Marking", g1h->gc_timer_cm());
2255 
2256   g1h->ensure_parsability(false);
2257 
2258   // this is remark, so we'll use up all active threads
2259   uint active_workers = g1h->workers()->active_workers();
2260   set_concurrency_and_phase(active_workers, false /* concurrent */);
2261   // Leave _parallel_marking_threads at its
2262   // value originally calculated in the ConcurrentMark
2263   // constructor and pass values of the active workers
2264   // through the gang in the task.
2265 
2266   {
2267     StrongRootsScope srs(active_workers);
2268 
2269     CMRemarkTask remarkTask(this, active_workers);
2270     // We will start all available threads, even if we decide that the
2271     // active_workers will be fewer. The extra ones will just bail out
2272     // immediately.
2273     g1h->workers()->run_task(&remarkTask);
2274   }


2547   // Clear the global region bitmap - it will be filled as part
2548   // of the final counting task.
2549   _region_bm.clear();
2550 
2551   uint max_regions = _g1h->max_regions();
2552   assert(_max_worker_id > 0, "uninitialized");
2553 
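       // Each worker owns a private card bitmap and a per-region marked-bytes
       // array; reset both so the next cycle's liveness counting starts from
       // scratch.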
2554   for (uint i = 0; i < _max_worker_id; i += 1) {
2555     BitMap* task_card_bm = count_card_bitmap_for(i);
2556     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
2557 
2558     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
2559     assert(marked_bytes_array != NULL, "uninitialized");
2560 
2561     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
2562     task_card_bm->clear();
2563   }
2564 }
2565 
2566 void ConcurrentMark::print_stats() {
2567   if (!log_is_enabled(Debug, gc, stats)) {
2568     return;
2569   }
2570   log_debug(gc, stats)("---------------------------------------------------------------------");
2571   for (size_t i = 0; i < _active_tasks; ++i) {
2572     _tasks[i]->print_stats();
2573     log_debug(gc, stats)("---------------------------------------------------------------------");

2574   }
2575 }
2576 
2577 // abandon current marking iteration due to a Full GC
2578 void ConcurrentMark::abort() {
2579   if (!cmThread()->during_cycle() || _has_aborted) {
2580     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2581     return;
2582   }
2583 
2584   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2585   // concurrent bitmap clearing.
2586   _nextMarkBitMap->clearAll();
2587 
2588   // Note we cannot clear the previous marking bitmap here
2589   // since VerifyDuringGC verifies the objects marked during
2590   // a full GC against the previous bitmap.
2591 
2592   // Clear the liveness counting data
2593   clear_all_count_data();


2597     _tasks[i]->clear_region_fields();
2598   }
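       // Abort the overflow barrier syncs so that any workers currently
       // waiting on them are released instead of blocking.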
2599   _first_overflow_barrier_sync.abort();
2600   _second_overflow_barrier_sync.abort();
2601   _has_aborted = true;
2602 
2603   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2604   satb_mq_set.abandon_partial_marking();
2605   // This can be called either during or outside marking; we'll read
2606   // the expected_active value from the SATB queue set.
2607   satb_mq_set.set_active_all_threads(
2608                                  false, /* new active value */
2609                                  satb_mq_set.is_active() /* expected_active */);
2610 
2611   _g1h->trace_heap_after_concurrent_cycle();
2612   _g1h->register_concurrent_cycle_end();
2613 }
2614 
2615 static void print_ms_time_info(const char* prefix, const char* name,
2616                                NumberSeq& ns) {
2617   log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2618                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2619   if (ns.num() > 0) {
2620     log_trace(gc, marking)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2621                            prefix, ns.sd(), ns.maximum());
2622   }
2623 }
2624 
2625 void ConcurrentMark::print_summary_info() {
2626   LogHandle(gc, marking) log;
2627   if (!log.is_trace()) {
2628     return;
2629   }
2630 
2631   log.trace(" Concurrent marking:");
2632   print_ms_time_info("  ", "init marks", _init_times);
2633   print_ms_time_info("  ", "remarks", _remark_times);
2634   {
2635     print_ms_time_info("     ", "final marks", _remark_mark_times);
2636     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
2637 
2638   }
2639   print_ms_time_info("  ", "cleanups", _cleanup_times);
2640   log.trace("    Final counting total time = %8.2f s (avg = %8.2f ms).",
2641             _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0));



2642   if (G1ScrubRemSets) {
2643     log.trace("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
2644               _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2645   }
2646   log.trace("  Total stop_world time = %8.2f s.",
2647             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2648   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
2649             cmThread()->vtime_accum(), cmThread()->vtime_mark_accum());






2650 }
2651 
2652 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2653   _parallel_workers->print_worker_threads_on(st);
2654 }
2655 
2656 void ConcurrentMark::print_on_error(outputStream* st) const {
2657   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2658       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
2659   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
2660   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
2661 }
2662 
2663 // We take a break if someone is trying to stop the world.
2664 bool ConcurrentMark::do_yield_check(uint worker_id) {
2665   if (SuspendibleThreadSet::should_yield()) {
2666     if (worker_id == 0) {
2667       _g1h->g1_policy()->record_concurrent_pause();
2668     }
2669     SuspendibleThreadSet::yield();


3009 
3010   // This keeps claiming and applying the closure to completed buffers
3011   // until we run out of buffers or we need to abort.
3012   while (!has_aborted() &&
3013          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
3014     regular_clock_call();
3015   }
3016 
3017   _draining_satb_buffers = false;
3018 
3019   assert(has_aborted() ||
3020          concurrent() ||
3021          satb_mq_set.completed_buffers_num() == 0, "invariant");
3022 
3023   // again, this was a potentially expensive operation, so decrease the
3024   // limits to get the regular clock call early
3025   decrease_limits();
3026 }
3027 
3028 void CMTask::print_stats() {
3029   log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
3030                        _worker_id, _calls);
3031   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3032                        _elapsed_time_ms, _termination_time_ms);
3033   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3034                        _step_times_ms.num(), _step_times_ms.avg(),
3035                        _step_times_ms.sd());
3036   log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
3037                        _step_times_ms.maximum(), _step_times_ms.sum());
3038 }
3039 
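     // Try to steal an entry from another task's queue via the shared task
     // queue set; on success the stolen entry is returned in obj.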
3040 bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
3041   return _task_queues->steal(worker_id, hash_seed, obj);
3042 }
3043 
3044 /*****************************************************************************
3045 
3046     The do_marking_step(time_target_ms, ...) method is the building
3047     block of the parallel marking framework. It can be called in parallel
3048     with other invocations of do_marking_step() on different tasks
3049     (but only one per task, obviously) and concurrently with the
3050     mutator threads, or during remark, hence it eliminates the need
3051     for two versions of the code. When called during remark, it will
3052     pick up from where the task left off during the concurrent marking
3053     phase. Interestingly, tasks are also claimable during evacuation
3054     pauses, since do_marking_step() ensures that it aborts before
3055     it needs to yield.
3056 


3517 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
3518 #else // _LP64
3519 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
3520 #endif // _LP64
3521 
3522 // For per-region info
3523 #define G1PPRL_TYPE_FORMAT            "   %-4s"
3524 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
3525 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
3526 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
3527 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
3528 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
3529 
3530 // For summary info
3531 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
3532 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
3533 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
3534 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
3535 
3536 G1PrintRegionLivenessInfoClosure::
3537 G1PrintRegionLivenessInfoClosure(const char* phase_name)
3538   : _total_used_bytes(0), _total_capacity_bytes(0),

3539     _total_prev_live_bytes(0), _total_next_live_bytes(0),
3540     _hum_used_bytes(0), _hum_capacity_bytes(0),
3541     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
3542     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
3543   G1CollectedHeap* g1h = G1CollectedHeap::heap();
3544   MemRegion g1_reserved = g1h->g1_reserved();
3545   double now = os::elapsedTime();
3546 
3547   // Print the header of the output.
3548   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
3549   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"

3550                           G1PPRL_SUM_ADDR_FORMAT("reserved")
3551                           G1PPRL_SUM_BYTE_FORMAT("region-size"),
3552                           p2i(g1_reserved.start()), p2i(g1_reserved.end()),
3553                           HeapRegion::GrainBytes);
3554   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3555   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3556                           G1PPRL_TYPE_H_FORMAT
3557                           G1PPRL_ADDR_BASE_H_FORMAT
3558                           G1PPRL_BYTE_H_FORMAT
3559                           G1PPRL_BYTE_H_FORMAT
3560                           G1PPRL_BYTE_H_FORMAT
3561                           G1PPRL_DOUBLE_H_FORMAT
3562                           G1PPRL_BYTE_H_FORMAT
3563                           G1PPRL_BYTE_H_FORMAT,
3564                           "type", "address-range",
3565                           "used", "prev-live", "next-live", "gc-eff",
3566                           "remset", "code-roots");
3567   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3568                           G1PPRL_TYPE_H_FORMAT
3569                           G1PPRL_ADDR_BASE_H_FORMAT
3570                           G1PPRL_BYTE_H_FORMAT
3571                           G1PPRL_BYTE_H_FORMAT
3572                           G1PPRL_BYTE_H_FORMAT
3573                           G1PPRL_DOUBLE_H_FORMAT
3574                           G1PPRL_BYTE_H_FORMAT
3575                           G1PPRL_BYTE_H_FORMAT,
3576                           "", "",
3577                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
3578                           "(bytes)", "(bytes)");
3579 }
3580 
3581 // It takes as a parameter a reference to one of the _hum_* fields, deduces
3582 // the corresponding value for a region in a humongous region
3583 // series (either the region size, or what's left if the _hum_* field
3584 // is < the region size), and updates the _hum_* field accordingly.
3585 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
3586   size_t bytes = 0;
3587   // The > 0 check is to deal with the prev and next live bytes which


3629     _hum_used_bytes      = used_bytes;
3630     _hum_prev_live_bytes = prev_live_bytes;
3631     _hum_next_live_bytes = next_live_bytes;
3632     get_hum_bytes(&used_bytes, &capacity_bytes,
3633                   &prev_live_bytes, &next_live_bytes);
3634     end = bottom + HeapRegion::GrainWords;
3635   } else if (r->is_continues_humongous()) {
3636     get_hum_bytes(&used_bytes, &capacity_bytes,
3637                   &prev_live_bytes, &next_live_bytes);
3638     assert(end == bottom + HeapRegion::GrainWords, "invariant");
3639   }
3640 
3641   _total_used_bytes      += used_bytes;
3642   _total_capacity_bytes  += capacity_bytes;
3643   _total_prev_live_bytes += prev_live_bytes;
3644   _total_next_live_bytes += next_live_bytes;
3645   _total_remset_bytes    += remset_bytes;
3646   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3647 
3648   // Print a line for this particular region.
3649   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3650                           G1PPRL_TYPE_FORMAT
3651                           G1PPRL_ADDR_BASE_FORMAT
3652                           G1PPRL_BYTE_FORMAT
3653                           G1PPRL_BYTE_FORMAT
3654                           G1PPRL_BYTE_FORMAT
3655                           G1PPRL_DOUBLE_FORMAT
3656                           G1PPRL_BYTE_FORMAT
3657                           G1PPRL_BYTE_FORMAT,
3658                           type, p2i(bottom), p2i(end),
3659                           used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3660                           remset_bytes, strong_code_roots_bytes);
3661 
3662   return false;
3663 }
3664 
3665 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3667   // add static memory usage to the remembered set sizes
3667   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3668   // Print the footer of the output.
3669   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3670   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3671                          " SUMMARY"
3672                          G1PPRL_SUM_MB_FORMAT("capacity")
3673                          G1PPRL_SUM_MB_PERC_FORMAT("used")
3674                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3675                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3676                          G1PPRL_SUM_MB_FORMAT("remset")
3677                          G1PPRL_SUM_MB_FORMAT("code-roots"),
3678                          bytes_to_mb(_total_capacity_bytes),
3679                          bytes_to_mb(_total_used_bytes),
3680                          perc(_total_used_bytes, _total_capacity_bytes),
3681                          bytes_to_mb(_total_prev_live_bytes),
3682                          perc(_total_prev_live_bytes, _total_capacity_bytes),
3683                          bytes_to_mb(_total_next_live_bytes),
3684                          perc(_total_next_live_bytes, _total_capacity_bytes),
3685                          bytes_to_mb(_total_remset_bytes),
3686                          bytes_to_mb(_total_strong_code_roots_bytes));

3687 }