src/share/vm/gc/g1/concurrentMark.cpp

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/g1/concurrentMark.inline.hpp"
  30 #include "gc/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc/g1/g1CollectedHeap.inline.hpp"
  32 #include "gc/g1/g1CollectorPolicy.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"
  34 #include "gc/g1/g1ErgoVerbose.hpp"
  35 #include "gc/g1/g1Log.hpp"
  36 #include "gc/g1/g1OopClosures.inline.hpp"
  37 #include "gc/g1/g1RemSet.hpp"
  38 #include "gc/g1/g1StringDedup.hpp"
  39 #include "gc/g1/heapRegion.inline.hpp"
  40 #include "gc/g1/heapRegionManager.inline.hpp"
  41 #include "gc/g1/heapRegionRemSet.hpp"
  42 #include "gc/g1/heapRegionSet.inline.hpp"
  43 #include "gc/g1/suspendibleThreadSet.hpp"
  44 #include "gc/shared/gcId.hpp"
  45 #include "gc/shared/gcTimer.hpp"
  46 #include "gc/shared/gcTrace.hpp"
  47 #include "gc/shared/gcTraceTime.hpp"
  48 #include "gc/shared/genOopClosures.inline.hpp"
  49 #include "gc/shared/referencePolicy.hpp"
  50 #include "gc/shared/strongRootsScope.hpp"
  51 #include "gc/shared/taskqueue.inline.hpp"
  52 #include "gc/shared/vmGCOperations.hpp"

  53 #include "memory/allocation.hpp"
  54 #include "memory/resourceArea.hpp"
  55 #include "oops/oop.inline.hpp"
  56 #include "runtime/atomic.inline.hpp"
  57 #include "runtime/handles.inline.hpp"
  58 #include "runtime/java.hpp"
  59 #include "runtime/prefetch.inline.hpp"
  60 #include "services/memTracker.hpp"
  61 
  62 // Concurrent marking bit map wrapper
  63 
  64 CMBitMapRO::CMBitMapRO(int shifter) :
  65   _bm(),
  66   _shifter(shifter) {
  67   _bmStartWord = 0;
  68   _bmWordSize = 0;
  69 }
  70 
  71 HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
  72                                                const HeapWord* limit) const {


 262     rs.release();
 263     return false;
 264   }
 265   assert(_virtual_space.committed_size() == rs.size(),
 266          "Didn't reserve backing store for all of ConcurrentMark stack?");
 267   _base = (oop*) _virtual_space.low();
 268   setEmpty();
 269   _capacity = (jint) capacity;
 270   _saved_index = -1;
 271   _should_expand = false;
 272   return true;
 273 }
 274 
 275 void CMMarkStack::expand() {
 276   // Called during remark if we've overflowed the marking stack while marking.
 277   assert(isEmpty(), "stack should have been emptied while handling overflow");
 278   assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
 279   // Clear expansion flag
 280   _should_expand = false;
 281   if (_capacity == (jint) MarkStackSizeMax) {
 282     if (PrintGCDetails && Verbose) {
 283       gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
 284     }
 285     return;
 286   }
 287   // Double capacity if possible
 288   jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
 289   // Do not give up existing stack until we have managed to
 290   // get the double capacity that we desired.
 291   ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
 292                                                            sizeof(oop)));
 293   if (rs.is_reserved()) {
 294     // Release the backing store associated with old stack
 295     _virtual_space.release();
 296     // Reinitialize virtual space for new stack
 297     if (!_virtual_space.initialize(rs, rs.size())) {
 298       fatal("Not enough swap for expanded marking stack capacity");
 299     }
 300     _base = (oop*)(_virtual_space.low());
 301     _index = 0;
 302     _capacity = new_capacity;
 303   } else {
 304     if (PrintGCDetails && Verbose) {
 305       // Failed to double capacity; continue with the existing stack.
 306       gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
 307                           SIZE_FORMAT "K to " SIZE_FORMAT "K",
 308                           _capacity / K, new_capacity / K);
 309     }
 310   }
 311 }
 312 
 313 void CMMarkStack::set_should_expand() {
 314   // If we're resetting the marking state because of a
 315   // marking stack overflow, record that we should, if
 316   // possible, expand the stack.
 317   _should_expand = _cm->has_overflown();
 318 }
 319 
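     // Destructor: only the backing virtual space needs releasing; the
     // entries are heap oops and require no individual cleanup.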
 320 CMMarkStack::~CMMarkStack() {
 321   if (_base != NULL) {
 322     _base = NULL;
 323     _virtual_space.release();
 324   }
 325 }
 326 
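     // Push n entries from ptr_arr onto the global stack as a single unit,
     // serialized against other rare-event pushers by ParGCRareEvent_lock.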
 327 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
 328   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 329   jint start = _index;
 330   jint next_index = start + n;


 559     // Calculate the number of parallel marking threads by scaling
 560     // the number of parallel GC threads.
 561     uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
 562     FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
 563     _sleep_factor             = 0.0;
 564     _marking_task_overhead    = 1.0;
 565   }
 566 
 567   assert(ConcGCThreads > 0, "Should have been set");
 568   _parallel_marking_threads = ConcGCThreads;
 569   _max_parallel_marking_threads = _parallel_marking_threads;
 570 
 571   if (parallel_marking_threads() > 1) {
 572     _cleanup_task_overhead = 1.0;
 573   } else {
 574     _cleanup_task_overhead = marking_task_overhead();
 575   }
 576   _cleanup_sleep_factor =
 577                    (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
 578 
 579 #if 0
 580   gclog_or_tty->print_cr("Marking Threads          %d", parallel_marking_threads());
 581   gclog_or_tty->print_cr("CM Marking Task Overhead %1.4lf", marking_task_overhead());
 582   gclog_or_tty->print_cr("CM Sleep Factor          %1.4lf", sleep_factor());
 583   gclog_or_tty->print_cr("CL Marking Task Overhead %1.4lf", cleanup_task_overhead());
 584   gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
 585 #endif
 586 
 587   _parallel_workers = new WorkGang("G1 Marker",
 588        _max_parallel_marking_threads, false, true);
 589   if (_parallel_workers == NULL) {
 590     vm_exit_during_initialization("Failed necessary allocation.");
 591   } else {
 592     _parallel_workers->initialize_workers();
 593   }
 594 
 595   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 596     size_t mark_stack_size =
 597       MIN2(MarkStackSizeMax,
 598           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
 599     // Verify that the calculated value for MarkStackSize is in range.
 600     // It would be nice to use the private utility routine from Arguments.
 601     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 602       warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
 603               "must be between 1 and " SIZE_FORMAT,
 604               mark_stack_size, MarkStackSizeMax);
 605       return;
 606     }


 911     return;
 912   }
 913 
 914   // If we're executing the concurrent phase of marking, reset the marking
 915   // state; otherwise the marking state is reset after reference processing,
 916   // during the remark pause.
 917   // If we reset here as a result of an overflow during the remark we will
 918   // see assertion failures from any subsequent set_concurrency_and_phase()
 919   // calls.
 920   if (concurrent()) {
 921     // let the task associated with worker 0 do this
 922     if (worker_id == 0) {
 923       // task 0 is responsible for clearing the global data structures
 924       // We should be here because of an overflow. During STW we should
 925       // not clear the overflow flag since we rely on it being true when
 926       // we exit this method to abort the pause and restart concurrent
 927       // marking.
 928       reset_marking_state(true /* clear_overflow */);
 929       force_overflow()->update();
 930 
 931       if (G1Log::fine()) {
 932         gclog_or_tty->gclog_stamp();
 933         gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
 934       }
 935     }
 936   }
 937 
 938   // after this, each task should reset its own data structures and
 939   // then go into the second barrier
 940 }
 941 
 942 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
 943   SuspendibleThreadSetLeaver sts_leave(concurrent());
 944   _second_overflow_barrier_sync.enter();
 945 
 946   // at this point everything should be re-initialized and ready to go
 947 }
 948 
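     // Debug-only (non-PRODUCT) support for artificially forcing mark stack
     // overflow via G1ConcMarkForceOverflow, to exercise the restart path.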
 949 #ifndef PRODUCT
 950 void ForceOverflowSettings::init() {
 951   _num_remaining = G1ConcMarkForceOverflow;
 952   _force = false;
 953   update();
 954 }


1085 
1086     CMRootRegions* root_regions = _cm->root_regions();
1087     HeapRegion* hr = root_regions->claim_next();
1088     while (hr != NULL) {
1089       _cm->scanRootRegion(hr, worker_id);
1090       hr = root_regions->claim_next();
1091     }
1092   }
1093 };
1094 
1095 void ConcurrentMark::scanRootRegions() {
1096   double scan_start = os::elapsedTime();
1097 
1098   // Start of concurrent marking.
1099   ClassLoaderDataGraph::clear_claimed_marks();
1100 
1101   // scan_in_progress() will have been set to true only if there was
1102   // at least one root region to scan. So, if it's false, we
1103   // should not attempt to do any further work.
1104   if (root_regions()->scan_in_progress()) {
1105     if (G1Log::fine()) {
1106       gclog_or_tty->gclog_stamp();
1107       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
1108     }
1109 
1110     _parallel_marking_threads = calc_parallel_marking_threads();
1111     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1112            "Maximum number of marking threads exceeded");
1113     uint active_workers = MAX2(1U, parallel_marking_threads());
1114 
1115     CMRootRegionScanTask task(this);
1116     _parallel_workers->set_active_workers(active_workers);
1117     _parallel_workers->run_task(&task);
1118 
1119     if (G1Log::fine()) {
1120       gclog_or_tty->gclog_stamp();
1121       gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
1122     }
1123 
1124     // It's possible that has_aborted() is true here without actually
1125     // aborting the survivor scan earlier. This is OK as it's
1126     // mainly used for sanity checking.
1127     root_regions()->scan_finished();
1128   }
1129 }
1130 
1131 void ConcurrentMark::markFromRoots() {
1132   // we might be tempted to assert that:
1133   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1134   //        "inconsistent argument?");
1135   // However that wouldn't be right, because it's possible that
1136   // a safepoint is indeed in progress as a younger generation
1137   // stop-the-world GC happens even as we mark in this generation.
1138 
1139   _restart_for_overflow = false;
1140   force_overflow_conc()->init();
1141 
1142   // _g1h has _n_par_threads
1143   _parallel_marking_threads = calc_parallel_marking_threads();
1144   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1145     "Maximum number of marking threads exceeded");
1146 
1147   uint active_workers = MAX2(1U, parallel_marking_threads());
1148   assert(active_workers > 0, "Should have been set");
1149 
1150   // Parallel task terminator is set in "set_concurrency_and_phase()"
1151   set_concurrency_and_phase(active_workers, true /* concurrent */);
1152 
1153   CMConcurrentMarkingTask markingTask(this, cmThread());
1154   _parallel_workers->set_active_workers(active_workers);
1155   _parallel_workers->run_task(&markingTask);
1156   print_stats();
1157 }
1158 
1159 // Helper class to get rid of some boilerplate code.
1160 class G1CMTraceTime : public StackObj {
1161   GCTraceTimeImpl _gc_trace_time;
1162   static bool doit_and_prepend(bool doit) {
1163     if (doit) {
1164       gclog_or_tty->put(' ');
1165     }
1166     return doit;
1167   }
1168 
1169  public:
1170   G1CMTraceTime(const char* title, bool doit)
1171     : _gc_trace_time(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm()) {
1172   }
1173 };
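     // Typical use: G1CMTraceTime trace("GC ref-proc", G1Log::finer());
     // The title is printed (with a leading space) only when the given
     // verbosity flag is set, and the scope is timed against gc_timer_cm().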
1174 
1175 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1176   // world is stopped at this checkpoint
1177   assert(SafepointSynchronize::is_at_safepoint(),
1178          "world should be stopped");
1179 
1180   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1181 
1182   // If a full collection has happened, we shouldn't do this.
1183   if (has_aborted()) {
1184     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1185     return;
1186   }
1187 
1188   SvcGCMarker sgcm(SvcGCMarker::OTHER);
1189 
1190   if (VerifyDuringGC) {
1191     HandleMark hm;  // handle scope
1192     g1h->prepare_for_verify();
1193     Universe::verify(VerifyOption_G1UsePrevMarking,
1194                      " VerifyDuringGC:(before)");
1195   }
1196   g1h->check_bitmaps("Remark Start");
1197 
1198   G1CollectorPolicy* g1p = g1h->g1_policy();
1199   g1p->record_concurrent_mark_remark_start();
1200 
1201   double start = os::elapsedTime();
1202 
1203   checkpointRootsFinalWork();
1204 
1205   double mark_work_end = os::elapsedTime();
1206 
1207   weakRefsWork(clear_all_soft_refs);
1208 
1209   if (has_overflown()) {
1210     // Oops.  We overflowed.  Restart concurrent marking.
1211     _restart_for_overflow = true;
1212     if (G1TraceMarkStackOverflow) {
1213       gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
1214     }
1215 
1216     // Verify the heap w.r.t. the previous marking bitmap.
1217     if (VerifyDuringGC) {
1218       HandleMark hm;  // handle scope
1219       g1h->prepare_for_verify();
1220       Universe::verify(VerifyOption_G1UsePrevMarking,
1221                        " VerifyDuringGC:(overflow)");
1222     }
1223 
1224     // Clear the marking state because we will be restarting
1225     // marking due to overflowing the global mark stack.
1226     reset_marking_state();
1227   } else {
1228     {
1229       G1CMTraceTime trace("GC aggregate-data", G1Log::finer());
1230 
1231       // Aggregate the per-task counting data that we have accumulated
1232       // while marking.
1233       aggregate_count_data();
1234     }
1235 
1236     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1237     // We're done with marking.
1238     // This is the end of the marking cycle. We expect all
1239     // threads to have SATB queues with active set to true.
1240     satb_mq_set.set_active_all_threads(false, /* new active value */
1241                                        true /* expected_active */);
1242 
1243     if (VerifyDuringGC) {
1244       HandleMark hm;  // handle scope
1245       g1h->prepare_for_verify();
1246       Universe::verify(VerifyOption_G1UseNextMarking,
1247                        " VerifyDuringGC:(after)");
1248     }
1249     g1h->check_bitmaps("Remark End");
1250     assert(!restart_for_overflow(), "sanity");
1251     // Completely reset the marking state since marking completed
1252     set_non_marking_state();
1253   }
1254 
1255   // Expand the marking stack, if we have to and if we can.
1256   if (_markStack.should_expand()) {
1257     _markStack.expand();
1258   }
1259 
1260   // Statistics
1261   double now = os::elapsedTime();
1262   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1263   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1264   _remark_times.add((now - start) * 1000.0);
1265 
1266   g1p->record_concurrent_mark_remark_end();
1267 


1774 
1775 };
1776 
1777 void ConcurrentMark::cleanup() {
1778   // world is stopped at this checkpoint
1779   assert(SafepointSynchronize::is_at_safepoint(),
1780          "world should be stopped");
1781   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1782 
1783   // If a full collection has happened, we shouldn't do this.
1784   if (has_aborted()) {
1785     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1786     return;
1787   }
1788 
1789   g1h->verify_region_sets_optional();
1790 
1791   if (VerifyDuringGC) {
1792     HandleMark hm;  // handle scope
1793     g1h->prepare_for_verify();
1794     Universe::verify(VerifyOption_G1UsePrevMarking,
1795                      " VerifyDuringGC:(before)");
1796   }
1797   g1h->check_bitmaps("Cleanup Start");
1798 
1799   G1CollectorPolicy* g1p = g1h->g1_policy();
1800   g1p->record_concurrent_mark_cleanup_start();
1801 
1802   double start = os::elapsedTime();
1803 
1804   HeapRegionRemSet::reset_for_cleanup_tasks();
1805 
1806   // Do counting once more with the world stopped for good measure.
1807   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1808 
1809   g1h->workers()->run_task(&g1_par_count_task);
1810 
1811   if (VerifyDuringGC) {
1812     // Verify that the counting data accumulated during marking matches
1813     // that calculated by walking the marking bitmap.
1814 
1815     // Bitmaps to hold expected values


1817     BitMap expected_card_bm(_card_bm.size(), true);
1818 
1819     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
1820                                                  &_region_bm,
1821                                                  &_card_bm,
1822                                                  &expected_region_bm,
1823                                                  &expected_card_bm);
1824 
1825     g1h->workers()->run_task(&g1_par_verify_task);
1826 
1827     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
1828   }
1829 
1830   size_t start_used_bytes = g1h->used();
1831   g1h->collector_state()->set_mark_in_progress(false);
1832 
1833   double count_end = os::elapsedTime();
1834   double this_final_counting_time = (count_end - start);
1835   _total_counting_time += this_final_counting_time;
1836 
1837   if (G1PrintRegionLivenessInfo) {
1838     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking");
1839     _g1h->heap_region_iterate(&cl);
1840   }
1841 
1842   // Install newly created mark bitMap as "prev".
1843   swapMarkBitMaps();
1844 
1845   g1h->reset_gc_time_stamp();
1846 
1847   uint n_workers = _g1h->workers()->active_workers();
1848 
1849   // Note end of marking in all heap regions.
1850   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
1851   g1h->workers()->run_task(&g1_par_note_end_task);
1852   g1h->check_gc_time_stamps();
1853 
1854   if (!cleanup_list_is_empty()) {
1855     // The cleanup list is not empty, so we'll have to process it
1856     // concurrently. Notify anyone else that might be wanting free
1857     // regions that there will be more free regions coming soon.
1858     g1h->set_free_regions_coming();


1861   // call below, since it affects the metric by which we sort the heap
1862   // regions.
1863   if (G1ScrubRemSets) {
1864     double rs_scrub_start = os::elapsedTime();
1865     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
1866     g1h->workers()->run_task(&g1_par_scrub_rs_task);
1867 
1868     double rs_scrub_end = os::elapsedTime();
1869     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
1870     _total_rs_scrub_time += this_rs_scrub_time;
1871   }
1872 
1873   // this will also free any regions totally full of garbage objects,
1874   // and sort the regions.
1875   g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1876 
1877   // Statistics.
1878   double end = os::elapsedTime();
1879   _cleanup_times.add((end - start) * 1000.0);
1880 
1881   if (G1Log::fine()) {
1882     g1h->g1_policy()->print_heap_transition(start_used_bytes);
1883   }
1884 
1885   // Clean up will have freed any regions completely full of garbage.
1886   // Update the soft reference policy with the new heap occupancy.
1887   Universe::update_heap_info_at_gc();
1888 
1889   if (VerifyDuringGC) {
1890     HandleMark hm;  // handle scope
1891     g1h->prepare_for_verify();
1892     Universe::verify(VerifyOption_G1UsePrevMarking,
1893                      " VerifyDuringGC:(after)");
1894   }
1895 
1896   g1h->check_bitmaps("Cleanup End");
1897 
1898   g1h->verify_region_sets_optional();
1899 
1900   // We need to make this a "collection" so any collection pause that
1901   // races with it goes around and waits for completeCleanup to finish.
1902   g1h->increment_total_collections();
1903 
1904   // Clean out dead classes and update Metaspace sizes.
1905   if (ClassUnloadingWithConcurrentMark) {
1906     ClassLoaderDataGraph::purge();
1907   }
1908   MetaspaceGC::compute_new_size();
1909 
1910   // We reclaimed old regions so we should calculate the sizes to make
1911   // sure we update the old gen/space data.
1912   g1h->g1mm()->update_sizes();
1913   g1h->allocation_context_stats().update_after_mark();
1914 
1915   g1h->trace_heap_after_concurrent_cycle();
1916 }
1917 
1918 void ConcurrentMark::completeCleanup() {
1919   if (has_aborted()) return;
1920 
1921   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1922 
1923   _cleanup_list.verify_optional();
1924   FreeRegionList tmp_free_list("Tmp Free List");
1925 
1926   if (G1ConcRegionFreeingVerbose) {
1927     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
1928                            "cleanup list has %u entries",
1929                            _cleanup_list.length());
1930   }
1931 
1932   // No one else should be accessing the _cleanup_list at this point,
1933   // so it is not necessary to take any locks
1934   while (!_cleanup_list.is_empty()) {
1935     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
1936     assert(hr != NULL, "Got NULL from a non-empty list");
1937     hr->par_clear();
1938     tmp_free_list.add_ordered(hr);
1939 
1940     // Instead of adding one region at a time to the secondary_free_list,
1941     // we accumulate them in the local list and move them a few at a
1942     // time. This also cuts down on the number of notify_all() calls
1943     // we do during this process. We'll also append the local list when
1944     // _cleanup_list is empty (which means we just removed the last
1945     // region from the _cleanup_list).
1946     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
1947         _cleanup_list.is_empty()) {
1948       if (G1ConcRegionFreeingVerbose) {
1949         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
1950                                "appending %u entries to the secondary_free_list, "
1951                                "cleanup list still has %u entries",
1952                                tmp_free_list.length(),
1953                                _cleanup_list.length());
1954       }
1955 
1956       {
1957         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1958         g1h->secondary_free_list_add(&tmp_free_list);
1959         SecondaryFreeList_lock->notify_all();
1960       }
1961 #ifndef PRODUCT
1962       if (G1StressConcRegionFreeing) {
1963         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
1964           os::sleep(Thread::current(), (jlong) 1, false);
1965         }
1966       }
1967 #endif
1968     }
1969   }
1970   assert(tmp_free_list.is_empty(), "post-condition");
1971 }
1972 
1973 // Supporting Object and Oop closures for reference discovery
1974 // and processing during marking


2191     // Skip processing the discovered references if we have
2192     // overflown the global marking stack. Reference objects
2193     // only get discovered once so it is OK to not
2194     // de-populate the discovered reference lists. We could have,
2195     // but the only benefit would be that, when marking restarts,
2196     // less reference objects are discovered.
2197     return;
2198   }
2199 
2200   ResourceMark rm;
2201   HandleMark   hm;
2202 
2203   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2204 
2205   // Is alive closure.
2206   G1CMIsAliveClosure g1_is_alive(g1h);
2207 
2208   // Inner scope to exclude the cleaning of the string and symbol
2209   // tables from the displayed time.
2210   {
2211     G1CMTraceTime t("GC ref-proc", G1Log::finer());
2212 
2213     ReferenceProcessor* rp = g1h->ref_processor_cm();
2214 
2215     // See the comment in G1CollectedHeap::ref_processing_init()
2216     // about how reference processing currently works in G1.
2217 
2218     // Set the soft reference policy
2219     rp->setup_policy(clear_all_soft_refs);
2220     assert(_markStack.isEmpty(), "mark stack should be empty");
2221 
2222     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2223     // in serial reference processing. Note these closures are also
2224   // used for serially processing (by the current thread) the
2225     // JNI references during parallel reference processing.
2226     //
2227     // These closures do not need to synchronize with the worker
2228     // threads involved in parallel reference processing as these
2229     // instances are executed serially by the current thread (e.g.
2230     // reference processing is not multi-threaded and is thus
2231     // performed by the current thread instead of a gang worker).


2281       set_has_overflown();
2282     }
2283 
2284     assert(rp->num_q() == active_workers, "why not");
2285 
2286     rp->enqueue_discovered_references(executor);
2287 
2288     rp->verify_no_references_recorded();
2289     assert(!rp->discovery_enabled(), "Post condition");
2290   }
2291 
2292   if (has_overflown()) {
2293     // We cannot trust g1_is_alive if the marking stack overflowed
2294     return;
2295   }
2296 
2297   assert(_markStack.isEmpty(), "Marking should have completed");
2298 
2299   // Unload Klasses, String, Symbols, Code Cache, etc.
2300   {
2301     G1CMTraceTime trace("Unloading", G1Log::finer());
2302 
2303     if (ClassUnloadingWithConcurrentMark) {
2304       bool purged_classes;
2305 
2306       {
2307         G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
2308         purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
2309       }
2310 
2311       {
2312         G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
2313         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2314       }
2315     }
2316 
2317     if (G1StringDedup::is_enabled()) {
2318       G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest());
2319       G1StringDedup::unlink(&g1_is_alive);
2320     }
2321   }
2322 }
2323 
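     // Marking populated the "next" bitmap; make it the new "prev" (the one
     // liveness queries consult) and reuse the old "prev" for the next cycle.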
2324 void ConcurrentMark::swapMarkBitMaps() {
2325   CMBitMapRO* temp = _prevMarkBitMap;
2326   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2327   _nextMarkBitMap  = (CMBitMap*)  temp;
2328 }
2329 
2330 // Closure for marking entries in SATB buffers.
2331 class CMSATBBufferClosure : public SATBBufferClosure {
2332 private:
2333   CMTask* _task;
2334   G1CollectedHeap* _g1h;
2335 
2336   // This is very similar to CMTask::deal_with_reference, but with
2337   // more relaxed requirements for the argument, so this must be more
2338   // circumspect about treating the argument as an object.


2419                               true         /* do_termination       */,
2420                               false        /* is_serial            */);
2421       } while (task->has_aborted() && !_cm->has_overflown());
2422       // If we overflow, then we do not want to restart. We instead
2423       // want to abort remark and do concurrent marking again.
2424       task->record_end_time();
2425     }
2426   }
2427 
2428   CMRemarkTask(ConcurrentMark* cm, uint active_workers) :
2429     AbstractGangTask("Par Remark"), _cm(cm) {
2430     _cm->terminator()->reset_for_reuse(active_workers);
2431   }
2432 };
2433 
2434 void ConcurrentMark::checkpointRootsFinalWork() {
2435   ResourceMark rm;
2436   HandleMark   hm;
2437   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2438 
2439   G1CMTraceTime trace("Finalize Marking", G1Log::finer());
2440 
2441   g1h->ensure_parsability(false);
2442 
2443   // this is remark, so we'll use up all active threads
2444   uint active_workers = g1h->workers()->active_workers();
2445   set_concurrency_and_phase(active_workers, false /* concurrent */);
2446   // Leave _parallel_marking_threads at its
2447   // value originally calculated in the ConcurrentMark
2448   // constructor and pass values of the active workers
2449   // through the gang in the task.
2450 
2451   {
2452     StrongRootsScope srs(active_workers);
2453 
2454     CMRemarkTask remarkTask(this, active_workers);
2455     // We will start all available threads, even if we decide that the
2456     // active_workers will be fewer. The extra ones will just bail out
2457     // immediately.
2458     g1h->workers()->run_task(&remarkTask);
2459   }


2777   // Clear the global region bitmap - it will be filled as part
2778   // of the final counting task.
2779   _region_bm.clear();
2780 
2781   uint max_regions = _g1h->max_regions();
2782   assert(_max_worker_id > 0, "uninitialized");
2783 
2784   for (uint i = 0; i < _max_worker_id; i += 1) {
2785     BitMap* task_card_bm = count_card_bitmap_for(i);
2786     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
2787 
2788     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
2789     assert(marked_bytes_array != NULL, "uninitialized");
2790 
2791     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
2792     task_card_bm->clear();
2793   }
2794 }
2795 
2796 void ConcurrentMark::print_stats() {
2797   if (G1MarkingVerboseLevel > 0) {
2798     gclog_or_tty->print_cr("---------------------------------------------------------------------");



2799     for (size_t i = 0; i < _active_tasks; ++i) {
2800       _tasks[i]->print_stats();
2801       gclog_or_tty->print_cr("---------------------------------------------------------------------");
2802     }
2803   }
2804 }
2805 
2806 // abandon current marking iteration due to a Full GC
2807 void ConcurrentMark::abort() {
2808   if (!cmThread()->during_cycle() || _has_aborted) {
2809     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2810     return;
2811   }
2812 
2813   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2814   // concurrent bitmap clearing.
2815   _nextMarkBitMap->clearAll();
2816 
2817   // Note we cannot clear the previous marking bitmap here
2818   // since VerifyDuringGC verifies the objects marked during
2819   // a full GC against the previous bitmap.
2820 
2821   // Clear the liveness counting data
2822   clear_all_count_data();


2826     _tasks[i]->clear_region_fields();
2827   }
2828   _first_overflow_barrier_sync.abort();
2829   _second_overflow_barrier_sync.abort();
2830   _has_aborted = true;
2831 
2832   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2833   satb_mq_set.abandon_partial_marking();
2834   // This can be called either during or outside marking; we'll read
2835   // the expected_active value from the SATB queue set.
2836   satb_mq_set.set_active_all_threads(
2837                                  false, /* new active value */
2838                                  satb_mq_set.is_active() /* expected_active */);
2839 
2840   _g1h->trace_heap_after_concurrent_cycle();
2841   _g1h->register_concurrent_cycle_end();
2842 }
2843 
2844 static void print_ms_time_info(const char* prefix, const char* name,
2845                                NumberSeq& ns) {
2846   gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2847                          prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2848   if (ns.num() > 0) {
2849     gclog_or_tty->print_cr("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2850                            prefix, ns.sd(), ns.maximum());
2851   }
2852 }
2853 
2854 void ConcurrentMark::print_summary_info() {
2855   gclog_or_tty->print_cr(" Concurrent marking:");





2856   print_ms_time_info("  ", "init marks", _init_times);
2857   print_ms_time_info("  ", "remarks", _remark_times);
2858   {
2859     print_ms_time_info("     ", "final marks", _remark_mark_times);
2860     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
2861 
2862   }
2863   print_ms_time_info("  ", "cleanups", _cleanup_times);
2864   gclog_or_tty->print_cr("    Final counting total time = %8.2f s (avg = %8.2f ms).",
2865                          _total_counting_time,
2866                          (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 /
2867                           (double)_cleanup_times.num()
2868                          : 0.0));
2869   if (G1ScrubRemSets) {
2870     gclog_or_tty->print_cr("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
2871                            _total_rs_scrub_time,
2872                            (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 /
2873                             (double)_cleanup_times.num()
2874                            : 0.0));
2875   }
2876   gclog_or_tty->print_cr("  Total stop_world time = %8.2f s.",
2877                          (_init_times.sum() + _remark_times.sum() +
2878                           _cleanup_times.sum())/1000.0);
2879   gclog_or_tty->print_cr("  Total concurrent time = %8.2f s "
2880                 "(%8.2f s marking).",
2881                 cmThread()->vtime_accum(),
2882                 cmThread()->vtime_mark_accum());
2883 }
2884 
2885 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2886   _parallel_workers->print_worker_threads_on(st);
2887 }
2888 
2889 void ConcurrentMark::print_on_error(outputStream* st) const {
2890   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2891       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
2892   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
2893   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
2894 }
2895 
2896 // We take a break if someone is trying to stop the world.
2897 bool ConcurrentMark::do_yield_check(uint worker_id) {
2898   if (SuspendibleThreadSet::should_yield()) {
2899     if (worker_id == 0) {
2900       _g1h->g1_policy()->record_concurrent_pause();
2901     }
2902     SuspendibleThreadSet::yield();


3244 
3245   // This keeps claiming and applying the closure to completed buffers
3246   // until we run out of buffers or we need to abort.
3247   while (!has_aborted() &&
3248          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
3249     regular_clock_call();
3250   }
3251 
3252   _draining_satb_buffers = false;
3253 
3254   assert(has_aborted() ||
3255          concurrent() ||
3256          satb_mq_set.completed_buffers_num() == 0, "invariant");
3257 
3258   // again, this was a potentially expensive operation; decrease the
3259   // limits to get the regular clock call early
3260   decrease_limits();
3261 }
3262 
3263 void CMTask::print_stats() {
3264   gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d",
3265                          _worker_id, _calls);
3266   gclog_or_tty->print_cr("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3267                          _elapsed_time_ms, _termination_time_ms);
3268   gclog_or_tty->print_cr("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3269                          _step_times_ms.num(), _step_times_ms.avg(),
3270                          _step_times_ms.sd());
3271   gclog_or_tty->print_cr("                    max = %1.2lfms, total = %1.2lfms",
3272                          _step_times_ms.maximum(), _step_times_ms.sum());
3273 }
3274 
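     // Work stealing: try to take an entry from another task's queue, using
     // hash_seed to pick the victim queue pseudo-randomly.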
3275 bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
3276   return _task_queues->steal(worker_id, hash_seed, obj);
3277 }
3278 
3279 /*****************************************************************************
3280 
3281     The do_marking_step(time_target_ms, ...) method is the building
3282     block of the parallel marking framework. It can be called in parallel
3283     with other invocations of do_marking_step() on different tasks
3284     (but only one per task, obviously) and concurrently with the
3285     mutator threads, or during remark, hence it eliminates the need
3286     for two versions of the code. When called during remark, it will
3287     pick up from where the task left off during the concurrent marking
3288     phase. Interestingly, tasks are also claimable during evacuation
3289     pauses, since do_marking_step() ensures that it aborts before
3290     it needs to yield.
3291 


3761 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
3762 #else // _LP64
3763 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
3764 #endif // _LP64
3765 
3766 // For per-region info
3767 #define G1PPRL_TYPE_FORMAT            "   %-4s"
3768 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
3769 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
3770 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
3771 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
3772 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
3773 
3774 // For summary info
3775 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
3776 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
3777 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
3778 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
3779 
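     // Produces the G1PrintRegionLivenessInfo output: a header, one line per
     // heap region, and a summary footer printed by the destructor.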
3780 G1PrintRegionLivenessInfoClosure::
3781 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
3782   : _out(out),
3783     _total_used_bytes(0), _total_capacity_bytes(0),
3784     _total_prev_live_bytes(0), _total_next_live_bytes(0),
3785     _hum_used_bytes(0), _hum_capacity_bytes(0),
3786     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
3787     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
3788   G1CollectedHeap* g1h = G1CollectedHeap::heap();
3789   MemRegion g1_reserved = g1h->g1_reserved();
3790   double now = os::elapsedTime();
3791 
3792   // Print the header of the output.
3793   _out->cr();
3794   _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
3795   _out->print_cr(G1PPRL_LINE_PREFIX" HEAP"
3796                  G1PPRL_SUM_ADDR_FORMAT("reserved")
3797                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
3798                  p2i(g1_reserved.start()), p2i(g1_reserved.end()),
3799                  HeapRegion::GrainBytes);
3800   _out->print_cr(G1PPRL_LINE_PREFIX);
3801   _out->print_cr(G1PPRL_LINE_PREFIX
3802                 G1PPRL_TYPE_H_FORMAT
3803                 G1PPRL_ADDR_BASE_H_FORMAT
3804                 G1PPRL_BYTE_H_FORMAT
3805                 G1PPRL_BYTE_H_FORMAT
3806                 G1PPRL_BYTE_H_FORMAT
3807                 G1PPRL_DOUBLE_H_FORMAT
3808                 G1PPRL_BYTE_H_FORMAT
3809                 G1PPRL_BYTE_H_FORMAT,
3810                 "type", "address-range",
3811                 "used", "prev-live", "next-live", "gc-eff",
3812                 "remset", "code-roots");
3813   _out->print_cr(G1PPRL_LINE_PREFIX
3814                 G1PPRL_TYPE_H_FORMAT
3815                 G1PPRL_ADDR_BASE_H_FORMAT
3816                 G1PPRL_BYTE_H_FORMAT
3817                 G1PPRL_BYTE_H_FORMAT
3818                 G1PPRL_BYTE_H_FORMAT
3819                 G1PPRL_DOUBLE_H_FORMAT
3820                 G1PPRL_BYTE_H_FORMAT
3821                 G1PPRL_BYTE_H_FORMAT,
3822                 "", "",
3823                 "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
3824                 "(bytes)", "(bytes)");
3825 }
3826 
3827 // It takes as a parameter a reference to one of the _hum_* fields. It
3828 // deduces the corresponding value for a region in a humongous region
3829 // series (either the region size, or what's left if the _hum_* field
3830 // is < the region size), and updates the _hum_* field accordingly.
3831 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
3832   size_t bytes = 0;
3833   // The > 0 check is to deal with the prev and next live bytes which


3875     _hum_used_bytes      = used_bytes;
3876     _hum_prev_live_bytes = prev_live_bytes;
3877     _hum_next_live_bytes = next_live_bytes;
3878     get_hum_bytes(&used_bytes, &capacity_bytes,
3879                   &prev_live_bytes, &next_live_bytes);
3880     end = bottom + HeapRegion::GrainWords;
3881   } else if (r->is_continues_humongous()) {
3882     get_hum_bytes(&used_bytes, &capacity_bytes,
3883                   &prev_live_bytes, &next_live_bytes);
3884     assert(end == bottom + HeapRegion::GrainWords, "invariant");
3885   }
3886 
3887   _total_used_bytes      += used_bytes;
3888   _total_capacity_bytes  += capacity_bytes;
3889   _total_prev_live_bytes += prev_live_bytes;
3890   _total_next_live_bytes += next_live_bytes;
3891   _total_remset_bytes    += remset_bytes;
3892   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3893 
3894   // Print a line for this particular region.
3895   _out->print_cr(G1PPRL_LINE_PREFIX
3896                  G1PPRL_TYPE_FORMAT
3897                  G1PPRL_ADDR_BASE_FORMAT
3898                  G1PPRL_BYTE_FORMAT
3899                  G1PPRL_BYTE_FORMAT
3900                  G1PPRL_BYTE_FORMAT
3901                  G1PPRL_DOUBLE_FORMAT
3902                  G1PPRL_BYTE_FORMAT
3903                  G1PPRL_BYTE_FORMAT,
3904                  type, p2i(bottom), p2i(end),
3905                  used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3906                  remset_bytes, strong_code_roots_bytes);
3907 
3908   return false;
3909 }
3910 
3911 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3912   // add static memory usage to the remembered set sizes
3913   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3914   // Print the footer of the output.
3915   _out->print_cr(G1PPRL_LINE_PREFIX);
3916   _out->print_cr(G1PPRL_LINE_PREFIX
3917                  " SUMMARY"
3918                  G1PPRL_SUM_MB_FORMAT("capacity")
3919                  G1PPRL_SUM_MB_PERC_FORMAT("used")
3920                  G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3921                  G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3922                  G1PPRL_SUM_MB_FORMAT("remset")
3923                  G1PPRL_SUM_MB_FORMAT("code-roots"),
3924                  bytes_to_mb(_total_capacity_bytes),
3925                  bytes_to_mb(_total_used_bytes),
3926                  perc(_total_used_bytes, _total_capacity_bytes),
3927                  bytes_to_mb(_total_prev_live_bytes),
3928                  perc(_total_prev_live_bytes, _total_capacity_bytes),
3929                  bytes_to_mb(_total_next_live_bytes),
3930                  perc(_total_next_live_bytes, _total_capacity_bytes),
3931                  bytes_to_mb(_total_remset_bytes),
3932                  bytes_to_mb(_total_strong_code_roots_bytes));
3933   _out->cr();
3934 }


src/share/vm/gc/g1/concurrentMark.cpp

  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "gc/g1/concurrentMark.inline.hpp"
  30 #include "gc/g1/concurrentMarkThread.inline.hpp"
  31 #include "gc/g1/g1CollectedHeap.inline.hpp"
  32 #include "gc/g1/g1CollectorPolicy.hpp"
  33 #include "gc/g1/g1CollectorState.hpp"


  34 #include "gc/g1/g1OopClosures.inline.hpp"
  35 #include "gc/g1/g1RemSet.hpp"
  36 #include "gc/g1/g1StringDedup.hpp"
  37 #include "gc/g1/heapRegion.inline.hpp"
  38 #include "gc/g1/heapRegionManager.inline.hpp"
  39 #include "gc/g1/heapRegionRemSet.hpp"
  40 #include "gc/g1/heapRegionSet.inline.hpp"
  41 #include "gc/g1/suspendibleThreadSet.hpp"
  42 #include "gc/shared/gcId.hpp"
  43 #include "gc/shared/gcTimer.hpp"
  44 #include "gc/shared/gcTrace.hpp"
  45 #include "gc/shared/gcTraceTime.hpp"
  46 #include "gc/shared/genOopClosures.inline.hpp"
  47 #include "gc/shared/referencePolicy.hpp"
  48 #include "gc/shared/strongRootsScope.hpp"
  49 #include "gc/shared/taskqueue.inline.hpp"
  50 #include "gc/shared/vmGCOperations.hpp"
  51 #include "logging/log.hpp"
  52 #include "memory/allocation.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "oops/oop.inline.hpp"
  55 #include "runtime/atomic.inline.hpp"
  56 #include "runtime/handles.inline.hpp"
  57 #include "runtime/java.hpp"
  58 #include "runtime/prefetch.inline.hpp"
  59 #include "services/memTracker.hpp"
  60 
  61 // Concurrent marking bit map wrapper
  62 
  63 CMBitMapRO::CMBitMapRO(int shifter) :
  64   _bm(),
  65   _shifter(shifter) {
  66   _bmStartWord = 0;
  67   _bmWordSize = 0;
  68 }
  69 
  70 HeapWord* CMBitMapRO::getNextMarkedWordAddress(const HeapWord* addr,
  71                                                const HeapWord* limit) const {


 261     rs.release();
 262     return false;
 263   }
 264   assert(_virtual_space.committed_size() == rs.size(),
 265          "Didn't reserve backing store for all of ConcurrentMark stack?");
 266   _base = (oop*) _virtual_space.low();
 267   setEmpty();
 268   _capacity = (jint) capacity;
 269   _saved_index = -1;
 270   _should_expand = false;
 271   return true;
 272 }
 273 
 274 void CMMarkStack::expand() {
 275   // Called during remark if we've overflowed the marking stack while marking.
 276   assert(isEmpty(), "stack should have been emptied while handling overflow");
 277   assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
 278   // Clear expansion flag
 279   _should_expand = false;
 280   if (_capacity == (jint) MarkStackSizeMax) {
 281     log_trace(gc)("(benign) Can't expand marking stack capacity, at max size limit");


 282     return;
 283   }
 284   // Double capacity if possible
 285   jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
 286   // Do not give up existing stack until we have managed to
 287   // get the double capacity that we desired.
 288   ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
 289                                                            sizeof(oop)));
 290   if (rs.is_reserved()) {
 291     // Release the backing store associated with old stack
 292     _virtual_space.release();
 293     // Reinitialize virtual space for new stack
 294     if (!_virtual_space.initialize(rs, rs.size())) {
 295       fatal("Not enough swap for expanded marking stack capacity");
 296     }
 297     _base = (oop*)(_virtual_space.low());
 298     _index = 0;
 299     _capacity = new_capacity;
 300   } else {

 301     // Failed to double capacity; continue with the existing stack.
 302     log_trace(gc)("(benign) Failed to expand marking stack capacity from " SIZE_FORMAT "K to " SIZE_FORMAT "K",

 303                   _capacity / K, new_capacity / K);
 304   }

 305 }
 306 
 307 void CMMarkStack::set_should_expand() {
 308   // If we're resetting the marking state because of a
 309   // marking stack overflow, record that we should, if
 310   // possible, expand the stack.
 311   _should_expand = _cm->has_overflown();
 312 }
 313 
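     // Destructor: only the backing virtual space needs releasing; the
     // entries are heap oops and require no individual cleanup.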
 314 CMMarkStack::~CMMarkStack() {
 315   if (_base != NULL) {
 316     _base = NULL;
 317     _virtual_space.release();
 318   }
 319 }
 320 
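     // Push n entries from ptr_arr onto the global stack as a single unit,
     // serialized against other rare-event pushers by ParGCRareEvent_lock.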
 321 void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
 322   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
 323   jint start = _index;
 324   jint next_index = start + n;


 553     // Calculate the number of parallel marking threads by scaling
 554     // the number of parallel GC threads.
 555     uint marking_thread_num = scale_parallel_threads(ParallelGCThreads);
 556     FLAG_SET_ERGO(uint, ConcGCThreads, marking_thread_num);
 557     _sleep_factor             = 0.0;
 558     _marking_task_overhead    = 1.0;
 559   }
 560 
 561   assert(ConcGCThreads > 0, "Should have been set");
 562   _parallel_marking_threads = ConcGCThreads;
 563   _max_parallel_marking_threads = _parallel_marking_threads;
 564 
 565   if (parallel_marking_threads() > 1) {
 566     _cleanup_task_overhead = 1.0;
 567   } else {
 568     _cleanup_task_overhead = marking_task_overhead();
 569   }
 570   _cleanup_sleep_factor =
 571                    (1.0 - cleanup_task_overhead()) / cleanup_task_overhead();
 572 








 573   _parallel_workers = new WorkGang("G1 Marker",
 574        _max_parallel_marking_threads, false, true);
 575   if (_parallel_workers == NULL) {
 576     vm_exit_during_initialization("Failed necessary allocation.");
 577   } else {
 578     _parallel_workers->initialize_workers();
 579   }
 580 
 581   if (FLAG_IS_DEFAULT(MarkStackSize)) {
 582     size_t mark_stack_size =
 583       MIN2(MarkStackSizeMax,
 584           MAX2(MarkStackSize, (size_t) (parallel_marking_threads() * TASKQUEUE_SIZE)));
 585     // Verify that the calculated value for MarkStackSize is in range.
 586     // It would be nice to use the private utility routine from Arguments.
 587     if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
 588       warning("Invalid value calculated for MarkStackSize (" SIZE_FORMAT "): "
 589               "must be between 1 and " SIZE_FORMAT,
 590               mark_stack_size, MarkStackSizeMax);
 591       return;
 592     }


 897     return;
 898   }
 899 
 900   // If we're executing the concurrent phase of marking, reset the marking
 901   // state; otherwise the marking state is reset after reference processing,
 902   // during the remark pause.
 903   // If we reset here as a result of an overflow during the remark we will
 904   // see assertion failures from any subsequent set_concurrency_and_phase()
 905   // calls.
 906   if (concurrent()) {
 907     // let the task associated with worker 0 do this
 908     if (worker_id == 0) {
 909       // task 0 is responsible for clearing the global data structures
 910       // We should be here because of an overflow. During STW we should
 911       // not clear the overflow flag since we rely on it being true when
 912       // we exit this method to abort the pause and restart concurrent
 913       // marking.
 914       reset_marking_state(true /* clear_overflow */);
 915       force_overflow()->update();
 916 
 917       log_info(gc)("GC concurrent-mark-reset-for-overflow");



 918     }
 919   }
 920 
 921   // after this, each task should reset its own data structures and
 922   // then go into the second barrier
 923 }
 924 
 925 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
 926   SuspendibleThreadSetLeaver sts_leave(concurrent());
 927   _second_overflow_barrier_sync.enter();
 928 
 929   // at this point everything should be re-initialized and ready to go
 930 }
 931 
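     // Debug-only (non-PRODUCT) support for artificially forcing mark stack
     // overflow via G1ConcMarkForceOverflow, to exercise the restart path.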
 932 #ifndef PRODUCT
 933 void ForceOverflowSettings::init() {
 934   _num_remaining = G1ConcMarkForceOverflow;
 935   _force = false;
 936   update();
 937 }


1068 
1069     CMRootRegions* root_regions = _cm->root_regions();
1070     HeapRegion* hr = root_regions->claim_next();
1071     while (hr != NULL) {
1072       _cm->scanRootRegion(hr, worker_id);
1073       hr = root_regions->claim_next();
1074     }
1075   }
1076 };
1077 
1078 void ConcurrentMark::scanRootRegions() {
1079   double scan_start = os::elapsedTime();
1080 
1081   // Start of concurrent marking.
1082   ClassLoaderDataGraph::clear_claimed_marks();
1083 
1084   // scan_in_progress() will have been set to true only if there was
1085   // at least one root region to scan. So, if it's false, we
1086   // should not attempt to do any further work.
1087   if (root_regions()->scan_in_progress()) {
1088     log_info(gc)("GC concurrent-root-region-scan-start");



1089 
1090     _parallel_marking_threads = calc_parallel_marking_threads();
1091     assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1092            "Maximum number of marking threads exceeded");
1093     uint active_workers = MAX2(1U, parallel_marking_threads());
1094 
1095     CMRootRegionScanTask task(this);
1096     _parallel_workers->set_active_workers(active_workers);
1097     _parallel_workers->run_task(&task);
1098 
1099     log_info(gc)("GC concurrent-root-region-scan-end, %1.7lf secs", os::elapsedTime() - scan_start);



1100 
1101     // It's possible that has_aborted() is true here without actually
1102     // aborting the survivor scan earlier. This is OK as it's
1103     // mainly used for sanity checking.
1104     root_regions()->scan_finished();
1105   }
1106 }
1107 
1108 void ConcurrentMark::markFromRoots() {
1109   // we might be tempted to assert that:
1110   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
1111   //        "inconsistent argument?");
1112   // However that wouldn't be right, because it's possible that
1113   // a safepoint is indeed in progress as a younger generation
1114   // stop-the-world GC happens even as we mark in this generation.
1115 
1116   _restart_for_overflow = false;
1117   force_overflow_conc()->init();
1118 
1119   // _g1h has _n_par_threads
1120   _parallel_marking_threads = calc_parallel_marking_threads();
1121   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
1122     "Maximum number of marking threads exceeded");
1123 
1124   uint active_workers = MAX2(1U, parallel_marking_threads());
1125   assert(active_workers > 0, "Should have been set");
1126 
1127   // Parallel task terminator is set in "set_concurrency_and_phase()"
1128   set_concurrency_and_phase(active_workers, true /* concurrent */);
1129 
1130   CMConcurrentMarkingTask markingTask(this, cmThread());
1131   _parallel_workers->set_active_workers(active_workers);
1132   _parallel_workers->run_task(&markingTask);
1133   print_stats();
1134 }
1135 
















1136 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
1137   // world is stopped at this checkpoint
1138   assert(SafepointSynchronize::is_at_safepoint(),
1139          "world should be stopped");
1140 
1141   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1142 
1143   // If a full collection has happened, we shouldn't do this.
1144   if (has_aborted()) {
1145     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1146     return;
1147   }
1148 
1149   SvcGCMarker sgcm(SvcGCMarker::OTHER);
1150 
1151   if (VerifyDuringGC) {
1152     HandleMark hm;  // handle scope
1153     g1h->prepare_for_verify();
1154     Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");

1155   }
1156   g1h->check_bitmaps("Remark Start");
1157 
1158   G1CollectorPolicy* g1p = g1h->g1_policy();
1159   g1p->record_concurrent_mark_remark_start();
1160 
1161   double start = os::elapsedTime();
1162 
1163   checkpointRootsFinalWork();
1164 
1165   double mark_work_end = os::elapsedTime();
1166 
1167   weakRefsWork(clear_all_soft_refs);
1168 
1169   if (has_overflown()) {
1170     // Oops.  We overflowed.  Restart concurrent marking.
1171     _restart_for_overflow = true;
1172     log_develop(gc)("Remark led to restart for overflow.");


1173 
1174     // Verify the heap w.r.t. the previous marking bitmap.
1175     if (VerifyDuringGC) {
1176       HandleMark hm;  // handle scope
1177       g1h->prepare_for_verify();
1178       Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)");

1179     }
1180 
1181     // Clear the marking state because we will be restarting
1182     // marking due to overflowing the global mark stack.
1183     reset_marking_state();
1184   } else {
1185     {
1186       GCTraceTime(Debug, gc) trace("GC aggregate-data", g1h->gc_timer_cm());
1187 
1188       // Aggregate the per-task counting data that we have accumulated
1189       // while marking.
1190       aggregate_count_data();
1191     }
1192 
1193     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
1194     // We're done with marking.
1195     // This is the end of the marking cycle. We expect all
1196     // threads to have SATB queues with active set to true.
1197     satb_mq_set.set_active_all_threads(false, /* new active value */
1198                                        true /* expected_active */);
1199 
1200     if (VerifyDuringGC) {
1201       HandleMark hm;  // handle scope
1202       g1h->prepare_for_verify();
1203       Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)");
1204     }
1205     g1h->check_bitmaps("Remark End");
1206     assert(!restart_for_overflow(), "sanity");
1207     // Completely reset the marking state since marking completed
1208     set_non_marking_state();
1209   }
1210 
1211   // Expand the marking stack, if we have to and if we can.
1212   if (_markStack.should_expand()) {
1213     _markStack.expand();
1214   }
1215 
1216   // Statistics
1217   double now = os::elapsedTime();
1218   _remark_mark_times.add((mark_work_end - start) * 1000.0);
1219   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1220   _remark_times.add((now - start) * 1000.0);
1221 
1222   g1p->record_concurrent_mark_remark_end();
1223 


1730 
1731 };
1732 
1733 void ConcurrentMark::cleanup() {
1734   // world is stopped at this checkpoint
1735   assert(SafepointSynchronize::is_at_safepoint(),
1736          "world should be stopped");
1737   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1738 
1739   // If a full collection has happened, we shouldn't do this.
1740   if (has_aborted()) {
1741     g1h->collector_state()->set_mark_in_progress(false); // So bitmap clearing isn't confused
1742     return;
1743   }
1744 
1745   g1h->verify_region_sets_optional();
1746 
1747   if (VerifyDuringGC) {
1748     HandleMark hm;  // handle scope
1749     g1h->prepare_for_verify();
1750     Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)");
1751   }
1752   g1h->check_bitmaps("Cleanup Start");
1753 
1754   G1CollectorPolicy* g1p = g1h->g1_policy();
1755   g1p->record_concurrent_mark_cleanup_start();
1756 
1757   double start = os::elapsedTime();
1758 
1759   HeapRegionRemSet::reset_for_cleanup_tasks();
1760 
1761   // Do counting once more with the world stopped for good measure.
1762   G1ParFinalCountTask g1_par_count_task(g1h, &_region_bm, &_card_bm);
1763 
1764   g1h->workers()->run_task(&g1_par_count_task);
1765 
1766   if (VerifyDuringGC) {
1767     // Verify that the counting data accumulated during marking matches
1768     // that calculated by walking the marking bitmap.
1769 
1770     // Bitmaps to hold expected values
1771     BitMap expected_region_bm(_region_bm.size(), true);
1772     BitMap expected_card_bm(_card_bm.size(), true);
1773 
1774     G1ParVerifyFinalCountTask g1_par_verify_task(g1h,
1775                                                  &_region_bm,
1776                                                  &_card_bm,
1777                                                  &expected_region_bm,
1778                                                  &expected_card_bm);
1779 
1780     g1h->workers()->run_task(&g1_par_verify_task);
1781 
1782     guarantee(g1_par_verify_task.failures() == 0, "Unexpected accounting failures");
1783   }
1784 
1785   size_t start_used_bytes = g1h->used();
1786   g1h->collector_state()->set_mark_in_progress(false);
1787 
1788   double count_end = os::elapsedTime();
1789   double this_final_counting_time = (count_end - start);
1790   _total_counting_time += this_final_counting_time;
1791 
1792   if (Log<LOG_TAGS(gc, liveness)>::is_trace()) {
1793     G1PrintRegionLivenessInfoClosure cl("Post-Marking");
1794     _g1h->heap_region_iterate(&cl);
1795   }
1796 
1797   // Install the newly built mark bitmap as "prev".
1798   swapMarkBitMaps();
1799 
1800   g1h->reset_gc_time_stamp();
1801 
1802   uint n_workers = _g1h->workers()->active_workers();
1803 
1804   // Note end of marking in all heap regions.
1805   G1ParNoteEndTask g1_par_note_end_task(g1h, &_cleanup_list, n_workers);
1806   g1h->workers()->run_task(&g1_par_note_end_task);
1807   g1h->check_gc_time_stamps();
1808 
1809   if (!cleanup_list_is_empty()) {
1810     // The cleanup list is not empty, so we'll have to process it
1811     // concurrently. Notify anyone else that might be wanting free
1812     // regions that there will be more free regions coming soon.
1813     g1h->set_free_regions_coming();
1814   }
1815   // Scrub the remembered sets (if enabled) before the record_concurrent_mark_cleanup_end()
1816   // call below, since it affects the metric by which we sort the heap
1817   // regions.
1818   if (G1ScrubRemSets) {
1819     double rs_scrub_start = os::elapsedTime();
1820     G1ParScrubRemSetTask g1_par_scrub_rs_task(g1h, &_region_bm, &_card_bm, n_workers);
1821     g1h->workers()->run_task(&g1_par_scrub_rs_task);
1822 
1823     double rs_scrub_end = os::elapsedTime();
1824     double this_rs_scrub_time = (rs_scrub_end - rs_scrub_start);
1825     _total_rs_scrub_time += this_rs_scrub_time;
1826   }
1827 
1828   // this will also free any regions totally full of garbage objects,
1829   // and sort the regions.
1830   g1h->g1_policy()->record_concurrent_mark_cleanup_end();
1831 
1832   // Statistics.
1833   double end = os::elapsedTime();
1834   _cleanup_times.add((end - start) * 1000.0);
1835 
1836   // Clean up will have freed any regions completely full of garbage.
1837   // Update the soft reference policy with the new heap occupancy.
1838   Universe::update_heap_info_at_gc();
1839 
1840   if (VerifyDuringGC) {
1841     HandleMark hm;  // handle scope
1842     g1h->prepare_for_verify();
1843     Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)");
1844   }
1845 
1846   g1h->check_bitmaps("Cleanup End");
1847 
1848   g1h->verify_region_sets_optional();
1849 
1850   // We need to make this count as a "collection" so any collection pause
1851   // that races with it goes around and waits for completeCleanup to finish.
1852   g1h->increment_total_collections();
1853 
1854   // Clean out dead classes and update Metaspace sizes.
1855   if (ClassUnloadingWithConcurrentMark) {
1856     ClassLoaderDataGraph::purge();
1857   }
1858   MetaspaceGC::compute_new_size();
1859 
1860   // We reclaimed old regions, so we should recalculate the sizes to
1861   // make sure we update the old gen/space data.
1862   g1h->g1mm()->update_sizes();
1863   g1h->allocation_context_stats().update_after_mark();
1864 
1865   g1h->trace_heap_after_concurrent_cycle();
1866 }
1867 
1868 void ConcurrentMark::completeCleanup() {
1869   if (has_aborted()) return;
1870 
1871   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1872 
1873   _cleanup_list.verify_optional();
1874   FreeRegionList tmp_free_list("Tmp Free List");
1875 
1876   log_develop(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
1877                             "cleanup list has %u entries",
1878                             _cleanup_list.length());
1879 
1880   // No one else should be accessing the _cleanup_list at this point,
1881   // so it is not necessary to take any locks
1882   while (!_cleanup_list.is_empty()) {
1883     HeapRegion* hr = _cleanup_list.remove_region(true /* from_head */);
1884     assert(hr != NULL, "Got NULL from a non-empty list");
1885     hr->par_clear();
1886     tmp_free_list.add_ordered(hr);
1887 
1888     // Instead of adding one region at a time to the secondary_free_list,
1889     // we accumulate them in the local list and move them a few at a
1890     // time. This also cuts down on the number of notify_all() calls
1891     // we do during this process. We'll also append the local list when
1892     // _cleanup_list is empty (which means we just removed the last
1893     // region from the _cleanup_list).
1894     if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) ||
1895         _cleanup_list.is_empty()) {
1896       log_develop(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : "
1897                                 "appending %u entries to the secondary_free_list, "
1898                                 "cleanup list still has %u entries",
1899                                 tmp_free_list.length(),
1900                                 _cleanup_list.length());
1901 
1902       {
1903         MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1904         g1h->secondary_free_list_add(&tmp_free_list);
1905         SecondaryFreeList_lock->notify_all();
1906       }
1907 #ifndef PRODUCT
1908       if (G1StressConcRegionFreeing) {
1909         for (uintx i = 0; i < G1StressConcRegionFreeingDelayMillis; ++i) {
1910           os::sleep(Thread::current(), (jlong) 1, false);
1911         }
1912       }
1913 #endif
1914     }
1915   }
1916   assert(tmp_free_list.is_empty(), "post-condition");
1917 }
1918 
1919 // Supporting Object and Oop closures for reference discovery
1920 // and processing during marking


2137     // Skip processing the discovered references if we have
2138     // overflown the global marking stack. Reference objects
2139     // are only discovered once, so it is OK not to
2140     // de-populate the discovered reference lists. We could have
2141     // done so, but the only benefit would be that, when marking
2142     // restarts, fewer reference objects are discovered.
2143     return;
2144   }
2145 
2146   ResourceMark rm;
2147   HandleMark   hm;
2148 
2149   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2150 
2151   // Is alive closure.
2152   G1CMIsAliveClosure g1_is_alive(g1h);
2153 
2154   // Inner scope to exclude the cleaning of the string and symbol
2155   // tables from the displayed time.
2156   {
2157     GCTraceTime(Debug, gc) trace("GC ref-proc", g1h->gc_timer_cm());
2158 
2159     ReferenceProcessor* rp = g1h->ref_processor_cm();
2160 
2161     // See the comment in G1CollectedHeap::ref_processing_init()
2162     // about how reference processing currently works in G1.
2163 
2164     // Set the soft reference policy
2165     rp->setup_policy(clear_all_soft_refs);
2166     assert(_markStack.isEmpty(), "mark stack should be empty");
2167 
2168     // Instances of the 'Keep Alive' and 'Complete GC' closures used
2169     // in serial reference processing. Note these closures are also
2170     // used for serially processing (by the current thread) the
2171     // JNI references during parallel reference processing.
2172     //
2173     // These closures do not need to synchronize with the worker
2174     // threads involved in parallel reference processing as these
2175     // instances are executed serially by the current thread (i.e.
2176     // reference processing is not multi-threaded and is thus
2177     // performed by the current thread instead of a gang worker).


2227       set_has_overflown();
2228     }
2229 
2230     assert(rp->num_q() == active_workers, "why not");
2231 
2232     rp->enqueue_discovered_references(executor);
2233 
2234     rp->verify_no_references_recorded();
2235     assert(!rp->discovery_enabled(), "Post condition");
2236   }
2237 
2238   if (has_overflown()) {
2239     // We cannot trust g1_is_alive if the marking stack overflowed
2240     return;
2241   }
2242 
2243   assert(_markStack.isEmpty(), "Marking should have completed");
2244 
2245   // Unload Klasses, String, Symbols, Code Cache, etc.
2246   {
2247     GCTraceTime(Debug, gc) trace("Unloading", g1h->gc_timer_cm());
2248 
2249     if (ClassUnloadingWithConcurrentMark) {
2250       bool purged_classes;
2251 
2252       {
2253         GCTraceTime(Trace, gc) trace("System Dictionary Unloading", g1h->gc_timer_cm());
2254         purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
2255       }
2256 
2257       {
2258         GCTraceTime(Trace, gc) trace("Parallel Unloading", g1h->gc_timer_cm());
2259         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
2260       }
2261     }
2262 
2263     if (G1StringDedup::is_enabled()) {
2264       GCTraceTime(Trace, gc) trace("String Deduplication Unlink", g1h->gc_timer_cm());
2265       G1StringDedup::unlink(&g1_is_alive);
2266     }
2267   }
2268 }
2269 
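     // At the end of a cycle the roles of the two bitmaps are exchanged:
     // the fully populated "next" bitmap becomes "prev" (the liveness
     // information consulted until the following cycle completes), and the
     // old "prev" bitmap will be cleared and reused as the new "next".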
2270 void ConcurrentMark::swapMarkBitMaps() {
2271   CMBitMapRO* temp = _prevMarkBitMap;
2272   _prevMarkBitMap  = (CMBitMapRO*)_nextMarkBitMap;
2273   _nextMarkBitMap  = (CMBitMap*)  temp;
2274 }
2275 
2276 // Closure for marking entries in SATB buffers.
2277 class CMSATBBufferClosure : public SATBBufferClosure {
2278 private:
2279   CMTask* _task;
2280   G1CollectedHeap* _g1h;
2281 
2282   // This is very similar to CMTask::deal_with_reference, but with
2283   // more relaxed requirements for the argument, so this must be more
2284   // circumspect about treating the argument as an object.


2365                               true         /* do_termination       */,
2366                               false        /* is_serial            */);
2367       } while (task->has_aborted() && !_cm->has_overflown());
2368       // If we overflow, then we do not want to restart. We instead
2369       // want to abort remark and do concurrent marking again.
2370       task->record_end_time();
2371     }
2372   }
2373 
2374   CMRemarkTask(ConcurrentMark* cm, uint active_workers) :
2375     AbstractGangTask("Par Remark"), _cm(cm) {
2376     _cm->terminator()->reset_for_reuse(active_workers);
2377   }
2378 };
2379 
2380 void ConcurrentMark::checkpointRootsFinalWork() {
2381   ResourceMark rm;
2382   HandleMark   hm;
2383   G1CollectedHeap* g1h = G1CollectedHeap::heap();
2384 
2385   GCTraceTime(Debug, gc) trace("Finalize Marking", g1h->gc_timer_cm());
2386 
2387   g1h->ensure_parsability(false);
2388 
2389   // This is the remark pause, so we will use all active threads.
2390   uint active_workers = g1h->workers()->active_workers();
2391   set_concurrency_and_phase(active_workers, false /* concurrent */);
2392   // Leave _parallel_marking_threads at its value originally
2393   // calculated in the ConcurrentMark constructor and pass
2394   // the number of active workers through the gang in the
2395   // task.
2396 
2397   {
2398     StrongRootsScope srs(active_workers);
2399 
2400     CMRemarkTask remarkTask(this, active_workers);
2401     // We will start all available threads, even if we decide that the
2402     // active_workers will be fewer. The extra ones will just bail out
2403     // immediately.
2404     g1h->workers()->run_task(&remarkTask);
2405   }


2723   // Clear the global region bitmap - it will be filled as part
2724   // of the final counting task.
2725   _region_bm.clear();
2726 
2727   uint max_regions = _g1h->max_regions();
2728   assert(_max_worker_id > 0, "uninitialized");
2729 
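       // Reset each worker's per-task liveness accounting: its card bitmap
       // and its per-region marked-bytes array.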
2730   for (uint i = 0; i < _max_worker_id; i += 1) {
2731     BitMap* task_card_bm = count_card_bitmap_for(i);
2732     size_t* marked_bytes_array = count_marked_bytes_array_for(i);
2733 
2734     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
2735     assert(marked_bytes_array != NULL, "uninitialized");
2736 
2737     memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
2738     task_card_bm->clear();
2739   }
2740 }
2741 
2742 void ConcurrentMark::print_stats() {
2743   LogHandle(gc, stats) log;
2744   if (!log.is_debug()) {
2745     return;
2746   }
2747   log.debug("---------------------------------------------------------------------");
2748   for (size_t i = 0; i < _active_tasks; ++i) {
2749     _tasks[i]->print_stats();
2750     log.debug("---------------------------------------------------------------------");
2751   }
2752 }
2753 
2754 // abandon current marking iteration due to a Full GC
2755 void ConcurrentMark::abort() {
2756   if (!cmThread()->during_cycle() || _has_aborted) {
2757     // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
2758     return;
2759   }
2760 
2761   // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
2762   // concurrent bitmap clearing.
2763   _nextMarkBitMap->clearAll();
2764 
2765   // Note we cannot clear the previous marking bitmap here
2766   // since VerifyDuringGC verifies the objects marked during
2767   // a full GC against the previous bitmap.
2768 
2769   // Clear the liveness counting data
2770   clear_all_count_data();
2771   // Empty mark stack
2772   reset_marking_state();
2773   for (uint i = 0; i < _max_worker_id; ++i) {
2774     _tasks[i]->clear_region_fields();
2775   }
2776   _first_overflow_barrier_sync.abort();
2777   _second_overflow_barrier_sync.abort();
2778   _has_aborted = true;
2779 
2780   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
2781   satb_mq_set.abandon_partial_marking();
2782   // This can be called either during or outside marking, we'll read
2783   // the expected_active value from the SATB queue set.
2784   satb_mq_set.set_active_all_threads(
2785                                  false, /* new active value */
2786                                  satb_mq_set.is_active() /* expected_active */);
2787 
2788   _g1h->trace_heap_after_concurrent_cycle();
2789   _g1h->register_concurrent_cycle_end();
2790 }
2791 
2792 static void print_ms_time_info(const char* prefix, const char* name,
2793                                NumberSeq& ns) {
2794   log_trace(gc, marking, stats, exit)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
2795                                       prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
2796   if (ns.num() > 0) {
2797     log_trace(gc, marking, stats, exit)("%s         [std. dev = %8.2f ms, max = %8.2f ms]",
2798                                         prefix, ns.sd(), ns.maximum());
2799   }
2800 }
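     // Sample line produced by the format above (illustrative values only):
     //      12      remarks: total time =     0.34 s (avg =    28.33 ms).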
2801 
2802 void ConcurrentMark::print_summary_info() {
2803   LogHandle(gc, marking, stats, exit) log;
2804   if (!log.is_trace()) {
2805     return;
2806   }
2807 
2808   log.trace(" Concurrent marking:");
2809   print_ms_time_info("  ", "init marks", _init_times);
2810   print_ms_time_info("  ", "remarks", _remark_times);
2811   {
2812     print_ms_time_info("     ", "final marks", _remark_mark_times);
2813     print_ms_time_info("     ", "weak refs", _remark_weak_ref_times);
2814 
2815   }
2816   print_ms_time_info("  ", "cleanups", _cleanup_times);
2817   log.trace("    Final counting total time = %8.2f s (avg = %8.2f ms).",
2818             _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2819   if (G1ScrubRemSets) {
2820     log.trace("    RS scrub total time = %8.2f s (avg = %8.2f ms).",
2821               _total_rs_scrub_time, (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0));
2822   }
2823   log.trace("  Total stop_world time = %8.2f s.",
2824             (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0);
2825   log.trace("  Total concurrent time = %8.2f s (%8.2f s marking).",
2826             cmThread()->vtime_accum(), cmThread()->vtime_mark_accum());
2827 }
2828 
2829 void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
2830   _parallel_workers->print_worker_threads_on(st);
2831 }
2832 
2833 void ConcurrentMark::print_on_error(outputStream* st) const {
2834   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
2835       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
2836   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
2837   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
2838 }
2839 
2840 // We take a break if someone is trying to stop the world.
2841 bool ConcurrentMark::do_yield_check(uint worker_id) {
2842   if (SuspendibleThreadSet::should_yield()) {
2843     if (worker_id == 0) {
2844       _g1h->g1_policy()->record_concurrent_pause();
2845     }
2846     SuspendibleThreadSet::yield();


3188 
3189   // This keeps claiming and applying the closure to completed buffers
3190   // until we run out of buffers or we need to abort.
3191   while (!has_aborted() &&
3192          satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)) {
3193     regular_clock_call();
3194   }
3195 
3196   _draining_satb_buffers = false;
3197 
3198   assert(has_aborted() ||
3199          concurrent() ||
3200          satb_mq_set.completed_buffers_num() == 0, "invariant");
3201 
3202   // Again, this was a potentially expensive operation; decrease the
3203   // limits to get the regular clock call early.
3204   decrease_limits();
3205 }
3206 
3207 void CMTask::print_stats() {
3208   log_debug(gc, stats)("Marking Stats, task = %u, calls = %d",
3209                        _worker_id, _calls);
3210   log_debug(gc, stats)("  Elapsed time = %1.2lfms, Termination time = %1.2lfms",
3211                        _elapsed_time_ms, _termination_time_ms);
3212   log_debug(gc, stats)("  Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms",
3213                        _step_times_ms.num(), _step_times_ms.avg(),
3214                        _step_times_ms.sd());
3215   log_debug(gc, stats)("                    max = %1.2lfms, total = %1.2lfms",
3216                        _step_times_ms.maximum(), _step_times_ms.sum());
3217 }
3218 
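     // Attempt to steal an entry from another task's queue. Work stealing is
     // what keeps the marking tasks load-balanced until termination.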
3219 bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) {
3220   return _task_queues->steal(worker_id, hash_seed, obj);
3221 }
3222 
3223 /*****************************************************************************
3224 
3225     The do_marking_step(time_target_ms, ...) method is the building
3226     block of the parallel marking framework. It can be called in parallel
3227     with other invocations of do_marking_step() on different tasks
3228     (but only one per task, obviously) and concurrently with the
3229     mutator threads, or during remark, hence it eliminates the need
3230     for two versions of the code. When called during remark, it will
3231     pick up from where the task left off during the concurrent marking
3232     phase. Interestingly, tasks are also claimable during evacuation
3233     pauses, since do_marking_step() ensures that it aborts before
3234     it needs to yield.
3235 


3705 #define G1PPRL_ADDR_BASE_H_FORMAT  " %37s"
3706 #else // _LP64
3707 #define G1PPRL_ADDR_BASE_H_FORMAT  " %21s"
3708 #endif // _LP64
3709 
3710 // For per-region info
3711 #define G1PPRL_TYPE_FORMAT            "   %-4s"
3712 #define G1PPRL_TYPE_H_FORMAT          "   %4s"
3713 #define G1PPRL_BYTE_FORMAT            "  " SIZE_FORMAT_W(9)
3714 #define G1PPRL_BYTE_H_FORMAT          "  %9s"
3715 #define G1PPRL_DOUBLE_FORMAT          "  %14.1f"
3716 #define G1PPRL_DOUBLE_H_FORMAT        "  %14s"
3717 
3718 // For summary info
3719 #define G1PPRL_SUM_ADDR_FORMAT(tag)    "  " tag ":" G1PPRL_ADDR_BASE_FORMAT
3720 #define G1PPRL_SUM_BYTE_FORMAT(tag)    "  " tag ": " SIZE_FORMAT
3721 #define G1PPRL_SUM_MB_FORMAT(tag)      "  " tag ": %1.2f MB"
3722 #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%"
3723 
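     // Illustrative example of a per-region line these formats produce
     // (hypothetical values, column widths approximate; assumes
     // G1PPRL_LINE_PREFIX expands to "###"):
     //   ###  OLD  0x...0000-0x...0000     524288     524288     524288            0.0       4096          0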
3724 G1PrintRegionLivenessInfoClosure::
3725 G1PrintRegionLivenessInfoClosure(const char* phase_name)
3726   : _total_used_bytes(0), _total_capacity_bytes(0),
3727     _total_prev_live_bytes(0), _total_next_live_bytes(0),
3728     _hum_used_bytes(0), _hum_capacity_bytes(0),
3729     _hum_prev_live_bytes(0), _hum_next_live_bytes(0),
3730     _total_remset_bytes(0), _total_strong_code_roots_bytes(0) {
3731   G1CollectedHeap* g1h = G1CollectedHeap::heap();
3732   MemRegion g1_reserved = g1h->g1_reserved();
3733   double now = os::elapsedTime();
3734 
3735   // Print the header of the output.
3736   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now);
3737   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP"

3738                           G1PPRL_SUM_ADDR_FORMAT("reserved")
3739                           G1PPRL_SUM_BYTE_FORMAT("region-size"),
3740                           p2i(g1_reserved.start()), p2i(g1_reserved.end()),
3741                           HeapRegion::GrainBytes);
3742   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3743   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3744                           G1PPRL_TYPE_H_FORMAT
3745                           G1PPRL_ADDR_BASE_H_FORMAT
3746                           G1PPRL_BYTE_H_FORMAT
3747                           G1PPRL_BYTE_H_FORMAT
3748                           G1PPRL_BYTE_H_FORMAT
3749                           G1PPRL_DOUBLE_H_FORMAT
3750                           G1PPRL_BYTE_H_FORMAT
3751                           G1PPRL_BYTE_H_FORMAT,
3752                           "type", "address-range",
3753                           "used", "prev-live", "next-live", "gc-eff",
3754                           "remset", "code-roots");
3755   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3756                           G1PPRL_TYPE_H_FORMAT
3757                           G1PPRL_ADDR_BASE_H_FORMAT
3758                           G1PPRL_BYTE_H_FORMAT
3759                           G1PPRL_BYTE_H_FORMAT
3760                           G1PPRL_BYTE_H_FORMAT
3761                           G1PPRL_DOUBLE_H_FORMAT
3762                           G1PPRL_BYTE_H_FORMAT
3763                           G1PPRL_BYTE_H_FORMAT,
3764                           "", "",
3765                           "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)",
3766                           "(bytes)", "(bytes)");
3767 }
3768 
3769 // It takes as a parameter a reference to one of the _hum_* fields; it
3770 // deduces the corresponding value for a region in a humongous region
3771 // series (either the region size, or what's left if the _hum_* field
3772 // is < the region size), and updates the _hum_* field accordingly.
3773 size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
3774   size_t bytes = 0;
3775   // The > 0 check is to deal with the prev and next live bytes which


3817     _hum_used_bytes      = used_bytes;
3818     _hum_prev_live_bytes = prev_live_bytes;
3819     _hum_next_live_bytes = next_live_bytes;
3820     get_hum_bytes(&used_bytes, &capacity_bytes,
3821                   &prev_live_bytes, &next_live_bytes);
3822     end = bottom + HeapRegion::GrainWords;
3823   } else if (r->is_continues_humongous()) {
3824     get_hum_bytes(&used_bytes, &capacity_bytes,
3825                   &prev_live_bytes, &next_live_bytes);
3826     assert(end == bottom + HeapRegion::GrainWords, "invariant");
3827   }
3828 
3829   _total_used_bytes      += used_bytes;
3830   _total_capacity_bytes  += capacity_bytes;
3831   _total_prev_live_bytes += prev_live_bytes;
3832   _total_next_live_bytes += next_live_bytes;
3833   _total_remset_bytes    += remset_bytes;
3834   _total_strong_code_roots_bytes += strong_code_roots_bytes;
3835 
3836   // Print a line for this particular region.
3837   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3838                           G1PPRL_TYPE_FORMAT
3839                           G1PPRL_ADDR_BASE_FORMAT
3840                           G1PPRL_BYTE_FORMAT
3841                           G1PPRL_BYTE_FORMAT
3842                           G1PPRL_BYTE_FORMAT
3843                           G1PPRL_DOUBLE_FORMAT
3844                           G1PPRL_BYTE_FORMAT
3845                           G1PPRL_BYTE_FORMAT,
3846                           type, p2i(bottom), p2i(end),
3847                           used_bytes, prev_live_bytes, next_live_bytes, gc_eff,
3848                           remset_bytes, strong_code_roots_bytes);
3849 
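       // Returning "false" tells the region iteration to continue with the
       // next region.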
3850   return false;
3851 }
3852 
3853 G1PrintRegionLivenessInfoClosure::~G1PrintRegionLivenessInfoClosure() {
3854   // add static memory usages to remembered set sizes
3855   _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size();
3856   // Print the footer of the output.
3857   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX);
3858   log_trace(gc, liveness)(G1PPRL_LINE_PREFIX
3859                          " SUMMARY"
3860                          G1PPRL_SUM_MB_FORMAT("capacity")
3861                          G1PPRL_SUM_MB_PERC_FORMAT("used")
3862                          G1PPRL_SUM_MB_PERC_FORMAT("prev-live")
3863                          G1PPRL_SUM_MB_PERC_FORMAT("next-live")
3864                          G1PPRL_SUM_MB_FORMAT("remset")
3865                          G1PPRL_SUM_MB_FORMAT("code-roots"),
3866                          bytes_to_mb(_total_capacity_bytes),
3867                          bytes_to_mb(_total_used_bytes),
3868                          perc(_total_used_bytes, _total_capacity_bytes),
3869                          bytes_to_mb(_total_prev_live_bytes),
3870                          perc(_total_prev_live_bytes, _total_capacity_bytes),
3871                          bytes_to_mb(_total_next_live_bytes),
3872                          perc(_total_next_live_bytes, _total_capacity_bytes),
3873                          bytes_to_mb(_total_remset_bytes),
3874                          bytes_to_mb(_total_strong_code_roots_bytes));
3875 }