/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/g1/concurrentMarkThread.inline.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/vm_operations_g1.hpp"
#include "gc/shared/concurrentGCPhaseManager.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/debug.hpp"

// ======= Concurrent Mark Thread ========

// Check order in EXPAND_CONCURRENT_PHASES
STATIC_ASSERT(ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE <
              ConcurrentGCPhaseManager::IDLE_PHASE);

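// Expander arguments are (tag, optional enum value assignment, title).
// Phases with a NULL title are never wrapped in a G1ConcPhase, so they are
// not individually timed or logged.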
#define EXPAND_CONCURRENT_PHASES(expander)                              \
  expander(ANY, = ConcurrentGCPhaseManager::UNCONSTRAINED_PHASE, NULL)  \
  expander(IDLE, = ConcurrentGCPhaseManager::IDLE_PHASE, NULL)          \
  expander(CONCURRENT_CYCLE,, "Concurrent Cycle")                       \
  expander(CLEAR_CLAIMED_MARKS,, "Concurrent Clear Claimed Marks")      \
  expander(SCAN_ROOT_REGIONS,, "Concurrent Scan Root Regions")          \
  expander(CONCURRENT_MARK,, "Concurrent Mark")                         \
  expander(MARK_FROM_ROOTS,, "Concurrent Mark From Roots")              \
  expander(BEFORE_REMARK,, NULL)                                        \
  expander(REMARK,, NULL)                                               \
  expander(CREATE_LIVE_DATA,, "Concurrent Create Live Data")            \
  expander(COMPLETE_CLEANUP,, "Concurrent Complete Cleanup")            \
  expander(CLEANUP_FOR_NEXT_MARK,, "Concurrent Cleanup for Next Mark")  \
  /* */

class G1ConcurrentPhase : public AllStatic {
public:
  enum {
#define CONCURRENT_PHASE_ENUM(tag, value, ignore_title) tag value,
    EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_ENUM)
#undef CONCURRENT_PHASE_ENUM
    PHASE_ID_LIMIT
  };
};

// The CM thread is created when the G1 garbage collector is used

ConcurrentMarkThread::ConcurrentMarkThread(G1ConcurrentMark* cm) :
  ConcurrentGCThread(),
  _cm(cm),
  _state(Idle),
  _phase_manager_stack(),
  _vtime_accum(0.0),
  _vtime_mark_accum(0.0) {

  set_name("G1 Main Marker");
  create_and_start();
}

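// VoidClosure run by the VM thread as part of the "Pause Remark" VM_CGC_Operation.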
class CMCheckpointRootsFinalClosure: public VoidClosure {

  G1ConcurrentMark* _cm;
public:

  CMCheckpointRootsFinalClosure(G1ConcurrentMark* cm) :
    _cm(cm) {}

  void do_void(){
    _cm->checkpoint_roots_final(false); // !clear_all_soft_refs
  }
};

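// VoidClosure run by the VM thread as part of the "Pause Cleanup" VM_CGC_Operation.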
class CMCleanUp: public VoidClosure {
  G1ConcurrentMark* _cm;
public:

  CMCleanUp(G1ConcurrentMark* cm) :
    _cm(cm) {}

  void do_void(){
    _cm->cleanup();
  }
};

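// Returns how long (in ms) this thread should sleep so that the upcoming
// Remark or Cleanup pause does not violate the MMU goal.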
double ConcurrentMarkThread::mmu_sleep_time(G1Policy* g1_policy, bool remark) {
  // There are 3 reasons to use SuspendibleThreadSetJoiner.
  // 1. To avoid concurrency problems:
  //    - G1MMUTracker::add_pause(), when_sec() and its variants (when_ms() etc.) can be called
  //      concurrently from ConcurrentMarkThread and VMThread.
  // 2. If a GC is currently running but has not yet updated the MMU,
  //    that pause is still taken into account in the MMU calculation.
  // 3. If a GC is currently running, ConcurrentMarkThread waits for it to finish
  //    and then sleeps for the amount of time predicted by delay_to_keep_mmu().
  SuspendibleThreadSetJoiner sts_join;

  const G1Analytics* analytics = g1_policy->analytics();
  double now = os::elapsedTime();
  double prediction_ms = remark ? analytics->predict_remark_time_ms()
                                : analytics->predict_cleanup_time_ms();
  G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
  return mmu_tracker->when_ms(now, prediction_ms);
}

void ConcurrentMarkThread::delay_to_keep_mmu(G1Policy* g1_policy, bool remark) {
  if (g1_policy->adaptive_young_list_length()) {
    jlong sleep_time_ms = mmu_sleep_time(g1_policy, remark);
    if (!cm()->has_aborted() && sleep_time_ms > 0) {
      os::sleep(this, sleep_time_ms, false);
    }
  }
}

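// Scoped timer for a concurrent phase: logs the phase start/end at info level
// on the (gc, marking) tags and registers the phase with the concurrent GC timer.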
class G1ConcPhaseTimer : public GCTraceConcTimeImpl<LogLevel::Info, LOG_TAGS(gc, marking)> {
  G1ConcurrentMark* _cm;

 public:
  G1ConcPhaseTimer(G1ConcurrentMark* cm, const char* title) :
    GCTraceConcTimeImpl<LogLevel::Info, LogTag::_gc, LogTag::_marking>(title),
    _cm(cm)
  {
    _cm->gc_timer_cm()->register_gc_concurrent_start(title);
  }

  ~G1ConcPhaseTimer() {
    _cm->gc_timer_cm()->register_gc_concurrent_end();
  }
};

static const char* const concurrent_phase_names[] = {
#define CONCURRENT_PHASE_NAME(tag, ignore_value, ignore_title) XSTR(tag),
  EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_NAME)
#undef CONCURRENT_PHASE_NAME
  NULL                          // terminator
};
// Verify dense enum assumption.  +1 for terminator.
STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT + 1 ==
              ARRAY_SIZE(concurrent_phase_names));

// Returns the phase number for name, or a negative value if unknown.
static int lookup_concurrent_phase(const char* name) {
  const char* const* names = concurrent_phase_names;
  for (uint i = 0; names[i] != NULL; ++i) {
    if (strcmp(name, names[i]) == 0) {
      return static_cast<int>(i);
    }
  }
  return -1;
}

// The phase must be valid and must have a title.
static const char* lookup_concurrent_phase_title(int phase) {
  static const char* const titles[] = {
#define CONCURRENT_PHASE_TITLE(ignore_tag, ignore_value, title) title,
    EXPAND_CONCURRENT_PHASES(CONCURRENT_PHASE_TITLE)
#undef CONCURRENT_PHASE_TITLE
  };
  // Verify dense enum assumption.
  STATIC_ASSERT(G1ConcurrentPhase::PHASE_ID_LIMIT == ARRAY_SIZE(titles));

  assert(0 <= phase, "precondition");
  assert((uint)phase < ARRAY_SIZE(titles), "precondition");
  const char* title = titles[phase];
  assert(title != NULL, "precondition");
  return title;
}

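// Publishes the given phase on this thread's phase manager stack for the
// duration of the scope, so that request_concurrent_phase() can observe and
// wait for it.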
class G1ConcPhaseManager : public StackObj {
  G1ConcurrentMark* _cm;
  ConcurrentGCPhaseManager _manager;

public:
  G1ConcPhaseManager(int phase, ConcurrentMarkThread* thread) :
    _cm(thread->cm()),
    _manager(phase, thread->phase_manager_stack())
  { }

  ~G1ConcPhaseManager() {
    // Deactivate the manager if marking aborted, to avoid blocking on
    // phase exit when the phase has been requested.
    if (_cm->has_aborted()) {
      _manager.deactivate();
    }
  }

  void set_phase(int phase, bool force) {
    _manager.set_phase(phase, force);
  }
};

// Combine phase management and timing into one convenient utility.
class G1ConcPhase : public StackObj {
  G1ConcPhaseTimer _timer;
  G1ConcPhaseManager _manager;

public:
  G1ConcPhase(int phase, ConcurrentMarkThread* thread) :
    _timer(thread->cm(), lookup_concurrent_phase_title(phase)),
    _manager(phase, thread)
  { }
};

const char* const* ConcurrentMarkThread::concurrent_phases() const {
  return concurrent_phase_names;
}

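// Blocks until the named phase is reached. If the goal is a non-idle phase
// and no cycle is in progress, a concurrent cycle is requested
// (GCCause::_wb_conc_mark). Returns false if the phase name is unknown.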
bool ConcurrentMarkThread::request_concurrent_phase(const char* phase_name) {
  int phase = lookup_concurrent_phase(phase_name);
  if (phase < 0) return false;

  while (!ConcurrentGCPhaseManager::wait_for_phase(phase,
                                                   phase_manager_stack())) {
    assert(phase != G1ConcurrentPhase::ANY, "Wait for ANY phase must succeed");
    if ((phase != G1ConcurrentPhase::IDLE) && !during_cycle()) {
      // If idle and the goal is !idle, start a collection.
      G1CollectedHeap::heap()->collect(GCCause::_wb_conc_mark);
    }
  }
  return true;
}

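// Main loop of the concurrent mark thread. Each iteration waits for a cycle
// to be started and then runs one full concurrent cycle, from clearing
// claimed marks through the cleanup that prepares for the next mark.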
void ConcurrentMarkThread::run_service() {
  _vtime_start = os::elapsedVTime();

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1Policy* g1_policy = g1h->g1_policy();

  G1ConcPhaseManager cpmanager(G1ConcurrentPhase::IDLE, this);

  while (!should_terminate()) {
    // wait until started is set.
    sleepBeforeNextCycle();
    if (should_terminate()) {
      break;
    }

    cpmanager.set_phase(G1ConcurrentPhase::CONCURRENT_CYCLE, false /* force */);

    GCIdMark gc_id_mark;

    cm()->concurrent_cycle_start();

    GCTraceConcTime(Info, gc) tt("Concurrent Cycle");
    {
      ResourceMark rm;
      HandleMark   hm;
      double cycle_start = os::elapsedVTime();

      {
        G1ConcPhase p(G1ConcurrentPhase::CLEAR_CLAIMED_MARKS, this);
        ClassLoaderDataGraph::clear_claimed_marks();
      }

      // We have to ensure that we finish scanning the root regions
      // before the next GC takes place. To ensure this we have to
      // make sure that we do not join the STS until the root regions
      // have been scanned. If we did, then a subsequent GC could block
      // us from joining the STS and proceed without the root regions
      // having been scanned, which would be a correctness issue.

      {
        G1ConcPhase p(G1ConcurrentPhase::SCAN_ROOT_REGIONS, this);
        _cm->scan_root_regions();
      }

      // We would like to use the G1ConcPhase class here, but the "end"
      // logging is inside the loop rather than at the end of a scope,
      // and the timer doesn't support nesting. Instead, mimic the same
      // log output manually.
      {
        G1ConcPhaseManager mark_manager(G1ConcurrentPhase::CONCURRENT_MARK, this);
        jlong mark_start = os::elapsed_counter();
        const char* cm_title =
          lookup_concurrent_phase_title(G1ConcurrentPhase::CONCURRENT_MARK);
        log_info(gc, marking)("%s (%.3fs)",
                              cm_title,
                              TimeHelper::counter_to_seconds(mark_start));
        for (uint iter = 1; !cm()->has_aborted(); ++iter) {
          // Concurrent marking.
          {
            G1ConcPhase p(G1ConcurrentPhase::MARK_FROM_ROOTS, this);
            _cm->mark_from_roots();
          }
          if (cm()->has_aborted()) break;

          // Provide a control point after mark_from_roots.
          {
            G1ConcPhaseManager p(G1ConcurrentPhase::BEFORE_REMARK, this);
          }
          if (cm()->has_aborted()) break;

          // Delay remark pause for MMU.
          double mark_end_time = os::elapsedVTime();
          jlong mark_end = os::elapsed_counter();
          _vtime_mark_accum += (mark_end_time - cycle_start);
          delay_to_keep_mmu(g1_policy, true /* remark */);
          if (cm()->has_aborted()) break;

          // Pause Remark.
          log_info(gc, marking)("%s (%.3fs, %.3fs) %.3fms",
                                cm_title,
                                TimeHelper::counter_to_seconds(mark_start),
                                TimeHelper::counter_to_seconds(mark_end),
                                TimeHelper::counter_to_millis(mark_end - mark_start));
          mark_manager.set_phase(G1ConcurrentPhase::REMARK, false);
          CMCheckpointRootsFinalClosure final_cl(_cm);
          VM_CGC_Operation op(&final_cl, "Pause Remark");
          VMThread::execute(&op);
          if (cm()->has_aborted()) {
            break;
          } else if (!cm()->restart_for_overflow()) {
            break;              // Exit loop if no restart requested.
          } else {
            // Loop to restart for overflow.
            mark_manager.set_phase(G1ConcurrentPhase::CONCURRENT_MARK, false);
            log_info(gc, marking)("%s Restart for Mark Stack Overflow (iteration #%u)",
                                  cm_title, iter);
          }
        }
      }

      if (!cm()->has_aborted()) {
        G1ConcPhase p(G1ConcurrentPhase::CREATE_LIVE_DATA, this);
        cm()->create_live_data();
      }

      double end_time = os::elapsedVTime();
      // Update the total virtual time before doing this, since it will try
      // to measure it to get the vtime for this marking.  We purposely
      // neglect the presumably-short "completeCleanup" phase here.
      _vtime_accum = (end_time - _vtime_start);

      if (!cm()->has_aborted()) {
        delay_to_keep_mmu(g1_policy, false /* cleanup */);

        if (!cm()->has_aborted()) {
          CMCleanUp cl_cl(_cm);
          VM_CGC_Operation op(&cl_cl, "Pause Cleanup");
          VMThread::execute(&op);
        }
      } else {
        // We don't want to update the marking status if a GC pause
        // is already underway.
        SuspendibleThreadSetJoiner sts_join;
        g1h->collector_state()->set_mark_in_progress(false);
      }

      // Check if cleanup set the free_regions_coming flag. If it
      // hasn't, we can just skip the next step.
      if (g1h->free_regions_coming()) {
        // The following will finish freeing up any regions that we
        // found to be empty during cleanup. We'll do this part
        // without joining the suspendible set. If an evacuation pause
        // takes place, then we would carry on freeing regions in
        // case they are needed by the pause. If a Full GC takes
        // place, it would wait for us to process the regions
        // reclaimed by cleanup.

        // Now do the concurrent cleanup operation.
        G1ConcPhase p(G1ConcurrentPhase::COMPLETE_CLEANUP, this);
        _cm->complete_cleanup();

        // Notify anyone who's waiting that there are no more free
        // regions coming. We have to do this before we join the STS
        // (in fact, we should not attempt to join the STS in the
        // interval between finishing the cleanup pause and clearing
        // the free_regions_coming flag) otherwise we might deadlock:
        // a GC worker could be blocked waiting for the notification
        // whereas this thread will be blocked for the pause to finish
        // while it's trying to join the STS, which is conditional on
        // the GC workers finishing.
        g1h->reset_free_regions_coming();
      }
      guarantee(cm()->cleanup_list_is_empty(),
                "at this point there should be no regions on the cleanup list");

      // There is a tricky race between recording that the concurrent
      // cleanup has completed and a potential Full GC starting around
      // the same time. We want to make sure that the Full GC calls
      // abort() on concurrent mark after
      // record_concurrent_mark_cleanup_completed(), since abort() is
      // the method that will reset the concurrent mark state. If we
      // end up calling record_concurrent_mark_cleanup_completed()
      // after abort() then we might incorrectly undo some of the work
      // abort() did. Checking the has_aborted() flag after joining
      // the STS allows the correct ordering of the two methods. There
      // are two scenarios:
      //
      // a) If we reach here before the Full GC, the fact that we have
      // joined the STS means that the Full GC cannot start until we
      // leave the STS, so record_concurrent_mark_cleanup_completed()
      // will complete before abort() is called.
      //
      // b) If we reach here during the Full GC, we'll be held up from
      // joining the STS until the Full GC is done, which means that
      // abort() will have completed and has_aborted() will return
      // true to prevent us from calling
      // record_concurrent_mark_cleanup_completed() (and, in fact, it's
      // not needed any more as the concurrent mark state has already
      // been reset).
      {
        SuspendibleThreadSetJoiner sts_join;
        if (!cm()->has_aborted()) {
          g1_policy->record_concurrent_mark_cleanup_completed();
        } else {
          log_info(gc, marking)("Concurrent Mark Abort");
        }
      }

      // We now want to allow clearing of the marking bitmap to be
      // suspended by a collection pause.
      // We may have aborted just before the remark. Do not bother clearing the
      // bitmap then, as it has been done during mark abort.
      if (!cm()->has_aborted()) {
        G1ConcPhase p(G1ConcurrentPhase::CLEANUP_FOR_NEXT_MARK, this);
        _cm->cleanup_for_next_mark();
      } else {
        assert(!G1VerifyBitmaps || _cm->next_mark_bitmap_is_clear(), "Next mark bitmap must be clear");
      }
    }

    // Update the number of full collections that have been
    // completed. This will also notify the FullGCCount_lock in case a
    // Java thread is waiting for a full GC to happen (e.g., it
    // called System.gc() with +ExplicitGCInvokesConcurrent).
    {
      SuspendibleThreadSetJoiner sts_join;
      g1h->increment_old_marking_cycles_completed(true /* concurrent */);

      cm()->concurrent_cycle_end();
    }

    cpmanager.set_phase(G1ConcurrentPhase::IDLE, cm()->has_aborted() /* force */);
  }
  _cm->root_regions()->cancel_scan();
}

void ConcurrentMarkThread::stop_service() {
  MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
  CGC_lock->notify_all();
}

void ConcurrentMarkThread::sleepBeforeNextCycle() {
  // We take CGC_lock here because we don't want to check the "started" flag
  // below while the world is otherwise stopped.
  assert(!in_progress(), "should have been cleared");

  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  while (!started() && !should_terminate()) {
    CGC_lock->wait(Mutex::_no_safepoint_check_flag);
  }

  if (started()) {
    set_in_progress();
  }
}