src/share/vm/gc_implementation/g1/concurrentMark.cpp

rev 6923 : imported patch 8054819-rename-heapregionseq
rev 6924 : 8055919: Remove dead code in G1 concurrent marking code
Reviewed-by:
rev 6925 : [mq]: jesper-fixes


 417 
 418 void CMMarkStack::note_end_of_gc() {
 419   // This is intentionally a guarantee, instead of an assert. If we
 420   // accidentally add something to the mark stack during GC, it
 421   // will be a correctness issue so it's better if we crash. We'll
 422   // only check this once per GC anyway, so it won't be a performance
 423   // issue in any way.
 424   guarantee(_saved_index == _index,
 425             err_msg("saved index: %d index: %d", _saved_index, _index));
 426   _saved_index = -1;
 427 }
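
As the comment above notes, guarantee() still fires in product builds, whereas assert() is compiled out there, so a mark stack that was pushed to during a pause would also crash a release VM. A minimal sketch of that difference, with the message text hypothetical:

    // Debug-only check: disappears entirely in product builds.
    assert(_saved_index == _index, "mark stack touched during GC");
    // Product check: still aborts a release VM if the invariant is broken.
    guarantee(_saved_index == _index, "mark stack touched during GC");
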
 428 
 429 void CMMarkStack::oops_do(OopClosure* f) {
 430   assert(_saved_index == _index,
 431          err_msg("saved index: %d index: %d", _saved_index, _index));
 432   for (int i = 0; i < _index; i += 1) {
 433     f->do_oop(&_base[i]);
 434   }
 435 }
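
oops_do() simply hands every outstanding mark-stack entry to an OopClosure. A minimal sketch of a caller, assuming only the standard do_oop(oop*)/do_oop(narrowOop*) interface and the _markStack field name used elsewhere in this class; the closure itself is hypothetical:

    class CountLiveClosure : public OopClosure {
      size_t _count;
     public:
      CountLiveClosure() : _count(0) { }
      void do_oop(oop* p)       { if (*p != NULL) _count++; }
      void do_oop(narrowOop* p) { ShouldNotReachHere(); } // stack holds full oops
      size_t count() const      { return _count; }
    };

    CountLiveClosure cl;
    _markStack.oops_do(&cl);   // visit every entry still on the global mark stack
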
 436 
 437 bool ConcurrentMark::not_yet_marked(oop obj) const {
 438   return _g1h->is_obj_ill(obj);
 439 }
 440 
 441 CMRootRegions::CMRootRegions() :
 442   _young_list(NULL), _cm(NULL), _scan_in_progress(false),
 443   _should_abort(false),  _next_survivor(NULL) { }
 444 
 445 void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
 446   _young_list = g1h->young_list();
 447   _cm = cm;
 448 }
 449 
 450 void CMRootRegions::prepare_for_scan() {
 451   assert(!scan_in_progress(), "pre-condition");
 452 
 453   // Currently, only survivors can be root regions.
 454   assert(_next_survivor == NULL, "pre-condition");
 455   _next_survivor = _young_list->first_survivor_region();
 456   _scan_in_progress = (_next_survivor != NULL);
 457   _should_abort = false;
 458 }
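
prepare_for_scan() only records the head of the survivor list; claim_next(), declared below, is what hands those regions out to the concurrent workers. The real body is not shown in this hunk, but a hypothetical, simplified claiming loop could look like this, assuming the young list's get_next_young_region() chaining and an Atomic::cmpxchg_ptr-style primitive:

    // Hypothetical sketch, not the real CMRootRegions::claim_next() body.
    HeapRegion* volatile _cursor;                           // assumed shared cursor

    HeapRegion* claim_next_survivor() {
      while (true) {
        HeapRegion* cur = _cursor;
        if (cur == NULL) return NULL;                       // scan is finished
        HeapRegion* next = cur->get_next_young_region();    // young-list linkage
        // Atomically advance the cursor so each survivor is claimed exactly once.
        if ((HeapRegion*) Atomic::cmpxchg_ptr(next, &_cursor, cur) == cur) {
          return cur;
        }
      }
    }
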
 459 
 460 HeapRegion* CMRootRegions::claim_next() {


1100 private:
1101   ConcurrentMark*       _cm;
1102   ConcurrentMarkThread* _cmt;
1103 
1104 public:
1105   void work(uint worker_id) {
1106     assert(Thread::current()->is_ConcurrentGC_thread(),
1107            "this should only be done by a conc GC thread");
1108     ResourceMark rm;
1109 
1110     double start_vtime = os::elapsedVTime();
1111 
1112     SuspendibleThreadSet::join();
1113 
1114     assert(worker_id < _cm->active_tasks(), "invariant");
1115     CMTask* the_task = _cm->task(worker_id);
1116     the_task->record_start_time();
1117     if (!_cm->has_aborted()) {
1118       do {
1119         double start_vtime_sec = os::elapsedVTime();
1120         double start_time_sec = os::elapsedTime();
1121         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1122 
1123         the_task->do_marking_step(mark_step_duration_ms,
1124                                   true  /* do_termination */,
1125                                   false /* is_serial*/);
1126 
1127         double end_time_sec = os::elapsedTime();
1128         double end_vtime_sec = os::elapsedVTime();
1129         double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
1130         double elapsed_time_sec = end_time_sec - start_time_sec;
1131         _cm->clear_has_overflown();
1132 
1133         bool ret = _cm->do_yield_check(worker_id);
1134 
1135         jlong sleep_time_ms;
1136         if (!_cm->has_aborted() && the_task->has_aborted()) {
1137           sleep_time_ms =
1138             (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
1139           SuspendibleThreadSet::leave();
1140           os::sleep(Thread::current(), sleep_time_ms, false);
1141           SuspendibleThreadSet::join();
1142         }
1143         double end_time2_sec = os::elapsedTime();
1144         double elapsed_time2_sec = end_time2_sec - start_time_sec;
1145 
1146 #if 0
1147           gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
1148                                  "overhead %1.4lf",
1149                                  elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
1150                                  the_task->conc_overhead(os::elapsedTime()) * 8.0);
1151           gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
1152                                  elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
1153 #endif
1154       } while (!_cm->has_aborted() && the_task->has_aborted());
1155     }
1156     the_task->record_end_time();
1157     guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
1158 
1159     SuspendibleThreadSet::leave();
1160 
1161     double end_vtime = os::elapsedVTime();
1162     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
1163   }
1164 
1165   CMConcurrentMarkingTask(ConcurrentMark* cm,
1166                           ConcurrentMarkThread* cmt) :
1167       AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
1168 
1169   ~CMConcurrentMarkingTask() { }
1170 };
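
The sleep after an aborted marking step is what throttles concurrent marking to its target CPU overhead: the worker sleeps for elapsed_vtime_sec * sleep_factor seconds. Assuming sleep_factor is derived from a target marking overhead as (1 - overhead) / overhead (the derivation is not shown in this hunk), a quick worked example:

    // Hypothetical numbers, for illustration only.
    double target_overhead = 0.20;                        // aim: ~20% of a CPU per marker
    double sleep_factor    = (1.0 - target_overhead) / target_overhead;        // = 4.0
    double step_vtime_sec  = 0.008;                       // the step burned 8 ms of CPU
    jlong  sleep_time_ms   = (jlong)(step_vtime_sec * sleep_factor * 1000.0);  // ~32 ms
    // 8 ms of work followed by ~32 ms of sleep keeps the worker near 8/40 = 20%.
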
1171 
1172 // Calculates the number of active workers for a concurrent
1173 // phase.


2932   PrintReachableRegionClosure rcl(out, vo, all);
2933   _g1h->heap_region_iterate(&rcl);
2934   out->cr();
2935 
2936   gclog_or_tty->print_cr("  done");
2937   gclog_or_tty->flush();
2938 }
2939 
2940 #endif // PRODUCT
2941 
2942 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2943   // Note we are overriding the read-only view of the prev map here, via
2944   // the cast.
2945   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2946 }
2947 
2948 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2949   _nextMarkBitMap->clearRange(mr);
2950 }
2951 
2952 void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
2953   clearRangePrevBitmap(mr);
2954   clearRangeNextBitmap(mr);
2955 }
2956 
2957 HeapRegion*
2958 ConcurrentMark::claim_region(uint worker_id) {
2959   // "checkpoint" the finger
2960   HeapWord* finger = _finger;
2961 
2962   // _heap_end will not change underneath our feet; it only changes at
2963   // yield points.
2964   while (finger < _heap_end) {
2965     assert(_g1h->is_in_g1_reserved(finger), "invariant");
2966 
2967     // Note on how this code handles humongous regions. In the
2968     // normal case the finger will reach the start of a "starts
2969     // humongous" (SH) region. Its end will either be the end of the
2970     // last "continues humongous" (CH) region in the sequence, or the
2971     // standard end of the SH region (if the SH is the only region in
2972     // the sequence). That way claim_region() will skip over the CH
2973     // regions. However, there is a subtle race between a CM thread
2974     // executing this method and a mutator thread doing a humongous
2975     // object allocation. The two are not mutually exclusive as the CM
2976     // thread does not need to hold the Heap_lock when it gets
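
The hunk ends mid-comment, but the claiming step it is building up to is essentially a compare-and-swap on the global finger: compute the candidate region's end, try to move _finger there, and only the thread whose CAS succeeds owns the region. A simplified sketch of that pattern, assuming an Atomic::cmpxchg_ptr-style primitive; the real method additionally has to cope with the humongous-allocation race described above:

    // Hypothetical, simplified claiming step, not the full claim_region() body.
    HeapRegion* curr_region = _g1h->heap_region_containing(finger);
    HeapWord*   end         = curr_region->end();
    // Try to bump the global finger past this region; exactly one worker wins.
    HeapWord*   res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
    if (res == finger) {
      // We claimed [finger, end); hand the region to this worker.
      return curr_region;
    }
    // Somebody else moved the finger; re-read it and retry from the loop above.
    finger = res;
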


3482 void ConcurrentMark::print_on_error(outputStream* st) const {
3483   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3484       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
3485   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3486   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3487 }
3488 
3489 // We take a break if someone is trying to stop the world.
3490 bool ConcurrentMark::do_yield_check(uint worker_id) {
3491   if (SuspendibleThreadSet::should_yield()) {
3492     if (worker_id == 0) {
3493       _g1h->g1_policy()->record_concurrent_pause();
3494     }
3495     SuspendibleThreadSet::yield();
3496     return true;
3497   } else {
3498     return false;
3499   }
3500 }
3501 
3502 bool ConcurrentMark::containing_card_is_marked(void* p) {
3503   size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
3504   return _card_bm.at(offset >> CardTableModRefBS::card_shift);
3505 }
3506 
3507 bool ConcurrentMark::containing_cards_are_marked(void* start,
3508                                                  void* last) {
3509   return containing_card_is_marked(start) &&
3510          containing_card_is_marked(last);
3511 }
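
containing_card_is_marked() maps an arbitrary address to a bit in _card_bm by taking its byte offset from the start of the reserved heap and shifting by the card size. Assuming the usual 512-byte cards (CardTableModRefBS::card_shift == 9), a quick worked example with hypothetical addresses:

    void*  heap_start = _g1h->reserved_region().start();
    void*  p          = (char*) heap_start + 4096;               // 4 KB into the heap
    size_t offset     = pointer_delta(p, heap_start, 1);         // = 4096 bytes
    size_t card_index = offset >> CardTableModRefBS::card_shift; // 4096 / 512 = card 8
    bool   marked     = _card_bm.at(card_index);                 // one bit per card
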
3512 
3513 #ifndef PRODUCT
3514 // for debugging purposes
3515 void ConcurrentMark::print_finger() {
3516   gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3517                          p2i(_heap_start), p2i(_heap_end), p2i(_finger));
3518   for (uint i = 0; i < _max_worker_id; ++i) {
3519     gclog_or_tty->print("   %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
3520   }
3521   gclog_or_tty->cr();
3522 }
3523 #endif
3524 
3525 void CMTask::scan_object(oop obj) {
3526   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3527 
3528   if (_cm->verbose_high()) {
3529     gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
3530                            _worker_id, p2i((void*) obj));
3531   }
3532 


3745     return;
3746   }
3747 
3748   double curr_time_ms = os::elapsedVTime() * 1000.0;
3749 
3750   // (3) If marking stats are enabled, then we update the step history.
3751 #if _MARKING_STATS_
3752   if (_words_scanned >= _words_scanned_limit) {
3753     ++_clock_due_to_scanning;
3754   }
3755   if (_refs_reached >= _refs_reached_limit) {
3756     ++_clock_due_to_marking;
3757   }
3758 
3759   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3760   _interval_start_time_ms = curr_time_ms;
3761   _all_clock_intervals_ms.add(last_interval_ms);
3762 
3763   if (_cm->verbose_medium()) {
3764       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3765                         "scanned = %d%s, refs reached = %d%s",
3766                         _worker_id, last_interval_ms,
3767                         _words_scanned,
3768                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3769                         _refs_reached,
3770                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3771   }
3772 #endif // _MARKING_STATS_
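
_MARKING_STATS_ and the statsOnly() wrapper used further down are build-time switches: when the define is off, the bookkeeping above and every statsOnly(...) statement are compiled away. The actual macro lives in the header and is not shown here, but a plausible shape for it would be:

    #if _MARKING_STATS_
    #define statsOnly(statement) do { statement; } while (0)
    #else
    #define statsOnly(statement)
    #endif // _MARKING_STATS_
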
3773 
3774   // (4) We check whether we should yield. If we have to, then we abort.
3775   if (SuspendibleThreadSet::should_yield()) {
3776     // We should yield. To do this we abort the task. The caller is
3777     // responsible for yielding.
3778     set_has_aborted();
3779     statsOnly( ++_aborted_yield );
3780     return;
3781   }
3782 
3783   // (5) We check whether we've reached our time quota. If we have,
3784   // then we abort.
3785   double elapsed_time_ms = curr_time_ms - _start_time_ms;




 417 
 418 void CMMarkStack::note_end_of_gc() {
 419   // This is intentionally a guarantee, instead of an assert. If we
 420   // accidentally add something to the mark stack during GC, it
 421   // will be a correctness issue so it's better if we crash. We'll
 422   // only check this once per GC anyway, so it won't be a performance
 423   // issue in any way.
 424   guarantee(_saved_index == _index,
 425             err_msg("saved index: %d index: %d", _saved_index, _index));
 426   _saved_index = -1;
 427 }
 428 
 429 void CMMarkStack::oops_do(OopClosure* f) {
 430   assert(_saved_index == _index,
 431          err_msg("saved index: %d index: %d", _saved_index, _index));
 432   for (int i = 0; i < _index; i += 1) {
 433     f->do_oop(&_base[i]);
 434   }
 435 }
 436 




 437 CMRootRegions::CMRootRegions() :
 438   _young_list(NULL), _cm(NULL), _scan_in_progress(false),
 439   _should_abort(false),  _next_survivor(NULL) { }
 440 
 441 void CMRootRegions::init(G1CollectedHeap* g1h, ConcurrentMark* cm) {
 442   _young_list = g1h->young_list();
 443   _cm = cm;
 444 }
 445 
 446 void CMRootRegions::prepare_for_scan() {
 447   assert(!scan_in_progress(), "pre-condition");
 448 
 449   // Currently, only survivors can be root regions.
 450   assert(_next_survivor == NULL, "pre-condition");
 451   _next_survivor = _young_list->first_survivor_region();
 452   _scan_in_progress = (_next_survivor != NULL);
 453   _should_abort = false;
 454 }
 455 
 456 HeapRegion* CMRootRegions::claim_next() {


1096 private:
1097   ConcurrentMark*       _cm;
1098   ConcurrentMarkThread* _cmt;
1099 
1100 public:
1101   void work(uint worker_id) {
1102     assert(Thread::current()->is_ConcurrentGC_thread(),
1103            "this should only be done by a conc GC thread");
1104     ResourceMark rm;
1105 
1106     double start_vtime = os::elapsedVTime();
1107 
1108     SuspendibleThreadSet::join();
1109 
1110     assert(worker_id < _cm->active_tasks(), "invariant");
1111     CMTask* the_task = _cm->task(worker_id);
1112     the_task->record_start_time();
1113     if (!_cm->has_aborted()) {
1114       do {
1115         double start_vtime_sec = os::elapsedVTime();

1116         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1117 
1118         the_task->do_marking_step(mark_step_duration_ms,
1119                                   true  /* do_termination */,
1120                                   false /* is_serial*/);
1121 

1122         double end_vtime_sec = os::elapsedVTime();
1123         double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;

1124         _cm->clear_has_overflown();
1125 
1126         _cm->do_yield_check(worker_id);
1127 
1128         jlong sleep_time_ms;
1129         if (!_cm->has_aborted() && the_task->has_aborted()) {
1130           sleep_time_ms =
1131             (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
1132           SuspendibleThreadSet::leave();
1133           os::sleep(Thread::current(), sleep_time_ms, false);
1134           SuspendibleThreadSet::join();
1135         }











1136       } while (!_cm->has_aborted() && the_task->has_aborted());
1137     }
1138     the_task->record_end_time();
1139     guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
1140 
1141     SuspendibleThreadSet::leave();
1142 
1143     double end_vtime = os::elapsedVTime();
1144     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
1145   }
1146 
1147   CMConcurrentMarkingTask(ConcurrentMark* cm,
1148                           ConcurrentMarkThread* cmt) :
1149       AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
1150 
1151   ~CMConcurrentMarkingTask() { }
1152 };
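
The task itself only defines the per-worker loop; the caller (elided from this webrev) presumably constructs it once and hands it to the marking work gang. A hypothetical usage sketch, with the cmThread() accessor and _parallel_workers field name assumed rather than taken from this hunk:

    // Hypothetical driver code, not the actual markFromRoots() body.
    CMConcurrentMarkingTask markingTask(this /* ConcurrentMark */, cmThread());
    if (_parallel_workers != NULL) {
      _parallel_workers->run_task(&markingTask);  // one work(worker_id) call per worker
    } else {
      markingTask.work(0);                        // single-threaded fallback
    }
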
1153 
1154 // Calculates the number of active workers for a concurrent
1155 // phase.


2914   PrintReachableRegionClosure rcl(out, vo, all);
2915   _g1h->heap_region_iterate(&rcl);
2916   out->cr();
2917 
2918   gclog_or_tty->print_cr("  done");
2919   gclog_or_tty->flush();
2920 }
2921 
2922 #endif // PRODUCT
2923 
2924 void ConcurrentMark::clearRangePrevBitmap(MemRegion mr) {
2925   // Note we are overriding the read-only view of the prev map here, via
2926   // the cast.
2927   ((CMBitMap*)_prevMarkBitMap)->clearRange(mr);
2928 }
2929 
2930 void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
2931   _nextMarkBitMap->clearRange(mr);
2932 }
2933 





2934 HeapRegion*
2935 ConcurrentMark::claim_region(uint worker_id) {
2936   // "checkpoint" the finger
2937   HeapWord* finger = _finger;
2938 
2939   // _heap_end will not change underneath our feet; it only changes at
2940   // yield points.
2941   while (finger < _heap_end) {
2942     assert(_g1h->is_in_g1_reserved(finger), "invariant");
2943 
2944     // Note on how this code handles humongous regions. In the
2945     // normal case the finger will reach the start of a "starts
2946     // humongous" (SH) region. Its end will either be the end of the
2947     // last "continues humongous" (CH) region in the sequence, or the
2948     // standard end of the SH region (if the SH is the only region in
2949     // the sequence). That way claim_region() will skip over the CH
2950     // regions. However, there is a subtle race between a CM thread
2951     // executing this method and a mutator thread doing a humongous
2952     // object allocation. The two are not mutually exclusive as the CM
2953     // thread does not need to hold the Heap_lock when it gets


3459 void ConcurrentMark::print_on_error(outputStream* st) const {
3460   st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
3461       p2i(_prevMarkBitMap), p2i(_nextMarkBitMap));
3462   _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
3463   _nextMarkBitMap->print_on_error(st, " Next Bits: ");
3464 }
3465 
3466 // We take a break if someone is trying to stop the world.
3467 bool ConcurrentMark::do_yield_check(uint worker_id) {
3468   if (SuspendibleThreadSet::should_yield()) {
3469     if (worker_id == 0) {
3470       _g1h->g1_policy()->record_concurrent_pause();
3471     }
3472     SuspendibleThreadSet::yield();
3473     return true;
3474   } else {
3475     return false;
3476   }
3477 }
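
do_yield_check() is one half of the suspendible-thread protocol that the marking workers follow; the other half is the join()/leave() bracketing visible in CMConcurrentMarkingTask::work() above. Reduced to its skeleton, with work_remaining() and do_some_work() as placeholder names, the protocol looks roughly like this:

    SuspendibleThreadSet::join();          // make this thread visible to safepoints
    while (work_remaining()) {
      do_some_work();
      if (SuspendibleThreadSet::should_yield()) {
        SuspendibleThreadSet::yield();     // block here while the VM thread runs a pause
      }
    }
    SuspendibleThreadSet::leave();         // stop participating in the protocol
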
3478 











3479 #ifndef PRODUCT
3480 // for debugging purposes
3481 void ConcurrentMark::print_finger() {
3482   gclog_or_tty->print_cr("heap ["PTR_FORMAT", "PTR_FORMAT"), global finger = "PTR_FORMAT,
3483                          p2i(_heap_start), p2i(_heap_end), p2i(_finger));
3484   for (uint i = 0; i < _max_worker_id; ++i) {
3485     gclog_or_tty->print("   %u: " PTR_FORMAT, i, p2i(_tasks[i]->finger()));
3486   }
3487   gclog_or_tty->cr();
3488 }
3489 #endif
3490 
3491 void CMTask::scan_object(oop obj) {
3492   assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
3493 
3494   if (_cm->verbose_high()) {
3495     gclog_or_tty->print_cr("[%u] we're scanning object "PTR_FORMAT,
3496                            _worker_id, p2i((void*) obj));
3497   }
3498 


3711     return;
3712   }
3713 
3714   double curr_time_ms = os::elapsedVTime() * 1000.0;
3715 
3716   // (3) If marking stats are enabled, then we update the step history.
3717 #if _MARKING_STATS_
3718   if (_words_scanned >= _words_scanned_limit) {
3719     ++_clock_due_to_scanning;
3720   }
3721   if (_refs_reached >= _refs_reached_limit) {
3722     ++_clock_due_to_marking;
3723   }
3724 
3725   double last_interval_ms = curr_time_ms - _interval_start_time_ms;
3726   _interval_start_time_ms = curr_time_ms;
3727   _all_clock_intervals_ms.add(last_interval_ms);
3728 
3729   if (_cm->verbose_medium()) {
3730       gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
3731                         "scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
3732                         _worker_id, last_interval_ms,
3733                         _words_scanned,
3734                         (_words_scanned >= _words_scanned_limit) ? " (*)" : "",
3735                         _refs_reached,
3736                         (_refs_reached >= _refs_reached_limit) ? " (*)" : "");
3737   }
3738 #endif // _MARKING_STATS_
3739 
3740   // (4) We check whether we should yield. If we have to, then we abort.
3741   if (SuspendibleThreadSet::should_yield()) {
3742     // We should yield. To do this we abort the task. The caller is
3743     // responsible for yielding.
3744     set_has_aborted();
3745     statsOnly( ++_aborted_yield );
3746     return;
3747   }
3748 
3749   // (5) We check whether we've reached our time quota. If we have,
3750   // then we abort.
3751   double elapsed_time_ms = curr_time_ms - _start_time_ms;