
src/share/vm/gc_implementation/g1/concurrentMark.cpp

rev 8331 : 8079579: Add SuspendibleThreadSetLeaver and make SuspendibleThreadSet::join()/leave() private
Reviewed-by:


 176       // will have them as guarantees at the beginning / end of the bitmap
 177       // clearing to get some checking in the product.
 178       assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
 179       assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
 180     }
 181 
 182     return false;
 183   }
 184 };
 185 
 186 class ParClearNextMarkBitmapTask : public AbstractGangTask {
 187   ClearBitmapHRClosure* _cl;
 188   HeapRegionClaimer     _hrclaimer;
 189   bool                  _suspendible; // If the task is suspendible, workers must join the STS.
 190 
 191 public:
 192   ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
 193       _cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {}
 194 
 195   void work(uint worker_id) {
 196     if (_suspendible) {
 197       SuspendibleThreadSet::join();
 198     }
 199     G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
 200     if (_suspendible) {
 201       SuspendibleThreadSet::leave();
 202     }
 203   }
 204 };
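
[Editor's note] The explicit join()/leave() bracketing in work() above is exactly what this patch removes: if anything between the two calls returned early, the worker would never leave the STS. The new version of the file (second listing below) replaces the pair with an RAII guard. A minimal sketch of that guard's likely shape, assuming the interface of suspendibleThreadSet.hpp (that header is not part of this webrev, so the details here are an assumption):

  // Sketch only: the real class lives in suspendibleThreadSet.hpp, which this
  // webrev does not include. Pairing join() in the constructor with leave() in
  // the destructor guarantees the worker exits the STS on every path out of
  // work(), early returns included.
  class SuspendibleThreadSetJoiner : public StackObj {
    bool _active; // false turns the guard into a no-op (non-suspendible tasks)
  public:
    SuspendibleThreadSetJoiner(bool active = true) : _active(active) {
      if (_active) {
        SuspendibleThreadSet::join();
      }
    }
    ~SuspendibleThreadSetJoiner() {
      if (_active) {
        SuspendibleThreadSet::leave();
      }
    }
  };
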
 205 
 206 void CMBitMap::clearAll() {
 207   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 208   ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
 209   uint n_workers = g1h->workers()->active_workers();
 210   ParClearNextMarkBitmapTask task(&cl, n_workers, false);
 211   g1h->workers()->run_task(&task);
 212   guarantee(cl.complete(), "Must have completed iteration.");
 213   return;
 214 }
 215 
 216 void CMBitMap::markRange(MemRegion mr) {
 217   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 218   assert(!mr.is_empty(), "unexpected empty region");
 219   assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
 220           ((HeapWord *) mr.end())),
 221          "markRange memory region end is not card aligned");
 222   // convert address range into offset range


 989  * during the barrier sync and join it immediately afterwards. If we
 990  * do not do this, the following deadlock can occur: one thread could
 991  * be in the barrier sync code, waiting for the other thread to also
 992  * sync up, whereas another one could be trying to yield, while also
 993  * waiting for the other threads to sync up too.
 994  *
 995  * Note, however, that this code is also used during remark and in
 996  * this case we should not attempt to leave / enter the STS, otherwise
 997  * we'll either hit an assert (debug / fastdebug) or deadlock
 998  * (product). So we should only leave / enter the STS if we are
 999  * operating concurrently.
1000  *
1001  * Because the thread that does the sync barrier has left the STS, it
1002  * is possible that a Full GC or an evacuation pause could occur. This
1003  * is actually safe, since entering the sync barrier is one of the last
1004  * things do_marking_step() does, and it
1005  * doesn't manipulate any data structures afterwards.
1006  */
1007 
1008 void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {


1009   if (verbose_low()) {
1010     gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
1011   }
1012 
1013   if (concurrent()) {
1014     SuspendibleThreadSet::leave();

1015   }
1016 
1017   bool barrier_aborted = !_first_overflow_barrier_sync.enter();
1018 
1019   if (concurrent()) {
1020     SuspendibleThreadSet::join();
1021   }
1022   // at this point everyone should have synced up and not be doing any
1023   // more work
1024 
1025   if (verbose_low()) {
1026     if (barrier_aborted) {
1027       gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
1028     } else {
1029       gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
1030     }
1031   }
1032 
1033   if (barrier_aborted) {
1034     // If the barrier aborted we ignore the overflow condition and
1035     // just abort the whole marking phase as quickly as possible.
1036     return;
1037   }
1038 
1039   // If we're executing the concurrent phase of marking, reset the marking
1040   // state; otherwise the marking state is reset after reference processing,
1041   // during the remark pause.


1048       // task 0 is responsible for clearing the global data structures
1049       // We should be here because of an overflow. During STW we should
1050       // not clear the overflow flag since we rely on it being true when
1051       // we exit this method to abort the pause and restart concurrent
1052       // marking.
1053       reset_marking_state(true /* clear_overflow */);
1054       force_overflow()->update();
1055 
1056       if (G1Log::fine()) {
1057         gclog_or_tty->gclog_stamp(concurrent_gc_id());
1058         gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
1059       }
1060     }
1061   }
1062 
1063   // after this, each task should reset its own data structures and
1064   // then go into the second barrier
1065 }
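
[Editor's note] The patch title also names a SuspendibleThreadSetLeaver, the mirror-image guard the rewritten barrier methods use (see the second listing below). A matching sketch, under the same assumption about suspendibleThreadSet.hpp:

  // Sketch only, mirroring the joiner: leave() on construction, join() on
  // destruction. The active flag absorbs the old 'if (concurrent())' tests,
  // so remark-time (STW) callers pass false and the guard does nothing.
  class SuspendibleThreadSetLeaver : public StackObj {
    bool _active;
  public:
    SuspendibleThreadSetLeaver(bool active = true) : _active(active) {
      if (_active) {
        SuspendibleThreadSet::leave();
      }
    }
    ~SuspendibleThreadSetLeaver() {
      if (_active) {
        SuspendibleThreadSet::join();
      }
    }
  };
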
1066 
1067 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {


1068   if (verbose_low()) {
1069     gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
1070   }
1071 
1072   if (concurrent()) {
1073     SuspendibleThreadSet::leave();

1074   }
1075 
1076   bool barrier_aborted = !_second_overflow_barrier_sync.enter();
1077 
1078   if (concurrent()) {
1079     SuspendibleThreadSet::join();
1080   }
1081   // at this point everything should be re-initialized and ready to go
1082 
1083   if (verbose_low()) {
1084     if (barrier_aborted) {
1085       gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
1086     } else {
1087       gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
1088     }
1089   }
1090 }
1091 
1092 #ifndef PRODUCT
1093 void ForceOverflowSettings::init() {
1094   _num_remaining = G1ConcMarkForceOverflow;
1095   _force = false;
1096   update();
1097 }
1098 
1099 void ForceOverflowSettings::update() {
1100   if (_num_remaining > 0) {


1111     return true;
1112   } else {
1113     return false;
1114   }
1115 }
1116 #endif // !PRODUCT
1117 
1118 class CMConcurrentMarkingTask: public AbstractGangTask {
1119 private:
1120   ConcurrentMark*       _cm;
1121   ConcurrentMarkThread* _cmt;
1122 
1123 public:
1124   void work(uint worker_id) {
1125     assert(Thread::current()->is_ConcurrentGC_thread(),
1126            "this should only be done by a conc GC thread");
1127     ResourceMark rm;
1128 
1129     double start_vtime = os::elapsedVTime();
1130 
1131     SuspendibleThreadSet::join();

1132 
1133     assert(worker_id < _cm->active_tasks(), "invariant");
1134     CMTask* the_task = _cm->task(worker_id);
1135     the_task->record_start_time();
1136     if (!_cm->has_aborted()) {
1137       do {
1138         double start_vtime_sec = os::elapsedVTime();
1139         double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1140 
1141         the_task->do_marking_step(mark_step_duration_ms,
1142                                   true  /* do_termination */,
1143                                   false /* is_serial */);
1144 
1145         double end_vtime_sec = os::elapsedVTime();
1146         double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
1147         _cm->clear_has_overflown();
1148 
1149         _cm->do_yield_check(worker_id);
1150 
1151         jlong sleep_time_ms;
1152         if (!_cm->has_aborted() && the_task->has_aborted()) {
1153           sleep_time_ms =
1154             (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
1155           SuspendibleThreadSet::leave();

1156           os::sleep(Thread::current(), sleep_time_ms, false);
1157           SuspendibleThreadSet::join();
1158         }
1159       } while (!_cm->has_aborted() && the_task->has_aborted());
1160     }
1161     the_task->record_end_time();
1162     guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
1163 
1164     SuspendibleThreadSet::leave();
1165 
1166     double end_vtime = os::elapsedVTime();
1167     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
1168   }
1169 
1170   CMConcurrentMarkingTask(ConcurrentMark* cm,
1171                           ConcurrentMarkThread* cmt) :
1172       AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
1173 
1174   ~CMConcurrentMarkingTask() { }
1175 };
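
[Editor's note] The throttling arithmetic in work() converts a marking step's virtual time into a proportional back-off sleep. A worked example with hypothetical numbers (the real values depend on G1ConcMarkStepDurationMillis and the pause-time goal):

  // Hypothetical numbers, for illustration only.
  double elapsed_vtime_sec = 0.010;  // the step consumed 10 ms of CPU time
  double sleep_factor      = 0.5;    // stand-in for _cm->sleep_factor()
  jlong  sleep_time_ms     = (jlong)(elapsed_vtime_sec * sleep_factor * 1000.0);
  // sleep_time_ms == 5: after 10 ms of marking work, back off for 5 ms,
  // leaving the STS first so the sleep cannot hold up a safepoint.
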
1176 
1177 // Calculates the number of active workers for a concurrent
1178 // phase.
1179 uint ConcurrentMark::calc_parallel_marking_threads() {
1180   uint n_conc_workers = 0;
1181   if (!UseDynamicNumberOfGCThreads ||
1182       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
1183        !ForceDynamicNumberOfGCThreads)) {
1184     n_conc_workers = max_parallel_marking_threads();




 176       // will have them as guarantees at the beginning / end of the bitmap
 177       // clearing to get some checking in the product.
 178       assert(!_may_yield || _cm->cmThread()->during_cycle(), "invariant");
 179       assert(!_may_yield || !G1CollectedHeap::heap()->mark_in_progress(), "invariant");
 180     }
 181 
 182     return false;
 183   }
 184 };
 185 
 186 class ParClearNextMarkBitmapTask : public AbstractGangTask {
 187   ClearBitmapHRClosure* _cl;
 188   HeapRegionClaimer     _hrclaimer;
 189   bool                  _suspendible; // If the task is suspendible, workers must join the STS.
 190 
 191 public:
 192   ParClearNextMarkBitmapTask(ClearBitmapHRClosure *cl, uint n_workers, bool suspendible) :
 193       _cl(cl), _suspendible(suspendible), AbstractGangTask("Parallel Clear Bitmap Task"), _hrclaimer(n_workers) {}
 194 
 195   void work(uint worker_id) {
 196     SuspendibleThreadSetJoiner sts_join(_suspendible);


 197     G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);



 198   }
 199 };
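
[Editor's note] With the guard gated on _suspendible, the two if-blocks collapse into a single declaration, and leave() can no longer be skipped on any exit path. The two construction shapes, for illustration (the false case is CMBitMap::clearAll() just below; the true case is the concurrent caller, which is outside this hunk):

  ParClearNextMarkBitmapTask stw_task (&cl, n_workers, false /* suspendible */);
  ParClearNextMarkBitmapTask conc_task(&cl, n_workers, true  /* suspendible */);
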
 200 
 201 void CMBitMap::clearAll() {
 202   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 203   ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
 204   uint n_workers = g1h->workers()->active_workers();
 205   ParClearNextMarkBitmapTask task(&cl, n_workers, false);
 206   g1h->workers()->run_task(&task);
 207   guarantee(cl.complete(), "Must have completed iteration.");
 208   return;
 209 }
 210 
 211 void CMBitMap::markRange(MemRegion mr) {
 212   mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
 213   assert(!mr.is_empty(), "unexpected empty region");
 214   assert((offsetToHeapWord(heapWordToOffset(mr.end())) ==
 215           ((HeapWord *) mr.end())),
 216          "markRange memory region end is not card aligned");
 217   // convert address range into offset range


 984  * during the barrier sync and join it immediately afterwards. If we
 985  * do not do this, the following deadlock can occur: one thread could
 986  * be in the barrier sync code, waiting for the other thread to also
 987  * sync up, whereas another one could be trying to yield, while also
 988  * waiting for the other threads to sync up too.
 989  *
 990  * Note, however, that this code is also used during remark and in
 991  * this case we should not attempt to leave / enter the STS, otherwise
 992  * we'll either hit an assert (debug / fastdebug) or deadlock
 993  * (product). So we should only leave / enter the STS if we are
 994  * operating concurrently.
 995  *
 996  * Because the thread that does the sync barrier has left the STS, it
 997  * is possible that a Full GC or an evacuation pause could occur. This
 998  * is actually safe, since entering the sync barrier is one of the last
 999  * things do_marking_step() does, and it
1000  * doesn't manipulate any data structures afterwards.
1001  */
1002 
1003 void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
1004   bool barrier_aborted;
1005 
1006   if (verbose_low()) {
1007     gclog_or_tty->print_cr("[%u] entering first barrier", worker_id);
1008   }
1009 
1010   {
1011     SuspendibleThreadSetLeaver sts_leave(concurrent());
1012     barrier_aborted = !_first_overflow_barrier_sync.enter();
1013   }
1014 





1015   // at this point everyone should have synced up and not be doing any
1016   // more work
1017 
1018   if (verbose_low()) {
1019     if (barrier_aborted) {
1020       gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
1021     } else {
1022       gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
1023     }
1024   }
1025 
1026   if (barrier_aborted) {
1027     // If the barrier aborted we ignore the overflow condition and
1028     // just abort the whole marking phase as quickly as possible.
1029     return;
1030   }
1031 
1032   // If we're executing the concurrent phase of marking, reset the marking
1033   // state; otherwise the marking state is reset after reference processing,
1034   // during the remark pause.


1041       // task 0 is responsible for clearing the global data structures
1042       // We should be here because of an overflow. During STW we should
1043       // not clear the overflow flag since we rely on it being true when
1044       // we exit this method to abort the pause and restart concurrent
1045       // marking.
1046       reset_marking_state(true /* clear_overflow */);
1047       force_overflow()->update();
1048 
1049       if (G1Log::fine()) {
1050         gclog_or_tty->gclog_stamp(concurrent_gc_id());
1051         gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
1052       }
1053     }
1054   }
1055 
1056   // after this, each task should reset its own data structures and
1057   // then go into the second barrier
1058 }
1059 
1060 void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
1061   bool barrier_aborted;
1062 
1063   if (verbose_low()) {
1064     gclog_or_tty->print_cr("[%u] entering second barrier", worker_id);
1065   }
1066 
1067   {
1068     SuspendibleThreadSetLeaver sts_leave(concurrent());
1069     barrier_aborted = !_second_overflow_barrier_sync.enter();
1070   }
1071 





1072   // at this point everything should be re-initialized and ready to go
1073 
1074   if (verbose_low()) {
1075     if (barrier_aborted) {
1076       gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
1077     } else {
1078       gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
1079     }
1080   }
1081 }
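
[Editor's note] Note the inner braces in both rewritten barrier methods: they bound the leaver's lifetime, so its destructor rejoins the STS before anything after the barrier executes. The pattern, abstracted:

  {
    SuspendibleThreadSetLeaver sts_leave(concurrent()); // leave only if concurrent
    barrier_aborted = !_second_overflow_barrier_sync.enter();
  } // destructor rejoins the STS here, even if enter() aborted
  // from this point on the thread is back in the STS
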
1082 
1083 #ifndef PRODUCT
1084 void ForceOverflowSettings::init() {
1085   _num_remaining = G1ConcMarkForceOverflow;
1086   _force = false;
1087   update();
1088 }
1089 
1090 void ForceOverflowSettings::update() {
1091   if (_num_remaining > 0) {


1102     return true;
1103   } else {
1104     return false;
1105   }
1106 }
1107 #endif // !PRODUCT
1108 
1109 class CMConcurrentMarkingTask: public AbstractGangTask {
1110 private:
1111   ConcurrentMark*       _cm;
1112   ConcurrentMarkThread* _cmt;
1113 
1114 public:
1115   void work(uint worker_id) {
1116     assert(Thread::current()->is_ConcurrentGC_thread(),
1117            "this should only be done by a conc GC thread");
1118     ResourceMark rm;
1119 
1120     double start_vtime = os::elapsedVTime();
1121 
1122     {
1123       SuspendibleThreadSetJoiner sts_join;
1124 
1125       assert(worker_id < _cm->active_tasks(), "invariant");
1126       CMTask* the_task = _cm->task(worker_id);
1127       the_task->record_start_time();
1128       if (!_cm->has_aborted()) {
1129         do {
1130           double start_vtime_sec = os::elapsedVTime();
1131           double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
1132 
1133           the_task->do_marking_step(mark_step_duration_ms,
1134                                     true  /* do_termination */,
1135                                     false /* is_serial */);
1136 
1137           double end_vtime_sec = os::elapsedVTime();
1138           double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
1139           _cm->clear_has_overflown();
1140 
1141           _cm->do_yield_check(worker_id);
1142 
1143           jlong sleep_time_ms;
1144           if (!_cm->has_aborted() && the_task->has_aborted()) {
1145             sleep_time_ms =
1146               (jlong) (elapsed_vtime_sec * _cm->sleep_factor() * 1000.0);
1147             {
1148               SuspendibleThreadSetLeaver sts_leave;
1149               os::sleep(Thread::current(), sleep_time_ms, false);
1150             }
1151           }
1152         } while (!_cm->has_aborted() && the_task->has_aborted());
1153       }
1154       the_task->record_end_time();
1155       guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
1156     }

1157 
1158     double end_vtime = os::elapsedVTime();
1159     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
1160   }
1161 
1162   CMConcurrentMarkingTask(ConcurrentMark* cm,
1163                           ConcurrentMarkThread* cmt) :
1164       AbstractGangTask("Concurrent Mark"), _cm(cm), _cmt(cmt) { }
1165 
1166   ~CMConcurrentMarkingTask() { }
1167 };
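
[Editor's note] For context, a hedged sketch of how this task is typically launched; the real driver (ConcurrentMark::markFromRoots(), not shown in this webrev) is the authority, so treat the sequence as illustrative:

  // Illustrative launch sequence; the actual call site is outside this diff.
  CMConcurrentMarkingTask markingTask(this /* ConcurrentMark */, cmThread());
  _parallel_workers->run_task(&markingTask); // each worker enters work(worker_id)
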
1168 
1169 // Calculates the number of active workers for a concurrent
1170 // phase.
1171 uint ConcurrentMark::calc_parallel_marking_threads() {
1172   uint n_conc_workers = 0;
1173   if (!UseDynamicNumberOfGCThreads ||
1174       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
1175        !ForceDynamicNumberOfGCThreads)) {
1176     n_conc_workers = max_parallel_marking_threads();

