629 assert(!concurrent_marking_in_progress(), "invariant");
630 assert(out_of_regions(),
631 "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
632 p2i(_finger), p2i(_heap_end));
633 }
634 }
635
// Restore the global marking state to its idle defaults. Called when no
// marking cycle is active: drops per-cycle marking data, zeroes the count
// of active marking tasks, and clears the concurrent-marking-in-progress
// flag so subsequent queries see "not marking".
636 void G1ConcurrentMark::set_non_marking_state() {
637 // We set the global marking state to some default values when we're
638 // not doing marking.
639 reset_marking_state();
640 _active_tasks = 0;
641 clear_concurrent_marking_in_progress();
642 }
643
// Guard destructor: the single G1ConcurrentMark instance lives for the
// lifetime of the VM, so executing this destructor indicates a bug and
// aborts via ShouldNotReachHere().
644 G1ConcurrentMark::~G1ConcurrentMark() {
645 // The G1ConcurrentMark instance is never freed.
646 ShouldNotReachHere();
647 }
648
// Parallel gang task that clears a G1 mark bitmap region by region.
// Each worker claims regions via the HeapRegionClaimer and clears the
// bitmap words for its regions in M-sized chunks, optionally yielding
// to the SuspendibleThreadSet between chunks (concurrent use) or running
// straight through (safepoint use, _cm == NULL in the closure).
649 class G1ClearBitMapTask : public AbstractGangTask {
650 public:
651 static size_t chunk_size() { return M; }
652
653 private:
654 // Heap region closure used for clearing the given mark bitmap.
655 class G1ClearBitmapHRClosure : public HeapRegionClosure {
656 private:
657 G1CMBitMap* _bitmap;
658 G1ConcurrentMark* _cm;
659 public:
// A NULL _cm means the clear is non-yielding (e.g. at a safepoint):
// no yield checks and no abort checks are performed below.
660 G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
661 }
662
// Clears the bitmap for region r in chunk_size()-sized pieces.
// Returns true to abort the whole iteration (marking was aborted
// after a yield), false when the region was fully processed.
663 virtual bool doHeapRegion(HeapRegion* r) {
664 size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
665
666 HeapWord* cur = r->bottom();
667 HeapWord* const end = r->end();
668
669 while (cur < end) {
// NOTE(review): original lines 670-672 — presumably the actual
// _bitmap->clear_range() of the current chunk — are elided from
// this excerpt; confirm against the full file.
673 cur += chunk_size_in_words;
674
675 // Abort iteration if after yielding the marking has been aborted.
676 if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
677 return true;
678 }
679 // Repeat the asserts from before the start of the closure. We will do them
680 // as asserts here to minimize their overhead on the product. However, we
681 // will have them as guarantees at the beginning / end of the bitmap
682 // clearing to get some checking in the product.
683 assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
684 assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
685 }
686 assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
687
688 return false;
689 }
690 };
691
692 G1ClearBitmapHRClosure _cl;
693 HeapRegionClaimer _hr_claimer;
694 bool _suspendible; // If the task is suspendible, workers must join the STS.
695
696 public:
// When not suspendible the closure gets a NULL G1ConcurrentMark so it
// never yields or aborts (see G1ClearBitmapHRClosure above).
697 G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
698 AbstractGangTask("G1 Clear Bitmap"),
699 _cl(bitmap, suspendible ? cm : NULL),
700 _hr_claimer(n_workers),
701 _suspendible(suspendible)
702 { }
703
704 void work(uint worker_id) {
705 SuspendibleThreadSetJoiner sts_join(_suspendible);
706 G1CollectedHeap::heap()->heap_region_par_iterate(&_cl, worker_id, &_hr_claimer, true);
707 }
708
// True iff the closure iterated over every claimed region without
// aborting; must hold for non-yielding (safepoint) clears.
709 bool is_complete() {
710 return _cl.complete();
711 }
712 };
713
714 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
715 assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
716
717 size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
718 size_t const num_chunks = align_size_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
719
720 uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
721
722 G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
723
724 log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
725 workers->run_task(&cl, num_workers);
726 guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
1158 set_non_marking_state();
1159 }
1160
1161 // Expand the marking stack, if we have to and if we can.
1162 if (_global_mark_stack.should_expand()) {
1163 _global_mark_stack.expand();
1164 }
1165
1166 // Statistics
1167 double now = os::elapsedTime();
1168 _remark_mark_times.add((mark_work_end - start) * 1000.0);
1169 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1170 _remark_times.add((now - start) * 1000.0);
1171
1172 g1p->record_concurrent_mark_remark_end();
1173
1174 G1CMIsAliveClosure is_alive(g1h);
1175 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1176 }
1177
// Per-worker closure applied to every heap region at the end of concurrent
// marking (Cleanup pause). Regions that are completely dead (used, no live
// bytes, not young) are freed onto the worker-local cleanup list; all other
// regions get their remembered sets scrubbed. Removal counts and freed
// bytes are accumulated locally and published by the owning task.
1178 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1179 G1CollectedHeap* _g1;
1180 size_t _freed_bytes;
1181 FreeRegionList* _local_cleanup_list;
1182 uint _old_regions_removed;
1183 uint _humongous_regions_removed;
1184 HRRSCleanupTask* _hrrs_cleanup_task;
1185
1186 public:
1187 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1188 FreeRegionList* local_cleanup_list,
1189 HRRSCleanupTask* hrrs_cleanup_task) :
1190 _g1(g1),
1191 _freed_bytes(0),
1192 _local_cleanup_list(local_cleanup_list),
1193 _old_regions_removed(0),
1194 _humongous_regions_removed(0),
1195 _hrrs_cleanup_task(hrrs_cleanup_task) { }
1196
// Accessors are const-qualified; the former "const uint" by-value return
// carried a meaningless top-level const (C++ Core Guidelines F.49).
1197 size_t freed_bytes() const { return _freed_bytes; }
1198 uint old_regions_removed() const { return _old_regions_removed; }
1199 uint humongous_regions_removed() const { return _humongous_regions_removed; }
1200
// Always returns false: iteration continues over every region.
1201 bool doHeapRegion(HeapRegion *hr) {
// Archive regions are pinned and never reclaimed here.
1202 if (hr->is_archive()) {
1203 return false;
1204 }
1205 _g1->reset_gc_time_stamps(hr);
1206 hr->note_end_of_marking();
1207
// A non-empty, non-young region with zero live bytes is wholly dead:
// reclaim it now, skipping remembered-set work for the freed region.
1208 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1209 _freed_bytes += hr->used();
1210 hr->set_containing_set(NULL);
1211 if (hr->is_humongous()) {
1212 _humongous_regions_removed++;
1213 _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
1214 } else {
1215 _old_regions_removed++;
1216 _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
1217 }
1218 } else {
// Surviving region: queue its remembered set for cleanup instead.
1219 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1220 }
1221
1222 return false;
1223 }
1224 };
1225
1226 class G1ParNoteEndTask: public AbstractGangTask {
1227 friend class G1NoteEndOfConcMarkClosure;
1228
1229 protected:
1230 G1CollectedHeap* _g1h;
1231 FreeRegionList* _cleanup_list;
1232 HeapRegionClaimer _hrclaimer;
1233
1234 public:
1235 G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1236 AbstractGangTask("G1 note end"), _g1h(g1h), _cleanup_list(cleanup_list), _hrclaimer(n_workers) {
1237 }
1238
1239 void work(uint worker_id) {
1240 FreeRegionList local_cleanup_list("Local Cleanup List");
1241 HRRSCleanupTask hrrs_cleanup_task;
1242 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1243 &hrrs_cleanup_task);
1244 _g1h->heap_region_par_iterate(&g1_note_end, worker_id, &_hrclaimer);
1245 assert(g1_note_end.complete(), "Shouldn't have yielded!");
1246
1247 // Now update the lists
1248 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1249 {
1250 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1251 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1252
1253 // If we iterate over the global cleanup list at the end of
1254 // cleanup to do this printing we will not guarantee to only
1255 // generate output for the newly-reclaimed regions (the list
1256 // might not be empty at the beginning of cleanup; we might
1257 // still be working on its previous contents). So we do the
1258 // printing here, before we append the new regions to the global
1259 // cleanup list.
1260
1261 G1HRPrinter* hr_printer = _g1h->hr_printer();
1262 if (hr_printer->is_active()) {
1263 FreeRegionListIterator iter(&local_cleanup_list);
1264 while (iter.more_available()) {
|
629 assert(!concurrent_marking_in_progress(), "invariant");
630 assert(out_of_regions(),
631 "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
632 p2i(_finger), p2i(_heap_end));
633 }
634 }
635
// Restore the global marking state to its idle defaults. Called when no
// marking cycle is active: drops per-cycle marking data, zeroes the count
// of active marking tasks, and clears the concurrent-marking-in-progress
// flag so subsequent queries see "not marking".
636 void G1ConcurrentMark::set_non_marking_state() {
637 // We set the global marking state to some default values when we're
638 // not doing marking.
639 reset_marking_state();
640 _active_tasks = 0;
641 clear_concurrent_marking_in_progress();
642 }
643
// Guard destructor: the single G1ConcurrentMark instance lives for the
// lifetime of the VM, so executing this destructor indicates a bug and
// aborts via ShouldNotReachHere().
644 G1ConcurrentMark::~G1ConcurrentMark() {
645 // The G1ConcurrentMark instance is never freed.
646 ShouldNotReachHere();
647 }
648
// Parallel task (refactored onto G1ParallelizeByRegionsTask, which now owns
// the region claiming) that clears a G1 mark bitmap region by region.
// Each worker clears the bitmap words for its claimed regions in M-sized
// chunks, optionally yielding to the SuspendibleThreadSet between chunks
// (concurrent use) or running straight through (safepoint use).
649 class G1ClearBitMapTask : public G1ParallelizeByRegionsTask {
650 public:
651 static size_t chunk_size() { return M; }
652
653 private:
654 // Heap region closure used for clearing the given mark bitmap.
655 class G1ClearBitmapHRClosure : public HeapRegionClosure {
656 private:
657 G1CMBitMap* _bitmap;
658 G1ConcurrentMark* _cm;
659 public:
// A NULL _cm means the clear is non-yielding (e.g. at a safepoint):
// no yield checks and no abort checks are performed below.
660 G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) {
661 }
662
// Clears the bitmap for region r in chunk_size()-sized pieces.
// Returns true to abort the whole iteration (marking was aborted
// after a yield), false when the region was fully processed.
663 virtual bool doHeapRegion(HeapRegion* r) {
664 size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize;
665
666 HeapWord* cur = r->bottom();
667 HeapWord* const end = r->end();
668
669 while (cur < end) {
// NOTE(review): original lines 670-672 — presumably the actual
// _bitmap->clear_range() of the current chunk — are elided from
// this excerpt; confirm against the full file.
673 cur += chunk_size_in_words;
674
675 // Abort iteration if after yielding the marking has been aborted.
676 if (_cm != NULL && _cm->do_yield_check() && _cm->has_aborted()) {
677 return true;
678 }
679 // Repeat the asserts from before the start of the closure. We will do them
680 // as asserts here to minimize their overhead on the product. However, we
681 // will have them as guarantees at the beginning / end of the bitmap
682 // clearing to get some checking in the product.
683 assert(_cm == NULL || _cm->cmThread()->during_cycle(), "invariant");
684 assert(_cm == NULL || !G1CollectedHeap::heap()->collector_state()->mark_in_progress(), "invariant");
685 }
686 assert(cur == end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());
687
688 return false;
689 }
690 };
691
692 G1ClearBitmapHRClosure _cl;
693 bool _suspendible; // If the task is suspendible, workers must join the STS.
694
695 public:
// When not suspendible the closure gets a NULL G1ConcurrentMark so it
// never yields or aborts (see G1ClearBitmapHRClosure above).
696 G1ClearBitMapTask(G1CMBitMap* bitmap, G1ConcurrentMark* cm, uint n_workers, bool suspendible) :
697 G1ParallelizeByRegionsTask("G1 Clear Bitmap", n_workers),
698 _cl(bitmap, suspendible ? cm : NULL),
699 _suspendible(suspendible)
700 { }
701
702 void work(uint worker_id) {
703 SuspendibleThreadSetJoiner sts_join(_suspendible);
704 all_heap_regions_work(&_cl, worker_id, true);
705 }
706
// True iff the closure iterated over every claimed region without
// aborting; must hold for non-yielding (safepoint) clears.
707 bool is_complete() {
708 return _cl.complete();
709 }
710 };
711
712 void G1ConcurrentMark::clear_bitmap(G1CMBitMap* bitmap, WorkGang* workers, bool may_yield) {
713 assert(may_yield || SafepointSynchronize::is_at_safepoint(), "Non-yielding bitmap clear only allowed at safepoint.");
714
715 size_t const num_bytes_to_clear = (HeapRegion::GrainBytes * _g1h->num_regions()) / G1CMBitMap::heap_map_factor();
716 size_t const num_chunks = align_size_up(num_bytes_to_clear, G1ClearBitMapTask::chunk_size()) / G1ClearBitMapTask::chunk_size();
717
718 uint const num_workers = (uint)MIN2(num_chunks, (size_t)workers->active_workers());
719
720 G1ClearBitMapTask cl(bitmap, this, num_workers, may_yield);
721
722 log_debug(gc, ergo)("Running %s with %u workers for " SIZE_FORMAT " work units.", cl.name(), num_workers, num_chunks);
723 workers->run_task(&cl, num_workers);
724 guarantee(!may_yield || cl.is_complete(), "Must have completed iteration when not yielding.");
1156 set_non_marking_state();
1157 }
1158
1159 // Expand the marking stack, if we have to and if we can.
1160 if (_global_mark_stack.should_expand()) {
1161 _global_mark_stack.expand();
1162 }
1163
1164 // Statistics
1165 double now = os::elapsedTime();
1166 _remark_mark_times.add((mark_work_end - start) * 1000.0);
1167 _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
1168 _remark_times.add((now - start) * 1000.0);
1169
1170 g1p->record_concurrent_mark_remark_end();
1171
1172 G1CMIsAliveClosure is_alive(g1h);
1173 _gc_tracer_cm->report_object_count_after_gc(&is_alive);
1174 }
1175
1176 class G1ParNoteEndTask: public G1ParallelizeByRegionsTask {
// Per-worker closure (now nested inside G1ParNoteEndTask) applied to every
// heap region at the end of concurrent marking. Regions that are completely
// dead (used, no live bytes, not young) are freed onto the worker-local
// cleanup list; all other regions get their remembered sets scrubbed.
// Removal counts and freed bytes are accumulated locally.
1177 class G1NoteEndOfConcMarkClosure : public HeapRegionClosure {
1178 G1CollectedHeap* _g1;
1179 size_t _freed_bytes;
1180 FreeRegionList* _local_cleanup_list;
1181 uint _old_regions_removed;
1182 uint _humongous_regions_removed;
1183 HRRSCleanupTask* _hrrs_cleanup_task;
1184
1185 public:
1186 G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
1187 FreeRegionList* local_cleanup_list,
1188 HRRSCleanupTask* hrrs_cleanup_task) :
1189 _g1(g1),
1190 _freed_bytes(0),
1191 _local_cleanup_list(local_cleanup_list),
1192 _old_regions_removed(0),
1193 _humongous_regions_removed(0),
1194 _hrrs_cleanup_task(hrrs_cleanup_task) { }
1195
// Accessors are const-qualified; the former "const uint" by-value return
// carried a meaningless top-level const (C++ Core Guidelines F.49).
1196 size_t freed_bytes() const { return _freed_bytes; }
1197 uint old_regions_removed() const { return _old_regions_removed; }
1198 uint humongous_regions_removed() const { return _humongous_regions_removed; }
1199
// Always returns false: iteration continues over every region.
1200 bool doHeapRegion(HeapRegion *hr) {
// Archive regions are pinned and never reclaimed here.
1201 if (hr->is_archive()) {
1202 return false;
1203 }
1204 _g1->reset_gc_time_stamps(hr);
1205 hr->note_end_of_marking();
1206
// A non-empty, non-young region with zero live bytes is wholly dead:
// reclaim it now, skipping remembered-set work for the freed region.
1207 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
1208 _freed_bytes += hr->used();
1209 hr->set_containing_set(NULL);
1210 if (hr->is_humongous()) {
1211 _humongous_regions_removed++;
1212 _g1->free_humongous_region(hr, _local_cleanup_list, true /* skip_remset */);
1213 } else {
1214 _old_regions_removed++;
1215 _g1->free_region(hr, _local_cleanup_list, true /* skip_remset */);
1216 }
1217 } else {
// Surviving region: queue its remembered set for cleanup instead.
1218 hr->rem_set()->do_cleanup_work(_hrrs_cleanup_task);
1219 }
1220
1221 return false;
1222 }
1223 };
1224
1225 protected:
1226 G1CollectedHeap* _g1h;
1227 FreeRegionList* _cleanup_list;
1228
1229 public:
1230 G1ParNoteEndTask(G1CollectedHeap* g1h, FreeRegionList* cleanup_list, uint n_workers) :
1231 G1ParallelizeByRegionsTask("G1 note end", n_workers), _g1h(g1h), _cleanup_list(cleanup_list) {
1232 }
1233
1234 void work(uint worker_id) {
1235 FreeRegionList local_cleanup_list("Local Cleanup List");
1236 HRRSCleanupTask hrrs_cleanup_task;
1237 G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list,
1238 &hrrs_cleanup_task);
1239 all_heap_regions_work(&g1_note_end, worker_id );
1240 assert(g1_note_end.complete(), "Shouldn't have yielded!");
1241
1242 // Now update the lists
1243 _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed());
1244 {
1245 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
1246 _g1h->decrement_summary_bytes(g1_note_end.freed_bytes());
1247
1248 // If we iterate over the global cleanup list at the end of
1249 // cleanup to do this printing we will not guarantee to only
1250 // generate output for the newly-reclaimed regions (the list
1251 // might not be empty at the beginning of cleanup; we might
1252 // still be working on its previous contents). So we do the
1253 // printing here, before we append the new regions to the global
1254 // cleanup list.
1255
1256 G1HRPrinter* hr_printer = _g1h->hr_printer();
1257 if (hr_printer->is_active()) {
1258 FreeRegionListIterator iter(&local_cleanup_list);
1259 while (iter.more_available()) {
|