1112 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1113 collector_state()->set_initiate_conc_mark_if_possible(true);
1114 }
1115 return result;
1116 }
1117
1118 ShouldNotReachHere();
1119 }
1120
1121 class PostMCRemSetClearClosure: public HeapRegionClosure {
1122 G1CollectedHeap* _heap;
1123 ModRefBarrierSet* _barrier;
1124 public:
1125 PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1126 _heap(g1h), _barrier(mr_bs) {}
1127 
1128 // Visits every region after a full compaction: resets the region's GC
1129 // time stamps, drops its remembered set, and wipes its card table range.
1130 bool doHeapRegion(HeapRegion* r) {
1131 _heap->reset_gc_time_stamps(r);
1132 r->rem_set()->clear();
1133 // Clearing only the cards under the used part of the region would be
1134 // wrong: a dirty card left in the tail we might later allocate into
1135 // would never be enqueued again, and its updates would be missed, so
1136 // wipe the whole [bottom, end) range.
1137 // The extra cost is acceptable -- we shouldn't be doing full GC anyway!
1138 _barrier->clear(MemRegion(r->bottom(), r->end()));
1139 // Returning false keeps the heap region iteration going.
1140 return false;
1141 }
1142 };
1143
1144 void G1CollectedHeap::clear_rsets_post_compaction() {
1145 PostMCRemSetClearClosure cl(this, g1_barrier_set()); // drops remsets, wipes cards
1146 heap_region_iterate(&cl); // applied to every region in the heap
1147 }
1148
1149 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1150 G1CollectedHeap* _g1h;
1151 UpdateRSOopClosure _cl;
5264 assert(free_list != NULL, "pre-condition");
5265
5266 if (G1VerifyBitmaps) {
5267 MemRegion mr(hr->bottom(), hr->end());
5268 concurrent_mark()->clearRangePrevBitmap(mr);
5269 }
5270
5271 // Clear the card counts for this region.
5272 // Note: we only need to do this if the region is not young
5273 // (since we don't refine cards in young regions).
5274 if (!hr->is_young()) {
5275 _cg1r->hot_card_cache()->reset_card_counts(hr);
5276 }
5277 hr->hr_clear(par, true /* clear_space */, locked /* locked */);
5278 free_list->add_ordered(hr);
5279 }
5280
5281 // Frees one humongous region: strips its humongous designation and then
5282 // reuses the common free_region() path ("par" is forwarded unchanged).
5283 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5284 FreeRegionList* free_list,
5285 bool par) {
5286 // Precondition matching the later revision of this function: callers
5287 // must only hand us regions that are actually humongous.
5288 assert(hr->is_humongous(), "this is only for humongous regions");
5289 assert(free_list != NULL, "pre-condition");
5290 hr->clear_humongous();
5291 free_region(hr, free_list, par);
5292 }
5288
5289 // Removes already-detached old and humongous regions from their region
5290 // sets; OldSets_lock is only taken when there is real work to do.
5291 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
5292 const HeapRegionSetCount& humongous_regions_removed) {
5293 if (old_regions_removed.length() == 0 && humongous_regions_removed.length() == 0) {
5294 return; // nothing was removed, don't touch the lock
5295 }
5296 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5297 _old_set.bulk_remove(old_regions_removed);
5298 _humongous_set.bulk_remove(humongous_regions_removed);
5299 }
5298
5299 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
5300 assert(list != NULL, "list can't be null");
5301 if (!list->is_empty()) {
5302 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
5303 _hrm.insert_list_into_free_list(list);
5703 //
5704 // It is not required to check whether the object has been found dead by marking
5705 // or not, in fact it would prevent reclamation within a concurrent cycle, as
5706 // all objects allocated during that time are considered live.
5707 // SATB marking is even more conservative than the remembered set.
5708 // So if at this point in the collection there is no remembered set entry,
5709 // nobody has a reference to it.
5710 // At the start of collection we flush all refinement logs, and remembered sets
5711 // are completely up-to-date wrt references to the humongous object.
5712 //
5713 // Other implementation considerations:
5714 // - never consider object arrays at this time because they would pose
5715 // considerable effort for cleaning up the remembered sets. This is
5716 // required because stale remembered sets might reference locations that
5717 // are currently allocated into.
5718 uint region_idx = r->hrm_index();
5719 if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5720 !r->rem_set()->is_empty()) {
5721
5722 if (G1TraceEagerReclaimHumongousObjects) {
5723 gclog_or_tty->print_cr("Live humongous region %u objectsize " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5724 region_idx,
5725 (size_t)obj->size() * HeapWordSize,
5726 p2i(r->bottom()),
5727 r->rem_set()->occupied(),
5728 r->rem_set()->strong_code_roots_list_length(),
5729 next_bitmap->isMarked(r->bottom()),
5730 g1h->is_humongous_reclaim_candidate(region_idx),
5731 obj->is_typeArray()
5732 );
5733 }
5734
5735 return false;
5736 }
5737
5738 guarantee(obj->is_typeArray(),
5739 "Only eagerly reclaiming type arrays is supported, but the object "
5740 PTR_FORMAT " is not.", p2i(r->bottom()));
5741
5742 if (G1TraceEagerReclaimHumongousObjects) {
5743 gclog_or_tty->print_cr("Dead humongous region %u objectsize " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5744 region_idx,
5745 (size_t)obj->size() * HeapWordSize,
5746 p2i(r->bottom()),
5747 r->rem_set()->occupied(),
5748 r->rem_set()->strong_code_roots_list_length(),
5749 next_bitmap->isMarked(r->bottom()),
5750 g1h->is_humongous_reclaim_candidate(region_idx),
5751 obj->is_typeArray()
5752 );
5753 }
5754 // Need to clear mark bit of the humongous object if already set.
5755 if (next_bitmap->isMarked(r->bottom())) {
5756 next_bitmap->clear(r->bottom());
5757 }
5758 do {
5759 HeapRegion* next = g1h->next_region_by_index(r);
5760 _freed_bytes += r->used();
5761 r->set_containing_set(NULL);
5762 _humongous_regions_removed.increment(1u, r->capacity());
5763 g1h->free_humongous_region(r, _free_region_list, false);
|
1112 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1113 collector_state()->set_initiate_conc_mark_if_possible(true);
1114 }
1115 return result;
1116 }
1117
1118 ShouldNotReachHere();
1119 }
1120
1121 class PostMCRemSetClearClosure: public HeapRegionClosure {
1122 G1CollectedHeap* _heap;
1123 ModRefBarrierSet* _barrier;
1124 public:
1125 PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1126 _heap(g1h), _barrier(mr_bs) {}
1127 
1128 // Visits every region after a full compaction: resets GC time stamps,
1129 // then (for regions other than humongous continuations) drops the
1130 // remembered set and wipes the region's card table range.
1131 bool doHeapRegion(HeapRegion* r) {
1132 HeapRegionRemSet* rset = r->rem_set();
1133 _heap->reset_gc_time_stamps(r);
1134 // Continuation regions of a humongous object get no clearing here;
1135 // just check that their code root list and RSet really are empty.
1136 if (r->is_continues_humongous()) {
1137 assert(rset->strong_code_roots_list_length() == 0, "sanity");
1138 assert(rset->occupied() == 0, "RSet should be empty");
1139 return false;
1140 }
1141 rset->clear();
1142 // Clearing only the cards under the used part of the region would be
1143 // wrong: a dirty card left in the tail we might later allocate into
1144 // would never be enqueued again, and its updates would be missed, so
1145 // wipe the whole [bottom, end) range.
1146 // The extra cost is acceptable -- we shouldn't be doing full GC anyway!
1147 _barrier->clear(MemRegion(r->bottom(), r->end()));
1148 // Returning false keeps the heap region iteration going.
1149 return false;
1150 }
1151 };
1151
1152 void G1CollectedHeap::clear_rsets_post_compaction() {
1153 PostMCRemSetClearClosure cl(this, g1_barrier_set()); // drops remsets, wipes cards
1154 heap_region_iterate(&cl); // applied to every region in the heap
1155 }
1156
1157 class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
1158 G1CollectedHeap* _g1h;
1159 UpdateRSOopClosure _cl;
5272 assert(free_list != NULL, "pre-condition");
5273
5274 if (G1VerifyBitmaps) {
5275 MemRegion mr(hr->bottom(), hr->end());
5276 concurrent_mark()->clearRangePrevBitmap(mr);
5277 }
5278
5279 // Clear the card counts for this region.
5280 // Note: we only need to do this if the region is not young
5281 // (since we don't refine cards in young regions).
5282 if (!hr->is_young()) {
5283 _cg1r->hot_card_cache()->reset_card_counts(hr);
5284 }
5285 hr->hr_clear(par, true /* clear_space */, locked /* locked */);
5286 free_list->add_ordered(hr);
5287 }
5288
5289 void G1CollectedHeap::free_humongous_region(HeapRegion* hr, // Free one humongous region via the common free_region() path.
5290 FreeRegionList* free_list, // destination list; must be non-NULL (asserted below)
5291 bool par) { // forwarded unchanged to free_region()
5292 assert(hr->is_humongous(), "this is only for humongous regions");
5293 assert(free_list != NULL, "pre-condition");
5294 hr->clear_humongous(); // strip the humongous designation before the generic free
5295 free_region(hr, free_list, par);
5296 }
5297
5298 // Removes already-detached old and humongous regions from their region
5299 // sets; OldSets_lock is only taken when at least one region was removed.
5300 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
5301 const HeapRegionSetCount& humongous_regions_removed) {
5302 if (old_regions_removed.length() == 0 && humongous_regions_removed.length() == 0) {
5303 return; // no work, skip the lock
5304 }
5305 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5306 _old_set.bulk_remove(old_regions_removed);
5307 _humongous_set.bulk_remove(humongous_regions_removed);
5308 }
5307
5308 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
5309 assert(list != NULL, "list can't be null");
5310 if (!list->is_empty()) {
5311 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
5312 _hrm.insert_list_into_free_list(list);
5712 //
5713 // It is not required to check whether the object has been found dead by marking
5714 // or not, in fact it would prevent reclamation within a concurrent cycle, as
5715 // all objects allocated during that time are considered live.
5716 // SATB marking is even more conservative than the remembered set.
5717 // So if at this point in the collection there is no remembered set entry,
5718 // nobody has a reference to it.
5719 // At the start of collection we flush all refinement logs, and remembered sets
5720 // are completely up-to-date wrt references to the humongous object.
5721 //
5722 // Other implementation considerations:
5723 // - never consider object arrays at this time because they would pose
5724 // considerable effort for cleaning up the remembered sets. This is
5725 // required because stale remembered sets might reference locations that
5726 // are currently allocated into.
5727 uint region_idx = r->hrm_index();
5728 if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5729 !r->rem_set()->is_empty()) {
5730
5731 if (G1TraceEagerReclaimHumongousObjects) {
5732 gclog_or_tty->print_cr("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5733 region_idx,
5734 (size_t)obj->size() * HeapWordSize,
5735 p2i(r->bottom()),
5736 r->rem_set()->occupied(),
5737 r->rem_set()->strong_code_roots_list_length(),
5738 next_bitmap->isMarked(r->bottom()),
5739 g1h->is_humongous_reclaim_candidate(region_idx),
5740 obj->is_typeArray()
5741 );
5742 }
5743
5744 return false;
5745 }
5746
5747 guarantee(obj->is_typeArray(),
5748 "Only eagerly reclaiming type arrays is supported, but the object "
5749 PTR_FORMAT " is not.", p2i(r->bottom()));
5750
5751 if (G1TraceEagerReclaimHumongousObjects) {
5752 gclog_or_tty->print_cr("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5753 region_idx,
5754 (size_t)obj->size() * HeapWordSize,
5755 p2i(r->bottom()),
5756 r->rem_set()->occupied(),
5757 r->rem_set()->strong_code_roots_list_length(),
5758 next_bitmap->isMarked(r->bottom()),
5759 g1h->is_humongous_reclaim_candidate(region_idx),
5760 obj->is_typeArray()
5761 );
5762 }
5763 // Need to clear mark bit of the humongous object if already set.
5764 if (next_bitmap->isMarked(r->bottom())) {
5765 next_bitmap->clear(r->bottom());
5766 }
5767 do {
5768 HeapRegion* next = g1h->next_region_by_index(r);
5769 _freed_bytes += r->used();
5770 r->set_containing_set(NULL);
5771 _humongous_regions_removed.increment(1u, r->capacity());
5772 g1h->free_humongous_region(r, _free_region_list, false);
|