/* NOTE(review): this chunk is side-by-side diff residue of g1CollectedHeap.cpp -- two columns separated by '|', with original line numbers embedded in the text and several ranges elided; code tokens are preserved byte-identical here, comments only added. */ 5289 5290 // Clear the card counts for this region. 5291 // Note: we only need to do this if the region is not young 5292 // (since we don't refine cards in young regions). 5293 if (!hr->is_young()) { 5294 _cg1r->hot_card_cache()->reset_card_counts(hr); 5295 } 5296 hr->hr_clear(par, true /* clear_space */, locked /* locked */); 5297 free_list->add_ordered(hr); 5298 } 5299 /* free_humongous_region: strips the humongous attribute, then frees through the common free_region() path; precondition asserts guard the region type and the non-NULL target list. */ 5300 void G1CollectedHeap::free_humongous_region(HeapRegion* hr, 5301 FreeRegionList* free_list, 5302 bool par) { 5303 assert(hr->is_humongous(), "this is only for humongous regions"); 5304 assert(free_list != NULL, "pre-condition"); 5305 hr->clear_humongous(); 5306 free_region(hr, free_list, par); 5307 } 5308 /* remove_from_old_sets: bulk-removes the given counts from _old_set and _humongous_set under OldSets_lock; skips taking the lock entirely when both counts are zero. */ 5309 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, 5310 const HeapRegionSetCount& humongous_regions_removed) { 5311 if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) { 5312 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag); 5313 _old_set.bulk_remove(old_regions_removed); 5314 _humongous_set.bulk_remove(humongous_regions_removed); 5315 } 5316 5317 } 5318 /* prepend_to_freelist: splices a non-empty local free list into the master free list under FreeList_lock. */ 5319 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) { 5320 assert(list != NULL, "list can't be null"); 5321 if (!list->is_empty()) { 5322 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); 5323 _hrm.insert_list_into_free_list(list); 5324 } 5325 } 5326 /* decrement_summary_bytes: thin wrapper forwarding to decrease_used(). */ 5327 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) { 5328 decrease_used(bytes); 5329 } 5330 /* Elision: the text jumps from 5331 to 5680 here; the statements below are the tail of a free-collection-set routine whose head is not visible -- presumably it accumulates per-region timing before returning freed regions and bytes to the heap (TODO confirm against the full file). */ 5331 class G1ParCleanupCTTask : public AbstractGangTask { 5680 5681 double end_sec = os::elapsedTime(); 5682 double elapsed_ms = (end_sec - start_sec) * 1000.0; 5683 5684 if (non_young) { 5685 non_young_time_ms += elapsed_ms; 5686 } else { 5687 young_time_ms += elapsed_ms; 5688 } 5689 5690 prepend_to_freelist(&local_free_list); 5691 decrement_summary_bytes(pre_used); 5692 policy->phase_times()->record_young_free_cset_time_ms(young_time_ms); 5693 
policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms); 5694 } 5695 /* G1FreeHumongousRegionClosure (first diff column): frees dead starts-humongous objects found during heap iteration, tracking removals in a HeapRegionSetCount; the liveness decision logic is elided between 5725 and 5770. */ 5696 class G1FreeHumongousRegionClosure : public HeapRegionClosure { 5697 private: 5698 FreeRegionList* _free_region_list; 5699 HeapRegionSet* _proxy_set; 5700 HeapRegionSetCount _humongous_regions_removed; 5701 size_t _freed_bytes; 5702 public: 5703 5704 G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) : 5705 _free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) { 5706 } 5707 /* Only starts-humongous regions are considered; all others are passed over immediately. */ 5708 virtual bool doHeapRegion(HeapRegion* r) { 5709 if (!r->is_starts_humongous()) { 5710 return false; 5711 } 5712 5713 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 5714 5715 oop obj = (oop)r->bottom(); 5716 CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap(); 5717 5718 // The following checks whether the humongous object is live are sufficient. 5719 // The main additional check (in addition to having a reference from the roots 5720 // or the young gen) is whether the humongous object has a remembered set entry. 5721 // 5722 // A humongous object cannot be live if there is no remembered set for it 5723 // because: 5724 // - there can be no references from within humongous starts regions referencing 5725 // the object because we never allocate other objects into them. /* Elision: 5726-5769 (the rest of the liveness argument and the reclaim decision, including the definition of region_idx) is not visible in this chunk. */ 5770 if (G1TraceEagerReclaimHumongousObjects) { 5771 gclog_or_tty->print_cr("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d", 5772 region_idx, 5773 (size_t)obj->size() * HeapWordSize, 5774 p2i(r->bottom()), 5775 r->rem_set()->occupied(), 5776 r->rem_set()->strong_code_roots_list_length(), 5777 next_bitmap->isMarked(r->bottom()), 5778 g1h->is_humongous_reclaim_candidate(region_idx), 5779 obj->is_typeArray() 5780 ); 5781 } 5782 // Need to clear mark bit of the humongous object if already set. 
5783 if (next_bitmap->isMarked(r->bottom())) { 5784 next_bitmap->clear(r->bottom()); 5785 } /* Free every region spanned by the humongous object: detach each from its containing set, accumulate bytes and counts, and hand it to free_humongous_region(). */ 5786 do { 5787 HeapRegion* next = g1h->next_region_in_humongous(r); 5788 _freed_bytes += r->used(); 5789 r->set_containing_set(NULL); 5790 _humongous_regions_removed.increment(1u, r->capacity()); 5791 g1h->free_humongous_region(r, _free_region_list, false); 5792 r = next; 5793 } while (r != NULL); 5794 5795 return false; 5796 } 5797 /* Accessors for the totals accumulated above. */ 5798 HeapRegionSetCount& humongous_free_count() { 5799 return _humongous_regions_removed; 5800 } 5801 5802 size_t bytes_freed() const { 5803 return _freed_bytes; 5804 } 5805 5806 size_t humongous_reclaimed() const { 5807 return _humongous_regions_removed.length(); 5808 } 5809 }; 5810 /* At a safepoint: runs the closure above over the heap, removes the freed humongous regions from the old sets, prints them if the HR printer is active, returns them to the free list, and records elapsed time and reclaimed count. */ 5811 void G1CollectedHeap::eagerly_reclaim_humongous_regions() { 5812 assert_at_safepoint(true); 5813 5814 if (!G1EagerReclaimHumongousObjects || 5815 (!_has_humongous_reclaim_candidates && !G1TraceEagerReclaimHumongousObjects)) { 5816 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0); 5817 return; 5818 } 5819 5820 double start_time = os::elapsedTime(); 5821 5822 FreeRegionList local_cleanup_list("Local Humongous Cleanup List"); 5823 5824 G1FreeHumongousRegionClosure cl(&local_cleanup_list); 5825 heap_region_iterate(&cl); 5826 5827 HeapRegionSetCount empty_set; 5828 remove_from_old_sets(empty_set, cl.humongous_free_count()); 5829 5830 G1HRPrinter* hrp = hr_printer(); 5831 if (hrp->is_active()) { 5832 FreeRegionListIterator iter(&local_cleanup_list); 5833 while (iter.more_available()) { 5834 HeapRegion* hr = iter.get_next(); 5835 hrp->cleanup(hr); 5836 } 5837 } 5838 5839 prepend_to_freelist(&local_cleanup_list); 5840 decrement_summary_bytes(cl.bytes_freed()); 5841 5842 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0, 5843 cl.humongous_reclaimed()); 5844 } 5845 5846 // This routine is similar to the above but does not record 5847 // any policy statistics or update free lists; we are 
abandoning 5848 // the current incremental collection set in preparation of a 5849 // full collection. After the full GC we will start to build up 5850 // the incremental collection set again. 5851 // This is only called when we're doing a full collection 5852 // and is immediately followed by the tearing down of the young list. 5853 /* Unlinks every region from the incremental collection set: clears CSet membership, the next-in-cset pointer and the young index. */ 5854 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { 5855 HeapRegion* cur = cs_head; 5856 5857 while (cur != NULL) { 5858 HeapRegion* next = cur->next_in_collection_set(); 5859 assert(cur->in_collection_set(), "bad CS"); 5860 cur->set_next_in_collection_set(NULL); 5861 clear_in_cset(cur); 5862 cur->set_young_index_in_cset(-1); 5863 cur = next; /* Elision: the text jumps from 5863 to 6184 (the rest of abandon_collection_set through most of a region-allocation routine whose head is not visible). */ 6184 "attempt heap expansion", 6185 ergo_format_reason("requested address range outside heap bounds") 6186 ergo_format_byte("region size"), 6187 HeapRegion::GrainWords * HeapWordSize); 6188 } 6189 _hrm.allocate_free_regions_starting_at(index, 1); 6190 return region_at(index); 6191 } 6192 return NULL; 6193 } 6194 6195 // Heap region set verification 6196 /* VerifyRegionListsClosure (first diff column): recounts heap regions per category using HeapRegionSetCount so both length and capacity can be cross-checked later. */ 6197 class VerifyRegionListsClosure : public HeapRegionClosure { 6198 private: 6199 HeapRegionSet* _old_set; 6200 HeapRegionSet* _humongous_set; 6201 HeapRegionManager* _hrm; 6202 6203 public: 6204 HeapRegionSetCount _old_count; 6205 HeapRegionSetCount _humongous_count; 6206 HeapRegionSetCount _free_count; 6207 6208 VerifyRegionListsClosure(HeapRegionSet* old_set, 6209 HeapRegionSet* humongous_set, 6210 HeapRegionManager* hrm) : 6211 _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm), 6212 _old_count(), _humongous_count(), _free_count(){ } 6213 /* Classifies each region and asserts it belongs to the set its type demands. */ 6214 bool doHeapRegion(HeapRegion* hr) { 6215 if (hr->is_young()) { 6216 // TODO 6217 } else if (hr->is_humongous()) { 6218 assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index()); 6219 _humongous_count.increment(1u, hr->capacity()); 6220 } else if (hr->is_empty()) { 6221 assert(_hrm->is_free(hr), "Heap region %u 
is empty but not on the free list.", hr->hrm_index()); 6222 _free_count.increment(1u, hr->capacity()); 6223 } else if (hr->is_old()) { 6224 assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index()); 6225 _old_count.increment(1u, hr->capacity()); 6226 } else { 6227 // There are no other valid region types. Check for one invalid 6228 // one we can identify: pinned without old or humongous set. 6229 assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index()); 6230 ShouldNotReachHere(); 6231 } 6232 return false; 6233 } 6234 /* Cross-checks the recounted length and capacity against each live set and the free list; a mismatch is a fatal guarantee failure. */ 6235 void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) { 6236 guarantee(old_set->length() == _old_count.length(), "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()); 6237 guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), "Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, 6238 old_set->total_capacity_bytes(), _old_count.capacity()); 6239 6240 guarantee(humongous_set->length() == _humongous_count.length(), "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()); 6241 guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), "Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, 6242 humongous_set->total_capacity_bytes(), _humongous_count.capacity()); 6243 6244 guarantee(free_list->num_free_regions() == _free_count.length(), "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()); 6245 guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), "Free list capacity mismatch. 
Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, 6246 free_list->total_capacity_bytes(), _free_count.capacity()); 6247 } 6248 }; 6249 /* verify_region_sets: checks the HeapRegionManager, then the secondary free list under its lock; the routine continues past the end of this diff column. */ 6250 void G1CollectedHeap::verify_region_sets() { 6251 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); 6252 6253 // First, check the explicit lists. 6254 _hrm.verify(); 6255 { 6256 // Given that a concurrent operation might be adding regions to 6257 // the secondary free list we have to take the lock before 6258 // verifying it. 6259 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 6260 _secondary_free_list.verify_list(); 6261 } 6262 6263 // If a concurrent region freeing operation is in progress it will 6264 // be difficult to correctly attributed any free regions we come 6265 // across to the correct free list given that they might belong to 6266 // one of several (free_list, secondary_free_list, any local lists, | /* The '|' above separates the two diff columns; the second column begins here and replaces HeapRegionSetCount with plain uint counters throughout. */ 5289 5290 // Clear the card counts for this region. 5291 // Note: we only need to do this if the region is not young 5292 // (since we don't refine cards in young regions). 
5293 if (!hr->is_young()) { 5294 _cg1r->hot_card_cache()->reset_card_counts(hr); 5295 } 5296 hr->hr_clear(par, true /* clear_space */, locked /* locked */); 5297 free_list->add_ordered(hr); 5298 } 5299 /* Identical to the first column: strips the humongous attribute then frees via the common free_region() path. */ 5300 void G1CollectedHeap::free_humongous_region(HeapRegion* hr, 5301 FreeRegionList* free_list, 5302 bool par) { 5303 assert(hr->is_humongous(), "this is only for humongous regions"); 5304 assert(free_list != NULL, "pre-condition"); 5305 hr->clear_humongous(); 5306 free_region(hr, free_list, par); 5307 } 5308 /* Updated variant: removal counts are plain uints instead of HeapRegionSetCount; the zero-check compares the uints directly. */ 5309 void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed, 5310 const uint humongous_regions_removed) { 5311 if (old_regions_removed > 0 || humongous_regions_removed > 0) { 5312 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag); 5313 _old_set.bulk_remove(old_regions_removed); 5314 _humongous_set.bulk_remove(humongous_regions_removed); 5315 } 5316 5317 } 5318 /* prepend_to_freelist and decrement_summary_bytes are unchanged from the first column. */ 5319 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) { 5320 assert(list != NULL, "list can't be null"); 5321 if (!list->is_empty()) { 5322 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); 5323 _hrm.insert_list_into_free_list(list); 5324 } 5325 } 5326 5327 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) { 5328 decrease_used(bytes); 5329 } 5330 /* Elision: 5331 jumps to 5680, matching the first column's gap. */ 5331 class G1ParCleanupCTTask : public AbstractGangTask { 5680 5681 double end_sec = os::elapsedTime(); 5682 double elapsed_ms = (end_sec - start_sec) * 1000.0; 5683 5684 if (non_young) { 5685 non_young_time_ms += elapsed_ms; 5686 } else { 5687 young_time_ms += elapsed_ms; 5688 } 5689 5690 prepend_to_freelist(&local_free_list); 5691 decrement_summary_bytes(pre_used); 5692 policy->phase_times()->record_young_free_cset_time_ms(young_time_ms); 5693 policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms); 5694 } 5695 /* Updated closure: the removed-regions counter is now a plain uint (its declaration opens the next source line). */ 5696 class G1FreeHumongousRegionClosure : public HeapRegionClosure { 5697 private: 5698 FreeRegionList* _free_region_list; 5699 HeapRegionSet* _proxy_set; 5700 
uint _humongous_regions_removed; 5701 size_t _freed_bytes; 5702 public: 5703 5704 G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) : 5705 _free_region_list(free_region_list), _humongous_regions_removed(0), _freed_bytes(0) { 5706 } 5707 /* Only starts-humongous regions are considered; all others are passed over immediately. */ 5708 virtual bool doHeapRegion(HeapRegion* r) { 5709 if (!r->is_starts_humongous()) { 5710 return false; 5711 } 5712 5713 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 5714 5715 oop obj = (oop)r->bottom(); 5716 CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap(); 5717 5718 // The following checks whether the humongous object is live are sufficient. 5719 // The main additional check (in addition to having a reference from the roots 5720 // or the young gen) is whether the humongous object has a remembered set entry. 5721 // 5722 // A humongous object cannot be live if there is no remembered set for it 5723 // because: 5724 // - there can be no references from within humongous starts regions referencing 5725 // the object because we never allocate other objects into them. /* Elision: 5726-5769 not visible, matching the first column's gap. */ 5770 if (G1TraceEagerReclaimHumongousObjects) { 5771 gclog_or_tty->print_cr("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d", 5772 region_idx, 5773 (size_t)obj->size() * HeapWordSize, 5774 p2i(r->bottom()), 5775 r->rem_set()->occupied(), 5776 r->rem_set()->strong_code_roots_list_length(), 5777 next_bitmap->isMarked(r->bottom()), 5778 g1h->is_humongous_reclaim_candidate(region_idx), 5779 obj->is_typeArray() 5780 ); 5781 } 5782 // Need to clear mark bit of the humongous object if already set. 
5783 if (next_bitmap->isMarked(r->bottom())) { 5784 next_bitmap->clear(r->bottom()); 5785 } /* As in the first column, but the per-region bookkeeping is a simple uint increment. */ 5786 do { 5787 HeapRegion* next = g1h->next_region_in_humongous(r); 5788 _freed_bytes += r->used(); 5789 r->set_containing_set(NULL); 5790 _humongous_regions_removed++; 5791 g1h->free_humongous_region(r, _free_region_list, false); 5792 r = next; 5793 } while (r != NULL); 5794 5795 return false; 5796 } 5797 /* humongous_free_count() now returns the uint directly; the separate humongous_reclaimed() accessor from the first column is gone. */ 5798 uint humongous_free_count() { 5799 return _humongous_regions_removed; 5800 } 5801 5802 size_t bytes_freed() const { 5803 return _freed_bytes; 5804 } 5805 }; 5806 /* Updated driver: passes a literal 0 for the old-region count to remove_from_old_sets() and reuses humongous_free_count() for the reclaimed-count statistic. */ 5807 void G1CollectedHeap::eagerly_reclaim_humongous_regions() { 5808 assert_at_safepoint(true); 5809 5810 if (!G1EagerReclaimHumongousObjects || 5811 (!_has_humongous_reclaim_candidates && !G1TraceEagerReclaimHumongousObjects)) { 5812 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0); 5813 return; 5814 } 5815 5816 double start_time = os::elapsedTime(); 5817 5818 FreeRegionList local_cleanup_list("Local Humongous Cleanup List"); 5819 5820 G1FreeHumongousRegionClosure cl(&local_cleanup_list); 5821 heap_region_iterate(&cl); 5822 5823 remove_from_old_sets(0, cl.humongous_free_count()); 5824 5825 G1HRPrinter* hrp = hr_printer(); 5826 if (hrp->is_active()) { 5827 FreeRegionListIterator iter(&local_cleanup_list); 5828 while (iter.more_available()) { 5829 HeapRegion* hr = iter.get_next(); 5830 hrp->cleanup(hr); 5831 } 5832 } 5833 5834 prepend_to_freelist(&local_cleanup_list); 5835 decrement_summary_bytes(cl.bytes_freed()); 5836 5837 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0, 5838 cl.humongous_free_count()); 5839 } 5840 5841 // This routine is similar to the above but does not record 5842 // any policy statistics or update free lists; we are abandoning 5843 // the current incremental collection set in preparation of a 5844 // full collection. 
After the full GC we will start to build up 5845 // the incremental collection set again. 5846 // This is only called when we're doing a full collection 5847 // and is immediately followed by the tearing down of the young list. 5848 /* Identical CSet-abandonment loop to the first column. */ 5849 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) { 5850 HeapRegion* cur = cs_head; 5851 5852 while (cur != NULL) { 5853 HeapRegion* next = cur->next_in_collection_set(); 5854 assert(cur->in_collection_set(), "bad CS"); 5855 cur->set_next_in_collection_set(NULL); 5856 clear_in_cset(cur); 5857 cur->set_young_index_in_cset(-1); 5858 cur = next; /* Elision: 5858 jumps to 6179, matching the first column's 5863->6184 gap. */ 6179 "attempt heap expansion", 6180 ergo_format_reason("requested address range outside heap bounds") 6181 ergo_format_byte("region size"), 6182 HeapRegion::GrainWords * HeapWordSize); 6183 } 6184 _hrm.allocate_free_regions_starting_at(index, 1); 6185 return region_at(index); 6186 } 6187 return NULL; 6188 } 6189 6190 // Heap region set verification 6191 /* Updated verifier: per-category tallies are plain uints, so only region counts (not capacities) can be recounted here. */ 6192 class VerifyRegionListsClosure : public HeapRegionClosure { 6193 private: 6194 HeapRegionSet* _old_set; 6195 HeapRegionSet* _humongous_set; 6196 HeapRegionManager* _hrm; 6197 6198 public: 6199 uint _old_count; 6200 uint _humongous_count; 6201 uint _free_count; 6202 6203 VerifyRegionListsClosure(HeapRegionSet* old_set, 6204 HeapRegionSet* humongous_set, 6205 HeapRegionManager* hrm) : 6206 _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm), 6207 _old_count(), _humongous_count(), _free_count(){ } 6208 /* Classifies each region and asserts it belongs to the set its type demands. */ 6209 bool doHeapRegion(HeapRegion* hr) { 6210 if (hr->is_young()) { 6211 // TODO 6212 } else if (hr->is_humongous()) { 6213 assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index()); 6214 _humongous_count++; 6215 } else if (hr->is_empty()) { 6216 assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index()); 6217 _free_count++; 6218 } else if (hr->is_old()) { 6219 assert(hr->containing_set() == _old_set, "Heap region %u is 
old but not in the old set.", hr->hrm_index()); 6220 _old_count++; 6221 } else { 6222 // There are no other valid region types. Check for one invalid 6223 // one we can identify: pinned without old or humongous set. 6224 assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index()); 6225 ShouldNotReachHere(); 6226 } 6227 return false; 6228 } 6229 /* Updated verify_counts: one guarantee per category, lengths only -- the capacity checks from the first column are gone. */ 6230 void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) { 6231 guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count); 6232 guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count); 6233 guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count); 6234 } 6235 }; 6236 /* Identical to the first column up to the point where this chunk is cut off. */ 6237 void G1CollectedHeap::verify_region_sets() { 6238 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); 6239 6240 // First, check the explicit lists. 6241 _hrm.verify(); 6242 { 6243 // Given that a concurrent operation might be adding regions to 6244 // the secondary free list we have to take the lock before 6245 // verifying it. 6246 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); 6247 _secondary_free_list.verify_list(); 6248 } 6249 6250 // If a concurrent region freeing operation is in progress it will 6251 // be difficult to correctly attributed any free regions we come 6252 // across to the correct free list given that they might belong to 6253 // one of several (free_list, secondary_free_list, any local lists, |