< prev index next >

src/share/vm/gc/g1/g1CollectedHeap.cpp

Print this page




5178 
5179   // Clear the card counts for this region.
5180   // Note: we only need to do this if the region is not young
5181   // (since we don't refine cards in young regions).
5182   if (!hr->is_young()) {
5183     _cg1r->hot_card_cache()->reset_card_counts(hr);
5184   }
5185   hr->hr_clear(par, true /* clear_space */, locked /* locked */);
5186   free_list->add_ordered(hr);
5187 }
5188 
// Releases one humongous region back to the free list. The humongous
// designation is cleared first so the generic free_region() path (which
// clears the region and appends it to free_list) accepts it. free_list
// must be non-NULL; `par` is forwarded to free_region() (it ends up as
// the `par` argument of hr_clear() -- see free_region above).
5189 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5190                                             FreeRegionList* free_list,
5191                                             bool par) {
5192   assert(hr->is_humongous(), "this is only for humongous regions");
5193   assert(free_list != NULL, "pre-condition");
5194   hr->clear_humongous();
5195   free_region(hr, free_list, par);
5196 }
5197 
// Bulk-removes (by count/capacity) regions that have already been detached
// from the old and humongous region sets. OldSets_lock is taken without a
// safepoint check, and only when at least one count is non-zero, so the
// common zero/zero case pays nothing.
5198 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
5199                                            const HeapRegionSetCount& humongous_regions_removed) {
5200   if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
5201     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5202     _old_set.bulk_remove(old_regions_removed);
5203     _humongous_set.bulk_remove(humongous_regions_removed);
5204   }
5205 
5206 }
5207 
// Splices a (typically worker-local) region list into the master free list
// kept by _hrm. FreeList_lock (no safepoint check) is only acquired when
// the list actually has entries to insert.
5208 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
5209   assert(list != NULL, "list can't be null");
5210   if (!list->is_empty()) {
5211     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
5212     _hrm.insert_list_into_free_list(list);
5213   }
5214 }
5215 
// Thin wrapper: reduces the heap's used-bytes accounting by `bytes`.
5216 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
5217   decrease_used(bytes);
5218 }
5219 
5220 class G1ParCleanupCTTask : public AbstractGangTask {


5565 
5566   double end_sec = os::elapsedTime();
5567   double elapsed_ms = (end_sec - start_sec) * 1000.0;
5568 
5569   if (non_young) {
5570     non_young_time_ms += elapsed_ms;
5571   } else {
5572     young_time_ms += elapsed_ms;
5573   }
5574 
5575   prepend_to_freelist(&local_free_list);
5576   decrement_summary_bytes(pre_used);
5577   policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
5578   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
5579 }
5580 
5581 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
5582  private:
5583   FreeRegionList* _free_region_list;
5584   HeapRegionSet* _proxy_set;
5585   HeapRegionSetCount _humongous_regions_removed;
5586   size_t _freed_bytes;
5587  public:
5588 
5589   G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
5590     _free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) {
5591   }
5592 
5593   virtual bool doHeapRegion(HeapRegion* r) {
5594     if (!r->is_starts_humongous()) {
5595       return false;
5596     }
5597 
5598     G1CollectedHeap* g1h = G1CollectedHeap::heap();
5599 
5600     oop obj = (oop)r->bottom();
5601     CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap();
5602 
5603     // The following checks whether the humongous object is live are sufficient.
5604     // The main additional check (in addition to having a reference from the roots
5605     // or the young gen) is whether the humongous object has a remembered set entry.
5606     //
5607     // A humongous object cannot be live if there is no remembered set for it
5608     // because:
5609     // - there can be no references from within humongous starts regions referencing
5610     // the object because we never allocate other objects into them.


5650 
5651     log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5652                              region_idx,
5653                              (size_t)obj->size() * HeapWordSize,
5654                              p2i(r->bottom()),
5655                              r->rem_set()->occupied(),
5656                              r->rem_set()->strong_code_roots_list_length(),
5657                              next_bitmap->isMarked(r->bottom()),
5658                              g1h->is_humongous_reclaim_candidate(region_idx),
5659                              obj->is_typeArray()
5660                             );
5661 
5662     // Need to clear mark bit of the humongous object if already set.
5663     if (next_bitmap->isMarked(r->bottom())) {
5664       next_bitmap->clear(r->bottom());
5665     }
5666     do {
5667       HeapRegion* next = g1h->next_region_in_humongous(r);
5668       _freed_bytes += r->used();
5669       r->set_containing_set(NULL);
5670       _humongous_regions_removed.increment(1u, r->capacity());
5671       g1h->free_humongous_region(r, _free_region_list, false);
5672       r = next;
5673     } while (r != NULL);
5674 
5675     return false;
5676   }
5677 
5678   HeapRegionSetCount& humongous_free_count() {
5679     return _humongous_regions_removed;
5680   }
5681 
5682   size_t bytes_freed() const {
5683     return _freed_bytes;
5684   }
5685 
5686   size_t humongous_reclaimed() const {
5687     return _humongous_regions_removed.length();
5688   }
5689 };
5690 
// Walks all heap regions at a safepoint and eagerly reclaims dead humongous
// objects (those flagged as reclaim candidates). Freed regions are gathered
// on a local list first and merged into the global free list in one step,
// minimizing time spent under FreeList_lock.
5691 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
5692   assert_at_safepoint(true);
5693 
     // Fast exit: feature disabled, or no candidates. Note we still run the
     // full pass when gc+humongous debug logging is enabled, purely so the
     // closure can emit its per-region log lines.
5694   if (!G1EagerReclaimHumongousObjects ||
5695       (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
5696     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
5697     return;
5698   }
5699 
5700   double start_time = os::elapsedTime();
5701 
5702   FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
5703 
5704   G1FreeHumongousRegionClosure cl(&local_cleanup_list);
5705   heap_region_iterate(&cl);
5706 
     // Only the humongous set shrank; pass an empty count for the old set.
5707   HeapRegionSetCount empty_set;
5708   remove_from_old_sets(empty_set, cl.humongous_free_count());
5709 
     // Report each reclaimed region to the region printer, if it is active.
5710   G1HRPrinter* hrp = hr_printer();
5711   if (hrp->is_active()) {
5712     FreeRegionListIterator iter(&local_cleanup_list);
5713     while (iter.more_available()) {
5714       HeapRegion* hr = iter.get_next();
5715       hrp->cleanup(hr);
5716     }
5717   }
5718 
5719   prepend_to_freelist(&local_cleanup_list);
5720   decrement_summary_bytes(cl.bytes_freed());
5721 
5722   g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
5723                                                                     cl.humongous_reclaimed());
5724 }
5725 
5726 // This routine is similar to the above but does not record
5727 // any policy statistics or update free lists; we are abandoning
5728 // the current incremental collection set in preparation of a
5729 // full collection. After the full GC we will start to build up
5730 // the incremental collection set again.
5731 // This is only called when we're doing a full collection
5732 // and is immediately followed by the tearing down of the young list.
5733 
5734 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
5735   HeapRegion* cur = cs_head;
5736 
5737   while (cur != NULL) {
5738     HeapRegion* next = cur->next_in_collection_set();
5739     assert(cur->in_collection_set(), "bad CS");
5740     cur->set_next_in_collection_set(NULL);
5741     clear_in_cset(cur);
5742     cur->set_young_index_in_cset(-1);
5743     cur = next;


6049   if (index != G1_NO_HRM_INDEX) {
6050     if (expanded) {
6051       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
6052                                 HeapRegion::GrainWords * HeapWordSize);
6053     }
6054     _hrm.allocate_free_regions_starting_at(index, 1);
6055     return region_at(index);
6056   }
6057   return NULL;
6058 }
6059 
6060 // Heap region set verification
6061 
// Verification closure: recounts regions by type (old / humongous / free),
// asserting along the way that each region sits in the set its type implies.
// verify_counts() then cross-checks the tallies (length and capacity)
// against the region sets' own bookkeeping.
6062 class VerifyRegionListsClosure : public HeapRegionClosure {
6063 private:
6064   HeapRegionSet*   _old_set;
6065   HeapRegionSet*   _humongous_set;
6066   HeapRegionManager*   _hrm;
6067 
6068 public:
     // Tallies accumulated during iteration; public so callers could read
     // them directly, though verify_counts() is the intended consumer.
6069   HeapRegionSetCount _old_count;
6070   HeapRegionSetCount _humongous_count;
6071   HeapRegionSetCount _free_count;
6072 
6073   VerifyRegionListsClosure(HeapRegionSet* old_set,
6074                            HeapRegionSet* humongous_set,
6075                            HeapRegionManager* hrm) :
6076     _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
6077     _old_count(), _humongous_count(), _free_count(){ }
6078 
     // Classifies one region and bumps the matching tally. Always returns
     // false, which by HeapRegionClosure convention continues the iteration
     // (TODO confirm against the closure's declaration, not visible here).
6079   bool doHeapRegion(HeapRegion* hr) {
6080     if (hr->is_young()) {
6081       // TODO
6082     } else if (hr->is_humongous()) {
6083       assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
6084       _humongous_count.increment(1u, hr->capacity());
6085     } else if (hr->is_empty()) {
6086       assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
6087       _free_count.increment(1u, hr->capacity());
6088     } else if (hr->is_old()) {
6089       assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
6090       _old_count.increment(1u, hr->capacity());
6091     } else {
6092       // There are no other valid region types. Check for one invalid
6093       // one we can identify: pinned without old or humongous set.
6094       assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
6095       ShouldNotReachHere();
6096     }
6097     return false;
6098   }
6099 
     // Compares the recounted length/capacity of each category against the
     // sets' (and the free list's) own counters; guarantees on mismatch.
6100   void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
6101     guarantee(old_set->length() == _old_count.length(), "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length());
6102     guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), "Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6103               old_set->total_capacity_bytes(), _old_count.capacity());
6104 
6105     guarantee(humongous_set->length() == _humongous_count.length(), "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length());
6106     guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), "Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6107               humongous_set->total_capacity_bytes(), _humongous_count.capacity());
6108 
6109     guarantee(free_list->num_free_regions() == _free_count.length(), "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length());
6110     guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), "Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6111               free_list->total_capacity_bytes(), _free_count.capacity());
6112   }
6113 };
6114 
6115 void G1CollectedHeap::verify_region_sets() {
6116   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6117 
6118   // First, check the explicit lists.
6119   _hrm.verify();
6120   {
6121     // Given that a concurrent operation might be adding regions to
6122     // the secondary free list we have to take the lock before
6123     // verifying it.
6124     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6125     _secondary_free_list.verify_list();
6126   }
6127 
6128   // If a concurrent region freeing operation is in progress it will
6129   // be difficult to correctly attributed any free regions we come
6130   // across to the correct free list given that they might belong to
6131   // one of several (free_list, secondary_free_list, any local lists,




5178 
5179   // Clear the card counts for this region.
5180   // Note: we only need to do this if the region is not young
5181   // (since we don't refine cards in young regions).
5182   if (!hr->is_young()) {
5183     _cg1r->hot_card_cache()->reset_card_counts(hr);
5184   }
5185   hr->hr_clear(par, true /* clear_space */, locked /* locked */);
5186   free_list->add_ordered(hr);
5187 }
5188 
// Frees a humongous region: drops its humongous designation, then routes
// it through the common free_region() path, which clears the region and
// adds it to free_list. Preconditions: hr is humongous, free_list non-NULL.
// `par` is passed through to free_region()/hr_clear().
5189 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
5190                                             FreeRegionList* free_list,
5191                                             bool par) {
5192   assert(hr->is_humongous(), "this is only for humongous regions");
5193   assert(free_list != NULL, "pre-condition");
5194   hr->clear_humongous();
5195   free_region(hr, free_list, par);
5196 }
5197 
// Bulk-removes already-detached regions from the old and humongous sets.
// This version takes plain uint region counts (capacity bookkeeping is no
// longer threaded through). OldSets_lock is acquired without a safepoint
// check, and skipped entirely when both counts are zero.
5198 void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
5199                                            const uint humongous_regions_removed) {
5200   if (old_regions_removed > 0 || humongous_regions_removed > 0) {
5201     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5202     _old_set.bulk_remove(old_regions_removed);
5203     _humongous_set.bulk_remove(humongous_regions_removed);
5204   }
5205 
5206 }
5207 
// Inserts a local region list into _hrm's master free list; takes
// FreeList_lock (no safepoint check) only when the list is non-empty.
5208 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
5209   assert(list != NULL, "list can't be null");
5210   if (!list->is_empty()) {
5211     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
5212     _hrm.insert_list_into_free_list(list);
5213   }
5214 }
5215 
// Delegates to decrease_used() to shrink the heap's used-bytes summary.
5216 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
5217   decrease_used(bytes);
5218 }
5219 
5220 class G1ParCleanupCTTask : public AbstractGangTask {


5565 
5566   double end_sec = os::elapsedTime();
5567   double elapsed_ms = (end_sec - start_sec) * 1000.0;
5568 
5569   if (non_young) {
5570     non_young_time_ms += elapsed_ms;
5571   } else {
5572     young_time_ms += elapsed_ms;
5573   }
5574 
5575   prepend_to_freelist(&local_free_list);
5576   decrement_summary_bytes(pre_used);
5577   policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
5578   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
5579 }
5580 
5581 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
5582  private:
5583   FreeRegionList* _free_region_list;
5584   HeapRegionSet* _proxy_set;
5585   uint _humongous_regions_removed;
5586   size_t _freed_bytes;
5587  public:
5588 
5589   G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
5590     _free_region_list(free_region_list), _humongous_regions_removed(0), _freed_bytes(0) {
5591   }
5592 
5593   virtual bool doHeapRegion(HeapRegion* r) {
5594     if (!r->is_starts_humongous()) {
5595       return false;
5596     }
5597 
5598     G1CollectedHeap* g1h = G1CollectedHeap::heap();
5599 
5600     oop obj = (oop)r->bottom();
5601     CMBitMap* next_bitmap = g1h->concurrent_mark()->nextMarkBitMap();
5602 
5603     // The following checks whether the humongous object is live are sufficient.
5604     // The main additional check (in addition to having a reference from the roots
5605     // or the young gen) is whether the humongous object has a remembered set entry.
5606     //
5607     // A humongous object cannot be live if there is no remembered set for it
5608     // because:
5609     // - there can be no references from within humongous starts regions referencing
5610     // the object because we never allocate other objects into them.


5650 
5651     log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
5652                              region_idx,
5653                              (size_t)obj->size() * HeapWordSize,
5654                              p2i(r->bottom()),
5655                              r->rem_set()->occupied(),
5656                              r->rem_set()->strong_code_roots_list_length(),
5657                              next_bitmap->isMarked(r->bottom()),
5658                              g1h->is_humongous_reclaim_candidate(region_idx),
5659                              obj->is_typeArray()
5660                             );
5661 
5662     // Need to clear mark bit of the humongous object if already set.
5663     if (next_bitmap->isMarked(r->bottom())) {
5664       next_bitmap->clear(r->bottom());
5665     }
5666     do {
5667       HeapRegion* next = g1h->next_region_in_humongous(r);
5668       _freed_bytes += r->used();
5669       r->set_containing_set(NULL);
5670       _humongous_regions_removed++;
5671       g1h->free_humongous_region(r, _free_region_list, false);
5672       r = next;
5673     } while (r != NULL);
5674 
5675     return false;
5676   }
5677 
5678   uint humongous_free_count() {
5679     return _humongous_regions_removed;
5680   }
5681 
5682   size_t bytes_freed() const {
5683     return _freed_bytes;
5684   }




5685 };
5686 
// Safepoint-only pass that reclaims dead humongous objects flagged as
// reclaim candidates. Regions are freed onto a local list and merged into
// the global free list afterwards, keeping FreeList_lock hold time short.
5687 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
5688   assert_at_safepoint(true);
5689 
     // Fast exit when disabled or nothing to do. The pass still runs when
     // gc+humongous debug logging is on, solely to produce the log output.
5690   if (!G1EagerReclaimHumongousObjects ||
5691       (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
5692     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
5693     return;
5694   }
5695 
5696   double start_time = os::elapsedTime();
5697 
5698   FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
5699 
5700   G1FreeHumongousRegionClosure cl(&local_cleanup_list);
5701   heap_region_iterate(&cl);
5702 
     // Old set is untouched here, hence the literal 0 for its removed count.
5703   remove_from_old_sets(0, cl.humongous_free_count());

5704 
     // Notify the region printer about each reclaimed region, if active.
5705   G1HRPrinter* hrp = hr_printer();
5706   if (hrp->is_active()) {
5707     FreeRegionListIterator iter(&local_cleanup_list);
5708     while (iter.more_available()) {
5709       HeapRegion* hr = iter.get_next();
5710       hrp->cleanup(hr);
5711     }
5712   }
5713 
5714   prepend_to_freelist(&local_cleanup_list);
5715   decrement_summary_bytes(cl.bytes_freed());
5716 
5717   g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
5718                                                                     cl.humongous_free_count());
5719 }
5720 
5721 // This routine is similar to the above but does not record
5722 // any policy statistics or update free lists; we are abandoning
5723 // the current incremental collection set in preparation of a
5724 // full collection. After the full GC we will start to build up
5725 // the incremental collection set again.
5726 // This is only called when we're doing a full collection
5727 // and is immediately followed by the tearing down of the young list.
5728 
5729 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
5730   HeapRegion* cur = cs_head;
5731 
5732   while (cur != NULL) {
5733     HeapRegion* next = cur->next_in_collection_set();
5734     assert(cur->in_collection_set(), "bad CS");
5735     cur->set_next_in_collection_set(NULL);
5736     clear_in_cset(cur);
5737     cur->set_young_index_in_cset(-1);
5738     cur = next;


6044   if (index != G1_NO_HRM_INDEX) {
6045     if (expanded) {
6046       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
6047                                 HeapRegion::GrainWords * HeapWordSize);
6048     }
6049     _hrm.allocate_free_regions_starting_at(index, 1);
6050     return region_at(index);
6051   }
6052   return NULL;
6053 }
6054 
6055 // Heap region set verification
6056 
// Verification closure: tallies regions by type (old / humongous / free)
// with simple uint counters, asserting each region's containing set matches
// its type; verify_counts() then checks the tallies against the sets' and
// free list's own length accounting.
6057 class VerifyRegionListsClosure : public HeapRegionClosure {
6058 private:
6059   HeapRegionSet*   _old_set;
6060   HeapRegionSet*   _humongous_set;
6061   HeapRegionManager*   _hrm;
6062 
6063 public:
     // Per-category counters filled in by doHeapRegion(); read via
     // verify_counts(). Plain uints -- capacity is no longer tracked here.
6064   uint _old_count;
6065   uint _humongous_count;
6066   uint _free_count;
6067 
6068   VerifyRegionListsClosure(HeapRegionSet* old_set,
6069                            HeapRegionSet* humongous_set,
6070                            HeapRegionManager* hrm) :
6071     _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
6072     _old_count(), _humongous_count(), _free_count(){ }
6073 
     // Classifies one region and bumps the matching counter. Returns false,
     // which by HeapRegionClosure convention keeps the iteration going
     // (TODO confirm against the closure's declaration, not visible here).
6074   bool doHeapRegion(HeapRegion* hr) {
6075     if (hr->is_young()) {
6076       // TODO
6077     } else if (hr->is_humongous()) {
6078       assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
6079       _humongous_count++;
6080     } else if (hr->is_empty()) {
6081       assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
6082       _free_count++;
6083     } else if (hr->is_old()) {
6084       assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
6085       _old_count++;
6086     } else {
6087       // There are no other valid region types. Check for one invalid
6088       // one we can identify: pinned without old or humongous set.
6089       assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
6090       ShouldNotReachHere();
6091     }
6092     return false;
6093   }
6094 
     // Cross-checks each tally against the corresponding set's length (or
     // the free list's region count); guarantees on any mismatch.
6095   void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
6096     guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
6097     guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
6098     guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);








6099   }
6100 };
6101 
6102 void G1CollectedHeap::verify_region_sets() {
6103   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6104 
6105   // First, check the explicit lists.
6106   _hrm.verify();
6107   {
6108     // Given that a concurrent operation might be adding regions to
6109     // the secondary free list we have to take the lock before
6110     // verifying it.
6111     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6112     _secondary_free_list.verify_list();
6113   }
6114 
6115   // If a concurrent region freeing operation is in progress it will
6116   // be difficult to correctly attributed any free regions we come
6117   // across to the correct free list given that they might belong to
6118   // one of several (free_list, secondary_free_list, any local lists,


< prev index next >