src/share/vm/gc/g1/g1CollectedHeap.cpp (old version)
4507   _hot_card_cache->reset_hot_cache();
4508   _hot_card_cache->set_use_cache(true);
4509 
4510   purge_code_root_memory();
4511 
4512   redirty_logged_cards();
4513 #if defined(COMPILER2) || INCLUDE_JVMCI
4514   DerivedPointerTable::update_pointers();
4515 #endif
4516 }
4517 
4518 void G1CollectedHeap::record_obj_copy_mem_stats() {
4519   g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
4520 
4521   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
4522                                                create_g1_evac_summary(&_old_evac_stats));
4523 }
4524 
4525 void G1CollectedHeap::free_region(HeapRegion* hr,
4526                                   FreeRegionList* free_list,
4527                                   bool par,
4528                                   bool locked) {
4529   assert(!hr->is_free(), "the region should not be free");
4530   assert(!hr->is_empty(), "the region should not be empty");
4531   assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
4532   assert(free_list != NULL, "pre-condition");
4533 
4534   if (G1VerifyBitmaps) {
4535     MemRegion mr(hr->bottom(), hr->end());
4536     concurrent_mark()->clearRangePrevBitmap(mr);
4537   }
4538 
4539   // Clear the card counts for this region.
4540   // Note: we only need to do this if the region is not young
4541   // (since we don't refine cards in young regions).
4542   if (!hr->is_young()) {
4543     _hot_card_cache->reset_card_counts(hr);
4544   }
4545   hr->hr_clear(par, true /* clear_space */, locked /* locked */);
4546   free_list->add_ordered(hr);
4547 }
4548 
4549 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
4550                                             FreeRegionList* free_list,
4551                                             bool par) {
4552   assert(hr->is_humongous(), "this is only for humongous regions");
4553   assert(free_list != NULL, "pre-condition");
4554   hr->clear_humongous();
4555   free_region(hr, free_list, par);
4556 }
4557 
4558 void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
4559                                            const uint humongous_regions_removed) {
4560   if (old_regions_removed > 0 || humongous_regions_removed > 0) {
4561     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4562     _old_set.bulk_remove(old_regions_removed);
4563     _humongous_set.bulk_remove(humongous_regions_removed);
4564   }
4565 
4566 }
4567 
4568 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
4569   assert(list != NULL, "list can't be null");
4570   if (!list->is_empty()) {
4571     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
4572     _hrm.insert_list_into_free_list(list);
4573   }
4574 }
4575 
4583   HeapRegionClaimer _hrclaimer;
4584 
4585 public:
4586   G1ParScrubRemSetTask(G1RemSet* g1_rs, uint num_workers) :
4587     AbstractGangTask("G1 ScrubRS"),
4588     _g1rs(g1_rs),
4589     _hrclaimer(num_workers) {
4590   }
4591 
4592   void work(uint worker_id) {
4593     _g1rs->scrub(worker_id, &_hrclaimer);
4594   }
4595 };
4596 
4597 void G1CollectedHeap::scrub_rem_set() {
4598   uint num_workers = workers()->active_workers();
4599   G1ParScrubRemSetTask g1_par_scrub_rs_task(g1_rem_set(), num_workers);
4600   workers()->run_task(&g1_par_scrub_rs_task);
4601 }
4602 
4603 class G1FreeCollectionSetClosure : public HeapRegionClosure {
4604 private:
4605   const size_t* _surviving_young_words;
4606 
4607   FreeRegionList _local_free_list;
4608   size_t _rs_lengths;
4609   // Bytes used in successfully evacuated regions before the evacuation.
4610   size_t _before_used_bytes;
4611   // Bytes used in unsuccessfully evacuated regions before the evacuation.
4612   size_t _after_used_bytes;
4613 
4614   size_t _bytes_allocated_in_old_since_last_gc;
4615 
4616   size_t _failure_used_words;
4617   size_t _failure_waste_words;
4618 
4619   double _young_time;
4620   double _non_young_time;
4621 public:
4622   G1FreeCollectionSetClosure(const size_t* surviving_young_words) :
4623     HeapRegionClosure(),
4624     _surviving_young_words(surviving_young_words),
4625     _local_free_list("Local Region List for CSet Freeing"),
4626     _rs_lengths(0),
4627     _before_used_bytes(0),
4628     _after_used_bytes(0),
4629     _bytes_allocated_in_old_since_last_gc(0),
4630     _failure_used_words(0),
4631     _failure_waste_words(0),
4632     _young_time(0.0),
4633     _non_young_time(0.0) {
4634   }
4635 
4636   virtual bool doHeapRegion(HeapRegion* r) {
4637     double start_time = os::elapsedTime();
4638 
4639     bool is_young = r->is_young();
4640 
4641     G1CollectedHeap* g1h = G1CollectedHeap::heap();
4642     assert(!g1h->is_on_master_free_list(r), "sanity");
4643 
4644     _rs_lengths += r->rem_set()->occupied_locked();
4645 
4646     assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
4647     g1h->clear_in_cset(r);
4648 
4649     if (is_young) {
4650       int index = r->young_index_in_cset();
4651       assert(index != -1, "Young index in collection set must not be -1 for region %u", r->hrm_index());
4652       assert((uint) index < g1h->collection_set()->young_region_length(), "invariant");
4653       size_t words_survived = _surviving_young_words[index];
4654       r->record_surv_words_in_group(words_survived);
4655     } else {
4656       assert(r->young_index_in_cset() == -1, "Young index for old region %u in collection set must be -1", r->hrm_index());
4657     }
4658 
4659     if (!r->evacuation_failed()) {
4660       assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
4661       _before_used_bytes += r->used();
4662       g1h->free_region(r, &_local_free_list, false /* par */, true /* locked */);
4663     } else {
4664       r->uninstall_surv_rate_group();
4665       r->set_young_index_in_cset(-1);
4666       r->set_evacuation_failed(false);
4667       // When moving a young gen region to old gen, we "allocate" that whole region
4668       // there. This is in addition to any already evacuated objects. Notify the
4669       // policy about that.
4670       // Old gen regions do not cause an additional allocation: both the objects
4671       // still in the region and the ones already moved are accounted for elsewhere.
4672       if (is_young) {
4673         _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
4674       }
4675       // The region is now considered to be old.
4676       r->set_old();
4677       // Do some allocation statistics accounting. Regions that failed evacuation
4678       // are always made old, so there is no need to update anything in the young
4679       // gen statistics, but we need to update old gen statistics.
4680       size_t used_words = r->marked_bytes() / HeapWordSize;
4681 
4682       _failure_used_words += used_words;
4683       _failure_waste_words += HeapRegion::GrainWords - used_words;
4684 
4685       g1h->old_set_add(r);
4686       _after_used_bytes += r->used();
4687     }
4688 
4689     if (is_young) {
4690       _young_time += os::elapsedTime() - start_time;
4691     } else {
4692       _non_young_time += os::elapsedTime() - start_time;
4693     }
4694     return false;
4695   }
4696 
4697   FreeRegionList* local_free_list() { return &_local_free_list; }
4698   size_t rs_lengths() const { return _rs_lengths; }
4699   size_t before_used_bytes() const { return _before_used_bytes; }
4700   size_t after_used_bytes() const { return _after_used_bytes; }
4701 
4702   size_t bytes_allocated_in_old_since_last_gc() const { return _bytes_allocated_in_old_since_last_gc; }
4703 
4704   size_t failure_used_words() const { return _failure_used_words; }
4705   size_t failure_waste_words() const { return _failure_waste_words; }
4706 
4707   double young_time() const { return _young_time; }
4708   double non_young_time() const { return _non_young_time; }
4709 };
4710 
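
A worked instance of the failure accounting above (illustrative numbers only;
HeapRegion::GrainBytes is taken as 1 MB, the smallest G1 region size, and
HeapWordSize as 8, i.e. a 64-bit VM):

  // GrainWords = GrainBytes / HeapWordSize = 1048576 / 8 = 131072
  // For a failed region with marked_bytes() == 262144 (256 KB live):
  //   used_words            = 262144 / 8       = 32768
  //   _failure_used_words  += 32768
  //   _failure_waste_words += 131072 - 32768   = 98304
  // i.e. the ~768 KB of dead space in the region is accounted as waste.
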
4711 void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4712   _eden.clear();
4713 
4714   G1FreeCollectionSetClosure cl(surviving_young_words);
4715   collection_set_iterate(&cl);
4716 
4717   evacuation_info.set_regions_freed(cl.local_free_list()->length());
4718   evacuation_info.increment_collectionset_used_after(cl.after_used_bytes());
4719 
4720   G1Policy* policy = g1_policy();
4721 
4722   policy->record_max_rs_lengths(cl.rs_lengths());
4723   policy->cset_regions_freed();
4724 
4725   prepend_to_freelist(cl.local_free_list());
4726   decrement_summary_bytes(cl.before_used_bytes());
4727 
4728   policy->add_bytes_allocated_in_old_since_last_gc(cl.bytes_allocated_in_old_since_last_gc());
4729 
4730   _old_evac_stats.add_failure_used_and_waste(cl.failure_used_words(), cl.failure_waste_words());
4731 
4732   policy->phase_times()->record_young_free_cset_time_ms(cl.young_time() * 1000.0);
4733   policy->phase_times()->record_non_young_free_cset_time_ms(cl.non_young_time() * 1000.0);
4734 
4735   collection_set->clear();
4736 }
4737 
4738 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
4739  private:
4740   FreeRegionList* _free_region_list;
4741   HeapRegionSet* _proxy_set;
4742   uint _humongous_regions_removed;
4743   size_t _freed_bytes;
4744  public:
4745 
4746   G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
4747     _free_region_list(free_region_list), _proxy_set(NULL), _humongous_regions_removed(0), _freed_bytes(0) {
4748   }
4749 
4750   virtual bool doHeapRegion(HeapRegion* r) {
4751     if (!r->is_starts_humongous()) {
4752       return false;
4753     }
4808     log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
4809                              region_idx,
4810                              (size_t)obj->size() * HeapWordSize,
4811                              p2i(r->bottom()),
4812                              r->rem_set()->occupied(),
4813                              r->rem_set()->strong_code_roots_list_length(),
4814                              next_bitmap->isMarked(r->bottom()),
4815                              g1h->is_humongous_reclaim_candidate(region_idx),
4816                              obj->is_typeArray()
4817                             );
4818 
4819     // Need to clear mark bit of the humongous object if already set.
4820     if (next_bitmap->isMarked(r->bottom())) {
4821       next_bitmap->clear(r->bottom());
4822     }
4823     do {
4824       HeapRegion* next = g1h->next_region_in_humongous(r); // capture next before freeing r
4825       _freed_bytes += r->used();
4826       r->set_containing_set(NULL);
4827       _humongous_regions_removed++;
4828       g1h->free_humongous_region(r, _free_region_list, false);
4829       r = next;
4830     } while (r != NULL);
4831 
4832     return false;
4833   }
4834 
4835   uint humongous_free_count() {
4836     return _humongous_regions_removed;
4837   }
4838 
4839   size_t bytes_freed() const {
4840     return _freed_bytes;
4841   }
4842 };
4843 
4844 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
4845   assert_at_safepoint(true);
4846 
4847   if (!G1EagerReclaimHumongousObjects ||
4848       (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {

src/share/vm/gc/g1/g1CollectedHeap.cpp (new version)

4507   _hot_card_cache->reset_hot_cache();
4508   _hot_card_cache->set_use_cache(true);
4509 
4510   purge_code_root_memory();
4511 
4512   redirty_logged_cards();
4513 #if defined(COMPILER2) || INCLUDE_JVMCI
4514   DerivedPointerTable::update_pointers();
4515 #endif
4516 }
4517 
4518 void G1CollectedHeap::record_obj_copy_mem_stats() {
4519   g1_policy()->add_bytes_allocated_in_old_since_last_gc(_old_evac_stats.allocated() * HeapWordSize);
4520 
4521   _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
4522                                                create_g1_evac_summary(&_old_evac_stats));
4523 }
4524 
4525 void G1CollectedHeap::free_region(HeapRegion* hr,
4526                                   FreeRegionList* free_list,
4527                                   bool skip_remset,
4528                                   bool skip_hot_card_cache,
4529                                   bool locked) {
4530   assert(!hr->is_free(), "the region should not be free");
4531   assert(!hr->is_empty(), "the region should not be empty");
4532   assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
4533   assert(free_list != NULL, "pre-condition");
4534 
4535   if (G1VerifyBitmaps) {
4536     MemRegion mr(hr->bottom(), hr->end());
4537     concurrent_mark()->clearRangePrevBitmap(mr);
4538   }
4539 
4540   // Clear the card counts for this region.
4541   // Note: we only need to do this if the region is not young
4542   // (since we don't refine cards in young regions).
4543   if (!skip_hot_card_cache && !hr->is_young()) {
4544     _hot_card_cache->reset_card_counts(hr);
4545   }
4546   hr->hr_clear(skip_remset, true /* clear_space */, locked /* locked */);
4547   free_list->add_ordered(hr);
4548 }
4549 
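
For reference, both settings of the new flags occur later on this page: the
collection set freeing path skips both the remembered set and the hot card
cache (they are handled separately in the parallel phase of the new task),
while eager humongous reclaim still clears the remembered set here:

  // G1SerialFreeCollectionSetClosure::doHeapRegion():
  g1h->free_region(r,
                   &_local_free_list,
                   true, /* skip_remset */
                   true, /* skip_hot_card_cache */
                   true  /* locked */);

  // G1FreeHumongousRegionClosure::doHeapRegion():
  g1h->free_humongous_region(r, _free_region_list, false /* skip_remset */);
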
4550 void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
4551                                             FreeRegionList* free_list,
4552                                             bool skip_remset) {
4553   assert(hr->is_humongous(), "this is only for humongous regions");
4554   assert(free_list != NULL, "pre-condition");
4555   hr->clear_humongous();
4556   free_region(hr, free_list, skip_remset);
4557 }
4558 
4559 void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
4560                                            const uint humongous_regions_removed) {
4561   if (old_regions_removed > 0 || humongous_regions_removed > 0) {
4562     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4563     _old_set.bulk_remove(old_regions_removed);
4564     _humongous_set.bulk_remove(humongous_regions_removed);
4565   }
4566 
4567 }
4568 
4569 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
4570   assert(list != NULL, "list can't be null");
4571   if (!list->is_empty()) {
4572     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
4573     _hrm.insert_list_into_free_list(list);
4574   }
4575 }
4576 
4584   HeapRegionClaimer _hrclaimer;
4585 
4586 public:
4587   G1ParScrubRemSetTask(G1RemSet* g1_rs, uint num_workers) :
4588     AbstractGangTask("G1 ScrubRS"),
4589     _g1rs(g1_rs),
4590     _hrclaimer(num_workers) {
4591   }
4592 
4593   void work(uint worker_id) {
4594     _g1rs->scrub(worker_id, &_hrclaimer);
4595   }
4596 };
4597 
4598 void G1CollectedHeap::scrub_rem_set() {
4599   uint num_workers = workers()->active_workers();
4600   G1ParScrubRemSetTask g1_par_scrub_rs_task(g1_rem_set(), num_workers);
4601   workers()->run_task(&g1_par_scrub_rs_task);
4602 }
4603 
4604 class G1FreeCollectionSetTask : public AbstractGangTask {
4605 private:
4606 
4607   // Closure applied to all regions in the collection set to do work that needs to
4608   // be done serially in a single thread.
4609   class G1SerialFreeCollectionSetClosure : public HeapRegionClosure {
4610   private:
4611     EvacuationInfo* _evacuation_info;
4612     const size_t* _surviving_young_words;
4613 
4614     // Bytes used in successfully evacuated regions before the evacuation.
4615     size_t _before_used_bytes;
4616     // Bytes used in unsuccessfully evacuated regions before the evacuation.
4617     size_t _after_used_bytes;
4618 
4619     size_t _bytes_allocated_in_old_since_last_gc;
4620 
4621     size_t _failure_used_words;
4622     size_t _failure_waste_words;
4623 
4624     FreeRegionList _local_free_list;
4625   public:
4626     G1SerialFreeCollectionSetClosure(EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
4627       HeapRegionClosure(),
4628       _evacuation_info(evacuation_info),
4629       _surviving_young_words(surviving_young_words),
4630       _before_used_bytes(0),
4631       _after_used_bytes(0),
4632       _bytes_allocated_in_old_since_last_gc(0),
4633       _failure_used_words(0),
4634       _failure_waste_words(0),
4635       _local_free_list("Local Region List for CSet Freeing") {
4636     }
4637 
4638     virtual bool doHeapRegion(HeapRegion* r) {
4639       G1CollectedHeap* g1h = G1CollectedHeap::heap();
4640 
4641       assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
4642       g1h->clear_in_cset(r);
4643 
4644       if (r->is_young()) {
4645         assert(r->young_index_in_cset() != -1 && (uint)r->young_index_in_cset() < g1h->collection_set()->young_region_length(),
4646                "Young index %d is wrong for region %u of type %s with %u young regions",
4647                r->young_index_in_cset(),
4648                r->hrm_index(),
4649                r->get_type_str(),
4650                g1h->collection_set()->young_region_length());
4651         size_t words_survived = _surviving_young_words[r->young_index_in_cset()];
4652         r->record_surv_words_in_group(words_survived);
4653       }
4654 
4655       if (!r->evacuation_failed()) {
4656         assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
4657         _before_used_bytes += r->used();
4658         g1h->free_region(r,
4659                          &_local_free_list,
4660                          true, /* skip_remset */
4661                          true, /* skip_hot_card_cache */
4662                          true  /* locked */);
4663       } else {
4664         r->uninstall_surv_rate_group();
4665         r->set_young_index_in_cset(-1);
4666         r->set_evacuation_failed(false);
4667         // When moving a young gen region to old gen, we "allocate" that whole region
4668         // there. This is in addition to any already evacuated objects. Notify the
4669         // policy about that.
4670         // Old gen regions do not cause an additional allocation: both the objects
4671         // still in the region and the ones already moved are accounted for elsewhere.
4672         if (r->is_young()) {
4673           _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
4674         }
4675         // The region is now considered to be old.
4676         r->set_old();
4677         // Do some allocation statistics accounting. Regions that failed evacuation
4678         // are always made old, so there is no need to update anything in the young
4679         // gen statistics, but we need to update old gen statistics.
4680         size_t used_words = r->marked_bytes() / HeapWordSize;
4681 
4682         _failure_used_words += used_words;
4683         _failure_waste_words += HeapRegion::GrainWords - used_words;
4684 
4685         g1h->old_set_add(r);
4686         _after_used_bytes += r->used();
4687       }
4688       return false;
4689     }
4690 
4691     void complete_work() {
4692       G1CollectedHeap* g1h = G1CollectedHeap::heap();
4693 
4694       _evacuation_info->set_regions_freed(_local_free_list.length());
4695       _evacuation_info->increment_collectionset_used_after(_after_used_bytes);
4696 
4697       g1h->prepend_to_freelist(&_local_free_list);
4698       g1h->decrement_summary_bytes(_before_used_bytes);
4699 
4700       G1Policy* policy = g1h->g1_policy();
4701       policy->add_bytes_allocated_in_old_since_last_gc(_bytes_allocated_in_old_since_last_gc);
4702 
4703       g1h->alloc_buffer_stats(InCSetState::Old)->add_failure_used_and_waste(_failure_used_words, _failure_waste_words);
4704     }
4705   };
4706 
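  // (Division of labor, as implied by the rest of this task: the closure
  // above runs exactly once over the collection set, single-threaded and
  // under OldSets_lock, via do_serial_work(); clearing remembered sets,
  // resetting hot card cache counts and accumulating _rs_lengths are done
  // in parallel, in chunks, via do_parallel_work_for_region().)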
4707   G1CollectionSet* _collection_set;
4708   G1SerialFreeCollectionSetClosure _cl;
4709   const size_t* _surviving_young_words;
4710 
4711   size_t _rs_lengths;
4712 
4713   volatile jint _serial_work_claim;
4714 
4715   struct WorkItem {
4716     uint region_idx;
4717     bool is_young;
4718     bool evacuation_failed;
4719 
4720     WorkItem(HeapRegion* r) {
4721       region_idx = r->hrm_index();
4722       is_young = r->is_young();
4723       evacuation_failed = r->evacuation_failed();
4724     }
4725   };
4726 
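  // (WorkItem snapshots these three fields in prepare_work(), before the
  // gang runs: the serial closure may concurrently reset a failed region's
  // young index and evacuation_failed flag and mark it old, so the parallel
  // phase reads this copy rather than re-reading the HeapRegion.)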
4727   volatile size_t _parallel_work_claim;
4728   size_t _num_work_items;
4729   WorkItem* _work_items;
4730 
4731   void do_serial_work() {
4732     // Need to grab the lock to be allowed to modify the old region list.
4733     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
4734     _collection_set->iterate(&_cl);
4735   }
4736 
4737   void do_parallel_work_for_region(uint region_idx, bool is_young, bool evacuation_failed) {
4738     G1CollectedHeap* g1h = G1CollectedHeap::heap();
4739 
4740     HeapRegion* r = g1h->region_at(region_idx);
4741     assert(!g1h->is_on_master_free_list(r), "sanity");
4742 
4743     Atomic::add(r->rem_set()->occupied_locked(), &_rs_lengths);
4744 
4745     if (!is_young) {
4746       g1h->_hot_card_cache->reset_card_counts(r);
4747     }
4748 
4749     if (!evacuation_failed) {
4750       r->rem_set()->clear_locked();
4751     }
4752   }
4753 
4754   class G1PrepareFreeCollectionSetClosure : public HeapRegionClosure {
4755   private:
4756     size_t _cur_idx;
4757     WorkItem* _work_items;
4758   public:
4759     G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }
4760 
4761     virtual bool doHeapRegion(HeapRegion* r) {
4762       _work_items[_cur_idx++] = WorkItem(r);
4763       return false;
4764     }
4765   };
4766 
4767   void prepare_work() {
4768     G1PrepareFreeCollectionSetClosure cl(_work_items);
4769     _collection_set->iterate(&cl);
4770   }
4771 
4772   void complete_work() {
4773     _cl.complete_work();
4774 
4775     G1Policy* policy = G1CollectedHeap::heap()->g1_policy();
4776     policy->record_max_rs_lengths(_rs_lengths);
4777     policy->cset_regions_freed();
4778   }
4779 public:
4780   G1FreeCollectionSetTask(G1CollectionSet* collection_set, EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
4781     AbstractGangTask("G1 Free Collection Set"),
4782     _collection_set(collection_set),
4783     _cl(evacuation_info, surviving_young_words),
4784     _surviving_young_words(surviving_young_words),
4785     _rs_lengths(0),
4786     _serial_work_claim(0),
4787     _parallel_work_claim(0),
4788     _num_work_items(collection_set->region_length()),
4789     _work_items(NEW_C_HEAP_ARRAY(WorkItem, _num_work_items, mtGC)) {
4790     prepare_work();
4791   }
4792 
4793   ~G1FreeCollectionSetTask() {
4794     complete_work();
4795     FREE_C_HEAP_ARRAY(WorkItem, _work_items);
4796   }
4797 
4798   // Chunk size for work distribution. The chosen value has been determined experimentally
4799   // to be a good tradeoff between overhead and achievable parallelism.
4800   static uint chunk_size() { return 32; }
4801 
4802   virtual void work(uint worker_id) {
4803     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
4804 
4805     // Claim serial work.
4806     if (_serial_work_claim == 0) {
4807       jint value = Atomic::add(1, &_serial_work_claim) - 1;
4808       if (value == 0) {
4809         double serial_time = os::elapsedTime();
4810         do_serial_work();
4811         timer->record_serial_free_cset_time_ms((os::elapsedTime() - serial_time) * 1000.0);
4812       }
4813     }
4814 
4815     // Start parallel work.
4816     double young_time = 0.0;
4817     bool has_young_time = false;
4818     double non_young_time = 0.0;
4819     bool has_non_young_time = false;
4820 
4821     while (true) {
4822       size_t end = Atomic::add(chunk_size(), &_parallel_work_claim);
4823       size_t cur = end - chunk_size();
4824 
4825       if (cur >= _num_work_items) {
4826         break;
4827       }
4828 
4829       double start_time = os::elapsedTime();
4830 
4831       end = MIN2(end, _num_work_items);
4832 
4833       for (; cur < end; cur++) {
4834         bool is_young = _work_items[cur].is_young;
4835 
4836         do_parallel_work_for_region(_work_items[cur].region_idx, is_young, _work_items[cur].evacuation_failed);
4837 
4838         double end_time = os::elapsedTime();
4839         double time_taken = end_time - start_time;
4840         if (is_young) {
4841           young_time += time_taken;
4842           has_young_time = true;
4843         } else {
4844           non_young_time += time_taken;
4845           has_non_young_time = true;
4846         }
4847         start_time = end_time;
4848       }
4849     }
4850 
4851     if (has_young_time) {
4852       timer->record_time_secs(G1GCPhaseTimes::YoungFreeCSet, worker_id, young_time);
4853     }
4854     if (has_non_young_time) {
4855       timer->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, worker_id, non_young_time);
4856     }
4857   }
4858 };
4859 
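A worked pass over the lock-free chunk claiming in work() above, with
illustrative numbers (_num_work_items = 50, chunk_size() = 32, two workers
A and B):

  // A: end = Atomic::add(32, &_parallel_work_claim) = 32, cur = 0
  //    -> processes work items [0, 32)
  // B: end = 64, cur = 32; end = MIN2(64, 50) = 50
  //    -> processes work items [32, 50)
  // A: end = 96, cur = 64; 64 >= _num_work_items -> loop exits

Note that start_time is re-sampled after every region, so each item's cost
lands in the young or non-young timing bucket individually.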
4860 void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4861   _eden.clear();
4862 
4863   double free_cset_start_time = os::elapsedTime();
4864 
4865   {
4866     uint const num_chunks = MAX2(_collection_set.region_length() / G1FreeCollectionSetTask::chunk_size(), 1U);
4867     uint const num_workers = MIN2(workers()->active_workers(), num_chunks);
4868 
4869     G1FreeCollectionSetTask cl(collection_set, &evacuation_info, surviving_young_words);
4870 
4871     log_debug(gc, ergo)("Running %s using %u workers for collection set length %u",
4872                         cl.name(),
4873                         num_workers,
4874                         _collection_set.region_length());
4875     workers()->run_task(&cl, num_workers);
4876   }
4877   g1_policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);
4878 
4879   collection_set->clear();
4880 }
4881 
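Sizing note for the block above, with illustrative numbers: the worker count
is capped by the number of chunks, so no worker can start without a chunk to
claim. A 100-region collection set gives num_chunks = MAX2(100 / 32, 1U) = 3
(integer division), so at most 3 of the active workers run the task; a
collection set of fewer than 64 regions gives num_chunks = 1 (for example,
MAX2(50 / 32, 1U) = 1) and the task effectively runs on a single worker.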
4882 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
4883  private:
4884   FreeRegionList* _free_region_list;
4885   HeapRegionSet* _proxy_set;
4886   uint _humongous_regions_removed;
4887   size_t _freed_bytes;
4888  public:
4889 
4890   G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
4891     _free_region_list(free_region_list), _proxy_set(NULL), _humongous_regions_removed(0), _freed_bytes(0) {
4892   }
4893 
4894   virtual bool doHeapRegion(HeapRegion* r) {
4895     if (!r->is_starts_humongous()) {
4896       return false;
4897     }
4952     log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
4953                              region_idx,
4954                              (size_t)obj->size() * HeapWordSize,
4955                              p2i(r->bottom()),
4956                              r->rem_set()->occupied(),
4957                              r->rem_set()->strong_code_roots_list_length(),
4958                              next_bitmap->isMarked(r->bottom()),
4959                              g1h->is_humongous_reclaim_candidate(region_idx),
4960                              obj->is_typeArray()
4961                             );
4962 
4963     // Need to clear mark bit of the humongous object if already set.
4964     if (next_bitmap->isMarked(r->bottom())) {
4965       next_bitmap->clear(r->bottom());
4966     }
4967     do {
4968       HeapRegion* next = g1h->next_region_in_humongous(r); // capture next before freeing r
4969       _freed_bytes += r->used();
4970       r->set_containing_set(NULL);
4971       _humongous_regions_removed++;
4972       g1h->free_humongous_region(r, _free_region_list, false /* skip_remset */ );
4973       r = next;
4974     } while (r != NULL);
4975 
4976     return false;
4977   }
4978 
4979   uint humongous_free_count() {
4980     return _humongous_regions_removed;
4981   }
4982 
4983   size_t bytes_freed() const {
4984     return _freed_bytes;
4985   }
4986 };
4987 
4988 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
4989   assert_at_safepoint(true);
4990 
4991   if (!G1EagerReclaimHumongousObjects ||
4992       (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {