src/hotspot/share/gc/g1/g1CollectedHeap.cpp


Old version:

2711     g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
2712     if (is_candidate) {
2713       _candidate_humongous++;
2714       g1h->register_humongous_region_with_cset(rindex);
2715       // is_candidate already filters out humongous objects with large remembered sets.
2716       // If we have a humongous object with only a few remembered set entries, we simply
2717       // flush these entries into the DCQS. That will result in automatic
2718       // re-evaluation of these entries during the following evacuation
2719       // phase.
2720       if (!r->rem_set()->is_empty()) {
2721         guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
2722                   "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
2723         G1CardTable* ct = g1h->card_table();
2724         HeapRegionRemSetIterator hrrs(r->rem_set());
2725         size_t card_index;
2726         while (hrrs.has_next(card_index)) {
2727           CardTable::CardValue* card_ptr = ct->byte_for_index(card_index);
2728           // The remembered set might contain references to already freed
2729           // regions. Filter out such entries to avoid failing card table
2730           // verification.
2731           if (g1h->is_in_closed_subset(ct->addr_for(card_ptr))) {
2732             if (*card_ptr != G1CardTable::dirty_card_val()) {
2733               *card_ptr = G1CardTable::dirty_card_val();
2734               _dcq.enqueue(card_ptr);
2735             }
2736           }
2737         }
2738         assert(hrrs.n_yielded() == r->rem_set()->occupied(),
2739                "Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",
2740                hrrs.n_yielded(), r->rem_set()->occupied());
2741         // We should only clear the card-based remembered set here, as we will not
2742         // implicitly rebuild anything else during eager reclaim. Note that at the moment
2743         // (and probably forever) we do not enter this path if there are other kinds of
2744         // remembered sets for this region.
2745         r->rem_set()->clear_locked(true /* only_cardset */);
2746         // clear_locked() above sets the state to Empty. However, we want to continue
2747         // collecting remembered set entries for humongous regions that were not
2748         // reclaimed.
2749         r->rem_set()->set_state_complete();
2750       }
2751       assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
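
The flush loop above is the mechanism of interest in this hunk: instead of keeping a remembered set for a reclaim candidate, G1 dirties each card that the sparse remembered set refers to and enqueues it on the dirty card queue, so those cards simply get rescanned during the following evacuation. A minimal standalone sketch of this dirty-and-enqueue pattern, using hypothetical simplified types (MiniCardTable, kDirtyCard and the plain vector queue are illustrative stand-ins, not HotSpot's G1CardTable or DCQS API):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    typedef uint8_t CardValue;
    static const CardValue kDirtyCard = 0;  // hypothetical encoding; G1 uses dirty_card_val()
    static const CardValue kCleanCard = 1;

    struct MiniCardTable {                  // stand-in for G1CardTable
      std::vector<CardValue> cards;
      explicit MiniCardTable(size_t n) : cards(n, kCleanCard) {}
      CardValue* byte_for_index(size_t i) { return &cards[i]; }
    };

    int main() {
      MiniCardTable ct(1024);
      std::vector<CardValue*> dcq;                  // stand-in for the dirty card queue
      const size_t sparse_entries[] = {3, 17, 99};  // stand-in for sparse rem set indices
      for (size_t idx : sparse_entries) {
        CardValue* card_ptr = ct.byte_for_index(idx);
        if (*card_ptr != kDirtyCard) {              // mirrors the check at line 2732
          *card_ptr = kDirtyCard;
          dcq.push_back(card_ptr);                  // a later phase rescans these cards
        }
      }
      return dcq.size() == 3 ? 0 : 1;
    }

The not-already-dirty test matters: a card that is already dirty will be rescanned anyway, so enqueuing it again would only create duplicate work.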


4586 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
4587   assert_at_safepoint_on_vm_thread();
4588 
4589   if (!free_list_only) {
4590     _eden.clear();
4591     _survivor.clear();
4592   }
4593 
4594   RebuildRegionSetsClosure cl(free_list_only, &_old_set, _hrm);
4595   heap_region_iterate(&cl);
4596 
4597   if (!free_list_only) {
4598     set_used(cl.total_used());
4599     if (_archive_allocator != NULL) {
4600       _archive_allocator->clear_used();
4601     }
4602   }
4603   assert(used() == recalculate_used(),
4604          "inconsistent used(), value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
4605          used(), recalculate_used());
4606 }
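
rebuild_region_sets() follows HotSpot's usual closure pattern: clear the sets that must be rebuilt, visit every region exactly once via heap_region_iterate(), and let the closure re-file each region while accumulating the used-bytes total that set_used() installs afterwards. A minimal sketch of the pattern with hypothetical simplified types (MiniRegion and MiniRebuildClosure are illustrative stand-ins, not HotSpot's HeapRegion or RebuildRegionSetsClosure):

    #include <cstddef>
    #include <vector>

    struct MiniRegion { size_t used_bytes; bool is_free; };

    struct MiniRebuildClosure {
      size_t _total_used = 0;
      std::vector<MiniRegion*> _old_set;   // regions re-filed as "old" (simplified)
      void do_region(MiniRegion* r) {
        if (r->is_free) return;            // free regions stay on the free list
        _old_set.push_back(r);
        _total_used += r->used_bytes;      // one pass recomputes used()
      }
    };

    static void mini_heap_region_iterate(std::vector<MiniRegion>& heap,
                                         MiniRebuildClosure* cl) {
      for (MiniRegion& r : heap) {
        cl->do_region(&r);
      }
    }

The closing assert then cross-checks this incremental accounting against a full recalculation, which is affordable because the method only runs at a safepoint on the VM thread (see the assert at line 4587).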
4607 
4608 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
4609   HeapRegion* hr = heap_region_containing(p);
4610   return hr->is_in(p);
4611 }
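
This wrapper is what the change removes: is_in_closed_subset() simply forwards to the containing region's is_in() check, so the call site can use the heap's own containment test directly (the new version below deletes the wrapper and updates line 2731 accordingly). For illustration, the underlying region check is essentially a half-open interval test; a sketch with a hypothetical simplified type (MiniHeapRegion is an illustrative stand-in, not HotSpot's HeapRegion):

    struct MiniHeapRegion {
      const char* _bottom;                 // first word of the region
      const char* _top;                    // one past the last used word
      bool is_in(const void* p) const {
        const char* q = static_cast<const char*>(p);
        return _bottom <= q && q < _top;   // half-open: [bottom, top)
      }
    };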
4612 
4613 // Methods for the mutator alloc region
4614 
4615 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
4616                                                       bool force) {
4617   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4618   bool should_allocate = policy()->should_allocate_mutator_region();
4619   if (force || should_allocate) {
4620     HeapRegion* new_alloc_region = new_region(word_size,
4621                                               HeapRegionType::Eden,
4622                                               false /* do_expand */);
4623     if (new_alloc_region != NULL) {
4624       set_region_short_lived_locked(new_alloc_region);
4625       _hr_printer.alloc(new_alloc_region, !should_allocate);
4626       _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
4627       _policy->remset_tracker()->update_at_allocate(new_alloc_region);
4628       return new_alloc_region;
4629     }
4630   }
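
Note the gate at the top of new_mutator_alloc_region(): a new eden region is handed out only when the caller forces it or the policy agrees that another mutator region fits the current young-generation target, and even then new_region() may return NULL when no region is available. A minimal sketch of this force-or-policy gate, with hypothetical simplified types (MiniPolicy, MiniHeap and the fixed young target are illustrative assumptions, not G1Policy's real heuristic):

    #include <cstddef>

    struct MiniPolicy {
      size_t _young_len;
      size_t _young_target;
      bool should_allocate_mutator_region() const {
        return _young_len < _young_target;         // assumed simple target check
      }
    };

    struct MiniHeap {
      MiniPolicy _policy;
      // Stub allocator: pretend no free region is available.
      void* allocate_free_region(size_t) { return nullptr; }
      void* new_mutator_alloc_region(size_t word_size, bool force) {
        if (force || _policy.should_allocate_mutator_region()) {
          return allocate_free_region(word_size);  // nullptr: caller must cope
        }
        return nullptr;                            // policy said no; caller falls back
      }
    };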

New version:

2711     g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
2712     if (is_candidate) {
2713       _candidate_humongous++;
2714       g1h->register_humongous_region_with_cset(rindex);
2715       // is_candidate already filters out humongous objects with large remembered sets.
2716       // If we have a humongous object with only a few remembered set entries, we simply
2717       // flush these entries into the DCQS. That will result in automatic
2718       // re-evaluation of these entries during the following evacuation
2719       // phase.
2720       if (!r->rem_set()->is_empty()) {
2721         guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
2722                   "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
2723         G1CardTable* ct = g1h->card_table();
2724         HeapRegionRemSetIterator hrrs(r->rem_set());
2725         size_t card_index;
2726         while (hrrs.has_next(card_index)) {
2727           CardTable::CardValue* card_ptr = ct->byte_for_index(card_index);
2728           // The remembered set might contain references to already freed
2729           // regions. Filter out such entries to avoid failing card table
2730           // verification.
2731           if (g1h->is_in(ct->addr_for(card_ptr))) {
2732             if (*card_ptr != G1CardTable::dirty_card_val()) {
2733               *card_ptr = G1CardTable::dirty_card_val();
2734               _dcq.enqueue(card_ptr);
2735             }
2736           }
2737         }
2738         assert(hrrs.n_yielded() == r->rem_set()->occupied(),
2739                "Remembered set hash maps out of sync, cur: " SIZE_FORMAT " entries, next: " SIZE_FORMAT " entries",
2740                hrrs.n_yielded(), r->rem_set()->occupied());
2741         // We should only clear the card-based remembered set here, as we will not
2742         // implicitly rebuild anything else during eager reclaim. Note that at the moment
2743         // (and probably forever) we do not enter this path if there are other kinds of
2744         // remembered sets for this region.
2745         r->rem_set()->clear_locked(true /* only_cardset */);
2746         // clear_locked() above sets the state to Empty. However, we want to continue
2747         // collecting remembered set entries for humongous regions that were not
2748         // reclaimed.
2749         r->rem_set()->set_state_complete();
2750       }
2751       assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
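
The only semantic change in this hunk is at line 2731: the stale-entry filter now calls is_in() directly where the old version went through the is_in_closed_subset() wrapper:

    // old 2731: if (g1h->is_in_closed_subset(ct->addr_for(card_ptr))) {
    // new 2731: if (g1h->is_in(ct->addr_for(card_ptr))) {

Because that wrapper (old lines 4608-4611) merely delegated to the containing region's is_in() check, the filtering behavior is effectively unchanged for addresses inside the reserved heap, and the wrapper itself can be deleted, as the renumbering after line 4607 in this version shows.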


4586 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
4587   assert_at_safepoint_on_vm_thread();
4588 
4589   if (!free_list_only) {
4590     _eden.clear();
4591     _survivor.clear();
4592   }
4593 
4594   RebuildRegionSetsClosure cl(free_list_only, &_old_set, _hrm);
4595   heap_region_iterate(&cl);
4596 
4597   if (!free_list_only) {
4598     set_used(cl.total_used());
4599     if (_archive_allocator != NULL) {
4600       _archive_allocator->clear_used();
4601     }
4602   }
4603   assert(used() == recalculate_used(),
4604          "inconsistent used(), value: " SIZE_FORMAT " recalculated: " SIZE_FORMAT,
4605          used(), recalculate_used());
4606 }
4607 
4608 // Methods for the mutator alloc region
4609 
4610 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
4611                                                       bool force) {
4612   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4613   bool should_allocate = policy()->should_allocate_mutator_region();
4614   if (force || should_allocate) {
4615     HeapRegion* new_alloc_region = new_region(word_size,
4616                                               HeapRegionType::Eden,
4617                                               false /* do_expand */);
4618     if (new_alloc_region != NULL) {
4619       set_region_short_lived_locked(new_alloc_region);
4620       _hr_printer.alloc(new_alloc_region, !should_allocate);
4621       _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
4622       _policy->remset_tracker()->update_at_allocate(new_alloc_region);
4623       return new_alloc_region;
4624     }
4625   }

