
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 7558 : 8048179: Early reclaim of large objects that are referenced by a few objects
Summary:
Reviewed-by:
rev 7559 : imported patch bengt-review
rev 7560 : [mq]: bengt-review2


2052   // values in the heap have been properly initialized.
2053   _g1mm = new G1MonitoringSupport(this);
2054 
2055   G1StringDedup::initialize();
2056 
2057   return JNI_OK;
2058 }
2059 
2060 void G1CollectedHeap::stop() {
2061   // Stop all concurrent threads. We do this to make sure these threads
2062   // do not continue to execute and access resources (e.g. gclog_or_tty)
2063   // that are destroyed during shutdown.
2064   _cg1r->stop();
2065   _cmThread->stop();
2066   if (G1StringDedup::is_enabled()) {
2067     G1StringDedup::stop();
2068   }
2069 }
2070 
2071 void G1CollectedHeap::clear_humongous_is_live_table() {
2072   guarantee(G1ReclaimDeadHumongousObjectsAtYoungGC, "Should only be called if true");
2073   _humongous_is_live.clear();
2074 }
2075 
2076 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2077   return HeapRegion::max_region_size();
2078 }
2079 
2080 void G1CollectedHeap::ref_processing_init() {
2081   // Reference processing in G1 currently works as follows:
2082   //
2083   // * There are two reference processor instances. One is
2084   //   used to record and process discovered references
2085   //   during concurrent marking; the other is used to
2086   //   record and process references during STW pauses
2087   //   (both full and incremental).
2088   // * Both ref processors need to 'span' the entire heap as
2089   //   the regions in the collection set may be dotted around.
2090   //
2091   // * For the concurrent marking ref processor:
2092   //   * Reference discovery is enabled at initial marking.


3478   return g1_rem_set()->cardsScanned();
3479 }
3480 
3481 bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
3482   HeapRegion* region = region_at(index);
3483   assert(region->is_starts_humongous(), "Must start a humongous object");
3484   return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
3485 }
3486 
3487 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
3488  private:
3489   size_t _total_humongous;
3490   size_t _candidate_humongous;
3491 
3492   DirtyCardQueue _dcq;
3493 
3494   bool humongous_region_is_candidate(uint index) {
3495     HeapRegion* region = G1CollectedHeap::heap()->region_at(index);
3496     assert(region->is_starts_humongous(), "Must start a humongous object");
3497     HeapRegionRemSet* const rset = region->rem_set();
3498     bool const allow_stale_refs = G1ReclaimDeadHumongousObjectsWithStaleRefsAtYoungGC;
3499     return !oop(region->bottom())->is_objArray() &&
3500            ((allow_stale_refs && rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)) ||
3501             (!allow_stale_refs && rset->is_empty()));
3502   }
3503 
3504  public:
3505   RegisterHumongousWithInCSetFastTestClosure()
3506   : _total_humongous(0),
3507     _candidate_humongous(0),
3508     _dcq(&JavaThread::dirty_card_queue_set()) {
3509   }
3510 
3511   virtual bool doHeapRegion(HeapRegion* r) {
3512     if (!r->is_starts_humongous()) {
3513       return false;
3514     }
3515     G1CollectedHeap* g1h = G1CollectedHeap::heap();
3516 
3517     uint region_idx = r->hrm_index();
3518     bool is_candidate = humongous_region_is_candidate(region_idx);


3536           }
3537         }
3538         r->rem_set()->clear_locked();
3539       }
3540       assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
3541       g1h->register_humongous_region_with_in_cset_fast_test(region_idx);
3542       _candidate_humongous++;
3543     }
3544     _total_humongous++;
3545 
3546     return false;
3547   }
3548 
3549   size_t total_humongous() const { return _total_humongous; }
3550   size_t candidate_humongous() const { return _candidate_humongous; }
3551 
3552   void flush_rem_set_entries() { _dcq.flush(); }
3553 };
3554 
3555 void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
3556   if (!G1ReclaimDeadHumongousObjectsAtYoungGC) {
3557     g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
3558     return;
3559   }
3560   double time = os::elapsed_counter();
3561 
3562   RegisterHumongousWithInCSetFastTestClosure cl;
3563   heap_region_iterate(&cl);
3564 
3565   time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
3566   g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
3567                                                                   cl.total_humongous(),
3568                                                                   cl.candidate_humongous());
3569   _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
3570 
3571   if (_has_humongous_reclaim_candidates || G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
3572     clear_humongous_is_live_table();
3573   }
3574 
3575   // Finally, flush all remembered set entries to be re-checked into the global DCQS.
3576   cl.flush_rem_set_entries();
3577 }
3578 
3579 void
3580 G1CollectedHeap::setup_surviving_young_words() {
3581   assert(_surviving_young_words == NULL, "pre-condition");
3582   uint array_length = g1_policy()->young_cset_region_length();
3583   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
3584   if (_surviving_young_words == NULL) {
3585     vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
3586                           "Not enough space for young surv words summary.");
3587   }
3588   memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
3589 #ifdef ASSERT
3590   for (uint i = 0;  i < array_length; ++i) {
3591     assert( _surviving_young_words[i] == 0, "memset above" );


6167     // until the end of a concurrent mark.
6168     //
6169     // It is not required to check whether the object has been found dead by marking
6170     // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
6171     // all objects allocated during that time are considered live.
6172     // SATB marking is even more conservative than the remembered set.
6173     // So if at this point in the collection there is no remembered set entry,
6174     // nobody has a reference to it.
6175     // At the start of collection we flush all refinement logs, and remembered sets
6176     // are completely up-to-date wrt references to the humongous object.
6177     //
6178     // Other implementation considerations:
6179     // - never consider object arrays at this time because they would require
6180     // considerable effort for cleaning up the remembered sets. This is
6181     // required because stale remembered sets might reference locations that
6182     // are currently allocated into.
6183     uint region_idx = r->hrm_index();
6184     if (g1h->humongous_is_live(region_idx) ||
6185         g1h->humongous_region_is_always_live(region_idx)) {
6186 
6187       if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
6188         gclog_or_tty->print_cr("Live humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
6189                                region_idx,
6190                                obj->size()*HeapWordSize,
6191                                r->bottom(),
6192                                r->region_num(),
6193                                r->rem_set()->occupied(),
6194                                r->rem_set()->strong_code_roots_list_length(),
6195                                next_bitmap->isMarked(r->bottom()),
6196                                g1h->humongous_is_live(region_idx),
6197                                obj->is_objArray()
6198                               );
6199       }
6200 
6201       return false;
6202     }
6203 
6204     guarantee(!obj->is_objArray(),
6205               err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",
6206                       r->bottom()));
6207 
6208     if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
6209       gclog_or_tty->print_cr("Dead humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
6210                              region_idx,
6211                              obj->size()*HeapWordSize,
6212                              r->bottom(),
6213                              r->region_num(),
6214                              r->rem_set()->occupied(),
6215                              r->rem_set()->strong_code_roots_list_length(),
6216                              next_bitmap->isMarked(r->bottom()),
6217                              g1h->humongous_is_live(region_idx),
6218                              obj->is_objArray()
6219                             );
6220     }
6221     // Need to clear mark bit of the humongous object if already set.
6222     if (next_bitmap->isMarked(r->bottom())) {
6223       next_bitmap->clear(r->bottom());
6224     }
6225     _freed_bytes += r->used();
6226     r->set_containing_set(NULL);
6227     _humongous_regions_removed.increment(1u, r->capacity());
6228     g1h->free_humongous_region(r, _free_region_list, false);
6229 
6230     return false;
6231   }
6232 
6233   HeapRegionSetCount& humongous_free_count() {
6234     return _humongous_regions_removed;
6235   }
6236 
6237   size_t bytes_freed() const {
6238     return _freed_bytes;
6239   }
6240 
6241   size_t humongous_reclaimed() const {
6242     return _humongous_regions_removed.length();
6243   }
6244 };
6245 
6246 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
6247   assert_at_safepoint(true);
6248 
6249   if (!G1ReclaimDeadHumongousObjectsAtYoungGC ||
6250       (!_has_humongous_reclaim_candidates && !G1TraceReclaimDeadHumongousObjectsAtYoungGC)) {
6251     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
6252     return;
6253   }
6254 
6255   double start_time = os::elapsedTime();
6256 
6257   FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
6258 
6259   G1FreeHumongousRegionClosure cl(&local_cleanup_list);
6260   heap_region_iterate(&cl);
6261 
6262   HeapRegionSetCount empty_set;
6263   remove_from_old_sets(empty_set, cl.humongous_free_count());
6264 
6265   G1HRPrinter* hr_printer = _g1h->hr_printer();
6266   if (hr_printer->is_active()) {
6267     FreeRegionListIterator iter(&local_cleanup_list);
6268     while (iter.more_available()) {
6269       HeapRegion* hr = iter.get_next();
6270       hr_printer->cleanup(hr);




2052   // values in the heap have been properly initialized.
2053   _g1mm = new G1MonitoringSupport(this);
2054 
2055   G1StringDedup::initialize();
2056 
2057   return JNI_OK;
2058 }
2059 
2060 void G1CollectedHeap::stop() {
2061   // Stop all concurrent threads. We do this to make sure these threads
2062   // do not continue to execute and access resources (e.g. gclog_or_tty)
2063   // that are destroyed during shutdown.
2064   _cg1r->stop();
2065   _cmThread->stop();
2066   if (G1StringDedup::is_enabled()) {
2067     G1StringDedup::stop();
2068   }
2069 }
2070 
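As a generic illustration of the shutdown pattern in stop() above (not HotSpot code), the sketch below shows a concurrent worker that is asked to terminate and joined before shared resources such as a log stream are torn down; ConcurrentWorker and its polling loop are hypothetical stand-ins for the G1 refinement and marking threads.

#include <atomic>
#include <chrono>
#include <thread>

// Hypothetical stand-in for a concurrent GC service thread: it polls a stop
// flag so it can be joined before shutdown destroys resources it uses.
class ConcurrentWorker {
  std::atomic<bool> _should_stop{false};
  std::thread _thread;
 public:
  void start() {
    _thread = std::thread([this] {
      while (!_should_stop.load(std::memory_order_acquire)) {
        // ... background work that may touch shared resources (e.g. a log) ...
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
      }
    });
  }
  // Analogous to _cg1r->stop() / _cmThread->stop(): request termination and
  // wait until the worker has actually finished.
  void stop() {
    _should_stop.store(true, std::memory_order_release);
    if (_thread.joinable()) {
      _thread.join();
    }
  }
};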
2071 void G1CollectedHeap::clear_humongous_is_live_table() {
2072   guarantee(G1EagerReclaimHumongousObjects, "Should only be called if true");
2073   _humongous_is_live.clear();
2074 }
2075 
2076 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2077   return HeapRegion::max_region_size();
2078 }
2079 
2080 void G1CollectedHeap::ref_processing_init() {
2081   // Reference processing in G1 currently works as follows:
2082   //
2083   // * There are two reference processor instances. One is
2084   //   used to record and process discovered references
2085   //   during concurrent marking; the other is used to
2086   //   record and process references during STW pauses
2087   //   (both full and incremental).
2088   // * Both ref processors need to 'span' the entire heap as
2089   //   the regions in the collection set may be dotted around.
2090   //
2091   // * For the concurrent marking ref processor:
2092   //   * Reference discovery is enabled at initial marking.


3478   return g1_rem_set()->cardsScanned();
3479 }
3480 
3481 bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
3482   HeapRegion* region = region_at(index);
3483   assert(region->is_starts_humongous(), "Must start a humongous object");
3484   return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
3485 }
3486 
3487 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
3488  private:
3489   size_t _total_humongous;
3490   size_t _candidate_humongous;
3491 
3492   DirtyCardQueue _dcq;
3493 
3494   bool humongous_region_is_candidate(uint index) {
3495     HeapRegion* region = G1CollectedHeap::heap()->region_at(index);
3496     assert(region->is_starts_humongous(), "Must start a humongous object");
3497     HeapRegionRemSet* const rset = region->rem_set();
3498     bool const allow_stale_refs = G1EagerReclaimHumongousObjectsWithStaleRefs;
3499     return !oop(region->bottom())->is_objArray() &&
3500            ((allow_stale_refs && rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)) ||
3501             (!allow_stale_refs && rset->is_empty()));
3502   }
3503 
3504  public:
3505   RegisterHumongousWithInCSetFastTestClosure()
3506   : _total_humongous(0),
3507     _candidate_humongous(0),
3508     _dcq(&JavaThread::dirty_card_queue_set()) {
3509   }
3510 
3511   virtual bool doHeapRegion(HeapRegion* r) {
3512     if (!r->is_starts_humongous()) {
3513       return false;
3514     }
3515     G1CollectedHeap* g1h = G1CollectedHeap::heap();
3516 
3517     uint region_idx = r->hrm_index();
3518     bool is_candidate = humongous_region_is_candidate(region_idx);


3536           }
3537         }
3538         r->rem_set()->clear_locked();
3539       }
3540       assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
3541       g1h->register_humongous_region_with_in_cset_fast_test(region_idx);
3542       _candidate_humongous++;
3543     }
3544     _total_humongous++;
3545 
3546     return false;
3547   }
3548 
3549   size_t total_humongous() const { return _total_humongous; }
3550   size_t candidate_humongous() const { return _candidate_humongous; }
3551 
3552   void flush_rem_set_entries() { _dcq.flush(); }
3553 };
3554 
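For review purposes, a minimal standalone sketch of the candidate test applied by humongous_region_is_candidate() above, using simplified stand-in types rather than the real HeapRegion/HeapRegionRemSet API; HumongousRegionInfo and sparse_threshold are assumptions for illustration, with sparse_threshold playing the role of G1RSetSparseRegionEntries.

#include <cstddef>

// Hypothetical stand-in for the bits of region state the test needs.
struct HumongousRegionInfo {
  bool   is_obj_array;       // the humongous object is an object array
  size_t rem_set_occupancy;  // number of remembered set entries for the region
};

// Mirrors the logic above:
//  - object arrays are never candidates;
//  - if stale references are allowed, a sparse-level remembered set suffices;
//  - otherwise the remembered set must be completely empty.
static bool is_eager_reclaim_candidate(const HumongousRegionInfo& r,
                                       bool allow_stale_refs,
                                       size_t sparse_threshold) {
  if (r.is_obj_array) {
    return false;
  }
  return allow_stale_refs ? (r.rem_set_occupancy <= sparse_threshold)
                          : (r.rem_set_occupancy == 0);
}

Allowing a small, possibly stale remembered set widens the set of candidates; as the closure above shows, those entries are pushed through the dirty card queue and re-checked rather than trusted.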
3555 void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
3556   if (!G1EagerReclaimHumongousObjects) {
3557     g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
3558     return;
3559   }
3560   double time = os::elapsed_counter();
3561 
3562   RegisterHumongousWithInCSetFastTestClosure cl;
3563   heap_region_iterate(&cl);
3564 
3565   time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
3566   g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
3567                                                                   cl.total_humongous(),
3568                                                                   cl.candidate_humongous());
3569   _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
3570 
3571   if (_has_humongous_reclaim_candidates || G1TraceEagerReclaimHumongousObjects) {
3572     clear_humongous_is_live_table();
3573   }
3574 
3575   // Finally, flush all remembered set entries to be re-checked into the global DCQS.
3576   cl.flush_rem_set_entries();
3577 }
3578 
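The timing at lines 3560/3565 converts a raw tick delta to milliseconds by dividing by the tick frequency. Below is a minimal self-contained sketch of the same conversion, using std::chrono::steady_clock as a stand-in for os::elapsed_counter()/os::elapsed_frequency(); the helper names are assumptions, not HotSpot APIs.

#include <chrono>
#include <cstdio>

// Stand-ins for os::elapsed_counter() / os::elapsed_frequency():
// a monotonic tick counter plus the number of ticks per second.
static long long elapsed_counter() {
  return std::chrono::steady_clock::now().time_since_epoch().count();
}
static long long elapsed_frequency() {
  return std::chrono::steady_clock::period::den /
         std::chrono::steady_clock::period::num;
}

int main() {
  long long start = elapsed_counter();
  // ... work to be measured, e.g. iterating all heap regions ...
  double ms = (double)(elapsed_counter() - start) / elapsed_frequency() * 1000.0;
  std::printf("phase took %.3f ms\n", ms);
  return 0;
}

Working in integer ticks and converting once at the end avoids accumulating floating-point error while the phase is running.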
3579 void
3580 G1CollectedHeap::setup_surviving_young_words() {
3581   assert(_surviving_young_words == NULL, "pre-condition");
3582   uint array_length = g1_policy()->young_cset_region_length();
3583   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
3584   if (_surviving_young_words == NULL) {
3585     vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
3586                           "Not enough space for young surv words summary.");
3587   }
3588   memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
3589 #ifdef ASSERT
3590   for (uint i = 0;  i < array_length; ++i) {
3591     assert( _surviving_young_words[i] == 0, "memset above" );


6167     // until the end of a concurrent mark.
6168     //
6169     // It is not required to check whether the object has been found dead by marking
6170     // or not; in fact, doing so would prevent reclamation within a concurrent cycle, as
6171     // all objects allocated during that time are considered live.
6172     // SATB marking is even more conservative than the remembered set.
6173     // So if at this point in the collection there is no remembered set entry,
6174     // nobody has a reference to it.
6175     // At the start of collection we flush all refinement logs, and remembered sets
6176     // are completely up-to-date wrt references to the humongous object.
6177     //
6178     // Other implementation considerations:
6179     // - never consider object arrays at this time because they would require
6180     // considerable effort for cleaning up the remembered sets. This is
6181     // required because stale remembered sets might reference locations that
6182     // are currently allocated into.
6183     uint region_idx = r->hrm_index();
6184     if (g1h->humongous_is_live(region_idx) ||
6185         g1h->humongous_region_is_always_live(region_idx)) {
6186 
6187       if (G1TraceEagerReclaimHumongousObjects) {
6188         gclog_or_tty->print_cr("Live humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
6189                                region_idx,
6190                                obj->size()*HeapWordSize,
6191                                r->bottom(),
6192                                r->region_num(),
6193                                r->rem_set()->occupied(),
6194                                r->rem_set()->strong_code_roots_list_length(),
6195                                next_bitmap->isMarked(r->bottom()),
6196                                g1h->humongous_is_live(region_idx),
6197                                obj->is_objArray()
6198                               );
6199       }
6200 
6201       return false;
6202     }
6203 
6204     guarantee(!obj->is_objArray(),
6205               err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",
6206                       r->bottom()));
6207 
6208     if (G1TraceEagerReclaimHumongousObjects) {
6209       gclog_or_tty->print_cr("Dead humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
6210                              region_idx,
6211                              obj->size()*HeapWordSize,
6212                              r->bottom(),
6213                              r->region_num(),
6214                              r->rem_set()->occupied(),
6215                              r->rem_set()->strong_code_roots_list_length(),
6216                              next_bitmap->isMarked(r->bottom()),
6217                              g1h->humongous_is_live(region_idx),
6218                              obj->is_objArray()
6219                             );
6220     }
6221     // Need to clear mark bit of the humongous object if already set.
6222     if (next_bitmap->isMarked(r->bottom())) {
6223       next_bitmap->clear(r->bottom());
6224     }
6225     _freed_bytes += r->used();
6226     r->set_containing_set(NULL);
6227     _humongous_regions_removed.increment(1u, r->capacity());
6228     g1h->free_humongous_region(r, _free_region_list, false);
6229 
6230     return false;
6231   }
6232 
6233   HeapRegionSetCount& humongous_free_count() {
6234     return _humongous_regions_removed;
6235   }
6236 
6237   size_t bytes_freed() const {
6238     return _freed_bytes;
6239   }
6240 
6241   size_t humongous_reclaimed() const {
6242     return _humongous_regions_removed.length();
6243   }
6244 };
6245 
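The reclaim decision in the closure above consults the per-region _humongous_is_live table filled in during evacuation. The sketch below shows that pattern in isolation, assuming a plain std::vector<bool> in place of the real G1 table and a precomputed always_live flag for the objArray/non-empty-remset case; HumongousIsLiveTable and should_eagerly_reclaim are hypothetical names.

#include <cstdint>
#include <vector>

// Hypothetical stand-in for the per-region "humongous is live" table:
// cleared before evacuation, set when a reference into a candidate humongous
// region is encountered, and queried when deciding whether to reclaim.
class HumongousIsLiveTable {
  std::vector<bool> _live;
 public:
  explicit HumongousIsLiveTable(uint32_t max_regions) : _live(max_regions, false) {}
  void clear()                          { _live.assign(_live.size(), false); }
  void set_live(uint32_t region_idx)    { _live[region_idx] = true; }
  bool is_live(uint32_t region_idx) const { return _live[region_idx]; }
};

// Reclaim only if evacuation found no reference to the region and the region
// is not excluded up front (object array or non-empty remembered set).
static bool should_eagerly_reclaim(const HumongousIsLiveTable& table,
                                   uint32_t region_idx,
                                   bool always_live) {
  return !table.is_live(region_idx) && !always_live;
}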
6246 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
6247   assert_at_safepoint(true);
6248 
6249   if (!G1EagerReclaimHumongousObjects ||
6250       (!_has_humongous_reclaim_candidates && !G1TraceEagerReclaimHumongousObjects)) {
6251     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
6252     return;
6253   }
6254 
6255   double start_time = os::elapsedTime();
6256 
6257   FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
6258 
6259   G1FreeHumongousRegionClosure cl(&local_cleanup_list);
6260   heap_region_iterate(&cl);
6261 
6262   HeapRegionSetCount empty_set;
6263   remove_from_old_sets(empty_set, cl.humongous_free_count());
6264 
6265   G1HRPrinter* hr_printer = _g1h->hr_printer();
6266   if (hr_printer->is_active()) {
6267     FreeRegionListIterator iter(&local_cleanup_list);
6268     while (iter.more_available()) {
6269       HeapRegion* hr = iter.get_next();
6270       hr_printer->cleanup(hr);

