src/hotspot/share/gc/g1/g1CollectedHeap.cpp

  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "code/icBuffer.hpp"
  31 #include "gc/g1/bufferingOopClosure.hpp"
  32 #include "gc/g1/g1Allocator.inline.hpp"

  33 #include "gc/g1/g1CollectedHeap.inline.hpp"
  34 #include "gc/g1/g1CollectionSet.hpp"
  35 #include "gc/g1/g1CollectorPolicy.hpp"
  36 #include "gc/g1/g1CollectorState.hpp"
  37 #include "gc/g1/g1ConcurrentRefine.hpp"
  38 #include "gc/g1/g1ConcurrentRefineThread.hpp"
  39 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  40 #include "gc/g1/g1EvacStats.inline.hpp"
  41 #include "gc/g1/g1FullCollector.hpp"
  42 #include "gc/g1/g1GCPhaseTimes.hpp"
  43 #include "gc/g1/g1HeapSizingPolicy.hpp"
  44 #include "gc/g1/g1HeapTransition.hpp"
  45 #include "gc/g1/g1HeapVerifier.hpp"
  46 #include "gc/g1/g1HotCardCache.hpp"
  47 #include "gc/g1/g1MemoryPool.hpp"
  48 #include "gc/g1/g1OopClosures.inline.hpp"
  49 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  50 #include "gc/g1/g1Policy.hpp"
  51 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  52 #include "gc/g1/g1RemSet.hpp"


1019   resize_if_necessary_after_full_collection();
1020 
1021   // Rebuild the strong code root lists for each region
1022   rebuild_strong_code_roots();
1023 
1024   // Start a new incremental collection set for the next pause
1025   start_new_collection_set();
1026 
1027   _allocator->init_mutator_alloc_region();
1028 
1029   // Post collection state updates.
1030   MetaspaceGC::compute_new_size();
1031 }
1032 
1033 void G1CollectedHeap::abort_refinement() {
1034   if (_hot_card_cache->use_cache()) {
1035     _hot_card_cache->reset_hot_cache();
1036   }
1037 
1038   // Discard all remembered set updates.
1039   JavaThread::dirty_card_queue_set().abandon_logs();
1040   assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1041 }
1042 
1043 void G1CollectedHeap::verify_after_full_collection() {
1044   _hrm.verify_optional();
1045   _verifier->verify_region_sets_optional();
1046   _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
1047   // Clear the previous marking bitmap, if needed for bitmap verification.
1048   // Note we cannot do this when we clear the next marking bitmap in
1049   // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1050   // objects marked during a full GC against the previous bitmap.
1051   // But we need to clear it before calling check_bitmaps below since
1052   // the full GC has compacted objects and updated TAMS but not updated
1053   // the prev bitmap.
1054   if (G1VerifyBitmaps) {
1055     GCTraceTime(Debug, gc)("Clear Bitmap for Verification");
1056     _cm->clear_prev_bitmap(workers());
1057   }
1058   _verifier->check_bitmaps("Full GC End");
1059 


1619   }
1620 
1621   // Create the G1ConcurrentMark data structure and thread.
1622   // (Must do this late, so that "max_regions" is defined.)
1623   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1624   if (_cm == NULL || !_cm->completed_initialization()) {
1625     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1626     return JNI_ENOMEM;
1627   }
1628   _cm_thread = _cm->cm_thread();
1629 
1630   // Now expand into the initial heap size.
1631   if (!expand(init_byte_size, _workers)) {
1632     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1633     return JNI_ENOMEM;
1634   }
1635 
1636   // Perform any initialization actions delegated to the policy.
1637   g1_policy()->init(this, &_collection_set);
1638 
1639   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1640                                                SATB_Q_FL_lock,
1641                                                G1SATBProcessCompletedThreshold,
1642                                                Shared_SATB_Q_lock);
1643 
1644   jint ecode = initialize_concurrent_refinement();
1645   if (ecode != JNI_OK) {
1646     return ecode;
1647   }
1648 
1649   ecode = initialize_young_gen_sampling_thread();
1650   if (ecode != JNI_OK) {
1651     return ecode;
1652   }
1653 
1654   JavaThread::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1655                                                 DirtyCardQ_FL_lock,
1656                                                 (int)concurrent_refine()->yellow_zone(),
1657                                                 (int)concurrent_refine()->red_zone(),
1658                                                 Shared_DirtyCardQ_lock,
1659                                                 NULL,  // fl_owner
1660                                                 true); // init_free_ids
1661 
1662   dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1663                                     DirtyCardQ_FL_lock,
1664                                     -1, // never trigger processing
1665                                     -1, // no limit on length
1666                                     Shared_DirtyCardQ_lock,
1667                                     &JavaThread::dirty_card_queue_set());
1668 
1669   // Here we allocate the dummy HeapRegion that is required by the
1670   // G1AllocRegion class.
1671   HeapRegion* dummy_region = _hrm.get_dummy_region();
1672 
1673   // We'll reuse the same region whether or not the alloc region
1674   // requires BOT updates and, if it doesn't, a non-young region
1675   // would complain that it cannot support allocations without
1676   // BOT updates. So we tag the dummy region as eden to avoid that.
1677   dummy_region->set_eden();
1678   // Make sure it's full.
1679   dummy_region->set_top(dummy_region->end());
1680   G1AllocRegion::setup(this, dummy_region);
1681 
1682   _allocator->init_mutator_alloc_region();
1683 
1684   // Create the monitoring and management support now so that
1685   // values in the heap have been properly initialized.
1686   _g1mm = new G1MonitoringSupport(this);
1687 


1816   return _collector_policy;
1817 }
1818 
1819 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
1820   return &_soft_ref_policy;
1821 }
1822 
1823 size_t G1CollectedHeap::capacity() const {
1824   return _hrm.length() * HeapRegion::GrainBytes;
1825 }
1826 
1827 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
1828   return _hrm.total_free_bytes();
1829 }
1830 
1831 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
1832   _hot_card_cache->drain(cl, worker_i);
1833 }
1834 
1835 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
1836   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1837   size_t n_completed_buffers = 0;
1838   while (dcqs.apply_closure_during_gc(cl, worker_i)) {
1839     n_completed_buffers++;
1840   }
1841   g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
1842   dcqs.clear_n_completed_buffers();
1843   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
1844 }
1845 
1846 // Computes the sum of the storage used by the various regions.
1847 size_t G1CollectedHeap::used() const {
1848   size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
1849   if (_archive_allocator != NULL) {
1850     result += _archive_allocator->used();
1851   }
1852   return result;
1853 }
1854 
1855 size_t G1CollectedHeap::used_unlocked() const {
1856   return _summary_bytes_used;


2450   *succeeded = ret_succeeded;
2451 
2452   assert_heap_not_locked();
2453   return result;
2454 }
2455 
2456 void G1CollectedHeap::do_concurrent_mark() {
2457   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2458   if (!_cm_thread->in_progress()) {
2459     _cm_thread->set_started();
2460     CGC_lock->notify();
2461   }
2462 }
2463 
2464 size_t G1CollectedHeap::pending_card_num() {
2465   size_t extra_cards = 0;
2466   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *curr = jtiwh.next(); ) {
2467     DirtyCardQueue& dcq = curr->dirty_card_queue();
2468     extra_cards += dcq.size();
2469   }
2470   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2471   size_t buffer_size = dcqs.buffer_size();
2472   size_t buffer_num = dcqs.completed_buffers_num();
2473 
2474   return buffer_size * buffer_num + extra_cards;
2475 }
2476 
2477 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
2478   // We don't nominate objects with many remembered set entries, on
2479   // the assumption that such objects are likely still live.
2480   HeapRegionRemSet* rem_set = r->rem_set();
2481 
2482   return G1EagerReclaimHumongousObjectsWithStaleRefs ?
2483          rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
2484          G1EagerReclaimHumongousObjects && rem_set->is_empty();
2485 }
2486 
2487 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
2488  private:
2489   size_t _total_humongous;
2490   size_t _candidate_humongous;


2534     // object, those remembered sets would need to be cleaned up.
2535     //
2536     // We also treat is_typeArray() objects specially, allowing them
2537     // to be reclaimed even if allocated before the start of
2538     // concurrent mark.  For this we rely on mark stack insertion to
2539     // exclude is_typeArray() objects, preventing reclaiming an object
2540     // that is in the mark stack.  We also rely on the metadata for
2541     // such objects being built-in, and so guaranteed to be kept live.
2542     // Frequent allocation and drop of large binary blobs is an
2543     // important use case for eager reclaim, and this special handling
2544     // may reduce needed headroom.
2545 
2546     return obj->is_typeArray() &&
2547            g1h->is_potential_eager_reclaim_candidate(region);
2548   }
2549 
2550  public:
2551   RegisterHumongousWithInCSetFastTestClosure()
2552   : _total_humongous(0),
2553     _candidate_humongous(0),
2554     _dcq(&JavaThread::dirty_card_queue_set()) {
2555   }
2556 
2557   virtual bool do_heap_region(HeapRegion* r) {
2558     if (!r->is_starts_humongous()) {
2559       return false;
2560     }
2561     G1CollectedHeap* g1h = G1CollectedHeap::heap();
2562 
2563     bool is_candidate = humongous_region_is_candidate(g1h, r);
2564     uint rindex = r->hrm_index();
2565     g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
2566     if (is_candidate) {
2567       _candidate_humongous++;
2568       g1h->register_humongous_region_with_cset(rindex);
2569       // is_candidate already filters out humongous objects with large remembered sets.
2570       // If we have a humongous object with only a few remembered set entries, we simply
2571       // flush those entries into the DCQS. That will result in automatic
2572       // re-evaluation of the remembered set entries during the following evacuation
2573       // phase.
2574       if (!r->rem_set()->is_empty()) {


3610     _queue(queue), _g1h(g1h) { }
3611 
3612   virtual void work(uint worker_id) {
3613     G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
3614     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
3615 
3616     RedirtyLoggedCardTableEntryClosure cl(_g1h);
3617     _queue->par_apply_closure_to_all_completed_buffers(&cl);
3618 
3619     phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3620   }
3621 };
3622 
3623 void G1CollectedHeap::redirty_logged_cards() {
3624   double redirty_logged_cards_start = os::elapsedTime();
3625 
3626   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
3627   dirty_card_queue_set().reset_for_par_iteration();
3628   workers()->run_task(&redirty_task);
3629 
3630   DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
3631   dcq.merge_bufferlists(&dirty_card_queue_set());
3632   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3633 
3634   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3635 }
3636 
3637 // Weak Reference Processing support
3638 
3639 // An always "is_alive" closure that is used to preserve referents.
3640 // If the object is non-null then it's alive.  Used in the preservation
3641 // of referent objects that are pointed to by reference objects
3642 // discovered by the CM ref processor.
3643 class G1AlwaysAliveClosure: public BoolObjectClosure {
3644   G1CollectedHeap* _g1;
3645 public:
3646   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3647   bool do_object_b(oop p) {
3648     if (p != NULL) {
3649       return true;
3650     }




  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "code/icBuffer.hpp"
  31 #include "gc/g1/bufferingOopClosure.hpp"
  32 #include "gc/g1/g1Allocator.inline.hpp"
  33 #include "gc/g1/g1BarrierSet.hpp"
  34 #include "gc/g1/g1CollectedHeap.inline.hpp"
  35 #include "gc/g1/g1CollectionSet.hpp"
  36 #include "gc/g1/g1CollectorPolicy.hpp"
  37 #include "gc/g1/g1CollectorState.hpp"
  38 #include "gc/g1/g1ConcurrentRefine.hpp"
  39 #include "gc/g1/g1ConcurrentRefineThread.hpp"
  40 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  41 #include "gc/g1/g1EvacStats.inline.hpp"
  42 #include "gc/g1/g1FullCollector.hpp"
  43 #include "gc/g1/g1GCPhaseTimes.hpp"
  44 #include "gc/g1/g1HeapSizingPolicy.hpp"
  45 #include "gc/g1/g1HeapTransition.hpp"
  46 #include "gc/g1/g1HeapVerifier.hpp"
  47 #include "gc/g1/g1HotCardCache.hpp"
  48 #include "gc/g1/g1MemoryPool.hpp"
  49 #include "gc/g1/g1OopClosures.inline.hpp"
  50 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  51 #include "gc/g1/g1Policy.hpp"
  52 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  53 #include "gc/g1/g1RemSet.hpp"


1020   resize_if_necessary_after_full_collection();
1021 
1022   // Rebuild the strong code root lists for each region
1023   rebuild_strong_code_roots();
1024 
1025   // Start a new incremental collection set for the next pause
1026   start_new_collection_set();
1027 
1028   _allocator->init_mutator_alloc_region();
1029 
1030   // Post collection state updates.
1031   MetaspaceGC::compute_new_size();
1032 }
1033 
1034 void G1CollectedHeap::abort_refinement() {
1035   if (_hot_card_cache->use_cache()) {
1036     _hot_card_cache->reset_hot_cache();
1037   }
1038 
1039   // Discard all remembered set updates.
1040   G1BarrierSet::dirty_card_queue_set().abandon_logs();
1041   assert(dirty_card_queue_set().completed_buffers_num() == 0, "DCQS should be empty");
1042 }
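
abandon_logs() discards all buffered card updates rather than applying them; the assert that follows then checks that the GC-private queue set (the method-local dirty_card_queue_set()) is empty as well. A minimal sketch of the abandon semantics, using hypothetical names only (ToyDirtyCardQueueSet is not the HotSpot API):

    #include <cstddef>
    #include <vector>

    struct ToyCardBuffer { std::vector<void*> cards; };

    class ToyDirtyCardQueueSet {
      std::vector<ToyCardBuffer*> _completed;
    public:
      void enqueue_completed(ToyCardBuffer* b) { _completed.push_back(b); }
      std::size_t completed_buffers_num() const { return _completed.size(); }
      // Drop all pending refinement work without processing it.
      void abandon_logs() {
        for (ToyCardBuffer* b : _completed) delete b;  // drop, don't apply
        _completed.clear();
      }
    };
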
1043 
1044 void G1CollectedHeap::verify_after_full_collection() {
1045   _hrm.verify_optional();
1046   _verifier->verify_region_sets_optional();
1047   _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
1048   // Clear the previous marking bitmap, if needed for bitmap verification.
1049   // Note we cannot do this when we clear the next marking bitmap in
1050   // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1051   // objects marked during a full GC against the previous bitmap.
1052   // But we need to clear it before calling check_bitmaps below since
1053   // the full GC has compacted objects and updated TAMS but not updated
1054   // the prev bitmap.
1055   if (G1VerifyBitmaps) {
1056     GCTraceTime(Debug, gc)("Clear Bitmap for Verification");
1057     _cm->clear_prev_bitmap(workers());
1058   }
1059   _verifier->check_bitmaps("Full GC End");
1060 


1620   }
1621 
1622   // Create the G1ConcurrentMark data structure and thread.
1623   // (Must do this late, so that "max_regions" is defined.)
1624   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1625   if (_cm == NULL || !_cm->completed_initialization()) {
1626     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1627     return JNI_ENOMEM;
1628   }
1629   _cm_thread = _cm->cm_thread();
1630 
1631   // Now expand into the initial heap size.
1632   if (!expand(init_byte_size, _workers)) {
1633     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1634     return JNI_ENOMEM;
1635   }
1636 
1637   // Perform any initialization actions delegated to the policy.
1638   g1_policy()->init(this, &_collection_set);
1639 
1640   G1BarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
1641                                                  SATB_Q_FL_lock,
1642                                                  G1SATBProcessCompletedThreshold,
1643                                                  Shared_SATB_Q_lock);
1644 
1645   jint ecode = initialize_concurrent_refinement();
1646   if (ecode != JNI_OK) {
1647     return ecode;
1648   }
1649 
1650   ecode = initialize_young_gen_sampling_thread();
1651   if (ecode != JNI_OK) {
1652     return ecode;
1653   }
1654 
1655   G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1656                                                   DirtyCardQ_FL_lock,
1657                                                   (int)concurrent_refine()->yellow_zone(),
1658                                                   (int)concurrent_refine()->red_zone(),
1659                                                   Shared_DirtyCardQ_lock,
1660                                                   NULL,  // fl_owner
1661                                                   true); // init_free_ids
1662 
1663   dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1664                                     DirtyCardQ_FL_lock,
1665                                     -1, // never trigger processing
1666                                     -1, // no limit on length
1667                                     Shared_DirtyCardQ_lock,
1668                                     &G1BarrierSet::dirty_card_queue_set());
1669 
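
The two initialize() calls above configure different processing policies: the mutator-facing set (G1BarrierSet::dirty_card_queue_set()) triggers refinement once the completed-buffer count passes the yellow zone and treats the red zone as the hard limit, while the GC-private set passes -1/-1 so buffers simply accumulate until a pause consumes them. A minimal sketch of that threshold scheme, with hypothetical names and the zone meanings stated as assumptions:

    // Illustrative only; not the HotSpot DirtyCardQueueSet interface.
    class ToyQueueSetPolicy {
      long _process_threshold;  // "yellow zone", or -1 for never trigger
      long _max_completed;      // "red zone",    or -1 for no limit
      long _completed = 0;

      void wake_refinement_threads() { /* notify workers (elided) */ }

    public:
      ToyQueueSetPolicy(long process_threshold, long max_completed)
        : _process_threshold(process_threshold), _max_completed(max_completed) {}

      // Called by the enqueuing thread when it fills a buffer. Returns
      // true if that thread should process a buffer itself because the
      // queue has grown past the hard limit.
      bool on_completed_buffer() {
        _completed++;
        if (_process_threshold >= 0 && _completed > _process_threshold) {
          wake_refinement_threads();
        }
        return _max_completed >= 0 && _completed > _max_completed;
      }
    };
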
1670   // Here we allocate the dummy HeapRegion that is required by the
1671   // G1AllocRegion class.
1672   HeapRegion* dummy_region = _hrm.get_dummy_region();
1673 
1674   // We'll reuse the same region whether or not the alloc region
1675   // requires BOT updates and, if it doesn't, a non-young region
1676   // would complain that it cannot support allocations without
1677   // BOT updates. So we tag the dummy region as eden to avoid that.
1678   dummy_region->set_eden();
1679   // Make sure it's full.
1680   dummy_region->set_top(dummy_region->end());
1681   G1AllocRegion::setup(this, dummy_region);
1682 
1683   _allocator->init_mutator_alloc_region();
1684 
1685   // Create the monitoring and management support now so that
1686   // values in the heap have been properly initialized.
1687   _g1mm = new G1MonitoringSupport(this);
1688 


1817   return _collector_policy;
1818 }
1819 
1820 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
1821   return &_soft_ref_policy;
1822 }
1823 
1824 size_t G1CollectedHeap::capacity() const {
1825   return _hrm.length() * HeapRegion::GrainBytes;
1826 }
1827 
1828 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
1829   return _hrm.total_free_bytes();
1830 }
1831 
1832 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
1833   _hot_card_cache->drain(cl, worker_i);
1834 }
1835 
1836 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
1837   DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1838   size_t n_completed_buffers = 0;
1839   while (dcqs.apply_closure_during_gc(cl, worker_i)) {
1840     n_completed_buffers++;
1841   }
1842   g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
1843   dcqs.clear_n_completed_buffers();
1844   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
1845 }
1846 
1847 // Computes the sum of the storage used by the various regions.
1848 size_t G1CollectedHeap::used() const {
1849   size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
1850   if (_archive_allocator != NULL) {
1851     result += _archive_allocator->used();
1852   }
1853   return result;
1854 }
1855 
1856 size_t G1CollectedHeap::used_unlocked() const {
1857   return _summary_bytes_used;


2451   *succeeded = ret_succeeded;
2452 
2453   assert_heap_not_locked();
2454   return result;
2455 }
2456 
2457 void G1CollectedHeap::do_concurrent_mark() {
2458   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2459   if (!_cm_thread->in_progress()) {
2460     _cm_thread->set_started();
2461     CGC_lock->notify();
2462   }
2463 }
2464 
2465 size_t G1CollectedHeap::pending_card_num() {
2466   size_t extra_cards = 0;
2467   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *curr = jtiwh.next(); ) {
2468     DirtyCardQueue& dcq = curr->dirty_card_queue();
2469     extra_cards += dcq.size();
2470   }
2471   DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
2472   size_t buffer_size = dcqs.buffer_size();
2473   size_t buffer_num = dcqs.completed_buffers_num();
2474 
2475   return buffer_size * buffer_num + extra_cards;
2476 }
2477 
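
The pending-card estimate above is buffer_size times the number of completed buffers, plus the cards sitting in each thread's partially filled queue. A worked example with hypothetical numbers:

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Hypothetical snapshot: 10 completed buffers of 256 cards each,
      // plus per-thread partial queues holding 40 cards in total.
      std::size_t buffer_size = 256, buffer_num = 10, extra_cards = 40;
      std::size_t pending = buffer_size * buffer_num + extra_cards;
      std::printf("pending cards: %zu\n", pending);  // prints 2600
      return 0;
    }
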
2478 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
2479   // We don't nominate objects with many remembered set entries, on
2480   // the assumption that such objects are likely still live.
2481   HeapRegionRemSet* rem_set = r->rem_set();
2482 
2483   return G1EagerReclaimHumongousObjectsWithStaleRefs ?
2484          rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
2485          G1EagerReclaimHumongousObjects && rem_set->is_empty();
2486 }
2487 
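
Restated as a standalone predicate (illustrative only, not HotSpot code): with stale-ref reclaim enabled, a humongous region qualifies while its remembered set is still within the sparse limit; otherwise eager reclaim must be enabled and the remembered set completely empty.

    #include <cstddef>

    // Hypothetical restatement of is_potential_eager_reclaim_candidate().
    bool toy_is_candidate(bool reclaim_with_stale_refs,  // G1EagerReclaimHumongousObjectsWithStaleRefs
                          bool eager_reclaim_enabled,    // G1EagerReclaimHumongousObjects
                          std::size_t remset_occupancy,
                          bool remset_empty,
                          std::size_t sparse_limit) {    // G1RSetSparseRegionEntries
      return reclaim_with_stale_refs
           ? remset_occupancy <= sparse_limit
           : (eager_reclaim_enabled && remset_empty);
    }
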
2488 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
2489  private:
2490   size_t _total_humongous;
2491   size_t _candidate_humongous;


2535     // object, those remembered sets would need to be cleaned up.
2536     //
2537     // We also treat is_typeArray() objects specially, allowing them
2538     // to be reclaimed even if allocated before the start of
2539     // concurrent mark.  For this we rely on mark stack insertion to
2540     // exclude is_typeArray() objects, preventing reclaiming an object
2541     // that is in the mark stack.  We also rely on the metadata for
2542     // such objects being built-in, and so guaranteed to be kept live.
2543     // Frequent allocation and drop of large binary blobs is an
2544     // important use case for eager reclaim, and this special handling
2545     // may reduce needed headroom.
2546 
2547     return obj->is_typeArray() &&
2548            g1h->is_potential_eager_reclaim_candidate(region);
2549   }
2550 
2551  public:
2552   RegisterHumongousWithInCSetFastTestClosure()
2553   : _total_humongous(0),
2554     _candidate_humongous(0),
2555     _dcq(&G1BarrierSet::dirty_card_queue_set()) {
2556   }
2557 
2558   virtual bool do_heap_region(HeapRegion* r) {
2559     if (!r->is_starts_humongous()) {
2560       return false;
2561     }
2562     G1CollectedHeap* g1h = G1CollectedHeap::heap();
2563 
2564     bool is_candidate = humongous_region_is_candidate(g1h, r);
2565     uint rindex = r->hrm_index();
2566     g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
2567     if (is_candidate) {
2568       _candidate_humongous++;
2569       g1h->register_humongous_region_with_cset(rindex);
2570       // is_candidate already filters out humongous objects with large remembered sets.
2571       // If we have a humongous object with only a few remembered set entries, we simply
2572       // flush those entries into the DCQS. That will result in automatic
2573       // re-evaluation of the remembered set entries during the following evacuation
2574       // phase.
2575       if (!r->rem_set()->is_empty()) {


3611     _queue(queue), _g1h(g1h) { }
3612 
3613   virtual void work(uint worker_id) {
3614     G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
3615     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
3616 
3617     RedirtyLoggedCardTableEntryClosure cl(_g1h);
3618     _queue->par_apply_closure_to_all_completed_buffers(&cl);
3619 
3620     phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3621   }
3622 };
3623 
3624 void G1CollectedHeap::redirty_logged_cards() {
3625   double redirty_logged_cards_start = os::elapsedTime();
3626 
3627   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
3628   dirty_card_queue_set().reset_for_par_iteration();
3629   workers()->run_task(&redirty_task);
3630 
3631   DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
3632   dcq.merge_bufferlists(&dirty_card_queue_set());
3633   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3634 
3635   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3636 }
3637 
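
After the parallel redirtying pass, merge_bufferlists() splices the GC-private set's completed buffers into the global set so concurrent refinement will eventually reprocess them; the assert confirms the source set ends up empty. A minimal sketch of that splice, with hypothetical names:

    #include <cstddef>
    #include <vector>

    struct ToyBuffer { std::vector<void*> cards; };

    class ToyQueueSet {
      std::vector<ToyBuffer*> _completed;
    public:
      std::size_t completed_buffers_num() const { return _completed.size(); }

      // Move every completed buffer from src into this set, leaving
      // src empty (mirroring the assert after the merge above).
      void merge_bufferlists(ToyQueueSet* src) {
        _completed.insert(_completed.end(),
                          src->_completed.begin(), src->_completed.end());
        src->_completed.clear();
      }
    };
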
3638 // Weak Reference Processing support
3639 
3640 // An always "is_alive" closure that is used to preserve referents.
3641 // If the object is non-null then it's alive.  Used in the preservation
3642 // of referent objects that are pointed to by reference objects
3643 // discovered by the CM ref processor.
3644 class G1AlwaysAliveClosure: public BoolObjectClosure {
3645   G1CollectedHeap* _g1;
3646 public:
3647   G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3648   bool do_object_b(oop p) {
3649     if (p != NULL) {
3650       return true;
3651     }

