
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 56066 : [mq]: card_units


1063 
1064   // Purge code root memory
1065   purge_code_root_memory();
1066 
1067   // Start a new incremental collection set for the next pause
1068   start_new_collection_set();
1069 
1070   _allocator->init_mutator_alloc_region();
1071 
1072   // Post collection state updates.
1073   MetaspaceGC::compute_new_size();
1074 }
1075 
1076 void G1CollectedHeap::abort_refinement() {
1077   if (_hot_card_cache->use_cache()) {
1078     _hot_card_cache->reset_hot_cache();
1079   }
1080 
1081   // Discard all remembered set updates.
1082   G1BarrierSet::dirty_card_queue_set().abandon_logs();
1083   assert(G1BarrierSet::dirty_card_queue_set().num_completed_buffers() == 0,
1084          "DCQS should be empty");
1085 }
1086 
1087 void G1CollectedHeap::verify_after_full_collection() {
1088   _hrm->verify_optional();
1089   _verifier->verify_region_sets_optional();
1090   _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
1091   // Clear the previous marking bitmap, if needed for bitmap verification.
1092   // Note we cannot do this when we clear the next marking bitmap in
1093   // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1094   // objects marked during a full GC against the previous bitmap.
1095   // But we need to clear it before calling check_bitmaps below since
1096   // the full GC has compacted objects and updated TAMS but not updated
1097   // the prev bitmap.
1098   if (G1VerifyBitmaps) {
1099     GCTraceTime(Debug, gc) tm("Clear Prev Bitmap for Verification");
1100     _cm->clear_prev_bitmap(workers());
1101   }
1102   // This call implicitly verifies that the next bitmap is clear after Full GC.
1103   _verifier->check_bitmaps("Full GC End");


1666 
1667   ReservedSpace heap_rs = Universe::reserve_heap(reserved_byte_size,
1668                                                  HeapAlignment);
1669 
1670   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1671 
1672   // Create the barrier set for the entire reserved region.
1673   G1CardTable* ct = new G1CardTable(reserved_region());
1674   ct->initialize();
1675   G1BarrierSet* bs = new G1BarrierSet(ct);
1676   bs->initialize();
1677   assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
1678   BarrierSet::set_barrier_set(bs);
1679   _card_table = ct;
1680 
1681   G1BarrierSet::satb_mark_queue_set().initialize(this,
1682                                                  &bs->satb_mark_queue_buffer_allocator(),
1683                                                  G1SATBProcessCompletedThreshold,
1684                                                  G1SATBBufferEnqueueingThresholdPercent);
1685 
1686   // process_completed_buffers_threshold and max_completed_buffers are updated
1687   // later, based on the concurrent refinement object.
1688   G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1689                                                   &bs->dirty_card_queue_buffer_allocator(),
1690                                                   true); // init_free_ids
1691 
1692   // Create the hot card cache.
1693   _hot_card_cache = new G1HotCardCache(this);
1694 
1695   // Carve out the G1 part of the heap.
1696   ReservedSpace g1_rs = heap_rs.first_part(reserved_byte_size);
1697   size_t page_size = actual_reserved_page_size(heap_rs);
1698   G1RegionToSpaceMapper* heap_storage =
1699     G1RegionToSpaceMapper::create_heap_mapper(g1_rs,
1700                                               g1_rs.size(),
1701                                               page_size,
1702                                               HeapRegion::GrainBytes,
1703                                               1,
1704                                               mtJavaHeap);
1705   if (heap_storage == NULL) {
1706     vm_shutdown_during_initialization("Could not initialize G1 heap");
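Editorial note: the hunk above wires a G1CardTable over the entire reserved region before handing it to the barrier set. As a rough illustration of what a card table indexes, here is a minimal standalone sketch, assuming the conventional 512-byte (2^9) card size; the helper names are hypothetical and not the HotSpot API.

// Sketch only: maps a heap address to the index of the card covering it.
#include <cstddef>
#include <cstdint>
#include <cstdio>

static const int       kCardShift = 9;                    // log2(assumed 512-byte card)
static const uintptr_t kCardSize  = uintptr_t(1) << kCardShift;

// Index of the card covering addr, relative to the heap base.
size_t card_index_for(uintptr_t heap_base, uintptr_t addr) {
  return (size_t)((addr - heap_base) >> kCardShift);
}

int main() {
  uintptr_t base = 0x100000;
  printf("%zu\n", card_index_for(base, base + 5 * kCardSize + 17));  // prints 5
}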


1795   if (!expand(init_byte_size, _workers)) {
1796     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1797     return JNI_ENOMEM;
1798   }
1799 
1800   // Perform any initialization actions delegated to the policy.
1801   policy()->init(this, &_collection_set);
1802 
1803   jint ecode = initialize_concurrent_refinement();
1804   if (ecode != JNI_OK) {
1805     return ecode;
1806   }
1807 
1808   ecode = initialize_young_gen_sampling_thread();
1809   if (ecode != JNI_OK) {
1810     return ecode;
1811   }
1812 
1813   {
1814     G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1815     dcqs.set_process_completed_buffers_threshold(concurrent_refine()->yellow_zone());
1816     dcqs.set_max_completed_buffers(concurrent_refine()->red_zone());
1817   }
1818 
1819   // Here we allocate the dummy HeapRegion that is required by the
1820   // G1AllocRegion class.
1821   HeapRegion* dummy_region = _hrm->get_dummy_region();
1822 
1823   // We'll re-use the same region whether or not the alloc region
1824   // requires BOT updates. If it doesn't, a non-young region would
1825   // complain that it cannot support allocations without BOT updates,
1826   // so we tag the dummy region as eden to avoid that.
1827   dummy_region->set_eden();
1828   // Make sure it's full.
1829   dummy_region->set_top(dummy_region->end());
1830   G1AllocRegion::setup(this, dummy_region);
1831 
1832   _allocator->init_mutator_alloc_region();
1833 
1834   // Create the monitoring and management support now that
1835   // values in the heap have been properly initialized.
1836   _g1mm = new G1MonitoringSupport(this);
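Editorial note: the dummy-region setup above relies on a small trick worth spelling out: tagging the region as eden sidesteps the non-young BOT-update complaint, and setting top == end makes every allocation attempt fail fast. A toy model of that pattern, with all types invented for illustration:

// Toy "full dummy region": top == end means allocation always fails.
#include <cassert>
#include <cstddef>

struct ToyRegion {
  char* _bottom;
  char* _top;
  char* _end;
  bool  _is_eden = false;
  void  set_eden()       { _is_eden = true; }
  void  set_top(char* t) { _top = t; }
  char* end() const      { return _end; }
  // Bump-pointer allocation; returns nullptr when the region is full.
  char* allocate(size_t bytes) {
    if (_top + bytes > _end) return nullptr;
    char* result = _top;
    _top += bytes;
    return result;
  }
};

int main() {
  char backing[1024];
  ToyRegion dummy{backing, backing, backing + sizeof(backing)};
  dummy.set_eden();                      // avoid non-young BOT complaints
  dummy.set_top(dummy.end());            // make sure it's full
  assert(dummy.allocate(8) == nullptr);  // every allocation fails fast
}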


1935 }
1936 
1937 size_t G1CollectedHeap::capacity() const {
1938   return _hrm->length() * HeapRegion::GrainBytes;
1939 }
1940 
1941 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
1942   return _hrm->total_free_bytes();
1943 }
1944 
1945 void G1CollectedHeap::iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i) {
1946   _hot_card_cache->drain(cl, worker_i);
1947 }
1948 
1949 void G1CollectedHeap::iterate_dirty_card_closure(G1CardTableEntryClosure* cl, uint worker_i) {
1950   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1951   size_t n_completed_buffers = 0;
1952   while (dcqs.apply_closure_during_gc(cl, worker_i)) {
1953     n_completed_buffers++;
1954   }
1955   assert(dcqs.num_completed_buffers() == 0, "Completed buffers exist!");
1956   phase_times()->record_thread_work_item(G1GCPhaseTimes::MergeLB, worker_i, n_completed_buffers, G1GCPhaseTimes::MergeLBProcessedBuffers);
1957 }
1958 
1959 // Computes the sum of the storage used by the various regions.
1960 size_t G1CollectedHeap::used() const {
1961   size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
1962   if (_archive_allocator != NULL) {
1963     result += _archive_allocator->used();
1964   }
1965   return result;
1966 }
1967 
1968 size_t G1CollectedHeap::used_unlocked() const {
1969   return _summary_bytes_used;
1970 }
1971 
1972 class SumUsedClosure: public HeapRegionClosure {
1973   size_t _used;
1974 public:
1975   SumUsedClosure() : _used(0) {}
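Editorial note: capacity() above is simple arithmetic: the number of committed regions times the fixed region size. A quick worked example, with an assumed region size (G1 actually picks HeapRegion::GrainBytes ergonomically):

// Worked example of capacity() = regions * region size.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t grain_bytes = 4 * 1024 * 1024;  // example region size, 4 MiB
  const size_t num_regions = 640;              // example _hrm->length()
  printf("capacity = %zu bytes\n", num_regions * grain_bytes);  // 2684354560
}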


2597 
2598 void G1CollectedHeap::do_concurrent_mark() {
2599   MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
2600   if (!_cm_thread->in_progress()) {
2601     _cm_thread->set_started();
2602     CGC_lock->notify();
2603   }
2604 }
2605 
2606 size_t G1CollectedHeap::pending_card_num() {
2607   struct CountCardsClosure : public ThreadClosure {
2608     size_t _cards;
2609     CountCardsClosure() : _cards(0) {}
2610     virtual void do_thread(Thread* t) {
2611       _cards += G1ThreadLocalData::dirty_card_queue(t).size();
2612     }
2613   } count_from_threads;
2614   Threads::threads_do(&count_from_threads);
2615 
2616   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
2617   dcqs.verify_num_entries_in_completed_buffers();
2618 
2619   return dcqs.num_entries_in_completed_buffers() + count_from_threads._cards;
2620 }
2621 
2622 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
2623   // We don't nominate objects with many remembered set entries, on
2624   // the assumption that such objects are likely still live.
2625   HeapRegionRemSet* rem_set = r->rem_set();
2626 
2627   return G1EagerReclaimHumongousObjectsWithStaleRefs ?
2628          rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
2629          G1EagerReclaimHumongousObjects && rem_set->is_empty();
2630 }
2631 
2632 class RegisterRegionsWithRegionAttrTableClosure : public HeapRegionClosure {
2633  private:
2634   size_t _total_humongous;
2635   size_t _candidate_humongous;
2636 
2637   bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {
2638     assert(region->is_starts_humongous(), "Must start a humongous object");
2639 
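Editorial note: the eager-reclaim predicate above has two modes: with stale-ref reclaim enabled, a humongous region qualifies while its remembered set is still sparse; otherwise the remembered set must be completely empty. A stand-in sketch of the same decision, with invented flag and threshold values:

// Sketch of the candidate test; occupancy stands in for the remset query.
#include <cstddef>

bool is_candidate(size_t rem_set_occupancy,
                  bool reclaim_with_stale_refs,
                  bool reclaim_humongous,
                  size_t sparse_entries_threshold) {
  return reclaim_with_stale_refs
             ? rem_set_occupancy <= sparse_entries_threshold
             : (reclaim_humongous && rem_set_occupancy == 0);
}

int main() {
  return is_candidate(3, true, true, 4) ? 0 : 1;  // small remset: candidate
}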




1063 
1064   // Purge code root memory
1065   purge_code_root_memory();
1066 
1067   // Start a new incremental collection set for the next pause
1068   start_new_collection_set();
1069 
1070   _allocator->init_mutator_alloc_region();
1071 
1072   // Post collection state updates.
1073   MetaspaceGC::compute_new_size();
1074 }
1075 
1076 void G1CollectedHeap::abort_refinement() {
1077   if (_hot_card_cache->use_cache()) {
1078     _hot_card_cache->reset_hot_cache();
1079   }
1080 
1081   // Discard all remembered set updates.
1082   G1BarrierSet::dirty_card_queue_set().abandon_logs();
1083   assert(G1BarrierSet::dirty_card_queue_set().num_cards() == 0,
1084          "DCQS should be empty");
1085 }
1086 
1087 void G1CollectedHeap::verify_after_full_collection() {
1088   _hrm->verify_optional();
1089   _verifier->verify_region_sets_optional();
1090   _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
1091   // Clear the previous marking bitmap, if needed for bitmap verification.
1092   // Note we cannot do this when we clear the next marking bitmap in
1093   // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1094   // objects marked during a full GC against the previous bitmap.
1095   // But we need to clear it before calling check_bitmaps below since
1096   // the full GC has compacted objects and updated TAMS but not updated
1097   // the prev bitmap.
1098   if (G1VerifyBitmaps) {
1099     GCTraceTime(Debug, gc) tm("Clear Prev Bitmap for Verification");
1100     _cm->clear_prev_bitmap(workers());
1101   }
1102   // This call implicitly verifies that the next bitmap is clear after Full GC.
1103   _verifier->check_bitmaps("Full GC End");
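Editorial note: this is where the rev's unit change first shows up: the old excerpt asserts num_completed_buffers() == 0 after abandon_logs(), while the new code asserts num_cards() == 0, accounting in individual pending cards rather than whole buffers. A toy sketch of that accounting, with made-up types, assuming the card count is maintained as buffers are enqueued and discarded:

// Hypothetical stand-in: the queue set tracks a running card count, so
// abandoning the logs must leave num_cards() == 0 (the assert above).
#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

class ToyDirtyCardQueueSet {
  std::vector<std::vector<void*>> _completed;  // completed buffers of card addresses
  size_t _num_cards = 0;                       // running total across buffers
public:
  void enqueue_completed(std::vector<void*> buffer) {
    _num_cards += buffer.size();
    _completed.push_back(std::move(buffer));
  }
  void abandon_logs() {                        // discard all pending refinement work
    _completed.clear();
    _num_cards = 0;
  }
  size_t num_cards() const { return _num_cards; }
};

int main() {
  ToyDirtyCardQueueSet dcqs;
  dcqs.enqueue_completed({nullptr, nullptr});
  dcqs.abandon_logs();
  assert(dcqs.num_cards() == 0 && "DCQS should be empty");
}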


1666 
1667   ReservedSpace heap_rs = Universe::reserve_heap(reserved_byte_size,
1668                                                  HeapAlignment);
1669 
1670   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
1671 
1672   // Create the barrier set for the entire reserved region.
1673   G1CardTable* ct = new G1CardTable(reserved_region());
1674   ct->initialize();
1675   G1BarrierSet* bs = new G1BarrierSet(ct);
1676   bs->initialize();
1677   assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
1678   BarrierSet::set_barrier_set(bs);
1679   _card_table = ct;
1680 
1681   G1BarrierSet::satb_mark_queue_set().initialize(this,
1682                                                  &bs->satb_mark_queue_buffer_allocator(),
1683                                                  G1SATBProcessCompletedThreshold,
1684                                                  G1SATBBufferEnqueueingThresholdPercent);
1685 
1686   // process_cards_threshold and max_cards are updated
1687   // later, based on the concurrent refinement object.
1688   G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
1689                                                   &bs->dirty_card_queue_buffer_allocator(),
1690                                                   true); // init_free_ids
1691 
1692   // Create the hot card cache.
1693   _hot_card_cache = new G1HotCardCache(this);
1694 
1695   // Carve out the G1 part of the heap.
1696   ReservedSpace g1_rs = heap_rs.first_part(reserved_byte_size);
1697   size_t page_size = actual_reserved_page_size(heap_rs);
1698   G1RegionToSpaceMapper* heap_storage =
1699     G1RegionToSpaceMapper::create_heap_mapper(g1_rs,
1700                                               g1_rs.size(),
1701                                               page_size,
1702                                               HeapRegion::GrainBytes,
1703                                               1,
1704                                               mtJavaHeap);
1705   if (heap_storage == NULL) {
1706     vm_shutdown_during_initialization("Could not initialize G1 heap");


1795   if (!expand(init_byte_size, _workers)) {
1796     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1797     return JNI_ENOMEM;
1798   }
1799 
1800   // Perform any initialization actions delegated to the policy.
1801   policy()->init(this, &_collection_set);
1802 
1803   jint ecode = initialize_concurrent_refinement();
1804   if (ecode != JNI_OK) {
1805     return ecode;
1806   }
1807 
1808   ecode = initialize_young_gen_sampling_thread();
1809   if (ecode != JNI_OK) {
1810     return ecode;
1811   }
1812 
1813   {
1814     G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1815     dcqs.set_process_cards_threshold(concurrent_refine()->yellow_zone());
1816     dcqs.set_max_cards(concurrent_refine()->red_zone());
1817   }
1818 
1819   // Here we allocate the dummy HeapRegion that is required by the
1820   // G1AllocRegion class.
1821   HeapRegion* dummy_region = _hrm->get_dummy_region();
1822 
1823   // We'll re-use the same region whether or not the alloc region
1824   // requires BOT updates. If it doesn't, a non-young region would
1825   // complain that it cannot support allocations without BOT updates,
1826   // so we tag the dummy region as eden to avoid that.
1827   dummy_region->set_eden();
1828   // Make sure it's full.
1829   dummy_region->set_top(dummy_region->end());
1830   G1AllocRegion::setup(this, dummy_region);
1831 
1832   _allocator->init_mutator_alloc_region();
1833 
1834   // Create the monitoring and management support now that
1835   // values in the heap have been properly initialized.
1836   _g1mm = new G1MonitoringSupport(this);
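Editorial note: above, the dirty card queue set's processing threshold and cap are now expressed in cards (set_process_cards_threshold / set_max_cards) and seeded from the refinement yellow and red zones. A hedged, illustrative sketch of the zone semantics as generally described for G1 refinement (values and names invented): below yellow, pending work is tolerated; at or above red, enqueuing threads must help so the pending count stays bounded.

// Illustrative (non-HotSpot) zone classification of a pending card count.
#include <cstddef>
#include <cstdio>

enum class RefinementPressure { Relaxed, ConcurrentRefine, MutatorMustHelp };

RefinementPressure classify(size_t pending_cards,
                            size_t yellow_zone, size_t red_zone) {
  if (pending_cards >= red_zone)    return RefinementPressure::MutatorMustHelp;
  if (pending_cards >= yellow_zone) return RefinementPressure::ConcurrentRefine;
  return RefinementPressure::Relaxed;
}

int main() {
  printf("%d\n", (int)classify(/*pending*/ 900, /*yellow*/ 500, /*red*/ 1000));
}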


1935 }
1936 
1937 size_t G1CollectedHeap::capacity() const {
1938   return _hrm->length() * HeapRegion::GrainBytes;
1939 }
1940 
1941 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
1942   return _hrm->total_free_bytes();
1943 }
1944 
1945 void G1CollectedHeap::iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i) {
1946   _hot_card_cache->drain(cl, worker_i);
1947 }
1948 
1949 void G1CollectedHeap::iterate_dirty_card_closure(G1CardTableEntryClosure* cl, uint worker_i) {
1950   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1951   size_t n_completed_buffers = 0;
1952   while (dcqs.apply_closure_during_gc(cl, worker_i)) {
1953     n_completed_buffers++;
1954   }
1955   assert(dcqs.num_cards() == 0, "Completed buffers exist!");
1956   phase_times()->record_thread_work_item(G1GCPhaseTimes::MergeLB, worker_i, n_completed_buffers, G1GCPhaseTimes::MergeLBProcessedBuffers);
1957 }
1958 
1959 // Computes the sum of the storage used by the various regions.
1960 size_t G1CollectedHeap::used() const {
1961   size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
1962   if (_archive_allocator != NULL) {
1963     result += _archive_allocator->used();
1964   }
1965   return result;
1966 }
1967 
1968 size_t G1CollectedHeap::used_unlocked() const {
1969   return _summary_bytes_used;
1970 }
1971 
1972 class SumUsedClosure: public HeapRegionClosure {
1973   size_t _used;
1974 public:
1975   SumUsedClosure() : _used(0) {}
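Editorial note: iterate_dirty_card_closure() drains the queue set one completed buffer at a time, counting buffers for the MergeLB phase stats, while its post-condition is now phrased in cards. A minimal model of that drain loop, with stand-in types:

// Minimal sketch of the drain pattern: pop-and-apply until no work remains.
#include <cassert>
#include <cstddef>
#include <deque>
#include <utility>
#include <vector>

struct ToyCardClosure { void do_card(void* /*card*/) {} };

struct ToyDCQS {
  std::deque<std::vector<void*>> _completed;
  size_t _num_cards = 0;
  // Returns true if a buffer was claimed and fully processed, false if empty.
  bool apply_closure_to_one_buffer(ToyCardClosure* cl) {
    if (_completed.empty()) return false;
    std::vector<void*> buf = std::move(_completed.front());
    _completed.pop_front();
    for (void* card : buf) cl->do_card(card);
    _num_cards -= buf.size();
    return true;
  }
  size_t num_cards() const { return _num_cards; }
};

size_t drain(ToyDCQS& dcqs, ToyCardClosure* cl) {
  size_t n_buffers = 0;
  while (dcqs.apply_closure_to_one_buffer(cl)) {
    n_buffers++;                   // recorded as a per-worker work item above
  }
  assert(dcqs.num_cards() == 0 && "Completed buffers exist!");
  return n_buffers;
}

int main() {
  ToyDCQS d;
  ToyCardClosure c;
  return (int)drain(d, &c);
}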


2597 
2598 void G1CollectedHeap::do_concurrent_mark() {
2599   MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
2600   if (!_cm_thread->in_progress()) {
2601     _cm_thread->set_started();
2602     CGC_lock->notify();
2603   }
2604 }
2605 
2606 size_t G1CollectedHeap::pending_card_num() {
2607   struct CountCardsClosure : public ThreadClosure {
2608     size_t _cards;
2609     CountCardsClosure() : _cards(0) {}
2610     virtual void do_thread(Thread* t) {
2611       _cards += G1ThreadLocalData::dirty_card_queue(t).size();
2612     }
2613   } count_from_threads;
2614   Threads::threads_do(&count_from_threads);
2615 
2616   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
2617   dcqs.verify_num_cards();
2618 
2619   return dcqs.num_cards() + count_from_threads._cards;
2620 }
2621 
2622 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
2623   // We don't nominate objects with many remembered set entries, on
2624   // the assumption that such objects are likely still live.
2625   HeapRegionRemSet* rem_set = r->rem_set();
2626 
2627   return G1EagerReclaimHumongousObjectsWithStaleRefs ?
2628          rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
2629          G1EagerReclaimHumongousObjects && rem_set->is_empty();
2630 }
2631 
2632 class RegisterRegionsWithRegionAttrTableClosure : public HeapRegionClosure {
2633  private:
2634   size_t _total_humongous;
2635   size_t _candidate_humongous;
2636 
2637   bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {
2638     assert(region->is_starts_humongous(), "Must start a humongous object");
2639 
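Editorial note: pending_card_num() above adds the cards still sitting in each thread's local queue to the total the queue set now tracks directly via num_cards(). A small stand-in showing the same two-part sum; the thread iteration and queue types are illustrative only:

// Sketch of the two-part sum: per-thread queue sizes plus completed-buffer cards.
#include <cstddef>
#include <vector>

struct ToyThread { size_t dirty_card_queue_size; };

size_t pending_card_num(const std::vector<ToyThread>& threads,
                        size_t cards_in_completed_buffers) {
  size_t from_threads = 0;
  for (const ToyThread& t : threads) {  // mirrors Threads::threads_do + closure
    from_threads += t.dirty_card_queue_size;
  }
  return cards_in_completed_buffers + from_threads;
}

int main() {
  std::vector<ToyThread> ts = {{3}, {0}, {7}};
  return (int)pending_card_num(ts, 42);  // 52
}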

