
src/hotspot/share/gc/g1/g1CollectedHeap.cpp





--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp

 628       log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
 629                                 HeapRegion::GrainWords * HeapWordSize * commits);
 630 
 631     }
 632 
 633     // Mark each G1 region touched by the range as archive, add it to
 634     // the archive set, and set top.
 635     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
 636     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 637     prev_last_region = last_region;
 638 
 639     while (curr_region != NULL) {
 640       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 641              "Region already in use (index %u)", curr_region->hrm_index());
 642       if (open) {
 643         curr_region->set_open_archive();
 644       } else {
 645         curr_region->set_closed_archive();
 646       }
 647       _hr_printer.alloc(curr_region);
 648       _archive_set.add(curr_region);
 649       HeapWord* top;
 650       HeapRegion* next_region;
 651       if (curr_region != last_region) {
 652         top = curr_region->end();
 653         next_region = _hrm.next_region_in_heap(curr_region);
 654       } else {
 655         top = last_address + 1;
 656         next_region = NULL;
 657       }
 658       curr_region->set_top(top);
 659       curr_region->set_first_dead(top);
 660       curr_region->set_end_of_live(top);
 661       curr_region = next_region;
 662     }
 663 
 664     // Notify mark-sweep of the archive
 665     G1ArchiveAllocator::set_range_archive(curr_range, open);
 666   }
 667   return true;
 668 }
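A note on the loop above: regions fully covered by the range get their top at the region's exclusive end, while the final region's top lands one HeapWord past last_address, so the regions exactly cover the archive objects. A self-contained sketch of that calculation (the pointer type below is an illustrative stand-in, not HeapWord*):

    #include <cstdint>

    typedef uint64_t* HeapWordAddr;   // stand-in for HeapWord* (one 64-bit word)

    // Interior regions of the range are filled completely, so top is the
    // region's exclusive end; the final region is filled only up to one word
    // past the last archived word, matching 'top = last_address + 1' above.
    HeapWordAddr top_for_region(HeapWordAddr region_end,
                                HeapWordAddr last_address,
                                bool is_last_region) {
      return is_last_region ? last_address + 1 : region_end;
    }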


1590 
1591 jint G1CollectedHeap::initialize_concurrent_refinement() {
1592   jint ecode = JNI_OK;
1593   _cr = G1ConcurrentRefine::create(&ecode);
1594   return ecode;
1595 }
1596 
1597 jint G1CollectedHeap::initialize_young_gen_sampling_thread() {
1598   _young_gen_sampling_thread = new G1YoungRemSetSamplingThread();
1599   if (_young_gen_sampling_thread->osthread() == NULL) {
1600     vm_shutdown_during_initialization("Could not create G1YoungRemSetSamplingThread");
1601     return JNI_ENOMEM;
1602   }
1603   return JNI_OK;
1604 }
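Both helpers return JNI-style status codes (JNI_OK on success); the expectation is that initialize() propagates any failure immediately rather than continuing with a half-built heap. A self-contained sketch of that caller-side pattern, with stand-in helpers rather than the HotSpot declarations:

    typedef int jint;
    enum { JNI_OK = 0, JNI_ENOMEM = -4 };   // values as in jni.h

    // Stand-ins for the two helpers above; each reports success or failure.
    static jint init_refinement()      { return JNI_OK; }
    static jint init_sampling_thread() { return JNI_OK; }

    // Caller-side pattern: stop at the first failing step.
    jint initialize_subsystems() {
      jint ecode = init_refinement();
      if (ecode != JNI_OK) {
        return ecode;                       // abort initialization early
      }
      return init_sampling_thread();
    }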
1605 
1606 jint G1CollectedHeap::initialize() {
1607   os::enable_vtime();
1608 
1609   // Necessary to satisfy locking discipline assertions.
1610 
1611   MutexLocker x(Heap_lock);
1612 
1613   // While there are no constraints in the GC code that HeapWordSize
1614   // be any particular value, there are multiple other areas in the
1615   // system which believe this to be true (e.g. oop->object_size in some
1616   // cases incorrectly returns the size in wordSize units rather than
1617   // HeapWordSize).
1618   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1619 
1620   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1621   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1622   size_t heap_alignment = collector_policy()->heap_alignment();
1623 
1624   // Ensure that the sizes are properly aligned.
1625   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1626   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1627   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1628 
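Universe::check_alignment rejects a size that is not a multiple of the requested alignment; since HeapRegion::GrainBytes and the heap alignment are powers of two, the check reduces to a mask test. A minimal sketch of the invariant (HotSpot exits VM initialization on failure rather than asserting):

    #include <cstddef>

    // A size is properly aligned when it is a multiple of 'alignment';
    // for power-of-two alignments this is a single mask test.
    static bool is_aligned_size(size_t size, size_t alignment) {
      return alignment != 0
          && (alignment & (alignment - 1)) == 0   // power of two
          && (size & (alignment - 1)) == 0;       // multiple of alignment
    }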
1629   // Reserve the maximum.
1630 


1817   _young_gen_sampling_thread->stop();
1818   _cm_thread->stop();
1819   if (G1StringDedup::is_enabled()) {
1820     G1StringDedup::stop();
1821   }
1822 }
1823 
1824 void G1CollectedHeap::safepoint_synchronize_begin() {
1825   SuspendibleThreadSet::synchronize();
1826 }
1827 
1828 void G1CollectedHeap::safepoint_synchronize_end() {
1829   SuspendibleThreadSet::desynchronize();
1830 }
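These two hooks bracket every safepoint: synchronize() waits until all threads in the suspendible set have yielded, and desynchronize() lets them resume. On the other side of the protocol, G1's concurrent threads join the set and poll for yield requests; sketched from memory below (SuspendibleThreadSetJoiner is the RAII helper used elsewhere in this code base, and the work-loop functions are hypothetical):

    // Concurrent-thread side of the suspendible-thread-set protocol.
    {
      SuspendibleThreadSetJoiner sts_join;   // join on construction (RAII)
      while (has_more_work()) {              // hypothetical work loop
        if (sts_join.should_yield()) {
          sts_join.yield();                  // park here while the safepoint runs
        }
        do_some_work();                      // hypothetical unit of work
      }
    }                                        // leave the set when scope exits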
1831 
1832 size_t G1CollectedHeap::conservative_max_heap_alignment() {
1833   return HeapRegion::max_region_size();
1834 }
1835 
1836 void G1CollectedHeap::post_initialize() {



1837   CollectedHeap::post_initialize();
1838   ref_processing_init();
1839 }
1840 
1841 void G1CollectedHeap::ref_processing_init() {
1842   // Reference processing in G1 currently works as follows:
1843   //
1844   // * There are two reference processor instances. One is
1845   //   used to record and process discovered references
1846   //   during concurrent marking; the other is used to
1847   //   record and process references during STW pauses
1848   //   (both full and incremental).
1849   // * Both ref processors need to 'span' the entire heap as
1850   //   the regions in the collection set may be dotted around.
1851   //
1852   // * For the concurrent marking ref processor:
1853   //   * Reference discovery is enabled at initial marking.
1854   //   * Reference discovery is disabled and the discovered
1855   //     references are processed, etc., during remarking.
1856   //   * Reference discovery is MT (see below).
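The bullet list above describes a protocol rather than an API; as a reading aid, here is a deliberately simplified model of the concurrent-marking discovery window (the type below is hypothetical, not HotSpot's ReferenceProcessor):

    // Hypothetical model: discovery is only 'open' between initial mark and remark.
    struct DiscoveryWindow {
      bool open = false;
      void enable()  { open = true;  }   // at initial marking
      void disable() { open = false; }   // at remark, before processing
    };

    void concurrent_mark_cycle(DiscoveryWindow& cm_discovery) {
      cm_discovery.enable();    // start recording discovered references
      // ... concurrent marking runs; references found get recorded ...
      cm_discovery.disable();   // stop discovery; process what was recorded
    }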


2883       verify_type = G1HeapVerifier::G1VerifyConcurrentStart;
2884     } else if (collector_state()->in_young_only_phase()) {
2885       if (collector_state()->in_young_gc_before_mixed()) {
2886         gc_string.append("(Prepare Mixed)");
2887       } else {
2888         gc_string.append("(Normal)");
2889       }
2890       verify_type = G1HeapVerifier::G1VerifyYoungNormal;
2891     } else {
2892       gc_string.append("(Mixed)");
2893       verify_type = G1HeapVerifier::G1VerifyMixed;
2894     }
2895     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2896 
2897     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2898                                                                   workers()->active_workers(),
2899                                                                   Threads::number_of_non_daemon_threads());
2900     active_workers = workers()->update_active_workers(active_workers);
2901     log_info(gc, task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
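calc_active_workers picks a worker count from the pool size, the previous active count, and the number of non-daemon Java threads, and update_active_workers commits it. The real heuristic lives in AdaptiveSizePolicy; the sketch below keeps only the bounds it must respect (illustrative, not the actual formula):

    #include <algorithm>

    // Illustrative bounds only: never more workers than the pool has,
    // never fewer than one. The HotSpot heuristic additionally scales
    // with application (non-daemon) thread count and ergonomic flags.
    unsigned choose_active_workers(unsigned total_workers,
                                   unsigned non_daemon_threads) {
      unsigned wanted = std::min(total_workers, non_daemon_threads);
      return std::max(1u, wanted);
    }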
2902 
2903     G1MonitoringScope ms(g1mm(),
2904                          false /* full_gc */,
2905                          collector_state()->yc_type() == Mixed /* all_memory_pools_affected */);
2906 
2907     G1HeapTransition heap_transition(this);
2908     size_t heap_used_bytes_before_gc = used();
2909 
2910     // Don't dynamically change the number of GC threads this early.  A value of
2911     // 0 is used to indicate serial work.  When parallel work is done,
2912     // it will be set.
2913 
2914     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2915       IsGCActiveMark x;
2916 
2917       gc_prologue(false);
2918 
2919       if (VerifyRememberedSets) {
2920         log_info(gc, verify)("[Verifying RemSets before GC]");
2921         VerifyRegionRemSetClosure v_cl;
2922         heap_region_iterate(&v_cl);
2923       }
2924 
2925       _verifier->verify_before_gc(verify_type);
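VerifyRegionRemSetClosure is applied to every region by heap_region_iterate; closures in this iteration protocol implement do_heap_region and return true to stop the iteration early, false to continue. A sketch of that shape from memory (the verification body is elided; verify_rem_set as the HeapRegion entry point is recalled, not verified, so treat it as an assumption):

    // Sketch of the region-closure protocol used by heap_region_iterate.
    class VerifyRegionRemSetClosure : public HeapRegionClosure {
    public:
      virtual bool do_heap_region(HeapRegion* hr) {
        hr->verify_rem_set();   // assumed entry point; checks this region's remset
        return false;           // false: continue with the remaining regions
      }
    };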




+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp

 628       log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
 629                                 HeapRegion::GrainWords * HeapWordSize * commits);
 630 
 631     }
 632 
 633     // Mark each G1 region touched by the range as archive, add it to
 634     // the archive set, and set top.
 635     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
 636     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 637     prev_last_region = last_region;
 638 
 639     while (curr_region != NULL) {
 640       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 641              "Region already in use (index %u)", curr_region->hrm_index());
 642       if (open) {
 643         curr_region->set_open_archive();
 644       } else {
 645         curr_region->set_closed_archive();
 646       }
 647       _hr_printer.alloc(curr_region);
 648       archive_set_add(curr_region);
 649       HeapWord* top;
 650       HeapRegion* next_region;
 651       if (curr_region != last_region) {
 652         top = curr_region->end();
 653         next_region = _hrm.next_region_in_heap(curr_region);
 654       } else {
 655         top = last_address + 1;
 656         next_region = NULL;
 657       }
 658       curr_region->set_top(top);
 659       curr_region->set_first_dead(top);
 660       curr_region->set_end_of_live(top);
 661       curr_region = next_region;
 662     }
 663 
 664     // Notify mark-sweep of the archive
 665     G1ArchiveAllocator::set_range_archive(curr_range, open);
 666   }
 667   return true;
 668 }


1590 
1591 jint G1CollectedHeap::initialize_concurrent_refinement() {
1592   jint ecode = JNI_OK;
1593   _cr = G1ConcurrentRefine::create(&ecode);
1594   return ecode;
1595 }
1596 
1597 jint G1CollectedHeap::initialize_young_gen_sampling_thread() {
1598   _young_gen_sampling_thread = new G1YoungRemSetSamplingThread();
1599   if (_young_gen_sampling_thread->osthread() == NULL) {
1600     vm_shutdown_during_initialization("Could not create G1YoungRemSetSamplingThread");
1601     return JNI_ENOMEM;
1602   }
1603   return JNI_OK;
1604 }
1605 
1606 jint G1CollectedHeap::initialize() {
1607   os::enable_vtime();
1608 
1609   // Necessary to satisfy locking discipline assertions.

1610   MutexLocker x(Heap_lock);
1611 
1612   // While there are no constraints in the GC code that HeapWordSize
1613   // be any particular value, there are multiple other areas in the
1614   // system which believe this to be true (e.g. oop->object_size in some
1615   // cases incorrectly returns the size in wordSize units rather than
1616   // HeapWordSize).
1617   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1618 
1619   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1620   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1621   size_t heap_alignment = collector_policy()->heap_alignment();
1622 
1623   // Ensure that the sizes are properly aligned.
1624   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1625   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1626   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1627 
1628   // Reserve the maximum.
1629 


1816   _young_gen_sampling_thread->stop();
1817   _cm_thread->stop();
1818   if (G1StringDedup::is_enabled()) {
1819     G1StringDedup::stop();
1820   }
1821 }
1822 
1823 void G1CollectedHeap::safepoint_synchronize_begin() {
1824   SuspendibleThreadSet::synchronize();
1825 }
1826 
1827 void G1CollectedHeap::safepoint_synchronize_end() {
1828   SuspendibleThreadSet::desynchronize();
1829 }
1830 
1831 size_t G1CollectedHeap::conservative_max_heap_alignment() {
1832   return HeapRegion::max_region_size();
1833 }
1834 
1835 void G1CollectedHeap::post_initialize() {
1836   // Necessary to satisfy locking discipline assertions.
1837   MutexLockerEx x(Heap_lock);
1838 
1839   CollectedHeap::post_initialize();
1840   ref_processing_init();
1841 }
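The MutexLockerEx added here plays the same role as the MutexLocker in initialize(): it exists to satisfy lock-ownership assertions in code reached from post_initialize(), not to fence off a real contender this early in startup. The underlying idiom is plain RAII locking; a standalone sketch:

    #include <mutex>

    // RAII locking as MutexLocker/MutexLockerEx implement it: acquire in the
    // constructor, release in the destructor, so any assertion of the form
    // "caller must hold Heap_lock" holds for the whole scope.
    void post_initialize_sketch(std::mutex& heap_lock) {
      std::lock_guard<std::mutex> hold(heap_lock);
      // ... initialization that asserts lock ownership runs here ...
    }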
1842 
1843 void G1CollectedHeap::ref_processing_init() {
1844   // Reference processing in G1 currently works as follows:
1845   //
1846   // * There are two reference processor instances. One is
1847   //   used to record and process discovered references
1848   //   during concurrent marking; the other is used to
1849   //   record and process references during STW pauses
1850   //   (both full and incremental).
1851   // * Both ref processors need to 'span' the entire heap as
1852   //   the regions in the collection set may be dotted around.
1853   //
1854   // * For the concurrent marking ref processor:
1855   //   * Reference discovery is enabled at initial marking.
1856   //   * Reference discovery is disabled and the discovered
1857   //     references are processed, etc., during remarking.
1858   //   * Reference discovery is MT (see below).


2885       verify_type = G1HeapVerifier::G1VerifyConcurrentStart;
2886     } else if (collector_state()->in_young_only_phase()) {
2887       if (collector_state()->in_young_gc_before_mixed()) {
2888         gc_string.append("(Prepare Mixed)");
2889       } else {
2890         gc_string.append("(Normal)");
2891       }
2892       verify_type = G1HeapVerifier::G1VerifyYoungNormal;
2893     } else {
2894       gc_string.append("(Mixed)");
2895       verify_type = G1HeapVerifier::G1VerifyMixed;
2896     }
2897     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2898 
2899     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2900                                                                   workers()->active_workers(),
2901                                                                   Threads::number_of_non_daemon_threads());
2902     active_workers = workers()->update_active_workers(active_workers);
2903     log_info(gc, task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
2904 
2905     G1MonitoringScope ms(g1mm(), false /* full_gc */, collector_state()->yc_type() == Mixed /* mixed_gc */);


2906 
2907     G1HeapTransition heap_transition(this);
2908     size_t heap_used_bytes_before_gc = used();
2909 
2910     // Don't dynamically change the number of GC threads this early.  A value of
2911     // 0 is used to indicate serial work.  When parallel work is done,
2912     // it will be set.
2913 
2914     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2915       IsGCActiveMark x;
2916 
2917       gc_prologue(false);
2918 
2919       if (VerifyRememberedSets) {
2920         log_info(gc, verify)("[Verifying RemSets before GC]");
2921         VerifyRegionRemSetClosure v_cl;
2922         heap_region_iterate(&v_cl);
2923       }
2924 
2925       _verifier->verify_before_gc(verify_type);

