
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

 627       log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
 628                                 HeapRegion::GrainWords * HeapWordSize * commits);
 629 
 630     }
 631 
 632     // Mark each G1 region touched by the range as archive, add it to
 633     // the archive set, and set top.
 634     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
 635     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 636     prev_last_region = last_region;
 637 
 638     while (curr_region != NULL) {
 639       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 640              "Region already in use (index %u)", curr_region->hrm_index());
 641       if (open) {
 642         curr_region->set_open_archive();
 643       } else {
 644         curr_region->set_closed_archive();
 645       }
 646       _hr_printer.alloc(curr_region);
 647       _archive_set.add(curr_region);
 648       HeapWord* top;
 649       HeapRegion* next_region;
 650       if (curr_region != last_region) {
 651         top = curr_region->end();
 652         next_region = _hrm.next_region_in_heap(curr_region);
 653       } else {
 654         top = last_address + 1;
 655         next_region = NULL;
 656       }
 657       curr_region->set_top(top);
 658       curr_region->set_first_dead(top);
 659       curr_region->set_end_of_live(top);
 660       curr_region = next_region;
 661     }
 662 
 663     // Notify mark-sweep of the archive
 664     G1ArchiveAllocator::set_range_archive(curr_range, open);
 665   }
 666   return true;
 667 }
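
The loop above consumes every region spanned by the range whole, except the last one, whose top is capped just past last_address. A minimal, self-contained sketch of that pattern (not HotSpot code; Region, kRegionBytes and cover_range are illustrative names, assuming contiguous, equally sized regions):

    #include <cstddef>
    #include <vector>

    const size_t kRegionBytes = 1 << 20;      // stand-in for HeapRegion::GrainBytes

    struct Region {
      char* bottom;
      char* top;                              // first unused byte
      char* end() const { return bottom + kRegionBytes; }
    };

    // Regions fully inside [start, last] are filled to end(); the last
    // region's top becomes last + 1, mirroring "top = last_address + 1".
    void cover_range(std::vector<Region>& regions, char* start, char* last) {
      size_t first_idx = (start - regions[0].bottom) / kRegionBytes;
      size_t last_idx  = (last  - regions[0].bottom) / kRegionBytes;
      for (size_t i = first_idx; i <= last_idx; i++) {
        regions[i].top = (i == last_idx) ? last + 1 : regions[i].end();
      }
    }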


1589 
1590 jint G1CollectedHeap::initialize_concurrent_refinement() {
1591   jint ecode = JNI_OK;
1592   _cr = G1ConcurrentRefine::create(&ecode);
1593   return ecode;
1594 }
1595 
1596 jint G1CollectedHeap::initialize_young_gen_sampling_thread() {
1597   _young_gen_sampling_thread = new G1YoungRemSetSamplingThread();
1598   if (_young_gen_sampling_thread->osthread() == NULL) {
1599     vm_shutdown_during_initialization("Could not create G1YoungRemSetSamplingThread");
1600     return JNI_ENOMEM;
1601   }
1602   return JNI_OK;
1603 }
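
Both helpers follow the VM startup convention visible here: failure is reported through a jint status code rather than an exception, and the caller aborts at the first non-JNI_OK result. A self-contained sketch of the pattern (illustrative names; the constant values match jni.h):

    // Values as defined in jni.h: JNI_OK == 0, JNI_ENOMEM == -4.
    enum { SKETCH_JNI_OK = 0, SKETCH_JNI_ENOMEM = -4 };

    int init_refinement_sketch()      { /* ... */ return SKETCH_JNI_OK; }
    int init_sampling_thread_sketch() { /* out of memory */ return SKETCH_JNI_ENOMEM; }

    // Mirrors the shape of G1CollectedHeap::initialize(): propagate the
    // first failing subsystem's code and stop initializing.
    int initialize_sketch() {
      int ecode = init_refinement_sketch();
      if (ecode != SKETCH_JNI_OK) return ecode;
      ecode = init_sampling_thread_sketch();
      if (ecode != SKETCH_JNI_OK) return ecode;
      return SKETCH_JNI_OK;
    }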
1604 
1605 jint G1CollectedHeap::initialize() {
1606   os::enable_vtime();
1607 
1608   // Necessary to satisfy locking discipline assertions.
1609 
1610   MutexLocker x(Heap_lock);
1611 
1612   // While there are no constraints in the GC code that HeapWordSize
1613   // be any particular value, there are multiple other areas in the
1614   // system which believe this to be true (e.g. oop->object_size in some
1615   // cases incorrectly returns the size in wordSize units rather than
1616   // HeapWordSize).
1617   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1618 
1619   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1620   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1621   size_t heap_alignment = collector_policy()->heap_alignment();
1622 
1623   // Ensure that the sizes are properly aligned.
1624   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1625   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1626   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1627 
1628   // Reserve the maximum.
1629 
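
The three check_alignment calls guarantee that both heap sizes are multiples of the region grain and that the maximum size also respects the overall heap alignment. For power-of-two alignments the test reduces to a mask check; a minimal sketch (hypothetical helper, not the Universe API):

    #include <cstddef>

    // Exact for power-of-two alignments, as G1 region sizes are.
    inline bool is_size_aligned(size_t size, size_t alignment) {
      return (size & (alignment - 1)) == 0;
    }

    // e.g. with 32M regions: is_size_aligned(64u << 20, 32u << 20) -> true,
    // while a 48M request would fail the check and abort initialization.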


1816   _young_gen_sampling_thread->stop();
1817   _cm_thread->stop();
1818   if (G1StringDedup::is_enabled()) {
1819     G1StringDedup::stop();
1820   }
1821 }
1822 
1823 void G1CollectedHeap::safepoint_synchronize_begin() {
1824   SuspendibleThreadSet::synchronize();
1825 }
1826 
1827 void G1CollectedHeap::safepoint_synchronize_end() {
1828   SuspendibleThreadSet::desynchronize();
1829 }
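
These two hooks bracket a safepoint: concurrent GC threads that joined the suspendible set must park at their next yield point until the VM thread releases them. A simplified, self-contained model of the handshake (the real SuspendibleThreadSet additionally waits until every joined thread has actually reached a yield point):

    #include <condition_variable>
    #include <mutex>

    class SuspendGate {
      std::mutex _m;
      std::condition_variable _cv;
      bool _suspended = false;
     public:
      void synchronize() {                         // cf. safepoint_synchronize_begin
        std::lock_guard<std::mutex> l(_m);
        _suspended = true;
      }
      void desynchronize() {                       // cf. safepoint_synchronize_end
        { std::lock_guard<std::mutex> l(_m); _suspended = false; }
        _cv.notify_all();
      }
      // Concurrent GC threads call this at each safe yield point.
      void yield_if_suspended() {
        std::unique_lock<std::mutex> l(_m);
        _cv.wait(l, [this] { return !_suspended; });
      }
    };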
1830 
1831 size_t G1CollectedHeap::conservative_max_heap_alignment() {
1832   return HeapRegion::max_region_size();
1833 }
1834 
1835 void G1CollectedHeap::post_initialize() {
1836   CollectedHeap::post_initialize();
1837   ref_processing_init();
1838 }
1839 
1840 void G1CollectedHeap::ref_processing_init() {
1841   // Reference processing in G1 currently works as follows:
1842   //
1843   // * There are two reference processor instances. One is
1844   //   used to record and process discovered references
1845   //   during concurrent marking; the other is used to
1846   //   record and process references during STW pauses
1847   //   (both full and incremental).
1848   // * Both ref processors need to 'span' the entire heap as
1849   //   the regions in the collection set may be dotted around.
1850   //
1851   // * For the concurrent marking ref processor:
1852   //   * Reference discovery is enabled at initial marking.
1853   //   * Reference discovery is disabled and the discovered
1854   //     references processed etc during remarking.
1855   //   * Reference discovery is MT (see below).
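
The lifecycle the comment describes for the concurrent-marking processor is: switch discovery on at initial mark, let references accumulate while marking runs concurrently, then switch discovery off and drain the discovered list at remark. A deliberately simplified sketch of that state machine (illustrative names, not the ReferenceProcessor API):

    #include <vector>

    struct DiscoverySketch {
      bool discovering = false;
      std::vector<void*> discovered;

      void initial_mark() { discovering = true; }       // enable discovery
      void note_reference(void* ref) {                  // during concurrent mark
        if (discovering) discovered.push_back(ref);
      }
      void remark() {                                   // disable, then process
        discovering = false;
        for (void* ref : discovered) { (void)ref; /* process/enqueue */ }
        discovered.clear();
      }
    };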


2882       verify_type = G1HeapVerifier::G1VerifyConcurrentStart;
2883     } else if (collector_state()->in_young_only_phase()) {
2884       if (collector_state()->in_young_gc_before_mixed()) {
2885         gc_string.append("(Prepare Mixed)");
2886       } else {
2887         gc_string.append("(Normal)");
2888       }
2889       verify_type = G1HeapVerifier::G1VerifyYoungNormal;
2890     } else {
2891       gc_string.append("(Mixed)");
2892       verify_type = G1HeapVerifier::G1VerifyMixed;
2893     }
2894     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2895 
2896     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2897                                                                   workers()->active_workers(),
2898                                                                   Threads::number_of_non_daemon_threads());
2899     active_workers = workers()->update_active_workers(active_workers);
2900     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
2901 
2902     G1MonitoringScope ms(g1mm(),
2903                          false /* full_gc */,
2904                          collector_state()->yc_type() == Mixed /* all_memory_pools_affected */);
2905 
2906     G1HeapTransition heap_transition(this);
2907     size_t heap_used_bytes_before_gc = used();
2908 
2909     // Don't dynamically change the number of GC threads this early.  A value of
2910     // 0 is used to indicate serial work.  When parallel work is done,
2911     // it will be set.
2912 
2913     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2914       IsGCActiveMark x;
2915 
2916       gc_prologue(false);
2917 
2918       if (VerifyRememberedSets) {
2919         log_info(gc, verify)("[Verifying RemSets before GC]");
2920         VerifyRegionRemSetClosure v_cl;
2921         heap_region_iterate(&v_cl);
2922       }
2923 
2924       _verifier->verify_before_gc(verify_type);
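
VerifyRegionRemSetClosure is applied through heap_region_iterate(), G1's closure-based visitor over all regions, where returning true from the callback terminates the walk early. A self-contained sketch of that iteration pattern (illustrative types):

    #include <vector>

    struct RegionSketch { /* region state */ };

    struct RegionClosureSketch {
      virtual ~RegionClosureSketch() = default;
      virtual bool do_heap_region(RegionSketch* r) = 0;  // true == stop iterating
    };

    void heap_region_iterate_sketch(std::vector<RegionSketch>& heap,
                                    RegionClosureSketch* cl) {
      for (RegionSketch& r : heap) {
        if (cl->do_heap_region(&r)) {
          break;                                         // closure asked to abort
        }
      }
    }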


src/hotspot/share/gc/g1/g1CollectedHeap.cpp

 627       log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
 628                                 HeapRegion::GrainWords * HeapWordSize * commits);
 629 
 630     }
 631 
 632     // Mark each G1 region touched by the range as archive, add it to
 633     // the archive set, and set top.
 634     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
 635     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 636     prev_last_region = last_region;
 637 
 638     while (curr_region != NULL) {
 639       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 640              "Region already in use (index %u)", curr_region->hrm_index());
 641       if (open) {
 642         curr_region->set_open_archive();
 643       } else {
 644         curr_region->set_closed_archive();
 645       }
 646       _hr_printer.alloc(curr_region);
 647       archive_set_add(curr_region);
 648       HeapWord* top;
 649       HeapRegion* next_region;
 650       if (curr_region != last_region) {
 651         top = curr_region->end();
 652         next_region = _hrm.next_region_in_heap(curr_region);
 653       } else {
 654         top = last_address + 1;
 655         next_region = NULL;
 656       }
 657       curr_region->set_top(top);
 658       curr_region->set_first_dead(top);
 659       curr_region->set_end_of_live(top);
 660       curr_region = next_region;
 661     }
 662 
 663     // Notify mark-sweep of the archive
 664     G1ArchiveAllocator::set_range_archive(curr_range, open);
 665   }
 666   return true;
 667 }
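
Note the difference from the earlier version of this hunk: the direct _archive_set.add(curr_region) call becomes archive_set_add(curr_region). The wrapper's body is not part of this hunk; a plausible shape, consistent with the locking-discipline comments added elsewhere in this change, is to funnel every mutation of the set through one guarded function (hypothetical sketch):

    #include <mutex>

    struct RegionSetSketch { void add(void* region) { /* ... */ } };

    std::mutex region_sets_lock;    // hypothetical stand-in for the guarding mutex
    RegionSetSketch archive_set_sketch;

    // All callers go through this function, so the locking rule can be
    // enforced (or asserted) in exactly one place.
    void archive_set_add_sketch(void* region) {
      std::lock_guard<std::mutex> guard(region_sets_lock);
      archive_set_sketch.add(region);
    }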


1589 
1590 jint G1CollectedHeap::initialize_concurrent_refinement() {
1591   jint ecode = JNI_OK;
1592   _cr = G1ConcurrentRefine::create(&ecode);
1593   return ecode;
1594 }
1595 
1596 jint G1CollectedHeap::initialize_young_gen_sampling_thread() {
1597   _young_gen_sampling_thread = new G1YoungRemSetSamplingThread();
1598   if (_young_gen_sampling_thread->osthread() == NULL) {
1599     vm_shutdown_during_initialization("Could not create G1YoungRemSetSamplingThread");
1600     return JNI_ENOMEM;
1601   }
1602   return JNI_OK;
1603 }
1604 
1605 jint G1CollectedHeap::initialize() {
1606   os::enable_vtime();
1607 
1608   // Necessary to satisfy locking discipline assertions.
1609   MutexLocker x(Heap_lock);
1610 
1611   // While there are no constraints in the GC code that HeapWordSize
1612   // be any particular value, there are multiple other areas in the
1613   // system which believe this to be true (e.g. oop->object_size in some
1614   // cases incorrectly returns the size in wordSize units rather than
1615   // HeapWordSize).
1616   guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");
1617 
1618   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
1619   size_t max_byte_size = collector_policy()->max_heap_byte_size();
1620   size_t heap_alignment = collector_policy()->heap_alignment();
1621 
1622   // Ensure that the sizes are properly aligned.
1623   Universe::check_alignment(init_byte_size, HeapRegion::GrainBytes, "g1 heap");
1624   Universe::check_alignment(max_byte_size, HeapRegion::GrainBytes, "g1 heap");
1625   Universe::check_alignment(max_byte_size, heap_alignment, "g1 heap");
1626 
1627   // Reserve the maximum.
1628 


1815   _young_gen_sampling_thread->stop();
1816   _cm_thread->stop();
1817   if (G1StringDedup::is_enabled()) {
1818     G1StringDedup::stop();
1819   }
1820 }
1821 
1822 void G1CollectedHeap::safepoint_synchronize_begin() {
1823   SuspendibleThreadSet::synchronize();
1824 }
1825 
1826 void G1CollectedHeap::safepoint_synchronize_end() {
1827   SuspendibleThreadSet::desynchronize();
1828 }
1829 
1830 size_t G1CollectedHeap::conservative_max_heap_alignment() {
1831   return HeapRegion::max_region_size();
1832 }
1833 
1834 void G1CollectedHeap::post_initialize() {
1835   // Necessary to satisfy locking discipline assertions.
1836   MutexLockerEx x(Heap_lock);
1837 
1838   CollectedHeap::post_initialize();
1839   ref_processing_init();
1840 }
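
The MutexLockerEx added here is a scoped (RAII) guard: Heap_lock is held for the whole of post_initialize(), purely so that code asserting the locking discipline passes during initialization, and it is released on every exit path. std::lock_guard is the standard-library analogue of the same pattern:

    #include <mutex>

    std::mutex heap_lock_sketch;    // stand-in for Heap_lock

    void post_initialize_sketch() {
      // Acquired in the constructor, released automatically at scope exit.
      std::lock_guard<std::mutex> x(heap_lock_sketch);
      // ... initialization work that expects the lock to be held ...
    }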
1841 
1842 void G1CollectedHeap::ref_processing_init() {
1843   // Reference processing in G1 currently works as follows:
1844   //
1845   // * There are two reference processor instances. One is
1846   //   used to record and process discovered references
1847   //   during concurrent marking; the other is used to
1848   //   record and process references during STW pauses
1849   //   (both full and incremental).
1850   // * Both ref processors need to 'span' the entire heap as
1851   //   the regions in the collection set may be dotted around.
1852   //
1853   // * For the concurrent marking ref processor:
1854   //   * Reference discovery is enabled at initial marking.
1855   //   * Reference discovery is disabled and the discovered
1856   //     references processed etc during remarking.
1857   //   * Reference discovery is MT (see below).


2884       verify_type = G1HeapVerifier::G1VerifyConcurrentStart;
2885     } else if (collector_state()->in_young_only_phase()) {
2886       if (collector_state()->in_young_gc_before_mixed()) {
2887         gc_string.append("(Prepare Mixed)");
2888       } else {
2889         gc_string.append("(Normal)");
2890       }
2891       verify_type = G1HeapVerifier::G1VerifyYoungNormal;
2892     } else {
2893       gc_string.append("(Mixed)");
2894       verify_type = G1HeapVerifier::G1VerifyMixed;
2895     }
2896     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2897 
2898     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2899                                                                   workers()->active_workers(),
2900                                                                   Threads::number_of_non_daemon_threads());
2901     active_workers = workers()->update_active_workers(active_workers);
2902     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
2903 
2904     G1MonitoringScope ms(g1mm(), false /* full_gc */, collector_state()->yc_type() == Mixed /* mixed_gc */);
2905 
2906     G1HeapTransition heap_transition(this);
2907     size_t heap_used_bytes_before_gc = used();
2908 
2909     // Don't dynamically change the number of GC threads this early.  A value of
2910     // 0 is used to indicate serial work.  When parallel work is done,
2911     // it will be set.
2912 
2913     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
2914       IsGCActiveMark x;
2915 
2916       gc_prologue(false);
2917 
2918       if (VerifyRememberedSets) {
2919         log_info(gc, verify)("[Verifying RemSets before GC]");
2920         VerifyRegionRemSetClosure v_cl;
2921         heap_region_iterate(&v_cl);
2922       }
2923 
2924       _verifier->verify_before_gc(verify_type);
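
The calc_active_workers call earlier in this hunk sizes the evacuation worker pool from the pool's total capacity, the previously active count, and the number of non-daemon Java threads. A rough, illustrative reduction of that policy (the real AdaptiveSizePolicy heuristics are considerably more involved):

    #include <algorithm>

    // Illustrative only: scale workers with mutator activity, but never
    // exceed the configured pool and never drop below one worker.
    unsigned calc_active_workers_sketch(unsigned total_workers,
                                        unsigned active_workers,
                                        unsigned non_daemon_threads) {
      unsigned wanted = std::max(active_workers, non_daemon_threads);
      return std::min(std::max(wanted, 1u), total_workers);
    }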

