731 } else if (region_offset(dest_addr) == 0) {
732 // Data from cur_region will be copied to the start of the destination
733 // region.
734 _region_data[dest_region_1].set_source_region(cur_region);
735 }
736
737 _region_data[cur_region].set_destination_count(destination_count);
738 _region_data[cur_region].set_data_location(region_to_addr(cur_region));
739 dest_addr += words;
740 }
741
742 ++cur_region;
743 }
744
745 *target_next = dest_addr;
746 return true;
747 }
748
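// Compute the post-compaction address of the marked object at addr: the
// destination of the object's region plus the amount of live data that
// precedes the object within that region.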
749 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
750 assert(addr != NULL, "Should detect NULL oop earlier");
751 assert(PSParallelCompact::gc_heap()->is_in(addr), "not in heap");
752 assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
753
754 // Region covering the object.
755 RegionData* const region_ptr = addr_to_region_ptr(addr);
756 HeapWord* result = region_ptr->destination();
757
758 // If the entire Region is live, the new location is region->destination + the
759 // offset of the object within the Region.
760
761 // It would be worth running performance tests to determine whether this special
762 // case pays off.  It is worthwhile for pointers into the dense prefix.  If the
763 // optimization to avoid pointer updates in regions that only point to the dense
764 // prefix is ever implemented, this should be revisited.
765 if (region_ptr->data_size() == RegionSize) {
766 result += region_offset(addr);
767 return result;
768 }
769
770 // Otherwise, the new location is region->destination + block offset + the
771 // number of live words in the Block that are (a) to the left of addr and (b)
819 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
820
821 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
822
823 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
824 void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
825
826 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
827 PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;
828
829 void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }
830
831 void PSParallelCompact::FollowKlassClosure::do_klass(Klass* klass) {
832 klass->oops_do(_mark_and_push_closure);
833 }
834 void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
835 klass->oops_do(&PSParallelCompact::_adjust_pointer_closure);
836 }
837
838 void PSParallelCompact::post_initialize() {
839 ParallelScavengeHeap* heap = gc_heap();
840 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
841
842 MemRegion mr = heap->reserved_region();
843 _ref_processor =
844 new ReferenceProcessor(mr, // span
845 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
846 (int) ParallelGCThreads, // mt processing degree
847 true, // mt discovery
848 (int) ParallelGCThreads, // mt discovery degree
849 true, // atomic_discovery
850 &_is_alive_closure); // non-header is alive closure
851 _counters = new CollectorCounters("PSParallelCompact", 1);
852
853 // Initialize static fields in ParCompactionManager.
854 ParCompactionManager::initialize(mark_bitmap());
855 }
856
857 bool PSParallelCompact::initialize() {
858 ParallelScavengeHeap* heap = gc_heap();
859 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
860 MemRegion mr = heap->reserved_region();
861
863 // Was the old gen allocated successfully?
863 if (!heap->old_gen()->is_allocated()) {
864 return false;
865 }
866
867 initialize_space_info();
868 initialize_dead_wood_limiter();
869
870 if (!_mark_bitmap.initialize(mr)) {
871 vm_shutdown_during_initialization(
872 err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel "
873 "garbage collection for the requested " SIZE_FORMAT "KB heap.",
874 _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
875 return false;
876 }
877
878 if (!_summary_data.initialize(mr)) {
879 vm_shutdown_during_initialization(
880 err_msg("Unable to allocate " SIZE_FORMAT "KB card tables for parallel "
881 "garbage collection for the requested " SIZE_FORMAT "KB heap.",
882 _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
883 return false;
884 }
885
886 return true;
887 }
888
889 void PSParallelCompact::initialize_space_info()
890 {
891 memset(&_space_info, 0, sizeof(_space_info));
892
893 ParallelScavengeHeap* heap = gc_heap();
894 PSYoungGen* young_gen = heap->young_gen();
895
896 _space_info[old_space_id].set_space(heap->old_gen()->object_space());
897 _space_info[eden_space_id].set_space(young_gen->eden_space());
898 _space_info[from_space_id].set_space(young_gen->from_space());
899 _space_info[to_space_id].set_space(young_gen->to_space());
900
901 _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
902 }
903
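// Configure the dead wood limiter, which bounds how much unreclaimed ("dead")
// space is tolerated when choosing the dense prefix.  The Mean/StdDev flags are
// percentages (0-100), converted to fractions here; _dwl_first_term is the
// normalizing coefficient 1/(sqrt(2*pi)*sigma) of the normal density evaluated
// by normal_distribution().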
904 void PSParallelCompact::initialize_dead_wood_limiter()
905 {
906 const size_t max = 100;
907 _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
908 _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
909 _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
910 DEBUG_ONLY(_dwl_initialized = true;)
911 _dwl_adjustment = normal_distribution(1.0);
912 }
913
956 const size_t beg_region = _summary_data.addr_to_region_idx(bot);
957 const size_t end_region =
958 _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
959 _summary_data.clear_range(beg_region, end_region);
960
961 // Clear the data used to 'split' regions.
962 SplitInfo& split_info = _space_info[id].split_info();
963 if (split_info.is_valid()) {
964 split_info.clear();
965 }
966 DEBUG_ONLY(split_info.verify_clear();)
967 }
968
969 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
970 {
971 // Update the from & to space pointers in space_info, since they are swapped
972 // at each young gen gc. Do the update unconditionally (even though a
973 // promotion failure does not swap spaces) because an unknown number of minor
974 // collections will have swapped the spaces an unknown number of times.
975 GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
976 ParallelScavengeHeap* heap = gc_heap();
977 _space_info[from_space_id].set_space(heap->young_gen()->from_space());
978 _space_info[to_space_id].set_space(heap->young_gen()->to_space());
979
980 pre_gc_values->fill(heap);
981
982 DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
983 DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
984
985 // Increment the invocation count
986 heap->increment_total_collections(true);
987
988 // We need to track unique mark sweep invocations as well.
989 _total_invocations++;
990
991 heap->print_heap_before_gc();
992 heap->trace_heap_before_gc(&_gc_tracer);
993
994 // Fill in TLABs
995 heap->accumulate_statistics_all_tlabs();
996 heap->ensure_parsability(true); // retire TLABs
1011
1012 // Have worker threads release resources the next time they run a task.
1013 gc_task_manager()->release_all_resources();
1014 }
1015
1016 void PSParallelCompact::post_compact()
1017 {
1018 GCTraceTime tm("post compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
1019
1020 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1021 // Clear the marking bitmap, summary data and split info.
1022 clear_data_covering_space(SpaceId(id));
1023 // Update top(). Must be done after clearing the bitmap and summary data.
1024 _space_info[id].publish_new_top();
1025 }
1026
1027 MutableSpace* const eden_space = _space_info[eden_space_id].space();
1028 MutableSpace* const from_space = _space_info[from_space_id].space();
1029 MutableSpace* const to_space = _space_info[to_space_id].space();
1030
1031 ParallelScavengeHeap* heap = gc_heap();
1032 bool eden_empty = eden_space->is_empty();
1033 if (!eden_empty) {
1034 eden_empty = absorb_live_data_from_eden(heap->size_policy(),
1035 heap->young_gen(), heap->old_gen());
1036 }
1037
1038 // Update heap occupancy information which is used as input to the soft ref
1039 // clearing policy at the next gc.
1040 Universe::update_heap_info_at_gc();
1041
1042 bool young_gen_empty = eden_empty && from_space->is_empty() &&
1043 to_space->is_empty();
1044
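// Compaction has moved objects, so the card marks over the old gen are stale.
// If the young gen is completely empty there can be no old-to-young pointers,
// so the old gen cards can simply be cleared; otherwise the old gen is
// invalidated (marked dirty) so the next scavenge rescans it conservatively.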
1045 ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
1046 MemRegion old_mr = heap->old_gen()->reserved();
1047 if (young_gen_empty) {
1048 modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
1049 } else {
1050 modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
1051 }
1949 }
1950 }
1951
1952 // This method should contain all heap-specific policy for invoking a full
1953 // collection. invoke_no_policy() will only attempt to compact the heap; it
1954 // will do nothing further.  If we need to bail out for policy reasons, scavenge
1955 // before the full gc, or perform any other specialized behavior, it needs to be added here.
1956 //
1957 // Note that this method should only be called from the vm_thread while at a
1958 // safepoint.
1959 //
1960 // Note that the all_soft_refs_clear flag in the collector policy
1961 // may be true because this method can be called without intervening
1962 // activity.  For example, when the heap space is tight and full measures
1963 // are being taken to free space.
1964 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1965 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1966 assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1967 "should be in vm thread");
1968
1969 ParallelScavengeHeap* heap = gc_heap();
1970 GCCause::Cause gc_cause = heap->gc_cause();
1971 assert(!heap->is_gc_active(), "not reentrant");
1972
1973 PSAdaptiveSizePolicy* policy = heap->size_policy();
1974 IsGCActiveMark mark;
1975
1976 if (ScavengeBeforeFullGC) {
1977 PSScavenge::invoke_no_policy();
1978 }
1979
1980 const bool clear_all_soft_refs =
1981 heap->collector_policy()->should_clear_all_soft_refs();
1982
1983 PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
1984 maximum_heap_compaction);
1985 }
1986
1987 // This method contains no policy. You should probably
1988 // be calling invoke() instead.
1989 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1990 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1991 assert(ref_processor() != NULL, "Sanity");
1992
1993 if (GC_locker::check_active_before_gc()) {
1994 return false;
1995 }
1996
1997 ParallelScavengeHeap* heap = gc_heap();
1998
1999 _gc_timer.register_gc_start();
2000 _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
2001
2002 TimeStamp marking_start;
2003 TimeStamp compaction_start;
2004 TimeStamp collection_exit;
2005
2006 GCCause::Cause gc_cause = heap->gc_cause();
2007 PSYoungGen* young_gen = heap->young_gen();
2008 PSOldGen* old_gen = heap->old_gen();
2009 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
2010
2011 // The scope of casr should end after code that can change
2012 // CollectorPolicy::_should_clear_all_soft_refs.
2013 ClearedAllSoftRefs casr(maximum_heap_compaction,
2014 heap->collector_policy());
2015
2016 if (ZapUnusedHeapArea) {
2017 // Save information needed to minimize mangling
2330 // full GCs and the value to use is unclear. Something like
2331 //
2332 // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2333
2334 size_policy->set_bytes_absorbed_from_eden(absorb_size);
2335 return true;
2336 }
2337
2338 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2339 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2340 "shouldn't return NULL");
2341 return ParallelScavengeHeap::gc_task_manager();
2342 }
2343
2344 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2345 bool maximum_heap_compaction,
2346 ParallelOldTracer *gc_tracer) {
2347 // Recursively traverse all live objects and mark them
2348 GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2349
2350 ParallelScavengeHeap* heap = gc_heap();
2351 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2352 uint active_gc_threads = heap->gc_task_manager()->active_workers();
2353 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2354 ParallelTaskTerminator terminator(active_gc_threads, qset);
2355
2356 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
2357 PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
2358
2359 // Need new claim bits before marking starts.
2360 ClassLoaderDataGraph::clear_claimed_marks();
2361
2362 {
2363 GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2364
2365 ParallelScavengeHeap::ParStrongRootsScope psrs;
2366
2367 GCTaskQueue* q = GCTaskQueue::create();
2368
2369 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2370 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2670 const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2671
2672 for (const rd_t* cur = beg; cur < end; ++cur) {
2673 ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2674 }
2675 out->print("%u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2676 for (size_t i = 0; i < histo_len; ++i) {
2677 out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
2678 histo[i], 100.0 * histo[i] / region_cnt);
2679 }
2680 out->cr();
2681 }
2682 }
2683 }
2684 #endif // #ifdef ASSERT
2685
2686 void PSParallelCompact::compact() {
2687 // trace("5");
2688 GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2689
2690 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
2691 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
2692 PSOldGen* old_gen = heap->old_gen();
2693 old_gen->start_array()->reset();
2694 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2695 uint active_gc_threads = heap->gc_task_manager()->active_workers();
2696 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2697 ParallelTaskTerminator terminator(active_gc_threads, qset);
2698
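// Build the compaction work queue: tasks that drain the per-space lists of
// regions to be filled, tasks that update pointers in the dense prefix, and
// stealing tasks that let idle workers take regions from other workers.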
2699 GCTaskQueue* q = GCTaskQueue::create();
2700 enqueue_region_draining_tasks(q, active_gc_threads);
2701 enqueue_dense_prefix_tasks(q, active_gc_threads);
2702 enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
2703
2704 {
2705 GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2706
2707 gc_task_manager()->execute_and_wait(q);
2708
2709 #ifdef ASSERT
2710 // Verify that all regions have been processed before the deferred updates.
2711 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2822 ParMarkBitMap::IterationStatus status;
2823 status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
2824 dense_prefix_end);
2825 if (status == ParMarkBitMap::incomplete) {
2826 update_closure.do_addr(update_closure.source());
2827 }
2828 }
2829
2830 // Mark the regions as filled.
2831 RegionData* const beg_cp = sd.region(beg_region);
2832 RegionData* const end_cp = sd.region(end_region);
2833 for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
2834 cp->set_completed();
2835 }
2836 }
2837
2838 // Return the SpaceId for the space containing addr. If addr is not in the
2839 // heap, last_space_id is returned. In debug mode it expects the address to be
2840 // in the heap and asserts such.
2841 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
2842 assert(Universe::heap()->is_in_reserved(addr), "addr not in the heap");
2843
2844 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2845 if (_space_info[id].space()->contains(addr)) {
2846 return SpaceId(id);
2847 }
2848 }
2849
2850 assert(false, "no space contains the addr");
2851 return last_space_id;
2852 }
2853
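// Update interior pointers of objects whose updates were deferred during the
// main compaction pass (typically objects that straddle destination region
// boundaries), now that the regions involved have been filled.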
2854 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
2855 SpaceId id) {
2856 assert(id < last_space_id, "bad space id");
2857
2858 ParallelCompactData& sd = summary_data();
2859 const SpaceInfo* const space_info = _space_info + id;
2860 ObjectStartArray* const start_array = space_info->start_array();
2861
2862 const MutableSpace* const space = space_info->space();
|
731 } else if (region_offset(dest_addr) == 0) {
732 // Data from cur_region will be copied to the start of the destination
733 // region.
734 _region_data[dest_region_1].set_source_region(cur_region);
735 }
736
737 _region_data[cur_region].set_destination_count(destination_count);
738 _region_data[cur_region].set_data_location(region_to_addr(cur_region));
739 dest_addr += words;
740 }
741
742 ++cur_region;
743 }
744
745 *target_next = dest_addr;
746 return true;
747 }
748
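// Compute the post-compaction address of the marked object at addr: the
// destination of the object's region plus the amount of live data that
// precedes the object within that region.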
749 HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
750 assert(addr != NULL, "Should detect NULL oop earlier");
751 assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
752 assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");
753
754 // Region covering the object.
755 RegionData* const region_ptr = addr_to_region_ptr(addr);
756 HeapWord* result = region_ptr->destination();
757
758 // If the entire Region is live, the new location is region->destination + the
759 // offset of the object within the Region.
760
761 // It would be worth running performance tests to determine whether this special
762 // case pays off.  It is worthwhile for pointers into the dense prefix.  If the
763 // optimization to avoid pointer updates in regions that only point to the dense
764 // prefix is ever implemented, this should be revisited.
765 if (region_ptr->data_size() == RegionSize) {
766 result += region_offset(addr);
767 return result;
768 }
769
770 // Otherwise, the new location is region->destination + block offset + the
771 // number of live words in the Block that are (a) to the left of addr and (b)
819 PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
820
821 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
822
823 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
824 void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
825
826 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
827 PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;
828
829 void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }
830
831 void PSParallelCompact::FollowKlassClosure::do_klass(Klass* klass) {
832 klass->oops_do(_mark_and_push_closure);
833 }
834 void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
835 klass->oops_do(&PSParallelCompact::_adjust_pointer_closure);
836 }
837
838 void PSParallelCompact::post_initialize() {
839 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
840 MemRegion mr = heap->reserved_region();
841 _ref_processor =
842 new ReferenceProcessor(mr, // span
843 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
844 (int) ParallelGCThreads, // mt processing degree
845 true, // mt discovery
846 (int) ParallelGCThreads, // mt discovery degree
847 true, // atomic_discovery
848 &_is_alive_closure); // non-header is alive closure
849 _counters = new CollectorCounters("PSParallelCompact", 1);
850
851 // Initialize static fields in ParCompactionManager.
852 ParCompactionManager::initialize(mark_bitmap());
853 }
854
855 bool PSParallelCompact::initialize() {
856 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
857 MemRegion mr = heap->reserved_region();
858
859 // Was the old gen allocated successfully?
860 if (!heap->old_gen()->is_allocated()) {
861 return false;
862 }
863
864 initialize_space_info();
865 initialize_dead_wood_limiter();
866
867 if (!_mark_bitmap.initialize(mr)) {
868 vm_shutdown_during_initialization(
869 err_msg("Unable to allocate " SIZE_FORMAT "KB bitmaps for parallel "
870 "garbage collection for the requested " SIZE_FORMAT "KB heap.",
871 _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
872 return false;
873 }
874
875 if (!_summary_data.initialize(mr)) {
876 vm_shutdown_during_initialization(
877 err_msg("Unable to allocate " SIZE_FORMAT "KB card tables for parallel "
878 "garbage collection for the requested " SIZE_FORMAT "KB heap.",
879 _summary_data.reserved_byte_size()/K, mr.byte_size()/K));
880 return false;
881 }
882
883 return true;
884 }
885
886 void PSParallelCompact::initialize_space_info()
887 {
888 memset(&_space_info, 0, sizeof(_space_info));
889
890 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
891 PSYoungGen* young_gen = heap->young_gen();
892
893 _space_info[old_space_id].set_space(heap->old_gen()->object_space());
894 _space_info[eden_space_id].set_space(young_gen->eden_space());
895 _space_info[from_space_id].set_space(young_gen->from_space());
896 _space_info[to_space_id].set_space(young_gen->to_space());
897
898 _space_info[old_space_id].set_start_array(heap->old_gen()->start_array());
899 }
900
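// Configure the dead wood limiter, which bounds how much unreclaimed ("dead")
// space is tolerated when choosing the dense prefix.  The Mean/StdDev flags are
// percentages (0-100), converted to fractions here; _dwl_first_term is the
// normalizing coefficient 1/(sqrt(2*pi)*sigma) of the normal density evaluated
// by normal_distribution().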
901 void PSParallelCompact::initialize_dead_wood_limiter()
902 {
903 const size_t max = 100;
904 _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
905 _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
906 _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
907 DEBUG_ONLY(_dwl_initialized = true;)
908 _dwl_adjustment = normal_distribution(1.0);
909 }
910
953 const size_t beg_region = _summary_data.addr_to_region_idx(bot);
954 const size_t end_region =
955 _summary_data.addr_to_region_idx(_summary_data.region_align_up(max_top));
956 _summary_data.clear_range(beg_region, end_region);
957
958 // Clear the data used to 'split' regions.
959 SplitInfo& split_info = _space_info[id].split_info();
960 if (split_info.is_valid()) {
961 split_info.clear();
962 }
963 DEBUG_ONLY(split_info.verify_clear();)
964 }
965
966 void PSParallelCompact::pre_compact(PreGCValues* pre_gc_values)
967 {
968 // Update the from & to space pointers in space_info, since they are swapped
969 // at each young gen gc. Do the update unconditionally (even though a
970 // promotion failure does not swap spaces) because an unknown number of minor
971 // collections will have swapped the spaces an unknown number of times.
972 GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
973 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
974 _space_info[from_space_id].set_space(heap->young_gen()->from_space());
975 _space_info[to_space_id].set_space(heap->young_gen()->to_space());
976
977 pre_gc_values->fill(heap);
978
979 DEBUG_ONLY(add_obj_count = add_obj_size = 0;)
980 DEBUG_ONLY(mark_bitmap_count = mark_bitmap_size = 0;)
981
982 // Increment the invocation count
983 heap->increment_total_collections(true);
984
985 // We need to track unique mark sweep invocations as well.
986 _total_invocations++;
987
988 heap->print_heap_before_gc();
989 heap->trace_heap_before_gc(&_gc_tracer);
990
991 // Fill in TLABs
992 heap->accumulate_statistics_all_tlabs();
993 heap->ensure_parsability(true); // retire TLABs
1008
1009 // Have worker threads release resources the next time they run a task.
1010 gc_task_manager()->release_all_resources();
1011 }
1012
1013 void PSParallelCompact::post_compact()
1014 {
1015 GCTraceTime tm("post compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
1016
1017 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1018 // Clear the marking bitmap, summary data and split info.
1019 clear_data_covering_space(SpaceId(id));
1020 // Update top(). Must be done after clearing the bitmap and summary data.
1021 _space_info[id].publish_new_top();
1022 }
1023
1024 MutableSpace* const eden_space = _space_info[eden_space_id].space();
1025 MutableSpace* const from_space = _space_info[from_space_id].space();
1026 MutableSpace* const to_space = _space_info[to_space_id].space();
1027
1028 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1029 bool eden_empty = eden_space->is_empty();
1030 if (!eden_empty) {
1031 eden_empty = absorb_live_data_from_eden(heap->size_policy(),
1032 heap->young_gen(), heap->old_gen());
1033 }
1034
1035 // Update heap occupancy information which is used as input to the soft ref
1036 // clearing policy at the next gc.
1037 Universe::update_heap_info_at_gc();
1038
1039 bool young_gen_empty = eden_empty && from_space->is_empty() &&
1040 to_space->is_empty();
1041
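// Compaction has moved objects, so the card marks over the old gen are stale.
// If the young gen is completely empty there can be no old-to-young pointers,
// so the old gen cards can simply be cleared; otherwise the old gen is
// invalidated (marked dirty) so the next scavenge rescans it conservatively.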
1042 ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
1043 MemRegion old_mr = heap->old_gen()->reserved();
1044 if (young_gen_empty) {
1045 modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
1046 } else {
1047 modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
1048 }
1946 }
1947 }
1948
1949 // This method should contain all heap-specific policy for invoking a full
1950 // collection. invoke_no_policy() will only attempt to compact the heap; it
1951 // will do nothing further.  If we need to bail out for policy reasons, scavenge
1952 // before the full gc, or perform any other specialized behavior, it needs to be added here.
1953 //
1954 // Note that this method should only be called from the vm_thread while at a
1955 // safepoint.
1956 //
1957 // Note that the all_soft_refs_clear flag in the collector policy
1958 // may be true because this method can be called without intervening
1959 // activity.  For example, when the heap space is tight and full measures
1960 // are being taken to free space.
1961 void PSParallelCompact::invoke(bool maximum_heap_compaction) {
1962 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1963 assert(Thread::current() == (Thread*)VMThread::vm_thread(),
1964 "should be in vm thread");
1965
1966 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1967 GCCause::Cause gc_cause = heap->gc_cause();
1968 assert(!heap->is_gc_active(), "not reentrant");
1969
1970 PSAdaptiveSizePolicy* policy = heap->size_policy();
1971 IsGCActiveMark mark;
1972
1973 if (ScavengeBeforeFullGC) {
1974 PSScavenge::invoke_no_policy();
1975 }
1976
1977 const bool clear_all_soft_refs =
1978 heap->collector_policy()->should_clear_all_soft_refs();
1979
1980 PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
1981 maximum_heap_compaction);
1982 }
1983
1984 // This method contains no policy. You should probably
1985 // be calling invoke() instead.
1986 bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
1987 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
1988 assert(ref_processor() != NULL, "Sanity");
1989
1990 if (GC_locker::check_active_before_gc()) {
1991 return false;
1992 }
1993
1994 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
1995
1996 _gc_timer.register_gc_start();
1997 _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
1998
1999 TimeStamp marking_start;
2000 TimeStamp compaction_start;
2001 TimeStamp collection_exit;
2002
2003 GCCause::Cause gc_cause = heap->gc_cause();
2004 PSYoungGen* young_gen = heap->young_gen();
2005 PSOldGen* old_gen = heap->old_gen();
2006 PSAdaptiveSizePolicy* size_policy = heap->size_policy();
2007
2008 // The scope of casr should end after code that can change
2009 // CollectorPolicy::_should_clear_all_soft_refs.
2010 ClearedAllSoftRefs casr(maximum_heap_compaction,
2011 heap->collector_policy());
2012
2013 if (ZapUnusedHeapArea) {
2014 // Save information needed to minimize mangling
2327 // full GCs and the value to use is unclear. Something like
2328 //
2329 // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
2330
2331 size_policy->set_bytes_absorbed_from_eden(absorb_size);
2332 return true;
2333 }
2334
2335 GCTaskManager* const PSParallelCompact::gc_task_manager() {
2336 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
2337 "shouldn't return NULL");
2338 return ParallelScavengeHeap::gc_task_manager();
2339 }
2340
2341 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2342 bool maximum_heap_compaction,
2343 ParallelOldTracer *gc_tracer) {
2344 // Recursively traverse all live objects and mark them
2345 GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2346
2347 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2348 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2349 uint active_gc_threads = heap->gc_task_manager()->active_workers();
2350 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2351 ParallelTaskTerminator terminator(active_gc_threads, qset);
2352
2353 PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
2354 PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
2355
2356 // Need new claim bits before marking starts.
2357 ClassLoaderDataGraph::clear_claimed_marks();
2358
2359 {
2360 GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2361
2362 ParallelScavengeHeap::ParStrongRootsScope psrs;
2363
2364 GCTaskQueue* q = GCTaskQueue::create();
2365
2366 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::universe));
2367 q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::jni_handles));
2667 const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2668
2669 for (const rd_t* cur = beg; cur < end; ++cur) {
2670 ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2671 }
2672 out->print("%u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2673 for (size_t i = 0; i < histo_len; ++i) {
2674 out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
2675 histo[i], 100.0 * histo[i] / region_cnt);
2676 }
2677 out->cr();
2678 }
2679 }
2680 }
2681 #endif // #ifdef ASSERT
2682
2683 void PSParallelCompact::compact() {
2684 // trace("5");
2685 GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2686
2687 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2688 PSOldGen* old_gen = heap->old_gen();
2689 old_gen->start_array()->reset();
2690 uint parallel_gc_threads = heap->gc_task_manager()->workers();
2691 uint active_gc_threads = heap->gc_task_manager()->active_workers();
2692 TaskQueueSetSuper* qset = ParCompactionManager::region_array();
2693 ParallelTaskTerminator terminator(active_gc_threads, qset);
2694
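// Build the compaction work queue: tasks that drain the per-space lists of
// regions to be filled, tasks that update pointers in the dense prefix, and
// stealing tasks that let idle workers take regions from other workers.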
2695 GCTaskQueue* q = GCTaskQueue::create();
2696 enqueue_region_draining_tasks(q, active_gc_threads);
2697 enqueue_dense_prefix_tasks(q, active_gc_threads);
2698 enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
2699
2700 {
2701 GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
2702
2703 gc_task_manager()->execute_and_wait(q);
2704
2705 #ifdef ASSERT
2706 // Verify that all regions have been processed before the deferred updates.
2707 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2818 ParMarkBitMap::IterationStatus status;
2819 status = mbm->iterate(&update_closure, &fill_closure, beg_addr, end_addr,
2820 dense_prefix_end);
2821 if (status == ParMarkBitMap::incomplete) {
2822 update_closure.do_addr(update_closure.source());
2823 }
2824 }
2825
2826 // Mark the regions as filled.
2827 RegionData* const beg_cp = sd.region(beg_region);
2828 RegionData* const end_cp = sd.region(end_region);
2829 for (RegionData* cp = beg_cp; cp < end_cp; ++cp) {
2830 cp->set_completed();
2831 }
2832 }
2833
2834 // Return the SpaceId for the space containing addr. If addr is not in the
2835 // heap, last_space_id is returned. In debug mode it expects the address to be
2836 // in the heap and asserts such.
2837 PSParallelCompact::SpaceId PSParallelCompact::space_id(HeapWord* addr) {
2838 assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
2839
2840 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
2841 if (_space_info[id].space()->contains(addr)) {
2842 return SpaceId(id);
2843 }
2844 }
2845
2846 assert(false, "no space contains the addr");
2847 return last_space_id;
2848 }
2849
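// Update interior pointers of objects whose updates were deferred during the
// main compaction pass (typically objects that straddle destination region
// boundaries), now that the regions involved have been filled.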
2850 void PSParallelCompact::update_deferred_objects(ParCompactionManager* cm,
2851 SpaceId id) {
2852 assert(id < last_space_id, "bad space id");
2853
2854 ParallelCompactData& sd = summary_data();
2855 const SpaceInfo* const space_info = _space_info + id;
2856 ObjectStartArray* const start_array = space_info->start_array();
2857
2858 const MutableSpace* const space = space_info->space();
|