1 /*
2 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
734 // Data from cur_region will be copied to the start of dest_region_2.
735 _region_data[dest_region_2].set_source_region(cur_region);
736 } else if (region_offset(dest_addr) == 0) {
737 // Data from cur_region will be copied to the start of the destination
738 // region.
739 _region_data[dest_region_1].set_source_region(cur_region);
740 }
741
742 _region_data[cur_region].set_destination_count(destination_count);
743 _region_data[cur_region].set_data_location(region_to_addr(cur_region));
744 dest_addr += words;
745 }
746
747 ++cur_region;
748 }
749
750 *target_next = dest_addr;
751 return true;
752 }
753
// Compute the post-compaction (forwarding) address of the marked object at
// addr, using the region table (and, when needed, the block table plus the
// mark bitmap).
HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
  assert(addr != NULL, "Should detect NULL oop earlier");
  assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
  assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");

  // Region covering the object.
  RegionData* const region_ptr = addr_to_region_ptr(addr);
  HeapWord* result = region_ptr->destination();

  // If the entire Region is live, the new location is region->destination + the
  // offset of the object within the Region.

  // Run some performance tests to determine if this special case pays off.  It
  // is worth it for pointers into the dense prefix.  If the optimization to
  // avoid pointer updates in regions that only point to the dense prefix is
  // ever implemented, this should be revisited.
  if (region_ptr->data_size() == RegionSize) {
    result += region_offset(addr);
    return result;
  }

  // Otherwise, the new location is region->destination + block offset + the
  // number of live words in the Block that are (a) to the left of addr and (b)
  // due to objects that start in the Block.

  // Fill in the block table if necessary.  This is unsynchronized, so multiple
  // threads may fill the block table for a region (harmless, since it is
  // idempotent).
  if (!region_ptr->blocks_filled()) {
    PSParallelCompact::fill_blocks(addr_to_region_idx(addr));
    region_ptr->set_blocks_filled();
  }

  // Count live words from the start of addr's block up to (but not including)
  // addr itself; the bitmap query is clipped to objects that start in range.
  HeapWord* const search_start = block_align_down(addr);
  const size_t block_offset = addr_to_block_ptr(addr)->offset();

  const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
  const size_t live = bitmap->live_words_in_range(search_start, oop(addr));
  result += block_offset + live;
  DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result));
  return result;
}
796
797 #ifdef ASSERT
798 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
799 {
800 const size_t* const beg = (const size_t*)vspace->committed_low_addr();
801 const size_t* const end = (const size_t*)vspace->committed_high_addr();
802 for (const size_t* p = beg; p < end; ++p) {
803 assert(*p == 0, "not zero");
804 }
805 }
806
// Debug-only: verify that both backing virtual spaces (the region table and
// the block table) contain only zeroes.
void ParallelCompactData::verify_clear()
{
  verify_clear(_region_vspace);
  verify_clear(_block_vspace);
}
812 #endif // #ifdef ASSERT
813
// Definitions of PSParallelCompact's static collector-wide state: timing and
// tracing, invocation bookkeeping, perf counters, the mark bitmap, and the
// region/block summary data.
STWGCTimer PSParallelCompact::_gc_timer;
ParallelOldTracer PSParallelCompact::_gc_tracer;
elapsedTimer PSParallelCompact::_accumulated_time;
unsigned int PSParallelCompact::_total_invocations = 0;
unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
jlong PSParallelCompact::_time_of_last_gc = 0;
CollectorCounters* PSParallelCompact::_counters = NULL;
ParMarkBitMap PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
825
826 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
827
// Shared closure instances used during the pointer-adjustment phase.
PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;
830
// Adjust all oops embedded in the Klass via the shared pointer-adjustment
// closure.
void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
  klass->oops_do(&PSParallelCompact::_adjust_pointer_closure);
}
834
// One-time setup after heap initialization: create the reference processor
// (spanning the whole reserved heap), the collector's perf counters, and the
// static ParCompactionManager state.
void PSParallelCompact::post_initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MemRegion mr = heap->reserved_region();
  _ref_processor =
    new ReferenceProcessor(mr,            // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           ParallelGCThreads, // mt processing degree
                           true,              // mt discovery
                           ParallelGCThreads, // mt discovery degree
                           true,              // atomic_discovery
                           &_is_alive_closure); // non-header is alive closure
  _counters = new CollectorCounters("PSParallelCompact", 1);

  // Initialize static fields in ParCompactionManager.
  ParCompactionManager::initialize(mark_bitmap());
}
851
852 bool PSParallelCompact::initialize() {
960 // Fill in TLABs
961 heap->accumulate_statistics_all_tlabs();
962 heap->ensure_parsability(true); // retire TLABs
963
964 if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
965 HandleMark hm; // Discard invalid handles created during verification
966 Universe::verify("Before GC");
967 }
968
969 // Verify object start arrays
970 if (VerifyObjectStartArray &&
971 VerifyBeforeGC) {
972 heap->old_gen()->verify_object_start_array();
973 }
974
975 DEBUG_ONLY(mark_bitmap()->verify_clear();)
976 DEBUG_ONLY(summary_data().verify_clear();)
977
978 // Have worker threads release resources the next time they run a task.
979 gc_task_manager()->release_all_resources();
980 }
981
982 void PSParallelCompact::post_compact()
983 {
984 GCTraceTime(Trace, gc, phases) tm("Post Compact", &_gc_timer);
985
986 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
987 // Clear the marking bitmap, summary data and split info.
988 clear_data_covering_space(SpaceId(id));
989 // Update top(). Must be done after clearing the bitmap and summary data.
990 _space_info[id].publish_new_top();
991 }
992
993 MutableSpace* const eden_space = _space_info[eden_space_id].space();
994 MutableSpace* const from_space = _space_info[from_space_id].space();
995 MutableSpace* const to_space = _space_info[to_space_id].space();
996
997 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
998 bool eden_empty = eden_space->is_empty();
999 if (!eden_empty) {
1784
1785 ref_processor()->enable_discovery();
1786 ref_processor()->setup_policy(maximum_heap_compaction);
1787
1788 bool marked_for_unloading = false;
1789
1790 marking_start.update();
1791 marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);
1792
1793 bool max_on_system_gc = UseMaximumCompactionOnSystemGC
1794 && GCCause::is_user_requested_gc(gc_cause);
1795 summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
1796
1797 #if defined(COMPILER2) || INCLUDE_JVMCI
1798 assert(DerivedPointerTable::is_active(), "Sanity");
1799 DerivedPointerTable::set_active(false);
1800 #endif
1801
1802 // adjust_roots() updates Universe::_intArrayKlassObj which is
1803 // needed by the compaction for filling holes in the dense prefix.
1804 adjust_roots();
1805
1806 compaction_start.update();
1807 compact();
1808
1809 // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
1810 // done before resizing.
1811 post_compact();
1812
1813 // Let the size policy know we're done
1814 size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
1815
1816 if (UseAdaptiveSizePolicy) {
1817 log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
1818 log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
1819 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
1820
1821 // Don't check if the size_policy is ready here. Let
1822 // the size_policy check that internally.
1823 if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
1824 AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
2125 CodeCache::do_unloading(is_alive_closure(), purged_class);
2126
2127 // Prune dead klasses from subklass/sibling/implementor lists.
2128 Klass::clean_weak_klass_links(is_alive_closure());
2129
2130 // Delete entries for dead interned strings.
2131 StringTable::unlink(is_alive_closure());
2132
2133 // Clean up unreferenced symbols in symbol table.
2134 SymbolTable::unlink();
2135 _gc_tracer.report_object_count_after_gc(is_alive_closure());
2136 }
2137
// This should be moved to the shared markSweep code!
// A BoolObjectClosure that reports every object as live.  Used when adjusting
// weak JNI handles below: dead referents were already cleared by reference
// processing, so everything remaining should be kept and adjusted.
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;
2144
// Walk all VM roots and update each root oop to its new (post-compaction)
// location.  Runs after the summary phase has computed forwarding addresses.
void PSParallelCompact::adjust_roots() {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Trace, gc, phases) tm("Adjust Roots", &_gc_timer);

  // Need new claim bits when tracing through and adjusting pointers.
  ClassLoaderDataGraph::clear_claimed_marks();

  // General strong roots.
  Universe::oops_do(adjust_pointer_closure());
  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(adjust_pointer_closure());
  FlatProfiler::oops_do(adjust_pointer_closure());
  Management::oops_do(adjust_pointer_closure());
  JvmtiExport::oops_do(adjust_pointer_closure());
  SystemDictionary::oops_do(adjust_pointer_closure());
  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());

  // Code cache blobs, with relocation fix-up for embedded oops.
  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  StringTable::oops_do(adjust_pointer_closure());
  ref_processor()->weak_oops_do(adjust_pointer_closure());
  // Roots were visited so references into the young gen in roots
  // may have been scanned.  Process them also.
  // Should the reference processor have a span that excludes
  // young gen objects?
  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
}
2179
2180 // Helper class to print 8 region numbers per line and then print the total at the end.
2181 class FillableRegionLogger : public StackObj {
2182 private:
2183 LogHandle(gc, compaction) log;
2184 static const int LineLength = 8;
2185 size_t _regions[LineLength];
2186 int _next_index;
2187 bool _enabled;
2188 size_t _total_regions;
2189 public:
2190 FillableRegionLogger() : _next_index(0), _total_regions(0), _enabled(develop_log_is_enabled(Trace, gc, compaction)) { }
2191 ~FillableRegionLogger() {
2192 log.trace(SIZE_FORMAT " initially fillable regions", _total_regions);
2193 }
2194
2195 void print_line() {
2196 if (!_enabled || _next_index == 0) {
2197 return;
3045
// Copy (part of) the object at source() to destination(), limited by the words
// remaining in this closure and by the end of the bitmap's covered region.
void MoveAndUpdateClosure::copy_partial_obj()
{
  size_t words = words_remaining();

  // If the object ends before range_end, shrink the copy to the object's
  // actual size in words.
  HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
  HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
  if (end_addr < range_end) {
    words = bitmap()->obj_size(source(), end_addr);
  }

  // This test is necessary; if omitted, the pointer updates to a partial object
  // that crosses the dense prefix boundary could be overwritten.
  if (source() != destination()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    Copy::aligned_conjoint_words(source(), destination(), words);
  }
  update_state(words);
}
3064
// Adjust every oop field of an instance, driven by the klass's oop maps.
void InstanceKlass::oop_pc_update_pointers(oop obj) {
  oop_oop_iterate_oop_maps<true>(obj, PSParallelCompact::adjust_pointer_closure());
}
3068
// Adjust a java.lang.Class mirror: regular instance fields first, then the
// static fields embedded in the mirror.
void InstanceMirrorKlass::oop_pc_update_pointers(oop obj) {
  InstanceKlass::oop_pc_update_pointers(obj);

  oop_oop_iterate_statics<true>(obj, PSParallelCompact::adjust_pointer_closure());
}
3074
// Class loader instances have no extra oop fields to handle here; delegate to
// the plain instance handling.
void InstanceClassLoaderKlass::oop_pc_update_pointers(oop obj) {
  InstanceKlass::oop_pc_update_pointers(obj);
}
3078
3079 #ifdef ASSERT
// Debug-only: log the addresses and current (possibly compressed) contents of
// a Reference's referent/next/discovered fields.  T is oop or narrowOop.
template <class T> static void trace_reference_gc(const char *s, oop obj,
                                                  T* referent_addr,
                                                  T* next_addr,
                                                  T* discovered_addr) {
  log_develop_trace(gc, ref)("%s obj " PTR_FORMAT, s, p2i(obj));
  log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
                             p2i(referent_addr), referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
  log_develop_trace(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
                             p2i(next_addr), next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
  log_develop_trace(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
                             p2i(discovered_addr), discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
}
3092 #endif
3093
// Adjust the Reference-specific fields (referent, next, discovered) of obj.
// T selects the field encoding: oop or narrowOop.
template <class T>
static void oop_pc_update_pointers_specialized(oop obj) {
  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
  PSParallelCompact::adjust_pointer(referent_addr);
  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
  PSParallelCompact::adjust_pointer(next_addr);
  T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
  PSParallelCompact::adjust_pointer(discovered_addr);
  debug_only(trace_reference_gc("InstanceRefKlass::oop_update_ptrs", obj,
                                referent_addr, next_addr, discovered_addr);)
}
3105
// Adjust a java.lang.ref.Reference instance: regular fields first, then the
// referent/next/discovered fields, dispatching on the compressed-oops mode.
void InstanceRefKlass::oop_pc_update_pointers(oop obj) {
  InstanceKlass::oop_pc_update_pointers(obj);

  if (UseCompressedOops) {
    oop_pc_update_pointers_specialized<narrowOop>(obj);
  } else {
    oop_pc_update_pointers_specialized<oop>(obj);
  }
}
3115
// Adjust every element oop of an object array.
void ObjArrayKlass::oop_pc_update_pointers(oop obj) {
  assert(obj->is_objArray(), "obj must be obj array");
  oop_oop_iterate_elements<true>(objArrayOop(obj), PSParallelCompact::adjust_pointer_closure());
}
3120
// Primitive arrays contain no oops; nothing to adjust beyond a sanity check.
void TypeArrayKlass::oop_pc_update_pointers(oop obj) {
  assert(obj->is_typeArray(),"must be a type array");
}
3124
3125 ParMarkBitMapClosure::IterationStatus
3126 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3127 assert(destination() != NULL, "sanity");
3128 assert(bitmap()->obj_size(addr) == words, "bad size");
3129
3130 _source = addr;
3131 assert(PSParallelCompact::summary_data().calc_new_pointer(source()) ==
3132 destination(), "wrong destination");
3133
3134 if (words > words_remaining()) {
3135 return ParMarkBitMap::would_overflow;
3136 }
3137
3138 // The start_array must be updated even if the object is not moving.
3139 if (_start_array != NULL) {
3140 _start_array->allocate_block(destination());
3141 }
3142
3143 if (destination() != source()) {
3144 DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3145 Copy::aligned_conjoint_words(source(), destination(), words);
3146 }
3147
3148 oop moved_oop = (oop) destination();
3149 compaction_manager()->update_contents(moved_oop);
3150 assert(moved_oop->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop));
3151
|
1 /*
2 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
734 // Data from cur_region will be copied to the start of dest_region_2.
735 _region_data[dest_region_2].set_source_region(cur_region);
736 } else if (region_offset(dest_addr) == 0) {
737 // Data from cur_region will be copied to the start of the destination
738 // region.
739 _region_data[dest_region_1].set_source_region(cur_region);
740 }
741
742 _region_data[cur_region].set_destination_count(destination_count);
743 _region_data[cur_region].set_data_location(region_to_addr(cur_region));
744 dest_addr += words;
745 }
746
747 ++cur_region;
748 }
749
750 *target_next = dest_addr;
751 return true;
752 }
753
// Compute the post-compaction (forwarding) address of the marked object at
// addr.  The ParCompactionManager is passed through to the bitmap's live-word
// query (presumably to use per-worker cached state — confirm in
// live_words_in_range); it does not otherwise affect the result.
HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr, ParCompactionManager* cm) {
  assert(addr != NULL, "Should detect NULL oop earlier");
  assert(ParallelScavengeHeap::heap()->is_in(addr), "not in heap");
  assert(PSParallelCompact::mark_bitmap()->is_marked(addr), "not marked");

  // Region covering the object.
  RegionData* const region_ptr = addr_to_region_ptr(addr);
  HeapWord* result = region_ptr->destination();

  // If the entire Region is live, the new location is region->destination + the
  // offset of the object within the Region.

  // Run some performance tests to determine if this special case pays off.  It
  // is worth it for pointers into the dense prefix.  If the optimization to
  // avoid pointer updates in regions that only point to the dense prefix is
  // ever implemented, this should be revisited.
  if (region_ptr->data_size() == RegionSize) {
    result += region_offset(addr);
    return result;
  }

  // Otherwise, the new location is region->destination + block offset + the
  // number of live words in the Block that are (a) to the left of addr and (b)
  // due to objects that start in the Block.

  // Fill in the block table if necessary.  This is unsynchronized, so multiple
  // threads may fill the block table for a region (harmless, since it is
  // idempotent).
  if (!region_ptr->blocks_filled()) {
    PSParallelCompact::fill_blocks(addr_to_region_idx(addr));
    region_ptr->set_blocks_filled();
  }

  // Count live words from the start of addr's block up to (but not including)
  // addr itself.
  HeapWord* const search_start = block_align_down(addr);
  const size_t block_offset = addr_to_block_ptr(addr)->offset();

  const ParMarkBitMap* bitmap = PSParallelCompact::mark_bitmap();
  const size_t live = bitmap->live_words_in_range(cm, search_start, oop(addr));
  result += block_offset + live;
  DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result));
  return result;
}
796
797 #ifdef ASSERT
798 void ParallelCompactData::verify_clear(const PSVirtualSpace* vspace)
799 {
800 const size_t* const beg = (const size_t*)vspace->committed_low_addr();
801 const size_t* const end = (const size_t*)vspace->committed_high_addr();
802 for (const size_t* p = beg; p < end; ++p) {
803 assert(*p == 0, "not zero");
804 }
805 }
806
// Debug-only: verify that both backing virtual spaces (the region table and
// the block table) contain only zeroes.
void ParallelCompactData::verify_clear()
{
  verify_clear(_region_vspace);
  verify_clear(_block_vspace);
}
812 #endif // #ifdef ASSERT
813
// Definitions of PSParallelCompact's static collector-wide state: timing and
// tracing, invocation bookkeeping, perf counters, the mark bitmap, and the
// region/block summary data.
STWGCTimer PSParallelCompact::_gc_timer;
ParallelOldTracer PSParallelCompact::_gc_tracer;
elapsedTimer PSParallelCompact::_accumulated_time;
unsigned int PSParallelCompact::_total_invocations = 0;
unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
jlong PSParallelCompact::_time_of_last_gc = 0;
CollectorCounters* PSParallelCompact::_counters = NULL;
ParMarkBitMap PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
825
826 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
827
// Adjust all oops embedded in the Klass, using a pointer-adjustment closure
// bound to this closure's compaction manager.
void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
  PSParallelCompact::AdjustPointerClosure closure(_cm);
  klass->oops_do(&closure);
}
832
// One-time setup after heap initialization: create the reference processor
// (spanning the whole reserved heap), the collector's perf counters, and the
// static ParCompactionManager state.
void PSParallelCompact::post_initialize() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  MemRegion mr = heap->reserved_region();
  _ref_processor =
    new ReferenceProcessor(mr,            // span
                           ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                           ParallelGCThreads, // mt processing degree
                           true,              // mt discovery
                           ParallelGCThreads, // mt discovery degree
                           true,              // atomic_discovery
                           &_is_alive_closure); // non-header is alive closure
  _counters = new CollectorCounters("PSParallelCompact", 1);

  // Initialize static fields in ParCompactionManager.
  ParCompactionManager::initialize(mark_bitmap());
}
849
850 bool PSParallelCompact::initialize() {
958 // Fill in TLABs
959 heap->accumulate_statistics_all_tlabs();
960 heap->ensure_parsability(true); // retire TLABs
961
962 if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
963 HandleMark hm; // Discard invalid handles created during verification
964 Universe::verify("Before GC");
965 }
966
967 // Verify object start arrays
968 if (VerifyObjectStartArray &&
969 VerifyBeforeGC) {
970 heap->old_gen()->verify_object_start_array();
971 }
972
973 DEBUG_ONLY(mark_bitmap()->verify_clear();)
974 DEBUG_ONLY(summary_data().verify_clear();)
975
976 // Have worker threads release resources the next time they run a task.
977 gc_task_manager()->release_all_resources();
978
979 ParCompactionManager::reset_cache_for_bitmap();
980 }
981
982 void PSParallelCompact::post_compact()
983 {
984 GCTraceTime(Trace, gc, phases) tm("Post Compact", &_gc_timer);
985
986 for (unsigned int id = old_space_id; id < last_space_id; ++id) {
987 // Clear the marking bitmap, summary data and split info.
988 clear_data_covering_space(SpaceId(id));
989 // Update top(). Must be done after clearing the bitmap and summary data.
990 _space_info[id].publish_new_top();
991 }
992
993 MutableSpace* const eden_space = _space_info[eden_space_id].space();
994 MutableSpace* const from_space = _space_info[from_space_id].space();
995 MutableSpace* const to_space = _space_info[to_space_id].space();
996
997 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
998 bool eden_empty = eden_space->is_empty();
999 if (!eden_empty) {
1784
1785 ref_processor()->enable_discovery();
1786 ref_processor()->setup_policy(maximum_heap_compaction);
1787
1788 bool marked_for_unloading = false;
1789
1790 marking_start.update();
1791 marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);
1792
1793 bool max_on_system_gc = UseMaximumCompactionOnSystemGC
1794 && GCCause::is_user_requested_gc(gc_cause);
1795 summary_phase(vmthread_cm, maximum_heap_compaction || max_on_system_gc);
1796
1797 #if defined(COMPILER2) || INCLUDE_JVMCI
1798 assert(DerivedPointerTable::is_active(), "Sanity");
1799 DerivedPointerTable::set_active(false);
1800 #endif
1801
1802 // adjust_roots() updates Universe::_intArrayKlassObj which is
1803 // needed by the compaction for filling holes in the dense prefix.
1804 adjust_roots(vmthread_cm);
1805
1806 compaction_start.update();
1807 compact();
1808
1809 // Reset the mark bitmap, summary data, and do other bookkeeping. Must be
1810 // done before resizing.
1811 post_compact();
1812
1813 // Let the size policy know we're done
1814 size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
1815
1816 if (UseAdaptiveSizePolicy) {
1817 log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
1818 log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
1819 old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
1820
1821 // Don't check if the size_policy is ready here. Let
1822 // the size_policy check that internally.
1823 if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
1824 AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
2125 CodeCache::do_unloading(is_alive_closure(), purged_class);
2126
2127 // Prune dead klasses from subklass/sibling/implementor lists.
2128 Klass::clean_weak_klass_links(is_alive_closure());
2129
2130 // Delete entries for dead interned strings.
2131 StringTable::unlink(is_alive_closure());
2132
2133 // Clean up unreferenced symbols in symbol table.
2134 SymbolTable::unlink();
2135 _gc_tracer.report_object_count_after_gc(is_alive_closure());
2136 }
2137
// This should be moved to the shared markSweep code!
// A BoolObjectClosure that reports every object as live.  Used when adjusting
// weak JNI handles below: dead referents were already cleared by reference
// processing, so everything remaining should be kept and adjusted.
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static PSAlwaysTrueClosure always_true;
2144
// Walk all VM roots and update each root oop to its new (post-compaction)
// location.  The adjustment closures are constructed on the stack, bound to
// the supplied compaction manager.
void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
  // Adjust the pointers to reflect the new locations
  GCTraceTime(Trace, gc, phases) tm("Adjust Roots", &_gc_timer);

  // Need new claim bits when tracing through and adjusting pointers.
  ClassLoaderDataGraph::clear_claimed_marks();

  PSParallelCompact::AdjustPointerClosure closure(cm);
  PSParallelCompact::AdjustKlassClosure kclosure(cm);

  // General strong roots.
  Universe::oops_do(&closure);
  JNIHandles::oops_do(&closure);   // Global (strong) JNI handles
  CLDToOopClosure adjust_from_cld(&closure);
  Threads::oops_do(&closure, &adjust_from_cld, NULL);
  ObjectSynchronizer::oops_do(&closure);
  FlatProfiler::oops_do(&closure);
  Management::oops_do(&closure);
  JvmtiExport::oops_do(&closure);
  SystemDictionary::oops_do(&closure);
  ClassLoaderDataGraph::oops_do(&closure, &kclosure, true);

  // Now adjust pointers in remaining weak roots.  (All of which should
  // have been cleared if they pointed to non-surviving objects.)
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, &closure);

  // Code cache blobs, with relocation fix-up for embedded oops.
  CodeBlobToOopClosure adjust_from_blobs(&closure, CodeBlobToOopClosure::FixRelocations);
  CodeCache::blobs_do(&adjust_from_blobs);
  StringTable::oops_do(&closure);
  ref_processor()->weak_oops_do(&closure);
  // Roots were visited so references into the young gen in roots
  // may have been scanned.  Process them also.
  // Should the reference processor have a span that excludes
  // young gen objects?
  PSScavenge::reference_processor()->weak_oops_do(&closure);
}
2182
2183 // Helper class to print 8 region numbers per line and then print the total at the end.
2184 class FillableRegionLogger : public StackObj {
2185 private:
2186 LogHandle(gc, compaction) log;
2187 static const int LineLength = 8;
2188 size_t _regions[LineLength];
2189 int _next_index;
2190 bool _enabled;
2191 size_t _total_regions;
2192 public:
2193 FillableRegionLogger() : _next_index(0), _total_regions(0), _enabled(develop_log_is_enabled(Trace, gc, compaction)) { }
2194 ~FillableRegionLogger() {
2195 log.trace(SIZE_FORMAT " initially fillable regions", _total_regions);
2196 }
2197
2198 void print_line() {
2199 if (!_enabled || _next_index == 0) {
2200 return;
3048
// Copy (part of) the object at source() to destination(), limited by the words
// remaining in this closure and by the end of the bitmap's covered region.
void MoveAndUpdateClosure::copy_partial_obj()
{
  size_t words = words_remaining();

  // If the object ends before range_end, shrink the copy to the object's
  // actual size in words.
  HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
  HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
  if (end_addr < range_end) {
    words = bitmap()->obj_size(source(), end_addr);
  }

  // This test is necessary; if omitted, the pointer updates to a partial object
  // that crosses the dense prefix boundary could be overwritten.
  if (source() != destination()) {
    DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    Copy::aligned_conjoint_words(source(), destination(), words);
  }
  update_state(words);
}
3067
// Adjust every oop field of an instance, driven by the klass's oop maps.
// The adjustment closure is bound to the supplied compaction manager.
void InstanceKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
  PSParallelCompact::AdjustPointerClosure closure(cm);
  oop_oop_iterate_oop_maps<true>(obj, &closure);
}
3072
// Adjust a java.lang.Class mirror: regular instance fields first, then the
// static fields embedded in the mirror.
void InstanceMirrorKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
  InstanceKlass::oop_pc_update_pointers(obj, cm);

  PSParallelCompact::AdjustPointerClosure closure(cm);
  oop_oop_iterate_statics<true>(obj, &closure);
}
3079
// Class loader instances have no extra oop fields to handle here; delegate to
// the plain instance handling.
void InstanceClassLoaderKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
  InstanceKlass::oop_pc_update_pointers(obj, cm);
}
3083
3084 #ifdef ASSERT
// Debug-only: log the addresses and current (possibly compressed) contents of
// a Reference's referent/next/discovered fields.  T is oop or narrowOop.
template <class T> static void trace_reference_gc(const char *s, oop obj,
                                                  T* referent_addr,
                                                  T* next_addr,
                                                  T* discovered_addr) {
  log_develop_trace(gc, ref)("%s obj " PTR_FORMAT, s, p2i(obj));
  log_develop_trace(gc, ref)("     referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
                             p2i(referent_addr), referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
  log_develop_trace(gc, ref)("     next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
                             p2i(next_addr), next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
  log_develop_trace(gc, ref)("     discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
                             p2i(discovered_addr), discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
}
3097 #endif
3098
// Adjust the Reference-specific fields (referent, next, discovered) of obj,
// threading the compaction manager through each adjustment.  T selects the
// field encoding: oop or narrowOop.
template <class T>
static void oop_pc_update_pointers_specialized(oop obj, ParCompactionManager* cm) {
  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
  PSParallelCompact::adjust_pointer(referent_addr, cm);
  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
  PSParallelCompact::adjust_pointer(next_addr, cm);
  T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
  PSParallelCompact::adjust_pointer(discovered_addr, cm);
  debug_only(trace_reference_gc("InstanceRefKlass::oop_update_ptrs", obj,
                                referent_addr, next_addr, discovered_addr);)
}
3110
// Adjust a java.lang.ref.Reference instance: regular fields first, then the
// referent/next/discovered fields, dispatching on the compressed-oops mode.
void InstanceRefKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
  InstanceKlass::oop_pc_update_pointers(obj, cm);

  if (UseCompressedOops) {
    oop_pc_update_pointers_specialized<narrowOop>(obj, cm);
  } else {
    oop_pc_update_pointers_specialized<oop>(obj, cm);
  }
}
3120
// Adjust every element oop of an object array.
void ObjArrayKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
  assert(obj->is_objArray(), "obj must be obj array");
  PSParallelCompact::AdjustPointerClosure closure(cm);
  oop_oop_iterate_elements<true>(objArrayOop(obj), &closure);
}
3126
// Primitive arrays contain no oops; nothing to adjust beyond a sanity check.
// The compaction manager is unused here.
void TypeArrayKlass::oop_pc_update_pointers(oop obj, ParCompactionManager* cm) {
  assert(obj->is_typeArray(),"must be a type array");
}
3130
3131 ParMarkBitMapClosure::IterationStatus
3132 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
3133 assert(destination() != NULL, "sanity");
3134 assert(bitmap()->obj_size(addr) == words, "bad size");
3135
3136 _source = addr;
3137 assert(PSParallelCompact::summary_data().calc_new_pointer(source(), compaction_manager()) ==
3138 destination(), "wrong destination");
3139
3140 if (words > words_remaining()) {
3141 return ParMarkBitMap::would_overflow;
3142 }
3143
3144 // The start_array must be updated even if the object is not moving.
3145 if (_start_array != NULL) {
3146 _start_array->allocate_block(destination());
3147 }
3148
3149 if (destination() != source()) {
3150 DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3151 Copy::aligned_conjoint_words(source(), destination(), words);
3152 }
3153
3154 oop moved_oop = (oop) destination();
3155 compaction_manager()->update_contents(moved_oop);
3156 assert(moved_oop->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop));
3157
|