826 {
827 const size_t* const beg = (const size_t*)vspace->committed_low_addr();
828 const size_t* const end = (const size_t*)vspace->committed_high_addr();
829 for (const size_t* p = beg; p < end; ++p) {
830 assert(*p == 0, "not zero");
831 }
832 }
833
// Debug-only check (see the enclosing #ifdef ASSERT): asserts that the
// committed portions of both backing virtual spaces (_region_vspace and
// _block_vspace) contain only zero words, via the verify_clear(vspace)
// helper above.
void ParallelCompactData::verify_clear()
{
  verify_clear(_region_vspace);
  verify_clear(_block_vspace);
}
839 #endif // #ifdef ASSERT
840
// Collector-wide static state; one instance of each exists for the lifetime
// of the VM and is reused across all parallel old (mark-compact) collections.
STWGCTimer PSParallelCompact::_gc_timer;
ParallelOldTracer PSParallelCompact::_gc_tracer;
elapsedTimer PSParallelCompact::_accumulated_time;
unsigned int PSParallelCompact::_total_invocations = 0;
unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
// Timestamp (ms, derived from os::javaTimeNanos()) of the last collection;
// written by reset_millis_since_last_gc().
jlong PSParallelCompact::_time_of_last_gc = 0;
CollectorCounters* PSParallelCompact::_counters = NULL;
ParMarkBitMap PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

// Liveness predicate backed by the mark bitmap; see do_object_b() below.
PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
852
853 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
854
855 class PCReferenceProcessor: public ReferenceProcessor {
856 public:
857 PCReferenceProcessor(
858 BoolObjectClosure* is_subject_to_discovery,
859 BoolObjectClosure* is_alive_non_header) :
860 ReferenceProcessor(is_subject_to_discovery,
861 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
862 ParallelGCThreads, // mt processing degree
863 true, // mt discovery
864 ParallelGCThreads, // mt discovery degree
865 true, // atomic_discovery
866 is_alive_non_header) {
1053 if (young_gen_empty) {
1054 ct->clear(MemRegion(old_mr.start(), old_mr.end()));
1055 } else {
1056 ct->invalidate(MemRegion(old_mr.start(), old_mr.end()));
1057 }
1058
1059 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1060 ClassLoaderDataGraph::purge();
1061 MetaspaceUtils::verify_metrics();
1062
1063 heap->prune_scavengable_nmethods();
1064
1065 #if COMPILER2_OR_JVMCI
1066 DerivedPointerTable::update_pointers();
1067 #endif
1068
1069 if (ZapUnusedHeapArea) {
1070 heap->gen_mangle_unused_area();
1071 }
1072
1073 // Update time of last GC
1074 reset_millis_since_last_gc();
1075 }
1076
1077 HeapWord*
1078 PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
1079 bool maximum_compaction)
1080 {
1081 const size_t region_size = ParallelCompactData::RegionSize;
1082 const ParallelCompactData& sd = summary_data();
1083
1084 const MutableSpace* const space = _space_info[id].space();
1085 HeapWord* const top_aligned_up = sd.region_align_up(space->top());
1086 const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
1087 const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);
1088
1089 // Skip full regions at the beginning of the space--they are necessarily part
1090 // of the dense prefix.
1091 size_t full_count = 0;
1092 const RegionData* cp;
1093 for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
1094 ++full_count;
3178
3179 size_t beg_bit = bitmap->words_to_bits(region_idx << Log2RegionSize);
3180 const size_t range_end = beg_bit + bitmap->words_to_bits(RegionSize);
3181 size_t live_bits = bitmap->words_to_bits(partial_obj_size);
3182 beg_bit = bitmap->find_obj_beg(beg_bit + live_bits, range_end);
3183 while (beg_bit < range_end) {
3184 const size_t new_block = beg_bit >> Log2BitsPerBlock;
3185 if (new_block != cur_block) {
3186 cur_block = new_block;
3187 sd.block(cur_block)->set_offset(bitmap->bits_to_words(live_bits));
3188 }
3189
3190 const size_t end_bit = bitmap->find_obj_end(beg_bit, range_end);
3191 if (end_bit < range_end - 1) {
3192 live_bits += end_bit - beg_bit + 1;
3193 beg_bit = bitmap->find_obj_beg(end_bit + 1, range_end);
3194 } else {
3195 return;
3196 }
3197 }
3198 }
3199
3200 jlong PSParallelCompact::millis_since_last_gc() {
3201 // We need a monotonically non-decreasing time in ms but
3202 // os::javaTimeMillis() does not guarantee monotonicity.
3203 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
3204 jlong ret_val = now - _time_of_last_gc;
3205 // XXX See note in genCollectedHeap::millis_since_last_gc().
3206 if (ret_val < 0) {
3207 NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
3208 return 0;
3209 }
3210 return ret_val;
3211 }
3212
// Record "now" as the time of the last collection, for use by
// millis_since_last_gc().
void PSParallelCompact::reset_millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
}
3218
3219 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
3220 {
3221 if (source() != copy_destination()) {
3222 DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3223 Copy::aligned_conjoint_words(source(), copy_destination(), words_remaining());
3224 }
3225 update_state(words_remaining());
3226 assert(is_full(), "sanity");
3227 return ParMarkBitMap::full;
3228 }
3229
3230 void MoveAndUpdateClosure::copy_partial_obj()
3231 {
3232 size_t words = words_remaining();
3233
3234 HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
3235 HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
3236 if (end_addr < range_end) {
|
826 {
827 const size_t* const beg = (const size_t*)vspace->committed_low_addr();
828 const size_t* const end = (const size_t*)vspace->committed_high_addr();
829 for (const size_t* p = beg; p < end; ++p) {
830 assert(*p == 0, "not zero");
831 }
832 }
833
// Debug-only check (see the enclosing #ifdef ASSERT): asserts that the
// committed portions of both backing virtual spaces (_region_vspace and
// _block_vspace) contain only zero words, via the verify_clear(vspace)
// helper above.
void ParallelCompactData::verify_clear()
{
  verify_clear(_region_vspace);
  verify_clear(_block_vspace);
}
839 #endif // #ifdef ASSERT
840
// Collector-wide static state; one instance of each exists for the lifetime
// of the VM and is reused across all parallel old (mark-compact) collections.
STWGCTimer PSParallelCompact::_gc_timer;
ParallelOldTracer PSParallelCompact::_gc_tracer;
elapsedTimer PSParallelCompact::_accumulated_time;
unsigned int PSParallelCompact::_total_invocations = 0;
unsigned int PSParallelCompact::_maximum_compaction_gc_num = 0;
CollectorCounters* PSParallelCompact::_counters = NULL;
ParMarkBitMap PSParallelCompact::_mark_bitmap;
ParallelCompactData PSParallelCompact::_summary_data;

// Liveness predicate backed by the mark bitmap; see do_object_b() below.
PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
851
852 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
853
854 class PCReferenceProcessor: public ReferenceProcessor {
855 public:
856 PCReferenceProcessor(
857 BoolObjectClosure* is_subject_to_discovery,
858 BoolObjectClosure* is_alive_non_header) :
859 ReferenceProcessor(is_subject_to_discovery,
860 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
861 ParallelGCThreads, // mt processing degree
862 true, // mt discovery
863 ParallelGCThreads, // mt discovery degree
864 true, // atomic_discovery
865 is_alive_non_header) {
1052 if (young_gen_empty) {
1053 ct->clear(MemRegion(old_mr.start(), old_mr.end()));
1054 } else {
1055 ct->invalidate(MemRegion(old_mr.start(), old_mr.end()));
1056 }
1057
1058 // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1059 ClassLoaderDataGraph::purge();
1060 MetaspaceUtils::verify_metrics();
1061
1062 heap->prune_scavengable_nmethods();
1063
1064 #if COMPILER2_OR_JVMCI
1065 DerivedPointerTable::update_pointers();
1066 #endif
1067
1068 if (ZapUnusedHeapArea) {
1069 heap->gen_mangle_unused_area();
1070 }
1071
1072 // Signal that we have completed a visit to all live objects.
1073 Universe::heap()->record_whole_heap_examined_timestamp();
1074 }
1075
1076 HeapWord*
1077 PSParallelCompact::compute_dense_prefix_via_density(const SpaceId id,
1078 bool maximum_compaction)
1079 {
1080 const size_t region_size = ParallelCompactData::RegionSize;
1081 const ParallelCompactData& sd = summary_data();
1082
1083 const MutableSpace* const space = _space_info[id].space();
1084 HeapWord* const top_aligned_up = sd.region_align_up(space->top());
1085 const RegionData* const beg_cp = sd.addr_to_region_ptr(space->bottom());
1086 const RegionData* const end_cp = sd.addr_to_region_ptr(top_aligned_up);
1087
1088 // Skip full regions at the beginning of the space--they are necessarily part
1089 // of the dense prefix.
1090 size_t full_count = 0;
1091 const RegionData* cp;
1092 for (cp = beg_cp; cp < end_cp && cp->data_size() == region_size; ++cp) {
1093 ++full_count;
3177
3178 size_t beg_bit = bitmap->words_to_bits(region_idx << Log2RegionSize);
3179 const size_t range_end = beg_bit + bitmap->words_to_bits(RegionSize);
3180 size_t live_bits = bitmap->words_to_bits(partial_obj_size);
3181 beg_bit = bitmap->find_obj_beg(beg_bit + live_bits, range_end);
3182 while (beg_bit < range_end) {
3183 const size_t new_block = beg_bit >> Log2BitsPerBlock;
3184 if (new_block != cur_block) {
3185 cur_block = new_block;
3186 sd.block(cur_block)->set_offset(bitmap->bits_to_words(live_bits));
3187 }
3188
3189 const size_t end_bit = bitmap->find_obj_end(beg_bit, range_end);
3190 if (end_bit < range_end - 1) {
3191 live_bits += end_bit - beg_bit + 1;
3192 beg_bit = bitmap->find_obj_beg(end_bit + 1, range_end);
3193 } else {
3194 return;
3195 }
3196 }
3197 }
3198
3199 ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
3200 {
3201 if (source() != copy_destination()) {
3202 DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
3203 Copy::aligned_conjoint_words(source(), copy_destination(), words_remaining());
3204 }
3205 update_state(words_remaining());
3206 assert(is_full(), "sanity");
3207 return ParMarkBitMap::full;
3208 }
3209
3210 void MoveAndUpdateClosure::copy_partial_obj()
3211 {
3212 size_t words = words_remaining();
3213
3214 HeapWord* const range_end = MIN2(source() + words, bitmap()->region_end());
3215 HeapWord* const end_addr = bitmap()->find_obj_end(source(), range_end);
3216 if (end_addr < range_end) {
|