                      _thread_num, _promotion_failed_info.first_size());
  }
}

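// Set of per-thread states for the parallel young-generation collection:
// one ParScanThreadState per worker, plus the shared terminator and the
// generation references the workers operate on. Workers look up their
// state by thread number.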
class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int num_threads,
                        Space& to_space,
                        ParNewGeneration& gen,
                        Generation& old_gen,
                        ObjToScanQueueSet& queue_set,
                        Stack<oop, mtGC>* overflow_stacks_,
                        size_t desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void trace_promotion_failed(YoungGCTracer& gc_tracer);
  void reset(int active_workers, bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration& _gen;
  Generation& _next_gen;
public:
  bool is_valid(int id) const { return id < length(); }
  ParallelTaskTerminator* terminator() { return &_term; }
};


ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
                                             Space& to_space,
                                             ParNewGeneration& gen,
                                             Generation& old_gen,
                                             ObjToScanQueueSet& queue_set,
                                             Stack<oop, mtGC>* overflow_stacks,
                                             size_t desired_plab_sz,
                                             ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
      ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                         overflow_stacks, desired_plab_sz, term);
  }
}

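// Note: ResourceArray supplies only raw storage (_data); the states were
// constructed into it with placement new above, so accessors must cast
// before indexing.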
inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
  for (int i = 0; i < length(); ++i) {
    if (thread_state(i).promotion_failed()) {
      gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();
    }
  }
}

void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
{
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
  taskqueue_stats().reset();
// ...

  EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                  OopsInGenClosure* cur,
                                  OopsInGenClosure* older) :
    _gch(gch), _level(level),
    _scan_cur_or_nonheap(cur), _scan_older(older)
  {}

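// Evacuate until a fixed point: each pass applies the scan closures to
// everything allocated since the last save_marks, and the copying done by
// those closures may allocate more; stop once a pass copies nothing.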
void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}


// A Generation that does parallel young-gen collection.

void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch,
                                               ParScanThreadStateSet& thread_state_set,
                                               ParNewTracer& gc_tracer) {
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  if (PrintGCDetails) {
    gclog_or_tty->print(" (promotion failed)");
  }
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _next_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads.
  thread_state_set.trace_promotion_failed(gc_tracer);
  // Single-threaded code may have reported promotion failure to the global state.
  if (_promotion_failed_info.has_failed()) {
    gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

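// One parallel young-generation collection: scan roots with the work gang,
// evacuate followers, process discovered references, then either swap the
// survivor spaces or, on promotion failure, unwind via
// handle_promotion_failed().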
void ParNewGeneration::collect(bool full,
                               bool clear_all_soft_refs,
                               size_t size,
                               bool is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();

  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  int active_workers =
    AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                            workers->active_workers(),
                                            Threads::number_of_non_daemon_threads());
  workers->set_active_workers(active_workers);
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  _next_gen = gch->next_gen(this);

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // Slight lie: we did not even attempt one.
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  ParNewTracer gc_tracer;
  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(&gc_tracer);

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()),
                 PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  int n_workers = active_workers;

  // Set the correct parallelism (number of queues) in the reference processor.
  ref_processor()->set_active_mt_degree(n_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(n_workers, task_queues());
  ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                         *to(), *this, *_next_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
  gch->set_par_threads(n_workers);
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times. We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);
  } else {
    GenCollectedHeap::StrongRootsScope srs(gch);
    tsk.work(0);
  }
  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Trace and reset failed promotion info.
  if (promotion_failed()) {
    thread_state_set.trace_promotion_failed(gc_tracer);
  }

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
                                                     &scan_without_gc_barrier,
                                                     &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              _gc_timer, gc_tracer.gc_id());
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              _gc_timer, gc_tracer.gc_id());
  }
  gc_tracer.report_gc_reference_stats(stats);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piecemeal mangling, which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count, which is
    // for full GCs.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    handle_promotion_failed(gch, thread_state_set, gc_tracer);
  }
  // Set a new iteration-safe limit for the survivor spaces.
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz(n_workers);
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats());
  TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats());

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need a monotonically non-decreasing time in ms, or we will see
  // time-warp warnings; os::javaTimeMillis() does not guarantee
  // monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  SpecializationStats::print();

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();

  gch->trace_heap_after_gc(&gc_tracer);
  gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

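// A short busy-wait, used as a crude back-off while another thread finishes
// its work (for example, installing a real forwarding pointer); the static
// sum exists only to give the loop a side effect.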
static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

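// 0x4 is never a valid heap address, so the claimed marker cannot be
// confused with a real forwardee.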
static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);

// Because of concurrency, there are times when an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value. Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, waits, if
// necessary, for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {

// ======================================================================
// Revised version of the code above: the per-collection ParNewTracer
// local becomes a ParNewGeneration member (_gc_tracer, accessed via
// gc_tracer()), and trace_promotion_failed() takes a const
// YoungGCTracer* instead of a YoungGCTracer&.
// ======================================================================

                      _thread_num, _promotion_failed_info.first_size());
  }
}

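// Set of per-thread states for the parallel young-generation collection:
// one ParScanThreadState per worker, plus the shared terminator and the
// generation references the workers operate on. Workers look up their
// state by thread number.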
class ParScanThreadStateSet: private ResourceArray {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int num_threads,
                        Space& to_space,
                        ParNewGeneration& gen,
                        Generation& old_gen,
                        ObjToScanQueueSet& queue_set,
                        Stack<oop, mtGC>* overflow_stacks_,
                        size_t desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void trace_promotion_failed(const YoungGCTracer* gc_tracer);
  void reset(int active_workers, bool promotion_failed);
  void flush();

#if TASKQUEUE_STATS
  static void
    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(outputStream* const st = gclog_or_tty);
  static void
    print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
  void reset_stats();
#endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration& _gen;
  Generation& _next_gen;
public:
  bool is_valid(int id) const { return id < length(); }
  ParallelTaskTerminator* terminator() { return &_term; }
};


ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
                                             Space& to_space,
                                             ParNewGeneration& gen,
                                             Generation& old_gen,
                                             ObjToScanQueueSet& queue_set,
                                             Stack<oop, mtGC>* overflow_stacks,
                                             size_t desired_plab_sz,
                                             ParallelTaskTerminator& term)
  : ResourceArray(sizeof(ParScanThreadState), num_threads),
    _gen(gen), _next_gen(old_gen), _term(term)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new ((ParScanThreadState*)_data + i)
      ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
                         overflow_stacks, desired_plab_sz, term);
  }
}

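// Note: ResourceArray supplies only raw storage (_data); the states were
// constructed into it with placement new above, so accessors must cast
// before indexing.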
inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
  assert(i >= 0 && i < length(), "sanity check!");
  return ((ParScanThreadState*)_data)[i];
}

void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
  for (int i = 0; i < length(); ++i) {
    if (thread_state(i).promotion_failed()) {
      gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();
    }
  }
}

void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
{
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < length(); ++i) {
      thread_state(i).print_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void
ParScanThreadState::reset_stats()
{
  taskqueue_stats().reset();
// ...

  EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
                                  OopsInGenClosure* cur,
                                  OopsInGenClosure* older) :
    _gch(gch), _level(level),
    _scan_cur_or_nonheap(cur), _scan_older(older)
  {}

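// Evacuate until a fixed point: each pass applies the scan closures to
// everything allocated since the last save_marks, and the copying done by
// those closures may allocate more; stop once a pass copies nothing.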
void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(_level,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks(_level));
}


// A Generation that does parallel young-gen collection.

void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch,
                                               ParScanThreadStateSet& thread_state_set) {
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  if (PrintGCDetails) {
    gclog_or_tty->print(" (promotion failed)");
  }
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _next_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads.
  thread_state_set.trace_promotion_failed(gc_tracer());
  // Single-threaded code may have reported promotion failure to the global state.
  if (_promotion_failed_info.has_failed()) {
    _gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}

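// One parallel young-generation collection: scan roots with the work gang,
// evacuate followers, process discovered references, then either swap the
// survivor spaces or, on promotion failure, unwind via
// handle_promotion_failed().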
void ParNewGeneration::collect(bool full,
                               bool clear_all_soft_refs,
                               size_t size,
                               bool is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();

  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "not a CMS generational heap");
  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  FlexibleWorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  int active_workers =
    AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                            workers->active_workers(),
                                            Threads::number_of_non_daemon_threads());
  workers->set_active_workers(active_workers);
  assert(gch->n_gens() == 2,
         "Par collection currently only works with single older gen.");
  _next_gen = gch->next_gen(this);

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // Slight lie: we did not even attempt one.
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(gc_tracer());

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()),
                 PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
  // Capture heap used before collection (for printing).
  size_t gch_prev_used = gch->used();

  SpecializationStats::clear();

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();
  assert(workers != NULL, "Need parallel worker threads.");
  int n_workers = active_workers;

  // Set the correct parallelism (number of queues) in the reference processor.
  ref_processor()->set_active_mt_degree(n_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(n_workers, task_queues());
  ParScanThreadStateSet thread_state_set(workers->active_workers(),
                                         *to(), *this, *_next_gen, *task_queues(),
                                         _overflow_stacks, desired_plab_sz(), _term);

  ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
  gch->set_par_threads(n_workers);
  gch->rem_set()->prepare_for_younger_refs_iterate(true);
  // It turns out that even when we're using 1 thread, doing the work in a
  // separate thread causes wide variance in run times. We can't help this
  // in the multi-threaded case, but we special-case n=1 here to get
  // repeatable measurements of the 1-thread overhead of the parallel code.
  if (n_workers > 1) {
    GenCollectedHeap::StrongRootsScope srs(gch);
    workers->run_task(&tsk);
  } else {
    GenCollectedHeap::StrongRootsScope srs(gch);
    tsk.work(0);
  }
  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Trace and reset failed promotion info.
  if (promotion_failed()) {
    thread_state_set.trace_promotion_failed(gc_tracer());
  }

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
                                                     &scan_without_gc_barrier,
                                                     &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              _gc_timer, _gc_tracer.gc_id());
  } else {
    thread_state_set.flush();
    gch->set_par_threads(0);  // 0 ==> non-parallel.
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              _gc_timer, _gc_tracer.gc_id());
  }
  _gc_tracer.report_gc_reference_stats(stats);
  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piecemeal mangling, which
      // can check for valid mangling at intermediate points in the
      // collection(s). When a minor collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation. Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count, which is
    // for full GCs.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    handle_promotion_failed(gch, thread_state_set);
  }
  // Set a new iteration-safe limit for the survivor spaces.
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  if (ResizePLAB) {
    plab_stats()->adjust_desired_plab_sz(n_workers);
  }

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats());
  TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats());

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need a monotonically non-decreasing time in ms, or we will see
  // time-warp warnings; os::javaTimeMillis() does not guarantee
  // monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  SpecializationStats::print();

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();

  gch->trace_heap_after_gc(gc_tracer());
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  _gc_timer->register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

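// A short busy-wait, used as a crude back-off while another thread finishes
// its work (for example, installing a real forwarding pointer); the static
// sum exists only to give the loop a side effect.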
static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

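// 0x4 is never a valid heap address, so the claimed marker cannot be
// confused with a real forwardee.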
static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);

// Because of concurrency, there are times when an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value. Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer, waits, if
// necessary, for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {