
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

rev 7515 : imported patch 8066566
rev 7517 : const * instead of &
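
Note: the second revision (7517) changes how the young-GC tracer is passed around. trace_promotion_failed() takes a pointer to const instead of a non-const reference, and call sites switch from the _gc_tracer field to a gc_tracer() accessor. A minimal before/after sketch of the signature change, taken from the hunks below (the accessor's definition lives outside the hunks shown here):

    // rev 7515: non-const reference
    void trace_promotion_failed(YoungGCTracer& gc_tracer);
    thread_state_set.trace_promotion_failed(_gc_tracer);

    // rev 7517: pointer to const; read-only use is now visible at the call site
    void trace_promotion_failed(const YoungGCTracer* gc_tracer);
    thread_state_set.trace_promotion_failed(gc_tracer());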


 291                         _thread_num, _promotion_failed_info.first_size());
 292   }
 293 }
 294 
 295 class ParScanThreadStateSet: private ResourceArray {
 296 public:
 297   // Initializes states for the specified number of threads.
 298   ParScanThreadStateSet(int                     num_threads,
 299                         Space&                  to_space,
 300                         ParNewGeneration&       gen,
 301                         Generation&             old_gen,
 302                         ObjToScanQueueSet&      queue_set,
 303                         Stack<oop, mtGC>*       overflow_stacks_,
 304                         size_t                  desired_plab_sz,
 305                         ParallelTaskTerminator& term);
 306 
 307   ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }
 308 
 309   inline ParScanThreadState& thread_state(int i);
 310 
 311   void trace_promotion_failed(YoungGCTracer& gc_tracer);
 312   void reset(int active_workers, bool promotion_failed);
 313   void flush();
 314 
 315   #if TASKQUEUE_STATS
 316   static void
 317     print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
 318   void print_termination_stats(outputStream* const st = gclog_or_tty);
 319   static void
 320     print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
 321   void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
 322   void reset_stats();
 323   #endif // TASKQUEUE_STATS
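
Note: the statistics interface above compiles away entirely in builds without TASKQUEUE_STATS; the TASKQUEUE_STATS_ONLY(reset_stats()) call in the destructor relies on a wrapper macro that is roughly (from taskqueue.hpp):

    #if TASKQUEUE_STATS
    #define TASKQUEUE_STATS_ONLY(code) code
    #else
    #define TASKQUEUE_STATS_ONLY(code)
    #endif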
 324 
 325 private:
 326   ParallelTaskTerminator& _term;
 327   ParNewGeneration&       _gen;
 328   Generation&             _next_gen;
 329  public:
 330   bool is_valid(int id) const { return id < length(); }
 331   ParallelTaskTerminator* terminator() { return &_term; }


 340   : ResourceArray(sizeof(ParScanThreadState), num_threads),
 341     _gen(gen), _next_gen(old_gen), _term(term)
 342 {
 343   assert(num_threads > 0, "sanity check!");
 344   assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
 345          "overflow_stack allocation mismatch");
 346   // Initialize states.
 347   for (int i = 0; i < num_threads; ++i) {
 348     new ((ParScanThreadState*)_data + i)
 349         ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
 350                            overflow_stacks, desired_plab_sz, term);
 351   }
 352 }
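
Note: ParScanThreadStateSet never default-constructs its elements. ResourceArray(sizeof(ParScanThreadState), num_threads) reserves raw storage, and each state is then built in place with placement new. A self-contained sketch of the same idiom (Widget and the stack buffer are illustrative, not from the patch):

    #include <new>  // placement new

    struct Widget {
      int _id;
      explicit Widget(int id) : _id(id) {}
    };

    int main() {
      const int n = 4;
      // Raw, correctly sized storage; ResourceArray's _data plays this role above.
      alignas(Widget) unsigned char buf[n * sizeof(Widget)];
      Widget* data = reinterpret_cast<Widget*>(buf);
      for (int i = 0; i < n; ++i) {
        new (data + i) Widget(i);  // construct element i in place
      }
      // Placement-new'd objects must be destroyed manually if their
      // destructors matter; the GC code instead relies on resource-area
      // lifetime to reclaim the storage.
      for (int i = 0; i < n; ++i) {
        data[i].~Widget();
      }
      return 0;
    }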
 353 
 354 inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
 355 {
 356   assert(i >= 0 && i < length(), "sanity check!");
 357   return ((ParScanThreadState*)_data)[i];
 358 }
 359 
 360 void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
 361   for (int i = 0; i < length(); ++i) {
 362     if (thread_state(i).promotion_failed()) {
 363       gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
 364       thread_state(i).promotion_failed_info().reset();
 365     }
 366   }
 367 }
 368 
 369 void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
 370 {
 371   _term.reset_for_reuse(active_threads);
 372   if (promotion_failed) {
 373     for (int i = 0; i < length(); ++i) {
 374       thread_state(i).print_promotion_failure_size();
 375     }
 376   }
 377 }
 378 
 379 #if TASKQUEUE_STATS
 380 void
 381 ParScanThreadState::reset_stats()
 382 {
 383   taskqueue_stats().reset();


 882 
 883 
 884 // A Generation that does parallel young-gen collection.
 885 
 886 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
 887   assert(_promo_failure_scan_stack.is_empty(), "post condition");
 888   _promo_failure_scan_stack.clear(true); // Clear cached segments.
 889 
 890   remove_forwarding_pointers();
 891   if (PrintGCDetails) {
 892     gclog_or_tty->print(" (promotion failed)");
 893   }
 894   // All the spaces are in play for mark-sweep.
 895   swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
 896   from()->set_next_compaction_space(to());
 897   gch->set_incremental_collection_failed();
 898   // Inform the next generation that a promotion failure occurred.
 899   _next_gen->promotion_failure_occurred();
 900 
 901   // Trace promotion failure in the parallel GC threads
 902   thread_state_set.trace_promotion_failed(_gc_tracer);
 903   // Single-threaded code may have reported promotion failure to the global state
 904   if (_promotion_failed_info.has_failed()) {
 905     _gc_tracer.report_promotion_failed(_promotion_failed_info);
 906   }
 907   // Reset the PromotionFailureALot counters.
 908   NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
 909 }
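
Note: promotion failures are recorded at two levels. Each worker's ParScanThreadState carries its own PromotionFailedInfo, flushed by trace_promotion_failed() above, while single-threaded paths record into the generation-level _promotion_failed_info; both must be reported to the tracer or events would be lost.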
 910 
 911 void ParNewGeneration::collect(bool   full,
 912                                bool   clear_all_soft_refs,
 913                                size_t size,
 914                                bool   is_tlab) {
 915   assert(full || size > 0, "otherwise we don't want to collect");
 916 
 917   GenCollectedHeap* gch = GenCollectedHeap::heap();
 918 
 919   _gc_timer->register_gc_start();
 920 
 921   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
 922     "not a CMS generational heap");


 925   assert(workers != NULL, "Need workgang for parallel work");
 926   int active_workers =
 927       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
 928                                               workers->active_workers(),
 929                                               Threads::number_of_non_daemon_threads());
 930   workers->set_active_workers(active_workers);
 931   assert(gch->n_gens() == 2,
 932          "Par collection currently only works with single older gen.");
 933   _next_gen = gch->next_gen(this);
 934 
 935   // If the next generation is too full to accommodate worst-case promotion
 936   // from this generation, pass on collection; let the next generation
 937   // do it.
 938   if (!collection_attempt_is_safe()) {
 939     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 940     return;
 941   }
 942   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 943 
 944   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 945   gch->trace_heap_before_gc(&_gc_tracer);
 946 
 947   init_assuming_no_promotion_failure();
 948 
 949   if (UseAdaptiveSizePolicy) {
 950     set_survivor_overflow(false);
 951     size_policy->minor_collection_begin();
 952   }
 953 
 954   GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
 955   // Capture heap used before collection (for printing).
 956   size_t gch_prev_used = gch->used();
 957 
 958   SpecializationStats::clear();
 959 
 960   age_table()->clear();
 961   to()->clear(SpaceDecorator::Mangle);
 962 
 963   gch->save_marks();
 964   assert(workers != NULL, "Need parallel worker threads.");
 965   int n_workers = active_workers;


 976 
 977   ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
 978   gch->set_par_threads(n_workers);
 979   gch->rem_set()->prepare_for_younger_refs_iterate(true);
 980   // It turns out that even when we're using 1 thread, doing the work in a
 981   // separate thread causes wide variance in run times.  We can't help this
 982   // in the multi-threaded case, but we special-case n=1 here to get
 983   // repeatable measurements of the 1-thread overhead of the parallel code.
 984   if (n_workers > 1) {
 985     GenCollectedHeap::StrongRootsScope srs(gch);
 986     workers->run_task(&tsk);
 987   } else {
 988     GenCollectedHeap::StrongRootsScope srs(gch);
 989     tsk.work(0);
 990   }
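
Note: both branches enter a StrongRootsScope, so the n_workers == 1 path differs from the parallel one only by skipping the WorkGang dispatch; that is what makes the single-thread timing comparable, as the comment above explains.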
 991   thread_state_set.reset(0 /* Bad value in debug if not reset */,
 992                          promotion_failed());
 993 
 994   // Trace and reset failed promotion info.
 995   if (promotion_failed()) {
 996     thread_state_set.trace_promotion_failed(_gc_tracer);
 997   }
 998 
 999   // Process (weak) reference objects found during scavenge.
1000   ReferenceProcessor* rp = ref_processor();
1001   IsAliveClosure is_alive(this);
1002   ScanWeakRefClosure scan_weak_ref(this);
1003   KeepAliveClosure keep_alive(&scan_weak_ref);
1004   ScanClosure               scan_without_gc_barrier(this, false);
1005   ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
1006   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
1007   EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
1008     &scan_without_gc_barrier, &scan_with_gc_barrier);
1009   rp->setup_policy(clear_all_soft_refs);
 1010   // Can the mt_degree be set later (at run_task() time would be best)?
1011   rp->set_active_mt_degree(active_workers);
1012   ReferenceProcessorStats stats;
1013   if (rp->processing_is_mt()) {
1014     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
1015     stats = rp->process_discovered_references(&is_alive, &keep_alive,
1016                                               &evacuate_followers, &task_executor,


1070     size_policy->avg_survived()->sample(from()->used());
1071   }
1072 
 1073   // We need a monotonically non-decreasing time in ms or we will
 1074   // see time-warp warnings; os::javaTimeMillis() does not guarantee
 1075   // monotonicity, so derive the value from javaTimeNanos() instead.
1076   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1077   update_time_of_last_gc(now);
1078 
1079   SpecializationStats::print();
1080 
1081   rp->set_enqueuing_is_done(true);
1082   if (rp->processing_is_mt()) {
1083     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
1084     rp->enqueue_discovered_references(&task_executor);
1085   } else {
1086     rp->enqueue_discovered_references(NULL);
1087   }
1088   rp->verify_no_references_recorded();
1089 
1090   gch->trace_heap_after_gc(&_gc_tracer);
1091   _gc_tracer.report_tenuring_threshold(tenuring_threshold());
1092 
1093   _gc_timer->register_gc_end();
1094 
1095   _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
1096 }
1097 
1098 static int sum;
1099 void ParNewGeneration::waste_some_time() {
1100   for (int i = 0; i < 100; i++) {
1101     sum += i;
1102   }
1103 }
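
Note: sum is deliberately a file-static global so the loop has an observable side effect and the compiler is unlikely to optimize waste_some_time() into a no-op; the function presumably serves as a crude backoff delay under contention.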
1104 
1105 static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);
1106 
1107 // Because of concurrency, there are times where an object for which
1108 // "is_forwarded()" is true contains an "interim" forwarding pointer
1109 // value.  Such a value will soon be overwritten with a real value.
1110 // This method requires "obj" to have a forwarding pointer, and waits, if
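
The method this comment introduces is cut off by the hunk; based on the ClaimedForwardPtr sentinel defined just above, its expected shape is roughly the following sketch (the method name and the slow-path helper are assumptions, not confirmed by the hunks shown):

    oop ParNewGeneration::real_forwardee(oop obj) {
      oop forward_ptr = obj->forwardee();
      if (forward_ptr != ClaimedForwardPtr) {
        return forward_ptr;             // a real forwardee is already installed
      }
      // Another thread has claimed obj but not yet installed the real
      // forwarding pointer; wait (spin) until it appears.
      return real_forwardee_slow(obj);  // assumed helper
    }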




 291                         _thread_num, _promotion_failed_info.first_size());
 292   }
 293 }
 294 
 295 class ParScanThreadStateSet: private ResourceArray {
 296 public:
 297   // Initializes states for the specified number of threads.
 298   ParScanThreadStateSet(int                     num_threads,
 299                         Space&                  to_space,
 300                         ParNewGeneration&       gen,
 301                         Generation&             old_gen,
 302                         ObjToScanQueueSet&      queue_set,
 303                         Stack<oop, mtGC>*       overflow_stacks_,
 304                         size_t                  desired_plab_sz,
 305                         ParallelTaskTerminator& term);
 306 
 307   ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }
 308 
 309   inline ParScanThreadState& thread_state(int i);
 310 
 311   void trace_promotion_failed(const YoungGCTracer* gc_tracer);
 312   void reset(int active_workers, bool promotion_failed);
 313   void flush();
 314 
 315   #if TASKQUEUE_STATS
 316   static void
 317     print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
 318   void print_termination_stats(outputStream* const st = gclog_or_tty);
 319   static void
 320     print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
 321   void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
 322   void reset_stats();
 323   #endif // TASKQUEUE_STATS
 324 
 325 private:
 326   ParallelTaskTerminator& _term;
 327   ParNewGeneration&       _gen;
 328   Generation&             _next_gen;
 329  public:
 330   bool is_valid(int id) const { return id < length(); }
 331   ParallelTaskTerminator* terminator() { return &_term; }


 340   : ResourceArray(sizeof(ParScanThreadState), num_threads),
 341     _gen(gen), _next_gen(old_gen), _term(term)
 342 {
 343   assert(num_threads > 0, "sanity check!");
 344   assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
 345          "overflow_stack allocation mismatch");
 346   // Initialize states.
 347   for (int i = 0; i < num_threads; ++i) {
 348     new ((ParScanThreadState*)_data + i)
 349         ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
 350                            overflow_stacks, desired_plab_sz, term);
 351   }
 352 }
 353 
 354 inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
 355 {
 356   assert(i >= 0 && i < length(), "sanity check!");
 357   return ((ParScanThreadState*)_data)[i];
 358 }
 359 
 360 void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
 361   for (int i = 0; i < length(); ++i) {
 362     if (thread_state(i).promotion_failed()) {
 363       gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
 364       thread_state(i).promotion_failed_info().reset();
 365     }
 366   }
 367 }
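
Note: this is the substantive change of rev 7517. The tracer parameter becomes const YoungGCTracer*, and report_promotion_failed() is invoked through it, which requires that method to be const. The gc_tracer() accessor used by the callers below is defined outside these hunks; a plausible shape, stated as an assumption:

    const YoungGCTracer* gc_tracer() const { return &_gc_tracer; }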
 368 
 369 void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
 370 {
 371   _term.reset_for_reuse(active_threads);
 372   if (promotion_failed) {
 373     for (int i = 0; i < length(); ++i) {
 374       thread_state(i).print_promotion_failure_size();
 375     }
 376   }
 377 }
 378 
 379 #if TASKQUEUE_STATS
 380 void
 381 ParScanThreadState::reset_stats()
 382 {
 383   taskqueue_stats().reset();


 882 
 883 
 884 // A Generation that does parallel young-gen collection.
 885 
 886 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
 887   assert(_promo_failure_scan_stack.is_empty(), "post condition");
 888   _promo_failure_scan_stack.clear(true); // Clear cached segments.
 889 
 890   remove_forwarding_pointers();
 891   if (PrintGCDetails) {
 892     gclog_or_tty->print(" (promotion failed)");
 893   }
 894   // All the spaces are in play for mark-sweep.
 895   swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
 896   from()->set_next_compaction_space(to());
 897   gch->set_incremental_collection_failed();
 898   // Inform the next generation that a promotion failure occurred.
 899   _next_gen->promotion_failure_occurred();
 900 
 901   // Trace promotion failure in the parallel GC threads
 902   thread_state_set.trace_promotion_failed(gc_tracer());
 903   // Single-threaded code may have reported promotion failure to the global state
 904   if (_promotion_failed_info.has_failed()) {
 905     _gc_tracer.report_promotion_failed(_promotion_failed_info);
 906   }
 907   // Reset the PromotionFailureALot counters.
 908   NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
 909 }
 910 
 911 void ParNewGeneration::collect(bool   full,
 912                                bool   clear_all_soft_refs,
 913                                size_t size,
 914                                bool   is_tlab) {
 915   assert(full || size > 0, "otherwise we don't want to collect");
 916 
 917   GenCollectedHeap* gch = GenCollectedHeap::heap();
 918 
 919   _gc_timer->register_gc_start();
 920 
 921   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
 922     "not a CMS generational heap");


 925   assert(workers != NULL, "Need workgang for parallel work");
 926   int active_workers =
 927       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
 928                                               workers->active_workers(),
 929                                               Threads::number_of_non_daemon_threads());
 930   workers->set_active_workers(active_workers);
 931   assert(gch->n_gens() == 2,
 932          "Par collection currently only works with single older gen.");
 933   _next_gen = gch->next_gen(this);
 934 
 935   // If the next generation is too full to accommodate worst-case promotion
 936   // from this generation, pass on collection; let the next generation
 937   // do it.
 938   if (!collection_attempt_is_safe()) {
 939     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 940     return;
 941   }
 942   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 943 
 944   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 945   gch->trace_heap_before_gc(gc_tracer());
 946 
 947   init_assuming_no_promotion_failure();
 948 
 949   if (UseAdaptiveSizePolicy) {
 950     set_survivor_overflow(false);
 951     size_policy->minor_collection_begin();
 952   }
 953 
 954   GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
 955   // Capture heap used before collection (for printing).
 956   size_t gch_prev_used = gch->used();
 957 
 958   SpecializationStats::clear();
 959 
 960   age_table()->clear();
 961   to()->clear(SpaceDecorator::Mangle);
 962 
 963   gch->save_marks();
 964   assert(workers != NULL, "Need parallel worker threads.");
 965   int n_workers = active_workers;


 976 
 977   ParNewGenTask tsk(this, _next_gen, reserved().end(), &thread_state_set);
 978   gch->set_par_threads(n_workers);
 979   gch->rem_set()->prepare_for_younger_refs_iterate(true);
 980   // It turns out that even when we're using 1 thread, doing the work in a
 981   // separate thread causes wide variance in run times.  We can't help this
 982   // in the multi-threaded case, but we special-case n=1 here to get
 983   // repeatable measurements of the 1-thread overhead of the parallel code.
 984   if (n_workers > 1) {
 985     GenCollectedHeap::StrongRootsScope srs(gch);
 986     workers->run_task(&tsk);
 987   } else {
 988     GenCollectedHeap::StrongRootsScope srs(gch);
 989     tsk.work(0);
 990   }
 991   thread_state_set.reset(0 /* Bad value in debug if not reset */,
 992                          promotion_failed());
 993 
 994   // Trace and reset failed promotion info.
 995   if (promotion_failed()) {
 996     thread_state_set.trace_promotion_failed(gc_tracer());
 997   }
 998 
 999   // Process (weak) reference objects found during scavenge.
1000   ReferenceProcessor* rp = ref_processor();
1001   IsAliveClosure is_alive(this);
1002   ScanWeakRefClosure scan_weak_ref(this);
1003   KeepAliveClosure keep_alive(&scan_weak_ref);
1004   ScanClosure               scan_without_gc_barrier(this, false);
1005   ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
1006   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
1007   EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
1008     &scan_without_gc_barrier, &scan_with_gc_barrier);
1009   rp->setup_policy(clear_all_soft_refs);
 1010   // Can the mt_degree be set later (at run_task() time would be best)?
1011   rp->set_active_mt_degree(active_workers);
1012   ReferenceProcessorStats stats;
1013   if (rp->processing_is_mt()) {
1014     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
1015     stats = rp->process_discovered_references(&is_alive, &keep_alive,
1016                                               &evacuate_followers, &task_executor,


1070     size_policy->avg_survived()->sample(from()->used());
1071   }
1072 
 1073   // We need a monotonically non-decreasing time in ms or we will
 1074   // see time-warp warnings; os::javaTimeMillis() does not guarantee
 1075   // monotonicity, so derive the value from javaTimeNanos() instead.
1076   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1077   update_time_of_last_gc(now);
1078 
1079   SpecializationStats::print();
1080 
1081   rp->set_enqueuing_is_done(true);
1082   if (rp->processing_is_mt()) {
1083     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
1084     rp->enqueue_discovered_references(&task_executor);
1085   } else {
1086     rp->enqueue_discovered_references(NULL);
1087   }
1088   rp->verify_no_references_recorded();
1089 
1090   gch->trace_heap_after_gc(gc_tracer());
1091   _gc_tracer.report_tenuring_threshold(tenuring_threshold());
1092 
1093   _gc_timer->register_gc_end();
1094 
1095   _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
1096 }
1097 
1098 static int sum;
1099 void ParNewGeneration::waste_some_time() {
1100   for (int i = 0; i < 100; i++) {
1101     sum += i;
1102   }
1103 }
1104 
1105 static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);
1106 
1107 // Because of concurrency, there are times where an object for which
1108 // "is_forwarded()" is true contains an "interim" forwarding pointer
1109 // value.  Such a value will soon be overwritten with a real value.
1110 // This method requires "obj" to have a forwarding pointer, and waits, if

