< prev index next >

src/share/vm/gc/cms/parNewGeneration.cpp

Print this page




  28 #include "gc/cms/parNewGeneration.inline.hpp"
  29 #include "gc/cms/parOopClosures.inline.hpp"
  30 #include "gc/serial/defNewGeneration.inline.hpp"
  31 #include "gc/shared/adaptiveSizePolicy.hpp"
  32 #include "gc/shared/ageTable.hpp"
  33 #include "gc/shared/copyFailedInfo.hpp"
  34 #include "gc/shared/gcHeapSummary.hpp"
  35 #include "gc/shared/gcTimer.hpp"
  36 #include "gc/shared/gcTrace.hpp"
  37 #include "gc/shared/gcTraceTime.hpp"
  38 #include "gc/shared/genCollectedHeap.hpp"
  39 #include "gc/shared/genOopClosures.inline.hpp"
  40 #include "gc/shared/generation.hpp"
  41 #include "gc/shared/plab.inline.hpp"
  42 #include "gc/shared/referencePolicy.hpp"
  43 #include "gc/shared/space.hpp"
  44 #include "gc/shared/spaceDecorator.hpp"
  45 #include "gc/shared/strongRootsScope.hpp"
  46 #include "gc/shared/taskqueue.inline.hpp"
  47 #include "gc/shared/workgroup.hpp"

  48 #include "memory/resourceArea.hpp"
  49 #include "oops/objArrayOop.hpp"
  50 #include "oops/oop.inline.hpp"
  51 #include "runtime/atomic.inline.hpp"
  52 #include "runtime/handles.hpp"
  53 #include "runtime/handles.inline.hpp"
  54 #include "runtime/java.hpp"
  55 #include "runtime/thread.inline.hpp"
  56 #include "utilities/copy.hpp"
  57 #include "utilities/globalDefinitions.hpp"
  58 #include "utilities/stack.inline.hpp"
  59 
  60 ParScanThreadState::ParScanThreadState(Space* to_space_,
  61                                        ParNewGeneration* young_gen_,
  62                                        Generation* old_gen_,
  63                                        int thread_num_,
  64                                        ObjToScanQueueSet* work_queue_set_,
  65                                        Stack<oop, mtGC>* overflow_stacks_,
  66                                        size_t desired_plab_sz_,
  67                                        ParallelTaskTerminator& term_) :


 253         // It's conceivable that we may be able to use the
 254         // buffer we just grabbed for subsequent small requests
 255         // even if not for this one.
 256       } else {
 257         // We're used up.
 258         _to_space_full = true;
 259       }
 260     } else {
 261       // Too large; allocate the object individually.
 262       obj = sp->par_allocate(word_sz);
 263     }
 264   }
 265   return obj;
 266 }
 267 
     // Returns an unused to-space allocation of word_sz words starting at obj
     // back to this thread's PLAB (to_space_alloc_buffer).
 268 void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
 269   to_space_alloc_buffer()->undo_allocation(obj, word_sz);
 270 }
 271 
     // If this thread recorded a promotion failure, prints its thread number
     // and first_size() of the first failed promotion. Output is gated on the
     // PrintPromotionFailure flag and goes to gclog_or_tty.
 272 void ParScanThreadState::print_promotion_failure_size() {
 273   if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
 274     gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
 275                         _thread_num, _promotion_failed_info.first_size());
 276   }
 277 }
 278 
     // Set of per-worker ParScanThreadState objects for one young-gen
     // collection. Privately inherits ResourceArray, so the states are
     // resource-allocated and indexed by worker/thread number.
 279 class ParScanThreadStateSet: private ResourceArray {
 280 public:
 281   // Initializes states for the specified number of threads;
 282   ParScanThreadStateSet(int                     num_threads,
 283                         Space&                  to_space,
 284                         ParNewGeneration&       young_gen,
 285                         Generation&             old_gen,
 286                         ObjToScanQueueSet&      queue_set,
 287                         Stack<oop, mtGC>*       overflow_stacks_,
 288                         size_t                  desired_plab_sz,
 289                         ParallelTaskTerminator& term);
 290 
     // Resets per-thread task queue statistics (TASKQUEUE_STATS builds only).
 291   ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }
 292 
     // Returns the state for worker i; callers can bound-check via is_valid().
 293   inline ParScanThreadState& thread_state(int i);
 294 
 295   void trace_promotion_failed(const YoungGCTracer* gc_tracer);
 296   void reset(uint active_workers, bool promotion_failed);
 297   void flush();
 298 
     // Statistics support, compiled in only when TASKQUEUE_STATS is enabled.
     // All printers default their output stream to gclog_or_tty.
 299   #if TASKQUEUE_STATS
 300   static void
 301     print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
 302   void print_termination_stats(outputStream* const st = gclog_or_tty);
 303   static void
 304     print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
 305   void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
 306   void reset_stats();
 307   #endif // TASKQUEUE_STATS
 308 
 309 private:
     // Terminator shared by the workers, and the generations being collected.
 310   ParallelTaskTerminator& _term;
 311   ParNewGeneration&       _young_gen;
 312   Generation&             _old_gen;
 313  public:
     // id is a valid state index iff it is below the number of allocated
     // states (ResourceArray::length()).
 314   bool is_valid(int id) const { return id < length(); }
 315   ParallelTaskTerminator* terminator() { return &_term; }
 316 };
 317 
 318 ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
 319                                              Space& to_space,
 320                                              ParNewGeneration& young_gen,
 321                                              Generation& old_gen,
 322                                              ObjToScanQueueSet& queue_set,
 323                                              Stack<oop, mtGC>* overflow_stacks,
 324                                              size_t desired_plab_sz,
 325                                              ParallelTaskTerminator& term)


     // Clears this thread's task queue statistics and its termination /
     // overflow-refill counters ahead of a new collection.
 366 void ParScanThreadState::reset_stats() {
 367   taskqueue_stats().reset();
 368   _term_attempts = 0;
 369   _overflow_refills = 0;
 370   _overflow_refill_objs = 0;
 371 }
 372 
     // Resets the statistics of every per-worker state in the set.
 373 void ParScanThreadStateSet::reset_stats() {
 374   for (int i = 0; i < length(); ++i) {
 375     thread_state(i).reset_stats();
 376   }
 377 }
 378 
     // Prints the column headers for the per-thread termination statistics
     // table emitted by print_termination_stats().
 379 void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
 380   st->print_raw_cr("GC Termination Stats");
 381   st->print_raw_cr("     elapsed  --strong roots-- -------termination-------");
 382   st->print_raw_cr("thr     ms        ms       %       ms       %   attempts");
 383   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
 384 }
 385 
     // Prints one row per worker thread: elapsed, strong-roots and termination
     // times in ms, the strong-roots and termination shares of elapsed time
     // (percent), and the number of termination attempts.
 386 void ParScanThreadStateSet::print_termination_stats(outputStream* const st) {








 387   print_termination_stats_hdr(st);
 388 
 389   for (int i = 0; i < length(); ++i) {
 390     const ParScanThreadState & pss = thread_state(i);
 391     const double elapsed_ms = pss.elapsed_time() * 1000.0;
 392     const double s_roots_ms = pss.strong_roots_time() * 1000.0;
 393     const double term_ms = pss.term_time() * 1000.0;
     // NOTE(review): the percentage columns divide by elapsed_ms; a thread
     // with zero recorded elapsed time would print inf/nan -- confirm
     // elapsed_time() is always nonzero when stats are printed.
 394     st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
 395                  i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
 396                  term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
 397   }
 398 }
 399 
 400 // Print stats related to work queue activity.
     // Prints the two header rows of the task queue statistics table; the
     // column titles themselves come from TaskQueueStats::print_header().
 401 void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
 402   st->print_raw_cr("GC Task Stats");
 403   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
 404   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
 405 }
 406 
     // Prints per-thread task queue statistics, plus overflow-refill counts
     // for threads that had any, followed by a totals row. Totals are
     // verified in debug builds only.
 407 void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) {






 408   print_taskqueue_stats_hdr(st);
 409 
 410   TaskQueueStats totals;
 411   for (int i = 0; i < length(); ++i) {
 412     const ParScanThreadState & pss = thread_state(i);
 413     const TaskQueueStats & stats = pss.taskqueue_stats();
 414     st->print("%3d ", i); stats.print(st); st->cr();
 415     totals += stats;
 416 
 417     if (pss.overflow_refills() > 0) {
 418       st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
 419                    SIZE_FORMAT_W(10) " overflow objects",
 420                    pss.overflow_refills(), pss.overflow_refill_objs());
 421     }
 422   }
 423   st->print("tot "); totals.print(st); st->cr();
 424 
 425   DEBUG_ONLY(totals.verify());
 426 }
 427 #endif // TASKQUEUE_STATS


 806   _scan_cur_or_nonheap(cur), _scan_older(older)
 807 { }
 808 
     // Repeatedly applies the two scan closures to objects allocated since the
     // last save_marks() until no further allocations occur, i.e. until the
     // transitive closure of evacuated objects has been fully processed.
 809 void EvacuateFollowersClosureGeneral::do_void() {
 810   do {
 811     // Beware: this call will lead to closure applications via virtual
 812     // calls.
 813     _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
 814                                        _scan_cur_or_nonheap,
 815                                        _scan_older);
 816   } while (!_gch->no_allocs_since_save_marks());
 817 }
 818 
 819 // A Generation that does parallel young-gen collection.
 820 
     // Recovery path when promotion to the old generation failed during this
     // collection: unwinds forwarding pointers, arranges the spaces for a
     // follow-up full collection, notifies the old generation, and reports
     // the failure to the GC tracer.
 821 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
 822   assert(_promo_failure_scan_stack.is_empty(), "post condition");
 823   _promo_failure_scan_stack.clear(true); // Clear cached segments.
 824 
 825   remove_forwarding_pointers();
 826   if (PrintGCDetails) {
 827     gclog_or_tty->print(" (promotion failed)");
 828   }
 829   // All the spaces are in play for mark-sweep.
 830   swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
 831   from()->set_next_compaction_space(to());
 832   gch->set_incremental_collection_failed();
 833   // Inform the next generation that a promotion failure occurred.
 834   _old_gen->promotion_failure_occurred();
 835 
 836   // Trace promotion failure in the parallel GC threads
 837   thread_state_set.trace_promotion_failed(gc_tracer());
 838   // Single threaded code may have reported promotion failure to the global state
 839   if (_promotion_failed_info.has_failed()) {
 840     _gc_tracer.report_promotion_failed(_promotion_failed_info);
 841   }
 842   // Reset the PromotionFailureALot counters.
 843   NOT_PRODUCT(gch->reset_promotion_should_fail();)
 844 }
 845 
 846 void ParNewGeneration::collect(bool   full,
 847                                bool   clear_all_soft_refs,
 848                                size_t size,


 865 
 866   // If the next generation is too full to accommodate worst-case promotion
 867   // from this generation, pass on collection; let the next generation
 868   // do it.
 869   if (!collection_attempt_is_safe()) {
 870     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 871     return;
 872   }
 873   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 874 
 875   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 876   gch->trace_heap_before_gc(gc_tracer());
 877 
 878   init_assuming_no_promotion_failure();
 879 
 880   if (UseAdaptiveSizePolicy) {
 881     set_survivor_overflow(false);
 882     size_policy->minor_collection_begin();
 883   }
 884 
 885   GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
 886   // Capture heap used before collection (for printing).
 887   size_t gch_prev_used = gch->used();
 888 
 889   age_table()->clear();
 890   to()->clear(SpaceDecorator::Mangle);
 891 
 892   gch->save_marks();
 893 
 894   // Set the correct parallelism (number of queues) in the reference processor
 895   ref_processor()->set_active_mt_degree(active_workers);
 896 
 897   // Always set the terminator for the active number of workers
 898   // because only those workers go through the termination protocol.
 899   ParallelTaskTerminator _term(active_workers, task_queues());
 900   ParScanThreadStateSet thread_state_set(active_workers,
 901                                          *to(), *this, *_old_gen, *task_queues(),
 902                                          _overflow_stacks, desired_plab_sz(), _term);
 903 
 904   thread_state_set.reset(active_workers, promotion_failed());
 905 
 906   {
 907     StrongRootsScope srs(active_workers);


 973     swap_spaces();
 974 
 975     // A successful scavenge should restart the GC time limit count which is
 976     // for full GC's.
 977     size_policy->reset_gc_overhead_limit_count();
 978 
 979     assert(to()->is_empty(), "to space should be empty now");
 980 
 981     adjust_desired_tenuring_threshold();
 982   } else {
 983     handle_promotion_failed(gch, thread_state_set);
 984   }
 985   // set new iteration safe limit for the survivor spaces
 986   from()->set_concurrent_iteration_safe_limit(from()->top());
 987   to()->set_concurrent_iteration_safe_limit(to()->top());
 988 
 989   if (ResizePLAB) {
 990     plab_stats()->adjust_desired_plab_sz();
 991   }
 992 
 993   if (PrintGC && !PrintGCDetails) {
 994     gch->print_heap_change(gch_prev_used);
 995   }
 996 
 997   TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats());
 998   TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats());
 999 
1000   if (UseAdaptiveSizePolicy) {
1001     size_policy->minor_collection_end(gch->gc_cause());
1002     size_policy->avg_survived()->sample(from()->used());
1003   }
1004 
1005   // We need to use a monotonically non-decreasing time in ms
1006   // or we will see time-warp warnings and os::javaTimeMillis()
1007   // does not guarantee monotonicity.
1008   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1009   update_time_of_last_gc(now);
1010 
1011   rp->set_enqueuing_is_done(true);
1012   if (rp->processing_is_mt()) {
1013     ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
1014     rp->enqueue_discovered_references(&task_executor);
1015   } else {
1016     rp->enqueue_discovered_references(NULL);
1017   }
1018   rp->verify_no_references_recorded();


1131 
1132       preserve_mark_if_necessary(old, m);
1133       par_scan_state->register_promotion_failure(sz);
1134     }
1135 
1136     old->forward_to(new_obj);
1137     forward_ptr = NULL;
1138   } else {
1139     // Is in to-space; do copying ourselves.
1140     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
1141     assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
1142     forward_ptr = old->forward_to_atomic(new_obj);
1143     // Restore the mark word copied above.
1144     new_obj->set_mark(m);
1145     // Increment age if obj still in new generation
1146     new_obj->incr_age();
1147     par_scan_state->age_table()->add(new_obj, sz);
1148   }
1149   assert(new_obj != NULL, "just checking");
1150 
1151 #ifndef PRODUCT
1152   // This code must come after the CAS test, or it will print incorrect
1153   // information.
1154   if (TraceScavenge) {
1155     gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
1156        is_in_reserved(new_obj) ? "copying" : "tenuring",
1157        new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());
1158   }
1159 #endif
1160 
1161   if (forward_ptr == NULL) {
1162     oop obj_to_push = new_obj;
1163     if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
1164       // Length field used as index of next element to be scanned.
1165       // Real length can be obtained from real_forwardee()
1166       arrayOop(old)->set_length(0);
1167       obj_to_push = old;
1168       assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
1169              "push forwarded object");
1170     }
1171     // Push it on one of the queues of to-be-scanned objects.
1172     bool simulate_overflow = false;
1173     NOT_PRODUCT(
1174       if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
1175         // simulate a stack overflow
1176         simulate_overflow = true;
1177       }
1178     )
1179     if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
1180       // Add stats for overflow pushes.
1181       if (Verbose && PrintGCDetails) {
1182         gclog_or_tty->print("queue overflow!\n");
1183       }
1184       push_on_overflow_list(old, par_scan_state);
1185       TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
1186     }
1187 
1188     return new_obj;
1189   }
1190 
1191   // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
1192   // allocate it?
1193   if (is_in_reserved(new_obj)) {
1194     // Must be in to_space.
1195     assert(to()->is_in_reserved(new_obj), "Checking");
1196     if (forward_ptr == ClaimedForwardPtr) {
1197       // Wait to get the real forwarding pointer value.
1198       forward_ptr = real_forwardee(old);
1199     }
1200     par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
1201   }
1202 
1203   return forward_ptr;




  28 #include "gc/cms/parNewGeneration.inline.hpp"
  29 #include "gc/cms/parOopClosures.inline.hpp"
  30 #include "gc/serial/defNewGeneration.inline.hpp"
  31 #include "gc/shared/adaptiveSizePolicy.hpp"
  32 #include "gc/shared/ageTable.hpp"
  33 #include "gc/shared/copyFailedInfo.hpp"
  34 #include "gc/shared/gcHeapSummary.hpp"
  35 #include "gc/shared/gcTimer.hpp"
  36 #include "gc/shared/gcTrace.hpp"
  37 #include "gc/shared/gcTraceTime.hpp"
  38 #include "gc/shared/genCollectedHeap.hpp"
  39 #include "gc/shared/genOopClosures.inline.hpp"
  40 #include "gc/shared/generation.hpp"
  41 #include "gc/shared/plab.inline.hpp"
  42 #include "gc/shared/referencePolicy.hpp"
  43 #include "gc/shared/space.hpp"
  44 #include "gc/shared/spaceDecorator.hpp"
  45 #include "gc/shared/strongRootsScope.hpp"
  46 #include "gc/shared/taskqueue.inline.hpp"
  47 #include "gc/shared/workgroup.hpp"
  48 #include "logging/log.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "oops/objArrayOop.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "runtime/atomic.inline.hpp"
  53 #include "runtime/handles.hpp"
  54 #include "runtime/handles.inline.hpp"
  55 #include "runtime/java.hpp"
  56 #include "runtime/thread.inline.hpp"
  57 #include "utilities/copy.hpp"
  58 #include "utilities/globalDefinitions.hpp"
  59 #include "utilities/stack.inline.hpp"
  60 
  61 ParScanThreadState::ParScanThreadState(Space* to_space_,
  62                                        ParNewGeneration* young_gen_,
  63                                        Generation* old_gen_,
  64                                        int thread_num_,
  65                                        ObjToScanQueueSet* work_queue_set_,
  66                                        Stack<oop, mtGC>* overflow_stacks_,
  67                                        size_t desired_plab_sz_,
  68                                        ParallelTaskTerminator& term_) :


 254         // It's conceivable that we may be able to use the
 255         // buffer we just grabbed for subsequent small requests
 256         // even if not for this one.
 257       } else {
 258         // We're used up.
 259         _to_space_full = true;
 260       }
 261     } else {
 262       // Too large; allocate the object individually.
 263       obj = sp->par_allocate(word_sz);
 264     }
 265   }
 266   return obj;
 267 }
 268 
     // Returns an unused to-space allocation of word_sz words starting at obj
     // back to this thread's PLAB (to_space_alloc_buffer).
 269 void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
 270   to_space_alloc_buffer()->undo_allocation(obj, word_sz);
 271 }
 272 
     // If this thread recorded a promotion failure, logs its thread number
     // and first_size() of the first failed promotion at trace level on the
     // gc+promotion tags. NOTE(review): output is now controlled solely by
     // the log configuration; the former PrintPromotionFailure flag gate was
     // dropped -- confirm that is the intended flag-to-UL mapping.
 273 void ParScanThreadState::print_promotion_failure_size() {
 274   if (_promotion_failed_info.has_failed()) {
 275     log_trace(gc, promotion)(" (%d: promotion failure size = " SIZE_FORMAT ") ",
 276                              _thread_num, _promotion_failed_info.first_size());
 277   }
 278 }
 279 
     // Set of per-worker ParScanThreadState objects for one young-gen
     // collection. Privately inherits ResourceArray, so the states are
     // resource-allocated and indexed by worker/thread number.
 280 class ParScanThreadStateSet: private ResourceArray {
 281 public:
 282   // Initializes states for the specified number of threads;
 283   ParScanThreadStateSet(int                     num_threads,
 284                         Space&                  to_space,
 285                         ParNewGeneration&       young_gen,
 286                         Generation&             old_gen,
 287                         ObjToScanQueueSet&      queue_set,
 288                         Stack<oop, mtGC>*       overflow_stacks_,
 289                         size_t                  desired_plab_sz,
 290                         ParallelTaskTerminator& term);
 291 
     // Resets per-thread task queue statistics (TASKQUEUE_STATS builds only).
 292   ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }
 293 
     // Returns the state for worker i; callers can bound-check via is_valid().
 294   inline ParScanThreadState& thread_state(int i);
 295 
 296   void trace_promotion_failed(const YoungGCTracer* gc_tracer);
 297   void reset(uint active_workers, bool promotion_failed);
 298   void flush();
 299 
     // Statistics support, compiled in only when TASKQUEUE_STATS is enabled.
     // The non-static printers now obtain their output stream from unified
     // logging; only the static _hdr helpers still take a stream argument.
 300   #if TASKQUEUE_STATS
 301   static void
 302     print_termination_stats_hdr(outputStream* const st);
 303   void print_termination_stats();
 304   static void
 305     print_taskqueue_stats_hdr(outputStream* const st);
 306   void print_taskqueue_stats();
 307   void reset_stats();
 308   #endif // TASKQUEUE_STATS
 309 
 310 private:
     // Terminator shared by the workers, and the generations being collected.
 311   ParallelTaskTerminator& _term;
 312   ParNewGeneration&       _young_gen;
 313   Generation&             _old_gen;
 314  public:
     // id is a valid state index iff it is below the number of allocated
     // states (ResourceArray::length()).
 315   bool is_valid(int id) const { return id < length(); }
 316   ParallelTaskTerminator* terminator() { return &_term; }
 317 };
 318 
 319 ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
 320                                              Space& to_space,
 321                                              ParNewGeneration& young_gen,
 322                                              Generation& old_gen,
 323                                              ObjToScanQueueSet& queue_set,
 324                                              Stack<oop, mtGC>* overflow_stacks,
 325                                              size_t desired_plab_sz,
 326                                              ParallelTaskTerminator& term)


     // Clears this thread's task queue statistics and its termination /
     // overflow-refill counters ahead of a new collection.
 367 void ParScanThreadState::reset_stats() {
 368   taskqueue_stats().reset();
 369   _term_attempts = 0;
 370   _overflow_refills = 0;
 371   _overflow_refill_objs = 0;
 372 }
 373 
     // Resets the statistics of every per-worker state in the set.
 374 void ParScanThreadStateSet::reset_stats() {
 375   for (int i = 0; i < length(); ++i) {
 376     thread_state(i).reset_stats();
 377   }
 378 }
 379 
     // Prints the column headers for the per-thread termination statistics
     // table emitted by print_termination_stats().
 380 void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
 381   st->print_raw_cr("GC Termination Stats");
 382   st->print_raw_cr("     elapsed  --strong roots-- -------termination-------");
 383   st->print_raw_cr("thr     ms        ms       %       ms       %   attempts");
 384   st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
 385 }
 386 
     // Logs per-thread termination statistics (elapsed / strong-roots /
     // termination times and percentages, plus termination attempts) at debug
     // level on the gc+task+stats tags; returns immediately when that level
     // is not enabled.
 387 void ParScanThreadStateSet::print_termination_stats() {
 388   LogHandle(gc, task, stats) log;
 389   if (!log.is_debug()) {
 390     return;
 391   }
 392 
     // ResourceMark scopes the resource-allocated log stream.
 393   ResourceMark rm;
 394   outputStream* st = log.debug_stream();
 395 
 396   print_termination_stats_hdr(st);
 397 
 398   for (int i = 0; i < length(); ++i) {
 399     const ParScanThreadState & pss = thread_state(i);
 400     const double elapsed_ms = pss.elapsed_time() * 1000.0;
 401     const double s_roots_ms = pss.strong_roots_time() * 1000.0;
 402     const double term_ms = pss.term_time() * 1000.0;
     // NOTE(review): percentage columns divide by elapsed_ms; a thread with
     // zero recorded elapsed time would print inf/nan -- confirm
     // elapsed_time() is always nonzero when stats are printed.
 403     st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
 404                  i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
 405                  term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
 406   }
 407 }
 408 
 409 // Print stats related to work queue activity.
     // Prints the two header rows of the task queue statistics table; the
     // column titles themselves come from TaskQueueStats::print_header().
 410 void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
 411   st->print_raw_cr("GC Task Stats");
 412   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
 413   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
 414 }
 415 
     // Logs per-thread task queue statistics, overflow-refill counts, and a
     // totals row on the gc+task+stats tags; totals are verified in debug
     // builds only.
     // NOTE(review): this uses log.is_develop()/develop_stream() while
     // print_termination_stats() above uses is_debug()/debug_stream() --
     // confirm a "develop" level exists in the LogHandle API (develop-level
     // logging is typically compiled out of product builds); otherwise this
     // will not build.
 416 void ParScanThreadStateSet::print_taskqueue_stats() {
 417   LogHandle(gc, task, stats) log;
 418   if (!log.is_develop()) {
 419     return;
 420   }
 421   ResourceMark rm;
 422   outputStream* st = log.develop_stream();
 423   print_taskqueue_stats_hdr(st);
 424 
 425   TaskQueueStats totals;
 426   for (int i = 0; i < length(); ++i) {
 427     const ParScanThreadState & pss = thread_state(i);
 428     const TaskQueueStats & stats = pss.taskqueue_stats();
 429     st->print("%3d ", i); stats.print(st); st->cr();
 430     totals += stats;
 431 
 432     if (pss.overflow_refills() > 0) {
 433       st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
 434                    SIZE_FORMAT_W(10) " overflow objects",
 435                    pss.overflow_refills(), pss.overflow_refill_objs());
 436     }
 437   }
 438   st->print("tot "); totals.print(st); st->cr();
 439 
 440   DEBUG_ONLY(totals.verify());
 441 }
 442 #endif // TASKQUEUE_STATS


 821   _scan_cur_or_nonheap(cur), _scan_older(older)
 822 { }
 823 
     // Repeatedly applies the two scan closures to objects allocated since the
     // last save_marks() until no further allocations occur, i.e. until the
     // transitive closure of evacuated objects has been fully processed.
 824 void EvacuateFollowersClosureGeneral::do_void() {
 825   do {
 826     // Beware: this call will lead to closure applications via virtual
 827     // calls.
 828     _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
 829                                        _scan_cur_or_nonheap,
 830                                        _scan_older);
 831   } while (!_gch->no_allocs_since_save_marks());
 832 }
 833 
 834 // A Generation that does parallel young-gen collection.
 835 
     // Recovery path when promotion to the old generation failed during this
     // collection: unwinds forwarding pointers, arranges the spaces for a
     // follow-up full collection, notifies the old generation, and reports
     // the failure to the GC tracer. Failure reporting now goes through
     // unified logging (info level, gc+promotion tags) instead of the
     // PrintGCDetails flag.
 836 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
 837   assert(_promo_failure_scan_stack.is_empty(), "post condition");
 838   _promo_failure_scan_stack.clear(true); // Clear cached segments.
 839 
 840   remove_forwarding_pointers();
 841   log_info(gc, promotion)("Promotion failed");


 842   // All the spaces are in play for mark-sweep.
 843   swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
 844   from()->set_next_compaction_space(to());
 845   gch->set_incremental_collection_failed();
 846   // Inform the next generation that a promotion failure occurred.
 847   _old_gen->promotion_failure_occurred();
 848 
 849   // Trace promotion failure in the parallel GC threads
 850   thread_state_set.trace_promotion_failed(gc_tracer());
 851   // Single threaded code may have reported promotion failure to the global state
 852   if (_promotion_failed_info.has_failed()) {
 853     _gc_tracer.report_promotion_failed(_promotion_failed_info);
 854   }
 855   // Reset the PromotionFailureALot counters.
 856   NOT_PRODUCT(gch->reset_promotion_should_fail();)
 857 }
 858 
 859 void ParNewGeneration::collect(bool   full,
 860                                bool   clear_all_soft_refs,
 861                                size_t size,


 878 
 879   // If the next generation is too full to accommodate worst-case promotion
 880   // from this generation, pass on collection; let the next generation
 881   // do it.
 882   if (!collection_attempt_is_safe()) {
 883     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 884     return;
 885   }
 886   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 887 
 888   _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 889   gch->trace_heap_before_gc(gc_tracer());
 890 
 891   init_assuming_no_promotion_failure();
 892 
 893   if (UseAdaptiveSizePolicy) {
 894     set_survivor_overflow(false);
 895     size_policy->minor_collection_begin();
 896   }
 897 
 898   GCTraceTime(Trace, gc) t1("ParNew", NULL, gch->gc_cause());


 899 
 900   age_table()->clear();
 901   to()->clear(SpaceDecorator::Mangle);
 902 
 903   gch->save_marks();
 904 
 905   // Set the correct parallelism (number of queues) in the reference processor
 906   ref_processor()->set_active_mt_degree(active_workers);
 907 
 908   // Always set the terminator for the active number of workers
 909   // because only those workers go through the termination protocol.
 910   ParallelTaskTerminator _term(active_workers, task_queues());
 911   ParScanThreadStateSet thread_state_set(active_workers,
 912                                          *to(), *this, *_old_gen, *task_queues(),
 913                                          _overflow_stacks, desired_plab_sz(), _term);
 914 
 915   thread_state_set.reset(active_workers, promotion_failed());
 916 
 917   {
 918     StrongRootsScope srs(active_workers);


 984     swap_spaces();
 985 
 986     // A successful scavenge should restart the GC time limit count which is
 987     // for full GC's.
 988     size_policy->reset_gc_overhead_limit_count();
 989 
 990     assert(to()->is_empty(), "to space should be empty now");
 991 
 992     adjust_desired_tenuring_threshold();
 993   } else {
 994     handle_promotion_failed(gch, thread_state_set);
 995   }
 996   // set new iteration safe limit for the survivor spaces
 997   from()->set_concurrent_iteration_safe_limit(from()->top());
 998   to()->set_concurrent_iteration_safe_limit(to()->top());
 999 
1000   if (ResizePLAB) {
1001     plab_stats()->adjust_desired_plab_sz();
1002   }
1003 
1004   TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
1005   TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());




1006 
1007   if (UseAdaptiveSizePolicy) {
1008     size_policy->minor_collection_end(gch->gc_cause());
1009     size_policy->avg_survived()->sample(from()->used());
1010   }
1011 
1012   // We need to use a monotonically non-decreasing time in ms
1013   // or we will see time-warp warnings and os::javaTimeMillis()
1014   // does not guarantee monotonicity.
1015   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1016   update_time_of_last_gc(now);
1017 
1018   rp->set_enqueuing_is_done(true);
1019   if (rp->processing_is_mt()) {
1020     ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
1021     rp->enqueue_discovered_references(&task_executor);
1022   } else {
1023     rp->enqueue_discovered_references(NULL);
1024   }
1025   rp->verify_no_references_recorded();


1138 
1139       preserve_mark_if_necessary(old, m);
1140       par_scan_state->register_promotion_failure(sz);
1141     }
1142 
1143     old->forward_to(new_obj);
1144     forward_ptr = NULL;
1145   } else {
1146     // Is in to-space; do copying ourselves.
1147     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
1148     assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
1149     forward_ptr = old->forward_to_atomic(new_obj);
1150     // Restore the mark word copied above.
1151     new_obj->set_mark(m);
1152     // Increment age if obj still in new generation
1153     new_obj->incr_age();
1154     par_scan_state->age_table()->add(new_obj, sz);
1155   }
1156   assert(new_obj != NULL, "just checking");
1157 

1158   // This code must come after the CAS test, or it will print incorrect
1159   // information.
1160   log_develop(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",

1161        is_in_reserved(new_obj) ? "copying" : "tenuring",
1162        new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());


1163 
1164   if (forward_ptr == NULL) {
1165     oop obj_to_push = new_obj;
1166     if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
1167       // Length field used as index of next element to be scanned.
1168       // Real length can be obtained from real_forwardee()
1169       arrayOop(old)->set_length(0);
1170       obj_to_push = old;
1171       assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
1172              "push forwarded object");
1173     }
1174     // Push it on one of the queues of to-be-scanned objects.
1175     bool simulate_overflow = false;
1176     NOT_PRODUCT(
1177       if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
1178         // simulate a stack overflow
1179         simulate_overflow = true;
1180       }
1181     )
1182     if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
1183       // Add stats for overflow pushes.
1184       log_develop(gc)("Queue overflow");


1185       push_on_overflow_list(old, par_scan_state);
1186       TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
1187     }
1188 
1189     return new_obj;
1190   }
1191 
1192   // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
1193   // allocate it?
1194   if (is_in_reserved(new_obj)) {
1195     // Must be in to_space.
1196     assert(to()->is_in_reserved(new_obj), "Checking");
1197     if (forward_ptr == ClaimedForwardPtr) {
1198       // Wait to get the real forwarding pointer value.
1199       forward_ptr = real_forwardee(old);
1200     }
1201     par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
1202   }
1203 
1204   return forward_ptr;


< prev index next >