/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/cms/parNewGeneration.inline.hpp"
#include "gc/cms/parOopClosures.inline.hpp"
#include "gc/serial/defNewGeneration.inline.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/ageTable.inline.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/plab.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/workgroup.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"

ParScanThreadState::ParScanThreadState(Space* to_space_,
                                       ParNewGeneration* young_gen_,
                                       Generation* old_gen_,
                                       int thread_num_,
                                       ObjToScanQueueSet* work_queue_set_,
                                       Stack<oop, mtGC>* overflow_stacks_,
                                       PreservedMarks* preserved_marks_,
                                       size_t desired_plab_sz_,
                                       ParallelTaskTerminator& term_) :
  _to_space(to_space_),
  _old_gen(old_gen_),
  _young_gen(young_gen_),
  _thread_num(thread_num_),
  _work_queue(work_queue_set_->queue(thread_num_)),
  _to_space_full(false),
  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  _preserved_marks(preserved_marks_),
  _ageTable(false), // false ==> not the global age table, no perf data.
  _to_space_alloc_buffer(desired_plab_sz_),
  _to_space_closure(young_gen_, this),
  _old_gen_closure(young_gen_, this),
  _to_space_root_closure(young_gen_, this),
  _old_gen_root_closure(young_gen_, this),
  _older_gen_closure(young_gen_, this),
  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
                      &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
                      work_queue_set_, &term_),
  _is_alive_closure(young_gen_),
  _scan_weak_ref_closure(young_gen_, this),
  _keep_alive_closure(&_scan_weak_ref_closure),
  _strong_roots_time(0.0),
  _term_time(0.0)
{
  #if TASKQUEUE_STATS
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
  #endif // TASKQUEUE_STATS

  _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
  _hash_seed = 17;  // Might want to take time-based random value.
  _start = os::elapsedTime();
  _old_gen_closure.set_generation(old_gen_);
  _old_gen_root_closure.set_generation(old_gen_);
}

void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
                                              size_t plab_word_size) {
  ChunkArray* sca = survivor_chunk_array();
  if (sca != NULL) {
    // A non-null SCA implies that we want the PLAB data recorded.
    sca->record_sample(plab_start, plab_word_size);
  }
}

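// Large object arrays are scanned incrementally, ParGCArrayScanChunk
// elements at a time. The new_obj != old_obj check excludes objects that
// were forwarded to themselves after a promotion failure; those must be
// scanned in a single pass.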
bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
  return new_obj->is_objArray() &&
         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
         new_obj != old_obj;
}

void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
  assert(old->is_objArray(), "must be obj array");
  assert(old->is_forwarded(), "must be forwarded");
  assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap.");
  assert(!old_gen()->is_in(old), "must be in young generation.");

  objArrayOop obj = objArrayOop(old->forwardee());
  // Process ParGCArrayScanChunk elements now
  // and push the remainder back onto queue.
  int start     = arrayOop(old)->length();
  int end       = obj->length();
  int remainder = end - start;
  assert(start <= end, "just checking");
  if (remainder > 2 * ParGCArrayScanChunk) {
    // Test above combines last partial chunk with a full chunk
    end = start + ParGCArrayScanChunk;
    arrayOop(old)->set_length(end);
    // Push remainder.
    bool ok = work_queue()->push(old);
    assert(ok, "just popped, push must be okay");
  } else {
    // Restore length so that it can be used if there
    // is a promotion failure and forwarding pointers
    // must be removed.
    arrayOop(old)->set_length(end);
  }

  // Process our set of indices (include header in first chunk).
  // Should make sure end is even (aligned to HeapWord in case of compressed oops).
  if ((HeapWord*)obj < young_old_boundary()) {
    // Object is in to-space.
    obj->oop_iterate_range(&_to_space_closure, start, end);
  } else {
    // Object is in old generation.
    obj->oop_iterate_range(&_old_gen_closure, start, end);
  }
}

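// Drain this thread's work queue until at most max_size entries remain,
// scanning each popped object with the closure appropriate to the space it
// now resides in. With a private overflow stack, keep refilling the queue
// from that stack so its objects become stealable by other workers.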
void ParScanThreadState::trim_queues(int max_size) {
  ObjToScanQueue* queue = work_queue();
  do {
    while (queue->size() > (juint)max_size) {
      oop obj_to_scan;
      if (queue->pop_local(obj_to_scan)) {
        if ((HeapWord*)obj_to_scan < young_old_boundary()) {
          if (obj_to_scan->is_objArray() &&
              obj_to_scan->is_forwarded() &&
              obj_to_scan->forwardee() != obj_to_scan) {
            scan_partial_array_and_push_remainder(obj_to_scan);
          } else {
            // Object is in to-space.
            obj_to_scan->oop_iterate(&_to_space_closure);
          }
        } else {
          // Object is in old generation.
          obj_to_scan->oop_iterate(&_old_gen_closure);
        }
      }
    }
    // For the case of compressed oops, we have a private, non-shared
    // overflow stack, so we eagerly drain it so as to more evenly
    // distribute load early. Note: this may be good to do in
    // general rather than delay for the final stealing phase.
    // If applicable, we'll transfer a set of objects over to our
    // work queue, allowing them to be stolen and draining our
    // private overflow stack.
  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
}

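// Transfer a batch of objects from this thread's private overflow stack to
// its work queue, where other workers can steal them. Returns true if
// anything was transferred.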
bool ParScanThreadState::take_from_overflow_stack() {
  assert(ParGCUseLocalOverflow, "Else should not call");
  assert(young_gen()->overflow_list() == NULL, "Error");
  ObjToScanQueue* queue = work_queue();
  Stack<oop, mtGC>* const of_stack = overflow_stack();
  const size_t num_overflow_elems = of_stack->size();
  const size_t space_available = queue->max_elems() - queue->size();
  const size_t num_take_elems = MIN3(space_available / 4,
                                     ParGCDesiredObjsFromOverflowList,
                                     num_overflow_elems);
  // Transfer the most recent num_take_elems from the overflow
  // stack to our work queue.
  for (size_t i = 0; i != num_take_elems; i++) {
    oop cur = of_stack->pop();
    oop obj_to_push = cur->forwardee();
    assert(GenCollectedHeap::heap()->is_in_reserved(cur), "Should be in heap");
    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
    assert(GenCollectedHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
    if (should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = queue->push(obj_to_push);
    assert(ok, "Should have succeeded");
  }
  assert(young_gen()->overflow_list() == NULL, "Error");
  return num_take_elems > 0;  // was something transferred?
}

void ParScanThreadState::push_on_overflow_stack(oop p) {
  assert(ParGCUseLocalOverflow, "Else should not call");
  overflow_stack()->push(p);
  assert(young_gen()->overflow_list() == NULL, "Error");
}

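// Slow-path allocation in to-space, taken when the current PLAB cannot
// satisfy the request: small requests retire the PLAB and start a new one,
// while requests above the PLAB waste limit are allocated directly.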
HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
  // If the object is small enough, try to reallocate the buffer.
  HeapWord* obj = NULL;
  if (!_to_space_full) {
    PLAB* const plab = to_space_alloc_buffer();
    Space* const sp  = to_space();
    if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) {
      // Is small enough; abandon this buffer and start a new one.
      plab->retire();
      // The minimum size has to be twice SurvivorAlignmentInBytes to
      // allow for padding used in the alignment of 1 word.  A padding
      // of 1 is too small for a filler word so the padding size will
      // be increased by SurvivorAlignmentInBytes.
      size_t min_usable_size = 2 * static_cast<size_t>(SurvivorAlignmentInBytes >> LogHeapWordSize);
      size_t buf_size = MAX2(plab->word_sz(), min_usable_size);
      HeapWord* buf_space = sp->par_allocate(buf_size);
      if (buf_space == NULL) {
        const size_t min_bytes = MAX2(PLAB::min_size(), min_usable_size) << LogHeapWordSize;
        size_t free_bytes = sp->free();
        while (buf_space == NULL && free_bytes >= min_bytes) {
          buf_size = free_bytes >> LogHeapWordSize;
          assert(buf_size == (size_t)align_object_size(buf_size), "Invariant");
          buf_space  = sp->par_allocate(buf_size);
          free_bytes = sp->free();
        }
      }
      if (buf_space != NULL) {
        plab->set_buf(buf_space, buf_size);
        record_survivor_plab(buf_space, buf_size);
        obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
        // Note that we cannot compare buf_size < word_sz below
        // because of AlignmentReserve (see PLAB::allocate()).
        assert(obj != NULL || plab->words_remaining() < word_sz,
               "Else should have been able to allocate requested object size "
               SIZE_FORMAT ", PLAB size " SIZE_FORMAT ", SurvivorAlignmentInBytes "
               SIZE_FORMAT ", words_remaining " SIZE_FORMAT,
               word_sz, buf_size, SurvivorAlignmentInBytes, plab->words_remaining());
        // It's conceivable that we may be able to use the
        // buffer we just grabbed for subsequent small requests
        // even if not for this one.
      } else {
        // We're used up.
        _to_space_full = true;
      }
    } else {
      // Too large; allocate the object individually.
      obj = sp->par_allocate(word_sz);
    }
  }
  return obj;
}

void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
  to_space_alloc_buffer()->undo_allocation(obj, word_sz);
}

void ParScanThreadState::print_promotion_failure_size() {
  if (_promotion_failed_info.has_failed()) {
    log_trace(gc, promotion)(" (%d: promotion failure size = " SIZE_FORMAT ") ",
                             _thread_num, _promotion_failed_info.first_size());
  }
}

class ParScanThreadStateSet: StackObj {
public:
  // Initializes states for the specified number of threads.
  ParScanThreadStateSet(int                     num_threads,
                        Space&                  to_space,
                        ParNewGeneration&       young_gen,
                        Generation&             old_gen,
                        ObjToScanQueueSet&      queue_set,
                        Stack<oop, mtGC>*       overflow_stacks_,
                        PreservedMarksSet&      preserved_marks_set,
                        size_t                  desired_plab_sz,
                        ParallelTaskTerminator& term);

  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }

  inline ParScanThreadState& thread_state(int i);

  void trace_promotion_failed(const YoungGCTracer* gc_tracer);
  void reset(uint active_workers, bool promotion_failed);
  void flush();

  #if TASKQUEUE_STATS
  static void print_termination_stats_hdr(outputStream* const st);
  void print_termination_stats();
  static void print_taskqueue_stats_hdr(outputStream* const st);
  void print_taskqueue_stats();
  void reset_stats();
  #endif // TASKQUEUE_STATS

private:
  ParallelTaskTerminator& _term;
  ParNewGeneration&       _young_gen;
  Generation&             _old_gen;
  ParScanThreadState*     _per_thread_states;
  const int               _num_threads;

public:
  bool is_valid(int id) const { return id < _num_threads; }
  ParallelTaskTerminator* terminator() { return &_term; }
};

ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
                                             Space& to_space,
                                             ParNewGeneration& young_gen,
                                             Generation& old_gen,
                                             ObjToScanQueueSet& queue_set,
                                             Stack<oop, mtGC>* overflow_stacks,
                                             PreservedMarksSet& preserved_marks_set,
                                             size_t desired_plab_sz,
                                             ParallelTaskTerminator& term)
  : _term(term),
    _young_gen(young_gen),
    _old_gen(old_gen),
    _per_thread_states(NEW_RESOURCE_ARRAY(ParScanThreadState, num_threads)),
    _num_threads(num_threads)
{
  assert(num_threads > 0, "sanity check!");
  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
         "overflow_stack allocation mismatch");
  // Initialize states.
  for (int i = 0; i < num_threads; ++i) {
    new(_per_thread_states + i)
      ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
                         overflow_stacks, preserved_marks_set.get(i),
                         desired_plab_sz, term);
  }
}

inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
  assert(i >= 0 && i < _num_threads, "sanity check!");
  return _per_thread_states[i];
}

void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
  for (int i = 0; i < _num_threads; ++i) {
    if (thread_state(i).promotion_failed()) {
      gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
      thread_state(i).promotion_failed_info().reset();
    }
  }
}

void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
  _term.reset_for_reuse(active_threads);
  if (promotion_failed) {
    for (int i = 0; i < _num_threads; ++i) {
      thread_state(i).print_promotion_failure_size();
    }
  }
}

#if TASKQUEUE_STATS
void ParScanThreadState::reset_stats() {
  taskqueue_stats().reset();
  _term_attempts = 0;
  _overflow_refills = 0;
  _overflow_refill_objs = 0;
}

void ParScanThreadStateSet::reset_stats() {
  for (int i = 0; i < _num_threads; ++i) {
    thread_state(i).reset_stats();
  }
}

void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------");
  st->print_raw_cr("thr     ms        ms       %       ms       %   attempts");
  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
}

void ParScanThreadStateSet::print_termination_stats() {
  Log(gc, task, stats) log;
  if (!log.is_debug()) {
    return;
  }

  ResourceMark rm;
  LogStream ls(log.debug());
  outputStream* st = &ls;

  print_termination_stats_hdr(st);

  for (int i = 0; i < _num_threads; ++i) {
    const ParScanThreadState& pss = thread_state(i);
    const double elapsed_ms = pss.elapsed_time() * 1000.0;
    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
    const double term_ms = pss.term_time() * 1000.0;
    st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
  }
}

// Print stats related to work queue activity.
void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ParScanThreadStateSet::print_taskqueue_stats() {
  if (!log_develop_is_enabled(Trace, gc, task, stats)) {
    return;
  }
  Log(gc, task, stats) log;
  ResourceMark rm;
  LogStream ls(log.trace());
  outputStream* st = &ls;
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  for (int i = 0; i < _num_threads; ++i) {
    const ParScanThreadState& pss = thread_state(i);
    const TaskQueueStats& stats = pss.taskqueue_stats();
    st->print("%3d ", i); stats.print(st); st->cr();
    totals += stats;

    if (pss.overflow_refills() > 0) {
      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
                   SIZE_FORMAT_W(10) " overflow objects",
                   pss.overflow_refills(), pss.overflow_refill_objs());
    }
  }
  st->print("tot "); totals.print(st); st->cr();

  DEBUG_ONLY(totals.verify());
}
#endif // TASKQUEUE_STATS

void ParScanThreadStateSet::flush() {
  // Work in this loop should be kept as lightweight as
  // possible since this might otherwise become a bottleneck
  // to scaling. Should we add heavy-weight work into this
  // loop, consider parallelizing the loop into the worker threads.
  for (int i = 0; i < _num_threads; ++i) {
    ParScanThreadState& par_scan_state = thread_state(i);

    // Flush stats related to To-space PLAB activity and
    // retire the last buffer.
    par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());

    // Every thread has its own age table.  We need to merge
    // them all into one.
    AgeTable* local_table = par_scan_state.age_table();
    _young_gen.age_table()->merge(local_table);

    // Inform old gen that we're done.
    _old_gen.par_promote_alloc_done(i);
  }

  if (UseConcMarkSweepGC) {
    // We need to call this even when ResizeOldPLAB is disabled
    // so as to avoid breaking some asserts. While we may be able
    // to avoid this by reorganizing the code a bit, I am loath
    // to do that unless we find cases where ergo leads to bad
    // performance.
    CompactibleFreeListSpaceLAB::compute_desired_plab_size();
  }
}

ParScanClosure::ParScanClosure(ParNewGeneration* g,
                               ParScanThreadState* par_scan_state) :
  OopsInClassLoaderDataOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
  _boundary = _g->reserved().end();
}

void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }

void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }

void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }

void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }

ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
                                             ParScanThreadState* par_scan_state)
  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
{}

void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }

#ifdef WIN32
#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
#endif

ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
    ParScanThreadState* par_scan_state_,
    ParScanWithoutBarrierClosure* to_space_closure_,
    ParScanWithBarrierClosure* old_gen_closure_,
    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
    ParNewGeneration* par_gen_,
    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
    ObjToScanQueueSet* task_queues_,
    ParallelTaskTerminator* terminator_) :

    _par_scan_state(par_scan_state_),
    _to_space_closure(to_space_closure_),
    _old_gen_closure(old_gen_closure_),
    _to_space_root_closure(to_space_root_closure_),
    _old_gen_root_closure(old_gen_root_closure_),
    _par_gen(par_gen_),
    _task_queues(task_queues_),
    _terminator(terminator_)
{}

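// The main evacuation loop for one worker: drain the local work queue,
// then try to steal from other queues or refill from the global overflow
// list; when no work can be found anywhere, offer termination.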
void ParEvacuateFollowersClosure::do_void() {
  ObjToScanQueue* work_q = par_scan_state()->work_queue();

  while (true) {
    // Scan to-space and old-gen objs until we run out of both.
    oop obj_to_scan;
    par_scan_state()->trim_queues(0);

    // We have no local work; attempt to steal from other threads.

    // Attempt to steal work from promoted.
    if (task_queues()->steal(par_scan_state()->thread_num(),
                             par_scan_state()->hash_seed(),
                             obj_to_scan)) {
      bool res = work_q->push(obj_to_scan);
      assert(res, "Empty queue should have room for a push.");

      // If successful, go back to the top of the loop.
      continue;

      // Try global overflow list.
    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
      continue;
    }

    // Otherwise, offer termination.
    par_scan_state()->start_term_time();
    if (terminator()->offer_termination()) break;
    par_scan_state()->end_term_time();
  }
  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
         "Broken overflow list?");
  // Finish the last termination pause.
  par_scan_state()->end_term_time();
}

ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
                             Generation* old_gen,
                             HeapWord* young_old_boundary,
                             ParScanThreadStateSet* state_set,
                             StrongRootsScope* strong_roots_scope) :
    AbstractGangTask("ParNewGeneration collection"),
    _young_gen(young_gen), _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set),
    _strong_roots_scope(strong_roots_scope)
{}

void ParNewGenTask::work(uint worker_id) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  // Since this is being done in a separate thread, need new resource
  // and handle marks.
  ResourceMark rm;
  HandleMark hm;

  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
  assert(_state_set->is_valid(worker_id), "Should not have been called");

  par_scan_state.set_young_old_boundary(_young_old_boundary);

  CLDScanClosure cld_scan_closure(&par_scan_state.to_space_root_closure(),
                                  gch->rem_set()->cld_rem_set()->accumulate_modified_oops());

  par_scan_state.start_strong_roots();
  gch->young_process_roots(_strong_roots_scope,
                           &par_scan_state.to_space_root_closure(),
                           &par_scan_state.older_gen_closure(),
                           &cld_scan_closure);

  par_scan_state.end_strong_roots();

  // "Evacuate followers".
  par_scan_state.evacuate_followers_closure().do_void();

  // This will collapse this worker's promoted object list that's
  // created during the main parallel phase of ParNew. This has
  // to be called after all workers have finished promoting objects
  // and scanning promoted objects. It should be safe calling it from
  // here, given that we can only reach here after all threads have
  // offered termination, i.e., after there is no more work to be
  // done. It will also disable promotion tracking for the rest of
  // this GC as it's not necessary to be on during reference processing.
  _old_gen->par_oop_since_save_marks_iterate_done((int) worker_id);
}

ParNewGeneration::ParNewGeneration(ReservedSpace rs, size_t initial_byte_size)
  : DefNewGeneration(rs, initial_byte_size, "PCopy"),
  _overflow_list(NULL),
  _is_alive_closure(this),
  _plab_stats("Young", YoungPLABSize, PLABWeight)
{
  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
  NOT_PRODUCT(_num_par_pushes = 0;)
  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
  guarantee(_task_queues != NULL, "task_queues allocation failure.");

  for (uint i = 0; i < ParallelGCThreads; i++) {
    ObjToScanQueue* q = new ObjToScanQueue();
    guarantee(q != NULL, "work_queue Allocation failure.");
    _task_queues->register_queue(i, q);
  }

  for (uint i = 0; i < ParallelGCThreads; i++) {
    _task_queues->queue(i)->initialize();
  }

  _overflow_stacks = NULL;
  if (ParGCUseLocalOverflow) {
    // typedef to work around the NEW_C_HEAP_ARRAY macro, which cannot deal with ','
    typedef Stack<oop, mtGC> GCOopStack;

    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
    for (size_t i = 0; i < ParallelGCThreads; ++i) {
      new (_overflow_stacks + i) Stack<oop, mtGC>();
    }
  }

  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname =
         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
                                     ParallelGCThreads, CHECK);
  }
}

// ParNewGeneration::
ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}

template <class T>
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _par_cl->do_oop_nv(p);

  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }

// ParNewGeneration::
KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
  DefNewGeneration::KeepAliveClosure(cl) {}

template <class T>
void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
#ifdef ASSERT
  {
    assert(!oopDesc::is_null(*p), "expected non-null ref");
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    // We never expect to see a null reference being processed
    // as a weak reference.
    assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
  }
#endif // ASSERT

  _cl->do_oop_nv(p);

  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
    _rs->write_ref_field_gc_par(p, obj);
  }
}

void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }

template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    if ((HeapWord*)obj < _boundary) {
      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
      oop new_obj = obj->is_forwarded()
                      ? obj->forwardee()
                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
    }
    if (_gc_barrier) {
      // If p points to a younger generation, mark the card.
      if ((HeapWord*)obj < _gen_boundary) {
        _rs->write_ref_field_gc_par(p, obj);
      }
    }
  }
}

void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }

class ParNewRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
public:
  ParNewRefProcTaskProxy(ProcessTask& task,
                         ParNewGeneration& young_gen,
                         Generation& old_gen,
                         HeapWord* young_old_boundary,
                         ParScanThreadStateSet& state_set);

private:
  virtual void work(uint worker_id);

  ParNewGeneration&      _young_gen;
  ProcessTask&           _task;
  Generation&            _old_gen;
  HeapWord*              _young_old_boundary;
  ParScanThreadStateSet& _state_set;
};

ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
                                               ParNewGeneration& young_gen,
                                               Generation& old_gen,
                                               HeapWord* young_old_boundary,
                                               ParScanThreadStateSet& state_set)
  : AbstractGangTask("ParNewGeneration parallel reference processing"),
    _young_gen(young_gen),
    _task(task),
    _old_gen(old_gen),
    _young_old_boundary(young_old_boundary),
    _state_set(state_set)
{ }

void ParNewRefProcTaskProxy::work(uint worker_id) {
  ResourceMark rm;
  HandleMark hm;
  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
  par_scan_state.set_young_old_boundary(_young_old_boundary);
  _task.work(worker_id, par_scan_state.is_alive_closure(),
             par_scan_state.keep_alive_closure(),
             par_scan_state.evacuate_followers_closure());
}

class ParNewRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _task;

public:
  ParNewRefEnqueueTaskProxy(EnqueueTask& task)
    : AbstractGangTask("ParNewGeneration parallel reference enqueue"),
      _task(task)
  { }

  virtual void work(uint worker_id) {
    _task.work(worker_id);
  }
};

void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
  ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen,
                                 _young_gen.reserved().end(), _state_set);
  workers->run_task(&rp_task);
  _state_set.reset(0 /* bad value in debug if not reset */,
                   _young_gen.promotion_failed());
}

void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need parallel worker threads.");
  ParNewRefEnqueueTaskProxy enq_task(task);
  workers->run_task(&enq_task);
}

void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
  _state_set.flush();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  gch->save_marks();
}

ScanClosureWithParBarrier::
ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
  ScanClosure(g, gc_barrier)
{ }

EvacuateFollowersClosureGeneral::
EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
                                OopsInGenClosure* cur,
                                OopsInGenClosure* older) :
  _gch(gch),
  _scan_cur_or_nonheap(cur), _scan_older(older)
{ }

void EvacuateFollowersClosureGeneral::do_void() {
  do {
    // Beware: this call will lead to closure applications via virtual
    // calls.
    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
                                       _scan_cur_or_nonheap,
                                       _scan_older);
  } while (!_gch->no_allocs_since_save_marks());
}

// A Generation that does parallel young-gen collection.

void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  _promo_failure_scan_stack.clear(true); // Clear cached segments.

  remove_forwarding_pointers();
  log_info(gc, promotion)("Promotion failed");
  // All the spaces are in play for mark-sweep.
  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  from()->set_next_compaction_space(to());
  gch->set_incremental_collection_failed();
  // Inform the next generation that a promotion failure occurred.
  _old_gen->promotion_failure_occurred();

  // Trace promotion failure in the parallel GC threads.
  thread_state_set.trace_promotion_failed(gc_tracer());
  // Single threaded code may have reported promotion failure to the global state.
  if (_promotion_failed_info.has_failed()) {
    _gc_tracer.report_promotion_failed(_promotion_failed_info);
  }
  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(gch->reset_promotion_should_fail();)
}

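// Perform a parallel young-generation collection: evacuate live objects
// from eden and from-space into to-space (promoting tenured-worthy ones
// into the old generation), process discovered references, and swap the
// survivor spaces on success.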
void ParNewGeneration::collect(bool   full,
                               bool   clear_all_soft_refs,
                               size_t size,
                               bool   is_tlab) {
  assert(full || size > 0, "otherwise we don't want to collect");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  _gc_timer->register_gc_start();

  AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  WorkGang* workers = gch->workers();
  assert(workers != NULL, "Need workgang for parallel work");
  uint active_workers =
       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
                                               workers->active_workers(),
                                               Threads::number_of_non_daemon_threads());
  active_workers = workers->update_active_workers(active_workers);
  log_info(gc, task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());

  _old_gen = gch->old_gen();

  // If the next generation is too full to accommodate worst-case promotion
  // from this generation, pass on collection; let the next generation
  // do it.
  if (!collection_attempt_is_safe()) {
    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
    return;
  }
  assert(to()->is_empty(), "Else not collection_attempt_is_safe");

  _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  gch->trace_heap_before_gc(gc_tracer());

  init_assuming_no_promotion_failure();

  if (UseAdaptiveSizePolicy) {
    set_survivor_overflow(false);
    size_policy->minor_collection_begin();
  }

  GCTraceTime(Trace, gc, phases) t1("ParNew", NULL, gch->gc_cause());

  age_table()->clear();
  to()->clear(SpaceDecorator::Mangle);

  gch->save_marks();

  // Set the correct parallelism (number of queues) in the reference processor.
  ref_processor()->set_active_mt_degree(active_workers);

  // Need to initialize the preserved marks before the ThreadStateSet c'tor.
  _preserved_marks_set.init(active_workers);

  // Always set the terminator for the active number of workers
  // because only those workers go through the termination protocol.
  ParallelTaskTerminator _term(active_workers, task_queues());
  ParScanThreadStateSet thread_state_set(active_workers,
                                         *to(), *this, *_old_gen, *task_queues(),
                                         _overflow_stacks, _preserved_marks_set,
                                         desired_plab_sz(), _term);

  thread_state_set.reset(active_workers, promotion_failed());

  {
    StrongRootsScope srs(active_workers);

    ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
    gch->rem_set()->prepare_for_younger_refs_iterate(true);
    // It turns out that even when we're using 1 thread, doing the work in a
    // separate thread causes wide variance in run times.  We can't help this
    // in the multi-threaded case, but we special-case n=1 here to get
    // repeatable measurements of the 1-thread overhead of the parallel code.
    // Might multiple workers ever be used?  If yes, initialization
    // has been done such that the single threaded path should not be used.
    if (workers->total_workers() > 1) {
      workers->run_task(&tsk);
    } else {
      tsk.work(0);
    }
  }

  thread_state_set.reset(0 /* Bad value in debug if not reset */,
                         promotion_failed());

  // Trace and reset failed promotion info.
  if (promotion_failed()) {
    thread_state_set.trace_promotion_failed(gc_tracer());
  }

  // Process (weak) reference objects found during scavenge.
  ReferenceProcessor* rp = ref_processor();
  IsAliveClosure is_alive(this);
  ScanWeakRefClosure scan_weak_ref(this);
  KeepAliveClosure keep_alive(&scan_weak_ref);
  ScanClosure               scan_without_gc_barrier(this, false);
  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
  EvacuateFollowersClosureGeneral evacuate_followers(gch,
    &scan_without_gc_barrier, &scan_with_gc_barrier);
  rp->setup_policy(clear_all_soft_refs);
  // Can the mt_degree be set later (at run_task() time would be best)?
  rp->set_active_mt_degree(active_workers);
  ReferenceProcessorStats stats;
  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, &task_executor,
                                              &pt);
  } else {
    thread_state_set.flush();
    gch->save_marks();
    stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                              &evacuate_followers, NULL,
                                              &pt);
  }
  _gc_tracer.report_gc_reference_stats(stats);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());
  pt.print_all_references();

  if (!promotion_failed()) {
    // Swap the survivor spaces.
    eden()->clear(SpaceDecorator::Mangle);
    from()->clear(SpaceDecorator::Mangle);
    if (ZapUnusedHeapArea) {
      // This is now done here because of the piecemeal mangling which
      // can check for valid mangling at intermediate points in the
      // collection(s).  When a young collection fails to collect
      // sufficient space, resizing of the young generation can occur
      // and redistribute the spaces in the young generation.  Mangle
      // here so that unzapped regions don't get distributed to
      // other spaces.
      to()->mangle_unused_area();
    }
    swap_spaces();

    // A successful scavenge should restart the GC time limit count which is
    // for full GC's.
    size_policy->reset_gc_overhead_limit_count();

    assert(to()->is_empty(), "to space should be empty now");

    adjust_desired_tenuring_threshold();
  } else {
    handle_promotion_failed(gch, thread_state_set);
  }
  _preserved_marks_set.reclaim();
  // Set new iteration-safe limit for the survivor spaces.
  from()->set_concurrent_iteration_safe_limit(from()->top());
  to()->set_concurrent_iteration_safe_limit(to()->top());

  plab_stats()->adjust_desired_plab_sz();

  TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
  TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());

  if (UseAdaptiveSizePolicy) {
    size_policy->minor_collection_end(gch->gc_cause());
    size_policy->avg_survived()->sample(from()->used());
  }

  // We need to use a monotonically non-decreasing time in ms,
  // or we will see time-warp warnings; os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  update_time_of_last_gc(now);

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
    rp->enqueue_discovered_references(&task_executor, &pt);
  } else {
    rp->enqueue_discovered_references(NULL, &pt);
  }
  rp->verify_no_references_recorded();

  gch->trace_heap_after_gc(gc_tracer());

  pt.print_enqueue_phase();

  _gc_timer->register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
}

size_t ParNewGeneration::desired_plab_sz() {
  return _plab_stats.desired_plab_sz(GenCollectedHeap::heap()->workers()->active_workers());
}

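// Busy-work used by real_forwardee_slow() while it spins waiting for
// another thread to install a real forwarding pointer.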
static int sum;
void ParNewGeneration::waste_some_time() {
  for (int i = 0; i < 100; i++) {
    sum += i;
  }
}

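// Sentinel forwarding value installed while a thread holds the exclusive
// right to install the real forwarding pointer (see copy_to_survivor_space).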
static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);

// Because of concurrency, there are times where an object for which
// "is_forwarded()" is true contains an "interim" forwarding pointer
// value.  Such a value will soon be overwritten with a real value.
// This method requires "obj" to have a forwarding pointer; it waits, if
// necessary, for a real one to be inserted, and returns it.

oop ParNewGeneration::real_forwardee(oop obj) {
  oop forward_ptr = obj->forwardee();
  if (forward_ptr != ClaimedForwardPtr) {
    return forward_ptr;
  } else {
    return real_forwardee_slow(obj);
  }
}

oop ParNewGeneration::real_forwardee_slow(oop obj) {
  // Spin-read if it is claimed but not yet written by another thread.
  oop forward_ptr = obj->forwardee();
  while (forward_ptr == ClaimedForwardPtr) {
    waste_some_time();
    assert(obj->is_forwarded(), "precondition");
    forward_ptr = obj->forwardee();
  }
  return forward_ptr;
}

// Multiple GC threads may try to promote an object.  If the object
// is successfully promoted, a forwarding pointer will be installed in
// the object in the young generation.  This method claims the right
// to install the forwarding pointer before it copies the object,
// thus avoiding the need to undo the copy as in
// copy_to_survivor_space_avoiding_with_undo.

oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
                                             oop old,
                                             size_t sz,
                                             markOop m) {
  // In the sequential version, this assert also says that the object is
  // not forwarded.  That might not be the case here.  It is the case that
  // the caller observed it to be not forwarded at some time in the past.
  assert(is_in_reserved(old), "shouldn't be scavenging this oop");

  // The sequential code read "old->age()" below.  That doesn't work here,
  // since the age is in the mark word, and that might be overwritten with
  // a forwarding pointer by a parallel thread.  So we must save the mark
  // word in a local and then analyze it.
  oopDesc dummyOld;
  dummyOld.set_mark(m);
  assert(!dummyOld.is_forwarded(),
         "should not be called with forwarding pointer mark word.");

  oop new_obj = NULL;
  oop forward_ptr;

  // Try allocating obj in to-space (unless too old).
  if (dummyOld.age() < tenuring_threshold()) {
    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
    if (new_obj == NULL) {
      set_survivor_overflow(true);
    }
  }

  if (new_obj == NULL) {
    // Either to-space is full or we decided to promote; try allocating obj tenured.

    // Attempt to install the ClaimedForwardPtr sentinel (atomically),
    // to claim the right to install the real forwarding pointer.
    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
    if (forward_ptr != NULL) {
      // Someone else beat us to it.
      return real_forwardee(old);
    }

    if (!_promotion_failed) {
      new_obj = _old_gen->par_promote(par_scan_state->thread_num(),
                                      old, m, sz);
    }

    if (new_obj == NULL) {
      // Promotion failed, forward to self.
      _promotion_failed = true;
      new_obj = old;

      par_scan_state->preserved_marks()->push_if_necessary(old, m);
      par_scan_state->register_promotion_failure(sz);
    }

    old->forward_to(new_obj);
    forward_ptr = NULL;
  } else {
    // Is in to-space; do copying ourselves.
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
    assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
    forward_ptr = old->forward_to_atomic(new_obj);
    // Restore the mark word copied above.
    new_obj->set_mark(m);
    // Increment age if obj still in new generation.
    new_obj->incr_age();
    par_scan_state->age_table()->add(new_obj, sz);
  }
  assert(new_obj != NULL, "just checking");

  // This code must come after the CAS test, or it will print incorrect
  // information.
  log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                                  is_in_reserved(new_obj) ? "copying" : "tenuring",
                                  new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());

  if (forward_ptr == NULL) {
    oop obj_to_push = new_obj;
    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
      // Length field used as index of next element to be scanned.
      // Real length can be obtained from real_forwardee().
      arrayOop(old)->set_length(0);
      obj_to_push = old;
      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
             "push forwarded object");
    }
    // Push it on one of the queues of to-be-scanned objects.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
        // Simulate a stack overflow.
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
      // Add stats for overflow pushes.
      log_develop_trace(gc)("Queue Overflow");
      push_on_overflow_list(old, par_scan_state);
      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
    }

    return new_obj;
  }

  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
  // allocate it?
  if (is_in_reserved(new_obj)) {
    // Must be in to_space.
    assert(to()->is_in_reserved(new_obj), "Checking");
    if (forward_ptr == ClaimedForwardPtr) {
      // Wait to get the real forwarding pointer value.
      forward_ptr = real_forwardee(old);
    }
    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
  }

  return forward_ptr;
}

#ifndef PRODUCT
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's OK, in fact
// probably good as it would exercise the overflow code
// under contention.
bool ParNewGeneration::should_simulate_overflow() {
  if (_overflow_counter-- <= 0) { // just being defensive
    _overflow_counter = ParGCWorkQueueOverflowInterval;
    return true;
  } else {
    return false;
  }
}
#endif

// In case we are using compressed oops, we need to be careful.
// If the object being pushed is an object array, then its length
// field keeps track of the "grey boundary" at which the next
// incremental scan will be done (see ParGCArrayScanChunk).
// When using compressed oops, this length field is kept in the
// lower 32 bits of the erstwhile klass word and cannot be used
// for the overflow chaining pointer (OCP below). As such the OCP
// would itself need to be compressed into the top 32-bits in this
// case. Unfortunately, see below, in the event that we have a
// promotion failure, the node to be pushed on the list can be
// outside of the Java heap, so the heap-based pointer compression
// would not work (we would have potential aliasing between C-heap
// and Java-heap pointers). For this reason, when using compressed
// oops, we simply use a worker-thread-local, non-shared overflow
// list in the form of a growable array, with a slightly different
// overflow stack draining strategy. If/when we start using fat
// stacks here, we can go back to using (fat) pointer chains
// (although some performance comparisons would be useful since
// single global lists have their own performance disadvantages
// as we were made painfully aware not long ago, see 6786503).
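// BUSY is a sentinel installed in _overflow_list while a thread is in the
// middle of claiming a prefix of the list (see take_from_overflow_list_work).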
#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
  assert(is_in_reserved(from_space_obj), "Should be from this generation");
  if (ParGCUseLocalOverflow) {
    // We use a private, non-shared overflow stack. (With compressed oops
    // this is the only option; see the comment above.)
    par_scan_state->push_on_overflow_stack(from_space_obj);
  } else {
    assert(!UseCompressedOops, "Error");
    // If the object has been forwarded to itself, then we cannot
    // use the klass pointer for the linked list.  Instead we have
    // to allocate an oopDesc in the C-heap and use that for the linked list.
    // XXX This is horribly inefficient when a promotion failure occurs
    // and should be fixed. XXX FIX ME !!!
#ifndef PRODUCT
    Atomic::inc(&_num_par_pushes);
    assert(_num_par_pushes > 0, "Tautology");
#endif
    if (from_space_obj->forwardee() == from_space_obj) {
      oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC);
      listhead->forward_to(from_space_obj);
      from_space_obj = listhead;
    }
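    // Lock-free push: speculatively thread the new node onto the observed
    // head (or onto NULL if the list is momentarily claimed, i.e. BUSY),
    // then publish it with a CAS, retrying until unchallenged.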
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list;
    do {
      cur_overflow_list = observed_overflow_list;
      if (cur_overflow_list != BUSY) {
        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
      } else {
        from_space_obj->set_klass_to_list_ptr(NULL);
      }
      observed_overflow_list =
        Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list);
    } while (cur_overflow_list != observed_overflow_list);
  }
}
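
// A minimal sketch of the publish loop above, with the klass-word list
// threading abstracted away (illustrative only; Node, next and head are
// hypothetical stand-ins, not HotSpot types):
//
//   void push(Node* n, Node* volatile* head) {
//     Node* observed = *head;   // racy first read; the CAS revalidates it
//     Node* expected;
//     do {
//       expected = observed;
//       n->next  = (expected == BUSY) ? NULL : expected;
//       observed = Atomic::cmpxchg(n, head, expected);  // returns old head
//     } while (expected != observed);
//   }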

bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
  bool res;

  if (ParGCUseLocalOverflow) {
    res = par_scan_state->take_from_overflow_stack();
  } else {
    assert(!UseCompressedOops, "Error");
    res = take_from_overflow_list_work(par_scan_state);
  }
  return res;
}

// *NOTE*: The overflow list manipulation code here and
// in CMSCollector:: are very similar in shape,
// except that in the CMS case we thread the objects
// directly into the list via their mark word, and do
// not need to deal with the special cases below related
// to chunking of object arrays and promotion failure
// handling.
// CR 6797058 has been filed to attempt consolidation of
// the common code.
// Because the code is so similar, if you make any changes
// below, please check whether the CMS version needs the
// same changes.
// See CMSCollector::par_take_from_overflow_list() for
// more extensive documentation comments.
bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
  ObjToScanQueue* work_q = par_scan_state->work_queue();
  // How many to take?
  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
                                 (size_t)ParGCDesiredObjsFromOverflowList);
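  // I.e. take at most a quarter of this queue's remaining capacity, capped
  // by ParGCDesiredObjsFromOverflowList, so that one refill neither
  // overfills this worker's queue nor drains the shared list by itself.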

  assert(!UseCompressedOops, "Error");
  assert(par_scan_state->overflow_stack() == NULL, "Error");
  if (_overflow_list == NULL) return false;

  // Otherwise, there was something there; try claiming the list.
  oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
  // Trim off a prefix of at most objsFromOverflow items
  Thread* tid = Thread::current();
  size_t spin_count = ParallelGCThreads;
  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
    // Someone grabbed it before we did: back off by sleeping briefly,
    // then retry a bounded number of times.
    os::sleep(tid, sleep_time_millis, false);
    if (_overflow_list == NULL) {
      // Nothing left to take.
      return false;
    } else if (_overflow_list != BUSY) {
      // Try and grab the prefix.
      prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
    }
  }
  if (prefix == NULL || prefix == BUSY) {
    // Nothing to take, or we waited long enough.
    if (prefix == NULL) {
      // Write back the NULL in case we overwrote it with BUSY above
      // and it is still the same value.
      (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
    }
    return false;
  }
  assert(prefix != NULL && prefix != BUSY, "Error");
  oop cur = prefix;
  for (size_t i = 1; i < objsFromOverflow; ++i) {
    oop next = cur->list_ptr_from_klass();
    if (next == NULL) break;
    cur = next;
  }
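  // cur now points at the last node of the prefix we will keep, having
  // followed at most objsFromOverflow-1 links from the claimed head.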
  assert(cur != NULL, "Loop postcondition");

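  // Reattach the remaining items (the suffix) to the overflow list.
  // There are three possible outcomes:
  // (1) the suffix is empty: restore the NULL head we replaced with BUSY;
  // (2) the global list is still BUSY (or has become NULL): install the
  //     suffix wholesale with a single CAS;
  // (3) another thread has repopulated the list in the interim: walk to
  //     the suffix's last node and CAS-prepend (splice) the suffix onto
  //     whatever is there now.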
  oop suffix = cur->list_ptr_from_klass();
  if (suffix == NULL) {
    // Write back the NULL in lieu of the BUSY we wrote
    // above, if it is still the same value.
    if (_overflow_list == BUSY) {
      (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
    }
  } else {
    assert(suffix != BUSY, "Error");
    // The suffix will be put back on the global list.
    cur->set_klass_to_list_ptr(NULL);     // break off suffix
    // It's possible that the list is still in the empty (BUSY) state
    // we left it in a short while ago; in that case we may be
    // able to place back the suffix.
    oop observed_overflow_list = _overflow_list;
    oop cur_overflow_list = observed_overflow_list;
    bool attached = false;
    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
      observed_overflow_list =
        Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else {
        cur_overflow_list = observed_overflow_list;
      }
    }
    if (!attached) {
      // Too bad, someone else got in between; we'll need to do a splice.
      // Find the last item of the suffix list.
      oop last = suffix;
      while (true) {
        oop next = last->list_ptr_from_klass();
        if (next == NULL) break;
        last = next;
      }
      // Atomically prepend the suffix to the current overflow list.
      observed_overflow_list = _overflow_list;
      do {
        cur_overflow_list = observed_overflow_list;
        if (cur_overflow_list != BUSY) {
          // Do the splice ...
          last->set_klass_to_list_ptr(cur_overflow_list);
        } else { // cur_overflow_list == BUSY
          last->set_klass_to_list_ptr(NULL);
        }
        observed_overflow_list =
          Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
      } while (cur_overflow_list != observed_overflow_list);
    }
  }

  // Push objects on prefix list onto this thread's work queue
  assert(prefix != NULL && prefix != BUSY, "program logic");
  cur = prefix;
  ssize_t n = 0;
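  // Drain the claimed prefix: restore each node's klass word (undoing the
  // list threading), free any C-heap proxy node that was created for a
  // self-forwarded object, and push the real work onto this worker's queue.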
  while (cur != NULL) {
    oop obj_to_push = cur->forwardee();
    oop next        = cur->list_ptr_from_klass();
    cur->set_klass(obj_to_push->klass());
    // This may be an array object that is self-forwarded. In that case, the
    // list node, cur, is not in the Java heap, but rather in the C-heap,
    // and should be freed.
    if (!is_in_reserved(cur)) {
      // This can become a scaling bottleneck when there is work queue overflow
      // coincident with promotion failure.
      oopDesc* f = cur;
      FREE_C_HEAP_ARRAY(oopDesc, f);
    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
      obj_to_push = cur;
    }
    bool ok = work_q->push(obj_to_push);
    assert(ok, "Should have succeeded");
    cur = next;
    n++;
  }
  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
#ifndef PRODUCT
  assert(_num_par_pushes >= n, "Too many pops?");
  Atomic::sub(n, &_num_par_pushes);
#endif
  return true;
}
#undef BUSY

// Lazily create and initialize the reference processor on first use.
void ParNewGeneration::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_reserved,                  // span
                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
                             ParallelGCThreads,          // mt processing degree
                             refs_discovery_is_mt(),     // mt discovery
                             ParallelGCThreads,          // mt discovery degree
                             refs_discovery_is_atomic(), // atomic_discovery
                             NULL);                      // is_alive_non_header
  }
}

const char* ParNewGeneration::name() const {
  return "par new generation";
}