src/share/vm/gc_implementation/parNew/parNewGeneration.cpp

rev 4773 : 8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>
   1 /*
   2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
  27 #include "gc_implementation/parNew/parNewGeneration.hpp"
  28 #include "gc_implementation/parNew/parOopClosures.inline.hpp"
  29 #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
  30 #include "gc_implementation/shared/ageTable.hpp"
   31 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
   32 #include "gc_implementation/shared/spaceDecorator.hpp"
  33 #include "memory/defNewGeneration.inline.hpp"
  34 #include "memory/genCollectedHeap.hpp"
  35 #include "memory/genOopClosures.inline.hpp"
  36 #include "memory/generation.hpp"
  37 #include "memory/generation.inline.hpp"
  38 #include "memory/referencePolicy.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/sharedHeap.hpp"
  41 #include "memory/space.hpp"
  42 #include "oops/objArrayOop.hpp"
  43 #include "oops/oop.inline.hpp"
  44 #include "oops/oop.pcgc.inline.hpp"
  45 #include "runtime/handles.hpp"
  46 #include "runtime/handles.inline.hpp"
  47 #include "runtime/java.hpp"
  48 #include "runtime/thread.hpp"
  49 #include "utilities/copy.hpp"
  50 #include "utilities/globalDefinitions.hpp"
  51 #include "utilities/workgroup.hpp"


  58                                        ParNewGeneration* gen_,
  59                                        Generation* old_gen_,
  60                                        int thread_num_,
  61                                        ObjToScanQueueSet* work_queue_set_,
  62                                        Stack<oop, mtGC>* overflow_stacks_,
  63                                        size_t desired_plab_sz_,
  64                                        ParallelTaskTerminator& term_) :
  65   _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  66   _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  67   _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  68   _ageTable(false), // false ==> not the global age table, no perf data.
  69   _to_space_alloc_buffer(desired_plab_sz_),
  70   _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  71   _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  72   _older_gen_closure(gen_, this),
  73   _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
  74                       &_to_space_root_closure, gen_, &_old_gen_root_closure,
  75                       work_queue_set_, &term_),
  76   _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  77   _keep_alive_closure(&_scan_weak_ref_closure),
  78   _promotion_failure_size(0),
  79   _strong_roots_time(0.0), _term_time(0.0)
  80 {
  81   #if TASKQUEUE_STATS
  82   _term_attempts = 0;
  83   _overflow_refills = 0;
  84   _overflow_refill_objs = 0;
  85   #endif // TASKQUEUE_STATS
  86 
  87   _survivor_chunk_array =
  88     (ChunkArray*) old_gen()->get_data_recorder(thread_num());
   89   _hash_seed = 17;  // Might want to take a time-based random value.
  90   _start = os::elapsedTime();
  91   _old_gen_closure.set_generation(old_gen_);
  92   _old_gen_root_closure.set_generation(old_gen_);
  93 }
  94 #ifdef _MSC_VER
  95 #pragma warning( pop )
  96 #endif
  97 
  98 void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,


 262       // Too large; allocate the object individually.
 263       obj = sp->par_allocate(word_sz);
 264     }
 265   }
 266   return obj;
 267 }
 268 
 269 
 270 void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
 271                                                 size_t word_sz) {
 272   // Is the alloc in the current alloc buffer?
 273   if (to_space_alloc_buffer()->contains(obj)) {
 274     assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
 275            "Should contain whole object.");
 276     to_space_alloc_buffer()->undo_allocation(obj, word_sz);
 277   } else {
 278     CollectedHeap::fill_with_object(obj, word_sz);
 279   }
 280 }
 281 
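The undo-or-fill split above follows from the PLAB being a bump-pointer buffer: an allocation can only be retracted while it is still the newest one; anything else must be overwritten with a dummy object so the space stays walkable for later scans. A minimal stand-alone sketch of the idea, with toy types rather than HotSpot's PLAB classes:

    #include <cstddef>

    // Toy bump-pointer buffer. undo_allocation() rewinds the top pointer
    // when obj is the newest allocation; otherwise it plugs the words with
    // filler so the buffer remains parseable object-by-object.
    struct ToyBuffer {
      size_t* base;
      size_t* top;
      size_t* end;

      size_t* allocate(size_t words) {
        if (top + words > end) return nullptr;
        size_t* res = top;
        top += words;
        return res;
      }

      void undo_allocation(size_t* obj, size_t words) {
        if (obj + words == top) {
          top = obj;                       // newest allocation: just rewind
        } else {
          for (size_t i = 0; i < words; ++i) {
            obj[i] = 0;                    // stand-in for fill_with_object()
          }
        }
      }
    };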
 282 void ParScanThreadState::print_and_clear_promotion_failure_size() {
 283   if (_promotion_failure_size != 0) {
 284     if (PrintPromotionFailure) {
 285       gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
 286         _thread_num, _promotion_failure_size);
 287     }
 288     _promotion_failure_size = 0;
 289   }
 290 }
 291 
 292 class ParScanThreadStateSet: private ResourceArray {
 293 public:
  294   // Initializes states for the specified number of threads.
 295   ParScanThreadStateSet(int                     num_threads,
 296                         Space&                  to_space,
 297                         ParNewGeneration&       gen,
 298                         Generation&             old_gen,
 299                         ObjToScanQueueSet&      queue_set,
 300                         Stack<oop, mtGC>*       overflow_stacks_,
 301                         size_t                  desired_plab_sz,
 302                         ParallelTaskTerminator& term);
 303 
 304   ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }
 305 
 306   inline ParScanThreadState& thread_state(int i);
  307 
  308   void reset(int active_workers, bool promotion_failed);
 309   void flush();
 310 
 311   #if TASKQUEUE_STATS
 312   static void
 313     print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
 314   void print_termination_stats(outputStream* const st = gclog_or_tty);
 315   static void
 316     print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
 317   void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
 318   void reset_stats();
 319   #endif // TASKQUEUE_STATS
 320 
 321 private:
 322   ParallelTaskTerminator& _term;
 323   ParNewGeneration&       _gen;
 324   Generation&             _next_gen;
 325  public:
 326   bool is_valid(int id) const { return id < length(); }
 327   ParallelTaskTerminator* terminator() { return &_term; }


 336   : ResourceArray(sizeof(ParScanThreadState), num_threads),
 337     _gen(gen), _next_gen(old_gen), _term(term)
 338 {
 339   assert(num_threads > 0, "sanity check!");
 340   assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
 341          "overflow_stack allocation mismatch");
 342   // Initialize states.
 343   for (int i = 0; i < num_threads; ++i) {
 344     new ((ParScanThreadState*)_data + i)
 345         ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
 346                            overflow_stacks, desired_plab_sz, term);
 347   }
 348 }
 349 
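ResourceArray only reserves raw storage, so the constructor above builds each ParScanThreadState in place with placement new. A self-contained sketch of that pattern (the Worker type and buffer handling here are hypothetical, not HotSpot code):

    #include <cstdlib>
    #include <new>      // placement new

    struct Worker {
      int id;
      explicit Worker(int i) : id(i) {}
    };

    int main() {
      const int n = 4;
      void* raw = std::malloc(sizeof(Worker) * n);       // storage only
      for (int i = 0; i < n; ++i) {
        new (static_cast<Worker*>(raw) + i) Worker(i);   // construct in place
      }
      Worker* workers = static_cast<Worker*>(raw);
      int sum = 0;
      for (int i = 0; i < n; ++i) sum += workers[i].id;
      for (int i = 0; i < n; ++i) workers[i].~Worker();  // manual destruction
      std::free(raw);
      return sum;
    }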
 350 inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
 351 {
 352   assert(i >= 0 && i < length(), "sanity check!");
 353   return ((ParScanThreadState*)_data)[i];
 354 }
 355 
 356 
 357 void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
 358 {
 359   _term.reset_for_reuse(active_threads);
 360   if (promotion_failed) {
 361     for (int i = 0; i < length(); ++i) {
 362       thread_state(i).print_and_clear_promotion_failure_size();
 363     }
 364   }
 365 }
 366 
 367 #if TASKQUEUE_STATS
 368 void
 369 ParScanThreadState::reset_stats()
 370 {
 371   taskqueue_stats().reset();
 372   _term_attempts = 0;
 373   _overflow_refills = 0;
 374   _overflow_refill_objs = 0;
 375 }
 376 
 377 void ParScanThreadStateSet::reset_stats()
 378 {
 379   for (int i = 0; i < length(); ++i) {
 380     thread_state(i).reset_stats();
 381   }
 382 }


 566 }
 567 
 568 ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
 569                 HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
 570     AbstractGangTask("ParNewGeneration collection"),
 571     _gen(gen), _next_gen(next_gen),
 572     _young_old_boundary(young_old_boundary),
 573     _state_set(state_set)
 574   {}
 575 
 576 // Reset the terminator for the given number of
 577 // active threads.
 578 void ParNewGenTask::set_for_termination(int active_workers) {
 579   _state_set->reset(active_workers, _gen->promotion_failed());
 580   // Should the heap be passed in?  There's only 1 for now so
 581   // grab it instead.
 582   GenCollectedHeap* gch = GenCollectedHeap::heap();
 583   gch->set_n_termination(active_workers);
 584 }
 585 
 586 // The "i" passed to this method is the part of the work for
 587 // this thread.  It is not the worker ID.  The "i" is derived
 588 // from _started_workers, which is incremented in internal_note_start()
 589 // called in the GangWorker loop().  internal_note_start() runs under
 590 // the protection of the gang monitor and is called after a task is
 591 // started.  So "i" is based on
 592 // first-come-first-served.
 593 
 594 void ParNewGenTask::work(uint worker_id) {
 595   GenCollectedHeap* gch = GenCollectedHeap::heap();
 596   // Since this is being done in a separate thread, need new resource
 597   // and handle marks.
 598   ResourceMark rm;
 599   HandleMark hm;
 600   // We would need multiple old-gen queues otherwise.
 601   assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");
 602 
 603   Generation* old_gen = gch->next_gen(_gen);
 604 
 605   ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
 606   assert(_state_set->is_valid(worker_id), "Should not have been called");
 607 
 608   par_scan_state.set_young_old_boundary(_young_old_boundary);
 609 
 610   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
 611                                       gch->rem_set()->klass_rem_set());
 612 
 613   int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;


 859 
 860 EvacuateFollowersClosureGeneral::
 861 EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
 862                                 OopsInGenClosure* cur,
 863                                 OopsInGenClosure* older) :
 864   _gch(gch), _level(level),
 865   _scan_cur_or_nonheap(cur), _scan_older(older)
 866 {}
 867 
 868 void EvacuateFollowersClosureGeneral::do_void() {
 869   do {
 870     // Beware: this call will lead to closure applications via virtual
 871     // calls.
 872     _gch->oop_since_save_marks_iterate(_level,
 873                                        _scan_cur_or_nonheap,
 874                                        _scan_older);
 875   } while (!_gch->no_allocs_since_save_marks(_level));
 876 }
 877 
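do_void() above is a fixed-point loop: scanning everything allocated since the last save_marks can itself copy new objects past the mark, so it iterates until a full pass allocates nothing. A single-threaded toy sketch of the same idiom (hypothetical names, not the HotSpot iterators):

    #include <cstddef>
    #include <vector>

    // Scan all entries above the saved mark; scanning may append new work.
    // Repeat until a whole pass discovers nothing new.
    void evacuate_followers(std::vector<int>& copied, size_t& saved_mark) {
      do {
        size_t scan_limit = copied.size();
        for (size_t i = saved_mark; i < scan_limit; ++i) {
          if (copied[i] > 0) {
            copied.push_back(copied[i] - 1);   // "discover" a follower
          }
        }
        saved_mark = scan_limit;               // everything below is scanned
      } while (saved_mark < copied.size());    // new allocs since save mark?
    }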
 878 


 879 bool ParNewGeneration::_avoid_promotion_undo = false;
 880 
 881 // A Generation that does parallel young-gen collection.
 882 
 883 void ParNewGeneration::collect(bool   full,
 884                                bool   clear_all_soft_refs,
 885                                size_t size,
 886                                bool   is_tlab) {
 887   assert(full || size > 0, "otherwise we don't want to collect");
 888   GenCollectedHeap* gch = GenCollectedHeap::heap();
 889   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
 890     "not a CMS generational heap");
 891   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
 892   FlexibleWorkGang* workers = gch->workers();
 893   assert(workers != NULL, "Need workgang for parallel work");
 894   int active_workers =
 895       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
 896                                    workers->active_workers(),
 897                                    Threads::number_of_non_daemon_threads());
 898   workers->set_active_workers(active_workers);
 899   _next_gen = gch->next_gen(this);
 900   assert(_next_gen != NULL,
 901     "This must be the youngest gen, and not the only gen");
 902   assert(gch->n_gens() == 2,
 903          "Par collection currently only works with single older gen.");
 904   // Do we have to avoid promotion_undo?
 905   if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
 906     set_avoid_promotion_undo(true);
 907   }
 908 
 909   // If the next generation is too full to accommodate worst-case promotion
 910   // from this generation, pass on collection; let the next generation
 911   // do it.
 912   if (!collection_attempt_is_safe()) {
 913     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 914     return;
 915   }
 916   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 917   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 918   init_assuming_no_promotion_failure();
 919 
 920   if (UseAdaptiveSizePolicy) {
 921     set_survivor_overflow(false);
 922     size_policy->minor_collection_begin();
 923   }
 924 
 925   TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);
 926   // Capture heap used before collection (for printing).
 927   size_t gch_prev_used = gch->used();
 928 
 929   SpecializationStats::clear();
 930 
 931   age_table()->clear();
 932   to()->clear(SpaceDecorator::Mangle);
 933 
 934   gch->save_marks();
 935   assert(workers != NULL, "Need parallel worker threads.");
 936   int n_workers = active_workers;
 937 
 938   // Set the correct parallelism (number of queues) in the reference processor
 939   ref_processor()->set_active_mt_degree(n_workers);
 940 
 941   // Always set the terminator for the active number of workers
 942   // because only those workers go through the termination protocol.
 943   ParallelTaskTerminator _term(n_workers, task_queues());
 944   ParScanThreadStateSet thread_state_set(workers->active_workers(),
 945                                          *to(), *this, *_next_gen, *task_queues(),


 958   } else {
 959     GenCollectedHeap::StrongRootsScope srs(gch);
 960     tsk.work(0);
 961   }
 962   thread_state_set.reset(0 /* Bad value in debug if not reset */,
 963                          promotion_failed());
 964 
 965   // Process (weak) reference objects found during scavenge.
 966   ReferenceProcessor* rp = ref_processor();
 967   IsAliveClosure is_alive(this);
 968   ScanWeakRefClosure scan_weak_ref(this);
 969   KeepAliveClosure keep_alive(&scan_weak_ref);
 970   ScanClosure               scan_without_gc_barrier(this, false);
 971   ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
 972   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
 973   EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
 974     &scan_without_gc_barrier, &scan_with_gc_barrier);
 975   rp->setup_policy(clear_all_soft_refs);
 976   // Can the mt_degree be set later (at run_task() time would be best)?
 977   rp->set_active_mt_degree(active_workers);
 978   if (rp->processing_is_mt()) {
 979     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
 980     rp->process_discovered_references(&is_alive, &keep_alive,
 981                                       &evacuate_followers, &task_executor);
 982   } else {
 983     thread_state_set.flush();
 984     gch->set_par_threads(0);  // 0 ==> non-parallel.
 985     gch->save_marks();
 986     rp->process_discovered_references(&is_alive, &keep_alive,
 987                                       &evacuate_followers, NULL);
 988   }
 989   if (!promotion_failed()) {
 990     // Swap the survivor spaces.
 991     eden()->clear(SpaceDecorator::Mangle);
 992     from()->clear(SpaceDecorator::Mangle);
 993     if (ZapUnusedHeapArea) {
 994       // This is now done here because of the piece-meal mangling which
 995       // can check for valid mangling at intermediate points in the
 996       // collection(s).  When a minor collection fails to collect
 997       // sufficient space, resizing of the young generation can occur
 998       // and redistribute the spaces in the young generation.  Mangle
 999       // here so that unzapped regions don't get distributed to
1000       // other spaces.
1001       to()->mangle_unused_area();
1002     }
1003     swap_spaces();
1004 
1005     // A successful scavenge should restart the GC time limit count which is
1006     // for full GC's.
1007     size_policy->reset_gc_overhead_limit_count();
1008 
1009     assert(to()->is_empty(), "to space should be empty now");
1010 
1011     adjust_desired_tenuring_threshold();
1012   } else {
1013     assert(_promo_failure_scan_stack.is_empty(), "post condition");
1014     _promo_failure_scan_stack.clear(true); // Clear cached segments.
1015 
1016     remove_forwarding_pointers();
1017     if (PrintGCDetails) {
1018       gclog_or_tty->print(" (promotion failed)");
1019     }
1020     // All the spaces are in play for mark-sweep.
1021     swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
1022     from()->set_next_compaction_space(to());
1023     gch->set_incremental_collection_failed();
1024     // Inform the next generation that a promotion failure occurred.
1025     _next_gen->promotion_failure_occurred();
1026 
1027     // Reset the PromotionFailureALot counters.
1028     NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
1029   }
1030   // set new iteration safe limit for the survivor spaces
1031   from()->set_concurrent_iteration_safe_limit(from()->top());
1032   to()->set_concurrent_iteration_safe_limit(to()->top());
1033 
1034   if (ResizePLAB) {
1035     plab_stats()->adjust_desired_plab_sz(n_workers);
1036   }
1037 
1038   if (PrintGC && !PrintGCDetails) {
1039     gch->print_heap_change(gch_prev_used);
1040   }
1041 
1042   if (PrintGCDetails && ParallelGCVerbose) {
1043     TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
1044     TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
1045   }
1046 
1047   if (UseAdaptiveSizePolicy) {
1048     size_policy->minor_collection_end(gch->gc_cause());
1049     size_policy->avg_survived()->sample(from()->used());
1050   }
1051 
1052   // We need to use a monotonically non-decreasing time in ms
1053   // or we will see time-warp warnings; os::javaTimeMillis()
1054   // does not guarantee monotonicity.
1055   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1056   update_time_of_last_gc(now);
1057 
1058   SpecializationStats::print();
1059 
1060   rp->set_enqueuing_is_done(true);
1061   if (rp->processing_is_mt()) {
1062     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
1063     rp->enqueue_discovered_references(&task_executor);
1064   } else {
1065     rp->enqueue_discovered_references(NULL);
1066   }
1067   rp->verify_no_references_recorded();
1068 }
1069 
1070 static int sum;
1071 void ParNewGeneration::waste_some_time() {
1072   for (int i = 0; i < 100; i++) {
1073     sum += i;
1074   }
1075 }
1076 
1077 static const oop ClaimedForwardPtr = oop(0x4);
1078 
1079 // Because of concurrency, there are times where an object for which
1080 // "is_forwarded()" is true contains an "interim" forwarding pointer
1081 // value.  Such a value will soon be overwritten with a real value.
1082 // This method requires "obj" to have a forwarding pointer, and waits, if
1083 // necessary for a real one to be inserted, and returns it.
1084 
1085 oop ParNewGeneration::real_forwardee(oop obj) {
1086   oop forward_ptr = obj->forwardee();
1087   if (forward_ptr != ClaimedForwardPtr) {
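The interim value mentioned above is the ClaimedForwardPtr sentinel: a copying thread CASes it into the forwarding slot to claim the object, then publishes the real forwardee, so a concurrent reader that sees the sentinel simply waits it out. A toy sketch of that claim-then-publish protocol using std::atomic (hypothetical types; the real code encodes this in the mark word):

    #include <atomic>
    #include <thread>

    static void* const kClaimed = reinterpret_cast<void*>(0x4);

    struct ToyObj {
      std::atomic<void*> forwardee{nullptr};

      // CAS nullptr -> v. Returns nullptr if we won the claim, otherwise
      // the value some other thread already installed.
      void* forward_to_atomic(void* v) {
        void* expected = nullptr;
        if (forwardee.compare_exchange_strong(expected, v)) return nullptr;
        return expected;
      }
    };

    // Requires obj to have been claimed or forwarded; spins until the
    // interim sentinel is replaced by the real forwarding pointer.
    void* real_forwardee_toy(ToyObj* obj) {
      void* fwd = obj->forwardee.load();
      while (fwd == kClaimed) {
        std::this_thread::yield();
        fwd = obj->forwardee.load();
      }
      return fwd;
    }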


1157     // Either to-space is full or we decided to promote;
1158     // try allocating obj tenured.
1159 
1160     // Attempt to install a null forwarding pointer (atomically),
1161     // to claim the right to install the real forwarding pointer.
1162     forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
1163     if (forward_ptr != NULL) {
1164       // someone else beat us to it.
1165       return real_forwardee(old);
1166     }
1167 
1168     new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
1169                                        old, m, sz);
1170 
1171     if (new_obj == NULL) {
1172       // promotion failed, forward to self
1173       _promotion_failed = true;
1174       new_obj = old;
1175 
1176       preserve_mark_if_necessary(old, m);
1177       // Log the size of the maiden promotion failure
1178       par_scan_state->log_promotion_failure(sz);
1179     }
1180 
1181     old->forward_to(new_obj);
1182     forward_ptr = NULL;
1183   } else {
1184     // Is in to-space; do copying ourselves.
1185     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
1186     forward_ptr = old->forward_to_atomic(new_obj);
1187     // Restore the mark word copied above.
1188     new_obj->set_mark(m);
1189     // Increment age if obj still in new generation
1190     new_obj->incr_age();
1191     par_scan_state->age_table()->add(new_obj, sz);
1192   }
1193   assert(new_obj != NULL, "just checking");
1194 
1195 #ifndef PRODUCT
1196   // This code must come after the CAS test, or it will print incorrect
1197   // information.
1198   if (TraceScavenge) {


1283 
1284   if (new_obj == NULL) {
1285     // Either to-space is full or we decided to promote;
1286     // try allocating obj tenured.
1287     new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
1288                                        old, m, sz);
1289 
1290     if (new_obj == NULL) {
1291       // promotion failed, forward to self
1292       forward_ptr = old->forward_to_atomic(old);
1293       new_obj = old;
1294 
1295       if (forward_ptr != NULL) {
1296         return forward_ptr;   // someone else succeeded
1297       }
1298 
1299       _promotion_failed = true;
1300       failed_to_promote = true;
1301 
1302       preserve_mark_if_necessary(old, m);
1303       // Log the size of the maiden promotion failure
1304       par_scan_state->log_promotion_failure(sz);
1305     }
1306   } else {
1307     // Is in to-space; do copying ourselves.
1308     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
1309     // Restore the mark word copied above.
1310     new_obj->set_mark(m);
1311     // Increment age if new_obj still in new generation
1312     new_obj->incr_age();
1313     par_scan_state->age_table()->add(new_obj, sz);
1314   }
1315   assert(new_obj != NULL, "just checking");
1316 
1317 #ifndef PRODUCT
1318   // This code must come after the CAS test, or it will print incorrect
1319   // information.
1320   if (TraceScavenge) {
1321     gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
1322        is_in_reserved(new_obj) ? "copying" : "tenuring",
1323        new_obj->klass()->internal_name(), old, new_obj, new_obj->size());
1324   }


1582       oopDesc* f = cur;
1583       FREE_C_HEAP_ARRAY(oopDesc, f, mtGC);
1584     } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
1585       assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
1586       obj_to_push = cur;
1587     }
1588     bool ok = work_q->push(obj_to_push);
1589     assert(ok, "Should have succeeded");
1590     cur = next;
1591     n++;
1592   }
1593   TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
1594 #ifndef PRODUCT
1595   assert(_num_par_pushes >= n, "Too many pops?");
1596   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
1597 #endif
1598   return true;
1599 }
1600 #undef BUSY
1601 
1602 void ParNewGeneration::ref_processor_init()
1603 {
1604   if (_ref_processor == NULL) {
1605     // Allocate and initialize a reference processor
1606     _ref_processor =
1607       new ReferenceProcessor(_reserved,                  // span
1608                              ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
1609                              (int) ParallelGCThreads,    // mt processing degree
1610                              refs_discovery_is_mt(),     // mt discovery
1611                              (int) ParallelGCThreads,    // mt discovery degree
1612                              refs_discovery_is_atomic(), // atomic_discovery
1613                              NULL,                       // is_alive_non_header
1614                              false);                     // write barrier for next field updates
1615   }
1616 }
1617 
1618 const char* ParNewGeneration::name() const {
1619   return "par new generation";
1620 }
   1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
  27 #include "gc_implementation/parNew/parNewGeneration.hpp"
  28 #include "gc_implementation/parNew/parOopClosures.inline.hpp"
  29 #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
  30 #include "gc_implementation/shared/ageTable.hpp"
  31 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
  32 #include "gc_implementation/shared/gcHeapSummary.hpp"
  33 #include "gc_implementation/shared/gcTimer.hpp"
  34 #include "gc_implementation/shared/gcTrace.hpp"
  35 #include "gc_implementation/shared/gcTraceTime.hpp"
  36 #include "gc_implementation/shared/copyFailedInfo.hpp"
  37 #include "gc_implementation/shared/spaceDecorator.hpp"
  38 #include "memory/defNewGeneration.inline.hpp"
  39 #include "memory/genCollectedHeap.hpp"
  40 #include "memory/genOopClosures.inline.hpp"
  41 #include "memory/generation.hpp"
  42 #include "memory/generation.inline.hpp"
  43 #include "memory/referencePolicy.hpp"
  44 #include "memory/resourceArea.hpp"
  45 #include "memory/sharedHeap.hpp"
  46 #include "memory/space.hpp"
  47 #include "oops/objArrayOop.hpp"
  48 #include "oops/oop.inline.hpp"
  49 #include "oops/oop.pcgc.inline.hpp"
  50 #include "runtime/handles.hpp"
  51 #include "runtime/handles.inline.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/thread.hpp"
  54 #include "utilities/copy.hpp"
  55 #include "utilities/globalDefinitions.hpp"
  56 #include "utilities/workgroup.hpp"


  63                                        ParNewGeneration* gen_,
  64                                        Generation* old_gen_,
  65                                        int thread_num_,
  66                                        ObjToScanQueueSet* work_queue_set_,
  67                                        Stack<oop, mtGC>* overflow_stacks_,
  68                                        size_t desired_plab_sz_,
  69                                        ParallelTaskTerminator& term_) :
  70   _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_),
  71   _work_queue(work_queue_set_->queue(thread_num_)), _to_space_full(false),
  72   _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
  73   _ageTable(false), // false ==> not the global age table, no perf data.
  74   _to_space_alloc_buffer(desired_plab_sz_),
  75   _to_space_closure(gen_, this), _old_gen_closure(gen_, this),
  76   _to_space_root_closure(gen_, this), _old_gen_root_closure(gen_, this),
  77   _older_gen_closure(gen_, this),
  78   _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
  79                       &_to_space_root_closure, gen_, &_old_gen_root_closure,
  80                       work_queue_set_, &term_),
  81   _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
  82   _keep_alive_closure(&_scan_weak_ref_closure),
  83   _strong_roots_time(0.0), _term_time(0.0)
  84 {
  85   #if TASKQUEUE_STATS
  86   _term_attempts = 0;
  87   _overflow_refills = 0;
  88   _overflow_refill_objs = 0;
  89   #endif // TASKQUEUE_STATS
  90 
  91   _survivor_chunk_array =
  92     (ChunkArray*) old_gen()->get_data_recorder(thread_num());
   93   _hash_seed = 17;  // Might want to take a time-based random value.
  94   _start = os::elapsedTime();
  95   _old_gen_closure.set_generation(old_gen_);
  96   _old_gen_root_closure.set_generation(old_gen_);
  97 }
  98 #ifdef _MSC_VER
  99 #pragma warning( pop )
 100 #endif
 101 
 102 void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,


 266       // Too large; allocate the object individually.
 267       obj = sp->par_allocate(word_sz);
 268     }
 269   }
 270   return obj;
 271 }
 272 
 273 
 274 void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj,
 275                                                 size_t word_sz) {
 276   // Is the alloc in the current alloc buffer?
 277   if (to_space_alloc_buffer()->contains(obj)) {
 278     assert(to_space_alloc_buffer()->contains(obj + word_sz - 1),
 279            "Should contain whole object.");
 280     to_space_alloc_buffer()->undo_allocation(obj, word_sz);
 281   } else {
 282     CollectedHeap::fill_with_object(obj, word_sz);
 283   }
 284 }
 285 
 286 void ParScanThreadState::print_promotion_failure_size() {
 287   if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
 288     gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
 289                         _thread_num, _promotion_failed_info.first_size());
 290   }
 291 }
 292 
 293 class ParScanThreadStateSet: private ResourceArray {
 294 public:
  295   // Initializes states for the specified number of threads.
 296   ParScanThreadStateSet(int                     num_threads,
 297                         Space&                  to_space,
 298                         ParNewGeneration&       gen,
 299                         Generation&             old_gen,
 300                         ObjToScanQueueSet&      queue_set,
 301                         Stack<oop, mtGC>*       overflow_stacks_,
 302                         size_t                  desired_plab_sz,
 303                         ParallelTaskTerminator& term);
 304 
 305   ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }
 306 
 307   inline ParScanThreadState& thread_state(int i);
 308 
 309   void trace_promotion_failed(YoungGCTracer& gc_tracer);
 310   void reset(int active_workers, bool promotion_failed);
 311   void flush();
 312 
 313   #if TASKQUEUE_STATS
 314   static void
 315     print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
 316   void print_termination_stats(outputStream* const st = gclog_or_tty);
 317   static void
 318     print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
 319   void print_taskqueue_stats(outputStream* const st = gclog_or_tty);
 320   void reset_stats();
 321   #endif // TASKQUEUE_STATS
 322 
 323 private:
 324   ParallelTaskTerminator& _term;
 325   ParNewGeneration&       _gen;
 326   Generation&             _next_gen;
 327  public:
 328   bool is_valid(int id) const { return id < length(); }
 329   ParallelTaskTerminator* terminator() { return &_term; }


 338   : ResourceArray(sizeof(ParScanThreadState), num_threads),
 339     _gen(gen), _next_gen(old_gen), _term(term)
 340 {
 341   assert(num_threads > 0, "sanity check!");
 342   assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
 343          "overflow_stack allocation mismatch");
 344   // Initialize states.
 345   for (int i = 0; i < num_threads; ++i) {
 346     new ((ParScanThreadState*)_data + i)
 347         ParScanThreadState(&to_space, &gen, &old_gen, i, &queue_set,
 348                            overflow_stacks, desired_plab_sz, term);
 349   }
 350 }
 351 
 352 inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
 353 {
 354   assert(i >= 0 && i < length(), "sanity check!");
 355   return ((ParScanThreadState*)_data)[i];
 356 }
 357 
 358 void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
 359   for (int i = 0; i < length(); ++i) {
 360     if (thread_state(i).promotion_failed()) {
 361       gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
 362       thread_state(i).promotion_failed_info().reset();
 363     }
 364   }
 365 }
 366 
 367 void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
 368 {
 369   _term.reset_for_reuse(active_threads);
 370   if (promotion_failed) {
 371     for (int i = 0; i < length(); ++i) {
 372       thread_state(i).print_promotion_failure_size();
 373     }
 374   }
 375 }
 376 
 377 #if TASKQUEUE_STATS
 378 void
 379 ParScanThreadState::reset_stats()
 380 {
 381   taskqueue_stats().reset();
 382   _term_attempts = 0;
 383   _overflow_refills = 0;
 384   _overflow_refill_objs = 0;
 385 }
 386 
 387 void ParScanThreadStateSet::reset_stats()
 388 {
 389   for (int i = 0; i < length(); ++i) {
 390     thread_state(i).reset_stats();
 391   }
 392 }


 576 }
 577 
 578 ParNewGenTask::ParNewGenTask(ParNewGeneration* gen, Generation* next_gen,
 579                 HeapWord* young_old_boundary, ParScanThreadStateSet* state_set) :
 580     AbstractGangTask("ParNewGeneration collection"),
 581     _gen(gen), _next_gen(next_gen),
 582     _young_old_boundary(young_old_boundary),
 583     _state_set(state_set)
 584   {}
 585 
 586 // Reset the terminator for the given number of
 587 // active threads.
 588 void ParNewGenTask::set_for_termination(int active_workers) {
 589   _state_set->reset(active_workers, _gen->promotion_failed());
 590   // Should the heap be passed in?  There's only 1 for now so
 591   // grab it instead.
 592   GenCollectedHeap* gch = GenCollectedHeap::heap();
 593   gch->set_n_termination(active_workers);
 594 }
 595 
 596 void ParNewGenTask::work(uint worker_id) {
 597   GenCollectedHeap* gch = GenCollectedHeap::heap();
 598   // Since this is being done in a separate thread, need new resource
 599   // and handle marks.
 600   ResourceMark rm;
 601   HandleMark hm;
 602   // We would need multiple old-gen queues otherwise.
 603   assert(gch->n_gens() == 2, "Par young collection currently only works with one older gen.");
 604 
 605   Generation* old_gen = gch->next_gen(_gen);
 606 
 607   ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
 608   assert(_state_set->is_valid(worker_id), "Should not have been called");
 609 
 610   par_scan_state.set_young_old_boundary(_young_old_boundary);
 611 
 612   KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
 613                                       gch->rem_set()->klass_rem_set());
 614 
 615   int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;


 861 
 862 EvacuateFollowersClosureGeneral::
 863 EvacuateFollowersClosureGeneral(GenCollectedHeap* gch, int level,
 864                                 OopsInGenClosure* cur,
 865                                 OopsInGenClosure* older) :
 866   _gch(gch), _level(level),
 867   _scan_cur_or_nonheap(cur), _scan_older(older)
 868 {}
 869 
 870 void EvacuateFollowersClosureGeneral::do_void() {
 871   do {
 872     // Beware: this call will lead to closure applications via virtual
 873     // calls.
 874     _gch->oop_since_save_marks_iterate(_level,
 875                                        _scan_cur_or_nonheap,
 876                                        _scan_older);
 877   } while (!_gch->no_allocs_since_save_marks(_level));
 878 }
 879 
 880 
 881 // A Generation that does parallel young-gen collection.
 882 
 883 bool ParNewGeneration::_avoid_promotion_undo = false;
 884 
 885 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
 886   assert(_promo_failure_scan_stack.is_empty(), "post condition");
 887   _promo_failure_scan_stack.clear(true); // Clear cached segments.
 888 
 889   remove_forwarding_pointers();
 890   if (PrintGCDetails) {
 891     gclog_or_tty->print(" (promotion failed)");
 892   }
 893   // All the spaces are in play for mark-sweep.
 894   swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
 895   from()->set_next_compaction_space(to());
 896   gch->set_incremental_collection_failed();
 897   // Inform the next generation that a promotion failure occurred.
 898   _next_gen->promotion_failure_occurred();
 899 
 900   // Trace promotion failure in the parallel GC threads
 901   thread_state_set.trace_promotion_failed(gc_tracer);
 902   // Single threaded code may have reported promotion failure to the global state
 903   if (_promotion_failed_info.has_failed()) {
 904     gc_tracer.report_promotion_failed(_promotion_failed_info);
 905   }
 906   // Reset the PromotionFailureALot counters.
 907   NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
 908 }
 909 
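handle_promotion_failed() above gathers failure records from two places: each parallel worker's ParScanThreadState (via trace_promotion_failed()) and, for single-threaded paths, the generation's own _promotion_failed_info; each record is reported to the tracer and then cleared so it is counted once. A condensed sketch of that aggregation with stand-in types:

    #include <vector>

    struct FailedInfo {
      bool   failed = false;
      size_t first_size = 0;
      void reset() { failed = false; first_size = 0; }
    };

    template <typename Reporter>
    void report_promotion_failures(std::vector<FailedInfo>& per_thread,
                                   FailedInfo& global, Reporter report) {
      for (FailedInfo& fi : per_thread) {   // one record per GC worker
        if (fi.failed) { report(fi); fi.reset(); }
      }
      if (global.failed) {                  // single-threaded code path
        report(global);
        global.reset();
      }
    }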
 910 void ParNewGeneration::collect(bool   full,
 911                                bool   clear_all_soft_refs,
 912                                size_t size,
 913                                bool   is_tlab) {
 914   assert(full || size > 0, "otherwise we don't want to collect");
 915 
 916   GenCollectedHeap* gch = GenCollectedHeap::heap();
 917 
 918   _gc_timer->register_gc_start(os::elapsed_counter());
 919 
 920   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
 921     "not a CMS generational heap");
 922   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
 923   FlexibleWorkGang* workers = gch->workers();
 924   assert(workers != NULL, "Need workgang for parallel work");
 925   int active_workers =
 926       AdaptiveSizePolicy::calc_active_workers(workers->total_workers(),
 927                                    workers->active_workers(),
 928                                    Threads::number_of_non_daemon_threads());
 929   workers->set_active_workers(active_workers);
 930   _next_gen = gch->next_gen(this);
 931   assert(_next_gen != NULL,
 932     "This must be the youngest gen, and not the only gen");
 933   assert(gch->n_gens() == 2,
 934          "Par collection currently only works with single older gen.");
 935   // Do we have to avoid promotion_undo?
 936   if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
 937     set_avoid_promotion_undo(true);
 938   }
 939 
 940   // If the next generation is too full to accommodate worst-case promotion
 941   // from this generation, pass on collection; let the next generation
 942   // do it.
 943   if (!collection_attempt_is_safe()) {
 944     gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
 945     return;
 946   }
 947   assert(to()->is_empty(), "Else not collection_attempt_is_safe");
 948 
 949   ParNewTracer gc_tracer;
 950   gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
 951   gch->trace_heap_before_gc(&gc_tracer);
 952 
 953   init_assuming_no_promotion_failure();
 954 
 955   if (UseAdaptiveSizePolicy) {
 956     set_survivor_overflow(false);
 957     size_policy->minor_collection_begin();
 958   }
 959 
 960   GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
 961   // Capture heap used before collection (for printing).
 962   size_t gch_prev_used = gch->used();
 963 
 964   SpecializationStats::clear();
 965 
 966   age_table()->clear();
 967   to()->clear(SpaceDecorator::Mangle);
 968 
 969   gch->save_marks();
 970   assert(workers != NULL, "Need parallel worker threads.");
 971   int n_workers = active_workers;
 972 
 973   // Set the correct parallelism (number of queues) in the reference processor
 974   ref_processor()->set_active_mt_degree(n_workers);
 975 
 976   // Always set the terminator for the active number of workers
 977   // because only those workers go through the termination protocol.
 978   ParallelTaskTerminator _term(n_workers, task_queues());
 979   ParScanThreadStateSet thread_state_set(workers->active_workers(),
 980                                          *to(), *this, *_next_gen, *task_queues(),


 993   } else {
 994     GenCollectedHeap::StrongRootsScope srs(gch);
 995     tsk.work(0);
 996   }
 997   thread_state_set.reset(0 /* Bad value in debug if not reset */,
 998                          promotion_failed());
 999 
1000   // Process (weak) reference objects found during scavenge.
1001   ReferenceProcessor* rp = ref_processor();
1002   IsAliveClosure is_alive(this);
1003   ScanWeakRefClosure scan_weak_ref(this);
1004   KeepAliveClosure keep_alive(&scan_weak_ref);
1005   ScanClosure               scan_without_gc_barrier(this, false);
1006   ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
1007   set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
1008   EvacuateFollowersClosureGeneral evacuate_followers(gch, _level,
1009     &scan_without_gc_barrier, &scan_with_gc_barrier);
1010   rp->setup_policy(clear_all_soft_refs);
1011   // Can the mt_degree be set later (at run_task() time would be best)?
1012   rp->set_active_mt_degree(active_workers);
1013   ReferenceProcessorStats stats;
1014   if (rp->processing_is_mt()) {
1015     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
1016     stats = rp->process_discovered_references(&is_alive, &keep_alive,
1017                                               &evacuate_followers, &task_executor,
1018                                               _gc_timer);
1019   } else {
1020     thread_state_set.flush();
1021     gch->set_par_threads(0);  // 0 ==> non-parallel.
1022     gch->save_marks();
1023     stats = rp->process_discovered_references(&is_alive, &keep_alive,
1024                                               &evacuate_followers, NULL,
1025                                               _gc_timer);
1026   }
1027   gc_tracer.report_gc_reference_stats(stats);
1028   if (!promotion_failed()) {
1029     // Swap the survivor spaces.
1030     eden()->clear(SpaceDecorator::Mangle);
1031     from()->clear(SpaceDecorator::Mangle);
1032     if (ZapUnusedHeapArea) {
1033       // This is now done here because of the piece-meal mangling which
1034       // can check for valid mangling at intermediate points in the
1035       // collection(s).  When a minor collection fails to collect
1036       // sufficient space, resizing of the young generation can occur
1037       // and redistribute the spaces in the young generation.  Mangle
1038       // here so that unzapped regions don't get distributed to
1039       // other spaces.
1040       to()->mangle_unused_area();
1041     }
1042     swap_spaces();
1043 
1044     // A successful scavenge should restart the GC time limit count which is
1045     // for full GC's.
1046     size_policy->reset_gc_overhead_limit_count();
1047 
1048     assert(to()->is_empty(), "to space should be empty now");
1049 
1050     adjust_desired_tenuring_threshold();
1051   } else {
1052     handle_promotion_failed(gch, thread_state_set, gc_tracer);
1053   }
1054   // set new iteration safe limit for the survivor spaces
1055   from()->set_concurrent_iteration_safe_limit(from()->top());
1056   to()->set_concurrent_iteration_safe_limit(to()->top());
1057 
1058   if (ResizePLAB) {
1059     plab_stats()->adjust_desired_plab_sz(n_workers);
1060   }
1061 
1062   if (PrintGC && !PrintGCDetails) {
1063     gch->print_heap_change(gch_prev_used);
1064   }
1065 
1066   if (PrintGCDetails && ParallelGCVerbose) {
1067     TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
1068     TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
1069   }
1070 
1071   if (UseAdaptiveSizePolicy) {
1072     size_policy->minor_collection_end(gch->gc_cause());
1073     size_policy->avg_survived()->sample(from()->used());
1074   }
1075 
1076   // We need to use a monotonically non-decreasing time in ms
1077   // or we will see time-warp warnings; os::javaTimeMillis()
1078   // does not guarantee monotonicity.
1079   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
1080   update_time_of_last_gc(now);
1081 
1082   SpecializationStats::print();
1083 
1084   rp->set_enqueuing_is_done(true);
1085   if (rp->processing_is_mt()) {
1086     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
1087     rp->enqueue_discovered_references(&task_executor);
1088   } else {
1089     rp->enqueue_discovered_references(NULL);
1090   }
1091   rp->verify_no_references_recorded();
1092 
1093   gch->trace_heap_after_gc(&gc_tracer);
1094   gc_tracer.report_tenuring_threshold(tenuring_threshold());
1095 
1096   _gc_timer->register_gc_end(os::elapsed_counter());
1097 
1098   gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
1099 }
1100 
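Taken together, the JEP 167 additions bracket collect() with a timer and a tracer: register_gc_start/report_gc_start and trace_heap_before_gc on entry, report_gc_reference_stats after reference processing, report_promotion_failed on the failure path, then trace_heap_after_gc, report_tenuring_threshold, register_gc_end and report_gc_end on exit. A stripped-down sketch of that call ordering with toy types (the real signatures live in the gcTimer and gcTrace headers):

    #include <cstdio>

    struct ToyTimer {
      long start_ticks = 0;
      long end_ticks = 0;
    };

    struct ToyTracer {
      void report_gc_start(long t)          { std::printf("start @%ld\n", t); }
      void report_reference_stats(int n)    { std::printf("refs: %d\n", n); }
      void report_tenuring_threshold(int t) { std::printf("threshold: %d\n", t); }
      void report_gc_end(long t)            { std::printf("end @%ld\n", t); }
    };

    void toy_collect(ToyTimer& timer, ToyTracer& tracer, long (*now)()) {
      timer.start_ticks = now();
      tracer.report_gc_start(timer.start_ticks);
      // ... heap-before summary, parallel scavenge ...
      tracer.report_reference_stats(0);       // from reference processing
      // ... swap spaces, or handle promotion failure ...
      tracer.report_tenuring_threshold(15);
      timer.end_ticks = now();
      tracer.report_gc_end(timer.end_ticks);
    }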
1101 static int sum;
1102 void ParNewGeneration::waste_some_time() {
1103   for (int i = 0; i < 100; i++) {
1104     sum += i;
1105   }
1106 }
1107 
1108 static const oop ClaimedForwardPtr = oop(0x4);
1109 
1110 // Because of concurrency, there are times where an object for which
1111 // "is_forwarded()" is true contains an "interim" forwarding pointer
1112 // value.  Such a value will soon be overwritten with a real value.
1113 // This method requires "obj" to have a forwarding pointer, and waits, if
1114 // necessary for a real one to be inserted, and returns it.
1115 
1116 oop ParNewGeneration::real_forwardee(oop obj) {
1117   oop forward_ptr = obj->forwardee();
1118   if (forward_ptr != ClaimedForwardPtr) {


1188     // Either to-space is full or we decided to promote;
1189     // try allocating obj tenured.
1190 
1191     // Attempt to install a null forwarding pointer (atomically),
1192     // to claim the right to install the real forwarding pointer.
1193     forward_ptr = old->forward_to_atomic(ClaimedForwardPtr);
1194     if (forward_ptr != NULL) {
1195       // someone else beat us to it.
1196       return real_forwardee(old);
1197     }
1198 
1199     new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
1200                                        old, m, sz);
1201 
1202     if (new_obj == NULL) {
1203       // promotion failed, forward to self
1204       _promotion_failed = true;
1205       new_obj = old;
1206 
1207       preserve_mark_if_necessary(old, m);
1208       par_scan_state->register_promotion_failure(sz);
1209     }
1210 
1211     old->forward_to(new_obj);
1212     forward_ptr = NULL;
1213   } else {
1214     // Is in to-space; do copying ourselves.
1215     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
1216     forward_ptr = old->forward_to_atomic(new_obj);
1217     // Restore the mark word copied above.
1218     new_obj->set_mark(m);
1219     // Increment age if obj still in new generation
1220     new_obj->incr_age();
1221     par_scan_state->age_table()->add(new_obj, sz);
1222   }
1223   assert(new_obj != NULL, "just checking");
1224 
1225 #ifndef PRODUCT
1226   // This code must come after the CAS test, or it will print incorrect
1227   // information.
1228   if (TraceScavenge) {


1313 
1314   if (new_obj == NULL) {
1315     // Either to-space is full or we decided to promote;
1316     // try allocating obj tenured.
1317     new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
1318                                        old, m, sz);
1319 
1320     if (new_obj == NULL) {
1321       // promotion failed, forward to self
1322       forward_ptr = old->forward_to_atomic(old);
1323       new_obj = old;
1324 
1325       if (forward_ptr != NULL) {
1326         return forward_ptr;   // someone else succeeded
1327       }
1328 
1329       _promotion_failed = true;
1330       failed_to_promote = true;
1331 
1332       preserve_mark_if_necessary(old, m);
1333       par_scan_state->register_promotion_failure(sz);
1334     }
1335   } else {
1336     // Is in to-space; do copying ourselves.
1337     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
1338     // Restore the mark word copied above.
1339     new_obj->set_mark(m);
1340     // Increment age if new_obj still in new generation
1341     new_obj->incr_age();
1342     par_scan_state->age_table()->add(new_obj, sz);
1343   }
1344   assert(new_obj != NULL, "just checking");
1345 
1346 #ifndef PRODUCT
1347   // This code must come after the CAS test, or it will print incorrect
1348   // information.
1349   if (TraceScavenge) {
1350     gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
1351        is_in_reserved(new_obj) ? "copying" : "tenuring",
1352        new_obj->klass()->internal_name(), old, new_obj, new_obj->size());
1353   }


1611       oopDesc* f = cur;
1612       FREE_C_HEAP_ARRAY(oopDesc, f, mtGC);
1613     } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
1614       assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
1615       obj_to_push = cur;
1616     }
1617     bool ok = work_q->push(obj_to_push);
1618     assert(ok, "Should have succeeded");
1619     cur = next;
1620     n++;
1621   }
1622   TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
1623 #ifndef PRODUCT
1624   assert(_num_par_pushes >= n, "Too many pops?");
1625   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
1626 #endif
1627   return true;
1628 }
1629 #undef BUSY
1630 
1631 void ParNewGeneration::ref_processor_init() {
1632   if (_ref_processor == NULL) {
1633     // Allocate and initialize a reference processor
1634     _ref_processor =
1635       new ReferenceProcessor(_reserved,                  // span
1636                              ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
1637                              (int) ParallelGCThreads,    // mt processing degree
1638                              refs_discovery_is_mt(),     // mt discovery
1639                              (int) ParallelGCThreads,    // mt discovery degree
1640                              refs_discovery_is_atomic(), // atomic_discovery
1641                              NULL,                       // is_alive_non_header
1642                              false);                     // write barrier for next field updates
1643   }
1644 }
1645 
1646 const char* ParNewGeneration::name() const {
1647   return "par new generation";
1648 }