
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 7992 : G1RootProcessor
rev 7993 : Convert G1 to G1RootProcessor
rev 7996 : imported patch trace-metadata-comment
rev 7998 : imported patch thomas-comments
rev 7999 : imported patch eriks-comments


  31 #include "classfile/metadataOnStackMark.hpp"
  32 #include "classfile/stringTable.hpp"
  33 #include "code/codeCache.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  36 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  37 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  38 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  39 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  40 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  41 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  42 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  43 #include "gc_implementation/g1/g1EvacFailure.hpp"
  44 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  45 #include "gc_implementation/g1/g1Log.hpp"
  46 #include "gc_implementation/g1/g1MarkSweep.hpp"
  47 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  48 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
  49 #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
  50 #include "gc_implementation/g1/g1RemSet.inline.hpp"

  51 #include "gc_implementation/g1/g1StringDedup.hpp"
  52 #include "gc_implementation/g1/g1YCTypes.hpp"
  53 #include "gc_implementation/g1/heapRegion.inline.hpp"
  54 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  55 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
  56 #include "gc_implementation/g1/vm_operations_g1.hpp"
  57 #include "gc_implementation/shared/gcHeapSummary.hpp"
  58 #include "gc_implementation/shared/gcTimer.hpp"
  59 #include "gc_implementation/shared/gcTrace.hpp"
  60 #include "gc_implementation/shared/gcTraceTime.hpp"
  61 #include "gc_implementation/shared/isGCActiveMark.hpp"
  62 #include "memory/allocation.hpp"
  63 #include "memory/gcLocker.inline.hpp"
  64 #include "memory/generationSpec.hpp"
  65 #include "memory/iterator.hpp"
  66 #include "memory/referenceProcessor.hpp"
  67 #include "oops/oop.inline.hpp"
  68 #include "oops/oop.pcgc.inline.hpp"
  69 #include "runtime/atomic.inline.hpp"
  70 #include "runtime/orderAccess.inline.hpp"


  72 #include "utilities/globalDefinitions.hpp"
  73 
  74 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  75 
  76 // turn it on so that the contents of the young list (scan-only /
  77 // to-be-collected) are printed at "strategic" points before / during
  78 // / after the collection --- this is useful for debugging
  79 #define YOUNG_LIST_VERBOSE 0
  80 // CURRENT STATUS
  81 // This file is under construction.  Search for "FIXME".
  82 
  83 // INVARIANTS/NOTES
  84 //
  85 // All allocation activity covered by the G1CollectedHeap interface is
  86 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  87 // and allocate_new_tlab, which are the "entry" points to the
  88 // allocation code from the rest of the JVM.  (Note that this does not
  89 // apply to TLAB allocation, which is not part of this interface: it
  90 // is done by clients of this interface.)
  91 
  92 // Notes on implementation of parallelism in different tasks.
  93 //
  94 // G1ParVerifyTask uses heap_region_par_iterate() for parallelism.
  95 // The number of GC workers is passed to heap_region_par_iterate().
  96 // It does use run_task(), which sets _n_workers in the task.
  97 // G1ParTask executes g1_process_roots() ->
  98 // SharedHeap::process_roots(), which eventually calls
  99 // CardTableModRefBS::par_non_clean_card_iterate_work(), which uses
 100 // SequentialSubTasksDone.  SharedHeap::process_roots() also
 101 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
 102 //
 103 
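// A minimal sketch of the SubTasksDone claiming pattern described above,
// not part of this patch; the task ids and worker body are hypothetical,
// while the is_task_claimed() / all_tasks_completed() calls mirror the
// usage in g1_process_roots() later in this file.
enum ExamplePseudoTask {
  EX_PS_first_subtask,
  EX_PS_second_subtask,
  EX_PS_NumElements
};

void example_worker_body(SubTasksDone* tasks) {
  // Workers race to claim each task id; exactly one worker wins a given
  // id and performs that sub-task, the rest skip it.
  if (!tasks->is_task_claimed(EX_PS_first_subtask)) {
    // ... perform the first serial sub-task ...
  }
  if (!tasks->is_task_claimed(EX_PS_second_subtask)) {
    // ... perform the second serial sub-task ...
  }
  // Every worker checks in, so the SubTasksDone instance can be reused
  // for the next pause.
  tasks->all_tasks_completed();
}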
 104 // Local to this file.
 105 
 106 class RefineCardTableEntryClosure: public CardTableEntryClosure {
 107   bool _concurrent;
 108 public:
 109   RefineCardTableEntryClosure() : _concurrent(true) { }
 110 
 111   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 112     bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
 113     // This path is executed concurrently by the refinement or mutator
 114     // threads, so we do not care if card_ptr contains references
 115     // that point into the collection set.
 116     assert(!oops_into_cset, "should be");
 117 
 118     if (_concurrent && SuspendibleThreadSet::should_yield()) {
 119       // Caller will actually yield.
 120       return false;
 121     }
 122     // Otherwise, we finished successfully; return true.
 123     return true;
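
// A minimal sketch of the caller-side contract implied above: the card
// drain loop stops applying the closure once do_card_ptr() returns false
// and performs the actual yield. The loop shape and parameters are
// illustrative only, not the real DirtyCardQueueSet code.
void example_drain_cards(CardTableEntryClosure* cl,
                         jbyte** cards, size_t n, uint worker_i) {
  for (size_t i = 0; i < n; i++) {
    if (!cl->do_card_ptr(cards[i], worker_i)) {
      // The closure asked us to stop: yield to the suspendible thread
      // set. A real caller would remember its position and resume later.
      SuspendibleThreadSet::yield();
      return;
    }
  }
}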


1750   _hrm.verify_optional();
1751   verify_region_sets_optional();
1752 }
1753 
1754 // Public methods.
1755 
1756 #ifdef _MSC_VER // the use of 'this' below gets a warning; make it go away
1757 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1758 #endif // _MSC_VER
1759 
1760 
1761 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1762   SharedHeap(policy_),
1763   _g1_policy(policy_),
1764   _dirty_card_queue_set(false),
1765   _into_cset_dirty_card_queue_set(false),
1766   _is_alive_closure_cm(this),
1767   _is_alive_closure_stw(this),
1768   _ref_processor_cm(NULL),
1769   _ref_processor_stw(NULL),
1770   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1771   _bot_shared(NULL),
1772   _evac_failure_scan_stack(NULL),
1773   _mark_in_progress(false),
1774   _cg1r(NULL),
1775   _g1mm(NULL),
1776   _refine_cte_cl(NULL),
1777   _full_collection(false),
1778   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1779   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1780   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1781   _humongous_is_live(),
1782   _has_humongous_reclaim_candidates(false),
1783   _free_regions_coming(false),
1784   _young_list(new YoungList(this)),
1785   _gc_time_stamp(0),
1786   _survivor_plab_stats(YoungPLABSize, PLABWeight),
1787   _old_plab_stats(OldPLABSize, PLABWeight),
1788   _expand_heap_after_alloc_failure(true),
1789   _surviving_young_words(NULL),
1790   _old_marking_cycles_started(0),
1791   _old_marking_cycles_completed(0),
1792   _concurrent_cycle_started(false),
1793   _heap_summary_sent(false),
1794   _in_cset_fast_test(),
1795   _dirty_cards_region_list(NULL),
1796   _worker_cset_start_region(NULL),
1797   _worker_cset_start_region_time_stamp(NULL),
1798   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1799   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1800   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1801   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1802 
1803   _g1h = this;
1804   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1805     vm_exit_during_initialization("Failed necessary allocation.");
1806   }
1807 
1808   _allocator = G1Allocator::create_allocator(_g1h);
1809   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1810 
1811   int n_queues = MAX2((int)ParallelGCThreads, 1);
1812   _task_queues = new RefToScanQueueSet(n_queues);
1813 
1814   uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1815   assert(n_rem_sets > 0, "Invariant.");
1816 
1817   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1818   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
1819   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1820 
1821   for (int i = 0; i < n_queues; i++) {
1822     RefToScanQueue* q = new RefToScanQueue();
1823     q->initialize();
1824     _task_queues->register_queue(i, q);
1825     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1826   }
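
// The loop above pairs NEW_C_HEAP_ARRAY, which returns raw unconstructed
// storage, with placement new to run the constructors. A self-contained
// sketch of the idiom with a hypothetical element type:
struct ExampleInfo {
  uint _failures;
  ExampleInfo() : _failures(0) { }
};

ExampleInfo* example_alloc_array(int n) {
  ExampleInfo* arr = NEW_C_HEAP_ARRAY(ExampleInfo, n, mtGC);
  for (int i = 0; i < n; i++) {
    ::new (&arr[i]) ExampleInfo();  // construct in place
  }
  return arr;
}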


3090     }
3091   }
3092 };
3093 
3094 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
3095   if (SafepointSynchronize::is_at_safepoint()) {
3096     assert(Thread::current()->is_VM_thread(),
3097            "Expected to be executed serially by the VM thread at this point");
3098 
3099     if (!silent) { gclog_or_tty->print("Roots "); }
3100     VerifyRootsClosure rootsCl(vo);
3101     VerifyKlassClosure klassCl(this, &rootsCl);
3102     CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3103 
3104     // We apply the relevant closures to all the oops in the
3105     // system dictionary, the class loader data graph, the string table,
3106     // and the nmethods in the code cache.
3107     G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3108     G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3109 
3110     process_all_roots(true,            // activate StrongRootsScope
3111                       SO_AllCodeCache, // roots scanning options
3112                       &rootsCl,
3113                       &cldCl,
3114                       &blobsCl);

3115 
3116     bool failures = rootsCl.failures() || codeRootsCl.failures();
3117 
3118     if (vo != VerifyOption_G1UseMarkWord) {
3119       // If we're verifying during a full GC then the region sets
3120       // will have been torn down at the start of the GC. Therefore
3121       // verifying the region sets will fail. So we only verify
3122       // the region sets when not in a full GC.
3123       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3124       verify_region_sets();
3125     }
3126 
3127     if (!silent) { gclog_or_tty->print("HeapRegions "); }
3128     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3129 
3130       G1ParVerifyTask task(this, vo);
3131       assert(UseDynamicNumberOfGCThreads ||
3132         workers()->active_workers() == workers()->total_workers(),
3133         "If not dynamic should be using all the workers");
3134       int n_workers = workers()->active_workers();


4343       : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
4344   void do_klass(Klass* klass) {
4345     // If the klass has not been dirtied we know that there are
4346     // no references into the young gen and we can skip it.
4347     if (!_process_only_dirty || klass->has_modified_oops()) {
4348       // Clean the klass since we're going to scavenge all the metadata.
4349       klass->clear_modified_oops();
4350 
4351       // Tell the closure that this klass is the Klass to scavenge
4352       // and is the one to dirty if oops are left pointing into the young gen.
4353       _closure->set_scanned_klass(klass);
4354 
4355       klass->oops_do(_closure);
4356 
4357       _closure->set_scanned_klass(NULL);
4358     }
4359     _count++;
4360   }
4361 };
4362 
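// A minimal sketch of the dirty-klass filtering performed above, using a
// hypothetical ExampleKlass rather than HotSpot's Klass. The
// record_modified_oops() dirtying side is an assumption inferred from the
// comment about leaving the klass dirty when oops still point into the
// young gen.
struct ExampleKlass {
  bool _modified_oops;
  bool has_modified_oops() const { return _modified_oops; }
  void clear_modified_oops()     { _modified_oops = false; }
  void record_modified_oops()    { _modified_oops = true; }
};

void example_scan_klass(ExampleKlass* k, bool process_only_dirty) {
  if (!process_only_dirty || k->has_modified_oops()) {
    k->clear_modified_oops();  // optimistically clean before scanning
    // ... scan the klass's oops; if one still points into the young gen
    // after evacuation, the oop closure re-dirties the klass via
    // record_modified_oops() ...
  }
}
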
4363 class G1CodeBlobClosure : public CodeBlobClosure {
4364   class HeapRegionGatheringOopClosure : public OopClosure {
4365     G1CollectedHeap* _g1h;
4366     OopClosure* _work;
4367     nmethod* _nm;
4368 
4369     template <typename T>
4370     void do_oop_work(T* p) {
4371       _work->do_oop(p);
4372       T oop_or_narrowoop = oopDesc::load_heap_oop(p);
4373       if (!oopDesc::is_null(oop_or_narrowoop)) {
4374         oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
4375         HeapRegion* hr = _g1h->heap_region_containing_raw(o);
4376         assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
4377         hr->add_strong_code_root(_nm);
4378       }
4379     }
4380 
4381   public:
4382     HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
4383 
4384     void do_oop(oop* o) {
4385       do_oop_work(o);
4386     }
4387 
4388     void do_oop(narrowOop* o) {
4389       do_oop_work(o);
4390     }
4391 
4392     void set_nm(nmethod* nm) {
4393       _nm = nm;
4394     }
4395   };
4396 
4397   HeapRegionGatheringOopClosure _oc;
4398 public:
4399   G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
4400 
4401   void do_code_blob(CodeBlob* cb) {
4402     nmethod* nm = cb->as_nmethod_or_null();
4403     if (nm != NULL) {
4404       if (!nm->test_set_oops_do_mark()) {
4405         _oc.set_nm(nm);
4406         nm->oops_do(&_oc);
4407         nm->fix_oop_relocations();
4408       }
4409     }
4410   }
4411 };
4412 
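// A hedged illustration of how a CodeBlobClosure such as the one above is
// driven; the iterator shape here is hypothetical. do_code_blob() filters
// for nmethods, and test_set_oops_do_mark() makes the per-nmethod visit
// idempotent when several GC workers reach the same nmethod. Routing each
// evacuated oop through HeapRegionGatheringOopClosure rebuilds the
// strong-code-root remembered sets as a side effect of the normal scan.
void example_apply_to_blobs(CodeBlobClosure* cl, CodeBlob** blobs, int n) {
  for (int i = 0; i < n; i++) {
    cl->do_code_blob(blobs[i]);  // non-nmethod blobs are ignored inside
  }
}
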
4413 class G1ParTask : public AbstractGangTask {
4414 protected:
4415   G1CollectedHeap*       _g1h;
4416   RefToScanQueueSet      *_queues;

4417   ParallelTaskTerminator _terminator;
4418   uint _n_workers;
4419 
4420   Mutex _stats_lock;
4421   Mutex* stats_lock() { return &_stats_lock; }
4422 
4423 public:
4424   G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
4425     : AbstractGangTask("G1 collection"),
4426       _g1h(g1h),
4427       _queues(task_queues),

4428       _terminator(0, _queues),
4429       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4430   {}
4431 
4432   RefToScanQueueSet* queues() { return _queues; }
4433 
4434   RefToScanQueue *work_queue(int i) {
4435     return queues()->queue(i);
4436   }
4437 
4438   ParallelTaskTerminator* terminator() { return &_terminator; }
4439 
4440   virtual void set_for_termination(int active_workers) {
4441     // This task calls set_n_termination() in par_non_clean_card_iterate_work()
4442     // in the young space (_par_seq_tasks) of the G1 heap
4443     // for SequentialSubTasksDone.
4444     // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap,
4445     // both of which need setting by set_n_termination().
4446     _g1h->SharedHeap::set_n_termination(active_workers);
4447     _g1h->set_n_termination(active_workers);
4448     terminator()->reset_for_reuse(active_workers);
4449     _n_workers = active_workers;
4450   }
4451 
4452   // Helps out with CLD processing.
4453   //
4454   // During InitialMark we need to:
4455   // 1) Scavenge all CLDs for the young GC.
4456   // 2) Mark all objects directly reachable from strong CLDs.
4457   template <G1Mark do_mark_object>
4458   class G1CLDClosure : public CLDClosure {
4459     G1ParCopyClosure<G1BarrierNone,  do_mark_object>* _oop_closure;
4460     G1ParCopyClosure<G1BarrierKlass, do_mark_object>  _oop_in_klass_closure;
4461     G1KlassScanClosure                                _klass_in_cld_closure;
4462     bool                                              _claim;
4463 
4464    public:
4465     G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
4466                  bool only_young, bool claim)
4467         : _oop_closure(oop_closure),


4496 
4497       bool only_young = _g1h->g1_policy()->gcs_are_young();
4498 
4499       // Non-IM young GC.
4500       G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
4501       G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
4502                                                                                only_young, // Only process dirty klasses.
4503                                                                                false);     // No need to claim CLDs.
4504       // IM young GC.
4505       //    Strong roots closures.
4506       G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, &pss, rp);
4507       G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
4508                                                                                false, // Process all klasses.
4509                                                                                true); // Need to claim CLDs.
4510       //    Weak roots closures.
4511       G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
4512       G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
4513                                                                                     false, // Process all klasses.
4514                                                                                     true); // Need to claim CLDs.
4515 
4516       G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
4517       G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
4518       // IM Weak code roots are handled later.
4519 
4520       OopClosure* strong_root_cl;
4521       OopClosure* weak_root_cl;
4522       CLDClosure* strong_cld_cl;
4523       CLDClosure* weak_cld_cl;
4524       CodeBlobClosure* strong_code_cl;

4525 
4526       if (_g1h->g1_policy()->during_initial_mark_pause()) {
4527         // We also need to mark copied objects.
4528         strong_root_cl = &scan_mark_root_cl;
4529         strong_cld_cl  = &scan_mark_cld_cl;
4530         strong_code_cl = &scan_mark_code_cl;
4531         if (ClassUnloadingWithConcurrentMark) {
4532           weak_root_cl = &scan_mark_weak_root_cl;
4533           weak_cld_cl  = &scan_mark_weak_cld_cl;

4534         } else {
4535           weak_root_cl = &scan_mark_root_cl;
4536           weak_cld_cl  = &scan_mark_cld_cl;
4537         }
4538       } else {
4539         strong_root_cl = &scan_only_root_cl;
4540         weak_root_cl   = &scan_only_root_cl;
4541         strong_cld_cl  = &scan_only_cld_cl;
4542         weak_cld_cl    = &scan_only_cld_cl;
4543         strong_code_cl = &scan_only_code_cl;
4544       }
4545 
4546 
4547       G1ParPushHeapRSClosure  push_heap_rs_cl(_g1h, &pss);
4548 
4549       pss.start_strong_roots();
4550       _g1h->g1_process_roots(strong_root_cl,

4551                              weak_root_cl,
4552                              &push_heap_rs_cl,
4553                              strong_cld_cl,
4554                              weak_cld_cl,
4555                              strong_code_cl,
4556                              worker_id);
4557 




4558       pss.end_strong_roots();
4559 
4560       {
4561         double start = os::elapsedTime();
4562         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4563         evac.do_void();
4564         double elapsed_sec = os::elapsedTime() - start;
4565         double term_sec = pss.term_time();
4566         _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4567         _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4568         _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, pss.term_attempts());
4569       }
4570       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4571       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4572 
4573       if (PrintTerminationStats) {
4574         MutexLocker x(stats_lock());
4575         pss.print_termination_stats(worker_id);
4576       }
4577 
4578       assert(pss.queue_is_empty(), "should be empty");
4579 
4580       // Close the inner scope so that the ResourceMark and HandleMark
4581       // destructors are executed here and are included as part of the
4582       // "GC Worker Time".
4583     }
4584     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
4585   }
4586 };
4587 
4588 // *** Common G1 Evacuation Stuff
4589 
4590 // This method is run in a GC worker.
4591 
4592 void
4593 G1CollectedHeap::
4594 g1_process_roots(OopClosure* scan_non_heap_roots,
4595                  OopClosure* scan_non_heap_weak_roots,
4596                  G1ParPushHeapRSClosure* scan_rs,
4597                  CLDClosure* scan_strong_clds,
4598                  CLDClosure* scan_weak_clds,
4599                  CodeBlobClosure* scan_strong_code,
4600                  uint worker_i) {
4601 
4602   // First scan the shared roots.
4603   double ext_roots_start = os::elapsedTime();
4604   double closure_app_time_sec = 0.0;
4605 
4606   bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
4607   bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
4608 
4609   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4610   BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
4611 
4612   process_roots(false, // no scoping; this is parallel code
4613                 SharedHeap::SO_None,
4614                 &buf_scan_non_heap_roots,
4615                 &buf_scan_non_heap_weak_roots,
4616                 scan_strong_clds,
4617                 // Initial marks that unload classes handle the weak CLDs separately.
4618                 (trace_metadata ? NULL : scan_weak_clds),
4619                 scan_strong_code);
4620 
4621   // Now the CM ref_processor roots.
4622   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4623     // We need to treat the discovered reference lists of the
4624     // concurrent mark ref processor as roots and keep the entries
4625     // on them (which are added by the marking threads) live
4626     // until they can be processed at the end of marking.
4627     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
4628   }
4629 
4630   if (trace_metadata) {
4631     // Barrier to make sure all workers passed
4632     // the strong CLD and strong nmethods phases.
4633     active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
4634 
4635     // Now take the complement of the strong CLDs.
4636     ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
4637   }
4638 
4639   // Finish up any enqueued closure apps (attributed as object copy time).
4640   buf_scan_non_heap_roots.done();
4641   buf_scan_non_heap_weak_roots.done();
4642 
4643   double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
4644       + buf_scan_non_heap_weak_roots.closure_app_seconds();
4645 
4646   g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, obj_copy_time_sec);
4647 
4648   double ext_root_time_sec = os::elapsedTime() - ext_roots_start - obj_copy_time_sec;
4649   g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::ExtRootScan, worker_i, ext_root_time_sec);
4650 
4651   // During conc marking we have to filter the per-thread SATB buffers
4652   // to make sure we remove any oops into the CSet (which will show up
4653   // as implicitly live).
4654   {
4655     G1GCParPhaseTimesTracker x(g1_policy()->phase_times(), G1GCPhaseTimes::SATBFiltering, worker_i);
4656     if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers) && mark_in_progress()) {
4657       JavaThread::satb_mark_queue_set().filter_thread_buffers();
4658     }
4659   }
4660 
4661   // Now scan the complement of the collection set.
4662   G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
4663 
4664   g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4665 
4666   _process_strong_tasks->all_tasks_completed();
4667 }
4668 
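// A worked example, with hypothetical numbers, of the phase accounting in
// g1_process_roots() above. Buffering the root oops and timing only their
// delayed application is what lets closure-application cost be attributed
// to object copying (ObjCopy) rather than external root scanning
// (ExtRootScan).
void example_phase_accounting() {
  double ext_roots_start   = 0.000;  // os::elapsedTime() at phase start
  double now               = 0.012;  // 12 ms of wall time later
  double obj_copy_time_sec = 0.005;  // closure_app_seconds() of both buffers
  // Everything not spent applying buffered closures counts as root scanning:
  double ext_root_time_sec = now - ext_roots_start - obj_copy_time_sec;
  // ext_root_time_sec is ~0.007, i.e. 7 ms attributed to ExtRootScan.
}
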
4669 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4670 private:
4671   BoolObjectClosure* _is_alive;
4672   int _initial_string_table_size;
4673   int _initial_symbol_table_size;
4674 
4675   bool  _process_strings;
4676   int _strings_processed;
4677   int _strings_removed;
4678 
4679   bool  _process_symbols;
4680   int _symbols_processed;
4681   int _symbols_removed;
4682 
4683 public:
4684   G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
4685     AbstractGangTask("String/Symbol Unlinking"),
4686     _is_alive(is_alive),
4687     _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
4688     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {


5595   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5596 
5597   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5598 
5599   // Disable the hot card cache.
5600   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5601   hot_card_cache->reset_hot_cache_claimed_index();
5602   hot_card_cache->set_use_cache(false);
5603 
5604   uint n_workers;
5605   n_workers =
5606     AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5607                                             workers()->active_workers(),
5608                                             Threads::number_of_non_daemon_threads());
5609   assert(UseDynamicNumberOfGCThreads ||
5610          n_workers == workers()->total_workers(),
5611          "If not dynamic should be using all the workers");
5612   workers()->set_active_workers(n_workers);
5613   set_par_threads(n_workers);
5614 
5615   G1ParTask g1_par_task(this, _task_queues);
5616 
5617   init_for_evac_failure(NULL);
5618 
5619   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5620   double start_par_time_sec = os::elapsedTime();
5621   double end_par_time_sec;
5622 
5623   {
5624     StrongRootsScope srs(this);

5625     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5626     if (g1_policy()->during_initial_mark_pause()) {
5627       ClassLoaderDataGraph::clear_claimed_marks();
5628     }
5629 
5630     // The individual threads will set their evac-failure closures.
5631     if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
5632     // These tasks use SharedHeap::_process_strong_tasks
5633     assert(UseDynamicNumberOfGCThreads ||
5634            workers()->active_workers() == workers()->total_workers(),
5635            "If not dynamic should be using all the workers");
5636     workers()->run_task(&g1_par_task);
5637     end_par_time_sec = os::elapsedTime();
5638 
5639     // Closing the inner scope will execute the destructor
5640     // for the StrongRootsScope object. We record the current
5641     // elapsed time before closing the scope so that time
5642     // taken for the SRS destructor is NOT included in the
5643     // reported parallel time.
5644   }
5645 
5646   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5647 
5648   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5649   phase_times->record_par_time(par_time_ms);
5650 
5651   double code_root_fixup_time_ms =
5652         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5653   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5654 
5655   set_par_threads(0);
5656 
5657   // Process any discovered reference objects - we have
5658   // to do this _before_ we retire the GC alloc regions
5659   // as we may have to copy some 'reachable' referent
5660   // objects (and their reachable sub-graphs) that were
5661   // not copied during the pause.
5662   process_discovered_references(n_workers);




  31 #include "classfile/metadataOnStackMark.hpp"
  32 #include "classfile/stringTable.hpp"
  33 #include "code/codeCache.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  36 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  37 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  38 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  39 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  40 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  41 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  42 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  43 #include "gc_implementation/g1/g1EvacFailure.hpp"
  44 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  45 #include "gc_implementation/g1/g1Log.hpp"
  46 #include "gc_implementation/g1/g1MarkSweep.hpp"
  47 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  48 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
  49 #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
  50 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  51 #include "gc_implementation/g1/g1RootProcessor.hpp"
  52 #include "gc_implementation/g1/g1StringDedup.hpp"
  53 #include "gc_implementation/g1/g1YCTypes.hpp"
  54 #include "gc_implementation/g1/heapRegion.inline.hpp"
  55 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  56 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
  57 #include "gc_implementation/g1/vm_operations_g1.hpp"
  58 #include "gc_implementation/shared/gcHeapSummary.hpp"
  59 #include "gc_implementation/shared/gcTimer.hpp"
  60 #include "gc_implementation/shared/gcTrace.hpp"
  61 #include "gc_implementation/shared/gcTraceTime.hpp"
  62 #include "gc_implementation/shared/isGCActiveMark.hpp"
  63 #include "memory/allocation.hpp"
  64 #include "memory/gcLocker.inline.hpp"
  65 #include "memory/generationSpec.hpp"
  66 #include "memory/iterator.hpp"
  67 #include "memory/referenceProcessor.hpp"
  68 #include "oops/oop.inline.hpp"
  69 #include "oops/oop.pcgc.inline.hpp"
  70 #include "runtime/atomic.inline.hpp"
  71 #include "runtime/orderAccess.inline.hpp"


  73 #include "utilities/globalDefinitions.hpp"
  74 
  75 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  76 
  77 // turn it on so that the contents of the young list (scan-only /
  78 // to-be-collected) are printed at "strategic" points before / during
  79 // / after the collection --- this is useful for debugging
  80 #define YOUNG_LIST_VERBOSE 0
  81 // CURRENT STATUS
  82 // This file is under construction.  Search for "FIXME".
  83 
  84 // INVARIANTS/NOTES
  85 //
  86 // All allocation activity covered by the G1CollectedHeap interface is
  87 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  88 // and allocate_new_tlab, which are the "entry" points to the
  89 // allocation code from the rest of the JVM.  (Note that this does not
  90 // apply to TLAB allocation, which is not part of this interface: it
  91 // is done by clients of this interface.)
  92 
  93 // Local to this file.
  94 
  95 class RefineCardTableEntryClosure: public CardTableEntryClosure {
  96   bool _concurrent;
  97 public:
  98   RefineCardTableEntryClosure() : _concurrent(true) { }
  99 
 100   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 101     bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
 102     // This path is executed concurrently by the refinement or mutator
 103     // threads, so we do not care if card_ptr contains references
 104     // that point into the collection set.
 105     assert(!oops_into_cset, "should be");
 106 
 107     if (_concurrent && SuspendibleThreadSet::should_yield()) {
 108       // Caller will actually yield.
 109       return false;
 110     }
 111     // Otherwise, we finished successfully; return true.
 112     return true;


1739   _hrm.verify_optional();
1740   verify_region_sets_optional();
1741 }
1742 
1743 // Public methods.
1744 
1745 #ifdef _MSC_VER // the use of 'this' below gets a warning; make it go away
1746 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1747 #endif // _MSC_VER
1748 
1749 
1750 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1751   SharedHeap(policy_),
1752   _g1_policy(policy_),
1753   _dirty_card_queue_set(false),
1754   _into_cset_dirty_card_queue_set(false),
1755   _is_alive_closure_cm(this),
1756   _is_alive_closure_stw(this),
1757   _ref_processor_cm(NULL),
1758   _ref_processor_stw(NULL),

1759   _bot_shared(NULL),
1760   _evac_failure_scan_stack(NULL),
1761   _mark_in_progress(false),
1762   _cg1r(NULL),
1763   _g1mm(NULL),
1764   _refine_cte_cl(NULL),
1765   _full_collection(false),
1766   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1767   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1768   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1769   _humongous_is_live(),
1770   _has_humongous_reclaim_candidates(false),
1771   _free_regions_coming(false),
1772   _young_list(new YoungList(this)),
1773   _gc_time_stamp(0),
1774   _survivor_plab_stats(YoungPLABSize, PLABWeight),
1775   _old_plab_stats(OldPLABSize, PLABWeight),
1776   _expand_heap_after_alloc_failure(true),
1777   _surviving_young_words(NULL),
1778   _old_marking_cycles_started(0),
1779   _old_marking_cycles_completed(0),
1780   _concurrent_cycle_started(false),
1781   _heap_summary_sent(false),
1782   _in_cset_fast_test(),
1783   _dirty_cards_region_list(NULL),
1784   _worker_cset_start_region(NULL),
1785   _worker_cset_start_region_time_stamp(NULL),
1786   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1787   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1788   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1789   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1790 
1791   _g1h = this;



1792 
1793   _allocator = G1Allocator::create_allocator(_g1h);
1794   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1795 
1796   int n_queues = MAX2((int)ParallelGCThreads, 1);
1797   _task_queues = new RefToScanQueueSet(n_queues);
1798 
1799   uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1800   assert(n_rem_sets > 0, "Invariant.");
1801 
1802   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1803   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
1804   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1805 
1806   for (int i = 0; i < n_queues; i++) {
1807     RefToScanQueue* q = new RefToScanQueue();
1808     q->initialize();
1809     _task_queues->register_queue(i, q);
1810     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1811   }


3075     }
3076   }
3077 };
3078 
3079 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
3080   if (SafepointSynchronize::is_at_safepoint()) {
3081     assert(Thread::current()->is_VM_thread(),
3082            "Expected to be executed serially by the VM thread at this point");
3083 
3084     if (!silent) { gclog_or_tty->print("Roots "); }
3085     VerifyRootsClosure rootsCl(vo);
3086     VerifyKlassClosure klassCl(this, &rootsCl);
3087     CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3088 
3089     // We apply the relevant closures to all the oops in the
3090     // system dictionary, the class loader data graph, the string table,
3091     // and the nmethods in the code cache.
3092     G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3093     G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3094 
3095     {
3096       G1RootProcessor root_processor(this);
3097       root_processor.process_all_roots(&rootsCl,
3098                                        &cldCl,
3099                                        &blobsCl);
3100     }
3101 
3102     bool failures = rootsCl.failures() || codeRootsCl.failures();
3103 
3104     if (vo != VerifyOption_G1UseMarkWord) {
3105       // If we're verifying during a full GC then the region sets
3106       // will have been torn down at the start of the GC. Therefore
3107       // verifying the region sets will fail. So we only verify
3108       // the region sets when not in a full GC.
3109       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3110       verify_region_sets();
3111     }
3112 
3113     if (!silent) { gclog_or_tty->print("HeapRegions "); }
3114     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3115 
3116       G1ParVerifyTask task(this, vo);
3117       assert(UseDynamicNumberOfGCThreads ||
3118         workers()->active_workers() == workers()->total_workers(),
3119         "If not dynamic should be using all the workers");
3120       int n_workers = workers()->active_workers();


4329       : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
4330   void do_klass(Klass* klass) {
4331     // If the klass has not been dirtied we know that there are
4332     // no references into the young gen and we can skip it.
4333     if (!_process_only_dirty || klass->has_modified_oops()) {
4334       // Clean the klass since we're going to scavenge all the metadata.
4335       klass->clear_modified_oops();
4336 
4337       // Tell the closure that this klass is the Klass to scavenge
4338       // and is the one to dirty if oops are left pointing into the young gen.
4339       _closure->set_scanned_klass(klass);
4340 
4341       klass->oops_do(_closure);
4342 
4343       _closure->set_scanned_klass(NULL);
4344     }
4345     _count++;
4346   }
4347 };
4348 
4349 class G1ParTask : public AbstractGangTask {
4350 protected:
4351   G1CollectedHeap*       _g1h;
4352   RefToScanQueueSet      *_queues;
4353   G1RootProcessor*       _root_processor;
4354   ParallelTaskTerminator _terminator;
4355   uint _n_workers;
4356 
4357   Mutex _stats_lock;
4358   Mutex* stats_lock() { return &_stats_lock; }
4359 
4360 public:
4361   G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor)
4362     : AbstractGangTask("G1 collection"),
4363       _g1h(g1h),
4364       _queues(task_queues),
4365       _root_processor(root_processor),
4366       _terminator(0, _queues),
4367       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4368   {}
4369 
4370   RefToScanQueueSet* queues() { return _queues; }
4371 
4372   RefToScanQueue *work_queue(int i) {
4373     return queues()->queue(i);
4374   }
4375 
4376   ParallelTaskTerminator* terminator() { return &_terminator; }
4377 
4378   virtual void set_for_termination(int active_workers) {
4379     _root_processor->set_num_workers(active_workers);






4380     terminator()->reset_for_reuse(active_workers);
4381     _n_workers = active_workers;
4382   }
4383 
4384   // Helps out with CLD processing.
4385   //
4386   // During InitialMark we need to:
4387   // 1) Scavenge all CLDs for the young GC.
4388   // 2) Mark all objects directly reachable from strong CLDs.
4389   template <G1Mark do_mark_object>
4390   class G1CLDClosure : public CLDClosure {
4391     G1ParCopyClosure<G1BarrierNone,  do_mark_object>* _oop_closure;
4392     G1ParCopyClosure<G1BarrierKlass, do_mark_object>  _oop_in_klass_closure;
4393     G1KlassScanClosure                                _klass_in_cld_closure;
4394     bool                                              _claim;
4395 
4396    public:
4397     G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
4398                  bool only_young, bool claim)
4399         : _oop_closure(oop_closure),


4428 
4429       bool only_young = _g1h->g1_policy()->gcs_are_young();
4430 
4431       // Non-IM young GC.
4432       G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
4433       G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
4434                                                                                only_young, // Only process dirty klasses.
4435                                                                                false);     // No need to claim CLDs.
4436       // IM young GC.
4437       //    Strong roots closures.
4438       G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, &pss, rp);
4439       G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
4440                                                                                false, // Process all klasses.
4441                                                                                true); // Need to claim CLDs.
4442       //    Weak roots closures.
4443       G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
4444       G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
4445                                                                                     false, // Process all klasses.
4446                                                                                     true); // Need to claim CLDs.
4447 




4448       OopClosure* strong_root_cl;
4449       OopClosure* weak_root_cl;
4450       CLDClosure* strong_cld_cl;
4451       CLDClosure* weak_cld_cl;
4452 
4453       bool trace_metadata = false;
4454 
4455       if (_g1h->g1_policy()->during_initial_mark_pause()) {
4456         // We also need to mark copied objects.
4457         strong_root_cl = &scan_mark_root_cl;
4458         strong_cld_cl  = &scan_mark_cld_cl;

4459         if (ClassUnloadingWithConcurrentMark) {
4460           weak_root_cl = &scan_mark_weak_root_cl;
4461           weak_cld_cl  = &scan_mark_weak_cld_cl;
4462           trace_metadata = true;
4463         } else {
4464           weak_root_cl = &scan_mark_root_cl;
4465           weak_cld_cl  = &scan_mark_cld_cl;
4466         }
4467       } else {
4468         strong_root_cl = &scan_only_root_cl;
4469         weak_root_cl   = &scan_only_root_cl;
4470         strong_cld_cl  = &scan_only_cld_cl;
4471         weak_cld_cl    = &scan_only_cld_cl;

4472       }
4473 



4474       pss.start_strong_roots();
4475 
4476       _root_processor->evacuate_roots(strong_root_cl,
4477                                       weak_root_cl,

4478                                       strong_cld_cl,
4479                                       weak_cld_cl,
4480                                       trace_metadata,
4481                                       worker_id);
4482 
4483       G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
4484       _root_processor->scan_remembered_sets(&push_heap_rs_cl,
4485                                            weak_root_cl,
4486                                            worker_id);
4487       pss.end_strong_roots();
4488 
4489       {
4490         double start = os::elapsedTime();
4491         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4492         evac.do_void();
4493         double elapsed_sec = os::elapsedTime() - start;
4494         double term_sec = pss.term_time();
4495         _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4496         _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4497         _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, pss.term_attempts());
4498       }
4499       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4500       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4501 
4502       if (PrintTerminationStats) {
4503         MutexLocker x(stats_lock());
4504         pss.print_termination_stats(worker_id);
4505       }
4506 
4507       assert(pss.queue_is_empty(), "should be empty");
4508 
4509       // Close the inner scope so that the ResourceMark and HandleMark
4510       // destructors are executed here and are included as part of the
4511       // "GC Worker Time".
4512     }
4513     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
4514   }
4515 };
4516 
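// An interface sketch of the new G1RootProcessor, reconstructed only from
// the call sites in this file; the authoritative declaration is in
// g1RootProcessor.hpp (rev 7992). Parameter names and the StackObj base
// are assumptions (both uses here are stack-allocated).
class G1RootProcessor : public StackObj {
public:
  G1RootProcessor(G1CollectedHeap* g1h);

  // Used by verification: apply the closures to all root groups.
  void process_all_roots(OopClosure* oops,
                         CLDClosure* clds,
                         CodeBlobClosure* blobs);

  // Per-worker root evacuation during a pause.
  void evacuate_roots(OopClosure* scan_non_heap_roots,
                      OopClosure* scan_non_heap_weak_roots,
                      CLDClosure* scan_strong_clds,
                      CLDClosure* scan_weak_clds,
                      bool trace_metadata,
                      uint worker_i);

  // Scan the remembered sets into the collection set.
  void scan_remembered_sets(G1ParPushHeapRSClosure* scan_rs,
                            OopClosure* scan_non_heap_weak_roots,
                            uint worker_i);

  void set_num_workers(int active_workers);
};
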
4517 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4518 private:
4519   BoolObjectClosure* _is_alive;
4520   int _initial_string_table_size;
4521   int _initial_symbol_table_size;
4522 
4523   bool  _process_strings;
4524   int _strings_processed;
4525   int _strings_removed;
4526 
4527   bool  _process_symbols;
4528   int _symbols_processed;
4529   int _symbols_removed;
4530 
4531 public:
4532   G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
4533     AbstractGangTask("String/Symbol Unlinking"),
4534     _is_alive(is_alive),
4535     _process_strings(process_strings), _strings_processed(0), _strings_removed(0),
4536     _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0) {


5443   NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5444 
5445   g1_rem_set()->prepare_for_oops_into_collection_set_do();
5446 
5447   // Disable the hot card cache.
5448   G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5449   hot_card_cache->reset_hot_cache_claimed_index();
5450   hot_card_cache->set_use_cache(false);
5451 
5452   uint n_workers;
5453   n_workers =
5454     AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5455                                             workers()->active_workers(),
5456                                             Threads::number_of_non_daemon_threads());
5457   assert(UseDynamicNumberOfGCThreads ||
5458          n_workers == workers()->total_workers(),
5459          "If not dynamic should be using all the workers");
5460   workers()->set_active_workers(n_workers);
5461   set_par_threads(n_workers);
5462 

5463 
5464   init_for_evac_failure(NULL);
5465 
5466   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5467   double start_par_time_sec = os::elapsedTime();
5468   double end_par_time_sec;
5469 
5470   {
5471     G1RootProcessor root_processor(this);
5472     G1ParTask g1_par_task(this, _task_queues, &root_processor);
5473     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5474     if (g1_policy()->during_initial_mark_pause()) {
5475       ClassLoaderDataGraph::clear_claimed_marks();
5476     }
5477 
5478     // The individual threads will set their evac-failure closures.
5479     if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
5480     // These tasks use SharedHeap::_process_strong_tasks
5481     assert(UseDynamicNumberOfGCThreads ||
5482            workers()->active_workers() == workers()->total_workers(),
5483            "If not dynamic should be using all the workers");
5484     workers()->run_task(&g1_par_task);
5485     end_par_time_sec = os::elapsedTime();
5486 
5487     // Closing the inner scope will execute the destructor
5488     // for the G1RootProcessor object. We record the current
5489     // elapsed time before closing the scope so that time
5490     // taken for the destructor is NOT included in the
5491     // reported parallel time.
5492   }
5493 
5494   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5495 
5496   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5497   phase_times->record_par_time(par_time_ms);
5498 
5499   double code_root_fixup_time_ms =
5500         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5501   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5502 
5503   set_par_threads(0);
5504 
5505   // Process any discovered reference objects - we have
5506   // to do this _before_ we retire the GC alloc regions
5507   // as we may have to copy some 'reachable' referent
5508   // objects (and their reachable sub-graphs) that were
5509   // not copied during the pause.
5510   process_discovered_references(n_workers);

