src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

  29 #include "precompiled.hpp"
  30 #include "classfile/metadataOnStackMark.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  34 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  35 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  36 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  37 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  38 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  39 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  40 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  41 #include "gc_implementation/g1/g1EvacFailure.hpp"
  42 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  43 #include "gc_implementation/g1/g1Log.hpp"
  44 #include "gc_implementation/g1/g1MarkSweep.hpp"
  45 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  46 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
  47 #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
  48 #include "gc_implementation/g1/g1RemSet.inline.hpp"

  49 #include "gc_implementation/g1/g1StringDedup.hpp"
  50 #include "gc_implementation/g1/g1YCTypes.hpp"
  51 #include "gc_implementation/g1/heapRegion.inline.hpp"
  52 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  53 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
  54 #include "gc_implementation/g1/vm_operations_g1.hpp"
  55 #include "gc_implementation/shared/gcHeapSummary.hpp"
  56 #include "gc_implementation/shared/gcTimer.hpp"
  57 #include "gc_implementation/shared/gcTrace.hpp"
  58 #include "gc_implementation/shared/gcTraceTime.hpp"
  59 #include "gc_implementation/shared/isGCActiveMark.hpp"
  60 #include "memory/allocation.hpp"
  61 #include "memory/gcLocker.inline.hpp"
  62 #include "memory/generationSpec.hpp"
  63 #include "memory/iterator.hpp"
  64 #include "memory/referenceProcessor.hpp"
  65 #include "oops/oop.inline.hpp"
  66 #include "oops/oop.pcgc.inline.hpp"
  67 #include "runtime/orderAccess.inline.hpp"
  68 #include "runtime/vmThread.hpp"
  69 
  70 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  71 
  72 // Turn it on so that the contents of the young list (scan-only /
  73 // to-be-collected) are printed at "strategic" points before, during,
  74 // and after the collection --- this is useful for debugging.
  75 #define YOUNG_LIST_VERBOSE 0
  76 // CURRENT STATUS
  77 // This file is under construction.  Search for "FIXME".
  78 
  79 // INVARIANTS/NOTES
  80 //
  81 // All allocation activity covered by the G1CollectedHeap interface is
  82 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  83 // and allocate_new_tlab, which are the "entry" points to the
  84 // allocation code from the rest of the JVM.  (Note that this does not
  85 // apply to TLAB allocation, which is not part of this interface: it
  86 // is done by clients of this interface.)
  87 
  88 // Notes on implementation of parallelism in different tasks.
  89 //
  90 // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
  91 // The number of GC workers is passed to heap_region_par_iterate_chunked().
  92 // It does use run_task(), which sets _n_workers in the task.
  93 // G1ParTask executes g1_process_roots() ->
  94 // SharedHeap::process_roots(), which eventually calls
  95 // CardTableModRefBS::par_non_clean_card_iterate_work(), which uses
  96 // SequentialSubTasksDone.  SharedHeap::process_roots() also
  97 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
  98 //
  99 
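
As a standalone illustration of the HeapLock invariant above -- a toy model, not HotSpot code; the class, names, and std::mutex stand in for the VM's own Heap_lock and allocation machinery -- the "entry point" allocation path is serialized, while bump allocation inside a TLAB stays in the clients, lock-free:

    #include <cstddef>
    #include <mutex>

    class ToyHeap {
      std::mutex  _heap_lock;   // stand-in for the VM's Heap_lock
      char*       _top;
      char*       _end;
    public:
      ToyHeap(char* start, char* end) : _top(start), _end(end) {}

      // Serialized entry point, like mem_allocate / allocate_new_tlab.
      void* mem_allocate(std::size_t bytes) {
        std::lock_guard<std::mutex> x(_heap_lock);
        if (_top + bytes > _end) {
          return nullptr;       // the real code would attempt a GC here
        }
        void* result = _top;
        _top += bytes;
        return result;
      }
    };
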
 100 // Local to this file.
 101 
 102 class RefineCardTableEntryClosure: public CardTableEntryClosure {
 103   bool _concurrent;
 104 public:
 105   RefineCardTableEntryClosure() : _concurrent(true) { }
 106 
 107   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 108     bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
 109     // This path is executed by the concurrent refine or mutator threads,
 110     // concurrently, and so we do not care if card_ptr contains references
 111     // that point into the collection set.
 112     assert(!oops_into_cset, "should be");
 113 
 114     if (_concurrent && SuspendibleThreadSet::should_yield()) {
 115       // Caller will actually yield.
 116       return false;
 117     }
 118     // Otherwise, we finished successfully; return true.
 119     return true;
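
A minimal sketch of the yield protocol used by do_card_ptr() above (standalone; CardClosure and apply_to_cards are illustrative stand-ins, not HotSpot API): returning false stops the driving loop so the caller can actually yield.

    #include <cstddef>
    #include <vector>

    struct CardClosure {
      virtual ~CardClosure() {}
      // Return false when the caller should stop iterating and yield.
      virtual bool do_card(unsigned char* card_ptr) = 0;
    };

    // Applies the closure to a batch of cards; bails out on a yield request.
    inline bool apply_to_cards(const std::vector<unsigned char*>& cards,
                               CardClosure* cl) {
      for (std::size_t i = 0; i < cards.size(); i++) {
        if (!cl->do_card(cards[i])) {
          return false;   // caller yields, then may resume where it left off
        }
      }
      return true;
    }
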


1837   _hrm.verify_optional();
1838   verify_region_sets_optional();
1839 }
1840 
1841 // Public methods.
1842 
1843 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1844 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1845 #endif // _MSC_VER
1846 
1847 
1848 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1849   SharedHeap(policy_),
1850   _g1_policy(policy_),
1851   _dirty_card_queue_set(false),
1852   _into_cset_dirty_card_queue_set(false),
1853   _is_alive_closure_cm(this),
1854   _is_alive_closure_stw(this),
1855   _ref_processor_cm(NULL),
1856   _ref_processor_stw(NULL),
1857   _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1858   _bot_shared(NULL),
1859   _evac_failure_scan_stack(NULL),
1860   _mark_in_progress(false),
1861   _cg1r(NULL),
1862   _g1mm(NULL),
1863   _refine_cte_cl(NULL),
1864   _full_collection(false),
1865   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1866   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1867   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1868   _humongous_is_live(),
1869   _has_humongous_reclaim_candidates(false),
1870   _free_regions_coming(false),
1871   _young_list(new YoungList(this)),
1872   _gc_time_stamp(0),
1873   _survivor_plab_stats(YoungPLABSize, PLABWeight),
1874   _old_plab_stats(OldPLABSize, PLABWeight),
1875   _expand_heap_after_alloc_failure(true),
1876   _surviving_young_words(NULL),
1877   _old_marking_cycles_started(0),
1878   _old_marking_cycles_completed(0),
1879   _concurrent_cycle_started(false),
1880   _in_cset_fast_test(),
1881   _dirty_cards_region_list(NULL),
1882   _worker_cset_start_region(NULL),
1883   _worker_cset_start_region_time_stamp(NULL),
1884   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1885   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1886   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1887   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1888 
1889   _g1h = this;
1890   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1891     vm_exit_during_initialization("Failed necessary allocation.");
1892   }
1893 
1894   _allocator = G1Allocator::create_allocator(_g1h);
1895   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1896 
1897   int n_queues = MAX2((int)ParallelGCThreads, 1);
1898   _task_queues = new RefToScanQueueSet(n_queues);
1899 
1900   uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1901   assert(n_rem_sets > 0, "Invariant.");
1902 
1903   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1904   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
1905   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1906 
1907   for (int i = 0; i < n_queues; i++) {
1908     RefToScanQueue* q = new RefToScanQueue();
1909     q->initialize();
1910     _task_queues->register_queue(i, q);
1911     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1912   }
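
The loop above pairs raw C-heap allocation (NEW_C_HEAP_ARRAY) with placement new so each element's constructor runs in already-allocated storage. A self-contained sketch of the same idiom; EvacInfo is a hypothetical stand-in type:

    #include <cstdlib>
    #include <new>

    struct EvacInfo {
      unsigned failed_count;
      EvacInfo() : failed_count(0) {}
    };

    inline EvacInfo* make_evac_info_array(int n) {
      // Raw storage only: no constructors have run yet (as with NEW_C_HEAP_ARRAY).
      EvacInfo* arr = static_cast<EvacInfo*>(std::malloc(sizeof(EvacInfo) * n));
      if (arr == nullptr) return nullptr;
      for (int i = 0; i < n; i++) {
        ::new (&arr[i]) EvacInfo();   // construct each element in place
      }
      return arr;
    }
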


3274     }
3275   }
3276 };
3277 
3278 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
3279   if (SafepointSynchronize::is_at_safepoint()) {
3280     assert(Thread::current()->is_VM_thread(),
3281            "Expected to be executed serially by the VM thread at this point");
3282 
3283     if (!silent) { gclog_or_tty->print("Roots "); }
3284     VerifyRootsClosure rootsCl(vo);
3285     VerifyKlassClosure klassCl(this, &rootsCl);
3286     CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3287 
3288     // We apply the relevant closures to all the oops in the
3289     // system dictionary, class loader data graph, the string table
3290     // and the nmethods in the code cache.
3291     G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3292     G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3293 
3294     process_all_roots(true,            // activate StrongRootsScope
3295                       SO_AllCodeCache, // roots scanning options
3296                       &rootsCl,
3297                       &cldCl,
3298                       &blobsCl);

3299 
3300     bool failures = rootsCl.failures() || codeRootsCl.failures();
3301 
3302     if (vo != VerifyOption_G1UseMarkWord) {
3303       // If we're verifying during a full GC then the region sets
3304       // will have been torn down at the start of the GC. Therefore
3305       // verifying the region sets will fail. So we only verify
3306       // the region sets when not in a full GC.
3307       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3308       verify_region_sets();
3309     }
3310 
3311     if (!silent) { gclog_or_tty->print("HeapRegions "); }
3312     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3313       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3314              "sanity check");
3315 
3316       G1ParVerifyTask task(this, vo);
3317       assert(UseDynamicNumberOfGCThreads ||
3318         workers()->active_workers() == workers()->total_workers(),


4565       : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
4566   void do_klass(Klass* klass) {
 4567     // If the klass has not been dirtied we know that there are
 4568     // no references into the young gen and we can skip it.
 4569     if (!_process_only_dirty || klass->has_modified_oops()) {
4570       // Clean the klass since we're going to scavenge all the metadata.
4571       klass->clear_modified_oops();
4572 
4573       // Tell the closure that this klass is the Klass to scavenge
4574       // and is the one to dirty if oops are left pointing into the young gen.
4575       _closure->set_scanned_klass(klass);
4576 
4577       klass->oops_do(_closure);
4578 
4579       _closure->set_scanned_klass(NULL);
4580     }
4581     _count++;
4582   }
4583 };
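
A sketch of the dirty-klass filter above (a standalone model, not the real Klass API): the per-klass modified-oops flag acts like a card for metadata, so a young GC can skip any klass that provably holds no young-gen references.

    struct ToyKlass {
      bool modified_oops;   // set by a write barrier when a field owned by
                            // this klass is updated; cleared after scanning
    };

    template <typename OopVisitor>
    void scan_klass(ToyKlass* k, bool process_only_dirty, OopVisitor& visit) {
      if (process_only_dirty && !k->modified_oops) {
        return;                  // not dirtied: nothing young to find, skip it
      }
      k->modified_oops = false;  // clean before scavenging all the metadata
      visit(k);                  // scan the klass's oops
    }
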
4584 
4585 class G1CodeBlobClosure : public CodeBlobClosure {
4586   class HeapRegionGatheringOopClosure : public OopClosure {
4587     G1CollectedHeap* _g1h;
4588     OopClosure* _work;
4589     nmethod* _nm;
4590 
4591     template <typename T>
4592     void do_oop_work(T* p) {
4593       _work->do_oop(p);
4594       T oop_or_narrowoop = oopDesc::load_heap_oop(p);
4595       if (!oopDesc::is_null(oop_or_narrowoop)) {
4596         oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
4597         HeapRegion* hr = _g1h->heap_region_containing_raw(o);
4598         assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
4599         hr->add_strong_code_root(_nm);
4600       }
4601     }
4602 
4603   public:
4604     HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
4605 
4606     void do_oop(oop* o) {
4607       do_oop_work(o);
4608     }
4609 
4610     void do_oop(narrowOop* o) {
4611       do_oop_work(o);
4612     }
4613 
4614     void set_nm(nmethod* nm) {
4615       _nm = nm;
4616     }
4617   };
4618 
4619   HeapRegionGatheringOopClosure _oc;
4620 public:
4621   G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
4622 
4623   void do_code_blob(CodeBlob* cb) {
4624     nmethod* nm = cb->as_nmethod_or_null();
4625     if (nm != NULL) {
4626       if (!nm->test_set_oops_do_mark()) {
4627         _oc.set_nm(nm);
4628         nm->oops_do(&_oc);
4629         nm->fix_oop_relocations();
4630       }
4631     }
4632   }
4633 };
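
G1CodeBlobClosure relies on nm->test_set_oops_do_mark() so that exactly one GC worker processes each nmethod per cycle. The same claim-once idiom in portable C++ (ToyNMethod and try_claim are illustrative stand-ins, not the nmethod API):

    #include <atomic>

    struct ToyNMethod {
      std::atomic_flag claimed;
      ToyNMethod() { claimed.clear(); }   // unclaimed at the start of a cycle
    };

    // True only for the single thread that wins the race; all others skip.
    inline bool try_claim(ToyNMethod* nm) {
      return !nm->claimed.test_and_set(std::memory_order_acq_rel);
    }
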
4634 
4635 class G1ParTask : public AbstractGangTask {
4636 protected:
4637   G1CollectedHeap*       _g1h;
4638   RefToScanQueueSet      *_queues;

4639   ParallelTaskTerminator _terminator;
4640   uint _n_workers;
4641 
4642   Mutex _stats_lock;
4643   Mutex* stats_lock() { return &_stats_lock; }
4644 
4645 public:
4646   G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
4647     : AbstractGangTask("G1 collection"),
4648       _g1h(g1h),
4649       _queues(task_queues),

4650       _terminator(0, _queues),
4651       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4652   {}
4653 
4654   RefToScanQueueSet* queues() { return _queues; }
4655 
4656   RefToScanQueue *work_queue(int i) {
4657     return queues()->queue(i);
4658   }
4659 
4660   ParallelTaskTerminator* terminator() { return &_terminator; }
4661 
4662   virtual void set_for_termination(int active_workers) {
4663     // This task calls set_n_termination() in par_non_clean_card_iterate_work()
4664     // in the young space (_par_seq_tasks) in the G1 heap
4665     // for SequentialSubTasksDone.
4666     // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
4667     // both of which need setting by set_n_termination().
4668     _g1h->SharedHeap::set_n_termination(active_workers);
4669     _g1h->set_n_termination(active_workers);
4670     terminator()->reset_for_reuse(active_workers);
4671     _n_workers = active_workers;
4672   }
4673 
4674   // Helps out with CLD processing.
4675   //
4676   // During InitialMark we need to:
4677   // 1) Scavenge all CLDs for the young GC.
4678   // 2) Mark all objects directly reachable from strong CLDs.
4679   template <G1Mark do_mark_object>
4680   class G1CLDClosure : public CLDClosure {
4681     G1ParCopyClosure<G1BarrierNone,  do_mark_object>* _oop_closure;
4682     G1ParCopyClosure<G1BarrierKlass, do_mark_object>  _oop_in_klass_closure;
4683     G1KlassScanClosure                                _klass_in_cld_closure;
4684     bool                                              _claim;
4685 
4686    public:
4687     G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
4688                  bool only_young, bool claim)
4689         : _oop_closure(oop_closure),


4718 
4719       bool only_young = _g1h->g1_policy()->gcs_are_young();
4720 
4721       // Non-IM young GC.
4722       G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
4723       G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
4724                                                                                only_young, // Only process dirty klasses.
4725                                                                                false);     // No need to claim CLDs.
4726       // IM young GC.
4727       //    Strong roots closures.
4728       G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, &pss, rp);
4729       G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
4730                                                                                false, // Process all klasses.
4731                                                                                true); // Need to claim CLDs.
4732       //    Weak roots closures.
4733       G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
4734       G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
4735                                                                                     false, // Process all klasses.
4736                                                                                     true); // Need to claim CLDs.
4737 
4738       G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
4739       G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
4740       // IM Weak code roots are handled later.
4741 
4742       OopClosure* strong_root_cl;
4743       OopClosure* weak_root_cl;
4744       CLDClosure* strong_cld_cl;
4745       CLDClosure* weak_cld_cl;
4746       CodeBlobClosure* strong_code_cl;

4747 
4748       if (_g1h->g1_policy()->during_initial_mark_pause()) {
4749         // We also need to mark copied objects.
4750         strong_root_cl = &scan_mark_root_cl;
4751         strong_cld_cl  = &scan_mark_cld_cl;
4752         strong_code_cl = &scan_mark_code_cl;
4753         if (ClassUnloadingWithConcurrentMark) {
4754           weak_root_cl = &scan_mark_weak_root_cl;
4755           weak_cld_cl  = &scan_mark_weak_cld_cl;

4756         } else {
4757           weak_root_cl = &scan_mark_root_cl;
4758           weak_cld_cl  = &scan_mark_cld_cl;
4759         }
4760       } else {
4761         strong_root_cl = &scan_only_root_cl;
4762         weak_root_cl   = &scan_only_root_cl;
4763         strong_cld_cl  = &scan_only_cld_cl;
4764         weak_cld_cl    = &scan_only_cld_cl;
4765         strong_code_cl = &scan_only_code_cl;
4766       }
4767 
4768 
4769       G1ParPushHeapRSClosure  push_heap_rs_cl(_g1h, &pss);
4770 
4771       pss.start_strong_roots();
4772       _g1h->g1_process_roots(strong_root_cl,

4773                              weak_root_cl,
4774                              &push_heap_rs_cl,
4775                              strong_cld_cl,
4776                              weak_cld_cl,
4777                              strong_code_cl,
4778                              worker_id);
4779 




4780       pss.end_strong_roots();
4781 
4782       {
4783         double start = os::elapsedTime();
4784         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4785         evac.do_void();
4786         double elapsed_sec = os::elapsedTime() - start;
4787         double term_sec = pss.term_time();
4788         _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4789         _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4790         _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, pss.term_attempts());
4791       }
4792       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4793       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4794 
4795       if (ParallelGCVerbose) {
4796         MutexLocker x(stats_lock());
4797         pss.print_termination_stats(worker_id);
4798       }
4799 
4800       assert(pss.queue_is_empty(), "should be empty");
4801 
4802       // Close the inner scope so that the ResourceMark and HandleMark
4803       // destructors are executed here and are included as part of the
4804       // "GC Worker Time".
4805     }
4806     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
4807   }
4808 };
4809 
4810 // *** Common G1 Evacuation Stuff
4811 
4812 // This method is run in a GC worker.
4813 
4814 void
4815 G1CollectedHeap::
4816 g1_process_roots(OopClosure* scan_non_heap_roots,
4817                  OopClosure* scan_non_heap_weak_roots,
4818                  OopsInHeapRegionClosure* scan_rs,
4819                  CLDClosure* scan_strong_clds,
4820                  CLDClosure* scan_weak_clds,
4821                  CodeBlobClosure* scan_strong_code,
4822                  uint worker_i) {
4823 
4824   // First scan the shared roots.
4825   double ext_roots_start = os::elapsedTime();
4826   double closure_app_time_sec = 0.0;
4827 
4828   bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
4829   bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
4830 
4831   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4832   BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
4833 
4834   process_roots(false, // no scoping; this is parallel code
4835                 SharedHeap::SO_None,
4836                 &buf_scan_non_heap_roots,
4837                 &buf_scan_non_heap_weak_roots,
4838                 scan_strong_clds,
4839                 // Unloading Initial Marks handle the weak CLDs separately.
4840                 (trace_metadata ? NULL : scan_weak_clds),
4841                 scan_strong_code);
4842 
4843   // Now the CM ref_processor roots.
4844   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4845     // We need to treat the discovered reference lists of the
4846     // concurrent mark ref processor as roots and keep entries
4847     // (which are added by the marking threads) on them live
4848     // until they can be processed at the end of marking.
4849     ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
4850   }
4851 
4852   if (trace_metadata) {
4853     // Barrier to make sure all workers passed
4854     // the strong CLD and strong nmethods phases.
4855     active_strong_roots_scope()->wait_until_all_workers_done_with_threads(n_par_threads());
4856 
4857     // Now take the complement of the strong CLDs.
4858     ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
4859   }
4860 
4861   // Finish up any enqueued closure apps (attributed as object copy time).
4862   buf_scan_non_heap_roots.done();
4863   buf_scan_non_heap_weak_roots.done();
4864 
4865   double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
4866       + buf_scan_non_heap_weak_roots.closure_app_seconds();
4867 
4868   g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, obj_copy_time_sec);
4869 
4870   double ext_root_time_sec = os::elapsedTime() - ext_roots_start - obj_copy_time_sec;
4871   g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::ExtRootScan, worker_i, ext_root_time_sec);
4872 
4873   // During conc marking we have to filter the per-thread SATB buffers
4874   // to make sure we remove any oops into the CSet (which will show up
4875   // as implicitly live).
4876   {
4877     G1GCParPhaseTimesTracker x(g1_policy()->phase_times(), G1GCPhaseTimes::SATBFiltering, worker_i);
4878     if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers) && mark_in_progress()) {
4879       JavaThread::satb_mark_queue_set().filter_thread_buffers();
4880     }
4881   }
4882 
4883   // Now scan the complement of the collection set.
4884   G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
4885 
4886   g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
4887 
4888   _process_strong_tasks->all_tasks_completed();
4889 }
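
The BufferingOopClosure pair above exists for timing attribution: root scanning only records oops, the wrapped closure runs at done(), and its measured time is charged to ObjCopy, leaving ExtRootScan = (time since ext_roots_start) - closure_app_seconds. A standalone sketch of that buffering trick (the names here are stand-ins, not HotSpot's API):

    #include <chrono>
    #include <cstddef>
    #include <vector>

    struct OopVisitor {
      virtual ~OopVisitor() {}
      virtual void do_oop(void** p) = 0;
    };

    class BufferingVisitor : public OopVisitor {
      std::vector<void**> _buf;
      OopVisitor*         _wrapped;
      double              _app_seconds;
    public:
      explicit BufferingVisitor(OopVisitor* wrapped)
          : _wrapped(wrapped), _app_seconds(0.0) {}

      virtual void do_oop(void** p) { _buf.push_back(p); }  // cheap: record only

      void done() {   // apply the real closure now, timing it separately
        std::chrono::steady_clock::time_point t0 = std::chrono::steady_clock::now();
        for (std::size_t i = 0; i < _buf.size(); i++) {
          _wrapped->do_oop(_buf[i]);
        }
        std::chrono::duration<double> d = std::chrono::steady_clock::now() - t0;
        _app_seconds += d.count();
        _buf.clear();
      }

      double closure_app_seconds() const { return _app_seconds; }
    };
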
4890 
4891 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4892 private:
4893   BoolObjectClosure* _is_alive;
4894   int _initial_string_table_size;
4895   int _initial_symbol_table_size;
4896 
4897   bool  _process_strings;
4898   int _strings_processed;
4899   int _strings_removed;
4900 
4901   bool  _process_symbols;
4902   int _symbols_processed;
4903   int _symbols_removed;
4904 
4905   bool _do_in_parallel;
4906 public:
4907   G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
4908     AbstractGangTask("String/Symbol Unlinking"),
4909     _is_alive(is_alive),
4910     _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),


5858   hot_card_cache->reset_hot_cache_claimed_index();
5859   hot_card_cache->set_use_cache(false);
5860 
5861   uint n_workers;
5862   if (G1CollectedHeap::use_parallel_gc_threads()) {
5863     n_workers =
5864       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5865                                      workers()->active_workers(),
5866                                      Threads::number_of_non_daemon_threads());
5867     assert(UseDynamicNumberOfGCThreads ||
5868            n_workers == workers()->total_workers(),
 5869            "If not dynamic should be using all the workers");
5870     workers()->set_active_workers(n_workers);
5871     set_par_threads(n_workers);
5872   } else {
5873     assert(n_par_threads() == 0,
5874            "Should be the original non-parallel value");
5875     n_workers = 1;
5876   }
5877 
5878   G1ParTask g1_par_task(this, _task_queues);
5879 
5880   init_for_evac_failure(NULL);
5881 
5882   rem_set()->prepare_for_younger_refs_iterate(true);
5883 
5884   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5885   double start_par_time_sec = os::elapsedTime();
5886   double end_par_time_sec;
5887 
5888   {
5889     StrongRootsScope srs(this);

5890     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5891     if (g1_policy()->during_initial_mark_pause()) {
5892       ClassLoaderDataGraph::clear_claimed_marks();
5893     }
5894 
5895     if (G1CollectedHeap::use_parallel_gc_threads()) {
5896       // The individual threads will set their evac-failure closures.
5897       if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
 5898       // These tasks use SharedHeap::_process_strong_tasks
5899       assert(UseDynamicNumberOfGCThreads ||
5900              workers()->active_workers() == workers()->total_workers(),
 5901              "If not dynamic should be using all the workers");
5902       workers()->run_task(&g1_par_task);
5903     } else {
5904       g1_par_task.set_for_termination(n_workers);
5905       g1_par_task.work(0);
5906     }
5907     end_par_time_sec = os::elapsedTime();
5908 
5909     // Closing the inner scope will execute the destructor
5910     // for the StrongRootsScope object. We record the current
5911     // elapsed time before closing the scope so that time
5912     // taken for the SRS destructor is NOT included in the
5913     // reported parallel time.
5914   }
5915 
5916   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5917 
5918   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5919   phase_times->record_par_time(par_time_ms);
5920 
5921   double code_root_fixup_time_ms =
5922         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5923   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5924 
5925   set_par_threads(0);
5926 
5927   // Process any discovered reference objects - we have
5928   // to do this _before_ we retire the GC alloc regions
5929   // as we may have to copy some 'reachable' referent
5930   // objects (and their reachable sub-graphs) that were
5931   // not copied during the pause.
5932   process_discovered_references(n_workers);


--- New version of the file (root processing factored out into G1RootProcessor) ---

  29 #include "precompiled.hpp"
  30 #include "classfile/metadataOnStackMark.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/icBuffer.hpp"
  33 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  34 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  35 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  36 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  37 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  38 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  39 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  40 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  41 #include "gc_implementation/g1/g1EvacFailure.hpp"
  42 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  43 #include "gc_implementation/g1/g1Log.hpp"
  44 #include "gc_implementation/g1/g1MarkSweep.hpp"
  45 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  46 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
  47 #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
  48 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  49 #include "gc_implementation/g1/g1RootProcessor.hpp"
  50 #include "gc_implementation/g1/g1StringDedup.hpp"
  51 #include "gc_implementation/g1/g1YCTypes.hpp"
  52 #include "gc_implementation/g1/heapRegion.inline.hpp"
  53 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  54 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
  55 #include "gc_implementation/g1/vm_operations_g1.hpp"
  56 #include "gc_implementation/shared/gcHeapSummary.hpp"
  57 #include "gc_implementation/shared/gcTimer.hpp"
  58 #include "gc_implementation/shared/gcTrace.hpp"
  59 #include "gc_implementation/shared/gcTraceTime.hpp"
  60 #include "gc_implementation/shared/isGCActiveMark.hpp"
  61 #include "memory/allocation.hpp"
  62 #include "memory/gcLocker.inline.hpp"
  63 #include "memory/generationSpec.hpp"
  64 #include "memory/iterator.hpp"
  65 #include "memory/referenceProcessor.hpp"
  66 #include "oops/oop.inline.hpp"
  67 #include "oops/oop.pcgc.inline.hpp"
  68 #include "runtime/orderAccess.inline.hpp"
  69 #include "runtime/vmThread.hpp"
  70 
  71 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  72 
  73 // Turn it on so that the contents of the young list (scan-only /
  74 // to-be-collected) are printed at "strategic" points before, during,
  75 // and after the collection --- this is useful for debugging.
  76 #define YOUNG_LIST_VERBOSE 0
  77 // CURRENT STATUS
  78 // This file is under construction.  Search for "FIXME".
  79 
  80 // INVARIANTS/NOTES
  81 //
  82 // All allocation activity covered by the G1CollectedHeap interface is
  83 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  84 // and allocate_new_tlab, which are the "entry" points to the
  85 // allocation code from the rest of the JVM.  (Note that this does not
  86 // apply to TLAB allocation, which is not part of this interface: it
  87 // is done by clients of this interface.)
  88 

  89 // Local to this file.
  90 
  91 class RefineCardTableEntryClosure: public CardTableEntryClosure {
  92   bool _concurrent;
  93 public:
  94   RefineCardTableEntryClosure() : _concurrent(true) { }
  95 
  96   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
  97     bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
  98     // This path is executed by the concurrent refine or mutator threads,
  99     // concurrently, and so we do not care if card_ptr contains references
 100     // that point into the collection set.
 101     assert(!oops_into_cset, "should be");
 102 
 103     if (_concurrent && SuspendibleThreadSet::should_yield()) {
 104       // Caller will actually yield.
 105       return false;
 106     }
 107     // Otherwise, we finished successfully; return true.
 108     return true;


1826   _hrm.verify_optional();
1827   verify_region_sets_optional();
1828 }
1829 
1830 // Public methods.
1831 
1832 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1833 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1834 #endif // _MSC_VER
1835 
1836 
1837 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1838   SharedHeap(policy_),
1839   _g1_policy(policy_),
1840   _dirty_card_queue_set(false),
1841   _into_cset_dirty_card_queue_set(false),
1842   _is_alive_closure_cm(this),
1843   _is_alive_closure_stw(this),
1844   _ref_processor_cm(NULL),
1845   _ref_processor_stw(NULL),

1846   _bot_shared(NULL),
1847   _evac_failure_scan_stack(NULL),
1848   _mark_in_progress(false),
1849   _cg1r(NULL),
1850   _g1mm(NULL),
1851   _refine_cte_cl(NULL),
1852   _full_collection(false),
1853   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1854   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1855   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1856   _humongous_is_live(),
1857   _has_humongous_reclaim_candidates(false),
1858   _free_regions_coming(false),
1859   _young_list(new YoungList(this)),
1860   _gc_time_stamp(0),
1861   _survivor_plab_stats(YoungPLABSize, PLABWeight),
1862   _old_plab_stats(OldPLABSize, PLABWeight),
1863   _expand_heap_after_alloc_failure(true),
1864   _surviving_young_words(NULL),
1865   _old_marking_cycles_started(0),
1866   _old_marking_cycles_completed(0),
1867   _concurrent_cycle_started(false),
1868   _in_cset_fast_test(),
1869   _dirty_cards_region_list(NULL),
1870   _worker_cset_start_region(NULL),
1871   _worker_cset_start_region_time_stamp(NULL),
1872   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1873   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1874   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1875   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1876 
1877   _g1h = this;



1878 
1879   _allocator = G1Allocator::create_allocator(_g1h);
1880   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1881 
1882   int n_queues = MAX2((int)ParallelGCThreads, 1);
1883   _task_queues = new RefToScanQueueSet(n_queues);
1884 
1885   uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1886   assert(n_rem_sets > 0, "Invariant.");
1887 
1888   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1889   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
1890   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1891 
1892   for (int i = 0; i < n_queues; i++) {
1893     RefToScanQueue* q = new RefToScanQueue();
1894     q->initialize();
1895     _task_queues->register_queue(i, q);
1896     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1897   }


3259     }
3260   }
3261 };
3262 
3263 void G1CollectedHeap::verify(bool silent, VerifyOption vo) {
3264   if (SafepointSynchronize::is_at_safepoint()) {
3265     assert(Thread::current()->is_VM_thread(),
3266            "Expected to be executed serially by the VM thread at this point");
3267 
3268     if (!silent) { gclog_or_tty->print("Roots "); }
3269     VerifyRootsClosure rootsCl(vo);
3270     VerifyKlassClosure klassCl(this, &rootsCl);
3271     CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
3272 
3273     // We apply the relevant closures to all the oops in the
3274     // system dictionary, class loader data graph, the string table
3275     // and the nmethods in the code cache.
3276     G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
3277     G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
3278 
3279     {
3280       G1RootProcessor root_processor(this);
3281       root_processor.process_all_roots(&rootsCl,
3282                                        &cldCl,
3283                                        &blobsCl);
3284     }
3285 
3286     bool failures = rootsCl.failures() || codeRootsCl.failures();
3287 
3288     if (vo != VerifyOption_G1UseMarkWord) {
3289       // If we're verifying during a full GC then the region sets
3290       // will have been torn down at the start of the GC. Therefore
3291       // verifying the region sets will fail. So we only verify
3292       // the region sets when not in a full GC.
3293       if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
3294       verify_region_sets();
3295     }
3296 
3297     if (!silent) { gclog_or_tty->print("HeapRegions "); }
3298     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
3299       assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
3300              "sanity check");
3301 
3302       G1ParVerifyTask task(this, vo);
3303       assert(UseDynamicNumberOfGCThreads ||
3304         workers()->active_workers() == workers()->total_workers(),


4551       : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
4552   void do_klass(Klass* klass) {
 4553     // If the klass has not been dirtied we know that there are
 4554     // no references into the young gen and we can skip it.
 4555     if (!_process_only_dirty || klass->has_modified_oops()) {
4556       // Clean the klass since we're going to scavenge all the metadata.
4557       klass->clear_modified_oops();
4558 
4559       // Tell the closure that this klass is the Klass to scavenge
4560       // and is the one to dirty if oops are left pointing into the young gen.
4561       _closure->set_scanned_klass(klass);
4562 
4563       klass->oops_do(_closure);
4564 
4565       _closure->set_scanned_klass(NULL);
4566     }
4567     _count++;
4568   }
4569 };
4570 
4571 class G1ParTask : public AbstractGangTask {
4572 protected:
4573   G1CollectedHeap*       _g1h;
4574   RefToScanQueueSet      *_queues;
4575   G1RootProcessor*       _root_processor;
4576   ParallelTaskTerminator _terminator;
4577   uint _n_workers;
4578 
4579   Mutex _stats_lock;
4580   Mutex* stats_lock() { return &_stats_lock; }
4581 
4582 public:
4583   G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues, G1RootProcessor* root_processor)
4584     : AbstractGangTask("G1 collection"),
4585       _g1h(g1h),
4586       _queues(task_queues),
4587       _root_processor(root_processor),
4588       _terminator(0, _queues),
4589       _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
4590   {}
4591 
4592   RefToScanQueueSet* queues() { return _queues; }
4593 
4594   RefToScanQueue *work_queue(int i) {
4595     return queues()->queue(i);
4596   }
4597 
4598   ParallelTaskTerminator* terminator() { return &_terminator; }
4599 
4600   virtual void set_for_termination(int active_workers) {
4601     _root_processor->set_num_workers(active_workers);






4602     terminator()->reset_for_reuse(active_workers);
4603     _n_workers = active_workers;
4604   }
4605 
4606   // Helps out with CLD processing.
4607   //
4608   // During InitialMark we need to:
4609   // 1) Scavenge all CLDs for the young GC.
4610   // 2) Mark all objects directly reachable from strong CLDs.
4611   template <G1Mark do_mark_object>
4612   class G1CLDClosure : public CLDClosure {
4613     G1ParCopyClosure<G1BarrierNone,  do_mark_object>* _oop_closure;
4614     G1ParCopyClosure<G1BarrierKlass, do_mark_object>  _oop_in_klass_closure;
4615     G1KlassScanClosure                                _klass_in_cld_closure;
4616     bool                                              _claim;
4617 
4618    public:
4619     G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
4620                  bool only_young, bool claim)
4621         : _oop_closure(oop_closure),


4650 
4651       bool only_young = _g1h->g1_policy()->gcs_are_young();
4652 
4653       // Non-IM young GC.
4654       G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
4655       G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
4656                                                                                only_young, // Only process dirty klasses.
4657                                                                                false);     // No need to claim CLDs.
4658       // IM young GC.
4659       //    Strong roots closures.
4660       G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, &pss, rp);
4661       G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
4662                                                                                false, // Process all klasses.
4663                                                                                true); // Need to claim CLDs.
4664       //    Weak roots closures.
4665       G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
4666       G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
4667                                                                                     false, // Process all klasses.
4668                                                                                     true); // Need to claim CLDs.
4669 




4670       OopClosure* strong_root_cl;
4671       OopClosure* weak_root_cl;
4672       CLDClosure* strong_cld_cl;
4673       CLDClosure* weak_cld_cl;
4674 
4675       bool trace_metadata = false;
4676 
4677       if (_g1h->g1_policy()->during_initial_mark_pause()) {
4678         // We also need to mark copied objects.
4679         strong_root_cl = &scan_mark_root_cl;
4680         strong_cld_cl  = &scan_mark_cld_cl;

4681         if (ClassUnloadingWithConcurrentMark) {
4682           weak_root_cl = &scan_mark_weak_root_cl;
4683           weak_cld_cl  = &scan_mark_weak_cld_cl;
4684           trace_metadata = true;
4685         } else {
4686           weak_root_cl = &scan_mark_root_cl;
4687           weak_cld_cl  = &scan_mark_cld_cl;
4688         }
4689       } else {
4690         strong_root_cl = &scan_only_root_cl;
4691         weak_root_cl   = &scan_only_root_cl;
4692         strong_cld_cl  = &scan_only_cld_cl;
4693         weak_cld_cl    = &scan_only_cld_cl;

4694       }
4695 



4696       pss.start_strong_roots();
4697 
4698       _root_processor->evacuate_roots(strong_root_cl,
4699                                       weak_root_cl,

4700                                       strong_cld_cl,
4701                                       weak_cld_cl,
4702                                       trace_metadata,
4703                                       worker_id);
4704 
4705       G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
4706       _root_processor->scan_remembered_sets(&push_heap_rs_cl,
4707                                             weak_root_cl,
4708                                             worker_id);
4709       pss.end_strong_roots();
4710 
4711       {
4712         double start = os::elapsedTime();
4713         G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4714         evac.do_void();
4715         double elapsed_sec = os::elapsedTime() - start;
4716         double term_sec = pss.term_time();
4717         _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
4718         _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
4719         _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, pss.term_attempts());
4720       }
4721       _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4722       _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4723 
4724       if (ParallelGCVerbose) {
4725         MutexLocker x(stats_lock());
4726         pss.print_termination_stats(worker_id);
4727       }
4728 
4729       assert(pss.queue_is_empty(), "should be empty");
4730 
4731       // Close the inner scope so that the ResourceMark and HandleMark
4732       // destructors are executed here and are included as part of the
4733       // "GC Worker Time".
4734     }
4735     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime());
4736   }
4737 };
4738 
4739 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
4740 private:
4741   BoolObjectClosure* _is_alive;
4742   int _initial_string_table_size;
4743   int _initial_symbol_table_size;
4744 
4745   bool  _process_strings;
4746   int _strings_processed;
4747   int _strings_removed;
4748 
4749   bool  _process_symbols;
4750   int _symbols_processed;
4751   int _symbols_removed;
4752 
4753   bool _do_in_parallel;
4754 public:
4755   G1StringSymbolTableUnlinkTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols) :
4756     AbstractGangTask("String/Symbol Unlinking"),
4757     _is_alive(is_alive),
4758     _do_in_parallel(G1CollectedHeap::use_parallel_gc_threads()),


5706   hot_card_cache->reset_hot_cache_claimed_index();
5707   hot_card_cache->set_use_cache(false);
5708 
5709   uint n_workers;
5710   if (G1CollectedHeap::use_parallel_gc_threads()) {
5711     n_workers =
5712       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5713                                      workers()->active_workers(),
5714                                      Threads::number_of_non_daemon_threads());
5715     assert(UseDynamicNumberOfGCThreads ||
5716            n_workers == workers()->total_workers(),
 5717            "If not dynamic should be using all the workers");
5718     workers()->set_active_workers(n_workers);
5719     set_par_threads(n_workers);
5720   } else {
5721     assert(n_par_threads() == 0,
5722            "Should be the original non-parallel value");
5723     n_workers = 1;
5724   }
5725 

5726 
5727   init_for_evac_failure(NULL);
5728 
5729   rem_set()->prepare_for_younger_refs_iterate(true);
5730 
5731   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5732   double start_par_time_sec = os::elapsedTime();
5733   double end_par_time_sec;
5734 
5735   {
5736     G1RootProcessor root_processor(this);
5737     G1ParTask g1_par_task(this, _task_queues, &root_processor);
5738     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5739     if (g1_policy()->during_initial_mark_pause()) {
5740       ClassLoaderDataGraph::clear_claimed_marks();
5741     }
5742 
5743     if (G1CollectedHeap::use_parallel_gc_threads()) {
5744       // The individual threads will set their evac-failure closures.
5745       if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
 5746       // These tasks use SharedHeap::_process_strong_tasks
5747       assert(UseDynamicNumberOfGCThreads ||
5748              workers()->active_workers() == workers()->total_workers(),
 5749              "If not dynamic should be using all the workers");
5750       workers()->run_task(&g1_par_task);
5751     } else {
5752       g1_par_task.set_for_termination(n_workers);
5753       g1_par_task.work(0);
5754     }
5755     end_par_time_sec = os::elapsedTime();
5756 
5757     // Closing the inner scope will execute the destructor
5758     // for the G1RootProcessor object. We record the current
5759     // elapsed time before closing the scope so that time
5760     // taken for the destructor is NOT included in the
5761     // reported parallel time.
5762   }
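
The inner scope above is an RAII pattern: end_par_time_sec is sampled before the closing brace, so the G1RootProcessor destructor's cost stays out of the reported parallel time. A minimal standalone sketch of sampling inside the scope (ScopedProcessor is a hypothetical stand-in):

    #include <chrono>

    inline double now_sec() {
      return std::chrono::duration<double>(
          std::chrono::steady_clock::now().time_since_epoch()).count();
    }

    struct ScopedProcessor {   // stand-in for G1RootProcessor
      ~ScopedProcessor() { /* teardown cost lands outside the timed window */ }
    };

    inline double timed_parallel_phase_ms() {
      double start_sec = now_sec();
      double end_sec;
      {
        ScopedProcessor p;
        // ... run the parallel task here ...
        end_sec = now_sec();   // sample BEFORE the destructor runs
      }
      return (end_sec - start_sec) * 1000.0;
    }
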
5763 
5764   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
5765 
5766   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5767   phase_times->record_par_time(par_time_ms);
5768 
5769   double code_root_fixup_time_ms =
5770         (os::elapsedTime() - end_par_time_sec) * 1000.0;
5771   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
5772 
5773   set_par_threads(0);
5774 
5775   // Process any discovered reference objects - we have
5776   // to do this _before_ we retire the GC alloc regions
5777   // as we may have to copy some 'reachable' referent
5778   // objects (and their reachable sub-graphs) that were
5779   // not copied during the pause.
5780   process_discovered_references(n_workers);

