
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 49944 : imported patch 8201492-properly-implement-non-contiguous-reference-processing


1382 }
1383 
1384 // Public methods.
1385 
1386 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1387   CollectedHeap(),
1388   _young_gen_sampling_thread(NULL),
1389   _collector_policy(collector_policy),
1390   _soft_ref_policy(),
1391   _card_table(NULL),
1392   _memory_manager("G1 Young Generation", "end of minor GC"),
1393   _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
1394   _eden_pool(NULL),
1395   _survivor_pool(NULL),
1396   _old_pool(NULL),
1397   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1398   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1399   _g1_policy(new G1Policy(_gc_timer_stw)),
1400   _collection_set(this, _g1_policy),
1401   _dirty_card_queue_set(false),
1402   _is_alive_closure_cm(this),
1403   _is_alive_closure_stw(this),

1404   _ref_processor_cm(NULL),
1405   _ref_processor_stw(NULL),

1406   _bot(NULL),
1407   _hot_card_cache(NULL),
1408   _g1_rem_set(NULL),
1409   _cr(NULL),
1410   _g1mm(NULL),
1411   _preserved_marks_set(true /* in_c_heap */),
1412   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1413   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1414   _humongous_reclaim_candidates(),
1415   _has_humongous_reclaim_candidates(false),
1416   _archive_allocator(NULL),
1417   _summary_bytes_used(0),
1418   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1419   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1420   _expand_heap_after_alloc_failure(true),
1421   _old_marking_cycles_started(0),
1422   _old_marking_cycles_completed(0),
1423   _in_cset_fast_test() {
1424 
1425   _workers = new WorkGang("GC Thread", ParallelGCThreads,


1758   //   * Reference discovery requires a barrier (see below).
1759   //   * Reference processing may or may not be MT
1760   //     (depending on the value of ParallelRefProcEnabled
1761   //     and ParallelGCThreads).
1762   //   * A full GC disables reference discovery by the CM
1763   //     ref processor and abandons any entries on its
1764   //     discovered lists.
1765   //
1766   // * For the STW processor:
1767   //   * Non MT discovery is enabled at the start of a full GC.
1768   //   * Processing and enqueueing during a full GC is non-MT.
1769   //   * During a full GC, references are processed after marking.
1770   //
1771   //   * Discovery (may or may not be MT) is enabled at the start
1772   //     of an incremental evacuation pause.
1773   //   * References are processed near the end of a STW evacuation pause.
1774   //   * For both types of GC:
1775   //     * Discovery is atomic - i.e. not concurrent.
1776   //     * Reference discovery will not need a barrier.
1777 
1778   MemRegion mr = reserved_region();
1779 
1780   bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
1781 
1782   // Concurrent Mark ref processor
1783   _ref_processor_cm =
1784     new ReferenceProcessor(mr,    // span
1785                            mt_processing,
1786                                 // mt processing
1787                            ParallelGCThreads,
1788                                 // degree of mt processing
1789                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
1790                                 // mt discovery
1791                            MAX2(ParallelGCThreads, ConcGCThreads),
1792                                 // degree of mt discovery
1793                            false,
1794                                 // Reference discovery is not atomic
1795                            &_is_alive_closure_cm);
1796                                 // is alive closure
1797                                 // (for efficiency/performance)
1798 
1799   // STW ref processor
1800   _ref_processor_stw =
1801     new ReferenceProcessor(mr,    // span
1802                            mt_processing,
1803                                 // mt processing
1804                            ParallelGCThreads,
1805                                 // degree of mt processing
1806                            (ParallelGCThreads > 1),
1807                                 // mt discovery
1808                            ParallelGCThreads,
1809                                 // degree of mt discovery
1810                            true,
1811                                 // Reference discovery is atomic
1812                            &_is_alive_closure_stw);
1813                                 // is alive closure
1814                                 // (for efficiency/performance)
1815 }
1816 
1817 CollectorPolicy* G1CollectedHeap::collector_policy() const {
1818   return _collector_policy;
1819 }
1820 
1821 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
1822   return &_soft_ref_policy;
1823 }
1824 
1825 size_t G1CollectedHeap::capacity() const {
1826   return _hrm.length() * HeapRegion::GrainBytes;
1827 }
1828 
1829 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
1830   return _hrm.total_free_bytes();
1831 }
1832 
1833 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
1834   _hot_card_cache->drain(cl, worker_i);


3614     phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3615   }
3616 };
3617 
3618 void G1CollectedHeap::redirty_logged_cards() {
3619   double redirty_logged_cards_start = os::elapsedTime();
3620 
3621   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
3622   dirty_card_queue_set().reset_for_par_iteration();
3623   workers()->run_task(&redirty_task);
3624 
3625   DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
3626   dcq.merge_bufferlists(&dirty_card_queue_set());
3627   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3628 
3629   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3630 }
3631 
3632 // Weak Reference Processing support
3633 
3634 // An always "is_alive" closure that is used to preserve referents.
3635 // If the object is non-null then it's alive.  Used in the preservation
3636 // of referent objects that are pointed to by reference objects
3637 // discovered by the CM ref processor.
3638 class G1AlwaysAliveClosure: public BoolObjectClosure {
3639 public:
3640   bool do_object_b(oop p) {
3641     if (p != NULL) {
3642       return true;
3643     }
3644     return false;
3645   }
3646 };
3647 
3648 bool G1STWIsAliveClosure::do_object_b(oop p) {
3649   // An object is reachable if it is outside the collection set,
3650   // or is inside and copied.
3651   return !_g1h->is_in_cset(p) || p->is_forwarded();
3652 }
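
For readers less familiar with G1's evacuation pause, the liveness rule above can be exercised in isolation. A minimal, self-contained sketch (not G1 code; all names are hypothetical) that models the three cases: an object outside the collection set, an evacuated (forwarded) object inside it, and an unforwarded object inside it.

// Illustrative sketch only -- models the rule in G1STWIsAliveClosure above.
// All types and values are hypothetical, not HotSpot APIs.
#include <cassert>

struct ToyObject {
  bool in_cset;      // lives in a collection-set region?
  bool forwarded;    // already evacuated (has a forwardee)?
};

static bool stw_is_alive(const ToyObject& o) {
  // Reachable if outside the collection set, or inside and already copied.
  return !o.in_cset || o.forwarded;
}

int main() {
  assert(stw_is_alive({ /*in_cset=*/false, /*forwarded=*/false }));  // untouched by this pause
  assert(stw_is_alive({ /*in_cset=*/true,  /*forwarded=*/true  }));  // evacuated, hence alive
  assert(!stw_is_alive({ /*in_cset=*/true, /*forwarded=*/false }));  // not copied -> dead
  return 0;
}
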
3653 

3654 // Non Copying Keep Alive closure
3655 class G1KeepAliveClosure: public OopClosure {
3656   G1CollectedHeap*_g1h;
3657 public:
3658   G1KeepAliveClosure(G1CollectedHeap* g1h) :_g1h(g1h) {}
3659   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
3660   void do_oop(oop* p) {
3661     oop obj = *p;
3662     assert(obj != NULL, "the caller should have filtered out NULL values");
3663 
3664     const InCSetState cset_state =_g1h->in_cset_state(obj);
3665     if (!cset_state.is_in_cset_or_humongous()) {
3666       return;
3667     }
3668     if (cset_state.is_in_cset()) {
3669       assert( obj->is_forwarded(), "invariant" );
3670       *p = obj->forwardee();
3671     } else {
3672       assert(!obj->is_forwarded(), "invariant" );
3673       assert(cset_state.is_humongous(),


3864 
3865   virtual void work(uint worker_id) {
3866     _enq_task.work(worker_id);
3867   }
3868 };
3869 
3870 // Driver routine for parallel reference enqueueing.
3871 // Creates an instance of the ref enqueueing gang
3872 // task and has the worker threads execute it.
3873 
3874 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
3875   assert(_workers != NULL, "Need parallel worker threads.");
3876 
3877   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
3878 
3879   _workers->run_task(&enq_task_proxy);
3880 }
3881 
3882 // End of weak reference support closures
3883 
3884 // Abstract task used to preserve (i.e. copy) any referent objects
3885 // that are in the collection set and are pointed to by reference
3886 // objects discovered by the CM ref processor.
3887 
3888 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
3889 protected:
3890   G1CollectedHeap*         _g1h;
3891   G1ParScanThreadStateSet* _pss;
3892   RefToScanQueueSet*       _queues;
3893   ParallelTaskTerminator   _terminator;
3894   uint                     _n_workers;
3895 
3896 public:
3897   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, int workers, RefToScanQueueSet *task_queues) :
3898     AbstractGangTask("ParPreserveCMReferents"),
3899     _g1h(g1h),
3900     _pss(per_thread_states),
3901     _queues(task_queues),
3902     _terminator(workers, _queues),
3903     _n_workers(workers)
3904   {
3905     g1h->ref_processor_cm()->set_active_mt_degree(workers);
3906   }
3907 
3908   void work(uint worker_id) {
3909     G1GCParPhaseTimesTracker x(_g1h->g1_policy()->phase_times(), G1GCPhaseTimes::PreserveCMReferents, worker_id);
3910 
3911     ResourceMark rm;
3912     HandleMark   hm;
3913 
3914     G1ParScanThreadState*          pss = _pss->state_for_worker(worker_id);
3915     pss->set_ref_discoverer(NULL);
3916     assert(pss->queue_is_empty(), "both queue and overflow should be empty");
3917 
3918     // Is alive closure
3919     G1AlwaysAliveClosure always_alive;
3920 
3921     // Copying keep alive closure. Applied to referent objects that need
3922     // to be copied.
3923     G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);
3924 
3925     ReferenceProcessor* rp = _g1h->ref_processor_cm();
3926 
3927     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
3928     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
3929 
3930     // limit is set using max_num_q() - which was set using ParallelGCThreads.
3931     // So this must be true - but assert just in case someone decides to
3932     // change the worker ids.
3933     assert(worker_id < limit, "sanity");
3934     assert(!rp->discovery_is_atomic(), "check this code");
3935 
3936     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
3937     for (uint idx = worker_id; idx < limit; idx += stride) {
3938       DiscoveredList& ref_list = rp->discovered_refs()[idx];
3939 
3940       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
3941       while (iter.has_next()) {
3942         // Since discovery is not atomic for the CM ref processor, we
3943         // can see some null referent objects.
3944         iter.load_ptrs(DEBUG_ONLY(true));
3945         oop ref = iter.obj();
3946 
3947         // This will filter nulls.
3948         if (iter.is_referent_alive()) {
3949           iter.make_referent_alive();
3950         }
3951         iter.move_to_next();
3952       }
3953     }
3954 
3955     // Drain the queue - which may cause stealing
3956     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _queues, &_terminator);
3957     drain_queue.do_void();
3958     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
3959     assert(pss->queue_is_empty(), "should be");
3960   }
3961 };
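
The stride-based selection of discovered lists in work() above is easiest to see with concrete numbers. A minimal sketch, assuming hypothetical values (four reference subclasses, a max_num_q() of 8, and 8 active workers), that prints which list indices each worker would visit:

// Illustrative only: not part of this webrev. Shows which discovered-list
// indices a worker visits under the stride scheme used above, assuming
// hypothetical values.
#include <algorithm>
#include <cstdio>

int main() {
  const unsigned n_workers  = 8;   // hypothetical number of active workers
  const unsigned subclasses = 4;   // Soft, Weak, Final, Phantom
  const unsigned max_num_q  = 8;   // hypothetical queue count per subclass
  const unsigned limit  = subclasses * max_num_q;             // 32 lists in total
  const unsigned stride = std::min(std::max(n_workers, 1u), limit);

  for (unsigned worker_id = 0; worker_id < n_workers; worker_id++) {
    std::printf("worker %u:", worker_id);
    for (unsigned idx = worker_id; idx < limit; idx += stride) {
      std::printf(" %u", idx);     // lists [worker_id, worker_id + stride, ...)
    }
    std::printf("\n");
  }
  return 0;
}
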
3962 
3963 void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
3964   // Any reference objects, in the collection set, that were 'discovered'
3965   // by the CM ref processor should have already been copied (either by
3966   // applying the external root copy closure to the discovered lists, or
3967   // by following an RSet entry).
3968   //
3969   // But some of the referents, that are in the collection set, that these
3970   // reference objects point to may not have been copied: the STW ref
3971   // processor would have seen that the reference object had already
3972   // been 'discovered' and would have skipped discovering the reference,
3973   // but would not have treated the reference object as a regular oop.
3974   // As a result the copy closure would not have been applied to the
3975   // referent object.
3976   //
3977   // We need to explicitly copy these referent objects - the references
3978   // will be processed at the end of remarking.
3979   //
3980   // We also need to do this copying before we process the reference
3981   // objects discovered by the STW ref processor in case one of these
3982   // referents points to another object which is also referenced by an
3983   // object discovered by the STW ref processor.
3984   double preserve_cm_referents_time = 0.0;
3985 
3986   // To avoid spawning task when there is no work to do, check that
3987   // a concurrent cycle is active and that some references have been
3988   // discovered.
3989   if (concurrent_mark()->cm_thread()->during_cycle() &&
3990       ref_processor_cm()->has_discovered_references()) {
3991     double preserve_cm_referents_start = os::elapsedTime();
3992     uint no_of_gc_workers = workers()->active_workers();
3993     G1ParPreserveCMReferentsTask keep_cm_referents(this,
3994                                                    per_thread_states,
3995                                                    no_of_gc_workers,
3996                                                    _task_queues);
3997     workers()->run_task(&keep_cm_referents);
3998     preserve_cm_referents_time = os::elapsedTime() - preserve_cm_referents_start;
3999   }
4000 
4001   g1_policy()->phase_times()->record_preserve_cm_referents_time_ms(preserve_cm_referents_time * 1000.0);
4002 }
4003 
4004 // Weak Reference processing during an evacuation pause (part 1).
4005 void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
4006   double ref_proc_start = os::elapsedTime();
4007 
4008   ReferenceProcessor* rp = _ref_processor_stw;
4009   assert(rp->discovery_enabled(), "should have been enabled");
4010 
4011   // Closure to test whether a referent is alive.
4012   G1STWIsAliveClosure is_alive(this);
4013 
4014   // Even when parallel reference processing is enabled, the processing
4015   // of JNI refs is serial and performed by the current thread
4016   // rather than by a worker. The following PSS will be used for processing
4017   // JNI refs.
4018 
4019   // Use only a single queue for this PSS.
4020   G1ParScanThreadState*          pss = per_thread_states->state_for_worker(0);
4021   pss->set_ref_discoverer(NULL);
4022   assert(pss->queue_is_empty(), "pre-condition");
4023 


4169     // for the G1RootProcessor object. We record the current
4170     // elapsed time before closing the scope so that time
4171     // taken for the destructor is NOT included in the
4172     // reported parallel time.
4173   }
4174 
4175   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
4176   phase_times->record_par_time(par_time_ms);
4177 
4178   double code_root_fixup_time_ms =
4179         (os::elapsedTime() - end_par_time_sec) * 1000.0;
4180   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
4181 }
4182 
4183 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4184   // Process any discovered reference objects - we have
4185   // to do this _before_ we retire the GC alloc regions
4186   // as we may have to copy some 'reachable' referent
4187   // objects (and their reachable sub-graphs) that were
4188   // not copied during the pause.
4189   preserve_cm_referents(per_thread_states);
4190   process_discovered_references(per_thread_states);
4191 
4192   G1STWIsAliveClosure is_alive(this);
4193   G1KeepAliveClosure keep_alive(this);
4194 
4195   {
4196     double start = os::elapsedTime();
4197 
4198     WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
4199 
4200     double time_ms = (os::elapsedTime() - start) * 1000.0;
4201     g1_policy()->phase_times()->record_weak_ref_proc_time(time_ms);
4202   }
4203 
4204   if (G1StringDedup::is_enabled()) {
4205     double fixup_start = os::elapsedTime();
4206 
4207     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4208 
4209     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;




1382 }
1383 
1384 // Public methods.
1385 
1386 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1387   CollectedHeap(),
1388   _young_gen_sampling_thread(NULL),
1389   _collector_policy(collector_policy),
1390   _soft_ref_policy(),
1391   _card_table(NULL),
1392   _memory_manager("G1 Young Generation", "end of minor GC"),
1393   _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
1394   _eden_pool(NULL),
1395   _survivor_pool(NULL),
1396   _old_pool(NULL),
1397   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1398   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1399   _g1_policy(new G1Policy(_gc_timer_stw)),
1400   _collection_set(this, _g1_policy),
1401   _dirty_card_queue_set(false),
1402   _ref_processor_stw(NULL),
1403   _is_alive_closure_stw(this),
1404   _is_subject_to_discovery_stw(this),
1405   _ref_processor_cm(NULL),
1406   _is_alive_closure_cm(this),
1407   _is_subject_to_discovery_cm(this),
1408   _bot(NULL),
1409   _hot_card_cache(NULL),
1410   _g1_rem_set(NULL),
1411   _cr(NULL),
1412   _g1mm(NULL),
1413   _preserved_marks_set(true /* in_c_heap */),
1414   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1415   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1416   _humongous_reclaim_candidates(),
1417   _has_humongous_reclaim_candidates(false),
1418   _archive_allocator(NULL),
1419   _summary_bytes_used(0),
1420   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1421   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1422   _expand_heap_after_alloc_failure(true),
1423   _old_marking_cycles_started(0),
1424   _old_marking_cycles_completed(0),
1425   _in_cset_fast_test() {
1426 
1427   _workers = new WorkGang("GC Thread", ParallelGCThreads,


1760   //   * Reference discovery requires a barrier (see below).
1761   //   * Reference processing may or may not be MT
1762   //     (depending on the value of ParallelRefProcEnabled
1763   //     and ParallelGCThreads).
1764   //   * A full GC disables reference discovery by the CM
1765   //     ref processor and abandons any entries on it's
1766   //     discovered lists.
1767   //
1768   // * For the STW processor:
1769   //   * Non MT discovery is enabled at the start of a full GC.
1770   //   * Processing and enqueueing during a full GC is non-MT.
1771   //   * During a full GC, references are processed after marking.
1772   //
1773   //   * Discovery (may or may not be MT) is enabled at the start
1774   //     of an incremental evacuation pause.
1775   //   * References are processed near the end of a STW evacuation pause.
1776   //   * For both types of GC:
1777   //     * Discovery is atomic - i.e. not concurrent.
1778   //     * Reference discovery will not need a barrier.
1779 
1780   bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
1781 
1782   // Concurrent Mark ref processor
1783   _ref_processor_cm =
1784     new ReferenceProcessor(&_is_subject_to_discovery_cm,
1785                            mt_processing,                                  // mt processing
1786                            ParallelGCThreads,                              // degree of mt processing
1787                            (ParallelGCThreads > 1) || (ConcGCThreads > 1), // mt discovery
1788                            MAX2(ParallelGCThreads, ConcGCThreads),         // degree of mt discovery
1789                            false,                                          // Reference discovery is not atomic
1790                            &_is_alive_closure_cm);                         // is alive closure
1791 
1792   // STW ref processor
1793   _ref_processor_stw =
1794     new ReferenceProcessor(&_is_subject_to_discovery_stw,
1795                            mt_processing,                        // mt processing
1796                            ParallelGCThreads,                    // degree of mt processing
1797                            (ParallelGCThreads > 1),              // mt discovery
1798                            ParallelGCThreads,                    // degree of mt discovery
1799                            true,                                 // Reference discovery is atomic
1800                            &_is_alive_closure_stw);              // is alive closure
1801 }
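
The substantive change in this hunk is that each ReferenceProcessor is now handed a "subject to discovery" closure (_is_subject_to_discovery_cm / _is_subject_to_discovery_stw) instead of the MemRegion span used before, so the discovery area no longer has to be one contiguous address range. A minimal sketch of that shape, using hypothetical names rather than the real ReferenceProcessor internals:

// Illustrative sketch only -- not the actual ReferenceProcessor implementation.
// Shows discovery filtering through an injected predicate instead of a
// contiguous MemRegion span. All names here are hypothetical.
class IsSubjectToDiscoverySketch {
public:
  virtual bool do_object_b(const void* obj) = 0;   // mirrors a BoolObjectClosure-style predicate
  virtual ~IsSubjectToDiscoverySketch() {}
};

class RefProcessorSketch {
  IsSubjectToDiscoverySketch* _is_subject_to_discovery;   // supplied by the collector
public:
  explicit RefProcessorSketch(IsSubjectToDiscoverySketch* p)
    : _is_subject_to_discovery(p) {}

  bool discover(const void* reference_obj) {
    // The collector-supplied predicate decides whether this processor is
    // responsible for the object, so non-contiguous areas (for example the
    // collection set plus the survivor regions) can be covered.
    if (!_is_subject_to_discovery->do_object_b(reference_obj)) {
      return false;                     // outside this processor's area
    }
    // ... a real processor would append the object to a discovered list here ...
    return true;
  }
};
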
1802 
1803 CollectorPolicy* G1CollectedHeap::collector_policy() const {
1804   return _collector_policy;
1805 }
1806 
1807 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
1808   return &_soft_ref_policy;
1809 }
1810 
1811 size_t G1CollectedHeap::capacity() const {
1812   return _hrm.length() * HeapRegion::GrainBytes;
1813 }
1814 
1815 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
1816   return _hrm.total_free_bytes();
1817 }
1818 
1819 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
1820   _hot_card_cache->drain(cl, worker_i);


3600     phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3601   }
3602 };
3603 
3604 void G1CollectedHeap::redirty_logged_cards() {
3605   double redirty_logged_cards_start = os::elapsedTime();
3606 
3607   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
3608   dirty_card_queue_set().reset_for_par_iteration();
3609   workers()->run_task(&redirty_task);
3610 
3611   DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
3612   dcq.merge_bufferlists(&dirty_card_queue_set());
3613   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3614 
3615   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3616 }
3617 
3618 // Weak Reference Processing support
3619 

3620 bool G1STWIsAliveClosure::do_object_b(oop p) {
3621   // An object is reachable if it is outside the collection set,
3622   // or is inside and copied.
3623   return !_g1h->is_in_cset(p) || p->is_forwarded();
3624 }
3625 
3626 bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) {
3627   assert(obj != NULL, "must not be NULL");
3628   assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
3629   // The areas the CM and STW ref processor manage must be disjoint. The is_in_cset() below
3630   // may falsely indicate that this is not the case here: however the collection set only
3631   // contains old regions when concurrent mark is not running.
3632   return _g1h->is_in_cset(obj) || _g1h->heap_region_containing(obj)->is_survivor();
3633 }
3634 
3635 // Non Copying Keep Alive closure
3636 class G1KeepAliveClosure: public OopClosure {
3637   G1CollectedHeap*_g1h;
3638 public:
3639   G1KeepAliveClosure(G1CollectedHeap* g1h) :_g1h(g1h) {}
3640   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
3641   void do_oop(oop* p) {
3642     oop obj = *p;
3643     assert(obj != NULL, "the caller should have filtered out NULL values");
3644 
3645     const InCSetState cset_state =_g1h->in_cset_state(obj);
3646     if (!cset_state.is_in_cset_or_humongous()) {
3647       return;
3648     }
3649     if (cset_state.is_in_cset()) {
3650       assert( obj->is_forwarded(), "invariant" );
3651       *p = obj->forwardee();
3652     } else {
3653       assert(!obj->is_forwarded(), "invariant" );
3654       assert(cset_state.is_humongous(),


3845 
3846   virtual void work(uint worker_id) {
3847     _enq_task.work(worker_id);
3848   }
3849 };
3850 
3851 // Driver routine for parallel reference enqueueing.
3852 // Creates an instance of the ref enqueueing gang
3853 // task and has the worker threads execute it.
3854 
3855 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
3856   assert(_workers != NULL, "Need parallel worker threads.");
3857 
3858   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
3859 
3860   _workers->run_task(&enq_task_proxy);
3861 }
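
The executor above uses a small proxy pattern: a task that only knows how to do per-worker work (the EnqueueTask) is wrapped in a gang task so the WorkGang can fan it out across worker threads. A reduced, self-contained sketch of that pattern, with hypothetical names standing in for the HotSpot types:

// Illustrative sketch only: the proxy pattern above, reduced to plain C++.
// A gang-unaware task is wrapped in a gang task whose work() simply forwards
// the worker id. All names are hypothetical.
#include <cstdio>

struct EnqueueTaskSketch {               // stands in for the per-worker enqueue task
  void work(unsigned worker_id) { std::printf("enqueue step on worker %u\n", worker_id); }
};

struct GangTaskSketch {                  // stands in for AbstractGangTask
  virtual void work(unsigned worker_id) = 0;
  virtual ~GangTaskSketch() {}
};

struct EnqueueTaskProxySketch : GangTaskSketch {   // stands in for the enqueue task proxy
  EnqueueTaskSketch& _enq_task;
  explicit EnqueueTaskProxySketch(EnqueueTaskSketch& t) : _enq_task(t) {}
  void work(unsigned worker_id) override { _enq_task.work(worker_id); }
};

int main() {
  EnqueueTaskSketch enq;
  EnqueueTaskProxySketch proxy(enq);
  for (unsigned id = 0; id < 4; id++) {  // a real WorkGang would run this on its worker threads
    proxy.work(id);
  }
  return 0;
}
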
3862 
3863 // End of weak reference support closures
3864 

3865 // Weak Reference processing during an evacuation pause (part 1).
3866 void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
3867   double ref_proc_start = os::elapsedTime();
3868 
3869   ReferenceProcessor* rp = _ref_processor_stw;
3870   assert(rp->discovery_enabled(), "should have been enabled");
3871 
3872   // Closure to test whether a referent is alive.
3873   G1STWIsAliveClosure is_alive(this);
3874 
3875   // Even when parallel reference processing is enabled, the processing
3876   // of JNI refs is serial and performed by the current thread
3877   // rather than by a worker. The following PSS will be used for processing
3878   // JNI refs.
3879 
3880   // Use only a single queue for this PSS.
3881   G1ParScanThreadState*          pss = per_thread_states->state_for_worker(0);
3882   pss->set_ref_discoverer(NULL);
3883   assert(pss->queue_is_empty(), "pre-condition");
3884 


4030     // for the G1RootProcessor object. We record the current
4031     // elapsed time before closing the scope so that time
4032     // taken for the destructor is NOT included in the
4033     // reported parallel time.
4034   }
4035 
4036   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
4037   phase_times->record_par_time(par_time_ms);
4038 
4039   double code_root_fixup_time_ms =
4040         (os::elapsedTime() - end_par_time_sec) * 1000.0;
4041   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
4042 }
4043 
4044 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4045   // Process any discovered reference objects - we have
4046   // to do this _before_ we retire the GC alloc regions
4047   // as we may have to copy some 'reachable' referent
4048   // objects (and their reachable sub-graphs) that were
4049   // not copied during the pause.

4050   process_discovered_references(per_thread_states);
4051 
4052   G1STWIsAliveClosure is_alive(this);
4053   G1KeepAliveClosure keep_alive(this);
4054 
4055   {
4056     double start = os::elapsedTime();
4057 
4058     WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
4059 
4060     double time_ms = (os::elapsedTime() - start) * 1000.0;
4061     g1_policy()->phase_times()->record_weak_ref_proc_time(time_ms);
4062   }
4063 
4064   if (G1StringDedup::is_enabled()) {
4065     double fixup_start = os::elapsedTime();
4066 
4067     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4068 
4069     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;

