src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 49744 : imported patch 8200426-sangheon-review
rev 49747 : [mq]: 6672778-partial-queue-trimming
rev 49748 : imported patch 6672778-refactoring
rev 49749 : imported patch 6672778-stefanj-review
rev 49757 : imported patch 8201596-weak-processing-missing
rev 49758 : imported patch 8201492-properly-implement-non-contiguous-reference-processing


1381 }
1382 
1383 // Public methods.
1384 
1385 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1386   CollectedHeap(),
1387   _young_gen_sampling_thread(NULL),
1388   _collector_policy(collector_policy),
1389   _soft_ref_policy(),
1390   _card_table(NULL),
1391   _memory_manager("G1 Young Generation", "end of minor GC"),
1392   _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
1393   _eden_pool(NULL),
1394   _survivor_pool(NULL),
1395   _old_pool(NULL),
1396   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1397   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1398   _g1_policy(new G1Policy(_gc_timer_stw)),
1399   _collection_set(this, _g1_policy),
1400   _dirty_card_queue_set(false),
1401   _is_alive_closure_cm(this),
1402   _is_alive_closure_stw(this),
1403   _ref_processor_cm(NULL),
1404   _ref_processor_stw(NULL),
1405   _bot(NULL),
1406   _hot_card_cache(NULL),
1407   _g1_rem_set(NULL),
1408   _cr(NULL),
1409   _g1mm(NULL),
1410   _preserved_marks_set(true /* in_c_heap */),
1411   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1412   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1413   _humongous_reclaim_candidates(),
1414   _has_humongous_reclaim_candidates(false),
1415   _archive_allocator(NULL),
1416   _summary_bytes_used(0),
1417   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1418   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1419   _expand_heap_after_alloc_failure(true),
1420   _old_marking_cycles_started(0),
1421   _old_marking_cycles_completed(0),
1422   _in_cset_fast_test() {
1423 
1424   _workers = new WorkGang("GC Thread", ParallelGCThreads,


1757   //   * Reference discovery requires a barrier (see below).
1758   //   * Reference processing may or may not be MT
1759   //     (depending on the value of ParallelRefProcEnabled
1760   //     and ParallelGCThreads).
1761   //   * A full GC disables reference discovery by the CM
1762   //     ref processor and abandons any entries on its
1763   //     discovered lists.
1764   //
1765   // * For the STW processor:
1766   //   * Non MT discovery is enabled at the start of a full GC.
1767   //   * Processing and enqueueing during a full GC is non-MT.
1768   //   * During a full GC, references are processed after marking.
1769   //
1770   //   * Discovery (may or may not be MT) is enabled at the start
1771   //     of an incremental evacuation pause.
1772   //   * References are processed near the end of a STW evacuation pause.
1773   //   * For both types of GC:
1774   //     * Discovery is atomic - i.e. not concurrent.
1775   //     * Reference discovery will not need a barrier.
1776 
1777   MemRegion mr = reserved_region();
1778 
1779   bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
1780 
1781   // Concurrent Mark ref processor
1782   _ref_processor_cm =
1783     new ReferenceProcessor(mr,    // span
1784                            mt_processing,
1785                                 // mt processing
1786                            ParallelGCThreads,
1787                                 // degree of mt processing
1788                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
1789                                 // mt discovery
1790                            MAX2(ParallelGCThreads, ConcGCThreads),
1791                                 // degree of mt discovery
1792                            false,
1793                                 // Reference discovery is not atomic
1794                            &_is_alive_closure_cm);
1795                                 // is alive closure
1796                                 // (for efficiency/performance)
1797 
1798   // STW ref processor
1799   _ref_processor_stw =
1800     new ReferenceProcessor(mr,    // span
1801                            mt_processing,
1802                                 // mt processing
1803                            ParallelGCThreads,
1804                                 // degree of mt processing
1805                            (ParallelGCThreads > 1),
1806                                 // mt discovery
1807                            ParallelGCThreads,
1808                                 // degree of mt discovery
1809                            true,
1810                                 // Reference discovery is atomic
1811                            &_is_alive_closure_stw);
1812                                 // is alive closure
1813                                 // (for efficiency/performance)
1814 }
1815 
1816 CollectorPolicy* G1CollectedHeap::collector_policy() const {
1817   return _collector_policy;
1818 }
1819 
1820 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
1821   return &_soft_ref_policy;
1822 }
1823 
1824 size_t G1CollectedHeap::capacity() const {
1825   return _hrm.length() * HeapRegion::GrainBytes;
1826 }
1827 
1828 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
1829   return _hrm.total_free_bytes();
1830 }
1831 
1832 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
1833   _hot_card_cache->drain(cl, worker_i);


3616     phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3617   }
3618 };
3619 
3620 void G1CollectedHeap::redirty_logged_cards() {
3621   double redirty_logged_cards_start = os::elapsedTime();
3622 
3623   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
3624   dirty_card_queue_set().reset_for_par_iteration();
3625   workers()->run_task(&redirty_task);
3626 
3627   DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
3628   dcq.merge_bufferlists(&dirty_card_queue_set());
3629   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3630 
3631   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3632 }
3633 
3634 // Weak Reference Processing support
3635 
3636 // An always "is_alive" closure that is used to preserve referents.
3637 // If the object is non-null then it's alive.  Used in the preservation
3638 // of referent objects that are pointed to by reference objects
3639 // discovered by the CM ref processor.
3640 class G1AlwaysAliveClosure: public BoolObjectClosure {
3641 public:
3642   bool do_object_b(oop p) {
3643     if (p != NULL) {
3644       return true;
3645     }
3646     return false;
3647   }
3648 };
3649 
3650 bool G1STWIsAliveClosure::do_object_b(oop p) {
3651   // An object is reachable if it is outside the collection set,
3652   // or is inside and copied.
3653   return !_g1h->is_in_cset(p) || p->is_forwarded();
3654 }
3655 


3656 // Non Copying Keep Alive closure
3657 class G1KeepAliveClosure: public OopClosure {
3658   G1CollectedHeap* _g1h;
3659 public:
3660   G1KeepAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
3661   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
3662   void do_oop(oop* p) {
3663     oop obj = *p;
3664     assert(obj != NULL, "the caller should have filtered out NULL values");
3665 
3666     const InCSetState cset_state = _g1h->in_cset_state(obj);
3667     if (!cset_state.is_in_cset_or_humongous()) {
3668       return;
3669     }
3670     if (cset_state.is_in_cset()) {
3671       assert(obj->is_forwarded(), "invariant");
3672       *p = obj->forwardee();
3673     } else {
3674       assert(!obj->is_forwarded(), "invariant");
3675       assert(cset_state.is_humongous(),


3866 
3867   virtual void work(uint worker_id) {
3868     _enq_task.work(worker_id);
3869   }
3870 };
3871 
3872 // Driver routine for parallel reference enqueueing.
3873 // Creates an instance of the ref enqueueing gang
3874 // task and has the worker threads execute it.
3875 
3876 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
3877   assert(_workers != NULL, "Need parallel worker threads.");
3878 
3879   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
3880 
3881   _workers->run_task(&enq_task_proxy);
3882 }
3883 
3884 // End of weak reference support closures
3885 
3886 // Abstract task used to preserve (i.e. copy) any referent objects
3887 // that are in the collection set and are pointed to by reference
3888 // objects discovered by the CM ref processor.
3889 
3890 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
3891 protected:
3892   G1CollectedHeap*         _g1h;
3893   G1ParScanThreadStateSet* _pss;
3894   RefToScanQueueSet*       _queues;
3895   ParallelTaskTerminator   _terminator;
3896   uint                     _n_workers;
3897 
3898 public:
3899   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, G1ParScanThreadStateSet* per_thread_states, int workers, RefToScanQueueSet *task_queues) :
3900     AbstractGangTask("ParPreserveCMReferents"),
3901     _g1h(g1h),
3902     _pss(per_thread_states),
3903     _queues(task_queues),
3904     _terminator(workers, _queues),
3905     _n_workers(workers)
3906   {
3907     g1h->ref_processor_cm()->set_active_mt_degree(workers);
3908   }
3909 
3910   void work(uint worker_id) {
3911     G1GCParPhaseTimesTracker x(_g1h->g1_policy()->phase_times(), G1GCPhaseTimes::PreserveCMReferents, worker_id);
3912 
3913     ResourceMark rm;
3914     HandleMark   hm;
3915 
3916     G1ParScanThreadState*          pss = _pss->state_for_worker(worker_id);
3917     pss->set_ref_processor(NULL);
3918     assert(pss->queue_is_empty(), "both queue and overflow should be empty");
3919 
3920     // Is alive closure
3921     G1AlwaysAliveClosure always_alive;
3922 
3923     // Copying keep alive closure. Applied to referent objects that need
3924     // to be copied.
3925     G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);
3926 
3927     ReferenceProcessor* rp = _g1h->ref_processor_cm();
3928 
3929     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
3930     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
3931 
3932     // limit is set using max_num_q(), which was set using ParallelGCThreads.
3933     // So worker_id < limit must hold - but assert just in case someone
3934     // decides to change the worker ids.
3935     assert(worker_id < limit, "sanity");
3936     assert(!rp->discovery_is_atomic(), "check this code");
3937 
3938     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
3939     for (uint idx = worker_id; idx < limit; idx += stride) {
3940       DiscoveredList& ref_list = rp->discovered_refs()[idx];
3941 
3942       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
3943       while (iter.has_next()) {
3944         // Since discovery is not atomic for the CM ref processor, we
3945         // can see some null referent objects.
3946         iter.load_ptrs(DEBUG_ONLY(true));
3947         oop ref = iter.obj();
3948 
3949         // This will filter nulls.
3950         if (iter.is_referent_alive()) {
3951           iter.make_referent_alive();
3952         }
3953         iter.move_to_next();
3954       }
3955     }
3956 
3957     // Drain the queue - which may cause stealing
3958     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _queues, &_terminator);
3959     drain_queue.do_void();
3960     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
3961     assert(pss->queue_is_empty(), "should be");
3962   }
3963 };
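
To make the list-selection arithmetic in work() above concrete, here is a standalone sketch (not JDK code; the subclass and queue counts are assumed values) that prints which discovered lists each worker visits. With n_workers <= limit, stride equals n_workers, so the workers partition the index range [0, limit) without overlap:

#include <algorithm>
#include <cstdio>

int main() {
  // Assumed for illustration: 4 reference subclasses x 4 queues per subclass,
  // the moral equivalent of number_of_subclasses_of_ref() * max_num_q().
  const unsigned limit     = 4 * 4;
  const unsigned n_workers = 4;
  // Mirrors: uint stride = MIN2(MAX2(_n_workers, 1U), limit);
  const unsigned stride    = std::min(std::max(n_workers, 1u), limit);

  for (unsigned worker_id = 0; worker_id < n_workers; worker_id++) {
    std::printf("worker %u scans lists:", worker_id);
    for (unsigned idx = worker_id; idx < limit; idx += stride) {
      std::printf(" %u", idx);
    }
    std::printf("\n");
  }
  return 0;
}

Each of the four workers visits four disjoint lists (0, 4, 8, 12 for worker 0, and so on), which is why the assertion above only needs worker_id < limit.
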
3964 
3965 void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
3966   // Any reference objects in the collection set that were 'discovered'
3967   // by the CM ref processor should have already been copied (either by
3968   // applying the external root copy closure to the discovered lists, or
3969   // by following an RSet entry).
3970   //
3971   // But some of the referents that these reference objects point to, and
3972   // that are in the collection set, may not have been copied: the STW ref
3973   // processor would have seen that the reference object had already
3974   // been 'discovered' and would have skipped discovering the reference,
3975   // but would not have treated the reference object as a regular oop.
3976   // As a result the copy closure would not have been applied to the
3977   // referent object.
3978   //
3979   // We need to explicitly copy these referent objects - the references
3980   // will be processed at the end of remarking.
3981   //
3982   // We also need to do this copying before we process the reference
3983   // objects discovered by the STW ref processor in case one of these
3984   // referents points to another object which is also referenced by an
3985   // object discovered by the STW ref processor.
3986   double preserve_cm_referents_time = 0.0;
3987 
3988   // To avoid spawning a task when there is no work to do, check that
3989   // a concurrent cycle is active and that some references have been
3990   // discovered.
3991   if (concurrent_mark()->cm_thread()->during_cycle() &&
3992       ref_processor_cm()->has_discovered_references()) {
3993     double preserve_cm_referents_start = os::elapsedTime();
3994     uint no_of_gc_workers = workers()->active_workers();
3995     G1ParPreserveCMReferentsTask keep_cm_referents(this,
3996                                                    per_thread_states,
3997                                                    no_of_gc_workers,
3998                                                    _task_queues);
3999     workers()->run_task(&keep_cm_referents);
4000     preserve_cm_referents_time = os::elapsedTime() - preserve_cm_referents_start;
4001   }
4002 
4003   g1_policy()->phase_times()->record_preserve_cm_referents_time_ms(preserve_cm_referents_time * 1000.0);
4004 }
4005 
4006 // Weak Reference processing during an evacuation pause (part 1).
4007 void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
4008   double ref_proc_start = os::elapsedTime();
4009 
4010   ReferenceProcessor* rp = _ref_processor_stw;
4011   assert(rp->discovery_enabled(), "should have been enabled");
4012 
4013   // Closure to test whether a referent is alive.
4014   G1STWIsAliveClosure is_alive(this);
4015 
4016   // Even when parallel reference processing is enabled, the processing
4017   // of JNI refs is serial and performed by the current thread rather
4018   // than by a worker. The following PSS will be used for processing
4019   // JNI refs.
4020 
4021   // Use only a single queue for this PSS.
4022   G1ParScanThreadState*          pss = per_thread_states->state_for_worker(0);
4023   pss->set_ref_processor(NULL);
4024   assert(pss->queue_is_empty(), "pre-condition");
4025 


4171     // for the G1RootProcessor object. We record the current
4172     // elapsed time before closing the scope so that time
4173     // taken for the destructor is NOT included in the
4174     // reported parallel time.
4175   }
4176 
4177   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
4178   phase_times->record_par_time(par_time_ms);
4179 
4180   double code_root_fixup_time_ms =
4181         (os::elapsedTime() - end_par_time_sec) * 1000.0;
4182   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
4183 }
4184 
4185 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4186   // Process any discovered reference objects - we have
4187   // to do this _before_ we retire the GC alloc regions
4188   // as we may have to copy some 'reachable' referent
4189   // objects (and their reachable sub-graphs) that were
4190   // not copied during the pause.
4191   preserve_cm_referents(per_thread_states);
4192   process_discovered_references(per_thread_states);
4193 
4194   G1STWIsAliveClosure is_alive(this);
4195   G1KeepAliveClosure keep_alive(this);
4196 
4197   {
4198     double start = os::elapsedTime();
4199 
4200     WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
4201 
4202     double time_ms = (os::elapsedTime() - start) * 1000.0;
4203     g1_policy()->phase_times()->record_weak_ref_proc_time(time_ms);
4204   }
4205 
4206   if (G1StringDedup::is_enabled()) {
4207     double fixup_start = os::elapsedTime();
4208 
4209     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4210 
4211     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;




1381 }
1382 
1383 // Public methods.
1384 
1385 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1386   CollectedHeap(),
1387   _young_gen_sampling_thread(NULL),
1388   _collector_policy(collector_policy),
1389   _soft_ref_policy(),
1390   _card_table(NULL),
1391   _memory_manager("G1 Young Generation", "end of minor GC"),
1392   _full_gc_memory_manager("G1 Old Generation", "end of major GC"),
1393   _eden_pool(NULL),
1394   _survivor_pool(NULL),
1395   _old_pool(NULL),
1396   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1397   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1398   _g1_policy(new G1Policy(_gc_timer_stw)),
1399   _collection_set(this, _g1_policy),
1400   _dirty_card_queue_set(false),
1401   _ref_processor_stw(NULL),
1402   _is_alive_closure_stw(this),
1403   _is_subject_to_discovery_stw(this),
1404   _ref_processor_cm(NULL),
1405   _is_alive_closure_cm(this),
1406   _is_subject_to_discovery_cm(this),
1407   _bot(NULL),
1408   _hot_card_cache(NULL),
1409   _g1_rem_set(NULL),
1410   _cr(NULL),
1411   _g1mm(NULL),
1412   _preserved_marks_set(true /* in_c_heap */),
1413   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1414   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1415   _humongous_reclaim_candidates(),
1416   _has_humongous_reclaim_candidates(false),
1417   _archive_allocator(NULL),
1418   _summary_bytes_used(0),
1419   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1420   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1421   _expand_heap_after_alloc_failure(true),
1422   _old_marking_cycles_started(0),
1423   _old_marking_cycles_completed(0),
1424   _in_cset_fast_test() {
1425 
1426   _workers = new WorkGang("GC Thread", ParallelGCThreads,


1759   //   * Reference discovery requires a barrier (see below).
1760   //   * Reference processing may or may not be MT
1761   //     (depending on the value of ParallelRefProcEnabled
1762   //     and ParallelGCThreads).
1763   //   * A full GC disables reference discovery by the CM
1764   //     ref processor and abandons any entries on its
1765   //     discovered lists.
1766   //
1767   // * For the STW processor:
1768   //   * Non MT discovery is enabled at the start of a full GC.
1769   //   * Processing and enqueueing during a full GC is non-MT.
1770   //   * During a full GC, references are processed after marking.
1771   //
1772   //   * Discovery (may or may not be MT) is enabled at the start
1773   //     of an incremental evacuation pause.
1774   //   * References are processed near the end of a STW evacuation pause.
1775   //   * For both types of GC:
1776   //     * Discovery is atomic - i.e. not concurrent.
1777   //     * Reference discovery will not need a barrier.
1778 
1779   bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
1780 
1781   // Concurrent Mark ref processor
1782   _ref_processor_cm =
1783     new ReferenceProcessor(&_is_subject_to_discovery_cm,
1784                            mt_processing,                                  // mt processing
1785                            ParallelGCThreads,                              // degree of mt processing
1786                            (ParallelGCThreads > 1) || (ConcGCThreads > 1), // mt discovery
1787                            MAX2(ParallelGCThreads, ConcGCThreads),         // degree of mt discovery
1788                            false,                                          // Reference discovery is not atomic
1789                            &_is_alive_closure_cm);                         // is alive closure
1790 
1791   // STW ref processor
1792   _ref_processor_stw =
1793     new ReferenceProcessor(&_is_subject_to_discovery_stw,
1794                            mt_processing,                        // mt processing
1795                            ParallelGCThreads,                    // degree of mt processing
1796                            (ParallelGCThreads > 1),              // mt discovery
1797                            ParallelGCThreads,                    // degree of mt discovery
1798                            true,                                 // Reference discovery is atomic
1799                            &_is_alive_closure_stw);              // is alive closure
1800 }
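
For reviewers who want to sanity-check the two resulting configurations, the following standalone sketch (not JDK code; the flag values are assumptions) reproduces the flag arithmetic used above:

#include <algorithm>
#include <cstdio>

int main() {
  // Assumed flag values for illustration only.
  const unsigned ParallelGCThreads      = 8;
  const unsigned ConcGCThreads          = 2;
  const bool     ParallelRefProcEnabled = true;

  // Shared by both processors.
  const bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);

  // CM ref processor: discovery is MT if either thread count exceeds 1,
  // and its degree is the larger of the two counts.
  const bool     cm_mt_discovery     = (ParallelGCThreads > 1) || (ConcGCThreads > 1);
  const unsigned cm_discovery_degree = std::max(ParallelGCThreads, ConcGCThreads);

  // STW ref processor: discovery parallelism tracks ParallelGCThreads only.
  const bool     stw_mt_discovery     = (ParallelGCThreads > 1);
  const unsigned stw_discovery_degree = ParallelGCThreads;

  std::printf("mt_processing=%d cm: mt=%d degree=%u stw: mt=%d degree=%u\n",
              mt_processing, cm_mt_discovery, cm_discovery_degree,
              stw_mt_discovery, stw_discovery_degree);
  return 0;
}

With these assumed values both processors process references with 8 threads; only the CM processor passes false for atomic discovery, because it discovers references while mutators are running.
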
1801 
1802 CollectorPolicy* G1CollectedHeap::collector_policy() const {
1803   return _collector_policy;
1804 }
1805 
1806 SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
1807   return &_soft_ref_policy;
1808 }
1809 
1810 size_t G1CollectedHeap::capacity() const {
1811   return _hrm.length() * HeapRegion::GrainBytes;
1812 }
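
A worked instance of this arithmetic (assumed values; HeapRegion::GrainBytes is really derived from the heap size at startup):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t grain_bytes = 1024 * 1024; // hypothetical region size of 1 MB
  const size_t num_regions = 2048;        // hypothetical committed region count
  // Mirrors: _hrm.length() * HeapRegion::GrainBytes
  std::printf("capacity = %zu bytes\n", num_regions * grain_bytes); // 2 GB
  return 0;
}
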
1813 
1814 size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
1815   return _hrm.total_free_bytes();
1816 }
1817 
1818 void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
1819   _hot_card_cache->drain(cl, worker_i);


3602     phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
3603   }
3604 };
3605 
3606 void G1CollectedHeap::redirty_logged_cards() {
3607   double redirty_logged_cards_start = os::elapsedTime();
3608 
3609   G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
3610   dirty_card_queue_set().reset_for_par_iteration();
3611   workers()->run_task(&redirty_task);
3612 
3613   DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
3614   dcq.merge_bufferlists(&dirty_card_queue_set());
3615   assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
3616 
3617   g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
3618 }
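
The function above follows a reset / parallel-drain / merge-back protocol: the private queue set is iterated by the workers, and whatever they produce is handed back to the global barrier-set queue. A minimal standalone sketch of that protocol (hypothetical types, not the real DirtyCardQueueSet API):

#include <cassert>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for a set of completed buffers.
struct MiniQueueSet {
  std::vector<int> buffers;
  size_t completed_buffers_num() const { return buffers.size(); }
  void merge_bufferlists(MiniQueueSet* src) {
    // Take ownership of all of src's buffers, leaving src empty.
    buffers.insert(buffers.end(), src->buffers.begin(), src->buffers.end());
    src->buffers.clear();
  }
};

int main() {
  MiniQueueSet redirtied; // plays the role of the GC's private queue set
  MiniQueueSet global;    // plays the role of G1BarrierSet::dirty_card_queue_set()
  redirtied.buffers.push_back(1); // buffers produced by the redirty task
  redirtied.buffers.push_back(2);
  global.merge_bufferlists(&redirtied);
  assert(redirtied.completed_buffers_num() == 0); // "All should be consumed"
  std::printf("global now holds %zu buffers\n", global.completed_buffers_num());
  return 0;
}

Merging back rather than discarding keeps the redirtied cards available to the consumers of the global queue set instead of losing them at the end of the pause.
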
3619 
3620 // Weak Reference Processing support
3621 
3622 bool G1STWIsAliveClosure::do_object_b(oop p) {
3623   // An object is reachable if it is outside the collection set,
3624   // or is inside and copied.
3625   return !_g1h->is_in_cset(p) || p->is_forwarded();
3626 }
3627 
3628 bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) {
3629   assert(obj != NULL, "must not be NULL");
3630   assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
3631   // The areas the CM and STW ref processors manage must be disjoint. The is_in_cset() below
3632   // may falsely indicate that this is not the case here: however the collection set only
3633   // contains old regions when concurrent mark is not running.
3634   return _g1h->is_in_cset(obj) || _g1h->heap_region_containing(obj)->is_survivor();
3635 }
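
To make the interplay between the two STW predicates concrete, the following standalone sketch (simplified state model, not JDK code) evaluates both closures over a few representative object states:

#include <cstdio>

struct ObjState {          // simplified stand-in for a heap object's GC state
  bool in_cset;            // inside the collection set
  bool forwarded;          // already evacuated (forwarding pointer installed)
  bool in_survivor;        // resides in a survivor region
};

bool stw_is_alive(const ObjState& s) {
  // Mirrors G1STWIsAliveClosure: outside the cset, or inside and copied.
  return !s.in_cset || s.forwarded;
}

bool stw_subject_to_discovery(const ObjState& s) {
  // Mirrors G1STWSubjectToDiscoveryClosure: in the cset or a survivor region.
  return s.in_cset || s.in_survivor;
}

int main() {
  const ObjState evacuated  = {true,  true,  false}; // cset object, already copied
  const ObjState dead_cset  = {true,  false, false}; // cset object, not copied
  const ObjState old_object = {false, false, false}; // outside the cset
  std::printf("evacuated:  alive=%d discoverable=%d\n",
              stw_is_alive(evacuated),  stw_subject_to_discovery(evacuated));
  std::printf("dead cset:  alive=%d discoverable=%d\n",
              stw_is_alive(dead_cset),  stw_subject_to_discovery(dead_cset));
  std::printf("old object: alive=%d discoverable=%d\n",
              stw_is_alive(old_object), stw_subject_to_discovery(old_object));
  return 0;
}

Only objects the pause can actually reclaim (cset or survivor) are discoverable by the STW processor; everything outside that area is left to the CM processor, keeping the two managed areas disjoint as the comment requires.
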
3636 
3637 // Non Copying Keep Alive closure
3638 class G1KeepAliveClosure: public OopClosure {
3639   G1CollectedHeap* _g1h;
3640 public:
3641   G1KeepAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
3642   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
3643   void do_oop(oop* p) {
3644     oop obj = *p;
3645     assert(obj != NULL, "the caller should have filtered out NULL values");
3646 
3647     const InCSetState cset_state = _g1h->in_cset_state(obj);
3648     if (!cset_state.is_in_cset_or_humongous()) {
3649       return;
3650     }
3651     if (cset_state.is_in_cset()) {
3652       assert(obj->is_forwarded(), "invariant");
3653       *p = obj->forwardee();
3654     } else {
3655       assert(!obj->is_forwarded(), "invariant");
3656       assert(cset_state.is_humongous(),


3847 
3848   virtual void work(uint worker_id) {
3849     _enq_task.work(worker_id);
3850   }
3851 };
3852 
3853 // Driver routine for parallel reference enqueueing.
3854 // Creates an instance of the ref enqueueing gang
3855 // task and has the worker threads execute it.
3856 
3857 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
3858   assert(_workers != NULL, "Need parallel worker threads.");
3859 
3860   G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
3861 
3862   _workers->run_task(&enq_task_proxy);
3863 }
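
The executor above uses a small proxy pattern: an existing per-worker task is wrapped in the gang-task interface so the work gang can drive it. A standalone sketch of the same pattern (hypothetical names, not the real WorkGang/EnqueueTask API):

#include <cstdio>

struct PerWorkerTask {                 // plays the role of EnqueueTask
  void work(unsigned worker_id) { std::printf("enqueue on worker %u\n", worker_id); }
};

struct GangTask {                      // plays the role of AbstractGangTask
  virtual ~GangTask() {}
  virtual void work(unsigned worker_id) = 0;
};

struct TaskProxy : public GangTask {   // plays the role of G1STWRefEnqueueTaskProxy
  PerWorkerTask& _task;
  explicit TaskProxy(PerWorkerTask& t) : _task(t) {}
  virtual void work(unsigned worker_id) { _task.work(worker_id); } // just forward
};

// Serial stand-in for WorkGang::run_task(); the real gang invokes work()
// on each of its worker threads in parallel.
static void run_task(GangTask* t, unsigned n_workers) {
  for (unsigned i = 0; i < n_workers; i++) {
    t->work(i);
  }
}

int main() {
  PerWorkerTask enq;
  TaskProxy proxy(enq);
  run_task(&proxy, 4);
  return 0;
}

The adapter forwards each worker invocation unchanged, so the reference processor's task types need no dependency on the work-gang type.
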
3864 
3865 // End of weak reference support closures
3866 
3867 // Weak Reference processing during an evacuation pause (part 1).
3868 void G1CollectedHeap::process_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
3869   double ref_proc_start = os::elapsedTime();
3870 
3871   ReferenceProcessor* rp = _ref_processor_stw;
3872   assert(rp->discovery_enabled(), "should have been enabled");
3873 
3874   // Closure to test whether a referent is alive.
3875   G1STWIsAliveClosure is_alive(this);
3876 
3877   // Even when parallel reference processing is enabled, the processing
3878   // of JNI refs is serial and performed by the current thread rather
3879   // than by a worker. The following PSS will be used for processing
3880   // JNI refs.
3881 
3882   // Use only a single queue for this PSS.
3883   G1ParScanThreadState*          pss = per_thread_states->state_for_worker(0);
3884   pss->set_ref_processor(NULL);
3885   assert(pss->queue_is_empty(), "pre-condition");
3886 


4032     // for the G1RootProcessor object. We record the current
4033     // elapsed time before closing the scope so that time
4034     // taken for the destructor is NOT included in the
4035     // reported parallel time.
4036   }
4037 
4038   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
4039   phase_times->record_par_time(par_time_ms);
4040 
4041   double code_root_fixup_time_ms =
4042         (os::elapsedTime() - end_par_time_sec) * 1000.0;
4043   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
4044 }
4045 
4046 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4047   // Process any discovered reference objects - we have
4048   // to do this _before_ we retire the GC alloc regions
4049   // as we may have to copy some 'reachable' referent
4050   // objects (and their reachable sub-graphs) that were
4051   // not copied during the pause.
4052   process_discovered_references(per_thread_states);
4053 
4054   G1STWIsAliveClosure is_alive(this);
4055   G1KeepAliveClosure keep_alive(this);
4056 
4057   {
4058     double start = os::elapsedTime();
4059 
4060     WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
4061 
4062     double time_ms = (os::elapsedTime() - start) * 1000.0;
4063     g1_policy()->phase_times()->record_weak_ref_proc_time(time_ms);
4064   }
4065 
4066   if (G1StringDedup::is_enabled()) {
4067     double fixup_start = os::elapsedTime();
4068 
4069     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4070 
4071     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;

