src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  31 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  32 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  33 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  34 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  35 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  36 #include "gc_implementation/g1/g1CollectorPolicy.hpp"

  37 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  38 #include "gc_implementation/g1/g1EvacFailure.hpp"
  39 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  40 #include "gc_implementation/g1/g1Log.hpp"
  41 #include "gc_implementation/g1/g1MarkSweep.hpp"
  42 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  43 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
  44 #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
  45 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  46 #include "gc_implementation/g1/g1RootProcessor.hpp"
  47 #include "gc_implementation/g1/g1StringDedup.hpp"
  48 #include "gc_implementation/g1/g1YCTypes.hpp"
  49 #include "gc_implementation/g1/heapRegion.inline.hpp"
  50 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  51 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
  52 #include "gc_implementation/g1/vm_operations_g1.hpp"
  53 #include "gc_implementation/shared/gcHeapSummary.hpp"
  54 #include "gc_implementation/shared/gcTimer.hpp"
  55 #include "gc_implementation/shared/gcTrace.hpp"
  56 #include "gc_implementation/shared/gcTraceTime.hpp"


1021   }
1022 
1023   ShouldNotReachHere();
1024   return NULL;
1025 }
1026 
1027 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1028                                                            AllocationContext_t context,
1029                                                            bool expect_null_mutator_alloc_region) {
1030   assert_at_safepoint(true /* should_be_vm_thread */);
1031   assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
1032                                              !expect_null_mutator_alloc_region,
1033          "the current alloc region was unexpectedly found to be non-NULL");
1034 
1035   if (!is_humongous(word_size)) {
1036     return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
1037                                                       false /* bot_updates */);
1038   } else {
1039     HeapWord* result = humongous_obj_allocate(word_size, context);
1040     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1041       g1_policy()->set_initiate_conc_mark_if_possible();
1042     }
1043     return result;
1044   }
1045 
1046   ShouldNotReachHere();
1047 }
1048 
1049 class PostMCRemSetClearClosure: public HeapRegionClosure {
1050   G1CollectedHeap* _g1h;
1051   ModRefBarrierSet* _mr_bs;
1052 public:
1053   PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1054     _g1h(g1h), _mr_bs(mr_bs) {}
1055 
1056   bool doHeapRegion(HeapRegion* r) {
1057     HeapRegionRemSet* hrrs = r->rem_set();
1058 
1059     if (r->is_continues_humongous()) {
1060       // We'll assert that the strong code root list and RSet are empty
1061       assert(hrrs->strong_code_roots_list_length() == 0, "sanity");


1232 
1233       // Make sure we'll choose a new allocation region afterwards.
1234       _allocator->release_mutator_alloc_region();
1235       _allocator->abandon_gc_alloc_regions();
1236       g1_rem_set()->cleanupHRRS();
1237 
1238       // We should call this after we retire any currently active alloc
1239       // regions so that all the ALLOC / RETIRE events are generated
1240       // before the start GC event.
1241       _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1242 
1243       // We may have added regions to the current incremental collection
1244       // set between the last GC or pause and now. We need to clear the
1245       // incremental collection set and then start rebuilding it afresh
1246       // after this full GC.
1247       abandon_collection_set(g1_policy()->inc_cset_head());
1248       g1_policy()->clear_incremental_cset();
1249       g1_policy()->stop_incremental_cset_building();
1250 
1251       tear_down_region_sets(false /* free_list_only */);
1252       g1_policy()->set_gcs_are_young(true);
1253 
1254       // See the comments in g1CollectedHeap.hpp and
1255       // G1CollectedHeap::ref_processing_init() about
1256       // how reference processing currently works in G1.
1257 
1258       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1259       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1260 
1261       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1262       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1263 
1264       ref_processor_stw()->enable_discovery();
1265       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1266 
1267       // Do collection work
1268       {
1269         HandleMark hm;  // Discard invalid handles created during gc
1270         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1271       }
1272 


1713 }
1714 
1715 // Public methods.
1716 
1717 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1718 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1719 #endif // _MSC_VER
1720 
1721 
1722 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1723   CollectedHeap(),
1724   _g1_policy(policy_),
1725   _dirty_card_queue_set(false),
1726   _into_cset_dirty_card_queue_set(false),
1727   _is_alive_closure_cm(this),
1728   _is_alive_closure_stw(this),
1729   _ref_processor_cm(NULL),
1730   _ref_processor_stw(NULL),
1731   _bot_shared(NULL),
1732   _evac_failure_scan_stack(NULL),
1733   _mark_in_progress(false),
1734   _cg1r(NULL),
1735   _g1mm(NULL),
1736   _refine_cte_cl(NULL),
1737   _full_collection(false),
1738   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1739   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1740   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1741   _humongous_reclaim_candidates(),
1742   _has_humongous_reclaim_candidates(false),
1743   _free_regions_coming(false),
1744   _young_list(new YoungList(this)),
1745   _gc_time_stamp(0),
1746   _survivor_plab_stats(YoungPLABSize, PLABWeight),
1747   _old_plab_stats(OldPLABSize, PLABWeight),
1748   _expand_heap_after_alloc_failure(true),
1749   _surviving_young_words(NULL),
1750   _old_marking_cycles_started(0),
1751   _old_marking_cycles_completed(0),
1752   _concurrent_cycle_started(false),
1753   _heap_summary_sent(false),
1754   _in_cset_fast_test(),
1755   _dirty_cards_region_list(NULL),
1756   _worker_cset_start_region(NULL),
1757   _worker_cset_start_region_time_stamp(NULL),
1758   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1759   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1760   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1761   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1762 
1763   _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
1764                           /* are_GC_task_threads */true,
1765                           /* are_ConcurrentGC_threads */false);
1766   _workers->initialize_workers();
1767 
1768   _allocator = G1Allocator::create_allocator(this);
1769   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1770 
1771   int n_queues = MAX2((int)ParallelGCThreads, 1);
1772   _task_queues = new RefToScanQueueSet(n_queues);


2286                  _old_marking_cycles_started, _old_marking_cycles_completed));
2287 
2288   _old_marking_cycles_completed += 1;
2289 
2290   // We need to clear the "in_progress" flag in the CM thread before
2291   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2292   // is set) so that if a waiter requests another System.gc() it doesn't
2293   // incorrectly see that a marking cycle is still in progress.
2294   if (concurrent) {
2295     _cmThread->clear_in_progress();
2296   }
2297 
2298   // This notify_all() will ensure that a thread that called
2299   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2300   // and is waiting for a full GC to finish will be woken up. It is
2301   // waiting in VM_G1IncCollectionPause::doit_epilogue().
2302   FullGCCount_lock->notify_all();
2303 }
2304 
2305 void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
2306   _concurrent_cycle_started = true;
2307   _gc_timer_cm->register_gc_start(start_time);
2308 
2309   _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
2310   trace_heap_before_gc(_gc_tracer_cm);
2311 }
2312 
2313 void G1CollectedHeap::register_concurrent_cycle_end() {
2314   if (_concurrent_cycle_started) {
2315     if (_cm->has_aborted()) {
2316       _gc_tracer_cm->report_concurrent_mode_failure();
2317     }
2318 
2319     _gc_timer_cm->register_gc_end();
2320     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2321 
2322     // Clear state variables to prepare for the next concurrent cycle.
2323     _concurrent_cycle_started = false;
2324     _heap_summary_sent = false;
2325   }
2326 }
2327 
2328 void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
2329   if (_concurrent_cycle_started) {
2330     // This function can be called when:
2331     //  the cleanup pause is run
2332     //  the concurrent cycle is aborted before the cleanup pause.
2333     //  the concurrent cycle is aborted after the cleanup pause,
2334     //   but before the concurrent cycle end has been registered.
2335     // Make sure that we only send the heap information once.
2336     if (!_heap_summary_sent) {
2337       trace_heap_after_gc(_gc_tracer_cm);
2338       _heap_summary_sent = true;
2339     }
2340   }
2341 }
2342 
2343 G1YCType G1CollectedHeap::yc_type() {
2344   bool is_young = g1_policy()->gcs_are_young();
2345   bool is_initial_mark = g1_policy()->during_initial_mark_pause();
2346   bool is_during_mark = mark_in_progress();
2347 
2348   if (is_initial_mark) {
2349     return InitialMark;
2350   } else if (is_during_mark) {
2351     return DuringMark;
2352   } else if (is_young) {
2353     return Normal;
2354   } else {
2355     return Mixed;
2356   }
2357 }
2358 
2359 void G1CollectedHeap::collect(GCCause::Cause cause) {
2360   assert_heap_not_locked();
2361 
2362   uint gc_count_before;
2363   uint old_marking_count_before;
2364   uint full_gc_count_before;
2365   bool retry_gc;
2366 


3591 
3592   DEBUG_ONLY(totals.verify());
3593 }
3594 
3595 void G1CollectedHeap::reset_taskqueue_stats() {
3596   const uint n = workers()->total_workers();
3597   for (uint i = 0; i < n; ++i) {
3598     task_queue(i)->stats.reset();
3599   }
3600 }
3601 #endif // TASKQUEUE_STATS
3602 
3603 void G1CollectedHeap::log_gc_header() {
3604   if (!G1Log::fine()) {
3605     return;
3606   }
3607 
3608   gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
3609 
3610   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3611     .append(g1_policy()->gcs_are_young() ? "(young)" : "(mixed)")
3612     .append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
3613 
3614   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3615 }
3616 
3617 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3618   if (!G1Log::fine()) {
3619     return;
3620   }
3621 
3622   if (G1Log::finer()) {
3623     if (evacuation_failed()) {
3624       gclog_or_tty->print(" (to-space exhausted)");
3625     }
3626     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3627     g1_policy()->phase_times()->note_gc_end();
3628     g1_policy()->phase_times()->print(pause_time_sec);
3629     g1_policy()->print_detailed_heap_transition();
3630   } else {
3631     if (evacuation_failed()) {
3632       gclog_or_tty->print("--");


3649   _gc_timer_stw->register_gc_start();
3650 
3651   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3652 
3653   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3654   ResourceMark rm;
3655 
3656   G1Log::update_level();
3657   print_heap_before_gc();
3658   trace_heap_before_gc(_gc_tracer_stw);
3659 
3660   verify_region_sets_optional();
3661   verify_dirty_young_regions();
3662 
3663   // This call will decide whether this pause is an initial-mark
3664   // pause. If it is, during_initial_mark_pause() will return true
3665   // for the duration of this pause.
3666   g1_policy()->decide_on_conc_mark_initiation();
3667 
3668   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3669   assert(!g1_policy()->during_initial_mark_pause() ||
3670           g1_policy()->gcs_are_young(), "sanity");
3671 
3672   // We also do not allow mixed GCs during marking.
3673   assert(!mark_in_progress() || g1_policy()->gcs_are_young(), "sanity");
3674 
3675   // Record whether this pause is an initial mark. When the current
3676   // thread has completed its logging output and it's safe to signal
3677   // the CM thread, the flag's value in the policy will have been reset.
3678   bool should_start_conc_mark = g1_policy()->during_initial_mark_pause();
3679 
3680   // Inner scope for scope based logging, timers, and stats collection
3681   {
3682     EvacuationInfo evacuation_info;
3683 
3684     if (g1_policy()->during_initial_mark_pause()) {
3685       // We are about to start a marking cycle, so we increment the
3686       // full collection counter.
3687       increment_old_marking_cycles_started();
3688       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3689     }
3690 
3691     _gc_tracer_stw->report_yc_type(yc_type());
3692 
3693     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3694 
3695     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3696                                                                   workers()->active_workers(),
3697                                                                   Threads::number_of_non_daemon_threads());
3698     assert(UseDynamicNumberOfGCThreads ||
3699            active_workers == workers()->total_workers(),
3700            "If not dynamic should be using all the  workers");
3701     workers()->set_active_workers(active_workers);
3702 
3703     double pause_start_sec = os::elapsedTime();
3704     g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());
3705     log_gc_header();
3706 
3707     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3708     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3709 
3710     // If the secondary_free_list is not empty, append it to the
3711     // free_list. No need to wait for the cleanup operation to finish;
3712     // the region allocation code will check the secondary_free_list
3713     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3714     // set, skip this step so that the region allocation code has to
3715     // get entries from the secondary_free_list.
3716     if (!G1StressConcRegionFreeing) {
3717       append_secondary_free_list_if_not_empty_with_lock();
3718     }
3719 
3720     assert(check_young_list_well_formed(), "young list should be well formed");
3721 
3722     // Don't dynamically change the number of GC threads this early.  A value of
3723     // 0 is used to indicate serial work.  When parallel work is done,
3724     // it will be set.


3778         g1_policy()->record_collection_pause_start(sample_start_time_sec);
3779 
3780         double scan_wait_start = os::elapsedTime();
3781         // We have to wait until the CM threads finish scanning the
3782         // root regions as it's the only way to ensure that all the
3783         // objects on them have been correctly scanned before we start
3784         // moving them during the GC.
3785         bool waited = _cm->root_regions()->wait_until_scan_finished();
3786         double wait_time_ms = 0.0;
3787         if (waited) {
3788           double scan_wait_end = os::elapsedTime();
3789           wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3790         }
3791         g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3792 
3793 #if YOUNG_LIST_VERBOSE
3794         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
3795         _young_list->print();
3796 #endif // YOUNG_LIST_VERBOSE
3797 
3798         if (g1_policy()->during_initial_mark_pause()) {
3799           concurrent_mark()->checkpointRootsInitialPre();
3800         }
3801 
3802 #if YOUNG_LIST_VERBOSE
3803         gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
3804         _young_list->print();
3805         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3806 #endif // YOUNG_LIST_VERBOSE
3807 
3808         g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
3809 
3810         register_humongous_regions_with_cset();
3811 
3812         assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3813 
3814         _cm->note_start_of_gc();
3815         // We call this after finalize_cset() to
3816         // ensure that the CSet has been finalized.
3817         _cm->verify_no_cset_oops();
3818 


3867         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3868                                              _young_list->first_survivor_region(),
3869                                              _young_list->last_survivor_region());
3870 
3871         _young_list->reset_auxilary_lists();
3872 
3873         if (evacuation_failed()) {
3874           _allocator->set_used(recalculate_used());
3875           uint n_queues = MAX2((int)ParallelGCThreads, 1);
3876           for (uint i = 0; i < n_queues; i++) {
3877             if (_evacuation_failed_info_array[i].has_failed()) {
3878               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3879             }
3880           }
3881         } else {
3882           // The "used" of the collection set regions has already been subtracted
3883           // when they were freed.  Add in the bytes evacuated.
3884           _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
3885         }
3886 
3887         if (g1_policy()->during_initial_mark_pause()) {
3888           // We have to do this before we notify the CM threads that
3889           // they can start working to make sure that all the
3890           // appropriate initialization is done on the CM object.
3891           concurrent_mark()->checkpointRootsInitialPost();
3892           set_marking_started();
3893           // Note that we don't actually trigger the CM thread at
3894           // this point. We do that later when we're sure that
3895           // the current thread has completed its logging output.
3896         }
3897 
3898         allocate_dummy_regions();
3899 
3900 #if YOUNG_LIST_VERBOSE
3901         gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
3902         _young_list->print();
3903         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3904 #endif // YOUNG_LIST_VERBOSE
3905 
3906         _allocator->init_mutator_alloc_region();
3907 
3908         {
3909           size_t expand_bytes = g1_policy()->expansion_amount();
3910           if (expand_bytes > 0) {
3911             size_t bytes_before = capacity();
3912             // No need for an ergo verbose message here,


4358       cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
4359     }
4360   };
4361 
4362   void work(uint worker_id) {
4363     if (worker_id >= _n_workers) return;  // no work needed this round
4364 
4365     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, os::elapsedTime());
4366 
4367     {
4368       ResourceMark rm;
4369       HandleMark   hm;
4370 
4371       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
4372 
4373       G1ParScanThreadState            pss(_g1h, worker_id, rp);
4374       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4375 
4376       pss.set_evac_failure_closure(&evac_failure_cl);
4377 
4378       bool only_young = _g1h->g1_policy()->gcs_are_young();
4379 
4380       // Non-IM young GC.
4381       G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
4382       G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
4383                                                                                only_young, // Only process dirty klasses.
4384                                                                                false);     // No need to claim CLDs.
4385       // IM young GC.
4386       //    Strong roots closures.
4387       G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, &pss, rp);
4388       G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
4389                                                                                false, // Process all klasses.
4390                                                                                true); // Need to claim CLDs.
4391       //    Weak roots closures.
4392       G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
4393       G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
4394                                                                                     false, // Process all klasses.
4395                                                                                     true); // Need to claim CLDs.
4396 
4397       OopClosure* strong_root_cl;
4398       OopClosure* weak_root_cl;
4399       CLDClosure* strong_cld_cl;
4400       CLDClosure* weak_cld_cl;
4401 
4402       bool trace_metadata = false;
4403 
4404       if (_g1h->g1_policy()->during_initial_mark_pause()) {
4405         // We also need to mark copied objects.
4406         strong_root_cl = &scan_mark_root_cl;
4407         strong_cld_cl  = &scan_mark_cld_cl;
4408         if (ClassUnloadingWithConcurrentMark) {
4409           weak_root_cl = &scan_mark_weak_root_cl;
4410           weak_cld_cl  = &scan_mark_weak_cld_cl;
4411           trace_metadata = true;
4412         } else {
4413           weak_root_cl = &scan_mark_root_cl;
4414           weak_cld_cl  = &scan_mark_cld_cl;
4415         }
4416       } else {
4417         strong_root_cl = &scan_only_root_cl;
4418         weak_root_cl   = &scan_only_root_cl;
4419         strong_cld_cl  = &scan_only_cld_cl;
4420         weak_cld_cl    = &scan_only_cld_cl;
4421       }
4422 
4423       pss.start_strong_roots();
4424 


5045   {}
5046 
5047   virtual void work(uint worker_id) {
5048     // The reference processing task executed by a single worker.
5049     ResourceMark rm;
5050     HandleMark   hm;
5051 
5052     G1STWIsAliveClosure is_alive(_g1h);
5053 
5054     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
5055     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5056 
5057     pss.set_evac_failure_closure(&evac_failure_cl);
5058 
5059     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5060 
5061     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5062 
5063     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5064 
5065     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5066       // We also need to mark copied objects.
5067       copy_non_heap_cl = &copy_mark_non_heap_cl;
5068     }
5069 
5070     // Keep alive closure.
5071     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5072 
5073     // Complete GC closure
5074     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
5075 
5076     // Call the reference processing task's work routine.
5077     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
5078 
5079     // Note we cannot assert that the refs array is empty here as not all
5080     // of the processing tasks (specifically phase2 - pp2_work) execute
5081     // the complete_gc closure (which ordinarily would drain the queue) so
5082     // the queue may not be empty.
5083   }
5084 };
5085 


5150     _n_workers(workers)
5151   { }
5152 
5153   void work(uint worker_id) {
5154     ResourceMark rm;
5155     HandleMark   hm;
5156 
5157     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
5158     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5159 
5160     pss.set_evac_failure_closure(&evac_failure_cl);
5161 
5162     assert(pss.queue_is_empty(), "both queue and overflow should be empty");
5163 
5164     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5165 
5166     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5167 
5168     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5169 
5170     if (_g1h->g1_policy()->during_initial_mark_pause()) {
5171       // We also need to mark copied objects.
5172       copy_non_heap_cl = &copy_mark_non_heap_cl;
5173     }
5174 
5175     // Is alive closure
5176     G1AlwaysAliveClosure always_alive(_g1h);
5177 
5178     // Copying keep alive closure. Applied to referent objects that need
5179     // to be copied.
5180     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5181 
5182     ReferenceProcessor* rp = _g1h->ref_processor_cm();
5183 
5184     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5185     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
5186 
5187     // limit is set using max_num_q() - which was set using ParallelGCThreads.
5188     // So this must be true - but assert just in case someone decides to
5189     // change the worker ids.
5190     assert(worker_id < limit, "sanity");


5265   // JNI refs.
5266 
5267   // Use only a single queue for this PSS.
5268   G1ParScanThreadState            pss(this, 0, NULL);
5269 
5270   // We do not embed a reference processor in the copying/scanning
5271   // closures while we're actually processing the discovered
5272   // reference objects.
5273   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5274 
5275   pss.set_evac_failure_closure(&evac_failure_cl);
5276 
5277   assert(pss.queue_is_empty(), "pre-condition");
5278 
5279   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
5280 
5281   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5282 
5283   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5284 
5285   if (g1_policy()->during_initial_mark_pause()) {
5286     // We also need to mark copied objects.
5287     copy_non_heap_cl = &copy_mark_non_heap_cl;
5288   }
5289 
5290   // Keep alive closure.
5291   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, &pss);
5292 
5293   // Serial Complete GC closure
5294   G1STWDrainQueueClosure drain_queue(this, &pss);
5295 
5296   // Setup the soft refs policy...
5297   rp->setup_policy(false);
5298 
5299   ReferenceProcessorStats stats;
5300   if (!rp->processing_is_mt()) {
5301     // Serial reference processing...
5302     stats = rp->process_discovered_references(&is_alive,
5303                                               &keep_alive,
5304                                               &drain_queue,
5305                                               NULL,


5378   hot_card_cache->reset_hot_cache_claimed_index();
5379   hot_card_cache->set_use_cache(false);
5380 
5381   const uint n_workers = workers()->active_workers();
5382   assert(UseDynamicNumberOfGCThreads ||
5383          n_workers == workers()->total_workers(),
5384          "If not dynamic should be using all the  workers");
5385   set_par_threads(n_workers);
5386 
5387 
5388   init_for_evac_failure(NULL);
5389 
5390   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5391   double start_par_time_sec = os::elapsedTime();
5392   double end_par_time_sec;
5393 
5394   {
5395     G1RootProcessor root_processor(this);
5396     G1ParTask g1_par_task(this, _task_queues, &root_processor);
5397     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5398     if (g1_policy()->during_initial_mark_pause()) {
5399       ClassLoaderDataGraph::clear_claimed_marks();
5400     }
5401 
5402      // The individual threads will set their evac-failure closures.
5403      if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
5404      // These tasks use SharedHeap::_process_strong_tasks
5405      assert(UseDynamicNumberOfGCThreads ||
5406             workers()->active_workers() == workers()->total_workers(),
5407             "If not dynamic should be using all the  workers");
5408     workers()->run_task(&g1_par_task);
5409     end_par_time_sec = os::elapsedTime();
5410 
5411     // Closing the inner scope will execute the destructor
5412     // for the G1RootProcessor object. We record the current
5413     // elapsed time before closing the scope so that time
5414     // taken for the destructor is NOT included in the
5415     // reported parallel time.
5416   }
5417 
5418   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();


5639     return false;
5640   }
5641   return true;
5642 }
5643 
5644 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5645   CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5646   CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5647 
5648   HeapWord* bottom = hr->bottom();
5649   HeapWord* ptams  = hr->prev_top_at_mark_start();
5650   HeapWord* ntams  = hr->next_top_at_mark_start();
5651   HeapWord* end    = hr->end();
5652 
5653   bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
5654 
5655   bool res_n = true;
5656   // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
5657   // we do the clearing of the next bitmap concurrently. Thus, we can not verify the bitmap
5658   // if we happen to be in that state.
5659   if (mark_in_progress() || !_cmThread->in_progress()) {
5660     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
5661   }
5662   if (!res_p || !res_n) {
5663     gclog_or_tty->print_cr("#### Bitmap verification failed for "HR_FORMAT,
5664                            HR_FORMAT_PARAMS(hr));
5665     gclog_or_tty->print_cr("#### Caller: %s", caller);
5666     return false;
5667   }
5668   return true;
5669 }
5670 
5671 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5672   if (!G1VerifyBitmaps) return;
5673 
5674   guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5675 }
5676 
5677 class G1VerifyBitmapClosure : public HeapRegionClosure {
5678 private:
5679   const char* _caller;


6337 
6338   if (count < g1_policy()->max_regions(dest)) {
6339     const bool is_survivor = (dest.is_young());
6340     HeapRegion* new_alloc_region = new_region(word_size,
6341                                               !is_survivor,
6342                                               true /* do_expand */);
6343     if (new_alloc_region != NULL) {
6344       // We really only need to do this for old regions given that we
6345       // should never scan survivors. But it doesn't hurt to do it
6346       // for survivors too.
6347       new_alloc_region->record_timestamp();
6348       if (is_survivor) {
6349         new_alloc_region->set_survivor();
6350         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6351         check_bitmaps("Survivor Region Allocation", new_alloc_region);
6352       } else {
6353         new_alloc_region->set_old();
6354         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6355         check_bitmaps("Old Region Allocation", new_alloc_region);
6356       }
6357       bool during_im = g1_policy()->during_initial_mark_pause();
6358       new_alloc_region->note_start_of_copying(during_im);
6359       return new_alloc_region;
6360     }
6361   }
6362   return NULL;
6363 }
6364 
6365 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6366                                              size_t allocated_bytes,
6367                                              InCSetState dest) {
6368   bool during_im = g1_policy()->during_initial_mark_pause();
6369   alloc_region->note_end_of_copying(during_im);
6370   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6371   if (dest.is_young()) {
6372     young_list()->add_survivor_region(alloc_region);
6373   } else {
6374     _old_set.add(alloc_region);
6375   }
6376   _hr_printer.retire(alloc_region);
6377 }
6378 
6379 // Heap region set verification
6380 
6381 class VerifyRegionListsClosure : public HeapRegionClosure {
6382 private:
6383   HeapRegionSet*   _old_set;
6384   HeapRegionSet*   _humongous_set;
6385   HeapRegionManager*   _hrm;
6386 
6387 public:
6388   HeapRegionSetCount _old_count;




  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/metadataOnStackMark.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "code/codeCache.hpp"
  29 #include "code/icBuffer.hpp"
  30 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  31 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  32 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  33 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  34 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  35 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  36 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  37 #include "gc_implementation/g1/g1CollectorState.hpp"
  38 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  39 #include "gc_implementation/g1/g1EvacFailure.hpp"
  40 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  41 #include "gc_implementation/g1/g1Log.hpp"
  42 #include "gc_implementation/g1/g1MarkSweep.hpp"
  43 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  44 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
  45 #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
  46 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  47 #include "gc_implementation/g1/g1RootProcessor.hpp"
  48 #include "gc_implementation/g1/g1StringDedup.hpp"
  49 #include "gc_implementation/g1/g1YCTypes.hpp"
  50 #include "gc_implementation/g1/heapRegion.inline.hpp"
  51 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  52 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
  53 #include "gc_implementation/g1/vm_operations_g1.hpp"
  54 #include "gc_implementation/shared/gcHeapSummary.hpp"
  55 #include "gc_implementation/shared/gcTimer.hpp"
  56 #include "gc_implementation/shared/gcTrace.hpp"
  57 #include "gc_implementation/shared/gcTraceTime.hpp"
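The newly added g1CollectorState.hpp include backs the collector_state() calls that replace the old g1_policy() flag accessors throughout this patch. As a rough, hypothetical sketch only (the real header will differ in naming and carry more state), the calls made in this file imply an object reachable via G1CollectedHeap::collector_state() with simple boolean getter/setter pairs along these lines:

// Hypothetical sketch inferred from the collector_state() calls in this file;
// not the contents of the actual g1CollectorState.hpp.
class G1CollectorState {
  bool _gcs_are_young;
  bool _during_initial_mark_pause;
  bool _mark_in_progress;
  bool _concurrent_cycle_started;
  bool _initiate_conc_mark_if_possible;
 public:
  bool gcs_are_young() const                      { return _gcs_are_young; }
  void set_gcs_are_young(bool v)                  { _gcs_are_young = v; }
  bool during_initial_mark_pause() const          { return _during_initial_mark_pause; }
  bool mark_in_progress() const                   { return _mark_in_progress; }
  void set_mark_in_progress(bool v)               { _mark_in_progress = v; }
  bool concurrent_cycle_started() const           { return _concurrent_cycle_started; }
  void set_concurrent_cycle_started(bool v)       { _concurrent_cycle_started = v; }
  void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; }
};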


1022   }
1023 
1024   ShouldNotReachHere();
1025   return NULL;
1026 }
1027 
1028 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1029                                                            AllocationContext_t context,
1030                                                            bool expect_null_mutator_alloc_region) {
1031   assert_at_safepoint(true /* should_be_vm_thread */);
1032   assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
1033                                              !expect_null_mutator_alloc_region,
1034          "the current alloc region was unexpectedly found to be non-NULL");
1035 
1036   if (!is_humongous(word_size)) {
1037     return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
1038                                                       false /* bot_updates */);
1039   } else {
1040     HeapWord* result = humongous_obj_allocate(word_size, context);
1041     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1042       collector_state()->set_initiate_conc_mark_if_possible(true);
1043     }
1044     return result;
1045   }
1046 
1047   ShouldNotReachHere();
1048 }
1049 
1050 class PostMCRemSetClearClosure: public HeapRegionClosure {
1051   G1CollectedHeap* _g1h;
1052   ModRefBarrierSet* _mr_bs;
1053 public:
1054   PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1055     _g1h(g1h), _mr_bs(mr_bs) {}
1056 
1057   bool doHeapRegion(HeapRegion* r) {
1058     HeapRegionRemSet* hrrs = r->rem_set();
1059 
1060     if (r->is_continues_humongous()) {
1061       // We'll assert that the strong code root list and RSet are empty
1062       assert(hrrs->strong_code_roots_list_length() == 0, "sanity");


1233 
1234       // Make sure we'll choose a new allocation region afterwards.
1235       _allocator->release_mutator_alloc_region();
1236       _allocator->abandon_gc_alloc_regions();
1237       g1_rem_set()->cleanupHRRS();
1238 
1239       // We should call this after we retire any currently active alloc
1240       // regions so that all the ALLOC / RETIRE events are generated
1241       // before the start GC event.
1242       _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1243 
1244       // We may have added regions to the current incremental collection
1245       // set between the last GC or pause and now. We need to clear the
1246       // incremental collection set and then start rebuilding it afresh
1247       // after this full GC.
1248       abandon_collection_set(g1_policy()->inc_cset_head());
1249       g1_policy()->clear_incremental_cset();
1250       g1_policy()->stop_incremental_cset_building();
1251 
1252       tear_down_region_sets(false /* free_list_only */);
1253       collector_state()->set_gcs_are_young(true);
1254 
1255       // See the comments in g1CollectedHeap.hpp and
1256       // G1CollectedHeap::ref_processing_init() about
1257       // how reference processing currently works in G1.
1258 
1259       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1260       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1261 
1262       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1263       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1264 
1265       ref_processor_stw()->enable_discovery();
1266       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1267 
1268       // Do collection work
1269       {
1270         HandleMark hm;  // Discard invalid handles created during gc
1271         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1272       }
1273 


1714 }
1715 
1716 // Public methods.
1717 
1718 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1719 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1720 #endif // _MSC_VER
1721 
1722 
1723 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1724   CollectedHeap(),
1725   _g1_policy(policy_),
1726   _dirty_card_queue_set(false),
1727   _into_cset_dirty_card_queue_set(false),
1728   _is_alive_closure_cm(this),
1729   _is_alive_closure_stw(this),
1730   _ref_processor_cm(NULL),
1731   _ref_processor_stw(NULL),
1732   _bot_shared(NULL),
1733   _evac_failure_scan_stack(NULL),

1734   _cg1r(NULL),
1735   _g1mm(NULL),
1736   _refine_cte_cl(NULL),

1737   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1738   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1739   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1740   _humongous_reclaim_candidates(),
1741   _has_humongous_reclaim_candidates(false),
1742   _free_regions_coming(false),
1743   _young_list(new YoungList(this)),
1744   _gc_time_stamp(0),
1745   _survivor_plab_stats(YoungPLABSize, PLABWeight),
1746   _old_plab_stats(OldPLABSize, PLABWeight),
1747   _expand_heap_after_alloc_failure(true),
1748   _surviving_young_words(NULL),
1749   _old_marking_cycles_started(0),
1750   _old_marking_cycles_completed(0),

1751   _heap_summary_sent(false),
1752   _in_cset_fast_test(),
1753   _dirty_cards_region_list(NULL),
1754   _worker_cset_start_region(NULL),
1755   _worker_cset_start_region_time_stamp(NULL),
1756   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1757   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1758   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1759   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1760 
1761   _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
1762                           /* are_GC_task_threads */true,
1763                           /* are_ConcurrentGC_threads */false);
1764   _workers->initialize_workers();
1765 
1766   _allocator = G1Allocator::create_allocator(this);
1767   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1768 
1769   int n_queues = MAX2((int)ParallelGCThreads, 1);
1770   _task_queues = new RefToScanQueueSet(n_queues);
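One constant set just above is worth a worked example: _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2 means any allocation larger than about half a region takes the humongous path seen in attempt_allocation_at_safepoint(). Illustrative arithmetic only, assuming a 1 MB region and 8-byte HeapWords on a 64-bit VM (the actual region size is chosen ergonomically, between 1 MB and 32 MB):

// GrainBytes = 1 MB  =>  GrainWords = 1 MB / 8 B = 131072 words
// _humongous_object_threshold_in_words = 131072 / 2 = 65536 words (512 KB)
// so e.g. a ~600 KB byte[] is treated as humongous and goes through
// humongous_obj_allocate() rather than the mutator alloc region.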


2284                  _old_marking_cycles_started, _old_marking_cycles_completed));
2285 
2286   _old_marking_cycles_completed += 1;
2287 
2288   // We need to clear the "in_progress" flag in the CM thread before
2289   // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
2290   // is set) so that if a waiter requests another System.gc() it doesn't
2291   // incorrectly see that a marking cycle is still in progress.
2292   if (concurrent) {
2293     _cmThread->clear_in_progress();
2294   }
2295 
2296   // This notify_all() will ensure that a thread that called
2297   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2298   // and is waiting for a full GC to finish will be woken up. It is
2299   // waiting in VM_G1IncCollectionPause::doit_epilogue().
2300   FullGCCount_lock->notify_all();
2301 }
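The notify_all() above pairs with a wait loop on FullGCCount_lock in VM_G1IncCollectionPause::doit_epilogue(), as the comment notes. A minimal sketch of that waiter pattern, assuming the usual HotSpot monitor idiom (the real epilogue has additional conditions around ExplicitGCInvokesConcurrent):

// Sketch of the waiter side only; not the actual doit_epilogue() code.
// old_marking_cycles_completed_before is assumed to have been sampled before
// the pause was requested.
{
  MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  while (g1h->old_marking_cycles_completed() <= old_marking_cycles_completed_before) {
    FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
  }
}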
2302 
2303 void G1CollectedHeap::register_concurrent_cycle_start(const Ticks& start_time) {
2304   collector_state()->set_concurrent_cycle_started(true);
2305   _gc_timer_cm->register_gc_start(start_time);
2306 
2307   _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
2308   trace_heap_before_gc(_gc_tracer_cm);
2309 }
2310 
2311 void G1CollectedHeap::register_concurrent_cycle_end() {
2312   if (collector_state()->concurrent_cycle_started()) {
2313     if (_cm->has_aborted()) {
2314       _gc_tracer_cm->report_concurrent_mode_failure();
2315     }
2316 
2317     _gc_timer_cm->register_gc_end();
2318     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2319 
2320     // Clear state variables to prepare for the next concurrent cycle.
2321     collector_state()->set_concurrent_cycle_started(false);
2322     _heap_summary_sent = false;
2323   }
2324 }
2325 
2326 void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
2327   if (collector_state()->concurrent_cycle_started()) {
2328     // This function can be called when:
2329     //  the cleanup pause is run
2330     //  the concurrent cycle is aborted before the cleanup pause.
2331     //  the concurrent cycle is aborted after the cleanup pause,
2332     //   but before the concurrent cycle end has been registered.
2333     // Make sure that we only send the heap information once.
2334     if (!_heap_summary_sent) {
2335       trace_heap_after_gc(_gc_tracer_cm);
2336       _heap_summary_sent = true;
2337     }
2338   }
2339 }
2340 
2341 G1YCType G1CollectedHeap::yc_type() {
2342   bool is_young = collector_state()->gcs_are_young();
2343   bool is_initial_mark = collector_state()->during_initial_mark_pause();
2344   bool is_during_mark = collector_state()->mark_in_progress();
2345 
2346   if (is_initial_mark) {
2347     return InitialMark;
2348   } else if (is_during_mark) {
2349     return DuringMark;
2350   } else if (is_young) {
2351     return Normal;
2352   } else {
2353     return Mixed;
2354   }
2355 }
2356 
2357 void G1CollectedHeap::collect(GCCause::Cause cause) {
2358   assert_heap_not_locked();
2359 
2360   uint gc_count_before;
2361   uint old_marking_count_before;
2362   uint full_gc_count_before;
2363   bool retry_gc;
2364 


3589 
3590   DEBUG_ONLY(totals.verify());
3591 }
3592 
3593 void G1CollectedHeap::reset_taskqueue_stats() {
3594   const uint n = workers()->total_workers();
3595   for (uint i = 0; i < n; ++i) {
3596     task_queue(i)->stats.reset();
3597   }
3598 }
3599 #endif // TASKQUEUE_STATS
3600 
3601 void G1CollectedHeap::log_gc_header() {
3602   if (!G1Log::fine()) {
3603     return;
3604   }
3605 
3606   gclog_or_tty->gclog_stamp(_gc_tracer_stw->gc_id());
3607 
3608   GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
3609     .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)")
3610     .append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : "");
3611 
3612   gclog_or_tty->print("[%s", (const char*)gc_cause_str);
3613 }
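For orientation, at G1Log "fine" level log_gc_header() emits the opening of the per-pause line, roughly "[GC pause (G1 Evacuation Pause) (young) (initial-mark)" (the cause text here is just an example), and log_gc_footer() below closes it. At "finer" level the footer appends " (to-space exhausted)" when evacuation failed, then the pause time as ", 0.0345678 secs]" followed by the phase times and detailed heap transition; at plain "fine" level an evacuation failure shows up as "--" instead.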
3614 
3615 void G1CollectedHeap::log_gc_footer(double pause_time_sec) {
3616   if (!G1Log::fine()) {
3617     return;
3618   }
3619 
3620   if (G1Log::finer()) {
3621     if (evacuation_failed()) {
3622       gclog_or_tty->print(" (to-space exhausted)");
3623     }
3624     gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec);
3625     g1_policy()->phase_times()->note_gc_end();
3626     g1_policy()->phase_times()->print(pause_time_sec);
3627     g1_policy()->print_detailed_heap_transition();
3628   } else {
3629     if (evacuation_failed()) {
3630       gclog_or_tty->print("--");


3647   _gc_timer_stw->register_gc_start();
3648 
3649   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3650 
3651   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3652   ResourceMark rm;
3653 
3654   G1Log::update_level();
3655   print_heap_before_gc();
3656   trace_heap_before_gc(_gc_tracer_stw);
3657 
3658   verify_region_sets_optional();
3659   verify_dirty_young_regions();
3660 
3661   // This call will decide whether this pause is an initial-mark
3662   // pause. If it is, during_initial_mark_pause() will return true
3663   // for the duration of this pause.
3664   g1_policy()->decide_on_conc_mark_initiation();
3665 
3666   // We do not allow initial-mark to be piggy-backed on a mixed GC.
3667   assert(!collector_state()->during_initial_mark_pause() ||
3668           collector_state()->gcs_are_young(), "sanity");
3669 
3670   // We also do not allow mixed GCs during marking.
3671   assert(!collector_state()->mark_in_progress() || collector_state()->gcs_are_young(), "sanity");
3672 
3673   // Record whether this pause is an initial mark. When the current
3674   // thread has completed its logging output and it's safe to signal
3675   // the CM thread, the flag's value in the policy will have been reset.
3676   bool should_start_conc_mark = collector_state()->during_initial_mark_pause();
3677 
3678   // Inner scope for scope based logging, timers, and stats collection
3679   {
3680     EvacuationInfo evacuation_info;
3681 
3682     if (collector_state()->during_initial_mark_pause()) {
3683       // We are about to start a marking cycle, so we increment the
3684       // full collection counter.
3685       increment_old_marking_cycles_started();
3686       register_concurrent_cycle_start(_gc_timer_stw->gc_start());
3687     }
3688 
3689     _gc_tracer_stw->report_yc_type(yc_type());
3690 
3691     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
3692 
3693     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
3694                                                                   workers()->active_workers(),
3695                                                                   Threads::number_of_non_daemon_threads());
3696     assert(UseDynamicNumberOfGCThreads ||
3697            active_workers == workers()->total_workers(),
3698            "If not dynamic should be using all the  workers");
3699     workers()->set_active_workers(active_workers);
3700 
3701     double pause_start_sec = os::elapsedTime();
3702     g1_policy()->phase_times()->note_gc_start(active_workers, collector_state()->mark_in_progress());
3703     log_gc_header();
3704 
3705     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
3706     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
3707 
3708     // If the secondary_free_list is not empty, append it to the
3709     // free_list. No need to wait for the cleanup operation to finish;
3710     // the region allocation code will check the secondary_free_list
3711     // and wait if necessary. If the G1StressConcRegionFreeing flag is
3712     // set, skip this step so that the region allocation code has to
3713     // get entries from the secondary_free_list.
3714     if (!G1StressConcRegionFreeing) {
3715       append_secondary_free_list_if_not_empty_with_lock();
3716     }
3717 
3718     assert(check_young_list_well_formed(), "young list should be well formed");
3719 
3720     // Don't dynamically change the number of GC threads this early.  A value of
3721     // 0 is used to indicate serial work.  When parallel work is done,
3722     // it will be set.


3776         g1_policy()->record_collection_pause_start(sample_start_time_sec);
3777 
3778         double scan_wait_start = os::elapsedTime();
3779         // We have to wait until the CM threads finish scanning the
3780         // root regions as it's the only way to ensure that all the
3781         // objects on them have been correctly scanned before we start
3782         // moving them during the GC.
3783         bool waited = _cm->root_regions()->wait_until_scan_finished();
3784         double wait_time_ms = 0.0;
3785         if (waited) {
3786           double scan_wait_end = os::elapsedTime();
3787           wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3788         }
3789         g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3790 
3791 #if YOUNG_LIST_VERBOSE
3792         gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
3793         _young_list->print();
3794 #endif // YOUNG_LIST_VERBOSE
3795 
3796         if (collector_state()->during_initial_mark_pause()) {
3797           concurrent_mark()->checkpointRootsInitialPre();
3798         }
3799 
3800 #if YOUNG_LIST_VERBOSE
3801         gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
3802         _young_list->print();
3803         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3804 #endif // YOUNG_LIST_VERBOSE
3805 
3806         g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
3807 
3808         register_humongous_regions_with_cset();
3809 
3810         assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3811 
3812         _cm->note_start_of_gc();
3813         // We call this after finalize_cset() to
3814         // ensure that the CSet has been finalized.
3815         _cm->verify_no_cset_oops();
3816 


3865         g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3866                                              _young_list->first_survivor_region(),
3867                                              _young_list->last_survivor_region());
3868 
3869         _young_list->reset_auxilary_lists();
3870 
3871         if (evacuation_failed()) {
3872           _allocator->set_used(recalculate_used());
3873           uint n_queues = MAX2((int)ParallelGCThreads, 1);
3874           for (uint i = 0; i < n_queues; i++) {
3875             if (_evacuation_failed_info_array[i].has_failed()) {
3876               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3877             }
3878           }
3879         } else {
3880           // The "used" of the collection set regions has already been subtracted
3881           // when they were freed.  Add in the bytes evacuated.
3882           _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
3883         }
3884 
3885         if (collector_state()->during_initial_mark_pause()) {
3886           // We have to do this before we notify the CM threads that
3887           // they can start working to make sure that all the
3888           // appropriate initialization is done on the CM object.
3889           concurrent_mark()->checkpointRootsInitialPost();
3890           collector_state()->set_mark_in_progress(true);
3891           // Note that we don't actually trigger the CM thread at
3892           // this point. We do that later when we're sure that
3893           // the current thread has completed its logging output.
3894         }
3895 
3896         allocate_dummy_regions();
3897 
3898 #if YOUNG_LIST_VERBOSE
3899         gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
3900         _young_list->print();
3901         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3902 #endif // YOUNG_LIST_VERBOSE
3903 
3904         _allocator->init_mutator_alloc_region();
3905 
3906         {
3907           size_t expand_bytes = g1_policy()->expansion_amount();
3908           if (expand_bytes > 0) {
3909             size_t bytes_before = capacity();
3910             // No need for an ergo verbose message here,


4356       cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
4357     }
4358   };
4359 
4360   void work(uint worker_id) {
4361     if (worker_id >= _n_workers) return;  // no work needed this round
4362 
4363     _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, os::elapsedTime());
4364 
4365     {
4366       ResourceMark rm;
4367       HandleMark   hm;
4368 
4369       ReferenceProcessor*             rp = _g1h->ref_processor_stw();
4370 
4371       G1ParScanThreadState            pss(_g1h, worker_id, rp);
4372       G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4373 
4374       pss.set_evac_failure_closure(&evac_failure_cl);
4375 
4376       bool only_young = _g1h->collector_state()->gcs_are_young();
4377 
4378       // Non-IM young GC.
4379       G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, &pss, rp);
4380       G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
4381                                                                                only_young, // Only process dirty klasses.
4382                                                                                false);     // No need to claim CLDs.
4383       // IM young GC.
4384       //    Strong roots closures.
4385       G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, &pss, rp);
4386       G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
4387                                                                                false, // Process all klasses.
4388                                                                                true); // Need to claim CLDs.
4389       //    Weak roots closures.
4390       G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
4391       G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
4392                                                                                     false, // Process all klasses.
4393                                                                                     true); // Need to claim CLDs.
4394 
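           // Pick the closure variants for this pause. Outside an initial-mark
           // pause the plain copying closures are used for everything. During an
           // initial-mark pause roots must also be marked, and when
           // ClassUnloadingWithConcurrentMark is enabled the weak roots and weak
           // CLDs get separate closures (and trace_metadata is set) so class
           // unloading can treat them differently from the strong ones.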
4395       OopClosure* strong_root_cl;
4396       OopClosure* weak_root_cl;
4397       CLDClosure* strong_cld_cl;
4398       CLDClosure* weak_cld_cl;
4399 
4400       bool trace_metadata = false;
4401 
4402       if (_g1h->collector_state()->during_initial_mark_pause()) {
4403         // We also need to mark copied objects.
4404         strong_root_cl = &scan_mark_root_cl;
4405         strong_cld_cl  = &scan_mark_cld_cl;
4406         if (ClassUnloadingWithConcurrentMark) {
4407           weak_root_cl = &scan_mark_weak_root_cl;
4408           weak_cld_cl  = &scan_mark_weak_cld_cl;
4409           trace_metadata = true;
4410         } else {
4411           weak_root_cl = &scan_mark_root_cl;
4412           weak_cld_cl  = &scan_mark_cld_cl;
4413         }
4414       } else {
4415         strong_root_cl = &scan_only_root_cl;
4416         weak_root_cl   = &scan_only_root_cl;
4417         strong_cld_cl  = &scan_only_cld_cl;
4418         weak_cld_cl    = &scan_only_cld_cl;
4419       }
4420 
4421       pss.start_strong_roots();
4422 


5043   {}
5044 
5045   virtual void work(uint worker_id) {
5046     // The reference processing task executed by a single worker.
5047     ResourceMark rm;
5048     HandleMark   hm;
5049 
5050     G1STWIsAliveClosure is_alive(_g1h);
5051 
5052     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
5053     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5054 
5055     pss.set_evac_failure_closure(&evac_failure_cl);
5056 
5057     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5058 
5059     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5060 
5061     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5062 
5063     if (_g1h->collector_state()->during_initial_mark_pause()) {
5064       // We also need to mark copied objects.
5065       copy_non_heap_cl = &copy_mark_non_heap_cl;
5066     }
5067 
5068     // Keep alive closure.
5069     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
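         // keep_alive evacuates any referent the reference processor decides must
         // stay alive, using the copy closure selected above (which also marks the
         // copy during an initial-mark pause); follow-up work lands on this
         // worker's queue and is drained by the complete-GC closure below.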
5070 
5071     // Complete GC closure
5072     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
5073 
5074     // Call the reference processing task's work routine.
5075     _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
5076 
5077     // Note we cannot assert that the refs array is empty here as not all
5078     // of the processing tasks (specifically phase2 - pp2_work) execute
5079     // the complete_gc closure (which ordinarily would drain the queue) so
5080     // the queue may not be empty.
5081   }
5082 };
5083 


5148     _n_workers(workers)
5149   { }
5150 
5151   void work(uint worker_id) {
5152     ResourceMark rm;
5153     HandleMark   hm;
5154 
5155     G1ParScanThreadState            pss(_g1h, worker_id, NULL);
5156     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5157 
5158     pss.set_evac_failure_closure(&evac_failure_cl);
5159 
5160     assert(pss.queue_is_empty(), "both queue and overflow should be empty");
5161 
5162     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
5163 
5164     G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5165 
5166     OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5167 
5168     if (_g1h->collector_state()->during_initial_mark_pause()) {
5169       // We also need to mark copied objects.
5170       copy_non_heap_cl = &copy_mark_non_heap_cl;
5171     }
5172 
5173     // Is alive closure
5174     G1AlwaysAliveClosure always_alive(_g1h);
5175 
5176     // Copying keep alive closure. Applied to referent objects that need
5177     // to be copied.
5178     G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);
5179 
5180     ReferenceProcessor* rp = _g1h->ref_processor_cm();
5181 
5182     uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5183     uint stride = MIN2(MAX2(_n_workers, 1U), limit);
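         // The stride is intended to let each worker walk every stride-th
         // discovered list starting at its own id, so that between them the
         // workers cover all 'limit' lists even when there are fewer workers
         // than lists.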
5184 
5185     // limit is derived from max_num_q(), which was set from ParallelGCThreads,
5186     // so worker_id (always less than the number of workers) must be below limit.
5187     // Assert anyway in case the worker id assignment ever changes.
5188     assert(worker_id < limit, "sanity");


5263   // JNI refs.
5264 
5265   // Use only a single queue for this PSS.
5266   G1ParScanThreadState            pss(this, 0, NULL);
5267 
5268   // We do not embed a reference processor in the copying/scanning
5269   // closures while we're actually processing the discovered
5270   // reference objects.
5271   G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5272 
5273   pss.set_evac_failure_closure(&evac_failure_cl);
5274 
5275   assert(pss.queue_is_empty(), "pre-condition");
5276 
5277   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
5278 
5279   G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5280 
5281   OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
5282 
5283   if (collector_state()->during_initial_mark_pause()) {
5284     // We also need to mark copied objects.
5285     copy_non_heap_cl = &copy_mark_non_heap_cl;
5286   }
5287 
5288   // Keep alive closure.
5289   G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, &pss);
5290 
5291   // Serial Complete GC closure
5292   G1STWDrainQueueClosure drain_queue(this, &pss);
5293 
5294   // Setup the soft refs policy...
5295   rp->setup_policy(false);
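       // Our reading of the boolean: 'false' keeps the default soft-reference
       // clearing policy instead of clearing all soft references this pause.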
5296 
5297   ReferenceProcessorStats stats;
5298   if (!rp->processing_is_mt()) {
5299     // Serial reference processing...
5300     stats = rp->process_discovered_references(&is_alive,
5301                                               &keep_alive,
5302                                               &drain_queue,
5303                                               NULL,


5376   hot_card_cache->reset_hot_cache_claimed_index();
5377   hot_card_cache->set_use_cache(false);
5378 
5379   const uint n_workers = workers()->active_workers();
5380   assert(UseDynamicNumberOfGCThreads ||
5381          n_workers == workers()->total_workers(),
5382          "If not dynamic should be using all the workers");
5383   set_par_threads(n_workers);
5384 
5385 
5386   init_for_evac_failure(NULL);
5387 
5388   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5389   double start_par_time_sec = os::elapsedTime();
5390   double end_par_time_sec;
5391 
5392   {
5393     G1RootProcessor root_processor(this);
5394     G1ParTask g1_par_task(this, _task_queues, &root_processor);
5395     // InitialMark needs claim bits to keep track of the marked-through CLDs.
5396     if (collector_state()->during_initial_mark_pause()) {
5397       ClassLoaderDataGraph::clear_claimed_marks();
5398     }
5399 
5400     // The individual threads will set their evac-failure closures.
5401     if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
5402     // These tasks use SharedHeap::_process_strong_tasks
5403     assert(UseDynamicNumberOfGCThreads ||
5404            workers()->active_workers() == workers()->total_workers(),
5405            "If not dynamic should be using all the workers");
5406     workers()->run_task(&g1_par_task);
5407     end_par_time_sec = os::elapsedTime();
5408 
5409     // Closing the inner scope will execute the destructor
5410     // for the G1RootProcessor object. We record the current
5411     // elapsed time before closing the scope so that time
5412     // taken for the destructor is NOT included in the
5413     // reported parallel time.
5414   }
5415 
5416   G1GCPhaseTimes* phase_times = g1_policy()->phase_times();


5637     return false;
5638   }
5639   return true;
5640 }
5641 
5642 bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
5643   CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
5644   CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
5645 
5646   HeapWord* bottom = hr->bottom();
5647   HeapWord* ptams  = hr->prev_top_at_mark_start();
5648   HeapWord* ntams  = hr->next_top_at_mark_start();
5649   HeapWord* end    = hr->end();
5650 
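       // The invariant being checked: neither bitmap may have a mark at or above
       // the corresponding top-at-mark-start (TAMS) for this region.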
5651   bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
5652 
5653   bool res_n = true;
5654   // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
5655   // we do the clearing of the next bitmap concurrently. Thus, we cannot verify the bitmap
5656   // if we happen to be in that state.
5657   if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
5658     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
5659   }
5660   if (!res_p || !res_n) {
5661     gclog_or_tty->print_cr("#### Bitmap verification failed for "HR_FORMAT,
5662                            HR_FORMAT_PARAMS(hr));
5663     gclog_or_tty->print_cr("#### Caller: %s", caller);
5664     return false;
5665   }
5666   return true;
5667 }
5668 
5669 void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
5670   if (!G1VerifyBitmaps) return;
5671 
5672   guarantee(verify_bitmaps(caller, hr), "bitmap verification");
5673 }
5674 
5675 class G1VerifyBitmapClosure : public HeapRegionClosure {
5676 private:
5677   const char* _caller;


6335 
6336   if (count < g1_policy()->max_regions(dest)) {
6337     const bool is_survivor = (dest.is_young());
6338     HeapRegion* new_alloc_region = new_region(word_size,
6339                                               !is_survivor,
6340                                               true /* do_expand */);
6341     if (new_alloc_region != NULL) {
6342       // We really only need to do this for old regions given that we
6343       // should never scan survivors. But it doesn't hurt to do it
6344       // for survivors too.
6345       new_alloc_region->record_timestamp();
6346       if (is_survivor) {
6347         new_alloc_region->set_survivor();
6348         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6349         check_bitmaps("Survivor Region Allocation", new_alloc_region);
6350       } else {
6351         new_alloc_region->set_old();
6352         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
6353         check_bitmaps("Old Region Allocation", new_alloc_region);
6354       }
6355       bool during_im = collector_state()->during_initial_mark_pause();
6356       new_alloc_region->note_start_of_copying(during_im);
6357       return new_alloc_region;
6358     }
6359   }
6360   return NULL;
6361 }
6362 
6363 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6364                                              size_t allocated_bytes,
6365                                              InCSetState dest) {
6366   bool during_im = collector_state()->during_initial_mark_pause();
6367   alloc_region->note_end_of_copying(during_im);
6368   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6369   if (dest.is_young()) {
6370     young_list()->add_survivor_region(alloc_region);
6371   } else {
6372     _old_set.add(alloc_region);
6373   }
6374   _hr_printer.retire(alloc_region);
6375 }
6376 
6377 // Heap region set verification
6378 
6379 class VerifyRegionListsClosure : public HeapRegionClosure {
6380 private:
6381   HeapRegionSet*   _old_set;
6382   HeapRegionSet*   _humongous_set;
6383   HeapRegionManager*   _hrm;
6384 
6385 public:
6386   HeapRegionSetCount _old_count;

