
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 11545 : [mq]: 8159978-collection-set-as-array
rev 11546 : [mq]: 8159978-erikh-review


1239 
1240       // Disable discovery and empty the discovered lists
1241       // for the CM ref processor.
1242       ref_processor_cm()->disable_discovery();
1243       ref_processor_cm()->abandon_partial_discovery();
1244       ref_processor_cm()->verify_no_references_recorded();
1245 
1246       // Abandon current iterations of concurrent marking and concurrent
1247       // refinement, if any are in progress.
1248       concurrent_mark()->abort();
1249 
1250       // Make sure we'll choose a new allocation region afterwards.
1251       _allocator->release_mutator_alloc_region();
1252       _allocator->abandon_gc_alloc_regions();
1253       g1_rem_set()->cleanupHRRS();
1254 
1255       // We may have added regions to the current incremental collection
1256       // set between the last GC or pause and now. We need to clear the
1257       // incremental collection set and then start rebuilding it afresh
1258       // after this full GC.
1259       abandon_collection_set(collection_set()->inc_head());
1260       collection_set()->clear_incremental();
1261       collection_set()->stop_incremental_building();
1262 
1263       tear_down_region_sets(false /* free_list_only */);
1264       collector_state()->set_gcs_are_young(true);
1265 
1266       // See the comments in g1CollectedHeap.hpp and
1267       // G1CollectedHeap::ref_processing_init() about
1268       // how reference processing currently works in G1.
1269 
1270       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1271       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1272 
1273       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1274       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1275 
1276       ref_processor_stw()->enable_discovery();
1277       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1278 
1279       // Do collection work
1280       {
1281         HandleMark hm;  // Discard invalid handles created during gc


1362 
1363       _hrm.verify_optional();
1364       _verifier->verify_region_sets_optional();
1365 
1366       _verifier->verify_after_gc();
1367 
1368       // Clear the previous marking bitmap, if needed for bitmap verification.
1369       // Note we cannot do this when we clear the next marking bitmap in
1370       // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1371       // objects marked during a full GC against the previous bitmap.
1372       // But we need to clear it before calling check_bitmaps below since
1373       // the full GC has compacted objects and updated TAMS but not updated
1374       // the prev bitmap.
1375       if (G1VerifyBitmaps) {
1376         GCTraceTime(Debug, gc)("Clear Bitmap for Verification");
1377         _cm->clear_prev_bitmap(workers());
1378       }
1379       _verifier->check_bitmaps("Full GC End");
1380 
1381       // Start a new incremental collection set for the next pause
1382       assert(collection_set()->head() == NULL, "must be");
1383       collection_set()->start_incremental_building();
1384 
1385       clear_cset_fast_test();
1386 
1387       _allocator->init_mutator_alloc_region();
1388 
1389       g1_policy()->record_full_collection_end();
1390 
1391       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1392       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1393       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1394       // before any GC notifications are raised.
1395       g1mm()->update_sizes();
1396 
1397       gc_epilogue(true);
1398 
1399       heap_transition.print();
1400 
1401       print_heap_after_gc();
1402       print_heap_regions();


1707   _g1_rem_set(NULL),
1708   _cg1r(NULL),
1709   _g1mm(NULL),
1710   _refine_cte_cl(NULL),
1711   _preserved_marks_set(true /* in_c_heap */),
1712   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1713   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1714   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1715   _humongous_reclaim_candidates(),
1716   _has_humongous_reclaim_candidates(false),
1717   _archive_allocator(NULL),
1718   _free_regions_coming(false),
1719   _gc_time_stamp(0),
1720   _summary_bytes_used(0),
1721   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1722   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1723   _expand_heap_after_alloc_failure(true),
1724   _old_marking_cycles_started(0),
1725   _old_marking_cycles_completed(0),
1726   _in_cset_fast_test(),
1727   _worker_cset_start_region(NULL),
1728   _worker_cset_start_region_time_stamp(NULL),
1729   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1730   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
1731 
1732   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1733                           /* are_GC_task_threads */true,
1734                           /* are_ConcurrentGC_threads */false);
1735   _workers->initialize_workers();
1736   _verifier = new G1HeapVerifier(this);
1737 
1738   _allocator = G1Allocator::create_allocator(this);
1739 
1740   _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
1741 
1742   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1743 
1744   // Override the default _filler_array_max_size so that no humongous filler
1745   // objects are created.
1746   _filler_array_max_size = _humongous_object_threshold_in_words;
1747 
1748   uint n_queues = ParallelGCThreads;
1749   _task_queues = new RefToScanQueueSet(n_queues);
1750 
1751   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1752   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
1753   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1754 
1755   for (uint i = 0; i < n_queues; i++) {
1756     RefToScanQueue* q = new RefToScanQueue();
1757     q->initialize();
1758     _task_queues->register_queue(i, q);
1759     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1760   }
1761   clear_cset_start_regions();
1762 
1763   // Initialize the G1EvacuationFailureALot counters and flags.
1764   NOT_PRODUCT(reset_evacuation_should_fail();)
1765 
1766   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1767 }
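
The constructor loop above uses placement new to construct each EvacuationFailedInfo
in the raw storage returned by NEW_C_HEAP_ARRAY, which allocates memory without
running constructors. A minimal standalone sketch of that pattern (illustrative
type, not JDK code):

    #include <new>
    #include <cstdlib>

    struct Info {
      int failures;
      Info() : failures(0) {}
    };

    int main() {
      // Raw storage, as a C-heap allocator would return it (no constructors run).
      void* raw = std::malloc(4 * sizeof(Info));
      Info* arr = static_cast<Info*>(raw);
      for (int i = 0; i < 4; i++) {
        ::new (&arr[i]) Info();  // construct each element in place
      }
      std::free(raw);
      return 0;
    }
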
1768 
1769 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1770                                                                  size_t size,
1771                                                                  size_t translation_factor) {
1772   size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1773   // Allocate a new reserved space, preferring to use large pages.
1774   ReservedSpace rs(size, preferred_page_size);
1775   G1RegionToSpaceMapper* result  =
1776     G1RegionToSpaceMapper::create_mapper(rs,
1777                                          size,
1778                                          rs.alignment(),
1779                                          HeapRegion::GrainBytes,
1780                                          translation_factor,
1781                                          mtGC);


1970 
1971   // We'll reuse the same region whether or not the alloc region
1972   // requires BOT updates. If it doesn't, a non-young region would
1973   // complain that it cannot support allocations without BOT updates,
1974   // so we tag the dummy region as eden to avoid that.
1975   dummy_region->set_eden();
1976   // Make sure it's full.
1977   dummy_region->set_top(dummy_region->end());
1978   G1AllocRegion::setup(this, dummy_region);
1979 
1980   _allocator->init_mutator_alloc_region();
1981 
1982       // Create the monitoring and management support now that the
1983       // values in the heap have been properly initialized.
1984   _g1mm = new G1MonitoringSupport(this);
1985 
1986   G1StringDedup::initialize();
1987 
1988   _preserved_marks_set.init(ParallelGCThreads);
1989 


1990   return JNI_OK;
1991 }
1992 
1993 void G1CollectedHeap::stop() {
1994   // Stop all concurrent threads. We do this to make sure these threads
1995   // do not continue to execute and access resources (e.g. logging)
1996   // that are destroyed during shutdown.
1997   _cg1r->stop();
1998   _cmThread->stop();
1999   if (G1StringDedup::is_enabled()) {
2000     G1StringDedup::stop();
2001   }
2002 }
2003 
2004 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2005   return HeapRegion::max_region_size();
2006 }
2007 
2008 void G1CollectedHeap::post_initialize() {
2009   ref_processing_init();


2403   }
2404 };
2405 
2406 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
2407   IterateObjectClosureRegionClosure blk(cl);
2408   heap_region_iterate(&blk);
2409 }
2410 
2411 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2412   _hrm.iterate(cl);
2413 }
2414 
2415 void
2416 G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
2417                                          uint worker_id,
2418                                          HeapRegionClaimer *hrclaimer,
2419                                          bool concurrent) const {
2420   _hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
2421 }
2422 
2423 // Clear the cached CSet starting regions and (more importantly)
2424 // the time stamps. Called when we reset the GC time stamp.
2425 void G1CollectedHeap::clear_cset_start_regions() {
2426   assert(_worker_cset_start_region != NULL, "sanity");
2427   assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
2428 
2429   for (uint i = 0; i < ParallelGCThreads; i++) {
2430     _worker_cset_start_region[i] = NULL;
2431     _worker_cset_start_region_time_stamp[i] = 0;
2432   }
2433 }
2434 
2435 // Given the id of a worker, obtain or calculate a suitable
2436 // starting region for iterating over the current collection set.
2437 HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
2438   assert(get_gc_time_stamp() > 0, "should have been updated by now");
2439 
2440   HeapRegion* result = NULL;
2441   unsigned gc_time_stamp = get_gc_time_stamp();
2442 
2443   if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
2444     // Cached starting region for current worker was set
2445     // during the current pause - so it's valid.
2446     // Note: the cached starting heap region may be NULL
2447     // (when the collection set is empty).
2448     result = _worker_cset_start_region[worker_i];
2449     assert(result == NULL || result->in_collection_set(), "sanity");
2450     return result;
2451   }
2452 
2453   // The cached entry was not valid so let's calculate
2454   // a suitable starting heap region for this worker.
2455 
2456   // We want the parallel threads to start their collection
2457   // set iteration at different collection set regions to
2458   // avoid contention.
2459   // If we have:
2460   //          n collection set regions
2461   //          p threads
2462   // Then thread t will start at region floor ((t * n) / p)
2463 
2464   result = collection_set()->head();
2465   uint cs_size = collection_set()->region_length();
2466   uint active_workers = workers()->active_workers();
2467 
2468   uint end_ind   = (cs_size * worker_i) / active_workers;
2469   uint start_ind = 0;
2470 
2471   if (worker_i > 0 &&
2472       _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
2473     // The previous worker's starting region is valid,
2474     // so let's iterate from there.
2475     start_ind = (cs_size * (worker_i - 1)) / active_workers;
2476     OrderAccess::loadload();
2477     result = _worker_cset_start_region[worker_i - 1];
2478   }
2479 
2480   for (uint i = start_ind; i < end_ind; i++) {
2481     result = result->next_in_collection_set();
2482   }
2483 
2484   // Note: the calculated starting heap region may be NULL
2485   // (when the collection set is empty).
2486   assert(result == NULL || result->in_collection_set(), "sanity");
2487   assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
2488          "should be updated only once per pause");
2489   _worker_cset_start_region[worker_i] = result;
2490   OrderAccess::storestore();
2491   _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
2492   return result;
2493 }
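
The floor((t * n) / p) rule above spreads the worker start points evenly across
the collection set. A minimal standalone sketch of the same arithmetic
(hypothetical values, not JDK code):

    #include <cstdio>

    int main() {
      const unsigned n = 10;  // collection set regions (hypothetical)
      const unsigned p = 4;   // parallel GC workers (hypothetical)
      for (unsigned t = 0; t < p; t++) {
        // Same rule as start_cset_region_for_worker: floor((t * n) / p).
        std::printf("worker %u starts at region %u\n", t, (t * n) / p);
      }
      return 0;
    }

For n = 10 and p = 4 this yields start indices 0, 2, 5 and 7, so no two workers
begin their iteration on the same region.
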
2494 
2495 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2496   HeapRegion* r = collection_set()->head();
2497   while (r != NULL) {
2498     HeapRegion* next = r->next_in_collection_set();
2499     if (cl->doHeapRegion(r)) {
2500       cl->incomplete();
2501       return;
2502     }
2503     r = next;
2504   }
2505 }
2506 
2507 void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
2508                                                   HeapRegionClosure *cl) {
2509   if (r == NULL) {
2510     // The CSet is empty so there's nothing to do.
2511     return;
2512   }
2513 
2514   assert(r->in_collection_set(),
2515          "Start region must be a member of the collection set.");
2516   HeapRegion* cur = r;
2517   while (cur != NULL) {
2518     HeapRegion* next = cur->next_in_collection_set();
2519     if (cl->doHeapRegion(cur) && false) {
2520       cl->incomplete();
2521       return;
2522     }
2523     cur = next;
2524   }
2525   cur = collection_set()->head();
2526   while (cur != r) {
2527     HeapRegion* next = cur->next_in_collection_set();
2528     if (cl->doHeapRegion(cur) && false) {
2529       cl->incomplete();
2530       return;
2531     }
2532     cur = next;
2533   }
2534 }
2535 
2536 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2537   HeapRegion* result = _hrm.next_region_in_heap(from);
2538   while (result != NULL && result->is_pinned()) {
2539     result = _hrm.next_region_in_heap(result);
2540   }
2541   return result;
2542 }
2543 
2544 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2545   HeapRegion* hr = heap_region_containing(addr);
2546   return hr->block_start(addr);
2547 }
2548 
2549 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2550   HeapRegion* hr = heap_region_containing(addr);
2551   return hr->block_size(addr);
2552 }
2553 


3073     task_queue(i)->stats.reset();
3074   }
3075 }
3076 #endif // TASKQUEUE_STATS
3077 
3078 void G1CollectedHeap::wait_for_root_region_scanning() {
3079   double scan_wait_start = os::elapsedTime();
3080   // We have to wait until the CM threads finish scanning the
3081   // root regions as it's the only way to ensure that all the
3082   // objects on them have been correctly scanned before we start
3083   // moving them during the GC.
3084   bool waited = _cm->root_regions()->wait_until_scan_finished();
3085   double wait_time_ms = 0.0;
3086   if (waited) {
3087     double scan_wait_end = os::elapsedTime();
3088     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
3089   }
3090   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
3091 }
3092 








3093 bool
3094 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
3095   assert_at_safepoint(true /* should_be_vm_thread */);
3096   guarantee(!is_gc_active(), "collection is not reentrant");
3097 
3098   if (GCLocker::check_active_before_gc()) {
3099     return false;
3100   }
3101 
3102   _gc_timer_stw->register_gc_start();
3103 
3104   GCIdMark gc_id_mark;
3105   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3106 
3107   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3108   ResourceMark rm;
3109 
3110   g1_policy()->note_gc_start();
3111 
3112   wait_for_root_region_scanning();


3251 
3252         evacuation_info.set_collectionset_regions(collection_set()->region_length());
3253 
3254         // Make sure the remembered sets are up to date. This needs to be
3255         // done before register_humongous_regions_with_cset(), because the
3256         // remembered sets are used there to choose eager reclaim candidates.
3257         // If the remembered sets are not up to date we might miss some
3258         // entries that need to be handled.
3259         g1_rem_set()->cleanupHRRS();
3260 
3261         register_humongous_regions_with_cset();
3262 
3263         assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3264 
3265         _cm->note_start_of_gc();
3266         // We call this after finalize_cset() to
3267         // ensure that the CSet has been finalized.
3268         _cm->verify_no_cset_oops();
3269 
3270         if (_hr_printer.is_active()) {
3271           HeapRegion* hr = collection_set()->head();
3272           while (hr != NULL) {
3273             _hr_printer.cset(hr);
3274             hr = hr->next_in_collection_set();
3275           }
3276         }
3277 
3278         // Initialize the GC alloc regions.
3279         _allocator->init_gc_alloc_regions(evacuation_info);
3280 
3281         G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
3282         pre_evacuate_collection_set();
3283 
3284         // Actually do the work...
3285         evacuate_collection_set(evacuation_info, &per_thread_states);
3286 
3287         post_evacuate_collection_set(evacuation_info, &per_thread_states);
3288 
3289         const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3290         free_collection_set(collection_set()->head(), evacuation_info, surviving_young_words);
3291 
3292         eagerly_reclaim_humongous_regions();
3293 
3294         collection_set()->clear_head();
3295 
3296         record_obj_copy_mem_stats();
3297         _survivor_evac_stats.adjust_desired_plab_sz();
3298         _old_evac_stats.adjust_desired_plab_sz();
3299 
3300         // Start a new incremental collection set for the next pause.
3301         collection_set()->start_incremental_building();
3302 
3303         clear_cset_fast_test();
3304 
3305         guarantee(_eden.length() == 0, "eden should have been cleared");
3306         g1_policy()->transfer_survivors_to_cset(survivor());
3307 
3308         if (evacuation_failed()) {
3309           set_used(recalculate_used());
3310           if (_archive_allocator != NULL) {
3311             _archive_allocator->clear_used();
3312           }
3313           for (uint i = 0; i < ParallelGCThreads; i++) {
3314             if (_evacuation_failed_info_array[i].has_failed()) {
3315               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);


4687   HeapRegionClaimer _hrclaimer;
4688 
4689 public:
4690   G1ParScrubRemSetTask(G1RemSet* g1_rs, uint num_workers) :
4691     AbstractGangTask("G1 ScrubRS"),
4692     _g1rs(g1_rs),
4693     _hrclaimer(num_workers) {
4694   }
4695 
4696   void work(uint worker_id) {
4697     _g1rs->scrub(worker_id, &_hrclaimer);
4698   }
4699 };
4700 
4701 void G1CollectedHeap::scrub_rem_set() {
4702   uint num_workers = workers()->active_workers();
4703   G1ParScrubRemSetTask g1_par_scrub_rs_task(g1_rem_set(), num_workers);
4704   workers()->run_task(&g1_par_scrub_rs_task);
4705 }
4706 
4707 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4708   size_t pre_used = 0;
4709   FreeRegionList local_free_list("Local List for CSet Freeing");
4710 
4711   double young_time_ms     = 0.0;
4712   double non_young_time_ms = 0.0;
4713 
4714   _eden.clear();
4715 
4716   G1Policy* policy = g1_policy();
4717 
4718   double start_sec = os::elapsedTime();
4719   bool non_young = true;
4720 
4721   HeapRegion* cur = cs_head;
4722   int age_bound = -1;
4723   size_t rs_lengths = 0;
4724 
4725   while (cur != NULL) {
4726     assert(!is_on_master_free_list(cur), "sanity");
4727     if (non_young) {
4728       if (cur->is_young()) {
4729         double end_sec = os::elapsedTime();
4730         double elapsed_ms = (end_sec - start_sec) * 1000.0;
4731         non_young_time_ms += elapsed_ms;
4732 
4733         start_sec = os::elapsedTime();
4734         non_young = false;










4735       }
4736     } else {
4737       if (!cur->is_young()) {
4738         double end_sec = os::elapsedTime();
4739         double elapsed_ms = (end_sec - start_sec) * 1000.0;
4740         young_time_ms += elapsed_ms;
4741 
4742         start_sec = os::elapsedTime();
4743         non_young = true;
4744       }
4745     }
4746 
4747     rs_lengths += cur->rem_set()->occupied_locked();

4748 
4749     HeapRegion* next = cur->next_in_collection_set();
4750     assert(cur->in_collection_set(), "bad CS");
4751     cur->set_next_in_collection_set(NULL);
4752     clear_in_cset(cur);
4753 
4754     if (cur->is_young()) {
4755       int index = cur->young_index_in_cset();
4756       assert(index != -1, "invariant");
4757       assert((uint) index < collection_set()->young_region_length(), "invariant");
4758       size_t words_survived = surviving_young_words[index];
4759       cur->record_surv_words_in_group(words_survived);
4760 






4761     } else {
4762       int index = cur->young_index_in_cset();
4763       assert(index == -1, "invariant");
4764     }
4765 
4766     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
4767             (!cur->is_young() && cur->young_index_in_cset() == -1),
4768             "invariant" );
4769 
4770     if (!cur->evacuation_failed()) {
4771       MemRegion used_mr = cur->used_region();
4772 
4773       // The region must not be empty.
4774       assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
4775       pre_used += cur->used();
4776       free_region(cur, &local_free_list, false /* par */, true /* locked */);
4777     } else {
4778       cur->uninstall_surv_rate_group();
4779       if (cur->is_young()) {
4780         cur->set_young_index_in_cset(-1);
4781       }
4782       cur->set_evacuation_failed(false);
4783       // When moving a young gen region to old gen, we "allocate" that whole region
4784       // there. This is in addition to any already evacuated objects. Notify the
4785       // policy about that.
4786       // Old gen regions do not cause an additional allocation: both the objects
4787       // still in the region and the ones already moved are accounted for elsewhere.
4788       if (cur->is_young()) {
4789         policy->add_bytes_allocated_in_old_since_last_gc(HeapRegion::GrainBytes);
4790       }
4791       // The region is now considered to be old.
4792       cur->set_old();
4793       // Do some allocation statistics accounting. Regions that failed evacuation
4794       // are always made old, so there is no need to update anything in the young
4795       // gen statistics, but we need to update old gen statistics.
4796       size_t used_words = cur->marked_bytes() / HeapWordSize;
4797       _old_evac_stats.add_failure_used_and_waste(used_words, HeapRegion::GrainWords - used_words);
4798       _old_set.add(cur);
4799       evacuation_info.increment_collectionset_used_after(cur->used());

4800     }
4801     cur = next;






4802   }
4803 
4804   evacuation_info.set_regions_freed(local_free_list.length());
4805   policy->record_max_rs_lengths(rs_lengths);



















4806   policy->cset_regions_freed();
4807 
4808   double end_sec = os::elapsedTime();
4809   double elapsed_ms = (end_sec - start_sec) * 1000.0;
4810 
4811   if (non_young) {
4812     non_young_time_ms += elapsed_ms;
4813   } else {
4814     young_time_ms += elapsed_ms;
4815   }
4816 
4817   prepend_to_freelist(&local_free_list);
4818   decrement_summary_bytes(pre_used);
4819   policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
4820   policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
4821 }
4822 
4823 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
4824  private:
4825   FreeRegionList* _free_region_list;
4826   HeapRegionSet* _proxy_set;
4827   uint _humongous_regions_removed;
4828   size_t _freed_bytes;
4829  public:
4830 
4831   G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
4832     _free_region_list(free_region_list), _humongous_regions_removed(0), _freed_bytes(0) {
4833   }
4834 
4835   virtual bool doHeapRegion(HeapRegion* r) {
4836     if (!r->is_starts_humongous()) {
4837       return false;
4838     }
4839 
4840     G1CollectedHeap* g1h = G1CollectedHeap::heap();


4943   heap_region_iterate(&cl);
4944 
4945   remove_from_old_sets(0, cl.humongous_free_count());
4946 
4947   G1HRPrinter* hrp = hr_printer();
4948   if (hrp->is_active()) {
4949     FreeRegionListIterator iter(&local_cleanup_list);
4950     while (iter.more_available()) {
4951       HeapRegion* hr = iter.get_next();
4952       hrp->cleanup(hr);
4953     }
4954   }
4955 
4956   prepend_to_freelist(&local_cleanup_list);
4957   decrement_summary_bytes(cl.bytes_freed());
4958 
4959   g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
4960                                                                     cl.humongous_free_count());
4961 }
4962 
4963 // This routine is similar to the above but does not record
4964 // any policy statistics or update free lists; we are abandoning
4965 // the current incremental collection set in preparation for a
4966 // full collection. After the full GC we will start to build up
4967 // the incremental collection set again.
4968 // This is only called when we're doing a full collection
4969 // and is immediately followed by the tearing down of the young list.
4970 
4971 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
4972   HeapRegion* cur = cs_head;
4973 
4974   while (cur != NULL) {
4975     HeapRegion* next = cur->next_in_collection_set();
4976     assert(cur->in_collection_set(), "bad CS");
4977     cur->set_next_in_collection_set(NULL);
4978     clear_in_cset(cur);
4979     cur->set_young_index_in_cset(-1);
4980     cur = next;
4981   }








4982 }
4983 
4984 void G1CollectedHeap::set_free_regions_coming() {
4985   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");
4986 
4987   assert(!free_regions_coming(), "pre-condition");
4988   _free_regions_coming = true;
4989 }
4990 
4991 void G1CollectedHeap::reset_free_regions_coming() {
4992   assert(free_regions_coming(), "pre-condition");
4993 
4994   {
4995     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
4996     _free_regions_coming = false;
4997     SecondaryFreeList_lock->notify_all();
4998   }
4999 
5000   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : reset free regions coming");
5001 }




1239 
1240       // Disable discovery and empty the discovered lists
1241       // for the CM ref processor.
1242       ref_processor_cm()->disable_discovery();
1243       ref_processor_cm()->abandon_partial_discovery();
1244       ref_processor_cm()->verify_no_references_recorded();
1245 
1246       // Abandon current iterations of concurrent marking and concurrent
1247       // refinement, if any are in progress.
1248       concurrent_mark()->abort();
1249 
1250       // Make sure we'll choose a new allocation region afterwards.
1251       _allocator->release_mutator_alloc_region();
1252       _allocator->abandon_gc_alloc_regions();
1253       g1_rem_set()->cleanupHRRS();
1254 
1255       // We may have added regions to the current incremental collection
1256       // set between the last GC or pause and now. We need to clear the
1257       // incremental collection set and then start rebuilding it afresh
1258       // after this full GC.
1259       abandon_collection_set(collection_set());


1260 
1261       tear_down_region_sets(false /* free_list_only */);
1262       collector_state()->set_gcs_are_young(true);
1263 
1264       // See the comments in g1CollectedHeap.hpp and
1265       // G1CollectedHeap::ref_processing_init() about
1266       // how reference processing currently works in G1.
1267 
1268       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1269       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1270 
1271       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1272       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1273 
1274       ref_processor_stw()->enable_discovery();
1275       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1276 
1277       // Do collection work
1278       {
1279         HandleMark hm;  // Discard invalid handles created during gc


1360 
1361       _hrm.verify_optional();
1362       _verifier->verify_region_sets_optional();
1363 
1364       _verifier->verify_after_gc();
1365 
1366       // Clear the previous marking bitmap, if needed for bitmap verification.
1367       // Note we cannot do this when we clear the next marking bitmap in
1368       // G1ConcurrentMark::abort() above since VerifyDuringGC verifies the
1369       // objects marked during a full GC against the previous bitmap.
1370       // But we need to clear it before calling check_bitmaps below since
1371       // the full GC has compacted objects and updated TAMS but not updated
1372       // the prev bitmap.
1373       if (G1VerifyBitmaps) {
1374         GCTraceTime(Debug, gc)("Clear Bitmap for Verification");
1375         _cm->clear_prev_bitmap(workers());
1376       }
1377       _verifier->check_bitmaps("Full GC End");
1378 
1379       // Start a new incremental collection set for the next pause

1380       collection_set()->start_incremental_building();
1381 
1382       clear_cset_fast_test();
1383 
1384       _allocator->init_mutator_alloc_region();
1385 
1386       g1_policy()->record_full_collection_end();
1387 
1388       // We must call G1MonitoringSupport::update_sizes() in the same scoping level
1389       // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
1390       // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
1391       // before any GC notifications are raised.
1392       g1mm()->update_sizes();
1393 
1394       gc_epilogue(true);
1395 
1396       heap_transition.print();
1397 
1398       print_heap_after_gc();
1399       print_heap_regions();


1704   _g1_rem_set(NULL),
1705   _cg1r(NULL),
1706   _g1mm(NULL),
1707   _refine_cte_cl(NULL),
1708   _preserved_marks_set(true /* in_c_heap */),
1709   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1710   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1711   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1712   _humongous_reclaim_candidates(),
1713   _has_humongous_reclaim_candidates(false),
1714   _archive_allocator(NULL),
1715   _free_regions_coming(false),
1716   _gc_time_stamp(0),
1717   _summary_bytes_used(0),
1718   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1719   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1720   _expand_heap_after_alloc_failure(true),
1721   _old_marking_cycles_started(0),
1722   _old_marking_cycles_completed(0),
1723   _in_cset_fast_test(),


1724   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1725   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
1726 
1727   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1728                           /* are_GC_task_threads */true,
1729                           /* are_ConcurrentGC_threads */false);
1730   _workers->initialize_workers();
1731   _verifier = new G1HeapVerifier(this);
1732 
1733   _allocator = G1Allocator::create_allocator(this);
1734 
1735   _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
1736 
1737   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1738 
1739   // Override the default _filler_array_max_size so that no humongous filler
1740   // objects are created.
1741   _filler_array_max_size = _humongous_object_threshold_in_words;
1742 
1743   uint n_queues = ParallelGCThreads;
1744   _task_queues = new RefToScanQueueSet(n_queues);
1745 


1746   _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1747 
1748   for (uint i = 0; i < n_queues; i++) {
1749     RefToScanQueue* q = new RefToScanQueue();
1750     q->initialize();
1751     _task_queues->register_queue(i, q);
1752     ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1753   }

1754 
1755   // Initialize the G1EvacuationFailureALot counters and flags.
1756   NOT_PRODUCT(reset_evacuation_should_fail();)
1757 
1758   guarantee(_task_queues != NULL, "task_queues allocation failure.");
1759 }
1760 
1761 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1762                                                                  size_t size,
1763                                                                  size_t translation_factor) {
1764   size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);
1765   // Allocate a new reserved space, preferring to use large pages.
1766   ReservedSpace rs(size, preferred_page_size);
1767   G1RegionToSpaceMapper* result  =
1768     G1RegionToSpaceMapper::create_mapper(rs,
1769                                          size,
1770                                          rs.alignment(),
1771                                          HeapRegion::GrainBytes,
1772                                          translation_factor,
1773                                          mtGC);


1962 
1963   // We'll reuse the same region whether or not the alloc region
1964   // requires BOT updates. If it doesn't, a non-young region would
1965   // complain that it cannot support allocations without BOT updates,
1966   // so we tag the dummy region as eden to avoid that.
1967   dummy_region->set_eden();
1968   // Make sure it's full.
1969   dummy_region->set_top(dummy_region->end());
1970   G1AllocRegion::setup(this, dummy_region);
1971 
1972   _allocator->init_mutator_alloc_region();
1973 
1974   // Create the monitoring and management support now that the
1975   // values in the heap have been properly initialized.
1976   _g1mm = new G1MonitoringSupport(this);
1977 
1978   G1StringDedup::initialize();
1979 
1980   _preserved_marks_set.init(ParallelGCThreads);
1981 
1982   _collection_set.set_max_length(max_regions());
1983 
1984   return JNI_OK;
1985 }
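
The new _collection_set.set_max_length(max_regions()) call reflects the core of
this change: the collection set is now stored as a pre-sized array of region
indices rather than a list linked through the regions, so its capacity must be
fixed once the maximum region count is known. A hedged sketch of that idea
(illustrative names, not the actual G1CollectionSet interface):

    #include <cassert>
    #include <cstddef>

    typedef unsigned int uint;

    class ArrayCSetSketch {
      uint* _regions;     // region indices stored by value
      uint  _max_length;  // capacity, fixed at heap initialization
      uint  _length;      // current number of regions in the set
    public:
      ArrayCSetSketch() : _regions(NULL), _max_length(0), _length(0) {}
      ~ArrayCSetSketch() { delete[] _regions; }

      void set_max_length(uint max) {
        _max_length = max;
        _regions = new uint[max];  // allocated once, never resized
      }

      void add(uint region_index) {
        assert(_length < _max_length && "collection set overflow");
        _regions[_length++] = region_index;
      }

      void clear() { _length = 0; }
    };
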
1986 
1987 void G1CollectedHeap::stop() {
1988   // Stop all concurrent threads. We do this to make sure these threads
1989   // do not continue to execute and access resources (e.g. logging)
1990   // that are destroyed during shutdown.
1991   _cg1r->stop();
1992   _cmThread->stop();
1993   if (G1StringDedup::is_enabled()) {
1994     G1StringDedup::stop();
1995   }
1996 }
1997 
1998 size_t G1CollectedHeap::conservative_max_heap_alignment() {
1999   return HeapRegion::max_region_size();
2000 }
2001 
2002 void G1CollectedHeap::post_initialize() {
2003   ref_processing_init();


2397   }
2398 };
2399 
2400 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
2401   IterateObjectClosureRegionClosure blk(cl);
2402   heap_region_iterate(&blk);
2403 }
2404 
2405 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
2406   _hrm.iterate(cl);
2407 }
2408 
2409 void
2410 G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
2411                                          uint worker_id,
2412                                          HeapRegionClaimer *hrclaimer,
2413                                          bool concurrent) const {
2414   _hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
2415 }
2416 








































































2417 void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
2418   _collection_set.iterate(cl);








2419 }
2420 
2421 void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
2422   _collection_set.iterate_from(cl, worker_id, workers()->active_workers());

























2423 }
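
With the array representation, each worker can derive its own starting offset
and wrap around the array, which is what replaces the removed
start_cset_region_for_worker() machinery and its cached start regions and time
stamps. A hedged sketch of such a traversal (illustrative signature, assuming a
doHeapRegion-style callback over region indices; not the actual implementation):

    #include <cstddef>

    typedef unsigned int uint;

    void iterate_from_sketch(const uint* regions, uint length,
                             uint worker_id, uint total_workers,
                             bool (*do_region)(uint)) {
      if (length == 0) {
        return;
      }
      // Same floor((t * n) / p) spreading rule the old code used.
      uint start = (uint)(((size_t)worker_id * length) / total_workers);
      for (uint i = 0; i < length; i++) {
        uint index = (start + i) % length;  // wrap around the array
        if (do_region(regions[index])) {
          return;  // callback requested early termination
        }
      }
    }
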
2424 
2425 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2426   HeapRegion* result = _hrm.next_region_in_heap(from);
2427   while (result != NULL && result->is_pinned()) {
2428     result = _hrm.next_region_in_heap(result);
2429   }
2430   return result;
2431 }
2432 
2433 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2434   HeapRegion* hr = heap_region_containing(addr);
2435   return hr->block_start(addr);
2436 }
2437 
2438 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2439   HeapRegion* hr = heap_region_containing(addr);
2440   return hr->block_size(addr);
2441 }
2442 


2962     task_queue(i)->stats.reset();
2963   }
2964 }
2965 #endif // TASKQUEUE_STATS
2966 
2967 void G1CollectedHeap::wait_for_root_region_scanning() {
2968   double scan_wait_start = os::elapsedTime();
2969   // We have to wait until the CM threads finish scanning the
2970   // root regions as it's the only way to ensure that all the
2971   // objects on them have been correctly scanned before we start
2972   // moving them during the GC.
2973   bool waited = _cm->root_regions()->wait_until_scan_finished();
2974   double wait_time_ms = 0.0;
2975   if (waited) {
2976     double scan_wait_end = os::elapsedTime();
2977     wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
2978   }
2979   g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
2980 }
2981 
2982 class G1PrintCollectionSetClosure : public HeapRegionClosure {
2983 public:
2984   virtual bool doHeapRegion(HeapRegion* r) {
2985     G1CollectedHeap::heap()->hr_printer()->cset(r);
2986     return false;
2987   }
2988 };
2989 
2990 bool
2991 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
2992   assert_at_safepoint(true /* should_be_vm_thread */);
2993   guarantee(!is_gc_active(), "collection is not reentrant");
2994 
2995   if (GCLocker::check_active_before_gc()) {
2996     return false;
2997   }
2998 
2999   _gc_timer_stw->register_gc_start();
3000 
3001   GCIdMark gc_id_mark;
3002   _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
3003 
3004   SvcGCMarker sgcm(SvcGCMarker::MINOR);
3005   ResourceMark rm;
3006 
3007   g1_policy()->note_gc_start();
3008 
3009   wait_for_root_region_scanning();


3148 
3149         evacuation_info.set_collectionset_regions(collection_set()->region_length());
3150 
3151         // Make sure the remembered sets are up to date. This needs to be
3152         // done before register_humongous_regions_with_cset(), because the
3153         // remembered sets are used there to choose eager reclaim candidates.
3154         // If the remembered sets are not up to date we might miss some
3155         // entries that need to be handled.
3156         g1_rem_set()->cleanupHRRS();
3157 
3158         register_humongous_regions_with_cset();
3159 
3160         assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");
3161 
3162         _cm->note_start_of_gc();
3163         // We call this after finalize_cset() to
3164         // ensure that the CSet has been finalized.
3165         _cm->verify_no_cset_oops();
3166 
3167         if (_hr_printer.is_active()) {
3168           G1PrintCollectionSetClosure cl;
3169           _collection_set.iterate(&cl);



3170         }
3171 
3172         // Initialize the GC alloc regions.
3173         _allocator->init_gc_alloc_regions(evacuation_info);
3174 
3175         G1ParScanThreadStateSet per_thread_states(this, workers()->active_workers(), collection_set()->young_region_length());
3176         pre_evacuate_collection_set();
3177 
3178         // Actually do the work...
3179         evacuate_collection_set(evacuation_info, &per_thread_states);
3180 
3181         post_evacuate_collection_set(evacuation_info, &per_thread_states);
3182 
3183         const size_t* surviving_young_words = per_thread_states.surviving_young_words();
3184         free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
3185 
3186         eagerly_reclaim_humongous_regions();
3187 


3188         record_obj_copy_mem_stats();
3189         _survivor_evac_stats.adjust_desired_plab_sz();
3190         _old_evac_stats.adjust_desired_plab_sz();
3191 
3192         // Start a new incremental collection set for the next pause.
3193         collection_set()->start_incremental_building();
3194 
3195         clear_cset_fast_test();
3196 
3197         guarantee(_eden.length() == 0, "eden should have been cleared");
3198         g1_policy()->transfer_survivors_to_cset(survivor());
3199 
3200         if (evacuation_failed()) {
3201           set_used(recalculate_used());
3202           if (_archive_allocator != NULL) {
3203             _archive_allocator->clear_used();
3204           }
3205           for (uint i = 0; i < ParallelGCThreads; i++) {
3206             if (_evacuation_failed_info_array[i].has_failed()) {
3207               _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);


4579   HeapRegionClaimer _hrclaimer;
4580 
4581 public:
4582   G1ParScrubRemSetTask(G1RemSet* g1_rs, uint num_workers) :
4583     AbstractGangTask("G1 ScrubRS"),
4584     _g1rs(g1_rs),
4585     _hrclaimer(num_workers) {
4586   }
4587 
4588   void work(uint worker_id) {
4589     _g1rs->scrub(worker_id, &_hrclaimer);
4590   }
4591 };
4592 
4593 void G1CollectedHeap::scrub_rem_set() {
4594   uint num_workers = workers()->active_workers();
4595   G1ParScrubRemSetTask g1_par_scrub_rs_task(g1_rem_set(), num_workers);
4596   workers()->run_task(&g1_par_scrub_rs_task);
4597 }
4598 
4599 class G1FreeCollectionSetClosure : public HeapRegionClosure {
4600 private:
4601   const size_t* _surviving_young_words;














4602 
4603   FreeRegionList _local_free_list;
4604   size_t _rs_lengths;
4605   // Bytes used in successfully evacuated regions before the evacuation.
4606   size_t _before_used_bytes;
4607   // Bytes used in unsuccessfully evacuated regions after the evacuation.
4608   size_t _after_used_bytes;

4609 
4610   double _young_time;
4611   double _non_young_time;
4612 public:
4613   G1FreeCollectionSetClosure(const size_t* surviving_young_words) :
4614     HeapRegionClosure(),
4615     _surviving_young_words(surviving_young_words),
4616     _local_free_list("Local Region List for CSet Freeing"),
4617     _rs_lengths(0),
4618     _before_used_bytes(0),
4619     _after_used_bytes(0),
4620     _young_time(0.0),
4621     _non_young_time(0.0) {
4622   }





4623 
4624   virtual bool doHeapRegion(HeapRegion* r) {
4625     double start_time = os::elapsedTime();


4626 
4627     G1CollectedHeap* g1h = G1CollectedHeap::heap();
4628     assert(!g1h->is_on_master_free_list(r), "sanity");
4629 
4630     _rs_lengths += r->rem_set()->occupied_locked();



4631 
4632     assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
4633     g1h->clear_in_cset(r);




4634 
4635     if (r->is_young()) {
4636       int index = r->young_index_in_cset();
4637       assert(index != -1, "Young index in collection set must not be -1 for region %u", r->hrm_index());
4638       assert((uint) index < g1h->collection_set()->young_region_length(), "invariant");
4639       size_t words_survived = _surviving_young_words[index];
4640       r->record_surv_words_in_group(words_survived);
4641     } else {
4642       assert(r->young_index_in_cset() == -1, "Young index for old region %u in collection set must be -1", r->hrm_index());

4643     }
4644 
4645     if (!r->evacuation_failed()) {
4646       MemRegion used_mr = r->used_region();




4647 
4648       // The region must not be empty.
4649       assert(!used_mr.is_empty(), "Should not have empty regions in a collection set.");
4650       _before_used_bytes += r->used();
4651       g1h->free_region(r, &_local_free_list, false /* par */, true /* locked */);
4652     } else {
4653       r->uninstall_surv_rate_group();
4654       r->set_young_index_in_cset(-1);
4655       r->set_evacuation_failed(false);


4656       // When moving a young gen region to old gen, we "allocate" that whole region
4657       // there. This is in addition to any already evacuated objects. Notify the
4658       // policy about that.
4659       // Old gen regions do not cause an additional allocation: both the objects
4660       // still in the region and the ones already moved are accounted for elsewhere.
4661       if (r->is_young()) {
4662         g1h->g1_policy()->add_bytes_allocated_in_old_since_last_gc(HeapRegion::GrainBytes);
4663       }
4664       // The region is now considered to be old.
4665       r->set_old();
4666       // Do some allocation statistics accounting. Regions that failed evacuation
4667       // are always made old, so there is no need to update anything in the young
4668       // gen statistics, but we need to update old gen statistics.
4669       size_t used_words = r->marked_bytes() / HeapWordSize;
4670 
4671       g1h->alloc_buffer_stats(InCSetState::Old)->add_failure_used_and_waste(used_words, HeapRegion::GrainWords - used_words);
4672       g1h->old_set_add(r);
4673       _after_used_bytes += r->used();
4674     }
4675 
4676     if (r->is_young()) {
4677       _young_time += os::elapsedTime() - start_time;
4678     } else {
4679       _non_young_time += os::elapsedTime() - start_time;
4680     }
4681     return false;
4682   }
4683 
4684   FreeRegionList* local_free_list() { return &_local_free_list; }
4685   size_t rs_lengths() const { return _rs_lengths; }
4686   size_t before_used_bytes() const { return _before_used_bytes; }
4687   size_t after_used_bytes() const { return _after_used_bytes; }
4688 
4689   double young_time() const { return _young_time; }
4690   double non_young_time() const { return _non_young_time; }
4691 };
4692 
4693 void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
4694   _eden.clear();
4695 
4696   G1FreeCollectionSetClosure cl(surviving_young_words);
4697   collection_set_iterate(&cl);
4698 
4699   evacuation_info.set_regions_freed(cl.local_free_list()->length());
4700   evacuation_info.increment_collectionset_used_after(cl.after_used_bytes());
4701 
4702   G1Policy* policy = g1_policy();
4703 
4704   policy->record_max_rs_lengths(cl.rs_lengths());
4705   policy->cset_regions_freed();
4706 
4707   prepend_to_freelist(cl.local_free_list());
4708   decrement_summary_bytes(cl.before_used_bytes());
4709 
4710   policy->phase_times()->record_young_free_cset_time_ms(cl.young_time() * 1000.0);
4711   policy->phase_times()->record_non_young_free_cset_time_ms(cl.non_young_time() * 1000.0);



4712 
4713   collection_set->clear();



4714 }
4715 
4716 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
4717  private:
4718   FreeRegionList* _free_region_list;
4719   HeapRegionSet* _proxy_set;
4720   uint _humongous_regions_removed;
4721   size_t _freed_bytes;
4722  public:
4723 
4724   G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
4725     _free_region_list(free_region_list), _humongous_regions_removed(0), _freed_bytes(0) {
4726   }
4727 
4728   virtual bool doHeapRegion(HeapRegion* r) {
4729     if (!r->is_starts_humongous()) {
4730       return false;
4731     }
4732 
4733     G1CollectedHeap* g1h = G1CollectedHeap::heap();


4836   heap_region_iterate(&cl);
4837 
4838   remove_from_old_sets(0, cl.humongous_free_count());
4839 
4840   G1HRPrinter* hrp = hr_printer();
4841   if (hrp->is_active()) {
4842     FreeRegionListIterator iter(&local_cleanup_list);
4843     while (iter.more_available()) {
4844       HeapRegion* hr = iter.get_next();
4845       hrp->cleanup(hr);
4846     }
4847   }
4848 
4849   prepend_to_freelist(&local_cleanup_list);
4850   decrement_summary_bytes(cl.bytes_freed());
4851 
4852   g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
4853                                                                     cl.humongous_free_count());
4854 }
4855 
4856 class G1AbandonCollectionSetClosure : public HeapRegionClosure {
4857 public:
4858   virtual bool doHeapRegion(HeapRegion* r) {
4859     assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
4860     G1CollectedHeap::heap()->clear_in_cset(r);
4861     r->set_young_index_in_cset(-1);
4862     return false;











4863   }
4864 };
4865 
4866 void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
4867   G1AbandonCollectionSetClosure cl;
4868   collection_set->iterate(&cl);
4869 
4870   collection_set->clear();
4871   collection_set->stop_incremental_building();
4872 }
4873 
4874 void G1CollectedHeap::set_free_regions_coming() {
4875   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");
4876 
4877   assert(!free_regions_coming(), "pre-condition");
4878   _free_regions_coming = true;
4879 }
4880 
4881 void G1CollectedHeap::reset_free_regions_coming() {
4882   assert(free_regions_coming(), "pre-condition");
4883 
4884   {
4885     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
4886     _free_regions_coming = false;
4887     SecondaryFreeList_lock->notify_all();
4888   }
4889 
4890   log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : reset free regions coming");
4891 }

