src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 6802 : imported patch refactor-heapregionseq
rev 6803 : imported patch bengt-review-1
rev 6804 : imported patch commit-uncommit-within-heap
rev 6805 : imported patch mikael-suggestions
rev 6806 : [mq]: bengt-suggestions


  28 #endif
  29 
  30 #include "precompiled.hpp"
  31 #include "classfile/stringTable.hpp"
  32 #include "code/codeCache.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  35 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  36 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  37 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  38 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  39 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  40 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  41 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  42 #include "gc_implementation/g1/g1EvacFailure.hpp"
  43 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  44 #include "gc_implementation/g1/g1Log.hpp"
  45 #include "gc_implementation/g1/g1MarkSweep.hpp"
  46 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  47 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"

  48 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  49 #include "gc_implementation/g1/g1StringDedup.hpp"
  50 #include "gc_implementation/g1/g1YCTypes.hpp"
  51 #include "gc_implementation/g1/heapRegion.inline.hpp"
  52 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  53 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
  54 #include "gc_implementation/g1/vm_operations_g1.hpp"
  55 #include "gc_implementation/shared/gcHeapSummary.hpp"
  56 #include "gc_implementation/shared/gcTimer.hpp"
  57 #include "gc_implementation/shared/gcTrace.hpp"
  58 #include "gc_implementation/shared/gcTraceTime.hpp"
  59 #include "gc_implementation/shared/isGCActiveMark.hpp"
  60 #include "memory/allocation.hpp"
  61 #include "memory/gcLocker.inline.hpp"
  62 #include "memory/generationSpec.hpp"
  63 #include "memory/iterator.hpp"
  64 #include "memory/referenceProcessor.hpp"
  65 #include "oops/oop.inline.hpp"
  66 #include "oops/oop.pcgc.inline.hpp"
  67 #include "runtime/atomic.inline.hpp"


 364   const char* names[] = {"YOUNG", "SURVIVOR"};
 365 
 366   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
 367     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
 368     HeapRegion *curr = lists[list];
 369     if (curr == NULL)
 370       gclog_or_tty->print_cr("  empty");
 371     while (curr != NULL) {
 372       gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT", N: "PTR_FORMAT", age: %4d",
 373                              HR_FORMAT_PARAMS(curr),
 374                              curr->prev_top_at_mark_start(),
 375                              curr->next_top_at_mark_start(),
 376                              curr->age_in_surv_rate_group_cond());
 377       curr = curr->get_next_young_region();
 378     }
 379   }
 380 
 381   gclog_or_tty->cr();
 382 }
 383 

 384 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
 385 {
 386   // Claim the right to put the region on the dirty cards region list
 387   // by installing a self pointer.
 388   HeapRegion* next = hr->get_next_dirty_cards_region();
 389   if (next == NULL) {
 390     HeapRegion* res = (HeapRegion*)
 391       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
 392                           NULL);
 393     if (res == NULL) {
 394       HeapRegion* head;
 395       do {
 396         // Put the region to the dirty cards region list.
 397         head = _dirty_cards_region_list;
 398         next = (HeapRegion*)
 399           Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
 400         if (next == head) {
 401           assert(hr->get_next_dirty_cards_region() == hr,
 402                  "hr->get_next_dirty_cards_region() != hr");
 403           if (next == NULL) {


 743   verify_region_sets_optional();
 744 
 745   uint first = G1_NO_HRS_INDEX;
 746   uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
 747 
 748   if (obj_regions == 1) {
 749     // Only one region to allocate, try to use a fast path by directly allocating
 750     // from the free lists. Do not try to expand here, we will potentially do that
 751     // later.
 752     HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
 753     if (hr != NULL) {
 754       first = hr->hrs_index();
 755     }
 756   } else {
 757     // We can't allocate humongous regions spanning more than one region while
 758     // cleanupComplete() is running, since some of the regions we find to be
 759     // empty might not yet be added to the free list. It is not straightforward
 760     // to know which list they are on so that we can remove them. We only
 761     // need to do this if we need to allocate more than one region to satisfy the
 762     // current humongous allocation request. If we are only allocating one region
 763     // we use the one-region region allocation code (see above), or end up here.

 764     wait_while_free_regions_coming();
 765     append_secondary_free_list_if_not_empty_with_lock();
 766 
 767     // Policy: Try only empty regions (i.e. already committed) first. Maybe we
 768     // are lucky enough to find some.
 769     first = _hrs.find_contiguous(obj_regions, true);
 770     if (first != G1_NO_HRS_INDEX) {
 771       _hrs.allocate_free_regions_starting_at(first, obj_regions);
 772     }
 773   }
 774 
 775   if (first == G1_NO_HRS_INDEX) {
 776     // Policy: We could not find enough regions for the humongous object in the
 777     // free list. Look through the heap to find a mix of free and uncommitted regions.
 778     // If we find such a mix, try expansion.
 779     first = _hrs.find_contiguous(obj_regions, false);
 780     if (first != G1_NO_HRS_INDEX) {
 781       // We found something. Make sure these regions are committed, i.e. expand
 782       // the heap. Alternatively we could do a defragmentation GC.
 783       ergo_verbose1(ErgoHeapSizing,
 784                     "attempt heap expansion",
 785                     ergo_format_reason("humongous allocation request failed")
 786                     ergo_format_byte("allocation request"),
 787                     word_size * HeapWordSize);
 788 
 789       _hrs.expand_at(first, obj_regions);
 790       g1_policy()->record_new_heap_size(num_regions());
 791 
 792 #ifdef ASSERT
 793       for (uint i = first; i < first + obj_regions; ++i) {
 794         HeapRegion* hr = region_at(i);
 795         assert(hr->is_empty(), "sanity");
 796         assert(is_on_master_free_list(hr), "sanity");
 797       }
 798 #endif
 799       _hrs.allocate_free_regions_starting_at(first, obj_regions);
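
The obj_regions computation above rounds the requested word_size up to a whole number of regions with align_size_up_. A minimal standalone sketch of that arithmetic; the region size below is an illustrative assumption, not the configured HeapRegion::GrainWords:

#include <cstddef>
#include <cstdio>

// Illustrative stand-in for HeapRegion::GrainWords (1 MB regions, 8-byte words).
static const size_t kGrainWords = (size_t)1 << 17;

// Same idea as align_size_up_: round up to the next multiple of a power-of-two grain.
static size_t align_up(size_t size, size_t grain) {
  return (size + grain - 1) & ~(grain - 1);
}

int main() {
  size_t word_size = 300000; // a humongous request, in words
  unsigned obj_regions = (unsigned)(align_up(word_size, kGrainWords) / kGrainWords);
  printf("request of %zu words needs %u region(s)\n", word_size, obj_regions); // prints 3
  return 0;
}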


1937   // is calculated by subtracting the requested size from the
1938   // 32Gb boundary and using the result as the base address for
1939   // heap reservation. If the requested size is not aligned to
1940   // HeapRegion::GrainBytes (i.e. the alignment that is passed
1941   // into the ReservedHeapSpace constructor) then the actual
1942   // base of the reserved heap may end up differing from the
1943   // address that was requested (i.e. the preferred heap base).
1944   // If this happens then we could end up using a non-optimal
1945   // compressed oops mode.
1946 
1947   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1948                                                  heap_alignment);
1949 
1950   // It is important to do this in a way such that concurrent readers can't
1951   // temporarily think something is in the heap.  (I've actually seen this
1952   // happen in asserts: DLD.)
1953   _reserved.set_word_size(0);
1954   _reserved.set_start((HeapWord*)heap_rs.base());
1955   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
1956 
1957   _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
1958 
1959   // Create the gen rem set (and barrier set) for the entire reserved region.
1960   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
1961   set_barrier_set(rem_set()->bs());
1962   if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
1963     vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
1964     return JNI_ENOMEM;
1965   }
1966 
1967   // Also create a G1 rem set.
1968   _g1_rem_set = new G1RemSet(this, g1_barrier_set());
1969 
1970   // Carve out the G1 part of the heap.
1971 
1972   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1973   _hrs.initialize(g1_rs);
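
The comment block above explains that the preferred heap base is derived by subtracting the requested size from the 32Gb boundary, and that a request not aligned to the reservation alignment can shift the actual base and cost a better compressed oops mode. A small worked sketch of that calculation with assumed sizes; the real logic lives in Universe::reserve_heap and the ReservedHeapSpace constructor:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kOopEncodingLimit = UINT64_C(32) * 1024 * 1024 * 1024; // 32 Gb boundary
  const uint64_t heap_alignment    = UINT64_C(8) * 1024 * 1024;         // assumed alignment

  uint64_t requested      = UINT64_C(20) * 1024 * 1024 * 1024 + 123456; // not aligned
  uint64_t preferred_base = kOopEncodingLimit - requested;

  // If the request is later rounded up to the alignment, the reservation may no
  // longer start at preferred_base, which can force a less optimal oops mode.
  uint64_t aligned_request = (requested + heap_alignment - 1) & ~(heap_alignment - 1);
  printf("preferred base: %#llx, aligned request: %#llx\n",
         (unsigned long long)preferred_base, (unsigned long long)aligned_request);
  return 0;
}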


1974 
1975   assert(_hrs.max_length() == _expansion_regions,
1976          err_msg("max length: %u expansion regions: %u",
1977                  _hrs.max_length(), _expansion_regions));
1978 
1979   // Do later initialization work for concurrent refinement.
1980   _cg1r->init();
1981 
1982   // 6843694 - ensure that the maximum region index can fit
1983   // in the remembered set structures.
1984   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1985   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1986 
1987   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1988   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1989   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1990             "too many cards per region");
1991 
1992   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
1993 
1994   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
1995                                              heap_word_size(init_byte_size));
1996 
1997   _g1h = this;
1998 
1999   _in_cset_fast_test.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
2000   _humongous_is_live.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
2001 
2002   // Create the ConcurrentMark data structure and thread.
2003   // (Must do this late, so that "max_regions" is defined.)
2004   _cm = new ConcurrentMark(this, heap_rs);
2005   if (_cm == NULL || !_cm->completed_initialization()) {
2006     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2007     return JNI_ENOMEM;
2008   }
2009   _cmThread = _cm->cmThread();
2010 
2011   // Initialize the from_card cache structure of HeapRegionRemSet.
2012   HeapRegionRemSet::init_heap(max_regions());
2013 
2014   // Now expand into the initial heap size.
2015   if (!expand(init_byte_size)) {
2016     vm_shutdown_during_initialization("Failed to allocate initial heap.");
2017     return JNI_ENOMEM;
2018   }
2019 
2020   // Perform any initialization actions delegated to the policy.
2021   g1_policy()->init();
2022 
2023   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
2024                                                SATB_Q_FL_lock,


2041                                       Shared_DirtyCardQ_lock,
2042                                       &JavaThread::dirty_card_queue_set());
2043   }
2044 
2045   // Initialize the card queue set used to hold cards containing
2046   // references into the collection set.
2047   _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
2048                                              DirtyCardQ_CBL_mon,
2049                                              DirtyCardQ_FL_lock,
2050                                              -1, // never trigger processing
2051                                              -1, // no limit on length
2052                                              Shared_DirtyCardQ_lock,
2053                                              &JavaThread::dirty_card_queue_set());
2054 
2055   // In case we're keeping closure specialization stats, initialize those
2056   // counts and that mechanism.
2057   SpecializationStats::clear();
2058 
2059   // Here we allocate the dummy HeapRegion that is required by the
2060   // G1AllocRegion class.
2061 
2062   HeapRegion* dummy_region = _hrs.get_dummy_region();

2063   // We'll re-use the same region whether the alloc region will
2064   // require BOT updates or not and, if it doesn't, then a non-young
2065   // region will complain that it cannot support allocations without
2066   // BOT updates. So we'll tag the dummy region as young to avoid that.
2067   dummy_region->set_young();
2068   // Make sure it's full.
2069   dummy_region->set_top(dummy_region->end());
2070   G1AllocRegion::setup(this, dummy_region);
2071 
2072   init_mutator_alloc_region();
2073 
2074   // Create the monitoring and management support now that
2075   // values in the heap have been properly initialized.
2076   _g1mm = new G1MonitoringSupport(this);
2077 
2078   G1StringDedup::initialize();
2079 
2080   return JNI_OK;
2081 }
2082 


2463           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2464 
2465         // Schedule a standard evacuation pause. We're setting word_size
2466         // to 0 which means that we are not requesting a post-GC allocation.
2467         VM_G1IncCollectionPause op(gc_count_before,
2468                                    0,     /* word_size */
2469                                    false, /* should_initiate_conc_mark */
2470                                    g1_policy()->max_pause_time_ms(),
2471                                    cause);
2472         VMThread::execute(&op);
2473       } else {
2474         // Schedule a Full GC.
2475         VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
2476         VMThread::execute(&op);
2477       }
2478     }
2479   } while (retry_gc);
2480 }
2481 
2482 bool G1CollectedHeap::is_in(const void* p) const {
2483   if (_hrs.committed().contains(p)) {
2484     // Given that we know that p is in the committed space,
2485     // heap_region_containing_raw() should successfully
2486     // return the containing region.
2487     HeapRegion* hr = heap_region_containing_raw(p);
2488     return hr->is_in(p);
2489   } else {
2490     return false;
2491   }
2492 }
2493 


2494 // Iteration functions.
2495 
2496 // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
2497 
2498 class IterateOopClosureRegionClosure: public HeapRegionClosure {
2499   ExtendedOopClosure* _cl;
2500 public:
2501   IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
2502   bool doHeapRegion(HeapRegion* r) {
2503     if (!r->continuesHumongous()) {
2504       r->oop_iterate(_cl);
2505     }
2506     return false;
2507   }
2508 };
2509 
2510 void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
2511   IterateOopClosureRegionClosure blk(cl);
2512   heap_region_iterate(&blk);
2513 }


3351   }
3352   return false; // keep some compilers happy
3353 }
3354 
3355 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3356                                        const VerifyOption vo) const {
3357   switch (vo) {
3358   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
3359   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
3360   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
3361   default:                            ShouldNotReachHere();
3362   }
3363   return false; // keep some compilers happy
3364 }
3365 
3366 void G1CollectedHeap::print_on(outputStream* st) const {
3367   st->print(" %-20s", "garbage-first heap");
3368   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3369             capacity()/K, used_unlocked()/K);
3370   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
3371             _hrs.committed().start(),
3372             _hrs.committed().end(),
3373             _hrs.reserved().end());
3374   st->cr();
3375   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
3376   uint young_regions = _young_list->length();
3377   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
3378             (size_t) young_regions * HeapRegion::GrainBytes / K);
3379   uint survivor_regions = g1_policy()->recorded_survivor_regions();
3380   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
3381             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
3382   st->cr();
3383   MetaspaceAux::print_on(st);
3384 }
3385 
3386 void G1CollectedHeap::print_extended_on(outputStream* st) const {
3387   print_on(st);
3388 
3389   // Print the per-region information.
3390   st->cr();
3391   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3392                "HS=humongous(starts), HC=humongous(continues), "


4103         // saved_mark_word() will return top() between pauses, i.e.,
4104         // during concurrent refinement. So we don't need the
4105         // is_gc_active() check to decide which top to use when
4106         // scanning cards (see CR 7039627).
4107         increment_gc_time_stamp();
4108 
4109         verify_after_gc();
4110         check_bitmaps("GC End");
4111 
4112         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4113         ref_processor_stw()->verify_no_references_recorded();
4114 
4115         // CM reference discovery will be re-enabled if necessary.
4116       }
4117 
4118       // We should do this after we potentially expand the heap so
4119       // that all the COMMIT events are generated before the end GC
4120       // event, and after we retire the GC alloc regions so that all
4121       // RETIRE events are generated before the end GC event.
4122       _hr_printer.end_gc(false /* full */, (size_t) total_collections());
4123 
4124       if (mark_in_progress()) {
4125         concurrent_mark()->update_heap_boundaries(_hrs.committed());
4126       }
4127 
4128 #ifdef TRACESPINNING
4129       ParallelTaskTerminator::print_termination_counts();
4130 #endif
4131 
4132       gc_epilogue(false);
4133     }
4134 
4135     // Print the remainder of the GC log output.
4136     log_gc_footer(os::elapsedTime() - pause_start_sec);
4137 
4138     // It is not yet safe to tell the concurrent mark to
4139     // start as we have some optional output below. We don't want the
4140     // output from the concurrent mark thread interfering with this
4141     // logging output either.
4142 
4143     _hrs.verify_optional();
4144     verify_region_sets_optional();
4145 
4146     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());




  28 #endif
  29 
  30 #include "precompiled.hpp"
  31 #include "classfile/stringTable.hpp"
  32 #include "code/codeCache.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "gc_implementation/g1/bufferingOopClosure.hpp"
  35 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  36 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  37 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
  38 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
  39 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  40 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  41 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
  42 #include "gc_implementation/g1/g1EvacFailure.hpp"
  43 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
  44 #include "gc_implementation/g1/g1Log.hpp"
  45 #include "gc_implementation/g1/g1MarkSweep.hpp"
  46 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
  47 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
  48 #include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
  49 #include "gc_implementation/g1/g1RemSet.inline.hpp"
  50 #include "gc_implementation/g1/g1StringDedup.hpp"
  51 #include "gc_implementation/g1/g1YCTypes.hpp"
  52 #include "gc_implementation/g1/heapRegion.inline.hpp"
  53 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  54 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
  55 #include "gc_implementation/g1/vm_operations_g1.hpp"
  56 #include "gc_implementation/shared/gcHeapSummary.hpp"
  57 #include "gc_implementation/shared/gcTimer.hpp"
  58 #include "gc_implementation/shared/gcTrace.hpp"
  59 #include "gc_implementation/shared/gcTraceTime.hpp"
  60 #include "gc_implementation/shared/isGCActiveMark.hpp"
  61 #include "memory/allocation.hpp"
  62 #include "memory/gcLocker.inline.hpp"
  63 #include "memory/generationSpec.hpp"
  64 #include "memory/iterator.hpp"
  65 #include "memory/referenceProcessor.hpp"
  66 #include "oops/oop.inline.hpp"
  67 #include "oops/oop.pcgc.inline.hpp"
  68 #include "runtime/atomic.inline.hpp"


 365   const char* names[] = {"YOUNG", "SURVIVOR"};
 366 
 367   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
 368     gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
 369     HeapRegion *curr = lists[list];
 370     if (curr == NULL)
 371       gclog_or_tty->print_cr("  empty");
 372     while (curr != NULL) {
 373       gclog_or_tty->print_cr("  "HR_FORMAT", P: "PTR_FORMAT", N: "PTR_FORMAT", age: %4d",
 374                              HR_FORMAT_PARAMS(curr),
 375                              curr->prev_top_at_mark_start(),
 376                              curr->next_top_at_mark_start(),
 377                              curr->age_in_surv_rate_group_cond());
 378       curr = curr->get_next_young_region();
 379     }
 380   }
 381 
 382   gclog_or_tty->cr();
 383 }
 384 
 385 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
 386   OtherRegionsTable::invalidate(start_idx, num_regions);
 387 }
 388 
 389 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions) {
 390   reset_from_card_cache(start_idx, num_regions);
 391 }
 392 
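The new G1RegionMappingChangedListener above is a callback invoked when heap regions are committed, so that the from-card cache covering those regions can be invalidated via OtherRegionsTable::invalidate. A minimal sketch of that listener pattern with hypothetical names; it only assumes the on_commit(start_idx, num_regions) notification shown above:

#include <cstddef>
#include <cstdio>

// Hypothetical listener interface mirroring the on_commit() notification above.
class MappingChangedListener {
 public:
  virtual ~MappingChangedListener() {}
  virtual void on_commit(unsigned start_idx, size_t num_regions) = 0;
};

// A listener that just reports which region range should be invalidated.
class CacheInvalidatingListener : public MappingChangedListener {
 public:
  virtual void on_commit(unsigned start_idx, size_t num_regions) {
    // Stand-in for OtherRegionsTable::invalidate(start_idx, num_regions).
    printf("invalidate from-card cache for regions [%u, %zu)\n",
           start_idx, start_idx + num_regions);
  }
};

// The storage side calls the listener after committing new regions.
void commit_regions(MappingChangedListener* l, unsigned start_idx, size_t n) {
  // ... commit the backing memory for regions [start_idx, start_idx + n) ...
  if (l != NULL) {
    l->on_commit(start_idx, n);
  }
}

int main() {
  CacheInvalidatingListener listener;
  commit_regions(&listener, 10, 4);
  return 0;
}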
 393 void G1CollectedHeap::push_dirty_cards_region(HeapRegion* hr)
 394 {
 395   // Claim the right to put the region on the dirty cards region list
 396   // by installing a self pointer.
 397   HeapRegion* next = hr->get_next_dirty_cards_region();
 398   if (next == NULL) {
 399     HeapRegion* res = (HeapRegion*)
 400       Atomic::cmpxchg_ptr(hr, hr->next_dirty_cards_region_addr(),
 401                           NULL);
 402     if (res == NULL) {
 403       HeapRegion* head;
 404       do {
 405         // Put the region to the dirty cards region list.
 406         head = _dirty_cards_region_list;
 407         next = (HeapRegion*)
 408           Atomic::cmpxchg_ptr(hr, &_dirty_cards_region_list, head);
 409         if (next == head) {
 410           assert(hr->get_next_dirty_cards_region() == hr,
 411                  "hr->get_next_dirty_cards_region() != hr");
 412           if (next == NULL) {
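
push_dirty_cards_region() first claims the region by CAS-installing a self pointer into its next-dirty field, and only the winning thread then CAS-prepends the region onto the global list, with the self pointer doubling as the end-of-list marker. A simplified standalone sketch of that two-step push, using C++11 atomics in place of Atomic::cmpxchg_ptr and a hypothetical Region type:

#include <atomic>
#include <cstdio>

// next_dirty == nullptr means "not queued"; next_dirty == this marks the list tail,
// mirroring the self-pointer trick above.
struct Region {
  int id;
  std::atomic<Region*> next_dirty;
  explicit Region(int i) : id(i), next_dirty(nullptr) {}
};

static std::atomic<Region*> dirty_list_head(nullptr);

void push_dirty_region(Region* r) {
  // Step 1: claim the region by CAS-installing a self pointer. Only one thread wins;
  // everyone else sees a non-null next field and returns.
  Region* expected = nullptr;
  if (!r->next_dirty.compare_exchange_strong(expected, r)) {
    return;
  }
  // Step 2: prepend the claimed region to the global list with a CAS loop on the head.
  Region* head = dirty_list_head.load();
  while (true) {
    if (dirty_list_head.compare_exchange_weak(head, r)) {
      // If the list was non-empty, link to the old head; otherwise the self
      // pointer installed above doubles as the end-of-list marker.
      if (head != nullptr) {
        r->next_dirty.store(head);
      }
      return;
    }
    // compare_exchange_weak refreshed 'head'; retry.
  }
}

int main() {
  Region a(1), b(2);
  push_dirty_region(&a);
  push_dirty_region(&b);
  push_dirty_region(&a); // a no-op thanks to the claim step
  for (Region* r = dirty_list_head.load(); ; r = r->next_dirty.load()) {
    printf("region %d\n", r->id);
    if (r->next_dirty.load() == r) break; // self pointer == tail
  }
  return 0;
}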


 752   verify_region_sets_optional();
 753 
 754   uint first = G1_NO_HRS_INDEX;
 755   uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
 756 
 757   if (obj_regions == 1) {
 758     // Only one region to allocate, try to use a fast path by directly allocating
 759     // from the free lists. Do not try to expand here, we will potentially do that
 760     // later.
 761     HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
 762     if (hr != NULL) {
 763       first = hr->hrs_index();
 764     }
 765   } else {
 766     // We can't allocate humongous regions spanning more than one region while
 767     // cleanupComplete() is running, since some of the regions we find to be
 768     // empty might not yet be added to the free list. It is not straightforward
 769     // to know which list they are on so that we can remove them. We only
 770     // need to do this if we need to allocate more than one region to satisfy the
 771     // current humongous allocation request. If we are only allocating one region
 772     // we use the one-region region allocation code (see above), that already
 773     // potentially waits for regions from the secondary free list.
 774     wait_while_free_regions_coming();
 775     append_secondary_free_list_if_not_empty_with_lock();
 776 
 777     // Policy: Try only empty regions (i.e. already committed) first. Maybe we
 778     // are lucky enough to find some.
 779     first = _hrs.find_contiguous_only_empty(obj_regions);
 780     if (first != G1_NO_HRS_INDEX) {
 781       _hrs.allocate_free_regions_starting_at(first, obj_regions);
 782     }
 783   }
 784 
 785   if (first == G1_NO_HRS_INDEX) {
 786     // Policy: We could not find enough regions for the humongous object in the
 787     // free list. Look through the heap to find a mix of free and uncommitted regions.
 788     // If we find such a mix, try expansion.
 789     first = _hrs.find_contiguous_empty_or_unavailable(obj_regions);
 790     if (first != G1_NO_HRS_INDEX) {
 791       // We found something. Make sure these regions are committed, i.e. expand
 792       // the heap. Alternatively we could do a defragmentation GC.
 793       ergo_verbose1(ErgoHeapSizing,
 794                     "attempt heap expansion",
 795                     ergo_format_reason("humongous allocation request failed")
 796                     ergo_format_byte("allocation request"),
 797                     word_size * HeapWordSize);
 798 
 799       _hrs.expand_at(first, obj_regions);
 800       g1_policy()->record_new_heap_size(num_regions());
 801 
 802 #ifdef ASSERT
 803       for (uint i = first; i < first + obj_regions; ++i) {
 804         HeapRegion* hr = region_at(i);
 805         assert(hr->is_empty(), "sanity");
 806         assert(is_on_master_free_list(hr), "sanity");
 807       }
 808 #endif
 809       _hrs.allocate_free_regions_starting_at(first, obj_regions);
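
The allocation path above is a two-phase policy: first look for a contiguous run of already committed, empty regions; only if that fails, search again allowing uncommitted regions and expand the heap over the chosen range before handing the regions out. A toy model of that control flow; the region states and helpers are illustrative, not the HeapRegionSeq bookkeeping:

#include <cstdio>
#include <vector>

enum RegionState { Free, Used, Uncommitted };
static const unsigned kNoIndex = 0xFFFFFFFFu; // stand-in for G1_NO_HRS_INDEX

static unsigned find_contiguous(const std::vector<RegionState>& rs, unsigned num,
                                bool allow_uncommitted) {
  unsigned run = 0;
  for (unsigned i = 0; i < rs.size(); ++i) {
    bool ok = rs[i] == Free || (allow_uncommitted && rs[i] == Uncommitted);
    run = ok ? run + 1 : 0;
    if (run == num) return i - num + 1;
  }
  return kNoIndex;
}

unsigned allocate_humongous_range(std::vector<RegionState>& rs, unsigned obj_regions) {
  // Phase 1: only already committed, empty regions.
  unsigned first = find_contiguous(rs, obj_regions, false);
  if (first == kNoIndex) {
    // Phase 2: also consider uncommitted regions, then "expand" (commit) them.
    first = find_contiguous(rs, obj_regions, true);
    if (first == kNoIndex) return kNoIndex;
    for (unsigned i = first; i < first + obj_regions; ++i) {
      if (rs[i] == Uncommitted) rs[i] = Free; // models expand_at()
    }
  }
  for (unsigned i = first; i < first + obj_regions; ++i) rs[i] = Used; // allocate
  return first;
}

int main() {
  std::vector<RegionState> heap;
  heap.push_back(Used); heap.push_back(Free); heap.push_back(Used);
  heap.push_back(Uncommitted); heap.push_back(Uncommitted); heap.push_back(Free);
  printf("allocated at region %u\n", allocate_humongous_range(heap, 2)); // prints 3
  return 0;
}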


1947   // is calculated by subtracting the requested size from the
1948   // 32Gb boundary and using the result as the base address for
1949   // heap reservation. If the requested size is not aligned to
1950   // HeapRegion::GrainBytes (i.e. the alignment that is passed
1951   // into the ReservedHeapSpace constructor) then the actual
1952   // base of the reserved heap may end up differing from the
1953   // address that was requested (i.e. the preferred heap base).
1954   // If this happens then we could end up using a non-optimal
1955   // compressed oops mode.
1956 
1957   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
1958                                                  heap_alignment);
1959 
1960   // It is important to do this in a way such that concurrent readers can't
1961   // temporarily think something is in the heap.  (I've actually seen this
1962   // happen in asserts: DLD.)
1963   _reserved.set_word_size(0);
1964   _reserved.set_start((HeapWord*)heap_rs.base());
1965   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
1966 


1967   // Create the gen rem set (and barrier set) for the entire reserved region.
1968   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
1969   set_barrier_set(rem_set()->bs());
1970   if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
1971     vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
1972     return JNI_ENOMEM;
1973   }
1974 
1975   // Also create a G1 rem set.
1976   _g1_rem_set = new G1RemSet(this, g1_barrier_set());
1977 
1978   // Carve out the G1 part of the heap.
1979 
1980   ReservedSpace g1_rs = heap_rs.first_part(max_byte_size);
1981   G1RegionToSpaceMapper* heap_storage =
1982     G1RegionToSpaceMapper::create_mapper(g1_rs,
1983                                          UseLargePages ? os::large_page_size() : os::vm_page_size(),
1984                                          HeapRegion::GrainBytes,
1985                                          1,
1986                                          mtJavaHeap);
1987   heap_storage->set_mapping_changed_listener(&_listener);
1988 
1989   // Reserve space for the block offset table. We do not support automatic uncommit
1990   // for the card table at this time. BOT only.
1991   ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
1992   G1RegionToSpaceMapper* bot_storage =
1993     G1RegionToSpaceMapper::create_mapper(bot_rs,
1994                                          os::vm_page_size(),
1995                                          HeapRegion::GrainBytes,
1996                                          G1BlockOffsetSharedArray::N_bytes,
1997                                          mtGC);
1998 
1999   ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
2000   G1RegionToSpaceMapper* cardtable_storage =
2001     G1RegionToSpaceMapper::create_mapper(cardtable_rs,
2002                                          os::vm_page_size(),
2003                                          HeapRegion::GrainBytes,
2004                                          G1BlockOffsetSharedArray::N_bytes,
2005                                          mtGC);
2006   g1_barrier_set()->initialize(cardtable_storage);
2007
2008   // Reserve space for the card counts table.
2009   ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize));
2010   G1RegionToSpaceMapper* card_counts_storage =
2011     G1RegionToSpaceMapper::create_mapper(card_counts_rs,
2012                                          os::vm_page_size(),
2013                                          HeapRegion::GrainBytes,
2014                                          G1BlockOffsetSharedArray::N_bytes,
2015                                          mtGC);
2016 
2017   // Reserve space for prev and next bitmap.
2018   size_t bitmap_size = CMBitMap::compute_size(g1_rs.size());
2019 
2020   ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
2021   G1RegionToSpaceMapper* prev_bitmap_storage =
2022     G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs,
2023                                          os::vm_page_size(),
2024                                          HeapRegion::GrainBytes,
2025                                          CMBitMap::mark_distance(),
2026                                          mtGC);
2027 
2028   ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size));
2029   G1RegionToSpaceMapper* next_bitmap_storage =
2030     G1RegionToSpaceMapper::create_mapper(next_bitmap_rs,
2031                                          os::vm_page_size(),
2032                                          HeapRegion::GrainBytes,
2033                                          CMBitMap::mark_distance(),
2034                                          mtGC);
2035 
2036   _hrs.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
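
The new initialization code routes the heap and its auxiliary structures (bitmaps, BOT, card table, card counts) through G1RegionToSpaceMapper objects created with a page size, the region granularity, and a translation factor. A toy model of the idea, mapping a range of region indices onto the pages of a backing structure; the divisors here are illustrative assumptions, not the exact HotSpot constants:

#include <cstddef>
#include <cstdio>

// 'bytes_per_region' is how many bytes of this structure back one heap region,
// e.g. the full region size for the heap itself, or region_size / 512 for a
// block offset table with one entry per 512 heap bytes.
class RegionToSpaceMapper {
  size_t _page_size;
  size_t _bytes_per_region;
 public:
  RegionToSpaceMapper(size_t page_size, size_t bytes_per_region)
    : _page_size(page_size), _bytes_per_region(bytes_per_region) {}

  void commit_regions(unsigned start_idx, size_t num_regions) {
    size_t start_byte = (size_t)start_idx * _bytes_per_region;
    size_t end_byte   = start_byte + num_regions * _bytes_per_region;
    // Round out to whole pages before committing the backing memory.
    size_t first_page = start_byte / _page_size;
    size_t last_page  = (end_byte + _page_size - 1) / _page_size;
    printf("commit pages [%zu, %zu) for regions [%u, %zu)\n",
           first_page, last_page, start_idx, start_idx + num_regions);
    // ... an os-level commit of that page range would go here ...
  }
};

int main() {
  const size_t page = 4096;
  RegionToSpaceMapper heap_storage(page, 1024 * 1024);       // 1 MB regions, 1:1 backing
  RegionToSpaceMapper bot_storage(page, 1024 * 1024 / 512);  // one BOT byte per 512 bytes
  heap_storage.commit_regions(0, 2);
  bot_storage.commit_regions(0, 2);
  return 0;
}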


2037
2038   // Do later initialization work for concurrent refinement.
2039   _cg1r->init(card_counts_storage);
2040 
2041   // 6843694 - ensure that the maximum region index can fit
2042   // in the remembered set structures.
2043   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2044   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
2045 
2046   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2047   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2048   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2049             "too many cards per region");
2050 
2051   FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
2052 
2053   _bot_shared = new G1BlockOffsetSharedArray(_reserved, bot_storage);

2054 
2055   _g1h = this;
2056 
2057   _in_cset_fast_test.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
2058   _humongous_is_live.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes);
2059 
2060   // Create the ConcurrentMark data structure and thread.
2061   // (Must do this late, so that "max_regions" is defined.)
2062   _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
2063   if (_cm == NULL || !_cm->completed_initialization()) {
2064     vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2065     return JNI_ENOMEM;
2066   }
2067   _cmThread = _cm->cmThread();
2068 
2069   // Initialize the from_card cache structure of HeapRegionRemSet.
2070   HeapRegionRemSet::init_heap(max_regions());
2071 
2072   // Now expand into the initial heap size.
2073   if (!expand(init_byte_size)) {
2074     vm_shutdown_during_initialization("Failed to allocate initial heap.");
2075     return JNI_ENOMEM;
2076   }
2077 
2078   // Perform any initialization actions delegated to the policy.
2079   g1_policy()->init();
2080 
2081   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
2082                                                SATB_Q_FL_lock,
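
Earlier in this hunk, the guarantees bound max_regions() and CardsPerRegion by the largest positive value that RegionIdx_t and CardIdx_t can hold. A worked version of that arithmetic, assuming purely for illustration a 16-bit and a 32-bit signed typedef (not necessarily HotSpot's actual types):

#include <cstdint>
#include <cstdio>

int main() {
  const unsigned bits_per_byte = 8;

  typedef int16_t RegionIdx; // assumed width, not necessarily HotSpot's RegionIdx_t
  typedef int32_t CardIdx;   // assumed width, not necessarily HotSpot's CardIdx_t

  unsigned max_region_idx =
      (1u << (sizeof(RegionIdx) * bits_per_byte - 1)) - 1;        // 32767
  size_t max_cards_per_region =
      ((size_t)1 << (sizeof(CardIdx) * bits_per_byte - 1)) - 1;   // 2147483647

  printf("max region index: %u, max cards per region: %zu\n",
         max_region_idx, max_cards_per_region);
  return 0;
}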


2099                                       Shared_DirtyCardQ_lock,
2100                                       &JavaThread::dirty_card_queue_set());
2101   }
2102 
2103   // Initialize the card queue set used to hold cards containing
2104   // references into the collection set.
2105   _into_cset_dirty_card_queue_set.initialize(NULL, // Should never be called by the Java code
2106                                              DirtyCardQ_CBL_mon,
2107                                              DirtyCardQ_FL_lock,
2108                                              -1, // never trigger processing
2109                                              -1, // no limit on length
2110                                              Shared_DirtyCardQ_lock,
2111                                              &JavaThread::dirty_card_queue_set());
2112 
2113   // In case we're keeping closure specialization stats, initialize those
2114   // counts and that mechanism.
2115   SpecializationStats::clear();
2116 
2117   // Here we allocate the dummy HeapRegion that is required by the
2118   // G1AllocRegion class.

2119   HeapRegion* dummy_region = _hrs.get_dummy_region();
2120 
2121   // We'll re-use the same region whether the alloc region will
2122   // require BOT updates or not and, if it doesn't, then a non-young
2123   // region will complain that it cannot support allocations without
2124   // BOT updates. So we'll tag the dummy region as young to avoid that.
2125   dummy_region->set_young();
2126   // Make sure it's full.
2127   dummy_region->set_top(dummy_region->end());
2128   G1AllocRegion::setup(this, dummy_region);
2129 
2130   init_mutator_alloc_region();
2131 
2132   // Create the monitoring and management support now that
2133   // values in the heap have been properly initialized.
2134   _g1mm = new G1MonitoringSupport(this);
2135 
2136   G1StringDedup::initialize();
2137 
2138   return JNI_OK;
2139 }
2140 


2521           DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2522 
2523         // Schedule a standard evacuation pause. We're setting word_size
2524         // to 0 which means that we are not requesting a post-GC allocation.
2525         VM_G1IncCollectionPause op(gc_count_before,
2526                                    0,     /* word_size */
2527                                    false, /* should_initiate_conc_mark */
2528                                    g1_policy()->max_pause_time_ms(),
2529                                    cause);
2530         VMThread::execute(&op);
2531       } else {
2532         // Schedule a Full GC.
2533         VM_G1CollectFull op(gc_count_before, old_marking_count_before, cause);
2534         VMThread::execute(&op);
2535       }
2536     }
2537   } while (retry_gc);
2538 }
2539 
2540 bool G1CollectedHeap::is_in(const void* p) const {
2541   if (_hrs.reserved().contains(p)) {
2542     // Given that we know that p is in the reserved space,
2543     // heap_region_containing_raw() should successfully
2544     // return the containing region.
2545     HeapRegion* hr = heap_region_containing_raw(p);
2546     return hr->is_in(p);
2547   } else {
2548     return false;
2549   }
2550 }
2551 
2552 #ifdef ASSERT
2553 bool G1CollectedHeap::is_in_exact(const void* p) const {
2554   bool contains = reserved_region().contains(p);
2555   bool available = _hrs.is_available(addr_to_region((HeapWord*)p));
2556   if (contains && available) {
2557     return true;
2558   } else {
2559     return false;
2560   }
2561 }
2562 #endif
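
The patch switches is_in() to test the reserved range (refined by heap_region_containing_raw()) and adds a debug-only is_in_exact() that additionally requires the containing region to be available, i.e. committed. A toy model of that distinction, using an availability flag per region in place of _hrs.is_available():

#include <cstdint>
#include <cstdio>
#include <vector>

// Toy model: a reserved address range carved into fixed-size regions, of which
// only some are currently available (committed). Names are illustrative.
struct ToyHeap {
  uintptr_t reserved_start;
  size_t    region_bytes;
  std::vector<bool> available; // one flag per region

  bool is_in_reserved(const void* p) const {
    uintptr_t a = (uintptr_t)p;
    return a >= reserved_start &&
           a <  reserved_start + available.size() * region_bytes;
  }

  // Rough analogue of is_in(): the address must at least fall in the reserved range.
  bool is_in(const void* p) const { return is_in_reserved(p); }

  // Rough analogue of is_in_exact(): the containing region must also be available,
  // i.e. actually backed by committed memory.
  bool is_in_exact(const void* p) const {
    if (!is_in_reserved(p)) return false;
    size_t idx = ((uintptr_t)p - reserved_start) / region_bytes;
    return available[idx];
  }
};

int main() {
  ToyHeap h = { 0x100000, 0x1000, {true, false, true} };
  const void* p = (const void*)0x101800; // falls in region 1, which is uncommitted
  printf("is_in: %d, is_in_exact: %d\n", h.is_in(p), h.is_in_exact(p));
  return 0;
}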
2563 
2564 // Iteration functions.
2565 
2566 // Applies an ExtendedOopClosure onto all references of objects within a HeapRegion.
2567 
2568 class IterateOopClosureRegionClosure: public HeapRegionClosure {
2569   ExtendedOopClosure* _cl;
2570 public:
2571   IterateOopClosureRegionClosure(ExtendedOopClosure* cl) : _cl(cl) {}
2572   bool doHeapRegion(HeapRegion* r) {
2573     if (!r->continuesHumongous()) {
2574       r->oop_iterate(_cl);
2575     }
2576     return false;
2577   }
2578 };
2579 
2580 void G1CollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
2581   IterateOopClosureRegionClosure blk(cl);
2582   heap_region_iterate(&blk);
2583 }
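
oop_iterate() wraps the caller's ExtendedOopClosure in a HeapRegionClosure so that every region that is not a humongous continuation applies it to all of its references. A self-contained toy version of that adapter, with illustrative stand-ins for the closure and region types:

#include <cstdio>
#include <vector>

struct OopClosure {
  virtual ~OopClosure() {}
  virtual void do_oop(void** p) = 0;
};

struct Region {
  bool continues_humongous;
  std::vector<void*> refs; // the references "contained" in this region
};

struct RegionClosure {
  virtual ~RegionClosure() {}
  virtual bool do_region(Region& r) = 0; // return true to abort iteration
};

// Mirrors IterateOopClosureRegionClosure: apply the oop closure to every
// reference of every region that is not a humongous continuation.
struct IterateOopRegionClosure : public RegionClosure {
  OopClosure* _cl;
  explicit IterateOopRegionClosure(OopClosure* cl) : _cl(cl) {}
  virtual bool do_region(Region& r) {
    if (!r.continues_humongous) {
      for (size_t i = 0; i < r.refs.size(); ++i) _cl->do_oop(&r.refs[i]);
    }
    return false; // never abort
  }
};

struct CountingClosure : public OopClosure {
  size_t count;
  CountingClosure() : count(0) {}
  virtual void do_oop(void** /*p*/) { ++count; }
};

int main() {
  Region regions[2] = { {false, {nullptr, nullptr}}, {true, {nullptr}} };
  CountingClosure counter;
  IterateOopRegionClosure blk(&counter);
  for (int i = 0; i < 2; ++i) blk.do_region(regions[i]);
  printf("visited %zu references\n", counter.count); // the humongous continuation is skipped
  return 0;
}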


3421   }
3422   return false; // keep some compilers happy
3423 }
3424 
3425 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3426                                        const VerifyOption vo) const {
3427   switch (vo) {
3428   case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
3429   case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
3430   case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
3431   default:                            ShouldNotReachHere();
3432   }
3433   return false; // keep some compilers happy
3434 }
3435 
3436 void G1CollectedHeap::print_on(outputStream* st) const {
3437   st->print(" %-20s", "garbage-first heap");
3438   st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3439             capacity()/K, used_unlocked()/K);
3440   st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
3441             _hrs.reserved().start(),
3442             _hrs.reserved().start() + _hrs.length() + HeapRegion::GrainWords,
3443             _hrs.reserved().end());
3444   st->cr();
3445   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
3446   uint young_regions = _young_list->length();
3447   st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
3448             (size_t) young_regions * HeapRegion::GrainBytes / K);
3449   uint survivor_regions = g1_policy()->recorded_survivor_regions();
3450   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
3451             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
3452   st->cr();
3453   MetaspaceAux::print_on(st);
3454 }
3455 
3456 void G1CollectedHeap::print_extended_on(outputStream* st) const {
3457   print_on(st);
3458 
3459   // Print the per-region information.
3460   st->cr();
3461   st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3462                "HS=humongous(starts), HC=humongous(continues), "


4173         // saved_mark_word() will return top() between pauses, i.e.,
4174         // during concurrent refinement. So we don't need the
4175         // is_gc_active() check to decide which top to use when
4176         // scanning cards (see CR 7039627).
4177         increment_gc_time_stamp();
4178 
4179         verify_after_gc();
4180         check_bitmaps("GC End");
4181 
4182         assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
4183         ref_processor_stw()->verify_no_references_recorded();
4184 
4185         // CM reference discovery will be re-enabled if necessary.
4186       }
4187 
4188       // We should do this after we potentially expand the heap so
4189       // that all the COMMIT events are generated before the end GC
4190       // event, and after we retire the GC alloc regions so that all
4191       // RETIRE events are generated before the end GC event.
4192       _hr_printer.end_gc(false /* full */, (size_t) total_collections());




4193 
4194 #ifdef TRACESPINNING
4195       ParallelTaskTerminator::print_termination_counts();
4196 #endif
4197 
4198       gc_epilogue(false);
4199     }
4200 
4201     // Print the remainder of the GC log output.
4202     log_gc_footer(os::elapsedTime() - pause_start_sec);
4203 
4204     // It is not yet safe to tell the concurrent mark to
4205     // start as we have some optional output below. We don't want the
4206     // output from the concurrent mark thread interfering with this
4207     // logging output either.
4208 
4209     _hrs.verify_optional();
4210     verify_region_sets_optional();
4211 
4212     TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats());