
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 56323 : imported patch 8220310.mut.0
rev 56324 : imported patch 8220310.mut.1_thomas
rev 56326 : [mq]: 8220310.mut.1-3_kim


 152 
 153 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
 154   // The from card cache is not the memory that is actually committed. So we cannot
 155   // take advantage of the zero_filled parameter.
 156   reset_from_card_cache(start_idx, num_regions);
 157 }
 158 
 159 Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) {
 160   Ticks start = Ticks::now();
 161   workers()->run_task(task, workers()->active_workers());
 162   return Ticks::now() - start;
 163 }
 164 
 165 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
 166                                              MemRegion mr) {
 167   return new HeapRegion(hrs_index, bot(), mr);
 168 }
 169 
 170 // Private methods.
 171 
 172 HeapRegion* G1CollectedHeap::new_region(size_t word_size, HeapRegionType type, bool do_expand) {



 173   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
 174          "the only time we use this to allocate a humongous region is "
 175          "when we are allocating a single humongous region");
 176 
 177   HeapRegion* res = _hrm->allocate_free_region(type);
 178 
 179   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 180     // Currently, only attempts to allocate GC alloc regions set
 181     // do_expand to true. So, we should only reach here during a
 182     // safepoint. If this assumption changes we might have to
 183     // reconsider the use of _expand_heap_after_alloc_failure.
 184     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 185 
 186     log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
 187                               word_size * HeapWordSize);
 188 
 189     if (expand(word_size * HeapWordSize)) {
 190       // Given that expand() succeeded in expanding the heap, and we
 191       // always expand the heap by an amount aligned to the heap
 192       // region size, the free list should in theory not be empty.
 193       // In either case allocate_free_region() will check for NULL.
 194       res = _hrm->allocate_free_region(type);
 195     } else {
 196       _expand_heap_after_alloc_failure = false;
 197     }
 198   }
 199   return res;
 200 }
 201 
 202 HeapWord*
 203 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 204                                                            uint num_regions,
 205                                                            size_t word_size) {
 206   assert(first != G1_NO_HRM_INDEX, "pre-condition");
 207   assert(is_humongous(word_size), "word_size should be humongous");
 208   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 209 
 210   // Index of last region in the series.
 211   uint last = first + num_regions - 1;
 212 
 213   // We need to initialize the region(s) we just discovered. This is
 214   // a bit tricky given that it can happen concurrently with


 345   } else {
 346     // Policy: Try only empty (i.e. already committed) regions first. Maybe we
 347     // are lucky enough to find some.
 348     first = _hrm->find_contiguous_only_empty(obj_regions);
 349     if (first != G1_NO_HRM_INDEX) {
 350       _hrm->allocate_free_regions_starting_at(first, obj_regions);
 351     }
 352   }
 353 
 354   if (first == G1_NO_HRM_INDEX) {
 355     // Policy: We could not find enough regions for the humongous object in the
 356     // free list. Look through the heap for a contiguous run of free and
 357     // uncommitted regions; if we find one, try expansion.
 358     first = _hrm->find_contiguous_empty_or_unavailable(obj_regions);
 359     if (first != G1_NO_HRM_INDEX) {
 360       // We found something. Make sure these regions are committed, i.e. expand
 361       // the heap. Alternatively we could do a defragmentation GC.
 362       log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
 363                                     word_size * HeapWordSize);
 364 
 365       _hrm->expand_at(first, obj_regions, workers());
 366       policy()->record_new_heap_size(num_regions());
 367 
 368 #ifdef ASSERT
 369       for (uint i = first; i < first + obj_regions; ++i) {
 370         HeapRegion* hr = region_at(i);
 371         assert(hr->is_free(), "sanity");
 372         assert(hr->is_empty(), "sanity");
 373         assert(is_on_master_free_list(hr), "sanity");
 374       }
 375 #endif
 376       _hrm->allocate_free_regions_starting_at(first, obj_regions);
 377     } else {
 378       // Policy: Potentially trigger a defragmentation GC.
 379     }
 380   }
 381 
 382   HeapWord* result = NULL;
 383   if (first != G1_NO_HRM_INDEX) {
 384     result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
 385     assert(result != NULL, "it should always return a valid result");


1206          minimum_desired_capacity, maximum_desired_capacity);
1207 
1208   // Should not be greater than the heap max size. No need to adjust
1209   // it with respect to the heap min size as it's a lower bound (i.e.,
1210   // we'll try to make the capacity larger than it, not smaller).
1211   minimum_desired_capacity = MIN2(minimum_desired_capacity, MaxHeapSize);
1212   // Should not be less than the heap min size. No need to adjust it
1213   // with respect to the heap max size as it's an upper bound (i.e.,
1214   // we'll try to make the capacity smaller than it, not greater).
1215   maximum_desired_capacity = MAX2(maximum_desired_capacity, MinHeapSize);
1216 
1217   if (capacity_after_gc < minimum_desired_capacity) {
1218     // Don't expand unless it's significant
1219     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1220 
1221     log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity). "
1222                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
1223                               "min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1224                               capacity_after_gc, used_after_gc, used(), minimum_desired_capacity, MinHeapFreeRatio);
1225 
1226     expand(expand_bytes, _workers);
1227 
1228     // No expansion, now see if we want to shrink
1229   } else if (capacity_after_gc > maximum_desired_capacity) {
1230     // Capacity too large, compute shrinking size
1231     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1232 
1233     log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity). "
1234                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
1235                               "maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1236                               capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio);
1237 
1238     shrink(shrink_bytes);
1239   }
1240 }
1241 
1242 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1243                                                             bool do_gc,
1244                                                             bool clear_all_soft_refs,
1245                                                             bool expect_null_mutator_alloc_region,
1246                                                             bool* gc_succeeded) {


1317   // complete compaction phase than we've tried so far might be
1318   // appropriate.
1319   return NULL;
1320 }
1321 
1322 // Attempt to expand the heap sufficiently
1323 // to support an allocation of the given "word_size". If
1324 // successful, perform the allocation and return the address of the
1325 // allocated block; otherwise return "NULL".
1326 
1327 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1328   assert_at_safepoint_on_vm_thread();
1329 
1330   _verifier->verify_region_sets_optional();
1331 
1332   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1333   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1334                             word_size * HeapWordSize);
1335 
1336 
1337   if (expand(expand_bytes, _workers)) {
1338     _hrm->verify_optional();
1339     _verifier->verify_region_sets_optional();
1340     return attempt_allocation_at_safepoint(word_size,
1341                                            false /* expect_null_mutator_alloc_region */);
1342   }
1343   return NULL;
1344 }
1345 
1346 bool G1CollectedHeap::expand(size_t expand_bytes, WorkGang* pretouch_workers, double* expand_time_ms) {
1347   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1348   aligned_expand_bytes = align_up(aligned_expand_bytes,
1349                                        HeapRegion::GrainBytes);
1350 
1351   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
1352                             expand_bytes, aligned_expand_bytes);
1353 
1354   if (is_maximal_no_gc()) {
1355     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1356     return false;
1357   }
1358 
1359   double expand_heap_start_time_sec = os::elapsedTime();
1360   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1361   assert(regions_to_expand > 0, "Must expand by at least one region");
1362 
1363   uint expanded_by = _hrm->expand_by(regions_to_expand, pretouch_workers);
1364   if (expand_time_ms != NULL) {
1365     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1366   }
1367 
1368   if (expanded_by > 0) {
1369     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1370     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1371     policy()->record_new_heap_size(num_regions());
1372   } else {
1373     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1374 
1375     // The expansion of the virtual storage space was unsuccessful.
1376     // Let's see if it was because we ran out of swap.
1377     if (G1ExitOnExpansionFailure &&
1378         _hrm->available() >= regions_to_expand) {
1379       // We had head room...
1380       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1381     }
1382   }
1383   return regions_to_expand > 0;
1384 }
1385 
1386 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1387   size_t aligned_shrink_bytes =
1388     ReservedSpace::page_align_size_down(shrink_bytes);
1389   aligned_shrink_bytes = align_down(aligned_shrink_bytes,
1390                                          HeapRegion::GrainBytes);
1391   uint num_regions_to_remove = (uint)(aligned_shrink_bytes / HeapRegion::GrainBytes);
1392 
1393   uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
1394   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1395 
1396 
1397   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1398                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1399   if (num_regions_removed > 0) {
1400     policy()->record_new_heap_size(num_regions());
1401   } else {
1402     log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)");
1403   }
1404 }
1405 
1406 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1407   _verifier->verify_region_sets_optional();
1408 
1409   // We should only reach here at the end of a Full GC or during Remark which
1410   // means we should not be holding on to any GC alloc regions. The method
1411   // below will make sure of that and do any remaining clean up.
1412   _allocator->abandon_gc_alloc_regions();
1413 
1414   // Instead of tearing down / rebuilding the free lists here, we
1415   // could instead use the remove_all_pending() method on free_list to
1416   // remove only the ones that we need to remove.


1478     } else {
1479       guarantee(Heap_lock->owned_by_self(),
1480                 "master humongous set MT safety protocol outside a safepoint");
1481     }
1482   }
1483   bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }
1484   const char* get_description() { return "Humongous Regions"; }
1485 };
1486 
1487 G1CollectedHeap::G1CollectedHeap() :
1488   CollectedHeap(),
1489   _young_gen_sampling_thread(NULL),
1490   _workers(NULL),
1491   _card_table(NULL),
1492   _soft_ref_policy(),
1493   _old_set("Old Region Set", new OldRegionSetChecker()),
1494   _archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
1495   _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
1496   _bot(NULL),
1497   _listener(),

1498   _hrm(NULL),
1499   _allocator(NULL),
1500   _verifier(NULL),
1501   _summary_bytes_used(0),
1502   _archive_allocator(NULL),
1503   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1504   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1505   _expand_heap_after_alloc_failure(true),
1506   _g1mm(NULL),
1507   _humongous_reclaim_candidates(),
1508   _has_humongous_reclaim_candidates(false),
1509   _hr_printer(),
1510   _collector_state(),
1511   _old_marking_cycles_started(0),
1512   _old_marking_cycles_completed(0),
1513   _eden(),
1514   _survivor(),
1515   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1516   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1517   _policy(G1Policy::create_policy(_gc_timer_stw)),


1761 
1762   _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1763 
1764   {
1765     HeapWord* start = _hrm->reserved().start();
1766     HeapWord* end = _hrm->reserved().end();
1767     size_t granularity = HeapRegion::GrainBytes;
1768 
1769     _region_attr.initialize(start, end, granularity);
1770     _humongous_reclaim_candidates.initialize(start, end, granularity);
1771   }
1772 
1773   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1774                           true /* are_GC_task_threads */,
1775                           false /* are_ConcurrentGC_threads */);
1776   if (_workers == NULL) {
1777     return JNI_ENOMEM;
1778   }
1779   _workers->initialize_workers();
1780 


1781   // Create the G1ConcurrentMark data structure and thread.
1782   // (Must do this late, so that "max_regions" is defined.)
1783   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1784   if (_cm == NULL || !_cm->completed_initialization()) {
1785     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1786     return JNI_ENOMEM;
1787   }
1788   _cm_thread = _cm->cm_thread();
1789 
1790   // Now expand into the initial heap size.
1791   if (!expand(init_byte_size, _workers)) {
1792     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1793     return JNI_ENOMEM;
1794   }
1795 
1796   // Perform any initialization actions delegated to the policy.
1797   policy()->init(this, &_collection_set);
1798 
1799   jint ecode = initialize_concurrent_refinement();
1800   if (ecode != JNI_OK) {
1801     return ecode;
1802   }
1803 
1804   ecode = initialize_young_gen_sampling_thread();
1805   if (ecode != JNI_OK) {
1806     return ecode;
1807   }
1808 
1809   {
1810     G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1811     dcqs.set_process_cards_threshold(concurrent_refine()->yellow_zone());


2869   _verifier->verify_before_gc(type);
2870   _verifier->check_bitmaps("GC Start");
2871 }
2872 
2873 void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType type) {
2874   if (VerifyRememberedSets) {
2875     log_info(gc, verify)("[Verifying RemSets after GC]");
2876     VerifyRegionRemSetClosure v_cl;
2877     heap_region_iterate(&v_cl);
2878   }
2879   _verifier->verify_after_gc(type);
2880   _verifier->check_bitmaps("GC End");
2881 }
2882 
2883 void G1CollectedHeap::expand_heap_after_young_collection() {
2884   size_t expand_bytes = _heap_sizing_policy->expansion_amount();
2885   if (expand_bytes > 0) {
2886     // No need for ergo logging here;
2887     // expansion_amount() does this when it returns a value > 0.
2888     double expand_ms = 0.0; // expand() may return false without setting it
2889     if (!expand(expand_bytes, _workers, &expand_ms)) {
2890       // We failed to expand the heap. Cannot do anything about it.
2891     }
2892     phase_times()->record_expand_heap_time(expand_ms);
2893   }
2894 }
2895 
2896 const char* G1CollectedHeap::young_gc_name() const {
2897   if (collector_state()->in_initial_mark_gc()) {
2898     return "Pause Young (Concurrent Start)";
2899   } else if (collector_state()->in_young_only_phase()) {
2900     if (collector_state()->in_young_gc_before_mixed()) {
2901       return "Pause Young (Prepare Mixed)";
2902     } else {
2903       return "Pause Young (Normal)";
2904     }
2905   } else {
2906     return "Pause Young (Mixed)";
2907   }
2908 }
2909 


4530   if (!free_list_only) {
4531     _eden.clear();
4532     _survivor.clear();
4533   }
4534 
4535   RebuildRegionSetsClosure cl(free_list_only, &_old_set, _hrm);
4536   heap_region_iterate(&cl);
4537 
4538   if (!free_list_only) {
4539     set_used(cl.total_used());
4540     if (_archive_allocator != NULL) {
4541       _archive_allocator->clear_used();
4542     }
4543   }
4544   assert_used_and_recalculate_used_equal(this);
4545 }
4546 
4547 // Methods for the mutator alloc region
4548 
4549 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
4550                                                       bool force) {

4551   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4552   bool should_allocate = policy()->should_allocate_mutator_region();
4553   if (force || should_allocate) {
4554     HeapRegion* new_alloc_region = new_region(word_size,
4555                                               HeapRegionType::Eden,
4556                                               false /* do_expand */);

4557     if (new_alloc_region != NULL) {
4558       set_region_short_lived_locked(new_alloc_region);
4559       _hr_printer.alloc(new_alloc_region, !should_allocate);
4560       _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
4561       _policy->remset_tracker()->update_at_allocate(new_alloc_region);
4562       return new_alloc_region;
4563     }
4564   }
4565   return NULL;
4566 }
4567 
4568 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
4569                                                   size_t allocated_bytes) {
4570   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4571   assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
4572 
4573   collection_set()->add_eden_region(alloc_region);
4574   increase_used(allocated_bytes);
4575   _eden.add_used_bytes(allocated_bytes);
4576   _hr_printer.retire(alloc_region);




 152 
 153 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
 154   // The from card cache is not the memory that is actually committed. So we cannot
 155   // take advantage of the zero_filled parameter.
 156   reset_from_card_cache(start_idx, num_regions);
 157 }
 158 
 159 Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) {
 160   Ticks start = Ticks::now();
 161   workers()->run_task(task, workers()->active_workers());
 162   return Ticks::now() - start;
 163 }
 164 
 165 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
 166                                              MemRegion mr) {
 167   return new HeapRegion(hrs_index, bot(), mr);
 168 }
 169 
 170 // Private methods.
 171 
 172 HeapRegion* G1CollectedHeap::new_region(size_t word_size,
 173                                         HeapRegionType type,
 174                                         bool do_expand,
 175                                         uint node_index) {
 176   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
 177          "the only time we use this to allocate a humongous region is "
 178          "when we are allocating a single humongous region");
 179 
 180   HeapRegion* res = _hrm->allocate_free_region(type, node_index);
 181 
 182   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
 183     // Currently, only attempts to allocate GC alloc regions set
 184     // do_expand to true. So, we should only reach here during a
 185     // safepoint. If this assumption changes we might have to
 186     // reconsider the use of _expand_heap_after_alloc_failure.
 187     assert(SafepointSynchronize::is_at_safepoint(), "invariant");
 188 
 189     log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
 190                               word_size * HeapWordSize);
 191 
 192     if (expand(word_size * HeapWordSize, node_index)) {
 193       // Given that expand() succeeded in expanding the heap, and we
 194       // always expand the heap by an amount aligned to the heap
 195       // region size, the free list should in theory not be empty.
 196       // In either case allocate_free_region() will check for NULL.
 197       res = _hrm->allocate_free_region(type, node_index);
 198     } else {
 199       _expand_heap_after_alloc_failure = false;
 200     }
 201   }
 202   return res;
 203 }
 204 
 205 HeapWord*
 206 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
 207                                                            uint num_regions,
 208                                                            size_t word_size) {
 209   assert(first != G1_NO_HRM_INDEX, "pre-condition");
 210   assert(is_humongous(word_size), "word_size should be humongous");
 211   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 212 
 213   // Index of last region in the series.
 214   uint last = first + num_regions - 1;
 215 
 216   // We need to initialize the region(s) we just discovered. This is
 217   // a bit tricky given that it can happen concurrently with


 348   } else {
 349     // Policy: Try only empty (i.e. already committed) regions first. Maybe we
 350     // are lucky enough to find some.
 351     first = _hrm->find_contiguous_only_empty(obj_regions);
 352     if (first != G1_NO_HRM_INDEX) {
 353       _hrm->allocate_free_regions_starting_at(first, obj_regions);
 354     }
 355   }
 356 
 357   if (first == G1_NO_HRM_INDEX) {
 358     // Policy: We could not find enough regions for the humongous object in the
 359     // free list. Look through the heap for a contiguous run of free and
 360     // uncommitted regions; if we find one, try expansion.
 361     first = _hrm->find_contiguous_empty_or_unavailable(obj_regions);
 362     if (first != G1_NO_HRM_INDEX) {
 363       // We found something. Make sure these regions are committed, i.e. expand
 364       // the heap. Alternatively we could do a defragmentation GC.
 365       log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
 366                                     word_size * HeapWordSize);
 367 
 368       _hrm->expand_at(first, obj_regions, G1MemoryNodeManager::AnyNodeIndex, workers());
 369       policy()->record_new_heap_size(num_regions());
 370 
 371 #ifdef ASSERT
 372       for (uint i = first; i < first + obj_regions; ++i) {
 373         HeapRegion* hr = region_at(i);
 374         assert(hr->is_free(), "sanity");
 375         assert(hr->is_empty(), "sanity");
 376         assert(is_on_master_free_list(hr), "sanity");
 377       }
 378 #endif
 379       _hrm->allocate_free_regions_starting_at(first, obj_regions);
 380     } else {
 381       // Policy: Potentially trigger a defragmentation GC.
 382     }
 383   }
 384 
 385   HeapWord* result = NULL;
 386   if (first != G1_NO_HRM_INDEX) {
 387     result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
 388     assert(result != NULL, "it should always return a valid result");


1209          minimum_desired_capacity, maximum_desired_capacity);
1210 
1211   // Should not be greater than the heap max size. No need to adjust
1212   // it with respect to the heap min size as it's a lower bound (i.e.,
1213   // we'll try to make the capacity larger than it, not smaller).
1214   minimum_desired_capacity = MIN2(minimum_desired_capacity, MaxHeapSize);
1215   // Should not be less than the heap min size. No need to adjust it
1216   // with respect to the heap max size as it's an upper bound (i.e.,
1217   // we'll try to make the capacity smaller than it, not greater).
1218   maximum_desired_capacity = MAX2(maximum_desired_capacity, MinHeapSize);
1219 
1220   if (capacity_after_gc < minimum_desired_capacity) {
1221     // Don't expand unless it's significant
1222     size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
1223 
1224     log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity). "
1225                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
1226                               "min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1227                               capacity_after_gc, used_after_gc, used(), minimum_desired_capacity, MinHeapFreeRatio);
1228 
1229     expand(expand_bytes, G1MemoryNodeManager::AnyNodeIndex, _workers);
1230 
1231     // No expansion, now see if we want to shrink
1232   } else if (capacity_after_gc > maximum_desired_capacity) {
1233     // Capacity too large, compute shrinking size
1234     size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity;
1235 
1236     log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity). "
1237                               "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B live: " SIZE_FORMAT "B "
1238                               "maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1239                               capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio);
1240 
1241     shrink(shrink_bytes);
1242   }
1243 }
1244 
1245 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1246                                                             bool do_gc,
1247                                                             bool clear_all_soft_refs,
1248                                                             bool expect_null_mutator_alloc_region,
1249                                                             bool* gc_succeeded) {


1320   // complete compaction phase than we've tried so far might be
1321   // appropriate.
1322   return NULL;
1323 }
1324 
1325 // Attempt to expand the heap sufficiently
1326 // to support an allocation of the given "word_size". If
1327 // successful, perform the allocation and return the address of the
1328 // allocated block; otherwise return "NULL".
1329 
1330 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
1331   assert_at_safepoint_on_vm_thread();
1332 
1333   _verifier->verify_region_sets_optional();
1334 
1335   size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1336   log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1337                             word_size * HeapWordSize);
1338 
1339 
1340   if (expand(expand_bytes, G1MemoryNodeManager::AnyNodeIndex, _workers)) {
1341     _hrm->verify_optional();
1342     _verifier->verify_region_sets_optional();
1343     return attempt_allocation_at_safepoint(word_size,
1344                                            false /* expect_null_mutator_alloc_region */);
1345   }
1346   return NULL;
1347 }
1348 
1349 bool G1CollectedHeap::expand(size_t expand_bytes, uint node_index, WorkGang* pretouch_workers, double* expand_time_ms) {
1350   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
1351   aligned_expand_bytes = align_up(aligned_expand_bytes,
1352                                        HeapRegion::GrainBytes);
1353 
1354   log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B",
1355                             expand_bytes, aligned_expand_bytes);
1356 
1357   if (is_maximal_no_gc()) {
1358     log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
1359     return false;
1360   }
1361 
1362   double expand_heap_start_time_sec = os::elapsedTime();
1363   uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes);
1364   assert(regions_to_expand > 0, "Must expand by at least one region");
1365 
1366   uint expanded_by = _hrm->expand_by(regions_to_expand, node_index, pretouch_workers);
1367   if (expand_time_ms != NULL) {
1368     *expand_time_ms = (os::elapsedTime() - expand_heap_start_time_sec) * MILLIUNITS;
1369   }
1370 
1371   if (expanded_by > 0) {
1372     size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
1373     assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
1374     policy()->record_new_heap_size(num_regions());
1375   } else {
1376     log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");
1377 
1378     // The expansion of the virtual storage space was unsuccessful.
1379     // Let's see if it was because we ran out of swap.
1380     if (G1ExitOnExpansionFailure &&
1381         _hrm->available() >= regions_to_expand) {
1382       // We had head room...
1383       vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
1384     }
1385   }
1386   return regions_to_expand > 0;
1387 }
1388 
1389 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
1390   size_t aligned_shrink_bytes =
1391     ReservedSpace::page_align_size_down(shrink_bytes);
1392   aligned_shrink_bytes = align_down(aligned_shrink_bytes,
1393                                          HeapRegion::GrainBytes);
1394   uint num_regions_to_remove = (uint)(aligned_shrink_bytes / HeapRegion::GrainBytes);
1395 
1396   uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
1397   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
1398 

1399   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
1400                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
1401   if (num_regions_removed > 0) {
1402     policy()->record_new_heap_size(num_regions());
1403   } else {
1404     log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)");
1405   }
1406 }
1407 
1408 void G1CollectedHeap::shrink(size_t shrink_bytes) {
1409   _verifier->verify_region_sets_optional();
1410 
1411   // We should only reach here at the end of a Full GC or during Remark which
1412   // means we should not be holding on to any GC alloc regions. The method
1413   // below will make sure of that and do any remaining clean up.
1414   _allocator->abandon_gc_alloc_regions();
1415 
1416   // Instead of tearing down / rebuilding the free lists here, we
1417   // could instead use the remove_all_pending() method on free_list to
1418   // remove only the ones that we need to remove.


1480     } else {
1481       guarantee(Heap_lock->owned_by_self(),
1482                 "master humongous set MT safety protocol outside a safepoint");
1483     }
1484   }
1485   bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }
1486   const char* get_description() { return "Humongous Regions"; }
1487 };
1488 
1489 G1CollectedHeap::G1CollectedHeap() :
1490   CollectedHeap(),
1491   _young_gen_sampling_thread(NULL),
1492   _workers(NULL),
1493   _card_table(NULL),
1494   _soft_ref_policy(),
1495   _old_set("Old Region Set", new OldRegionSetChecker()),
1496   _archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
1497   _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
1498   _bot(NULL),
1499   _listener(),
1500   _mem_node_mgr(G1MemoryNodeManager::create()),
1501   _hrm(NULL),
1502   _allocator(NULL),
1503   _verifier(NULL),
1504   _summary_bytes_used(0),
1505   _archive_allocator(NULL),
1506   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1507   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1508   _expand_heap_after_alloc_failure(true),
1509   _g1mm(NULL),
1510   _humongous_reclaim_candidates(),
1511   _has_humongous_reclaim_candidates(false),
1512   _hr_printer(),
1513   _collector_state(),
1514   _old_marking_cycles_started(0),
1515   _old_marking_cycles_completed(0),
1516   _eden(),
1517   _survivor(),
1518   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1519   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1520   _policy(G1Policy::create_policy(_gc_timer_stw)),


1764 
1765   _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1766 
1767   {
1768     HeapWord* start = _hrm->reserved().start();
1769     HeapWord* end = _hrm->reserved().end();
1770     size_t granularity = HeapRegion::GrainBytes;
1771 
1772     _region_attr.initialize(start, end, granularity);
1773     _humongous_reclaim_candidates.initialize(start, end, granularity);
1774   }
1775 
1776   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1777                           true /* are_GC_task_threads */,
1778                           false /* are_ConcurrentGC_threads */);
1779   if (_workers == NULL) {
1780     return JNI_ENOMEM;
1781   }
1782   _workers->initialize_workers();
1783 
1784   _mem_node_mgr->set_page_size(page_size);
1785 
1786   // Create the G1ConcurrentMark data structure and thread.
1787   // (Must do this late, so that "max_regions" is defined.)
1788   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1789   if (_cm == NULL || !_cm->completed_initialization()) {
1790     vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
1791     return JNI_ENOMEM;
1792   }
1793   _cm_thread = _cm->cm_thread();
1794 
1795   // Now expand into the initial heap size.
1796   if (!expand(init_byte_size, G1MemoryNodeManager::AnyNodeIndex, _workers)) {
1797     vm_shutdown_during_initialization("Failed to allocate initial heap.");
1798     return JNI_ENOMEM;
1799   }
1800 
1801   // Perform any initialization actions delegated to the policy.
1802   policy()->init(this, &_collection_set);
1803 
1804   jint ecode = initialize_concurrent_refinement();
1805   if (ecode != JNI_OK) {
1806     return ecode;
1807   }
1808 
1809   ecode = initialize_young_gen_sampling_thread();
1810   if (ecode != JNI_OK) {
1811     return ecode;
1812   }
1813 
1814   {
1815     G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1816     dcqs.set_process_cards_threshold(concurrent_refine()->yellow_zone());


2874   _verifier->verify_before_gc(type);
2875   _verifier->check_bitmaps("GC Start");
2876 }
2877 
2878 void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType type) {
2879   if (VerifyRememberedSets) {
2880     log_info(gc, verify)("[Verifying RemSets after GC]");
2881     VerifyRegionRemSetClosure v_cl;
2882     heap_region_iterate(&v_cl);
2883   }
2884   _verifier->verify_after_gc(type);
2885   _verifier->check_bitmaps("GC End");
2886 }
2887 
2888 void G1CollectedHeap::expand_heap_after_young_collection() {
2889   size_t expand_bytes = _heap_sizing_policy->expansion_amount();
2890   if (expand_bytes > 0) {
2891     // No need for ergo logging here;
2892     // expansion_amount() does this when it returns a value > 0.
2893     double expand_ms = 0.0; // expand() may return false without setting it
2894     if (!expand(expand_bytes, G1MemoryNodeManager::AnyNodeIndex, _workers, &expand_ms)) {
2895       // We failed to expand the heap. Cannot do anything about it.
2896     }
2897     phase_times()->record_expand_heap_time(expand_ms);
2898   }
2899 }
2900 
2901 const char* G1CollectedHeap::young_gc_name() const {
2902   if (collector_state()->in_initial_mark_gc()) {
2903     return "Pause Young (Concurrent Start)";
2904   } else if (collector_state()->in_young_only_phase()) {
2905     if (collector_state()->in_young_gc_before_mixed()) {
2906       return "Pause Young (Prepare Mixed)";
2907     } else {
2908       return "Pause Young (Normal)";
2909     }
2910   } else {
2911     return "Pause Young (Mixed)";
2912   }
2913 }
2914 


4535   if (!free_list_only) {
4536     _eden.clear();
4537     _survivor.clear();
4538   }
4539 
4540   RebuildRegionSetsClosure cl(free_list_only, &_old_set, _hrm);
4541   heap_region_iterate(&cl);
4542 
4543   if (!free_list_only) {
4544     set_used(cl.total_used());
4545     if (_archive_allocator != NULL) {
4546       _archive_allocator->clear_used();
4547     }
4548   }
4549   assert_used_and_recalculate_used_equal(this);
4550 }
4551 
4552 // Methods for the mutator alloc region
4553 
4554 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
4555                                                       bool force,
4556                                                       uint node_index) {
4557   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4558   bool should_allocate = policy()->should_allocate_mutator_region();
4559   if (force || should_allocate) {
4560     HeapRegion* new_alloc_region = new_region(word_size,
4561                                               HeapRegionType::Eden,
4562                                               false /* do_expand */,
4563                                               node_index);
4564     if (new_alloc_region != NULL) {
4565       set_region_short_lived_locked(new_alloc_region);
4566       _hr_printer.alloc(new_alloc_region, !should_allocate);
4567       _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
4568       _policy->remset_tracker()->update_at_allocate(new_alloc_region);
4569       return new_alloc_region;
4570     }
4571   }
4572   return NULL;
4573 }
4574 
4575 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
4576                                                   size_t allocated_bytes) {
4577   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4578   assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
4579 
4580   collection_set()->add_eden_region(alloc_region);
4581   increase_used(allocated_bytes);
4582   _eden.add_used_bytes(allocated_bytes);
4583   _hr_printer.retire(alloc_region);

