< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Print this page
rev 55208 : imported patch 8220089.webrev.0
rev 55209 : imported patch 8220089.webrev.1
rev 55210 : imported patch 8220089.webrev.2
rev 55212 : imported patch 8220089.webrev.4


2241 
// Iterate the current increment of the collection set with the given closure,
// partitioning the work among the currently active workers starting at worker_id.
2242 void G1CollectedHeap::collection_set_iterate_increment_from(HeapRegionClosure *cl, uint worker_id) {
2243   _collection_set.iterate_incremental_part_from(cl, worker_id, workers()->active_workers());
2244 }
2245 
// Block queries delegate to the HeapRegion containing the address.
2246 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2247   HeapRegion* hr = heap_region_containing(addr);
2248   return hr->block_start(addr);
2249 }
2250 
2251 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2252   HeapRegion* hr = heap_region_containing(addr);
2253   return hr->block_is_obj(addr);
2254 }
2255 
// G1 always supports TLAB allocation.
2256 bool G1CollectedHeap::supports_tlab_allocation() const {
2257   return true;
2258 }
2259 
// TLAB capacity: the eden share of the young generation target, i.e. the
// young target length minus the current survivor regions, in bytes.
2260 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2261   return (_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
2262 }
2263 
// Approximates TLAB usage as the full size of all current eden regions.
2264 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2265   return _eden.length() * HeapRegion::GrainBytes;
2266 }
2267 
2268 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
2269 // must be equal to the humongous object limit.
2270 size_t G1CollectedHeap::max_tlab_size() const {
2271   return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2272 }
2273 
// Delegates to the allocator, which knows the space left in the current region.
2274 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2275   return _allocator->unsafe_max_tlab_alloc();
2276 }
2277 
// Maximum heap capacity: all regions the heap could ever expand to, in bytes.
2278 size_t G1CollectedHeap::max_capacity() const {
2279   return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
2280 }
2281 


2472     tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
2473     tty->print_cr("========================================");
2474     tty->cr();
2475   }
2476 };
2477 
// Debug helpers (non-PRODUCT): dump remembered-set statistics, either for the
// collection set only or for every region in the heap.
2478 void G1CollectedHeap::print_cset_rsets() {
2479   PrintRSetsClosure cl("Printing CSet RSets");
2480   collection_set_iterate_all(&cl);
2481 }
2482 
2483 void G1CollectedHeap::print_all_rsets() {
// Fix: removed the stray second semicolon (empty statement) after the declaration.
2484   PrintRSetsClosure cl("Printing All RSets");
2485   heap_region_iterate(&cl);
2486 }
2487 #endif // PRODUCT
2488 
// Build a G1HeapSummary snapshot for GC tracing/serviceability.
// Uses used() when the caller already holds the Heap_lock, otherwise the
// unlocked variant. Eden capacity is the young target in bytes minus the
// bytes currently used by survivors.
2489 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
2490 
2491   size_t eden_used_bytes = _eden.used_bytes();
2492   size_t survivor_used_bytes = _survivor.used_bytes();
2493   size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
2494 
2495   size_t eden_capacity_bytes =
2496     (policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
2497 
2498   VirtualSpaceSummary heap_summary = create_heap_space_summary();
2499   return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
2500                        eden_capacity_bytes, survivor_used_bytes, num_regions());
2501 }
2502 
// Package the given evacuation statistics into a G1EvacSummary value object.
2503 G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
2504   return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(),
2505                        stats->unused(), stats->used(), stats->region_end_waste(),
2506                        stats->regions_filled(), stats->direct_allocated(),
2507                        stats->failure_used(), stats->failure_waste());
2508 }
2509 
2510 void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
2511   const G1HeapSummary& heap_summary = create_g1_heap_summary();
2512   gc_tracer->report_gc_heap_summary(when, heap_summary);


2860 
// Begin building the next incremental collection set after a pause: clear
// per-region attributes, move this pause's survivors into the new CSet, and
// record the elapsed time in the phase times.
2861 void G1CollectedHeap::start_new_collection_set() {
2862   double start = os::elapsedTime();
2863 
2864   collection_set()->start_incremental_building();
2865 
2866   clear_region_attr();
2867 
2868   guarantee(_eden.length() == 0, "eden should have been cleared");
2869   policy()->transfer_survivors_to_cset(survivor());
2870 
2871   // We redo the verification but now wrt to the new CSet which
2872   // has just got initialized after the previous CSet was freed.
2873   _cm->verify_no_collection_set_oops();
2874 
2875   phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
2876 }
2877 
// Finalize the initial collection set for this pause within the given pause
// time target, record the chosen region counts (mandatory + optional) in
// evacuation_info, and optionally print the CSet when the region printer is active.
2878 void G1CollectedHeap::calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms) {
2879 
2880   _collection_set.finalize_initial_collection_set(target_pause_time_ms, &_survivor);
2881   evacuation_info.set_collectionset_regions(collection_set()->region_length() +
2882                                             collection_set()->optional_region_length());
2883 
2884   _cm->verify_no_collection_set_oops();
2885 
2886   if (_hr_printer.is_active()) {
2887     G1PrintCollectionSetClosure cl(&_hr_printer);
2888     _collection_set.iterate(&cl);
2889     _collection_set.iterate_optional(&cl);
2890   }
2891 }
2892 
2893 G1HeapVerifier::G1VerifyType G1CollectedHeap::young_collection_verify_type() const {
2894   if (collector_state()->in_initial_mark_gc()) {
2895     return G1HeapVerifier::G1VerifyConcurrentStart;
2896   } else if (collector_state()->in_young_only_phase()) {
2897     return G1HeapVerifier::G1VerifyYoungNormal;
2898   } else {
2899     return G1HeapVerifier::G1VerifyMixed;
2900   }


4373   }
4374 };
4375 
// Abandon the current collection set: apply the abandon closure to every CSet
// region, then clear the set and stop its incremental building.
4376 void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
4377   G1AbandonCollectionSetClosure cl;
4378   collection_set_iterate_all(&cl);
4379 
4380   collection_set->clear();
4381   collection_set->stop_incremental_building();
4382 }
4383 
// True iff hr is the old region retained by the allocator for GC allocation.
4384 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
4385   return _allocator->is_retained_old_region(hr);
4386 }
4387 
// Add hr to the eden set and inform the policy.
// NOTE(review): the "_locked" suffix suggests the caller must hold the heap
// lock (or be at a safepoint) — confirm at call sites.
4388 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
4389   _eden.add(hr);
4390   _policy->set_region_eden(hr);
4391 }
4392 





4393 #ifdef ASSERT
4394 
// Verification closure: fails (logs and records) if any region is still
// tagged as young. Returning false continues iteration so every offending
// region is reported.
4395 class NoYoungRegionsClosure: public HeapRegionClosure {
4396 private:
4397   bool _success;
4398 public:
4399   NoYoungRegionsClosure() : _success(true) { }
4400   bool do_heap_region(HeapRegion* r) {
4401     if (r->is_young()) {
4402       log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
4403                             p2i(r->bottom()), p2i(r->end()));
4404       _success = false;
4405     }
4406     return false;
4407   }
4408   bool success() { return _success; }
4409 };
4410 
4411 bool G1CollectedHeap::check_young_list_empty() {
4412   bool ret = (young_regions_count() == 0);


4529   assert_at_safepoint_on_vm_thread();
4530 
4531   if (!free_list_only) {
4532     _eden.clear();
4533     _survivor.clear();
4534   }
4535 
4536   RebuildRegionSetsClosure cl(free_list_only, &_old_set, _hrm);
4537   heap_region_iterate(&cl);
4538 
4539   if (!free_list_only) {
4540     set_used(cl.total_used());
4541     if (_archive_allocator != NULL) {
4542       _archive_allocator->clear_used();
4543     }
4544   }
4545   assert_used_and_recalculate_used_equal(this);
4546 }
4547 
4548 // Methods for the mutator alloc region
















4549 
// Allocate a new eden region for mutator allocation, or NULL. Allocation
// happens only when forced or when the policy still allows another mutator
// region; on success the region is tagged as eden and the printer, verifier
// and remembered-set tracker are notified.
4550 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
4551                                                       bool force) {
4552   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4553   bool should_allocate = policy()->should_allocate_mutator_region();
4554   if (force || should_allocate) {
4555     HeapRegion* new_alloc_region = new_region(word_size,
4556                                               HeapRegionType::Eden,
4557                                               false /* do_expand */);
4558     if (new_alloc_region != NULL) {
4559       set_region_short_lived_locked(new_alloc_region);
4560       _hr_printer.alloc(new_alloc_region, !should_allocate);
4561       _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
4562       _policy->remset_tracker()->update_at_allocate(new_alloc_region);
4563       return new_alloc_region;
4564     }
4565   }
4566   return NULL;
4567 }
4568 
// Retire a full (or forcibly retired) mutator eden region: add it to the
// collection set, account its used bytes, and update monitoring.
4569 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
4570                                                   size_t allocated_bytes) {
4571   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4572   assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
4573 
4574   collection_set()->add_eden_region(alloc_region);
4575   increase_used(allocated_bytes);
4576   _eden.add_used_bytes(allocated_bytes);
4577   _hr_printer.retire(alloc_region);
4578 
4579   // We update the eden sizes here, when the region is retired,
4580   // instead of when it's allocated, since this is the point that its
4581   // used space has been recorded in _summary_bytes_used.
4582   g1mm()->update_eden_size();
4583 }
4584 




4585 // Methods for the GC alloc regions
4586 
// Whether another GC alloc region may be allocated for the given destination:
// old regions are unlimited here; survivor regions are capped by the policy.
4587 bool G1CollectedHeap::has_more_regions(G1HeapRegionAttr dest) {
4588   if (dest.is_old()) {
4589     return true;
4590   } else {
4591     return survivor_regions_count() < policy()->max_survivor_regions();
4592   }
4593 }
4594 
4595 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest) {
4596   assert(FreeList_lock->owned_by_self(), "pre-condition");
4597 
4598   if (!has_more_regions(dest)) {
4599     return NULL;
4600   }
4601 
4602   HeapRegionType type;
4603   if (dest.is_young()) {
4604     type = HeapRegionType::Survivor;


4623     register_region_with_region_attr(new_alloc_region);
4624     _hr_printer.alloc(new_alloc_region);
4625     return new_alloc_region;
4626   }
4627   return NULL;
4628 }
4629 
// Retire a GC allocation region after evacuation: record copied bytes, file
// the region into the old set or account survivor usage, and register a
// non-empty region as a concurrent-mark root region during initial mark.
4630 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
4631                                              size_t allocated_bytes,
4632                                              G1HeapRegionAttr dest) {
4633   policy()->record_bytes_copied_during_gc(allocated_bytes);
4634   if (dest.is_old()) {
4635     old_set_add(alloc_region);
4636   } else {
4637     assert(dest.is_young(), "Retiring alloc region should be young (%d)", dest.type());
4638     _survivor.add_used_bytes(allocated_bytes);
4639   }
4640 
4641   bool const during_im = collector_state()->in_initial_mark_gc();
4642   if (during_im && allocated_bytes > 0) {
4643     _cm->root_regions()->add(alloc_region);
4644   }
4645   _hr_printer.retire(alloc_region);
4646 }
4647 






// Allocate the free region with the highest heap address (expanding the heap
// if the region manager had to), or return NULL if none is available.
4648 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
4649   bool expanded = false;
4650   uint index = _hrm->find_highest_free(&expanded);
4651 
4652   if (index != G1_NO_HRM_INDEX) {
4653     if (expanded) {
4654       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
4655                                 HeapRegion::GrainWords * HeapWordSize);
4656     }
4657     _hrm->allocate_free_regions_starting_at(index, 1);
4658     return region_at(index);
4659   }
4660   return NULL;
4661 }
4662 
4663 // Optimized nmethod scanning
4664 
4665 class RegisterNMethodOopClosure: public OopClosure {
4666   G1CollectedHeap* _g1h;
4667   nmethod* _nm;


// Re-register all code-cache blobs as strong code roots.
4754 void G1CollectedHeap::rebuild_strong_code_roots() {
4755   RebuildStrongCodeRootClosure blob_cl(this);
4756   CodeCache::blobs_do(&blob_cl);
4757 }
4758 
// Serviceability/monitoring accessors: all delegate to the G1 monitoring support object.
4759 void G1CollectedHeap::initialize_serviceability() {
4760   _g1mm->initialize_serviceability();
4761 }
4762 
4763 MemoryUsage G1CollectedHeap::memory_usage() {
4764   return _g1mm->memory_usage();
4765 }
4766 
4767 GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
4768   return _g1mm->memory_managers();
4769 }
4770 
4771 GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
4772   return _g1mm->memory_pools();
4773 }






















2241 
// Iterate the current increment of the collection set with the given closure,
// partitioning the work among the currently active workers starting at worker_id.
2242 void G1CollectedHeap::collection_set_iterate_increment_from(HeapRegionClosure *cl, uint worker_id) {
2243   _collection_set.iterate_incremental_part_from(cl, worker_id, workers()->active_workers());
2244 }
2245 
// Block queries delegate to the HeapRegion containing the address.
2246 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2247   HeapRegion* hr = heap_region_containing(addr);
2248   return hr->block_start(addr);
2249 }
2250 
2251 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2252   HeapRegion* hr = heap_region_containing(addr);
2253   return hr->block_is_obj(addr);
2254 }
2255 
// G1 always supports TLAB allocation.
2256 bool G1CollectedHeap::supports_tlab_allocation() const {
2257   return true;
2258 }
2259 
// TLAB capacity: the eden share of the young generation target, in bytes.
// This revision uses survivor_regions_count(), which also counts retained
// survivor regions, instead of _survivor.length() directly.
2260 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2261   return (_policy->young_list_target_length() - survivor_regions_count()) * HeapRegion::GrainBytes;
2262 }
2263 
// Approximates TLAB usage as the full size of all current eden regions.
2264 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2265   return _eden.length() * HeapRegion::GrainBytes;
2266 }
2267 
2268 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
2269 // must be equal to the humongous object limit.
2270 size_t G1CollectedHeap::max_tlab_size() const {
2271   return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2272 }
2273 
// Delegates to the allocator, which knows the space left in the current region.
2274 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2275   return _allocator->unsafe_max_tlab_alloc();
2276 }
2277 
// Maximum heap capacity: all regions the heap could ever expand to, in bytes.
2278 size_t G1CollectedHeap::max_capacity() const {
2279   return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
2280 }
2281 


2472     tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum);
2473     tty->print_cr("========================================");
2474     tty->cr();
2475   }
2476 };
2477 
// Debug helpers (non-PRODUCT): dump remembered-set statistics, either for the
// collection set only or for every region in the heap.
2478 void G1CollectedHeap::print_cset_rsets() {
2479   PrintRSetsClosure cl("Printing CSet RSets");
2480   collection_set_iterate_all(&cl);
2481 }
2482 
2483 void G1CollectedHeap::print_all_rsets() {
// Fix: removed the stray second semicolon (empty statement) after the declaration.
2484   PrintRSetsClosure cl("Printing All RSets");
2485   heap_region_iterate(&cl);
2486 }
2487 #endif // PRODUCT
2488 
// Build a G1HeapSummary snapshot for GC tracing/serviceability.
// Uses used() when the caller already holds the Heap_lock, otherwise the
// unlocked variant. This revision reports survivor usage through
// survivor_regions_used_bytes(), which includes retained survivor regions.
2489 G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
2490 
2491   size_t eden_used_bytes = _eden.used_bytes();
2492   size_t survivor_used_bytes = survivor_regions_used_bytes();
2493   size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
2494 
2495   size_t eden_capacity_bytes =
2496     (policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
2497 
2498   VirtualSpaceSummary heap_summary = create_heap_space_summary();
2499   return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
2500                        eden_capacity_bytes, survivor_used_bytes, num_regions());
2501 }
2502 
// Package the given evacuation statistics into a G1EvacSummary value object.
2503 G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
2504   return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(),
2505                        stats->unused(), stats->used(), stats->region_end_waste(),
2506                        stats->regions_filled(), stats->direct_allocated(),
2507                        stats->failure_used(), stats->failure_waste());
2508 }
2509 
2510 void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
2511   const G1HeapSummary& heap_summary = create_g1_heap_summary();
2512   gc_tracer->report_gc_heap_summary(when, heap_summary);


2860 
// Begin building the next incremental collection set after a pause: clear
// per-region attributes, move this pause's survivors into the new CSet, and
// record the elapsed time in the phase times.
2861 void G1CollectedHeap::start_new_collection_set() {
2862   double start = os::elapsedTime();
2863 
2864   collection_set()->start_incremental_building();
2865 
2866   clear_region_attr();
2867 
2868   guarantee(_eden.length() == 0, "eden should have been cleared");
2869   policy()->transfer_survivors_to_cset(survivor());
2870 
2871   // We redo the verification but now wrt to the new CSet which
2872   // has just got initialized after the previous CSet was freed.
2873   _cm->verify_no_collection_set_oops();
2874 
2875   phase_times()->record_start_new_cset_time_ms((os::elapsedTime() - start) * 1000.0);
2876 }
2877 
// Finalize the initial collection set for this pause within the given pause
// time target, record the chosen region counts (mandatory + optional) in
// evacuation_info, and optionally print the CSet when the region printer is
// active. This revision no longer passes the survivor list explicitly.
2878 void G1CollectedHeap::calculate_collection_set(G1EvacuationInfo& evacuation_info, double target_pause_time_ms) {
2879 
2880   _collection_set.finalize_initial_collection_set(target_pause_time_ms);
2881   evacuation_info.set_collectionset_regions(collection_set()->region_length() +
2882                                             collection_set()->optional_region_length());
2883 
2884   _cm->verify_no_collection_set_oops();
2885 
2886   if (_hr_printer.is_active()) {
2887     G1PrintCollectionSetClosure cl(&_hr_printer);
2888     _collection_set.iterate(&cl);
2889     _collection_set.iterate_optional(&cl);
2890   }
2891 }
2892 
2893 G1HeapVerifier::G1VerifyType G1CollectedHeap::young_collection_verify_type() const {
2894   if (collector_state()->in_initial_mark_gc()) {
2895     return G1HeapVerifier::G1VerifyConcurrentStart;
2896   } else if (collector_state()->in_young_only_phase()) {
2897     return G1HeapVerifier::G1VerifyYoungNormal;
2898   } else {
2899     return G1HeapVerifier::G1VerifyMixed;
2900   }


4373   }
4374 };
4375 
// Abandon the current collection set: apply the abandon closure to every CSet
// region, then clear the set and stop its incremental building.
4376 void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
4377   G1AbandonCollectionSetClosure cl;
4378   collection_set_iterate_all(&cl);
4379 
4380   collection_set->clear();
4381   collection_set->stop_incremental_building();
4382 }
4383 
// True iff hr is the old region retained by the allocator for GC allocation.
4384 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
4385   return _allocator->is_retained_old_region(hr);
4386 }
4387 
// Add hr to the eden set and inform the policy.
// NOTE(review): the "_locked" suffix suggests the caller must hold the heap
// lock (or be at a safepoint) — confirm at call sites.
4388 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
4389   _eden.add(hr);
4390   _policy->set_region_eden(hr);
4391 }
4392 
// As above, but for a retained survivor region being reused for mutator
// allocation: the policy is told the eden region originates from a survivor.
4393 void G1CollectedHeap::set_retained_region_short_lived_locked(HeapRegion* hr) {
4394   _eden.add(hr);
4395   _policy->set_region_eden_from_survivor(hr);
4396 }
4397 
4398 #ifdef ASSERT
4399 
// Verification closure: fails (logs and records) if any region is still
// tagged as young. Returning false continues iteration so every offending
// region is reported.
4400 class NoYoungRegionsClosure: public HeapRegionClosure {
4401 private:
4402   bool _success;
4403 public:
4404   NoYoungRegionsClosure() : _success(true) { }
4405   bool do_heap_region(HeapRegion* r) {
4406     if (r->is_young()) {
4407       log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
4408                             p2i(r->bottom()), p2i(r->end()));
4409       _success = false;
4410     }
4411     return false;
4412   }
4413   bool success() { return _success; }
4414 };
4415 
4416 bool G1CollectedHeap::check_young_list_empty() {
4417   bool ret = (young_regions_count() == 0);


4534   assert_at_safepoint_on_vm_thread();
4535 
4536   if (!free_list_only) {
4537     _eden.clear();
4538     _survivor.clear();
4539   }
4540 
4541   RebuildRegionSetsClosure cl(free_list_only, &_old_set, _hrm);
4542   heap_region_iterate(&cl);
4543 
4544   if (!free_list_only) {
4545     set_used(cl.total_used());
4546     if (_archive_allocator != NULL) {
4547       _archive_allocator->clear_used();
4548     }
4549   }
4550   assert_used_and_recalculate_used_equal(this);
4551 }
4552 
4553 // Methods for the mutator alloc region
// Common setup for a region becoming a mutator (eden) region. For a reused
// retained survivor region, the retained variant of the eden registration is
// used and the printer reports a reuse rather than a fresh allocation.
4554 void G1CollectedHeap::update_as_mutator_region(HeapRegion* alloc_region, bool is_reused) {
4555   const char* msg;
4556 
4557   if (is_reused) {
4558     set_retained_region_short_lived_locked(alloc_region);
4559     _hr_printer.reuse(alloc_region);
4560     msg = "Mutator Region Allocation (Reuse)";
4561   } else {
4562     set_region_short_lived_locked(alloc_region);
4563     _hr_printer.alloc(alloc_region, !policy()->should_allocate_mutator_region());
4564     msg = "Mutator Region Allocation";
4565   }
4566 
4567   _verifier->check_bitmaps(msg, alloc_region);
4568   _policy->remset_tracker()->update_at_allocate(alloc_region);
4569 }
4570 
// Allocate a new eden region for mutator allocation, or NULL. Allocation
// happens only when forced or when the policy still allows another mutator
// region; setup of the new region is delegated to update_as_mutator_region
// (is_reused = false, i.e. a freshly allocated region).
4571 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
4572                                                       bool force) {
4573   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4574   bool should_allocate = policy()->should_allocate_mutator_region();
4575   if (force || should_allocate) {
4576     HeapRegion* new_alloc_region = new_region(word_size,
4577                                               HeapRegionType::Eden,
4578                                               false /* do_expand */);
4579     if (new_alloc_region != NULL) {
4580       update_as_mutator_region(new_alloc_region, false);



4581       return new_alloc_region;
4582     }
4583   }
4584   return NULL;
4585 }
4586 
// Retire a full (or forcibly retired) mutator eden region: add it to the
// collection set, account its used bytes (per-region variant in this
// revision), and update monitoring.
4587 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
4588                                                   size_t allocated_bytes) {
4589   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
4590   assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
4591 
4592   collection_set()->add_eden_region(alloc_region);
4593   increase_used(allocated_bytes);
4594   _eden.add_used_bytes(alloc_region, allocated_bytes);
4595   _hr_printer.retire(alloc_region);
4596 
4597   // We update the eden sizes here, when the region is retired,
4598   // instead of when it's allocated, since this is the point that its
4599   // used space has been recorded in _summary_bytes_used.
4600   g1mm()->update_eden_size();
4601 }
4602 
// Reuse a retained survivor region for mutator allocation (is_reused = true).
4603 void G1CollectedHeap::reuse_retained_survivor_region(HeapRegion* alloc_region) {
4604   update_as_mutator_region(alloc_region, true);
4605 }
4606 
4607 // Methods for the GC alloc regions
4608 
// Whether another GC alloc region may be allocated for the given destination:
// old regions are unlimited here; survivor regions are capped by the policy.
4609 bool G1CollectedHeap::has_more_regions(G1HeapRegionAttr dest) {
4610   if (dest.is_old()) {
4611     return true;
4612   } else {
4613     return survivor_regions_count() < policy()->max_survivor_regions();
4614   }
4615 }
4616 
4617 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest) {
4618   assert(FreeList_lock->owned_by_self(), "pre-condition");
4619 
4620   if (!has_more_regions(dest)) {
4621     return NULL;
4622   }
4623 
4624   HeapRegionType type;
4625   if (dest.is_young()) {
4626     type = HeapRegionType::Survivor;


4645     register_region_with_region_attr(new_alloc_region);
4646     _hr_printer.alloc(new_alloc_region);
4647     return new_alloc_region;
4648   }
4649   return NULL;
4650 }
4651 
// Retire a GC allocation region after evacuation: record copied bytes, file
// the region into the old set or account survivor usage, and — during initial
// mark — register the newly allocated span [NTAMS, top) as a concurrent-mark
// root region (this revision adds the address range rather than the region).
4652 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
4653                                              size_t allocated_bytes,
4654                                              G1HeapRegionAttr dest) {
4655   policy()->record_bytes_copied_during_gc(allocated_bytes);
4656   if (dest.is_old()) {
4657     old_set_add(alloc_region);
4658   } else {
4659     assert(dest.is_young(), "Retiring alloc region should be young (%d)", dest.type());
4660     _survivor.add_used_bytes(allocated_bytes);
4661   }
4662 
4663   bool const during_im = collector_state()->in_initial_mark_gc();
4664   if (during_im && allocated_bytes > 0) {
4665     _cm->root_regions()->add(alloc_region->next_top_at_mark_start(), alloc_region->top());
4666   }
4667   _hr_printer.retire(alloc_region);
4668 }
4669 
// Detach a retained survivor GC alloc region from the survivor set so it can
// later be reused as a mutator region.
4670 void G1CollectedHeap::update_retained_survivor_gc_alloc_region(HeapRegion* alloc_region) {
4671   _survivor.remove(alloc_region);
4672   // To reuse the given region, setting EdenTag is also necessary but it will be done when
4673   // the region is initialized as a mutator region.
4674 }
4675 
// Allocate the free region with the highest heap address (expanding the heap
// if the region manager had to), or return NULL if none is available.
4676 HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
4677   bool expanded = false;
4678   uint index = _hrm->find_highest_free(&expanded);
4679 
4680   if (index != G1_NO_HRM_INDEX) {
4681     if (expanded) {
4682       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
4683                                 HeapRegion::GrainWords * HeapWordSize);
4684     }
4685     _hrm->allocate_free_regions_starting_at(index, 1);
4686     return region_at(index);
4687   }
4688   return NULL;
4689 }
4690 
4691 // Optimized nmethod scanning
4692 
4693 class RegisterNMethodOopClosure: public OopClosure {
4694   G1CollectedHeap* _g1h;
4695   nmethod* _nm;


// Re-register all code-cache blobs as strong code roots.
4782 void G1CollectedHeap::rebuild_strong_code_roots() {
4783   RebuildStrongCodeRootClosure blob_cl(this);
4784   CodeCache::blobs_do(&blob_cl);
4785 }
4786 
// Serviceability/monitoring accessors: all delegate to the G1 monitoring support object.
4787 void G1CollectedHeap::initialize_serviceability() {
4788   _g1mm->initialize_serviceability();
4789 }
4790 
4791 MemoryUsage G1CollectedHeap::memory_usage() {
4792   return _g1mm->memory_usage();
4793 }
4794 
4795 GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
4796   return _g1mm->memory_managers();
4797 }
4798 
4799 GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
4800   return _g1mm->memory_pools();
4801 }
4802 
// Region accounting for the retained-survivor scheme: retained survivor
// regions live in the eden list while being reused, so they are subtracted
// from the eden count and added back to the survivor count/usage.
4803 uint G1CollectedHeap::eden_regions_count() const {
4804   assert(_eden.length() >= _survivor.retained_length(), "Eden length (%u) should be "
4805          "larger than or equal to retained region (%u).",
4806          _eden.length(), _survivor.retained_length());
4807   return _eden.length() - _survivor.retained_length();
4808 }
4809 
// Survivor regions include both regular and retained survivor regions.
4810 uint G1CollectedHeap::survivor_regions_count() const {
4811   return _survivor.length() + _survivor.retained_length();
4812 }
4813 
4814 size_t G1CollectedHeap::eden_regions_used_bytes() const {
4815   return _eden.used_bytes();
4816 }
4817 
// Survivor usage likewise includes bytes in retained survivor regions.
4818 size_t G1CollectedHeap::survivor_regions_used_bytes() const {
4819   return _survivor.used_bytes() + _survivor.retained_used_bytes();
4820 }
4821 
< prev index next >