
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 56811 : [mq]: 8189737-heapregion-remove-space-inheritance


 647     while (curr_region != NULL) {
 648       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 649              "Region already in use (index %u)", curr_region->hrm_index());
 650       if (open) {
 651         curr_region->set_open_archive();
 652       } else {
 653         curr_region->set_closed_archive();
 654       }
 655       _hr_printer.alloc(curr_region);
 656       _archive_set.add(curr_region);
 657       HeapWord* top;
 658       HeapRegion* next_region;
 659       if (curr_region != last_region) {
 660         top = curr_region->end();
 661         next_region = _hrm->next_region_in_heap(curr_region);
 662       } else {
 663         top = last_address + 1;
 664         next_region = NULL;
 665       }
 666       curr_region->set_top(top);
 667       curr_region->set_first_dead(top);
 668       curr_region->set_end_of_live(top);
 669       curr_region = next_region;
 670     }
 671 
 672     // Notify mark-sweep of the archive
 673     G1ArchiveAllocator::set_range_archive(curr_range, open);
 674   }
 675   return true;
 676 }
 677 
 678 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
 679   assert(!is_init_completed(), "Expect to be called at JVM init time");
 680   assert(ranges != NULL, "MemRegion array NULL");
 681   assert(count != 0, "No MemRegions provided");
 682   MemRegion reserved = _hrm->reserved();
 683   HeapWord *prev_last_addr = NULL;
 684   HeapRegion* prev_last_region = NULL;
 685 
 686   // For each MemRegion, create filler objects, if needed, in the G1 regions
 687   // that contain the address range. The address range actually within the
 688   // MemRegion will not be modified. That is assumed to have been initialized


3961     }
3962 
3963     virtual bool do_heap_region(HeapRegion* r) {
3964       G1CollectedHeap* g1h = G1CollectedHeap::heap();
3965 
3966       assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
3967       g1h->clear_region_attr(r);
3968 
3969       if (r->is_young()) {
3970         assert(r->young_index_in_cset() != 0 && (uint)r->young_index_in_cset() <= g1h->collection_set()->young_region_length(),
3971                "Young index %u is wrong for region %u of type %s with %u young regions",
3972                r->young_index_in_cset(),
3973                r->hrm_index(),
3974                r->get_type_str(),
3975                g1h->collection_set()->young_region_length());
3976         size_t words_survived = _surviving_young_words[r->young_index_in_cset()];
3977         r->record_surv_words_in_group(words_survived);
3978       }
3979 
3980       if (!r->evacuation_failed()) {
3981         assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
3982         _before_used_bytes += r->used();
3983         g1h->free_region(r,
3984                          &_local_free_list,
3985                          true, /* skip_remset */
3986                          true, /* skip_hot_card_cache */
3987                          true  /* locked */);
3988       } else {
3989         r->uninstall_surv_rate_group();
3990         r->clear_young_index_in_cset();
3991         r->set_evacuation_failed(false);
3992         // When moving a young gen region to old gen, we "allocate" that whole region
3993         // there. This is in addition to any already evacuated objects. Notify the
3994         // policy about that.
3995         // Old gen regions do not cause an additional allocation: both the objects
3996         // still in the region and the ones already moved are accounted for elsewhere.
3997         if (r->is_young()) {
3998           _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
3999         }
4000         // The region is now considered to be old.
4001         r->set_old();
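A small sketch of the accounting described in the comment above, using a hypothetical PolicyCounters struct and an assumed region size rather than the HotSpot policy object: a young region that fails evacuation is retained wholesale and becomes old, so the policy is charged one full region of old-gen allocation, while a failed old region adds nothing here.

#include <cstddef>

static const size_t kRegionGrainBytes = 4 * 1024 * 1024; // assumed region size, stand-in for HeapRegion::GrainBytes

struct PolicyCounters {
  size_t bytes_allocated_in_old_since_last_gc = 0;
};

static void account_evacuation_failure(PolicyCounters& policy, bool region_is_young) {
  if (region_is_young) {
    // The whole young region is kept in place and becomes old, on top of any
    // objects that were already copied out of it.
    policy.bytes_allocated_in_old_since_last_gc += kRegionGrainBytes;
  }
  // For old regions, both the objects still in the region and the ones already
  // moved are accounted for elsewhere, so nothing is added.
}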




 647     while (curr_region != NULL) {
 648       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 649              "Region already in use (index %u)", curr_region->hrm_index());
 650       if (open) {
 651         curr_region->set_open_archive();
 652       } else {
 653         curr_region->set_closed_archive();
 654       }
 655       _hr_printer.alloc(curr_region);
 656       _archive_set.add(curr_region);
 657       HeapWord* top;
 658       HeapRegion* next_region;
 659       if (curr_region != last_region) {
 660         top = curr_region->end();
 661         next_region = _hrm->next_region_in_heap(curr_region);
 662       } else {
 663         top = last_address + 1;
 664         next_region = NULL;
 665       }
 666       curr_region->set_top(top);


 667       curr_region = next_region;
 668     }
 669 
 670     // Notify mark-sweep of the archive
 671     G1ArchiveAllocator::set_range_archive(curr_range, open);
 672   }
 673   return true;
 674 }
 675 
 676 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
 677   assert(!is_init_completed(), "Expect to be called at JVM init time");
 678   assert(ranges != NULL, "MemRegion array NULL");
 679   assert(count != 0, "No MemRegions provided");
 680   MemRegion reserved = _hrm->reserved();
 681   HeapWord *prev_last_addr = NULL;
 682   HeapRegion* prev_last_region = NULL;
 683 
 684   // For each MemRegion, create filler objects, if needed, in the G1 regions
 685   // that contain the address range. The address range actually within the
 686   // MemRegion will not be modified. That is assumed to have been initialized
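Compared with the earlier copy of this hunk, the updated loop sets only the region top: the set_first_dead(top) and set_end_of_live(top) calls are gone, consistent with the revision line's removal of the Space inheritance that supplied that bookkeeping. Below is a minimal, self-contained sketch of the surviving control flow; RegionStub and mark_archive_range are hypothetical stand-ins, not the HotSpot classes.

#include <cassert>
#include <cstddef>
#include <vector>

typedef size_t HeapWordIdx; // stand-in index type instead of HeapWord*

struct RegionStub {
  HeapWordIdx _bottom;
  HeapWordIdx _end;
  HeapWordIdx _top;
  bool _archive;

  HeapWordIdx end() const       { return _end; }
  bool is_empty() const         { return _top == _bottom; }
  void set_archive()            { _archive = true; }
  void set_top(HeapWordIdx top) { _top = top; } // the only per-region state updated here
};

// Mark the regions covering [first, last] as archive. Every region except the
// last is filled to its end; the last is topped just past last_address.
static void mark_archive_range(std::vector<RegionStub>& regions,
                               size_t first, size_t last,
                               HeapWordIdx last_address) {
  for (size_t i = first; i <= last; i++) {
    RegionStub& r = regions[i];
    assert(r.is_empty() && "region already in use");
    r.set_archive();
    r.set_top(i != last ? r.end() : last_address + 1);
  }
}

Under these assumptions, mark_archive_range(regions, 3, 5, addr) fills regions 3 and 4 completely and tops region 5 at addr + 1, matching the shape of the loop in the hunk above.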


3959     }
3960 
3961     virtual bool do_heap_region(HeapRegion* r) {
3962       G1CollectedHeap* g1h = G1CollectedHeap::heap();
3963 
3964       assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
3965       g1h->clear_region_attr(r);
3966 
3967       if (r->is_young()) {
3968         assert(r->young_index_in_cset() != 0 && (uint)r->young_index_in_cset() <= g1h->collection_set()->young_region_length(),
3969                "Young index %u is wrong for region %u of type %s with %u young regions",
3970                r->young_index_in_cset(),
3971                r->hrm_index(),
3972                r->get_type_str(),
3973                g1h->collection_set()->young_region_length());
3974         size_t words_survived = _surviving_young_words[r->young_index_in_cset()];
3975         r->record_surv_words_in_group(words_survived);
3976       }
3977 
3978       if (!r->evacuation_failed()) {
3979         assert(!r->is_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
3980         _before_used_bytes += r->used();
3981         g1h->free_region(r,
3982                          &_local_free_list,
3983                          true, /* skip_remset */
3984                          true, /* skip_hot_card_cache */
3985                          true  /* locked */);
3986       } else {
3987         r->uninstall_surv_rate_group();
3988         r->clear_young_index_in_cset();
3989         r->set_evacuation_failed(false);
3990         // When moving a young gen region to old gen, we "allocate" that whole region
3991         // there. This is in addition to any already evacuated objects. Notify the
3992         // policy about that.
3993         // Old gen regions do not cause an additional allocation: both the objects
3994         // still in the region and the ones already moved are accounted for elsewhere.
3995         if (r->is_young()) {
3996           _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
3997         }
3998         // The region is now considered to be old.
3999         r->set_old();
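The only change in this hunk is the assertion on the region being freed: r->not_empty() becomes !r->is_empty(). A minimal sketch of the assumed equivalence, with a hypothetical stand-in class rather than the real HeapRegion:

#include <cassert>
#include <cstddef>

struct RegionStub {
  size_t _used_bytes;

  size_t used() const    { return _used_bytes; }
  bool is_empty() const  { return used() == 0; }
  bool not_empty() const { return used() > 0; } // assumed form of the old Space-style helper
};

int main() {
  RegionStub r;
  r._used_bytes = 4096;
  // Both spellings assert the same condition; only the second remains once
  // not_empty() is no longer available on the region type.
  assert(r.not_empty());
  assert(!r.is_empty());
  return 0;
}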

