
src/share/vm/gc/g1/g1CollectedHeap.cpp

 597     // first attempt (without holding the Heap_lock) here and the
 598     // follow-on attempt will be at the start of the next loop
 599     // iteration (after taking the Heap_lock).
 600     result = _allocator->attempt_allocation(word_size, context);
 601     if (result != NULL) {
 602       return result;
 603     }
 604 
 605     // Give a warning if we seem to be looping forever.
 606     if ((QueuedAllocationWarningCount > 0) &&
 607         (try_count % QueuedAllocationWarningCount == 0)) {
 608       log_warning(gc)("G1CollectedHeap::attempt_allocation_slow() "
 609                       "retries %d times", try_count);
 610     }
 611   }
 612 
 613   ShouldNotReachHere();
 614   return NULL;
 615 }
 616 
 617 void G1CollectedHeap::begin_archive_alloc_range() {
 618   assert_at_safepoint(true /* should_be_vm_thread */);
 619   if (_archive_allocator == NULL) {
 620     _archive_allocator = G1ArchiveAllocator::create_allocator(this);
 621   }
 622 }
 623 
 624 bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
 625   // Allocations in archive regions cannot be of a size that would be considered
 626   // humongous even for a minimum-sized region, because G1 region sizes/boundaries
 627   // may be different at archive-restore time.
 628   return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
 629 }
 630 
 631 HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
 632   assert_at_safepoint(true /* should_be_vm_thread */);
 633   assert(_archive_allocator != NULL, "_archive_allocator not initialized");
 634   if (is_archive_alloc_too_large(word_size)) {
 635     return NULL;
 636   }
 637   return _archive_allocator->archive_mem_allocate(word_size);
 638 }
 639 
 640 void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,


 644 
 645   // Call complete_archive to do the real work, filling in the MemRegion
 646   // array with the archive regions.
 647   _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
 648   delete _archive_allocator;
 649   _archive_allocator = NULL;
 650 }
 651 
 652 bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
 653   assert(ranges != NULL, "MemRegion array NULL");
 654   assert(count != 0, "No MemRegions provided");
 655   MemRegion reserved = _hrm.reserved();
 656   for (size_t i = 0; i < count; i++) {
 657     if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
 658       return false;
 659     }
 660   }
 661   return true;
 662 }
 663 
 664 bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {


 665   assert(!is_init_completed(), "Expect to be called at JVM init time");
 666   assert(ranges != NULL, "MemRegion array NULL");
 667   assert(count != 0, "No MemRegions provided");
 668   MutexLockerEx x(Heap_lock);
 669 
 670   MemRegion reserved = _hrm.reserved();
 671   HeapWord* prev_last_addr = NULL;
 672   HeapRegion* prev_last_region = NULL;
 673 
 674   // Temporarily disable pretouching of heap pages. This interface is used
 675   // when mmap'ing archived heap data in, so pre-touching is wasted.
 676   FlagSetting fs(AlwaysPreTouch, false);
 677 
 678   // Enable archive object checking used by G1MarkSweep. We have to let it know
 679   // about each archive range, so that objects in those ranges aren't marked.
 680   G1ArchiveAllocator::enable_archive_object_check();
 681 
 682   // For each specified MemRegion range, allocate the corresponding G1
 683   // regions and mark them as archive regions. We expect the ranges in
 684   // ascending starting address order, without overlap.
 685   for (size_t i = 0; i < count; i++) {
 686     MemRegion curr_range = ranges[i];
 687     HeapWord* start_address = curr_range.start();
 688     size_t word_size = curr_range.word_size();
 689     HeapWord* last_address = curr_range.last();
 690     size_t commits = 0;
 691 
 692     guarantee(reserved.contains(start_address) && reserved.contains(last_address),
 693               "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
 694               p2i(start_address), p2i(last_address));
 695     guarantee(start_address > prev_last_addr,
 696               "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
 697               p2i(start_address), p2i(prev_last_addr));
 698     prev_last_addr = last_address;
 699 
 700     // Check for ranges that start in the same G1 region in which the previous
 701     // range ended, and adjust the start address so we don't try to allocate
 702     // the same region again. If the current range is entirely within that
 703     // region, skip it, just adjusting the recorded top.
 704     HeapRegion* start_region = _hrm.addr_to_region(start_address);


 709         start_region->set_top(last_address + 1);
 710         continue;
 711       }
 712       start_region->set_top(start_address);
 713       curr_range = MemRegion(start_address, last_address + 1);
 714       start_region = _hrm.addr_to_region(start_address);
 715     }
 716 
 717     // Perform the actual region allocation, exiting if it fails.
 718     // Then note how much new space we have allocated.
 719     if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
 720       return false;
 721     }
 722     increase_used(word_size * HeapWordSize);
 723     if (commits != 0) {
 724       log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
 725                                 HeapRegion::GrainWords * HeapWordSize * commits);
 726 
 727     }
 728 
 729     // Mark each G1 region touched by the range as archive, add it to the old set,
 730     // and set the allocation context and top.
 731     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
 732     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 733     prev_last_region = last_region;
 734 
 735     while (curr_region != NULL) {
 736       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 737              "Region already in use (index %u)", curr_region->hrm_index());
 738       curr_region->set_allocation_context(AllocationContext::system());
 739       curr_region->set_archive();




 740       _hr_printer.alloc(curr_region);
 741       _old_set.add(curr_region);
 742       if (curr_region != last_region) {
 743         curr_region->set_top(curr_region->end());



 744         curr_region = _hrm.next_region_in_heap(curr_region);
 745       } else {
 746         curr_region->set_top(last_address + 1);



 747         curr_region = NULL;
 748       }
 749     }
 750 
 751     // Notify mark-sweep of the archive range.
 752     G1ArchiveAllocator::set_range_archive(curr_range, true);
 753   }
 754   return true;
 755 }
 756 
 757 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
 758   assert(!is_init_completed(), "Expect to be called at JVM init time");
 759   assert(ranges != NULL, "MemRegion array NULL");
 760   assert(count != 0, "No MemRegions provided");
 761   MemRegion reserved = _hrm.reserved();
 762   HeapWord *prev_last_addr = NULL;
 763   HeapRegion* prev_last_region = NULL;
 764 
 765   // For each MemRegion, create filler objects, if needed, in the G1 regions
 766   // that contain the address range. The address range actually within the
 767   // MemRegion will not be modified. That is assumed to have been initialized
 768   // elsewhere, probably via an mmap of archived heap data.
 769   MutexLockerEx x(Heap_lock);
 770   for (size_t i = 0; i < count; i++) {
 771     HeapWord* start_address = ranges[i].start();
 772     HeapWord* last_address = ranges[i].last();


5196     _free_list_only(free_list_only),
5197     _old_set(old_set), _hrm(hrm), _total_used(0) {
5198     assert(_hrm->num_free_regions() == 0, "pre-condition");
5199     if (!free_list_only) {
5200       assert(_old_set->is_empty(), "pre-condition");
5201     }
5202   }
5203 
5204   bool doHeapRegion(HeapRegion* r) {
5205     if (r->is_empty()) {
5206       // Add free regions to the free list
5207       r->set_free();
5208       r->set_allocation_context(AllocationContext::system());
5209       _hrm->insert_into_free_list(r);
5210     } else if (!_free_list_only) {
5211 
5212       if (r->is_humongous()) {
5213         // We ignore humongous regions. We left the humongous set unchanged.
5214       } else {
5215         assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
5216         // We now consider all regions old, so register as such. Leave
5217         // archive regions set that way, however, while still adding
5218         // them to the old set.
5219         if (!r->is_archive()) {
5220           r->set_old();
5221         }
5222         _old_set->add(r);
5223       }
5224       _total_used += r->used();
5225     }
5226 
5227     return false;
5228   }
5229 
5230   size_t total_used() {
5231     return _total_used;
5232   }
5233 };
5234 
5235 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
5236   assert_at_safepoint(true /* should_be_vm_thread */);
5237 
5238   if (!free_list_only) {
5239     _eden.clear();
5240     _survivor.clear();
5241   }




 597     // first attempt (without holding the Heap_lock) here and the
 598     // follow-on attempt will be at the start of the next loop
 599     // iteration (after taking the Heap_lock).
 600     result = _allocator->attempt_allocation(word_size, context);
 601     if (result != NULL) {
 602       return result;
 603     }
 604 
 605     // Give a warning if we seem to be looping forever.
 606     if ((QueuedAllocationWarningCount > 0) &&
 607         (try_count % QueuedAllocationWarningCount == 0)) {
 608       log_warning(gc)("G1CollectedHeap::attempt_allocation_slow() "
 609                       "retries %d times", try_count);
 610     }
 611   }
 612 
 613   ShouldNotReachHere();
 614   return NULL;
 615 }
 616 
 617 void G1CollectedHeap::begin_archive_alloc_range(bool open) {
 618   assert_at_safepoint(true /* should_be_vm_thread */);
 619   if (_archive_allocator == NULL) {
 620     _archive_allocator = G1ArchiveAllocator::create_allocator(this, open);
 621   }
 622 }
 623 
 624 bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
 625   // Allocations in archive regions cannot be of a size that would be considered
 626   // humongous even for a minimum-sized region, because G1 region sizes/boundaries
 627   // may be different at archive-restore time.
 628   return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
 629 }
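// A worked example for the check above, assuming the minimum G1 region
// size of 1M: the humongous threshold for a 1M region is half of it, so
// any archive allocation of 512K (64K words on a 64-bit VM) or more is
// rejected.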
 630 
 631 HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
 632   assert_at_safepoint(true /* should_be_vm_thread */);
 633   assert(_archive_allocator != NULL, "_archive_allocator not initialized");
 634   if (is_archive_alloc_too_large(word_size)) {
 635     return NULL;
 636   }
 637   return _archive_allocator->archive_mem_allocate(word_size);
 638 }
 639 
 640 void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,


 644 
 645   // Call complete_archive to do the real work, filling in the MemRegion
 646   // array with the archive regions.
 647   _archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
 648   delete _archive_allocator;
 649   _archive_allocator = NULL;
 650 }
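// Taken together, the dump-time protocol is roughly: begin_archive_alloc_range()
// creates the allocator, archive_mem_allocate() is then called once per object
// being archived, and end_archive_alloc_range() returns the covered MemRegions
// and destroys the allocator again.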
 651 
 652 bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
 653   assert(ranges != NULL, "MemRegion array NULL");
 654   assert(count != 0, "No MemRegions provided");
 655   MemRegion reserved = _hrm.reserved();
 656   for (size_t i = 0; i < count; i++) {
 657     if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
 658       return false;
 659     }
 660   }
 661   return true;
 662 }
 663 
 664 bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
 665                                              size_t count,
 666                                              bool open) {
 667   assert(!is_init_completed(), "Expect to be called at JVM init time");
 668   assert(ranges != NULL, "MemRegion array NULL");
 669   assert(count != 0, "No MemRegions provided");
 670   MutexLockerEx x(Heap_lock);
 671 
 672   MemRegion reserved = _hrm.reserved();
 673   HeapWord* prev_last_addr = NULL;
 674   HeapRegion* prev_last_region = NULL;
 675 
 676   // Temporarily disable pretouching of heap pages. This interface is used
 677   // when mmap'ing archived heap data in, so pre-touching is wasted.
 678   FlagSetting fs(AlwaysPreTouch, false);
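  // FlagSetting saves the current AlwaysPreTouch value and restores it when
  // this scope is left, so pre-touching is only suppressed for this call.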
 679 
 680   // Enable archive object checking used by G1MarkSweep. We have to let it know
 681   // about each archive range, so that objects in those ranges aren't marked.
 682   G1ArchiveAllocator::enable_archive_object_check();
 683 
 684   // For each specified MemRegion range, allocate the corresponding G1
 685   // regions and mark them as archive regions. We expect the ranges
 686   // in ascending starting address order, without overlap.
 687   for (size_t i = 0; i < count; i++) {
 688     MemRegion curr_range = ranges[i];
 689     HeapWord* start_address = curr_range.start();
 690     size_t word_size = curr_range.word_size();
 691     HeapWord* last_address = curr_range.last();
 692     size_t commits = 0;
 693 
 694     guarantee(reserved.contains(start_address) && reserved.contains(last_address),
 695               "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
 696               p2i(start_address), p2i(last_address));
 697     guarantee(start_address > prev_last_addr,
 698               "Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
 699               p2i(start_address), p2i(prev_last_addr));
 700     prev_last_addr = last_address;
 701 
 702     // Check for ranges that start in the same G1 region in which the previous
 703     // range ended, and adjust the start address so we don't try to allocate
 704     // the same region again. If the current range is entirely within that
 705     // region, skip it, just adjusting the recorded top.
 706     HeapRegion* start_region = _hrm.addr_to_region(start_address);


 711         start_region->set_top(last_address + 1);
 712         continue;
 713       }
 714       start_region->set_top(start_address);
 715       curr_range = MemRegion(start_address, last_address + 1);
 716       start_region = _hrm.addr_to_region(start_address);
 717     }
 718 
 719     // Perform the actual region allocation, exiting if it fails.
 720     // Then note how much new space we have allocated.
 721     if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
 722       return false;
 723     }
 724     increase_used(word_size * HeapWordSize);
 725     if (commits != 0) {
 726       log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
 727                                 HeapRegion::GrainWords * HeapWordSize * commits);
 728 
 729     }
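    // 'commits' is the number of regions the region manager had to newly
    // commit for this range; HeapRegion::GrainWords * HeapWordSize is one
    // region's size in bytes, so the product is the size of the expansion
    // being logged.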
 730 
 731     // Mark each G1 region touched by the range as archive, add it to
 732     // the old set, and set the allocation context and top.
 733     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
 734     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 735     prev_last_region = last_region;
 736 
 737     while (curr_region != NULL) {
 738       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 739              "Region already in use (index %u)", curr_region->hrm_index());
 740       curr_region->set_allocation_context(AllocationContext::system());
 741       if (open) {
 742         curr_region->set_open_archive();
 743       } else {
 744         curr_region->set_closed_archive();
 745       }
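      // Roughly, the open/closed distinction: objects in closed archive
      // regions are never marked or modified by GC and must not reference
      // anything outside the closed archive, while open archive regions may
      // reference the rest of the heap and are therefore still traced.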
 746       _hr_printer.alloc(curr_region);
 747       _old_set.add(curr_region);
 748       if (curr_region != last_region) {
 749         HeapWord* top = curr_region->end();
 750         curr_region->set_top(top);
 751         curr_region->set_first_dead(top);
 752         curr_region->set_end_of_live(top);
 753         curr_region = _hrm.next_region_in_heap(curr_region);
 754       } else {
 755         HeapWord* top = last_address + 1;
 756         curr_region->set_top(top);
 757         curr_region->set_first_dead(top);
 758         curr_region->set_end_of_live(top);
 759         curr_region = NULL;
 760       }
 761     }
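    // Top, first-dead and end-of-live were all set to the effective top of
    // each region above; this marks the regions as fully live with no dead
    // space, which presumably lets the full-GC phases walk past the archive
    // contents without touching them.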
 762 
 763     // Notify mark-sweep of the archive range.
 764     G1ArchiveAllocator::set_range_archive(curr_range, open);
 765   }
 766   return true;
 767 }
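// A minimal sketch of the expected restore-time calling sequence (the caller
// shown here is illustrative, not actual VM code):
//
//   MemRegion ranges[n] = ...;                                   // from the archive
//   if (!g1h->check_archive_addresses(ranges, n)) return false;  // fits in the reserved heap?
//   if (!g1h->alloc_archive_regions(ranges, n, /* open */ false)) return false;
//   // ... mmap the archived heap data into the allocated ranges ...
//   g1h->fill_archive_regions(ranges, n);                        // pad out the touched regions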
 768 
 769 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
 770   assert(!is_init_completed(), "Expect to be called at JVM init time");
 771   assert(ranges != NULL, "MemRegion array NULL");
 772   assert(count != 0, "No MemRegions provided");
 773   MemRegion reserved = _hrm.reserved();
 774   HeapWord *prev_last_addr = NULL;
 775   HeapRegion* prev_last_region = NULL;
 776 
 777   // For each MemRegion, create filler objects, if needed, in the G1 regions
 778   // that contain the address range. The address range actually within the
 779   // MemRegion will not be modified. That is assumed to have been initialized
 780   // elsewhere, probably via an mmap of archived heap data.
 781   MutexLockerEx x(Heap_lock);
 782   for (size_t i = 0; i < count; i++) {
 783     HeapWord* start_address = ranges[i].start();
 784     HeapWord* last_address = ranges[i].last();


5208     _free_list_only(free_list_only),
5209     _old_set(old_set), _hrm(hrm), _total_used(0) {
5210     assert(_hrm->num_free_regions() == 0, "pre-condition");
5211     if (!free_list_only) {
5212       assert(_old_set->is_empty(), "pre-condition");
5213     }
5214   }
5215 
5216   bool doHeapRegion(HeapRegion* r) {
5217     if (r->is_empty()) {
5218       // Add free regions to the free list
5219       r->set_free();
5220       r->set_allocation_context(AllocationContext::system());
5221       _hrm->insert_into_free_list(r);
5222     } else if (!_free_list_only) {
5223 
5224       if (r->is_humongous()) {
5225         // We ignore humongous regions. We left the humongous set unchanged.
5226       } else {
5227         assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
5228         // We now move all (non-humongous, non-old) regions to old gen, and register them as such.
5229         r->move_to_old();
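        // move_to_old() presumably leaves archive regions tagged as archive
        // (the previous code skipped set_old() for them explicitly) while
        // relabeling the remaining young regions as old.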




5230         _old_set->add(r);
5231       }
5232       _total_used += r->used();
5233     }
5234 
5235     return false;
5236   }
5237 
5238   size_t total_used() {
5239     return _total_used;
5240   }
5241 };
5242 
5243 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
5244   assert_at_safepoint(true /* should_be_vm_thread */);
5245 
5246   if (!free_list_only) {
5247     _eden.clear();
5248     _survivor.clear();
5249   }

