
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 12309 : [mq]: 8169703-crash-with-alwayspretouch

Old version (before rev 12309):

 409 
 410     // Policy: Try only empty regions (i.e. already committed first). Maybe we
 411     // are lucky enough to find some.
 412     first = _hrm.find_contiguous_only_empty(obj_regions);
 413     if (first != G1_NO_HRM_INDEX) {
 414       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 415     }
 416   }
 417 
 418   if (first == G1_NO_HRM_INDEX) {
 419     // Policy: We could not find enough regions for the humongous object in the
 420     // free list. Look through the heap to find a mix of free and uncommitted regions.
 421     // If so, try expansion.
 422     first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
 423     if (first != G1_NO_HRM_INDEX) {
 424       // We found something. Make sure these regions are committed, i.e. expand
 425       // the heap. Alternatively we could do a defragmentation GC.
 426       log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
 427                                     word_size * HeapWordSize);
 428 
 429 
 430       _hrm.expand_at(first, obj_regions);
 431       g1_policy()->record_new_heap_size(num_regions());
 432 
 433 #ifdef ASSERT
 434       for (uint i = first; i < first + obj_regions; ++i) {
 435         HeapRegion* hr = region_at(i);
 436         assert(hr->is_free(), "sanity");
 437         assert(hr->is_empty(), "sanity");
 438         assert(is_on_master_free_list(hr), "sanity");
 439       }
 440 #endif
 441       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 442     } else {
 443       // Policy: Potentially trigger a defragmentation GC.
 444     }
 445   }
 446 
 447   HeapWord* result = NULL;
 448   if (first != G1_NO_HRM_INDEX) {
 449     result = humongous_obj_allocate_initialize_regions(first, obj_regions,
 450                                                        word_size, context);


 722 
 723     // Check for ranges that start in the same G1 region in which the previous
 724     // range ended, and adjust the start address so we don't try to allocate
 725     // the same region again. If the current range is entirely within that
 726     // region, skip it, just adjusting the recorded top.
 727     HeapRegion* start_region = _hrm.addr_to_region(start_address);
 728     if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
 729       start_address = start_region->end();
 730       if (start_address > last_address) {
 731         increase_used(word_size * HeapWordSize);
 732         start_region->set_top(last_address + 1);
 733         continue;
 734       }
 735       start_region->set_top(start_address);
 736       curr_range = MemRegion(start_address, last_address + 1);
 737       start_region = _hrm.addr_to_region(start_address);
 738     }
 739 
 740     // Perform the actual region allocation, exiting if it fails.
 741     // Then note how much new space we have allocated.
 742     if (!_hrm.allocate_containing_regions(curr_range, &commits)) {
 743       return false;
 744     }
 745     increase_used(word_size * HeapWordSize);
 746     if (commits != 0) {
 747       log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
 748                                 HeapRegion::GrainWords * HeapWordSize * commits);
 749 
 750     }
 751 
 752     // Mark each G1 region touched by the range as archive, add it to the old set,
 753     // and set the allocation context and top.
 754     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
 755     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 756     prev_last_region = last_region;
 757 
 758     while (curr_region != NULL) {
 759       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 760              "Region already in use (index %u)", curr_region->hrm_index());
 761       curr_region->set_allocation_context(AllocationContext::system());
 762       curr_region->set_archive();


New version (rev 12309 applied):

 409 
 410     // Policy: Try only empty regions (i.e. already committed first). Maybe we
 411     // are lucky enough to find some.
 412     first = _hrm.find_contiguous_only_empty(obj_regions);
 413     if (first != G1_NO_HRM_INDEX) {
 414       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 415     }
 416   }
 417 
 418   if (first == G1_NO_HRM_INDEX) {
 419     // Policy: We could not find enough regions for the humongous object in the
 420     // free list. Look through the heap to find a mix of free and uncommitted regions.
 421     // If so, try expansion.
 422     first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
 423     if (first != G1_NO_HRM_INDEX) {
 424       // We found something. Make sure these regions are committed, i.e. expand
 425       // the heap. Alternatively we could do a defragmentation GC.
 426       log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
 427                                     word_size * HeapWordSize);
 428 
 429       _hrm.expand_at(first, obj_regions, workers());

 430       g1_policy()->record_new_heap_size(num_regions());
 431 
 432 #ifdef ASSERT
 433       for (uint i = first; i < first + obj_regions; ++i) {
 434         HeapRegion* hr = region_at(i);
 435         assert(hr->is_free(), "sanity");
 436         assert(hr->is_empty(), "sanity");
 437         assert(is_on_master_free_list(hr), "sanity");
 438       }
 439 #endif
 440       _hrm.allocate_free_regions_starting_at(first, obj_regions);
 441     } else {
 442       // Policy: Potentially trigger a defragmentation GC.
 443     }
 444   }
 445 
 446   HeapWord* result = NULL;
 447   if (first != G1_NO_HRM_INDEX) {
 448     result = humongous_obj_allocate_initialize_regions(first, obj_regions,
 449                                                        word_size, context);
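
The two "Policy:" comments above describe a two-step strategy: first search only regions that are already committed and empty, and only if that fails look for a contiguous run that may include uncommitted regions, expand the heap over it, and hand the regions out; if even that fails, the caller may fall back to a (defragmentation) GC. A minimal standalone sketch of that control flow, with a made-up RegionManagerSketch standing in for the HeapRegionManager calls used above (none of the names or bodies below are HotSpot code):

    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-ins only; plays the role of G1_NO_HRM_INDEX.
    static const uint32_t NO_REGION_INDEX = UINT32_MAX;

    struct RegionManagerSketch {
      uint32_t committed_free;   // contiguous committed-but-empty regions available
      uint32_t uncommitted;      // regions that could still be committed (expansion room)

      // Step 1: look only at regions that are already committed and empty.
      uint32_t find_contiguous_only_empty(uint32_t num) const {
        return (num <= committed_free) ? 0 : NO_REGION_INDEX;
      }
      // Step 2: also consider regions that are not committed yet.
      uint32_t find_contiguous_empty_or_unavailable(uint32_t num) const {
        return (num <= committed_free + uncommitted) ? 0 : NO_REGION_INDEX;
      }
      void expand_at(uint32_t /*first*/, uint32_t num) {
        // Commit (and, with AlwaysPreTouch, pretouch) the regions that are still missing.
        uint32_t missing = (num > committed_free) ? num - committed_free : 0;
        uncommitted    -= missing;
        committed_free += missing;
      }
      void allocate_free_regions_starting_at(uint32_t /*first*/, uint32_t num) {
        committed_free = (committed_free >= num) ? committed_free - num : 0;
      }
    };

    // Mirrors the control flow of the hunk: cheap committed-only search first,
    // then a search that allows expansion, otherwise give up (caller may GC).
    uint32_t reserve_humongous_regions(RegionManagerSketch& hrm, uint32_t obj_regions) {
      uint32_t first = hrm.find_contiguous_only_empty(obj_regions);
      if (first == NO_REGION_INDEX) {
        first = hrm.find_contiguous_empty_or_unavailable(obj_regions);
        if (first != NO_REGION_INDEX) {
          hrm.expand_at(first, obj_regions);   // make sure the regions are committed
        }
      }
      if (first != NO_REGION_INDEX) {
        hrm.allocate_free_regions_starting_at(first, obj_regions);
      }
      return first;   // NO_REGION_INDEX: not enough room even after expansion
    }

    int main() {
      RegionManagerSketch hrm = {2, 8};   // 2 committed free regions, 8 expandable
      // 4 regions needed, so this exercises the expansion path.
      printf("index: %u\n", (unsigned)reserve_humongous_regions(hrm, 4));
      return 0;
    }
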


 721 
 722     // Check for ranges that start in the same G1 region in which the previous
 723     // range ended, and adjust the start address so we don't try to allocate
 724     // the same region again. If the current range is entirely within that
 725     // region, skip it, just adjusting the recorded top.
 726     HeapRegion* start_region = _hrm.addr_to_region(start_address);
 727     if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
 728       start_address = start_region->end();
 729       if (start_address > last_address) {
 730         increase_used(word_size * HeapWordSize);
 731         start_region->set_top(last_address + 1);
 732         continue;
 733       }
 734       start_region->set_top(start_address);
 735       curr_range = MemRegion(start_address, last_address + 1);
 736       start_region = _hrm.addr_to_region(start_address);
 737     }
 738 
 739     // Perform the actual region allocation, exiting if it fails.
 740     // Then note how much new space we have allocated.
 741     if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
 742       return false;
 743     }
 744     increase_used(word_size * HeapWordSize);
 745     if (commits != 0) {
 746       log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B",
 747                                 HeapRegion::GrainWords * HeapWordSize * commits);
 748 
 749     }
 750 
 751     // Mark each G1 region touched by the range as archive, add it to the old set,
 752     // and set the allocation context and top.
 753     HeapRegion* curr_region = _hrm.addr_to_region(start_address);
 754     HeapRegion* last_region = _hrm.addr_to_region(last_address);
 755     prev_last_region = last_region;
 756 
 757     while (curr_region != NULL) {
 758       assert(curr_region->is_empty() && !curr_region->is_pinned(),
 759              "Region already in use (index %u)", curr_region->hrm_index());
 760       curr_region->set_allocation_context(AllocationContext::system());
 761       curr_region->set_archive();
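
The comment at lines 722-725 covers the case where an archive range begins in the same G1 region in which the previous range ended: the start is bumped to the region boundary so the region is not claimed twice, and if the whole range fits inside that region only the region's top is moved. A small worked example of that adjustment, using plain integers for addresses and an assumed 1 MiB region size (region_index() and region_end() are illustrative helpers, not HotSpot APIs):

    #include <cstdint>
    #include <cstdio>

    static const uint64_t REGION_BYTES = 1 << 20;   // pretend 1 MiB G1 regions

    // ~ _hrm.addr_to_region(addr)
    static uint64_t region_index(uint64_t addr) { return addr / REGION_BYTES; }
    // ~ HeapRegion::end() of the region containing addr (exclusive)
    static uint64_t region_end(uint64_t addr)   { return (region_index(addr) + 1) * REGION_BYTES; }

    int main() {
      // The previous archive range ended inside region 3 and the next one starts
      // there too, so region 3 must not be allocated a second time.
      uint64_t prev_last = 3 * REGION_BYTES + 0x4000;
      uint64_t start     = 3 * REGION_BYTES + 0x8000;
      uint64_t last      = 5 * REGION_BYTES + 0x100;

      if (region_index(start) == region_index(prev_last)) {
        start = region_end(start);               // skip the already-claimed region
        if (start > last) {
          // Whole range sits inside the previous region: nothing to allocate,
          // only that region's top needs to move.
          printf("range already covered\n");
          return 0;
        }
      }
      printf("allocate regions %llu..%llu\n",
             (unsigned long long)region_index(start),
             (unsigned long long)region_index(last));   // prints 4..5
      return 0;
    }
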

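The visible change in these hunks is the extra workers() argument now passed to _hrm.expand_at() (line 429) and _hrm.allocate_containing_regions() (line 741), which makes the heap's worker gang available to the code that commits new regions; given the bug being fixed (8169703, a crash with AlwaysPreTouch), the intent is presumably that those workers pretouch the newly committed memory rather than a single thread. The general technique can be sketched outside HotSpot as follows; std::thread stands in for the work gang, and pretouch_parallel with its page/chunk handling is invented for illustration:

    #include <cstddef>
    #include <thread>
    #include <vector>

    // Touch one byte per page so the OS backs the range with real memory,
    // splitting the work across several threads (stand-in for a GC work gang).
    void pretouch_parallel(char* base, std::size_t bytes,
                           std::size_t page_size, unsigned num_workers) {
      if (num_workers == 0) num_workers = 1;
      std::size_t chunk = (bytes + num_workers - 1) / num_workers;
      chunk = ((chunk + page_size - 1) / page_size) * page_size;   // whole pages per worker

      std::vector<std::thread> gang;
      for (std::size_t offset = 0; offset < bytes; offset += chunk) {
        std::size_t len = (offset + chunk <= bytes) ? chunk : bytes - offset;
        gang.emplace_back([base, offset, len, page_size]() {
          for (std::size_t p = 0; p < len; p += page_size) {
            base[offset + p] = 0;   // one write per page forces the OS to back it
          }
        });
      }
      for (std::thread& t : gang) {
        t.join();
      }
    }

    int main() {
      const std::size_t bytes = std::size_t(64) << 20;     // 64 MiB test range
      std::vector<char> backing(bytes);
      pretouch_parallel(backing.data(), bytes, 4096, 4);   // 4 "workers", 4 KiB pages
      return 0;
    }

Rounding each worker's chunk to whole pages keeps two workers from ever writing to the same page.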
