
src/share/vm/memory/collectorPolicy.cpp

rev 8024 : imported patch event1
* * *
imported patch event2


 580 
 581   DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
 582 }
 583 
 584 HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
 585                                         bool is_tlab,
 586                                         bool* gc_overhead_limit_was_exceeded) {
 587   GenCollectedHeap *gch = GenCollectedHeap::heap();
 588 
 589   debug_only(gch->check_for_valid_allocation_state());
 590   assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
 591 
 592   // In general gc_overhead_limit_was_exceeded should be false, so
 593   // set it to false here and only set it to true if the gc time
 594   // limit is exceeded, as checked below.
 595   *gc_overhead_limit_was_exceeded = false;
 596 
 597   HeapWord* result = NULL;
 598 
 599   // Loop until the allocation is satisfied, or unsatisfied after GC.
 600   for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
 601     HandleMark hm; // Discard any handles allocated in each iteration.
 602 
 603     // First allocation attempt is lock-free.
 604     Generation *young = gch->young_gen();
 605     assert(young->supports_inline_contig_alloc(),
 606       "Otherwise, must do alloc within heap lock");
 607     if (young->should_allocate(size, is_tlab)) {
 608       result = young->par_allocate(size, is_tlab);
 609       if (result != NULL) {
 610         assert(gch->is_in_reserved(result), "result not in heap");
 611         return result;
 612       }
 613     }
 614     uint gc_count_before;  // Read inside the Heap_lock locked region.
 615     {
 616       MutexLocker ml(Heap_lock);
 617       if (PrintGC && Verbose) {
 618         gclog_or_tty->print_cr("GenCollectorPolicy::mem_allocate_work:"
 619                                " attempting locked slow path allocation");
 620       }


 654         JavaThread* jthr = JavaThread::current();
 655         if (!jthr->in_critical()) {
 656           MutexUnlocker mul(Heap_lock);
 657           // Wait for JNI critical section to be exited
 658           GC_locker::stall_until_clear();
 659           gclocker_stalled_count += 1;
 660           continue;
 661         } else {
 662           if (CheckJNICalls) {
 663             fatal("Possible deadlock due to allocating while"
 664                   " in jni critical section");
 665           }
 666           return NULL;
 667         }
 668       }
 669 
 670       // Read the gc count while the heap lock is held.
 671       gc_count_before = Universe::heap()->total_collections();
 672     }
 673 
 674     VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
 675     VMThread::execute(&op);
 676     if (op.prologue_succeeded()) {
 677       result = op.result();
 678       if (op.gc_locked()) {
 679          assert(result == NULL, "must be NULL if gc_locked() is true");
 680          continue;  // Retry and/or stall as necessary.
 681       }
 682 
 683       // Allocation has failed and a collection
 684       // has been done.  If the gc time limit was exceeded
 685       // this time, return NULL so that an out-of-memory
 686       // error will be thrown.  Clear gc_overhead_limit_exceeded
 687       // so that the overhead-exceeded state does not persist.
 688 
 689       const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
 690       const bool softrefs_clear = all_soft_refs_clear();
 691 
 692       if (limit_exceeded && softrefs_clear) {
 693         *gc_overhead_limit_was_exceeded = true;
 694         size_policy()->set_gc_overhead_limit_exceeded(false);


 799     return result;
 800   }
 801 
 802   assert(!should_clear_all_soft_refs(),
 803     "Flag should have been handled and cleared prior to this point");
 804 
 805   // What else?  We might try synchronous finalization later.  If the total
 806   // space available is large enough for the allocation, then a more
 807   // complete compaction phase than we've tried so far might be
 808   // appropriate.
 809   return NULL;
 810 }
 811 
 812 MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
 813                                                  ClassLoaderData* loader_data,
 814                                                  size_t word_size,
 815                                                  Metaspace::MetadataType mdtype) {
 816   uint loop_count = 0;
 817   uint gc_count = 0;
 818   uint full_gc_count = 0;

 819 
 820   assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");
 821 
 822   do {
 823     MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
 824     if (result != NULL) {
 825       return result;
 826     }
 827 
 828     if (GC_locker::is_active_and_needs_gc()) {
 829       // If the GC_locker is active, just expand and allocate.
 830       // If that does not succeed, wait if this thread is not
 831       // in a critical section itself.
 832       result =
 833         loader_data->metaspace_non_null()->expand_and_allocate(word_size,
 834                                                                mdtype);
 835       if (result != NULL) {
 836         return result;
 837       }
 838       JavaThread* jthr = JavaThread::current();


 848         if (CheckJNICalls) {
 849           fatal("Possible deadlock due to allocating while"
 850                 " in jni critical section");
 851         }
 852         return NULL;
 853       }
 854     }
 855 
 856     {  // Need the lock to read self-consistent gc counts.
 857       MutexLocker ml(Heap_lock);
 858       gc_count      = Universe::heap()->total_collections();
 859       full_gc_count = Universe::heap()->total_full_collections();
 860     }
 861 
 862     // Generate a VM operation
 863     VM_CollectForMetadataAllocation op(loader_data,
 864                                        word_size,
 865                                        mdtype,
 866                                        gc_count,
 867                                        full_gc_count,
 868                                        GCCause::_metadata_GC_threshold);

 869     VMThread::execute(&op);
 870 
 871     // If GC was locked out, try again. Check this before checking for success,
 872     // because the prologue could have succeeded while the GC was still locked out.
 873     if (op.gc_locked()) {
 874       continue;
 875     }
 876 
 877     if (op.prologue_succeeded()) {
 878       return op.result();
 879     }
 880     loop_count++;
 881     if ((QueuedAllocationWarningCount > 0) &&
 882         (loop_count % QueuedAllocationWarningCount == 0)) {
 883       warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
 884               " size=" SIZE_FORMAT, loop_count, word_size);
 885     }
 886   } while (true);  // Until a GC is done
 887 }
 888 
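
The listing above is the pre-patch code. A detail worth keeping in mind when reviewing mem_allocate_work() is its out-parameter contract: a NULL result with *gc_overhead_limit_was_exceeded set to true means the GC time limit was hit with soft references already cleared, and the caller is expected to raise the "GC overhead limit exceeded" flavor of OutOfMemoryError. A minimal caller sketch follows; the helper is hypothetical and not part of this patch, and only report_java_out_of_memory() is an actual HotSpot routine:

    // Hypothetical caller sketch, illustrating the out-parameter contract.
    HeapWord* allocate_or_report(GenCollectorPolicy* policy, size_t size) {
      bool overhead_limit_exceeded = false;
      HeapWord* obj = policy->mem_allocate_work(size, false /* is_tlab */,
                                                &overhead_limit_exceeded);
      if (obj == NULL) {
        if (overhead_limit_exceeded) {
          // GC time limit hit with soft refs already cleared: report the
          // overhead-limit flavor of OOME rather than plain heap exhaustion.
          report_java_out_of_memory("GC overhead limit exceeded");
        } else {
          report_java_out_of_memory("Java heap space");
        }
      }
      return obj;
    }

The patched version of the same region follows.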
* * *
 580 
 581   DEBUG_ONLY(GenCollectorPolicy::assert_size_info();)
 582 }
 583 
 584 HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
 585                                         bool is_tlab,
 586                                         bool* gc_overhead_limit_was_exceeded) {
 587   GenCollectedHeap *gch = GenCollectedHeap::heap();
 588 
 589   debug_only(gch->check_for_valid_allocation_state());
 590   assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
 591 
 592   // In general gc_overhead_limit_was_exceeded should be false, so
 593   // set it to false here and only set it to true if the gc time
 594   // limit is exceeded, as checked below.
 595   *gc_overhead_limit_was_exceeded = false;
 596 
 597   HeapWord* result = NULL;
 598 
 599   // Loop until the allocation is satisfied, or unsatisfied after GC.
 600   for (uint try_count = 1, gclocker_stalled_count = 0, gc_attempt = 1; /* return or throw */; try_count += 1) {
 601     HandleMark hm; // Discard any handles allocated in each iteration.
 602 
 603     // First allocation attempt is lock-free.
 604     Generation *young = gch->young_gen();
 605     assert(young->supports_inline_contig_alloc(),
 606       "Otherwise, must do alloc within heap lock");
 607     if (young->should_allocate(size, is_tlab)) {
 608       result = young->par_allocate(size, is_tlab);
 609       if (result != NULL) {
 610         assert(gch->is_in_reserved(result), "result not in heap");
 611         return result;
 612       }
 613     }
 614     uint gc_count_before;  // Read inside the Heap_lock locked region.
 615     {
 616       MutexLocker ml(Heap_lock);
 617       if (PrintGC && Verbose) {
 618         gclog_or_tty->print_cr("GenCollectorPolicy::mem_allocate_work:"
 619                                " attempting locked slow path allocation");
 620       }


 654         JavaThread* jthr = JavaThread::current();
 655         if (!jthr->in_critical()) {
 656           MutexUnlocker mul(Heap_lock);
 657           // Wait for JNI critical section to be exited
 658           GC_locker::stall_until_clear();
 659           gclocker_stalled_count += 1;
 660           continue;
 661         } else {
 662           if (CheckJNICalls) {
 663             fatal("Possible deadlock due to allocating while"
 664                   " in jni critical section");
 665           }
 666           return NULL;
 667         }
 668       }
 669 
 670       // Read the gc count while the heap lock is held.
 671       gc_count_before = Universe::heap()->total_collections();
 672     }
 673 
 674     VM_GenCollectForAllocation op(size, is_tlab, gc_count_before, gc_attempt++);
 675     VMThread::execute(&op);
 676     if (op.prologue_succeeded()) {
 677       result = op.result();
 678       if (op.gc_locked()) {
 679          assert(result == NULL, "must be NULL if gc_locked() is true");
 680          continue;  // Retry and/or stall as necessary.
 681       }
 682 
 683       // Allocation has failed and a collection
 684       // has been done.  If the gc time limit was exceeded
 685       // this time, return NULL so that an out-of-memory
 686       // error will be thrown.  Clear gc_overhead_limit_exceeded
 687       // so that the overhead-exceeded state does not persist.
 688 
 689       const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
 690       const bool softrefs_clear = all_soft_refs_clear();
 691 
 692       if (limit_exceeded && softrefs_clear) {
 693         *gc_overhead_limit_was_exceeded = true;
 694         size_policy()->set_gc_overhead_limit_exceeded(false);


 799     return result;
 800   }
 801 
 802   assert(!should_clear_all_soft_refs(),
 803     "Flag should have been handled and cleared prior to this point");
 804 
 805   // What else?  We might try synchronous finalization later.  If the total
 806   // space available is large enough for the allocation, then a more
 807   // complete compaction phase than we've tried so far might be
 808   // appropriate.
 809   return NULL;
 810 }
 811 
 812 MetaWord* CollectorPolicy::satisfy_failed_metadata_allocation(
 813                                                  ClassLoaderData* loader_data,
 814                                                  size_t word_size,
 815                                                  Metaspace::MetadataType mdtype) {
 816   uint loop_count = 0;
 817   uint gc_count = 0;
 818   uint full_gc_count = 0;
 819   uint gc_attempt = 1;
 820 
 821   assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");
 822 
 823   do {
 824     MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
 825     if (result != NULL) {
 826       return result;
 827     }
 828 
 829     if (GC_locker::is_active_and_needs_gc()) {
 830       // If the GC_locker is active, just expand and allocate.
 831       // If that does not succeed, wait if this thread is not
 832       // in a critical section itself.
 833       result =
 834         loader_data->metaspace_non_null()->expand_and_allocate(word_size,
 835                                                                mdtype);
 836       if (result != NULL) {
 837         return result;
 838       }
 839       JavaThread* jthr = JavaThread::current();


 849         if (CheckJNICalls) {
 850           fatal("Possible deadlock due to allocating while"
 851                 " in jni critical section");
 852         }
 853         return NULL;
 854       }
 855     }
 856 
 857     {  // Need the lock to read self-consistent gc counts.
 858       MutexLocker ml(Heap_lock);
 859       gc_count      = Universe::heap()->total_collections();
 860       full_gc_count = Universe::heap()->total_full_collections();
 861     }
 862 
 863     // Generate a VM operation
 864     VM_CollectForMetadataAllocation op(loader_data,
 865                                        word_size,
 866                                        mdtype,
 867                                        gc_count,
 868                                        full_gc_count,
 869                                        GCCause::_metadata_GC_threshold,
 870                                        gc_attempt++);
 871     VMThread::execute(&op);
 872 
 873     // If GC was locked out, try again. Check this before checking for success,
 874     // because the prologue could have succeeded while the GC was still locked out.
 875     if (op.gc_locked()) {
 876       continue;
 877     }
 878 
 879     if (op.prologue_succeeded()) {
 880       return op.result();
 881     }
 882     loop_count++;
 883     if ((QueuedAllocationWarningCount > 0) &&
 884         (loop_count % QueuedAllocationWarningCount == 0)) {
 885       warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
 886               " size=" SIZE_FORMAT, loop_count, word_size);
 887     }
 888   } while (true);  // Until a GC is done
 889 }
 890 
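
Both call sites above now pass a gc_attempt counter into the VM operations, but the matching declarations in vmGCOperations.hpp are not part of this page. Given the patch names ("event1", "event2"), the counter presumably feeds new GC event payloads. A minimal sketch of how VM_GenCollectForAllocation might carry it, with field and accessor names assumed rather than taken from the patch:

    // Sketch only: assumes the operation simply stores the attempt number;
    // the real patch may plumb the value differently.
    class VM_GenCollectForAllocation : public VM_GC_Operation {
      size_t    _size;        // words requested by the failed allocation
      bool      _tlab;        // whether this satisfies a TLAB request
      uint      _gc_attempt;  // ordinal of this allocation attempt (new)
      HeapWord* _result;
     public:
      VM_GenCollectForAllocation(size_t size, bool tlab,
                                 uint gc_count_before, uint gc_attempt)
        : VM_GC_Operation(gc_count_before, GCCause::_allocation_failure),
          _size(size), _tlab(tlab), _gc_attempt(gc_attempt), _result(NULL) {}
      uint gc_attempt() const { return _gc_attempt; }  // e.g. for event payloads
      HeapWord* result() const { return _result; }
    };

VM_CollectForMetadataAllocation would take the analogous extra argument after GCCause::_metadata_GC_threshold, matching its call site above.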

