522
523 debug_only(gch->check_for_valid_allocation_state());
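// debug_only(...) compiles its argument only in debug builds; the check
// vanishes from product binaries.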
524 assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
525
526 // In general gc_overhead_limit_was_exceeded should be false so
527 // set it so here and reset it to true only if the gc time
528 // limit is being exceeded as checked below.
529 *gc_overhead_limit_was_exceeded = false;
530
531 HeapWord* result = NULL;
532
533 // Loop until the allocation is satisfied,
534 // or unsatisfied after GC.
535 for (int try_count = 1; /* return or throw */; try_count += 1) {
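// Each iteration makes (1) a lock-free fast-path attempt in the young
// generation, then (2) a locked slow-path attempt; the code elided further
// below falls back to a collection before retrying.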
536 HandleMark hm; // discard any handles allocated in each iteration
537
538 // First allocation attempt is lock-free.
539 Generation *gen0 = gch->get_gen(0);
540 assert(gen0->supports_inline_contig_alloc(),
541 "Otherwise, must do alloc within heap lock");
542 if (gen0->should_allocate(size, is_tlab)) {
543 result = gen0->par_allocate(size, is_tlab);
544 if (result != NULL) {
545 assert(gch->is_in_reserved(result), "result not in heap");
546 return result;
547 }
548 }
549 unsigned int gc_count_before; // read inside the Heap_lock locked region
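// Sampled while the Heap_lock is held, so a later VM operation can detect
// whether another thread's GC already ran in the meantime.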
550 {
551 MutexLocker ml(Heap_lock);
552 if (PrintGC && Verbose) {
553 gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
554 " attempting locked slow path allocation");
555 }
556 // Note that only large objects get a shot at being
557 // allocated in later generations.
558 bool first_only = ! should_try_older_generation_allocation(size);
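// When first_only is true, attempt_allocation() stops after the young
// generation; otherwise every generation gets a shot at the request.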
559
560 result = gch->attempt_allocation(size, is_tlab, first_only);
561 if (result != NULL) {
562 assert(gch->is_in_reserved(result), "result not in heap");
563 return result;
564 }

[... lines 565-631 not shown: GC-locker handling and the VM-operation collection path ...]

632 assert(result == NULL || gch->is_in_reserved(result),
633 "result not in heap");
634 return result;
635 }
636
637 // Give a warning if we seem to be looping forever.
638 if ((QueuedAllocationWarningCount > 0) &&
639 (try_count % QueuedAllocationWarningCount == 0)) {
640 warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
641 " size=%d %s", try_count, size, is_tlab ? "(TLAB)" : "");
642 }
643 }
644 }
645
646 HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
647 bool is_tlab) {
648 GenCollectedHeap *gch = GenCollectedHeap::heap();
649 HeapWord* result = NULL;
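// Walk the generations from oldest to youngest so the old generation gets
// the first chance to expand; stop as soon as one attempt succeeds.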
650 for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
651 Generation *gen = gch->get_gen(i);
652 if (gen->should_allocate(size, is_tlab)) {
653 result = gen->expand_and_allocate(size, is_tlab);
654 }
655 }
656 assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
657 return result;
658 }
659
660 HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
661 bool is_tlab) {
662 GenCollectedHeap *gch = GenCollectedHeap::heap();
663 GCCauseSetter x(gch, GCCause::_allocation_failure);
664 HeapWord* result = NULL;
665
666 assert(size != 0, "Precondition violated");
667 if (GC_locker::is_active_and_needs_gc()) {
668 // GC locker is active; instead of a collection we will attempt
669 // to expand the heap, if there's room for expansion.
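// (Some thread is inside a JNI critical section, so objects must not move;
// expansion is the only way to satisfy the request without collecting.)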
670 if (!gch->is_maximal_no_gc()) {
671 result = expand_heap_and_allocate(size, is_tlab);
672 }

[... lines 673-803 not shown: the remainder of satisfy_failed_allocation() and the start of satisfy_failed_metadata_allocation() ...]

804 return op.result();
805 }
806 loop_count++;
807 if ((QueuedAllocationWarningCount > 0) &&
808 (loop_count % QueuedAllocationWarningCount == 0)) {
809 warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
810 " size=%d", loop_count, word_size);
811 }
812 } while (true); // Until a GC is done
813 }
814
815 // Return true if any of the following is true:
816 // . the allocation won't fit into the current young gen heap
817 // . gc locker is occupied (jni critical section)
818 // . heap memory is tight -- the most recent previous collection
819 // was a full collection because a partial collection (would
820 // have) failed and is likely to fail again
821 bool GenCollectorPolicy::should_try_older_generation_allocation(
822 size_t word_size) const {
823 GenCollectedHeap* gch = GenCollectedHeap::heap();
824 size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
825 return (word_size > heap_word_size(gen0_capacity))
826 || GC_locker::is_active_and_needs_gc()
827 || gch->incremental_collection_failed();
828 }
829
830
831 //
832 // MarkSweepPolicy methods
833 //
834
835 MarkSweepPolicy::MarkSweepPolicy() {
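// initialize_all() runs the initialize_flags(), initialize_size_info() and
// initialize_generations() steps in sequence.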
836 initialize_all();
837 }
838
839 void MarkSweepPolicy::initialize_generations() {
840 _generations = new GenerationSpecPtr[number_of_generations()];
841 if (_generations == NULL)
842 vm_exit_during_initialization("Unable to allocate gen spec");
843
844 if (UseParNewGC) {
------------------------------------------------------------------------
Patched version of the same region (NULL checks added around the get_gen() calls):
522
523 debug_only(gch->check_for_valid_allocation_state());
524 assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
525
526 // In general gc_overhead_limit_was_exceeded should be false so
527 // set it so here and reset it to true only if the gc time
528 // limit is being exceeded as checked below.
529 *gc_overhead_limit_was_exceeded = false;
530
531 HeapWord* result = NULL;
532
533 // Loop until the allocation is satisfied,
534 // or unsatisfied after GC.
535 for (int try_count = 1; /* return or throw */; try_count += 1) {
536 HandleMark hm; // discard any handles allocated in each iteration
537
538 // First allocation attempt is lock-free.
539 Generation *gen0 = gch->get_gen(0);
// Guard the assert as well: gen0 must not be dereferenced before the NULL check.
540 assert(gen0 == NULL || gen0->supports_inline_contig_alloc(),
541   "Otherwise, must do alloc within heap lock");
542 if (gen0 != NULL && gen0->should_allocate(size, is_tlab)) {
543 result = gen0->par_allocate(size, is_tlab);
544 if (result != NULL) {
545 assert(gch->is_in_reserved(result), "result not in heap");
546 return result;
547 }
548 }
549 unsigned int gc_count_before; // read inside the Heap_lock locked region
550 {
551 MutexLocker ml(Heap_lock);
552 if (PrintGC && Verbose) {
553 gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
554 " attempting locked slow path allocation");
555 }
556 // Note that only large objects get a shot at being
557 // allocated in later generations.
558 bool first_only = ! should_try_older_generation_allocation(size);
559
560 result = gch->attempt_allocation(size, is_tlab, first_only);
561 if (result != NULL) {
562 assert(gch->is_in_reserved(result), "result not in heap");
563 return result;
564 }

[... lines 565-631 not shown: GC-locker handling and the VM-operation collection path ...]

632 assert(result == NULL || gch->is_in_reserved(result),
633 "result not in heap");
634 return result;
635 }
636
637 // Give a warning if we seem to be looping forever.
638 if ((QueuedAllocationWarningCount > 0) &&
639 (try_count % QueuedAllocationWarningCount == 0)) {
640 warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
641 " size=%d %s", try_count, size, is_tlab ? "(TLAB)" : "");
642 }
643 }
644 }
645
646 HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
647 bool is_tlab) {
648 GenCollectedHeap *gch = GenCollectedHeap::heap();
649 HeapWord* result = NULL;
650 for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
651 Generation *gen = gch->get_gen(i);
652 if (gen != NULL && gen->should_allocate(size, is_tlab)) {
653 result = gen->expand_and_allocate(size, is_tlab);
654 }
655 }
656 assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
657 return result;
658 }
659
660 HeapWord* GenCollectorPolicy::satisfy_failed_allocation(size_t size,
661 bool is_tlab) {
662 GenCollectedHeap *gch = GenCollectedHeap::heap();
663 GCCauseSetter x(gch, GCCause::_allocation_failure);
664 HeapWord* result = NULL;
665
666 assert(size != 0, "Precondition violated");
667 if (GC_locker::is_active_and_needs_gc()) {
668 // GC locker is active; instead of a collection we will attempt
669 // to expand the heap, if there's room for expansion.
670 if (!gch->is_maximal_no_gc()) {
671 result = expand_heap_and_allocate(size, is_tlab);
672 }

[... lines 673-803 not shown: the remainder of satisfy_failed_allocation() and the start of satisfy_failed_metadata_allocation() ...]

804 return op.result();
805 }
806 loop_count++;
807 if ((QueuedAllocationWarningCount > 0) &&
808 (loop_count % QueuedAllocationWarningCount == 0)) {
809 warning("satisfy_failed_metadata_allocation() retries %d times \n\t"
810 " size=%d", loop_count, word_size);
811 }
812 } while (true); // Until a GC is done
813 }
814
815 // Return true if any of the following is true:
816 // . the allocation won't fit into the current young gen heap
817 // . gc locker is occupied (jni critical section)
818 // . heap memory is tight -- the most recent previous collection
819 // was a full collection because a partial collection (would
820 // have) failed and is likely to fail again
821 bool GenCollectorPolicy::should_try_older_generation_allocation(
822 size_t word_size) const {
823 GenCollectedHeap* gch = GenCollectedHeap::heap();
824 Generation* gen = gch->get_gen(0);
825 guarantee(gen != NULL, "need generation");
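// Unlike assert(), guarantee() is also checked in product builds, so a NULL
// generation fails fast here instead of crashing on the dereference below.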
826
827 size_t gen0_capacity = gen->capacity_before_gc();
828 return (word_size > heap_word_size(gen0_capacity))
829 || GC_locker::is_active_and_needs_gc()
830 || gch->incremental_collection_failed();
831 }
832
833
834 //
835 // MarkSweepPolicy methods
836 //
837
838 MarkSweepPolicy::MarkSweepPolicy() {
839 initialize_all();
840 }
841
842 void MarkSweepPolicy::initialize_generations() {
843 _generations = new GenerationSpecPtr[number_of_generations()];
844 if (_generations == NULL)
845 vm_exit_during_initialization("Unable to allocate gen spec");
846
847 if (UseParNewGC) {