      // Policy: Potentially trigger a defragmentation GC.
    }
  }

  HeapWord* result = NULL;
  if (first != G1_NO_HRM_INDEX) {
    result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size);
    assert(result != NULL, "it should always return a valid result");

    // A successful humongous object allocation changes the used space
    // information of the old generation so we need to recalculate the
    // sizes and update the jstat counters here.
    g1mm()->update_sizes();
  }

  _verifier->verify_region_sets_optional();

  return result;
}
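
// Illustrative sketch (an assumption, not code from this file): a humongous
// allocation occupies a run of contiguous regions, so the region count
// (obj_regions above) is the request rounded up to whole regions. The helper
// name and parameter below are hypothetical.
static size_t humongous_obj_regions_sketch(size_t word_size,
                                           size_t region_size_in_words) {
  // Round up: even a one-word spill into the next region costs a full region.
  return (word_size + region_size_in_words - 1) / region_size_in_words;
}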

HeapWord* G1CollectedHeap::allocate_new_tlab(size_t min_word_size,
                                             size_t desired_word_size,
                                             size_t* actual_word_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(desired_word_size), "we do not allow humongous TLABs");

  return attempt_allocation(min_word_size, desired_word_size, actual_word_size);
}

HeapWord*
G1CollectedHeap::mem_allocate(size_t word_size,
                              bool* gc_overhead_limit_was_exceeded) {
  assert_heap_not_locked_and_not_at_safepoint();

  if (is_humongous(word_size)) {
    return attempt_allocation_humongous(word_size);
  }
  size_t dummy = 0;
  return attempt_allocation(word_size, word_size, &dummy);
}
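
// Illustrative sketch (an assumption, not code from this file): G1 treats an
// allocation as humongous once it no longer fits in half a heap region, which
// is why mem_allocate() above branches to attempt_allocation_humongous().
// The helper name and parameter below are hypothetical.
static bool is_humongous_sketch(size_t word_size, size_t region_size_in_words) {
  // Anything larger than half a region is allocated in dedicated,
  // contiguous humongous regions rather than inside a young region.
  return word_size > region_size_in_words / 2;
}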

HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
  ResourceMark rm; // For retrieving the thread names in log messages.

  // Make sure you read the note in attempt_allocation_humongous().

  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
         "be called for humongous allocation requests");

  // We should only get here after the first-level allocation attempt
  // (attempt_allocation()) failed to allocate.

  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
    // ... (elided in this excerpt: the locked allocation attempt and the
    // scheduling of a collection pause; the lines below are the tail of the
    // branch taken when a collection could not be scheduled because the
    // GCLocker is held) ...
        log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
                               SIZE_FORMAT " words", Thread::current()->name(), word_size);
        return NULL;
      }
      log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
      // The GCLocker is either active or the GCLocker initiated
      // GC has not yet been performed. Stall until it is and
      // then retry the allocation.
      GCLocker::stall_until_clear();
      gclocker_retry_count += 1;
    }

    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space. We do the
    // first attempt (without holding the Heap_lock) here and the
    // follow-on attempt will be at the start of the next loop
    // iteration (after taking the Heap_lock).
    size_t dummy = 0;
    result = _allocator->attempt_allocation(word_size, word_size, &dummy);
    if (result != NULL) {
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
                             Thread::current()->name(), try_count, word_size);
    }
  }

  ShouldNotReachHere();
  return NULL;
}
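
// Illustrative sketch (an assumption, not code from this file): the retry
// discipline of attempt_allocation_slow() reduced to its skeleton. The names
// below are hypothetical; try_allocate and stall_for_gc stand in for the
// allocator call and GCLocker::stall_until_clear() respectively.
static void* allocation_retry_sketch(size_t word_size,
                                     void* (*try_allocate)(size_t),
                                     void (*stall_for_gc)(),
                                     unsigned max_stalls) {
  unsigned stall_count = 0;
  while (true) {
    // Retry the allocation first: another thread may have completed a
    // collection and reclaimed enough space in the meantime.
    void* result = try_allocate(word_size);
    if (result != NULL) {
      return result;
    }
    // Give up after stalling on the GC locker too often, mirroring the
    // GCLockerRetryAllocationCount check above.
    if (stall_count > max_stalls) {
      return NULL;
    }
    stall_for_gc(); // Block until a (GCLocker-initiated) GC has had a chance to run.
    stall_count += 1;
  }
}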

void G1CollectedHeap::begin_archive_alloc_range(bool open) {
  assert_at_safepoint_on_vm_thread();
  if (_archive_allocator == NULL) {
    _archive_allocator = G1ArchiveAllocator::create_allocator(this, open);
  }
}

// ... (elided in this excerpt: the remainder of the archive-allocation
// interface; the lines below are the tail of a later function that maps
// archive MemRegions into the heap and fills any gaps with dummy objects) ...
        curr_region = _hrm.next_region_in_heap(curr_region);
      } else {
        curr_region = NULL;
      }
    }

    prev_last_addr = last_address;
    prev_last_region = last_region;

    // Fill the memory below the allocated range with dummy object(s),
    // if the region bottom does not match the range start, or if the previous
    // range ended within the same G1 region, and there is a gap.
    if (start_address != bottom_address) {
      size_t fill_size = pointer_delta(start_address, bottom_address);
      G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
      increase_used(fill_size * HeapWordSize);
    }
  }
}
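
// Note on the accounting above (a sketch of the arithmetic, assuming the
// usual HotSpot definitions): pointer_delta() on two HeapWord* pointers
// returns their distance in words, so the byte-accounting call multiplies by
// HeapWordSize. For example, on a 64-bit VM (HeapWordSize == 8), a 32-byte
// gap between bottom_address and start_address gives fill_size == 4 words
// and increase_used(32).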

inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
                                                     size_t desired_word_size,
                                                     size_t* actual_word_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(desired_word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size);

  if (result == NULL) {
    *actual_word_size = desired_word_size;
    result = attempt_allocation_slow(desired_word_size);
  }

  assert_heap_not_locked();
  if (result != NULL) {
    assert(*actual_word_size != 0, "Actual size must have been set here");
    dirty_young_block(result, *actual_word_size);
  }
  return result;
}
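
// Illustrative caller-side sketch (an assumption, not code from this file):
// how the (min, desired, actual) triple is typically consumed when refilling
// a TLAB. The allocator may return any size in [min, desired] and reports
// what it actually carved out via actual_word_size; the function name below
// and the direct call into the heap are hypothetical.
static HeapWord* refill_tlab_sketch(G1CollectedHeap* heap,
                                    size_t min_word_size,
                                    size_t desired_word_size) {
  size_t actual_word_size = 0;
  HeapWord* buf = heap->allocate_new_tlab(min_word_size, desired_word_size,
                                          &actual_word_size);
  if (buf != NULL) {
    // The caller must size the TLAB by what was actually granted
    // (actual_word_size), not by what was requested.
  }
  return buf;
}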

void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
  assert(!is_init_completed(), "Expect to be called at JVM init time");
  assert(ranges != NULL, "MemRegion array NULL");
  assert(count != 0, "No MemRegions provided");
  MemRegion reserved = _hrm.reserved();
  HeapWord* prev_last_addr = NULL;
  HeapRegion* prev_last_region = NULL;
  size_t size_used = 0;
  size_t uncommitted_regions = 0;

  // For each MemRegion, free the G1 regions that constitute it, and
  // notify mark-sweep that the range is no longer to be considered 'archive'.
  MutexLockerEx x(Heap_lock);
  for (size_t i = 0; i < count; i++) {
    HeapWord* start_address = ranges[i].start();
    HeapWord* last_address = ranges[i].last();