1 /*
2 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
428 if (first != G1_NO_HRM_INDEX) {
429 result = humongous_obj_allocate_initialize_regions(first, obj_regions,
430 word_size, context);
431 assert(result != NULL, "it should always return a valid result");
432
433 // A successful humongous object allocation changes the used space
434 // information of the old generation so we need to recalculate the
435 // sizes and update the jstat counters here.
436 g1mm()->update_sizes();
437 }
438
439 _verifier->verify_region_sets_optional();
440
441 return result;
442 }
443
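// Mutator TLAB allocation. TLABs are never humongous, so a single first-level
// attempt_allocation() call is sufficient; the GC count and GCLocker retry
// out-parameters are not needed by the caller, hence the dummies below.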
444 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
445 assert_heap_not_locked_and_not_at_safepoint();
446 assert(!is_humongous(word_size), "we do not allow humongous TLABs");
447
448 uint dummy_gc_count_before;
449 uint dummy_gclocker_retry_count = 0;
450 return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
451 }
452
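// Main entry point for non-TLAB mutator allocations. Alternates allocation
// attempts with VM_G1CollectForAllocation pauses until the allocation succeeds,
// a successfully executed pause still cannot satisfy it, or the GCLocker retry
// limit is exceeded.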
453 HeapWord*
454 G1CollectedHeap::mem_allocate(size_t word_size,
455 bool* gc_overhead_limit_was_exceeded) {
456 assert_heap_not_locked_and_not_at_safepoint();
457
458 // Loop until the allocation is satisfied, or unsatisfied after GC.
459 for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
460 uint gc_count_before;
461
462 HeapWord* result = NULL;
463 if (!is_humongous(word_size)) {
464 result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
465 } else {
466 result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
467 }
468 if (result != NULL) {
469 return result;
470 }
471
472 // Create the garbage collection operation...
473 VM_G1CollectForAllocation op(gc_count_before, word_size);
474 op.set_allocation_context(AllocationContext::current());
475
476 // ...and get the VM thread to execute it.
477 VMThread::execute(&op);
478
479 if (op.prologue_succeeded() && op.pause_succeeded()) {
480 // If the operation was successful we'll return the result even
481 // if it is NULL. If the allocation attempt failed immediately
482 // after a Full GC, it's unlikely we'll be able to allocate now.
483 HeapWord* result = op.result();
484 if (result != NULL && !is_humongous(word_size)) {
485 // Allocations that take place on VM operations do not do any
486 // card dirtying and we have to do it here. We only have to do
487 // this for non-humongous allocations, though.
488 dirty_young_block(result, word_size);
489 }
490 return result;
491 } else {
492 if (gclocker_retry_count > GCLockerRetryAllocationCount) {
493 return NULL;
494 }
495 assert(op.result() == NULL,
496 "the result should be NULL if the VM op did not succeed");
497 }
498
499 // Give a warning if we seem to be looping forever.
500 if ((QueuedAllocationWarningCount > 0) &&
501 (try_count % QueuedAllocationWarningCount == 0)) {
502       log_warning(gc)("G1CollectedHeap::mem_allocate retries %u times", try_count);
503 }
504 }
505
506 ShouldNotReachHere();
507 return NULL;
508 }
509
510 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
511 AllocationContext_t context,
512 uint* gc_count_before_ret,
513 uint* gclocker_retry_count_ret) {
514 // Make sure you read the note in attempt_allocation_humongous().
515
516 assert_heap_not_locked_and_not_at_safepoint();
517 assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
518 "be called for humongous allocation requests");
519
520 // We should only get here after the first-level allocation attempt
521 // (attempt_allocation()) failed to allocate.
522
523 // We will loop until a) we manage to successfully perform the
524 // allocation or b) we successfully schedule a collection which
525 // fails to perform the allocation. b) is the only case when we'll
526 // return NULL.
527 HeapWord* result = NULL;
528 for (int try_count = 1; /* we'll return */; try_count += 1) {
529 bool should_try_gc;
530 uint gc_count_before;
531
532 {
533 MutexLockerEx x(Heap_lock);
534 result = _allocator->attempt_allocation_locked(word_size, context);
535 if (result != NULL) {
536 return result;
537 }
538
539 if (GCLocker::is_active_and_needs_gc()) {
540 if (g1_policy()->can_expand_young_list()) {
541 // No need for an ergo verbose message here,
542 // can_expand_young_list() does this when it returns true.
543 result = _allocator->attempt_allocation_force(word_size, context);
544 if (result != NULL) {
545 return result;
546 }
547 }
548 should_try_gc = false;
549 } else {
550 // The GCLocker may not be active but the GCLocker initiated
551 // GC may not yet have been performed (GCLocker::needs_gc()
552         // returns true). In this case we do not try a GC and
553 // wait until the GCLocker initiated GC is performed, and
554 // then retry the allocation.
555 if (GCLocker::needs_gc()) {
556 should_try_gc = false;
557 } else {
558 // Read the GC count while still holding the Heap_lock.
559 gc_count_before = total_collections();
560 should_try_gc = true;
561 }
562 }
563 }
564
565 if (should_try_gc) {
566 bool succeeded;
567 result = do_collection_pause(word_size, gc_count_before, &succeeded,
568 GCCause::_g1_inc_collection_pause);
569 if (result != NULL) {
570 assert(succeeded, "only way to get back a non-NULL result");
571 return result;
572 }
573
574 if (succeeded) {
575 // If we get here we successfully scheduled a collection which
576 // failed to allocate. No point in trying to allocate
577 // further. We'll just return NULL.
578 MutexLockerEx x(Heap_lock);
579 *gc_count_before_ret = total_collections();
580 return NULL;
581 }
582 } else {
583 if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
584 MutexLockerEx x(Heap_lock);
585 *gc_count_before_ret = total_collections();
586 return NULL;
587 }
588 // The GCLocker is either active or the GCLocker initiated
589 // GC has not yet been performed. Stall until it is and
590 // then retry the allocation.
591 GCLocker::stall_until_clear();
592 (*gclocker_retry_count_ret) += 1;
593 }
594
595 // We can reach here if we were unsuccessful in scheduling a
596 // collection (because another thread beat us to it) or if we were
597     // stalled due to the GC locker. In either case we should retry the
598 // allocation attempt in case another thread successfully
599 // performed a collection and reclaimed enough space. We do the
600 // first attempt (without holding the Heap_lock) here and the
601 // follow-on attempt will be at the start of the next loop
602 // iteration (after taking the Heap_lock).
603 result = _allocator->attempt_allocation(word_size, context);
604 if (result != NULL) {
605 return result;
606 }
607
608 // Give a warning if we seem to be looping forever.
609 if ((QueuedAllocationWarningCount > 0) &&
610 (try_count % QueuedAllocationWarningCount == 0)) {
611 log_warning(gc)("G1CollectedHeap::attempt_allocation_slow() "
612 "retries %d times", try_count);
613 }
614 }
615
616 ShouldNotReachHere();
617 return NULL;
618 }
619
620 void G1CollectedHeap::begin_archive_alloc_range(bool open) {
621 assert_at_safepoint(true /* should_be_vm_thread */);
622 if (_archive_allocator == NULL) {
623 _archive_allocator = G1ArchiveAllocator::create_allocator(this, open);
624 }
625 }
626
627 bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
628 // Allocations in archive regions cannot be of a size that would be considered
629 // humongous even for a minimum-sized region, because G1 region sizes/boundaries
630 // may be different at archive-restore time.
631 return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
632 }
813 curr_region = _hrm.next_region_in_heap(curr_region);
814 } else {
815 curr_region = NULL;
816 }
817 }
818
819 prev_last_addr = last_address;
820 prev_last_region = last_region;
821
822 // Fill the memory below the allocated range with dummy object(s),
823 // if the region bottom does not match the range start, or if the previous
824 // range ended within the same G1 region, and there is a gap.
825 if (start_address != bottom_address) {
826 size_t fill_size = pointer_delta(start_address, bottom_address);
827 G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
828 increase_used(fill_size * HeapWordSize);
829 }
830 }
831 }
832
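// First-level allocation attempt for non-humongous requests: try the current
// mutator alloc region without taking the Heap_lock, then fall back to
// attempt_allocation_slow(). Successful allocations get their card-dirtying
// done here via dirty_young_block().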
833 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
834 uint* gc_count_before_ret,
835 uint* gclocker_retry_count_ret) {
836 assert_heap_not_locked_and_not_at_safepoint();
837 assert(!is_humongous(word_size), "attempt_allocation() should not "
838 "be called for humongous allocation requests");
839
840 AllocationContext_t context = AllocationContext::current();
841 HeapWord* result = _allocator->attempt_allocation(word_size, context);
842
843 if (result == NULL) {
844 result = attempt_allocation_slow(word_size,
845 context,
846 gc_count_before_ret,
847 gclocker_retry_count_ret);
848 }
849 assert_heap_not_locked();
850 if (result != NULL) {
851 dirty_young_block(result, word_size);
852 }
853 return result;
854 }
855
856 void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
857 assert(!is_init_completed(), "Expect to be called at JVM init time");
858 assert(ranges != NULL, "MemRegion array NULL");
859 assert(count != 0, "No MemRegions provided");
860 MemRegion reserved = _hrm.reserved();
861 HeapWord* prev_last_addr = NULL;
862 HeapRegion* prev_last_region = NULL;
863 size_t size_used = 0;
864 size_t uncommitted_regions = 0;
865
866   // For each MemRegion, free the G1 regions that constitute it, and
867 // notify mark-sweep that the range is no longer to be considered 'archive.'
908 if (curr_region != last_region) {
909 curr_region = _hrm.next_region_in_heap(curr_region);
910 } else {
911 curr_region = NULL;
912 }
913 _hrm.shrink_at(curr_index, 1);
914 uncommitted_regions++;
915 }
916
917 // Notify mark-sweep that this is no longer an archive range.
918 G1ArchiveAllocator::set_range_archive(ranges[i], false);
919 }
920
921 if (uncommitted_regions != 0) {
922 log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
923 HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
924 }
925 decrease_used(size_used);
926 }
927
928 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
929 uint* gc_count_before_ret,
930 uint* gclocker_retry_count_ret) {
931 // The structure of this method has a lot of similarities to
932 // attempt_allocation_slow(). The reason these two were not merged
933 // into a single one is that such a method would require several "if
934 // allocation is not humongous do this, otherwise do that"
935 // conditional paths which would obscure its flow. In fact, an early
936 // version of this code did use a unified method which was harder to
937 // follow and, as a result, it had subtle bugs that were hard to
938 // track down. So keeping these two methods separate allows each to
939 // be more readable. It will be good to keep these two in sync as
940 // much as possible.
941
942 assert_heap_not_locked_and_not_at_safepoint();
943 assert(is_humongous(word_size), "attempt_allocation_humongous() "
944 "should only be called for humongous allocations");
945
946 // Humongous objects can exhaust the heap quickly, so we should check if we
947 // need to start a marking cycle at each humongous object allocation. We do
948 // the check before we do the actual allocation. The reason for doing it
949 // before the allocation is that we avoid having to keep track of the newly
950 // allocated memory while we do a GC.
951 if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
952 word_size)) {
953 collect(GCCause::_g1_humongous_allocation);
954 }
955
956 // We will loop until a) we manage to successfully perform the
957 // allocation or b) we successfully schedule a collection which
958 // fails to perform the allocation. b) is the only case when we'll
959 // return NULL.
960 HeapWord* result = NULL;
961 for (int try_count = 1; /* we'll return */; try_count += 1) {
962 bool should_try_gc;
963 uint gc_count_before;
964
965 {
966 MutexLockerEx x(Heap_lock);
967
968 // Given that humongous objects are not allocated in young
969 // regions, we'll first try to do the allocation without doing a
970 // collection hoping that there's enough space in the heap.
971 result = humongous_obj_allocate(word_size, AllocationContext::current());
972 if (result != NULL) {
973 size_t size_in_regions = humongous_obj_size_in_regions(word_size);
974 g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
975 return result;
976 }
977
978 if (GCLocker::is_active_and_needs_gc()) {
979 should_try_gc = false;
980 } else {
981 // The GCLocker may not be active but the GCLocker initiated
982 // GC may not yet have been performed (GCLocker::needs_gc()
983         // returns true). In this case we do not try a GC and
984 // wait until the GCLocker initiated GC is performed, and
985 // then retry the allocation.
986 if (GCLocker::needs_gc()) {
987 should_try_gc = false;
988 } else {
989 // Read the GC count while still holding the Heap_lock.
990 gc_count_before = total_collections();
991 should_try_gc = true;
992 }
993 }
994 }
995
996 if (should_try_gc) {
997 // If we failed to allocate the humongous object, we should try to
998 // do a collection pause (if we're allowed) in case it reclaims
999 // enough space for the allocation to succeed after the pause.
1000
1001 bool succeeded;
1002 result = do_collection_pause(word_size, gc_count_before, &succeeded,
1003 GCCause::_g1_humongous_allocation);
1004 if (result != NULL) {
1005 assert(succeeded, "only way to get back a non-NULL result");
1006 return result;
1007 }
1008
1009 if (succeeded) {
1010 // If we get here we successfully scheduled a collection which
1011 // failed to allocate. No point in trying to allocate
1012 // further. We'll just return NULL.
1013 MutexLockerEx x(Heap_lock);
1014 *gc_count_before_ret = total_collections();
1015 return NULL;
1016 }
1017 } else {
1018 if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
1019 MutexLockerEx x(Heap_lock);
1020 *gc_count_before_ret = total_collections();
1021 return NULL;
1022 }
1023 // The GCLocker is either active or the GCLocker initiated
1024 // GC has not yet been performed. Stall until it is and
1025 // then retry the allocation.
1026 GCLocker::stall_until_clear();
1027 (*gclocker_retry_count_ret) += 1;
1028 }
1029
1030 // We can reach here if we were unsuccessful in scheduling a
1031 // collection (because another thread beat us to it) or if we were
1032     // stalled due to the GC locker. In either case we should retry the
1033 // allocation attempt in case another thread successfully
1034 // performed a collection and reclaimed enough space. Give a
1035 // warning if we seem to be looping forever.
1036
1037 if ((QueuedAllocationWarningCount > 0) &&
1038 (try_count % QueuedAllocationWarningCount == 0)) {
1039 log_warning(gc)("G1CollectedHeap::attempt_allocation_humongous() "
1040 "retries %d times", try_count);
1041 }
1042 }
1043
1044 ShouldNotReachHere();
1045 return NULL;
1046 }
1047
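// Allocation attempt performed by the VM thread at a safepoint, e.g. to satisfy
// the allocation request that triggered a collection pause.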
1048 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1049 AllocationContext_t context,
1050 bool expect_null_mutator_alloc_region) {
1051 assert_at_safepoint(true /* should_be_vm_thread */);
1052 assert(!_allocator->has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
1053 "the current alloc region was unexpectedly found to be non-NULL");
1054
1055 if (!is_humongous(word_size)) {
1056 return _allocator->attempt_allocation_locked(word_size, context);
1057 } else {
1058 HeapWord* result = humongous_obj_allocate(word_size, context);
1059 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1060 collector_state()->set_initiate_conc_mark_if_possible(true);
1322 "maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1323 capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio);
1324
1325 shrink(shrink_bytes);
1326 }
1327 }
1328
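// One round of satisfying a failed allocation at a safepoint: retry the
// allocation, then try expanding the heap, and finally (if do_gc is set) fall
// back to a Full GC. Returns NULL if all of these fail.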
1329 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1330 AllocationContext_t context,
1331 bool do_gc,
1332 bool clear_all_soft_refs,
1333 bool expect_null_mutator_alloc_region,
1334 bool* gc_succeeded) {
1335 *gc_succeeded = true;
1336 // Let's attempt the allocation first.
1337 HeapWord* result =
1338 attempt_allocation_at_safepoint(word_size,
1339 context,
1340 expect_null_mutator_alloc_region);
1341 if (result != NULL) {
1342 assert(*gc_succeeded, "sanity");
1343 return result;
1344 }
1345
1346 // In a G1 heap, we're supposed to keep allocation from failing by
1347 // incremental pauses. Therefore, at least for now, we'll favor
1348 // expansion over collection. (This might change in the future if we can
1349 // do something smarter than full collection to satisfy a failed alloc.)
1350 result = expand_and_allocate(word_size, context);
1351 if (result != NULL) {
1352 assert(*gc_succeeded, "sanity");
1353 return result;
1354 }
1355
1356 if (do_gc) {
1357 // Expansion didn't work, we'll try to do a Full GC.
1358 *gc_succeeded = do_full_collection(false, /* explicit_gc */
1359 clear_all_soft_refs);
1360 }
1361
1362 return NULL;
1363 }
1364
1365 HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1366 AllocationContext_t context,
1367 bool* succeeded) {
1368 assert_at_safepoint(true /* should_be_vm_thread */);
1369
1370 // Attempts to allocate followed by Full GC.
1371 HeapWord* result =
1372 satisfy_failed_allocation_helper(word_size,
1384 result = satisfy_failed_allocation_helper(word_size,
1385 context,
1386 true, /* do_gc */
1387 true, /* clear_all_soft_refs */
1388 true, /* expect_null_mutator_alloc_region */
1389 succeeded);
1390
1391 if (result != NULL || !*succeeded) {
1392 return result;
1393 }
1394
1395 // Attempts to allocate, no GC
1396 result = satisfy_failed_allocation_helper(word_size,
1397 context,
1398 false, /* do_gc */
1399 false, /* clear_all_soft_refs */
1400 true, /* expect_null_mutator_alloc_region */
1401 succeeded);
1402
1403 if (result != NULL) {
1404 assert(*succeeded, "sanity");
1405 return result;
1406 }
1407
1408 assert(!collector_policy()->should_clear_all_soft_refs(),
1409 "Flag should have been handled and cleared prior to this point");
1410
1411 // What else? We might try synchronous finalization later. If the total
1412 // space available is large enough for the allocation, then a more
1413 // complete compaction phase than we've tried so far might be
1414 // appropriate.
1415 assert(*succeeded, "sanity");
1416 return NULL;
1417 }
1418
1419 // Attempt to expand the heap sufficiently
1420 // to support an allocation of the given "word_size". If
1421 // successful, perform the allocation and return the address of the
1422 // allocated block, or else "NULL".
1423
1424 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1425 assert_at_safepoint(true /* should_be_vm_thread */);
1426
1427 _verifier->verify_region_sets_optional();
1428
1429 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1430 log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1431 word_size * HeapWordSize);
1432
1433
1434 if (expand(expand_bytes, _workers)) {
1435 _hrm.verify_optional();
2130 assert(!concurrent ||
2131 (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2132 "for outer caller (concurrent cycle): "
2133 "_old_marking_cycles_started = %u "
2134 "is inconsistent with _old_marking_cycles_completed = %u",
2135 _old_marking_cycles_started, _old_marking_cycles_completed);
2136
2137 _old_marking_cycles_completed += 1;
2138
2139 // We need to clear the "in_progress" flag in the CM thread before
2140 // we wake up any waiters (especially when ExplicitInvokesConcurrent
2141 // is set) so that if a waiter requests another System.gc() it doesn't
2142 // incorrectly see that a marking cycle is still in progress.
2143 if (concurrent) {
2144 _cmThread->set_idle();
2145 }
2146
2147 // This notify_all() will ensure that a thread that called
2148   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2149   // and is waiting for a full GC to finish will be woken up. It is
2150 // waiting in VM_G1IncCollectionPause::doit_epilogue().
2151 FullGCCount_lock->notify_all();
2152 }
2153
2154 void G1CollectedHeap::collect(GCCause::Cause cause) {
2155 assert_heap_not_locked();
2156
2157 uint gc_count_before;
2158 uint old_marking_count_before;
2159 uint full_gc_count_before;
2160 bool retry_gc;
2161
2162 do {
2163 retry_gc = false;
2164
2165 {
2166 MutexLocker ml(Heap_lock);
2167
2168 // Read the GC count while holding the Heap_lock
2169 gc_count_before = total_collections();
2170 full_gc_count_before = total_full_collections();
2171 old_marking_count_before = _old_marking_cycles_started;
2172 }
2173
2174 if (should_do_concurrent_full_gc(cause)) {
2175 // Schedule an initial-mark evacuation pause that will start a
2176 // concurrent cycle. We're setting word_size to 0 which means that
2177 // we are not requesting a post-GC allocation.
2178 VM_G1IncCollectionPause op(gc_count_before,
2179 0, /* word_size */
2180 true, /* should_initiate_conc_mark */
2181 g1_policy()->max_pause_time_ms(),
2182 cause);
2183 op.set_allocation_context(AllocationContext::current());
2184
2185 VMThread::execute(&op);
2186 if (!op.pause_succeeded()) {
2187 if (old_marking_count_before == _old_marking_cycles_started) {
2188 retry_gc = op.should_retry_gc();
2189 } else {
2190 // A Full GC happened while we were trying to schedule the
2191 // initial-mark GC. No point in starting a new cycle given
2192 // that the whole heap was collected anyway.
2193 }
2194
2195 if (retry_gc) {
2196 if (GCLocker::is_active_and_needs_gc()) {
2197 GCLocker::stall_until_clear();
2198 }
2199 }
2200 }
2201 } else {
2202 if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
2203 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2204
2205 // Schedule a standard evacuation pause. We're setting word_size
2206 // to 0 which means that we are not requesting a post-GC allocation.
2207 VM_G1IncCollectionPause op(gc_count_before,
2208 0, /* word_size */
2209 false, /* should_initiate_conc_mark */
2210 g1_policy()->max_pause_time_ms(),
2211 cause);
2212 VMThread::execute(&op);
2213 } else {
2214 // Schedule a Full GC.
2215 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2216 VMThread::execute(&op);
2217 }
2218 }
2219 } while (retry_gc);
2220 }
2221
2222 bool G1CollectedHeap::is_in(const void* p) const {
2223 if (_hrm.reserved().contains(p)) {
2224 // Given that we know that p is in the reserved space,
2225 // heap_region_containing() should successfully
2226 // return the containing region.
2227 HeapRegion* hr = heap_region_containing(p);
2228 return hr->is_in(p);
2229 } else {
2230 return false;
2231 }
2602 #endif
2603 // always_do_update_barrier = true;
2604
2605 double start = os::elapsedTime();
2606 resize_all_tlabs();
2607 g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2608
2609 allocation_context_stats().update(full);
2610
2611 MemoryService::track_memory_usage();
2612 // We have just completed a GC. Update the soft reference
2613 // policy with the new heap occupancy
2614 Universe::update_heap_info_at_gc();
2615 }
2616
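// Schedules an evacuation pause via a VM_G1IncCollectionPause operation and
// returns the result of the allocation attempt made as part of that pause
// (NULL if the pause was not run or could not satisfy the allocation).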
2617 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2618 uint gc_count_before,
2619 bool* succeeded,
2620 GCCause::Cause gc_cause) {
2621 assert_heap_not_locked_and_not_at_safepoint();
2622 VM_G1IncCollectionPause op(gc_count_before,
2623 word_size,
2624 false, /* should_initiate_conc_mark */
2625 g1_policy()->max_pause_time_ms(),
2626 gc_cause);
2627
2628 op.set_allocation_context(AllocationContext::current());
2629 VMThread::execute(&op);
2630
2631 HeapWord* result = op.result();
2632 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
2633 assert(result == NULL || ret_succeeded,
2634 "the result should be NULL if the VM did not succeed");
2635 *succeeded = ret_succeeded;
2636
2637 assert_heap_not_locked();
2638 return result;
2639 }
2640
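// Wakes up the concurrent mark thread to start a new marking cycle, unless one
// is already in progress.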
2641 void
2642 G1CollectedHeap::doConcurrentMark() {
2643 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2644 if (!_cmThread->in_progress()) {
2645 _cmThread->set_started();
2646 CGC_lock->notify();
2647 }
2648 }
1 /*
2 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
428 if (first != G1_NO_HRM_INDEX) {
429 result = humongous_obj_allocate_initialize_regions(first, obj_regions,
430 word_size, context);
431 assert(result != NULL, "it should always return a valid result");
432
433 // A successful humongous object allocation changes the used space
434 // information of the old generation so we need to recalculate the
435 // sizes and update the jstat counters here.
436 g1mm()->update_sizes();
437 }
438
439 _verifier->verify_region_sets_optional();
440
441 return result;
442 }
443
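// Mutator TLAB allocation. TLABs are never humongous, so the regular
// first-level attempt_allocation() path is sufficient.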
444 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
445 assert_heap_not_locked_and_not_at_safepoint();
446 assert(!is_humongous(word_size), "we do not allow humongous TLABs");
447
448 return attempt_allocation(word_size);
449 }
450
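// Main entry point for non-TLAB mutator allocations. The retry and GC logic
// lives entirely in attempt_allocation() and attempt_allocation_humongous(),
// so this method only dispatches on the request size.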
451 HeapWord*
452 G1CollectedHeap::mem_allocate(size_t word_size,
453 bool* gc_overhead_limit_was_exceeded) {
454 assert_heap_not_locked_and_not_at_safepoint();
455
456 if (is_humongous(word_size)) {
457 return attempt_allocation_humongous(word_size);
458 }
459 return attempt_allocation(word_size);
460 }
461
462 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
463 AllocationContext_t context) {
464 ResourceMark rm; // For retrieving the thread names in log messages.
465
466 // Make sure you read the note in attempt_allocation_humongous().
467
468 assert_heap_not_locked_and_not_at_safepoint();
469 assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
470 "be called for humongous allocation requests");
471
472 // We should only get here after the first-level allocation attempt
473 // (attempt_allocation()) failed to allocate.
474
475 // We will loop until a) we manage to successfully perform the
476 // allocation or b) we successfully schedule a collection which
477 // fails to perform the allocation. b) is the only case when we'll
478 // return NULL.
479 HeapWord* result = NULL;
480 for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
481 bool should_try_gc;
482 uint gc_count_before;
483
484 {
485 MutexLockerEx x(Heap_lock);
486 result = _allocator->attempt_allocation_locked(word_size, context);
487 if (result != NULL) {
488 return result;
489 }
490
491       // If the GCLocker is active and we are bound for a GC, try expanding the young gen
492       // rather than waiting. This is different from when only GCLocker::needs_gc() is set:
493       // stalling while the GCLocker is still active could take too long.
494 if (GCLocker::is_active_and_needs_gc() && g1_policy()->can_expand_young_list()) {
495 // No need for an ergo message here, can_expand_young_list() does this when
496 // it returns true.
497 result = _allocator->attempt_allocation_force(word_size, context);
498 if (result != NULL) {
499 return result;
500 }
501 }
502 // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
503 // the GCLocker initiated GC has been performed and then retry. This includes
504       // the case when the GCLocker is not active but its initiated GC has not yet been performed.
505 should_try_gc = !GCLocker::needs_gc();
506 // Read the GC count while still holding the Heap_lock.
507 gc_count_before = total_collections();
508 }
509
510 if (should_try_gc) {
511 bool succeeded;
512 result = do_collection_pause(word_size, gc_count_before, &succeeded,
513 GCCause::_g1_inc_collection_pause);
514 if (result != NULL) {
515 assert(succeeded, "only way to get back a non-NULL result");
516 log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
517 Thread::current()->name(), p2i(result));
518 return result;
519 }
520
521 if (succeeded) {
522 // We successfully scheduled a collection which failed to allocate. No
523 // point in trying to allocate further. We'll just return NULL.
524 log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
525 SIZE_FORMAT " words", Thread::current()->name(), word_size);
526 return NULL;
527 }
528 log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
529 Thread::current()->name(), word_size);
530 } else {
531 // Failed to schedule a collection.
532 if (gclocker_retry_count > GCLockerRetryAllocationCount) {
533 log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
534 SIZE_FORMAT " words", Thread::current()->name(), word_size);
535 return NULL;
536 }
537 log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
538 // The GCLocker is either active or the GCLocker initiated
539 // GC has not yet been performed. Stall until it is and
540 // then retry the allocation.
541 GCLocker::stall_until_clear();
542 gclocker_retry_count += 1;
543 }
544
545 // We can reach here if we were unsuccessful in scheduling a
546 // collection (because another thread beat us to it) or if we were
547     // stalled due to the GC locker. In either case we should retry the
548 // allocation attempt in case another thread successfully
549 // performed a collection and reclaimed enough space. We do the
550 // first attempt (without holding the Heap_lock) here and the
551 // follow-on attempt will be at the start of the next loop
552 // iteration (after taking the Heap_lock).
553
554 result = _allocator->attempt_allocation(word_size, context);
555 if (result != NULL) {
556 return result;
557 }
558
559 // Give a warning if we seem to be looping forever.
560 if ((QueuedAllocationWarningCount > 0) &&
561 (try_count % QueuedAllocationWarningCount == 0)) {
562 log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
563 Thread::current()->name(), try_count, word_size);
564 }
565 }
566
567 ShouldNotReachHere();
568 return NULL;
569 }
570
571 void G1CollectedHeap::begin_archive_alloc_range(bool open) {
572 assert_at_safepoint(true /* should_be_vm_thread */);
573 if (_archive_allocator == NULL) {
574 _archive_allocator = G1ArchiveAllocator::create_allocator(this, open);
575 }
576 }
577
578 bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
579 // Allocations in archive regions cannot be of a size that would be considered
580 // humongous even for a minimum-sized region, because G1 region sizes/boundaries
581 // may be different at archive-restore time.
582 return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
583 }
764 curr_region = _hrm.next_region_in_heap(curr_region);
765 } else {
766 curr_region = NULL;
767 }
768 }
769
770 prev_last_addr = last_address;
771 prev_last_region = last_region;
772
773 // Fill the memory below the allocated range with dummy object(s),
774 // if the region bottom does not match the range start, or if the previous
775 // range ended within the same G1 region, and there is a gap.
776 if (start_address != bottom_address) {
777 size_t fill_size = pointer_delta(start_address, bottom_address);
778 G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
779 increase_used(fill_size * HeapWordSize);
780 }
781 }
782 }
783
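// First-level allocation attempt for non-humongous requests: try the current
// mutator alloc region without taking the Heap_lock, then fall back to
// attempt_allocation_slow(). Successful allocations get their card-dirtying
// done here via dirty_young_block().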
784 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size) {
785 assert_heap_not_locked_and_not_at_safepoint();
786 assert(!is_humongous(word_size), "attempt_allocation() should not "
787 "be called for humongous allocation requests");
788
789 AllocationContext_t context = AllocationContext::current();
790 HeapWord* result = _allocator->attempt_allocation(word_size, context);
791
792 if (result == NULL) {
793 result = attempt_allocation_slow(word_size, context);
794 }
795 assert_heap_not_locked();
796 if (result != NULL) {
797 dirty_young_block(result, word_size);
798 }
799 return result;
800 }
801
802 void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
803 assert(!is_init_completed(), "Expect to be called at JVM init time");
804 assert(ranges != NULL, "MemRegion array NULL");
805 assert(count != 0, "No MemRegions provided");
806 MemRegion reserved = _hrm.reserved();
807 HeapWord* prev_last_addr = NULL;
808 HeapRegion* prev_last_region = NULL;
809 size_t size_used = 0;
810 size_t uncommitted_regions = 0;
811
812   // For each MemRegion, free the G1 regions that constitute it, and
813 // notify mark-sweep that the range is no longer to be considered 'archive.'
854 if (curr_region != last_region) {
855 curr_region = _hrm.next_region_in_heap(curr_region);
856 } else {
857 curr_region = NULL;
858 }
859 _hrm.shrink_at(curr_index, 1);
860 uncommitted_regions++;
861 }
862
863 // Notify mark-sweep that this is no longer an archive range.
864 G1ArchiveAllocator::set_range_archive(ranges[i], false);
865 }
866
867 if (uncommitted_regions != 0) {
868 log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B",
869 HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
870 }
871 decrease_used(size_used);
872 }
873
874 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
875 ResourceMark rm; // For retrieving the thread names in log messages.
876
877 // The structure of this method has a lot of similarities to
878 // attempt_allocation_slow(). The reason these two were not merged
879 // into a single one is that such a method would require several "if
880 // allocation is not humongous do this, otherwise do that"
881 // conditional paths which would obscure its flow. In fact, an early
882 // version of this code did use a unified method which was harder to
883 // follow and, as a result, it had subtle bugs that were hard to
884 // track down. So keeping these two methods separate allows each to
885 // be more readable. It will be good to keep these two in sync as
886 // much as possible.
887
888 assert_heap_not_locked_and_not_at_safepoint();
889 assert(is_humongous(word_size), "attempt_allocation_humongous() "
890 "should only be called for humongous allocations");
891
892 // Humongous objects can exhaust the heap quickly, so we should check if we
893 // need to start a marking cycle at each humongous object allocation. We do
894 // the check before we do the actual allocation. The reason for doing it
895 // before the allocation is that we avoid having to keep track of the newly
896 // allocated memory while we do a GC.
897 if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
898 word_size)) {
899 collect(GCCause::_g1_humongous_allocation);
900 }
901
902 // We will loop until a) we manage to successfully perform the
903 // allocation or b) we successfully schedule a collection which
904 // fails to perform the allocation. b) is the only case when we'll
905 // return NULL.
906 HeapWord* result = NULL;
907 for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
908 bool should_try_gc;
909 uint gc_count_before;
910
911
912 {
913 MutexLockerEx x(Heap_lock);
914
915 // Given that humongous objects are not allocated in young
916 // regions, we'll first try to do the allocation without doing a
917 // collection hoping that there's enough space in the heap.
918 result = humongous_obj_allocate(word_size, AllocationContext::current());
919 if (result != NULL) {
920 size_t size_in_regions = humongous_obj_size_in_regions(word_size);
921 g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
922 return result;
923 }
924
925 // Only try a GC if the GCLocker does not signal the need for a GC. Wait until
926 // the GCLocker initiated GC has been performed and then retry. This includes
927       // the case when the GCLocker is not active but its initiated GC has not yet been performed.
928 should_try_gc = !GCLocker::needs_gc();
929 // Read the GC count while still holding the Heap_lock.
930 gc_count_before = total_collections();
931 }
932
933 if (should_try_gc) {
934 bool succeeded;
935 result = do_collection_pause(word_size, gc_count_before, &succeeded,
936 GCCause::_g1_humongous_allocation);
937 if (result != NULL) {
938 assert(succeeded, "only way to get back a non-NULL result");
939 log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
940 Thread::current()->name(), p2i(result));
941 return result;
942 }
943
944 if (succeeded) {
945 // We successfully scheduled a collection which failed to allocate. No
946 // point in trying to allocate further. We'll just return NULL.
947 log_trace(gc, alloc)("%s: Successfully scheduled collection failing to allocate "
948 SIZE_FORMAT " words", Thread::current()->name(), word_size);
949 return NULL;
950 }
951       log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating " SIZE_FORMAT " words",
952 Thread::current()->name(), word_size);
953 } else {
954 // Failed to schedule a collection.
955 if (gclocker_retry_count > GCLockerRetryAllocationCount) {
956 log_warning(gc, alloc)("%s: Retried waiting for GCLocker too often allocating "
957 SIZE_FORMAT " words", Thread::current()->name(), word_size);
958 return NULL;
959 }
960 log_trace(gc, alloc)("%s: Stall until clear", Thread::current()->name());
961 // The GCLocker is either active or the GCLocker initiated
962 // GC has not yet been performed. Stall until it is and
963 // then retry the allocation.
964 GCLocker::stall_until_clear();
965 gclocker_retry_count += 1;
966 }
967
968
969 // We can reach here if we were unsuccessful in scheduling a
970 // collection (because another thread beat us to it) or if we were
971     // stalled due to the GC locker. In either case we should retry the
972 // allocation attempt in case another thread successfully
973 // performed a collection and reclaimed enough space.
974 // Humongous object allocation always needs a lock, so we wait for the retry
975 // in the next iteration of the loop, unlike for the regular iteration case.
976 // Give a warning if we seem to be looping forever.
977
978 if ((QueuedAllocationWarningCount > 0) &&
979 (try_count % QueuedAllocationWarningCount == 0)) {
980 log_warning(gc, alloc)("%s: Retried allocation %u times for " SIZE_FORMAT " words",
981 Thread::current()->name(), try_count, word_size);
982 }
983 }
984
985 ShouldNotReachHere();
986 return NULL;
987 }
988
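// Allocation attempt performed by the VM thread at a safepoint, e.g. to satisfy
// the allocation request that triggered a collection pause.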
989 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
990 AllocationContext_t context,
991 bool expect_null_mutator_alloc_region) {
992 assert_at_safepoint(true /* should_be_vm_thread */);
993 assert(!_allocator->has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
994 "the current alloc region was unexpectedly found to be non-NULL");
995
996 if (!is_humongous(word_size)) {
997 return _allocator->attempt_allocation_locked(word_size, context);
998 } else {
999 HeapWord* result = humongous_obj_allocate(word_size, context);
1000 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1001 collector_state()->set_initiate_conc_mark_if_possible(true);
1263 "maximum_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)",
1264 capacity_after_gc, used_after_gc, used(), maximum_desired_capacity, MaxHeapFreeRatio);
1265
1266 shrink(shrink_bytes);
1267 }
1268 }
1269
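// One round of satisfying a failed allocation at a safepoint: retry the
// allocation, then try expanding the heap, and finally (if do_gc is set) fall
// back to a Full GC. Returns NULL if all of these fail.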
1270 HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
1271 AllocationContext_t context,
1272 bool do_gc,
1273 bool clear_all_soft_refs,
1274 bool expect_null_mutator_alloc_region,
1275 bool* gc_succeeded) {
1276 *gc_succeeded = true;
1277 // Let's attempt the allocation first.
1278 HeapWord* result =
1279 attempt_allocation_at_safepoint(word_size,
1280 context,
1281 expect_null_mutator_alloc_region);
1282 if (result != NULL) {
1283 return result;
1284 }
1285
1286 // In a G1 heap, we're supposed to keep allocation from failing by
1287 // incremental pauses. Therefore, at least for now, we'll favor
1288 // expansion over collection. (This might change in the future if we can
1289 // do something smarter than full collection to satisfy a failed alloc.)
1290 result = expand_and_allocate(word_size, context);
1291 if (result != NULL) {
1292 return result;
1293 }
1294
1295 if (do_gc) {
1296 // Expansion didn't work, we'll try to do a Full GC.
1297 *gc_succeeded = do_full_collection(false, /* explicit_gc */
1298 clear_all_soft_refs);
1299 }
1300
1301 return NULL;
1302 }
1303
1304 HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
1305 AllocationContext_t context,
1306 bool* succeeded) {
1307 assert_at_safepoint(true /* should_be_vm_thread */);
1308
1309 // Attempts to allocate followed by Full GC.
1310 HeapWord* result =
1311 satisfy_failed_allocation_helper(word_size,
1323 result = satisfy_failed_allocation_helper(word_size,
1324 context,
1325 true, /* do_gc */
1326 true, /* clear_all_soft_refs */
1327 true, /* expect_null_mutator_alloc_region */
1328 succeeded);
1329
1330 if (result != NULL || !*succeeded) {
1331 return result;
1332 }
1333
1334 // Attempts to allocate, no GC
1335 result = satisfy_failed_allocation_helper(word_size,
1336 context,
1337 false, /* do_gc */
1338 false, /* clear_all_soft_refs */
1339 true, /* expect_null_mutator_alloc_region */
1340 succeeded);
1341
1342 if (result != NULL) {
1343 return result;
1344 }
1345
1346 assert(!collector_policy()->should_clear_all_soft_refs(),
1347 "Flag should have been handled and cleared prior to this point");
1348
1349 // What else? We might try synchronous finalization later. If the total
1350 // space available is large enough for the allocation, then a more
1351 // complete compaction phase than we've tried so far might be
1352 // appropriate.
1353 return NULL;
1354 }
1355
1356 // Attempt to expand the heap sufficiently
1357 // to support an allocation of the given "word_size". If
1358 // successful, perform the allocation and return the address of the
1359 // allocated block, or else "NULL".
1360
1361 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
1362 assert_at_safepoint(true /* should_be_vm_thread */);
1363
1364 _verifier->verify_region_sets_optional();
1365
1366 size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
1367 log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
1368 word_size * HeapWordSize);
1369
1370
1371 if (expand(expand_bytes, _workers)) {
1372 _hrm.verify_optional();
2067 assert(!concurrent ||
2068 (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
2069 "for outer caller (concurrent cycle): "
2070 "_old_marking_cycles_started = %u "
2071 "is inconsistent with _old_marking_cycles_completed = %u",
2072 _old_marking_cycles_started, _old_marking_cycles_completed);
2073
2074 _old_marking_cycles_completed += 1;
2075
2076 // We need to clear the "in_progress" flag in the CM thread before
2077 // we wake up any waiters (especially when ExplicitInvokesConcurrent
2078 // is set) so that if a waiter requests another System.gc() it doesn't
2079 // incorrectly see that a marking cycle is still in progress.
2080 if (concurrent) {
2081 _cmThread->set_idle();
2082 }
2083
2084 // This notify_all() will ensure that a thread that called
2085   // System.gc() (with ExplicitGCInvokesConcurrent set or not)
2086   // and is waiting for a full GC to finish will be woken up. It is
2087 // waiting in VM_G1CollectForAllocation::doit_epilogue().
2088 FullGCCount_lock->notify_all();
2089 }
2090
2091 void G1CollectedHeap::collect(GCCause::Cause cause) {
2092 assert_heap_not_locked();
2093
2094 uint gc_count_before;
2095 uint old_marking_count_before;
2096 uint full_gc_count_before;
2097 bool retry_gc;
2098
2099 do {
2100 retry_gc = false;
2101
2102 {
2103 MutexLocker ml(Heap_lock);
2104
2105 // Read the GC count while holding the Heap_lock
2106 gc_count_before = total_collections();
2107 full_gc_count_before = total_full_collections();
2108 old_marking_count_before = _old_marking_cycles_started;
2109 }
2110
2111 if (should_do_concurrent_full_gc(cause)) {
2112 // Schedule an initial-mark evacuation pause that will start a
2113 // concurrent cycle. We're setting word_size to 0 which means that
2114 // we are not requesting a post-GC allocation.
2115 VM_G1CollectForAllocation op(0, /* word_size */
2116 gc_count_before,
2117 cause,
2118 true, /* should_initiate_conc_mark */
2119 g1_policy()->max_pause_time_ms(),
2120 AllocationContext::current());
2121 VMThread::execute(&op);
2122 if (!op.pause_succeeded()) {
2123 if (old_marking_count_before == _old_marking_cycles_started) {
2124 retry_gc = op.should_retry_gc();
2125 } else {
2126 // A Full GC happened while we were trying to schedule the
2127 // initial-mark GC. No point in starting a new cycle given
2128 // that the whole heap was collected anyway.
2129 }
2130
2131 if (retry_gc) {
2132 if (GCLocker::is_active_and_needs_gc()) {
2133 GCLocker::stall_until_clear();
2134 }
2135 }
2136 }
2137 } else {
2138 if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
2139 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2140
2141 // Schedule a standard evacuation pause. We're setting word_size
2142 // to 0 which means that we are not requesting a post-GC allocation.
2143 VM_G1CollectForAllocation op(0, /* word_size */
2144 gc_count_before,
2145 cause,
2146 false, /* should_initiate_conc_mark */
2147 g1_policy()->max_pause_time_ms(),
2148 AllocationContext::current());
2149 VMThread::execute(&op);
2150 } else {
2151 // Schedule a Full GC.
2152 VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2153 VMThread::execute(&op);
2154 }
2155 }
2156 } while (retry_gc);
2157 }
2158
2159 bool G1CollectedHeap::is_in(const void* p) const {
2160 if (_hrm.reserved().contains(p)) {
2161 // Given that we know that p is in the reserved space,
2162 // heap_region_containing() should successfully
2163 // return the containing region.
2164 HeapRegion* hr = heap_region_containing(p);
2165 return hr->is_in(p);
2166 } else {
2167 return false;
2168 }
2539 #endif
2540 // always_do_update_barrier = true;
2541
2542 double start = os::elapsedTime();
2543 resize_all_tlabs();
2544 g1_policy()->phase_times()->record_resize_tlab_time_ms((os::elapsedTime() - start) * 1000.0);
2545
2546 allocation_context_stats().update(full);
2547
2548 MemoryService::track_memory_usage();
2549 // We have just completed a GC. Update the soft reference
2550 // policy with the new heap occupancy
2551 Universe::update_heap_info_at_gc();
2552 }
2553
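// Schedules an evacuation pause via a VM_G1CollectForAllocation operation and
// returns the result of the allocation attempt made as part of that pause
// (NULL if the pause was not run or could not satisfy the allocation).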
2554 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
2555 uint gc_count_before,
2556 bool* succeeded,
2557 GCCause::Cause gc_cause) {
2558 assert_heap_not_locked_and_not_at_safepoint();
2559 VM_G1CollectForAllocation op(word_size,
2560 gc_count_before,
2561 gc_cause,
2562 false, /* should_initiate_conc_mark */
2563 g1_policy()->max_pause_time_ms(),
2564 AllocationContext::current());
2565 VMThread::execute(&op);
2566
2567 HeapWord* result = op.result();
2568 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
2569 assert(result == NULL || ret_succeeded,
2570 "the result should be NULL if the VM did not succeed");
2571 *succeeded = ret_succeeded;
2572
2573 assert_heap_not_locked();
2574 return result;
2575 }
2576
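// Wakes up the concurrent mark thread to start a new marking cycle, unless one
// is already in progress.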
2577 void
2578 G1CollectedHeap::doConcurrentMark() {
2579 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2580 if (!_cmThread->in_progress()) {
2581 _cmThread->set_started();
2582 CGC_lock->notify();
2583 }
2584 }