src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 7780 : imported patch 8072621

*** 1,7 ****
  /*
!  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,7 ----
  /*
!  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 331,341 ****
  void YoungList::print() {
    HeapRegion* lists[] = {_head, _survivor_head};
    const char* names[] = {"YOUNG", "SURVIVOR"};

!   for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
      gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
      HeapRegion *curr = lists[list];
      if (curr == NULL)
        gclog_or_tty->print_cr("  empty");
      while (curr != NULL) {
--- 331,341 ----
  void YoungList::print() {
    HeapRegion* lists[] = {_head, _survivor_head};
    const char* names[] = {"YOUNG", "SURVIVOR"};

!   for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
      gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
      HeapRegion *curr = lists[list];
      if (curr == NULL)
        gclog_or_tty->print_cr("  empty");
      while (curr != NULL) {
*** 763,785 ****
  HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
    assert_heap_not_locked_and_not_at_safepoint();
    assert(!is_humongous(word_size), "we do not allow humongous TLABs");

!   unsigned int dummy_gc_count_before;
!   int dummy_gclocker_retry_count = 0;
    return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
  }

  HeapWord*
  G1CollectedHeap::mem_allocate(size_t word_size,
                                bool* gc_overhead_limit_was_exceeded) {
    assert_heap_not_locked_and_not_at_safepoint();

    // Loop until the allocation is satisfied, or unsatisfied after GC.
!   for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
!     unsigned int gc_count_before;

      HeapWord* result = NULL;
      if (!is_humongous(word_size)) {
        result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
      } else {
--- 763,785 ----
  HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
    assert_heap_not_locked_and_not_at_safepoint();
    assert(!is_humongous(word_size), "we do not allow humongous TLABs");

!   uint dummy_gc_count_before;
!   uint dummy_gclocker_retry_count = 0;
    return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
  }

  HeapWord*
  G1CollectedHeap::mem_allocate(size_t word_size,
                                bool* gc_overhead_limit_was_exceeded) {
    assert_heap_not_locked_and_not_at_safepoint();

    // Loop until the allocation is satisfied, or unsatisfied after GC.
!   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
!     uint gc_count_before;

      HeapWord* result = NULL;
      if (!is_humongous(word_size)) {
        result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
      } else {
*** 827,838 ****
    return NULL;
  }

  HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                                     AllocationContext_t context,
!                                                    unsigned int *gc_count_before_ret,
!                                                    int* gclocker_retry_count_ret) {
    // Make sure you read the note in attempt_allocation_humongous().
    assert_heap_not_locked_and_not_at_safepoint();
    assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
           "be called for humongous allocation requests");
--- 827,838 ----
    return NULL;
  }

  HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                                     AllocationContext_t context,
!                                                    uint* gc_count_before_ret,
!                                                    uint* gclocker_retry_count_ret) {
    // Make sure you read the note in attempt_allocation_humongous().
    assert_heap_not_locked_and_not_at_safepoint();
    assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
           "be called for humongous allocation requests");
*** 845,855 ****
    // fails to perform the allocation. b) is the only case when we'll
    // return NULL.
    HeapWord* result = NULL;
    for (int try_count = 1; /* we'll return */; try_count += 1) {
      bool should_try_gc;
!     unsigned int gc_count_before;

      {
        MutexLockerEx x(Heap_lock);
        result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                      false /* bot_updates */);
--- 845,855 ----
    // fails to perform the allocation. b) is the only case when we'll
    // return NULL.
    HeapWord* result = NULL;
    for (int try_count = 1; /* we'll return */; try_count += 1) {
      bool should_try_gc;
!     uint gc_count_before;

      {
        MutexLockerEx x(Heap_lock);
        result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                      false /* bot_updates */);
*** 943,954 ****
    ShouldNotReachHere();
    return NULL;
  }

  HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
!                                                          unsigned int * gc_count_before_ret,
!                                                          int* gclocker_retry_count_ret) {
    // The structure of this method has a lot of similarities to
    // attempt_allocation_slow(). The reason these two were not merged
    // into a single one is that such a method would require several "if
    // allocation is not humongous do this, otherwise do that"
    // conditional paths which would obscure its flow. In fact, an early
--- 943,954 ----
    ShouldNotReachHere();
    return NULL;
  }

  HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
!                                                          uint* gc_count_before_ret,
!                                                          uint* gclocker_retry_count_ret) {
    // The structure of this method has a lot of similarities to
    // attempt_allocation_slow(). The reason these two were not merged
    // into a single one is that such a method would require several "if
    // allocation is not humongous do this, otherwise do that"
    // conditional paths which would obscure its flow. In fact, an early
*** 977,987 ****
    // fails to perform the allocation. b) is the only case when we'll
    // return NULL.
    HeapWord* result = NULL;
    for (int try_count = 1; /* we'll return */; try_count += 1) {
      bool should_try_gc;
!     unsigned int gc_count_before;

      {
        MutexLockerEx x(Heap_lock);

        // Given that humongous objects are not allocated in young
--- 977,987 ----
    // fails to perform the allocation. b) is the only case when we'll
    // return NULL.
    HeapWord* result = NULL;
    for (int try_count = 1; /* we'll return */; try_count += 1) {
      bool should_try_gc;
!     uint gc_count_before;

      {
        MutexLockerEx x(Heap_lock);

        // Given that humongous objects are not allocated in young
*** 1813,1823 ****
    uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
    assert(n_rem_sets > 0, "Invariant.");

    _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
!   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
    _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);

    for (int i = 0; i < n_queues; i++) {
      RefToScanQueue* q = new RefToScanQueue();
      q->initialize();
--- 1813,1823 ----
    uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
    assert(n_rem_sets > 0, "Invariant.");

    _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
!   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
    _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);

    for (int i = 0; i < n_queues; i++) {
      RefToScanQueue* q = new RefToScanQueue();
      q->initialize();
*** 2394,2406 ****
  }

  void G1CollectedHeap::collect(GCCause::Cause cause) {
    assert_heap_not_locked();

!   unsigned int gc_count_before;
!   unsigned int old_marking_count_before;
!   unsigned int full_gc_count_before;
    bool retry_gc;

    do {
      retry_gc = false;
--- 2394,2406 ----
  }

  void G1CollectedHeap::collect(GCCause::Cause cause) {
    assert_heap_not_locked();

!   uint gc_count_before;
!   uint old_marking_count_before;
!   uint full_gc_count_before;
    bool retry_gc;

    do {
      retry_gc = false;
*** 3416,3426 ****
    // policy with the new heap occupancy
    Universe::update_heap_info_at_gc();
  }

  HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
!                                                unsigned int gc_count_before,
                                                 bool* succeeded,
                                                 GCCause::Cause gc_cause) {
    assert_heap_not_locked_and_not_at_safepoint();
    g1_policy()->record_stop_world_start();

    VM_G1IncCollectionPause op(gc_count_before,
--- 3416,3426 ----
    // policy with the new heap occupancy
    Universe::update_heap_info_at_gc();
  }

  HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
!                                                uint gc_count_before,
                                                 bool* succeeded,
                                                 GCCause::Cause gc_cause) {
    assert_heap_not_locked_and_not_at_safepoint();
    g1_policy()->record_stop_world_start();

    VM_G1IncCollectionPause op(gc_count_before,
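
The whole patch is a type cleanup: the GC-count out-parameters move from "unsigned int" to HotSpot's "uint" typedef, and the GC-locker retry counter moves from a signed "int" to "uint", so callers and callees agree on one unsigned type throughout the allocation paths. Below is a minimal, self-contained sketch of the before/after pattern, assuming "uint" is the "typedef unsigned int uint;" from utilities/globalDefinitions.hpp; attempt_allocation_sketch and allocate_new_tlab_sketch are hypothetical stand-ins for illustration, not the real G1 entry points.

    // Sketch only: mirrors the patch's parameter-type change, not the real API.
    #include <cstddef>

    typedef unsigned int uint;  // as in utilities/globalDefinitions.hpp

    // After the patch both out-parameters are uint*; before, the GC count was
    // unsigned int* and the GC-locker retry counter a signed int*.
    static void* attempt_allocation_sketch(size_t word_size,
                                           uint* gc_count_before_ret,
                                           uint* gclocker_retry_count_ret) {
      *gc_count_before_ret = 0;        // would be sampled under the Heap_lock
      *gclocker_retry_count_ret += 1;  // bumped on each GCLocker-forced retry
      (void)word_size;
      return NULL;                     // NULL == allocation not satisfied
    }

    static void* allocate_new_tlab_sketch(size_t word_size) {
      uint dummy_gc_count_before;           // was: unsigned int
      uint dummy_gclocker_retry_count = 0;  // was: int
      return attempt_allocation_sketch(word_size,
                                       &dummy_gc_count_before,
                                       &dummy_gclocker_retry_count);
    }

Keeping both counters as the same unsigned typedef removes the signed/unsigned mismatch between the retry counter and the counts it is compared against, which is the pattern every hunk above repeats.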