src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
rev 7474 : imported patch cleanup
@@ -331,11 +331,11 @@
void YoungList::print() {
  HeapRegion* lists[] = {_head, _survivor_head};
  const char* names[] = {"YOUNG", "SURVIVOR"};

-  for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
+  for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
    gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
    HeapRegion *curr = lists[list];
    if (curr == NULL)
      gclog_or_tty->print_cr("  empty");
    while (curr != NULL) {
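Both uint and ARRAY_SIZE come from HotSpot's shared utility header, so the change above is purely notational. A minimal sketch of the assumed definitions, modeled on utilities/globalDefinitions.hpp:

// Assumed definitions (sketch, not copied from the source tree):
typedef unsigned int uint;                                    // same type, shorter spelling
#define ARRAY_SIZE(array) (sizeof(array)/sizeof((array)[0]))  // element count as size_t

Since uint is a typedef for unsigned int, the loop compiles identically before and after; the comparison against the size_t result of ARRAY_SIZE stays unsigned/unsigned either way.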
@@ -763,23 +763,23 @@
HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(word_size), "we do not allow humongous TLABs");

-  unsigned int dummy_gc_count_before;
-  int dummy_gclocker_retry_count = 0;
+  uint dummy_gc_count_before;
+  uint dummy_gclocker_retry_count = 0;
  return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
}
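The two dummy locals exist only to satisfy the out-parameters of attempt_allocation(), so their types have to track its signature; the matching header hunk is not shown on this page. A hedged sketch of the assumed post-patch declaration:

// Assumed shape after this patch (g1CollectedHeap.hpp / .inline.hpp):
HeapWord* attempt_allocation(size_t word_size,
                             uint* gc_count_before_ret,        // was unsigned int*
                             uint* gclocker_retry_count_ret);  // was int*

Flipping dummy_gclocker_retry_count from int to uint is what keeps &dummy_gclocker_retry_count well-typed against the new uint* parameter.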
HeapWord*
G1CollectedHeap::mem_allocate(size_t word_size,
                              bool* gc_overhead_limit_was_exceeded) {
  assert_heap_not_locked_and_not_at_safepoint();

  // Loop until the allocation is satisfied, or unsatisfied after GC.
-  for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
-    unsigned int gc_count_before;
+  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
+    uint gc_count_before;

    HeapWord* result = NULL;
    if (!is_humongous(word_size)) {
      result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
    } else {
@@ -827,12 +827,12 @@
  return NULL;
}

HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                                   AllocationContext_t context,
-                                                  unsigned int *gc_count_before_ret,
-                                                  int* gclocker_retry_count_ret) {
+                                                  uint* gc_count_before_ret,
+                                                  uint* gclocker_retry_count_ret) {
  // Make sure you read the note in attempt_allocation_humongous().
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
         "be called for humongous allocation requests");
@@ -845,11 +845,11 @@
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    bool should_try_gc;
-    unsigned int gc_count_before;
+    uint gc_count_before;

    {
      MutexLockerEx x(Heap_lock);
      result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                    false /* bot_updates */);
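gc_count_before is snapshotted while Heap_lock is held, which is what lets the pause operation detect that another thread's GC already ran. A sketch of the handshake, assuming total_collections() is the monotonically increasing pause counter (its return type would need the matching uint change on the CollectedHeap side, not shown here):

uint gc_count_before;
{
  MutexLockerEx x(Heap_lock);
  gc_count_before = total_collections();  // snapshot: no pause can start here
}
bool succeeded;
HeapWord* result = do_collection_pause(word_size, gc_count_before,
                                       &succeeded, GCCause::_g1_inc_collection_pause);
// At the safepoint the operation compares its snapshot with the current
// count; on a mismatch it does nothing and this loop simply retries.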
@@ -943,12 +943,12 @@
  ShouldNotReachHere();
  return NULL;
}

HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
-                                                       unsigned int * gc_count_before_ret,
-                                                       int* gclocker_retry_count_ret) {
+                                                       uint* gc_count_before_ret,
+                                                       uint* gclocker_retry_count_ret) {
  // The structure of this method has a lot of similarities to
  // attempt_allocation_slow(). The reason these two were not merged
  // into a single one is that such a method would require several "if
  // allocation is not humongous do this, otherwise do that"
  // conditional paths which would obscure its flow. In fact, an early
@@ -977,11 +977,11 @@
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    bool should_try_gc;
-    unsigned int gc_count_before;
+    uint gc_count_before;

    {
      MutexLockerEx x(Heap_lock);

      // Given that humongous objects are not allocated in young
@@ -1815,11 +1815,11 @@
  uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
  assert(n_rem_sets > 0, "Invariant.");

  _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
-  _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
+  _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
  _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);

  for (int i = 0; i < n_queues; i++) {
    RefToScanQueue* q = new RefToScanQueue();
    q->initialize();
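The element type handed to NEW_C_HEAP_ARRAY has to agree with the field it initializes, whose declaration in g1CollectedHeap.hpp is presumably flipped to uint by the same patch (that hunk is not on this page). For reference, a sketch of what the macro is assumed to expand to, modeled on memory/allocation.hpp:

// Roughly: (uint*) AllocateHeap(n_queues * sizeof(uint), mtGC),
// i.e. a C-heap array tagged for native-memory tracking with mtGC.
uint* _worker_cset_start_region_time_stamp;   // assumed post-patch field type

Because uint is a typedef for unsigned int, the allocation size is unchanged; the hunk only keeps the spelling in lock-step with the field.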
@@ -2397,13 +2397,13 @@
}

void G1CollectedHeap::collect(GCCause::Cause cause) {
  assert_heap_not_locked();

-  unsigned int gc_count_before;
-  unsigned int old_marking_count_before;
-  unsigned int full_gc_count_before;
+  uint gc_count_before;
+  uint old_marking_count_before;
+  uint full_gc_count_before;
  bool retry_gc;

  do {
    retry_gc = false;
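All three locals are snapshots taken under Heap_lock before deciding which VM operation to submit. A sketch of the pattern, assuming the counters they capture get the matching uint treatment on the header side:

{
  MutexLocker ml(Heap_lock);
  gc_count_before          = total_collections();          // any GC pause
  full_gc_count_before     = total_full_collections();     // full GCs only
  old_marking_count_before = _old_marking_cycles_started;  // concurrent cycles
}
// Each snapshot is re-checked at the safepoint, so a concurrent request
// that already did the work turns this collect() call into a no-op.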
@@ -3421,11 +3421,11 @@
  // policy with the new heap occupancy
  Universe::update_heap_info_at_gc();
}

HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
-                                              unsigned int gc_count_before,
+                                              uint gc_count_before,
                                               bool* succeeded,
                                               GCCause::Cause gc_cause) {
  assert_heap_not_locked_and_not_at_safepoint();
  g1_policy()->record_stop_world_start();

  VM_G1IncCollectionPause op(gc_count_before,
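gc_count_before ends up in the pause operation's constructor, so VM_G1IncCollectionPause (and its VM_GC_Operation base) need the same uint change on the vm_operations_g1 side. A hedged sketch of the assumed post-patch constructor:

// Assumed shape after the matching vm_operations_g1.hpp hunk:
VM_G1IncCollectionPause(uint           gc_count_before,
                        size_t         word_size,
                        bool           should_initiate_conc_mark,
                        double         target_pause_time_ms,
                        GCCause::Cause gc_cause);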