6401
6402 if (hr->is_young()) {
6403 // TODO
6404 } else if (hr->startsHumongous()) {
6405 _humongous_set->verify_next_region(hr);
6406 } else if (hr->is_empty()) {
6407 _free_list->verify_next_region(hr);
6408 } else {
6409 _old_set->verify_next_region(hr);
6410 }
6411 return false;
6412 }
6413 };
6414
6415 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
6416 HeapWord* bottom) {
// Factory for a single heap region: covers exactly one region-sized
// (HeapRegion::GrainWords) span of the heap starting at bottom.
6417 HeapWord* end = bottom + HeapRegion::GrainWords;
6418 MemRegion mr(bottom, end);
// The new region must lie entirely within the reserved G1 heap space.
6419 assert(_g1_reserved.contains(mr), "invariant");
6420 // This might return NULL if the allocation fails
// NOTE(review): passes true for is_zeroed — presumably the caller
// guarantees the underlying memory is already zeroed; confirm at call sites.
6421 return new HeapRegion(hrs_index, _bot_shared, mr, true /* is_zeroed */);
6422 }
6423
6424 void G1CollectedHeap::verify_region_sets() {
6425 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6426
6427 // First, check the explicit lists.
6428 _free_list.verify();
6429 {
6430 // Given that a concurrent operation might be adding regions to
6431 // the secondary free list we have to take the lock before
6432 // verifying it.
6433 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6434 _secondary_free_list.verify();
6435 }
6436 _old_set.verify();
6437 _humongous_set.verify();
6438
6439 // If a concurrent region freeing operation is in progress it will
6440 // be difficult to correctly attribute any free regions we come
6441 // across to the correct free list given that they might belong to
|
6401
6402 if (hr->is_young()) {
6403 // TODO
6404 } else if (hr->startsHumongous()) {
6405 _humongous_set->verify_next_region(hr);
6406 } else if (hr->is_empty()) {
6407 _free_list->verify_next_region(hr);
6408 } else {
6409 _old_set->verify_next_region(hr);
6410 }
6411 return false;
6412 }
6413 };
6414
6415 HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
6416 HeapWord* bottom) {
// Factory for a single heap region: covers exactly one region-sized
// (HeapRegion::GrainWords) span of the heap starting at bottom.
6417 HeapWord* end = bottom + HeapRegion::GrainWords;
6418 MemRegion mr(bottom, end);
// The new region must lie entirely within the reserved G1 heap space.
6419 assert(_g1_reserved.contains(mr), "invariant");
6420 // This might return NULL if the allocation fails
6421 return new HeapRegion(hrs_index, _bot_shared, mr);
6422 }
6423
6424 void G1CollectedHeap::verify_region_sets() {
6425 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6426
6427 // First, check the explicit lists.
6428 _free_list.verify();
6429 {
6430 // Given that a concurrent operation might be adding regions to
6431 // the secondary free list we have to take the lock before
6432 // verifying it.
6433 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
6434 _secondary_free_list.verify();
6435 }
6436 _old_set.verify();
6437 _humongous_set.verify();
6438
6439 // If a concurrent region freeing operation is in progress it will
6440 // be difficult to correctly attribute any free regions we come
6441 // across to the correct free list given that they might belong to
|