< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Print this page
rev 60593 : 8252035: G1: Clean up G1CollectedHeap::*reserved* methods
Reviewed-by:


1664     create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1665 
1666   _hrm = HeapRegionManager::create_manager(this);
1667 
1668   _hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1669   _card_table->initialize(cardtable_storage);
1670 
1671   // Do later initialization work for concurrent refinement.
1672   _hot_card_cache->initialize(card_counts_storage);
1673 
1674   // 6843694 - ensure that the maximum region index can fit
1675   // in the remembered set structures.
1676   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1677   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1678 
1679   // The G1FromCardCache reserves card with value 0 as "invalid", so the heap must not
1680   // start within the first card.
1681   guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
1682   // Also create a G1 rem set.
1683   _rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
1684   _rem_set->initialize(max_reserved_capacity(), max_regions());
1685 
1686   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1687   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1688   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1689             "too many cards per region");
1690 
1691   FreeRegionList::set_unrealistically_long_length(max_expandable_regions() + 1);
1692 
1693   _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);
1694 
1695   {
1696     HeapWord* start = _hrm->reserved().start();
1697     HeapWord* end = _hrm->reserved().end();
1698     size_t granularity = HeapRegion::GrainBytes;
1699 
1700     _region_attr.initialize(start, end, granularity);
1701     _humongous_reclaim_candidates.initialize(start, end, granularity);
1702   }
1703 
1704   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1705                           true /* are_GC_task_threads */,
1706                           false /* are_ConcurrentGC_threads */);
1707   if (_workers == NULL) {
1708     return JNI_ENOMEM;
1709   }
1710   _workers->initialize_workers();
1711 
1712   _numa->set_region_info(HeapRegion::GrainBytes, page_size);
1713 
1714   // Create the G1ConcurrentMark data structure and thread.
1715   // (Must do this late, so that "max_regions" is defined.)
1716   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1717   _cm_thread = _cm->cm_thread();
1718 
1719   // Now expand into the initial heap size.
1720   if (!expand(init_byte_size, _workers)) {
1721     vm_shutdown_during_initialization("Failed to allocate initial heap.");


2254     VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2255     VMThread::execute(&op);
2256     return op.gc_succeeded();
2257   }
2258 }
2259 
// Coarse heap membership test: a cheap range check against the reserved
// space, then a precise query on the region that covers p.
2260 bool G1CollectedHeap::is_in(const void* p) const {
2261   if (_hrm->reserved().contains(p)) {
2262     // Given that we know that p is in the reserved space,
2263     // heap_region_containing() should successfully
2264     // return the containing region.
2265     HeapRegion* hr = heap_region_containing(p);
2266     return hr->is_in(p);
2267   } else {
2268     return false;
2269   }
2270 }
2271 
2272 #ifdef ASSERT
// Debug-only, stricter variant of is_in(): p must lie inside the
// reserved region AND the region covering p must be reported available
// by the HeapRegionManager.  NOTE(review): "available" presumably means
// committed/usable — confirm against HeapRegionManager::is_available().
2273 bool G1CollectedHeap::is_in_exact(const void* p) const {
2274   bool contains = reserved_region().contains(p);
2275   bool available = _hrm->is_available(addr_to_region((HeapWord*)p));
2276   if (contains && available) {
2277     return true;
2278   } else {
2279     return false;
2280   }
2281 }
2282 #endif
2283 
2284 // Iteration functions.
2285 
2286 // Iterates an ObjectClosure over all objects within a HeapRegion.
2287 
2288 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2289   ObjectClosure* _cl;
2290 public:
2291   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2292   bool do_heap_region(HeapRegion* r) {
2293     if (!r->is_continues_humongous()) {
2294       r->object_iterate(_cl);


// Space nominally available for TLAB allocation: the policy's young-list
// target length minus regions already taken by survivors, scaled by the
// fixed region size.  The Thread argument is intentionally unused.
2374 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2375   return (_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
2376 }
2377 
// TLAB space already consumed: the current eden region count times the
// fixed region size.  The Thread argument is intentionally unused.
2378 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2379   return _eden.length() * HeapRegion::GrainBytes;
2380 }
2381 
2382 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
2383 // must be equal to the humongous object limit.
// The threshold is rounded down so the result stays object-aligned.
2384 size_t G1CollectedHeap::max_tlab_size() const {
2385   return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2386 }
2387 
// Delegates to the allocator.  NOTE(review): per the CollectedHeap
// contract "unsafe" presumably means the value is computed without
// synchronization and may be stale — confirm against
// G1Allocator::unsafe_max_tlab_alloc().
2388 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2389   return _allocator->unsafe_max_tlab_alloc();
2390 }
2391 
// Largest size the heap can grow to: the maximum expandable region
// count times the fixed region size.
2392 size_t G1CollectedHeap::max_capacity() const {
2393   return _hrm->max_expandable_length() * HeapRegion::GrainBytes;
2394 }
2395 
// Size of the full reserved address range: the maximum region count
// times the fixed region size.
2396 size_t G1CollectedHeap::max_reserved_capacity() const {
2397   return _hrm->max_length() * HeapRegion::GrainBytes;
2398 }
2399 
// Hands str to the G1 string-deduplication facility when it is enabled;
// a no-op otherwise.  Precondition (asserted): str is a java.lang.String.
2400 void G1CollectedHeap::deduplicate_string(oop str) {
2401   assert(java_lang_String::is_instance(str), "invariant");
2402 
2403   if (G1StringDedup::is_enabled()) {
2404     G1StringDedup::deduplicate(str);
2405   }
2406 }
2407 
// Verification setup — forwards to the heap verifier.
2408 void G1CollectedHeap::prepare_for_verify() {
2409   _verifier->prepare_for_verify();
2410 }
2411 
// Heap verification entry point — forwards to the heap verifier.
2412 void G1CollectedHeap::verify(VerifyOption vo) {
2413   _verifier->verify(vo);
2414 }
2415 
2416 bool G1CollectedHeap::supports_concurrent_gc_breakpoints() const {
2417   return true;




1664     create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
1665 
1666   _hrm = HeapRegionManager::create_manager(this);
1667 
1668   _hrm->initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
1669   _card_table->initialize(cardtable_storage);
1670 
1671   // Do later initialization work for concurrent refinement.
1672   _hot_card_cache->initialize(card_counts_storage);
1673 
1674   // 6843694 - ensure that the maximum region index can fit
1675   // in the remembered set structures.
1676   const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1677   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1678 
1679   // The G1FromCardCache reserves card with value 0 as "invalid", so the heap must not
1680   // start within the first card.
1681   guarantee(g1_rs.base() >= (char*)G1CardTable::card_size, "Java heap must not start within the first card.");
1682   // Also create a G1 rem set.
1683   _rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
1684   _rem_set->initialize(max_regions());
1685 
1686   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1687   guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1688   guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1689             "too many cards per region");
1690 
1691   FreeRegionList::set_unrealistically_long_length(max_expandable_regions() + 1);
1692 
1693   _bot = new G1BlockOffsetTable(reserved(), bot_storage);
1694 
1695   {


1696     size_t granularity = HeapRegion::GrainBytes;
1697 
1698     _region_attr.initialize(reserved(), granularity);
1699     _humongous_reclaim_candidates.initialize(reserved(), granularity);
1700   }
1701 
1702   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1703                           true /* are_GC_task_threads */,
1704                           false /* are_ConcurrentGC_threads */);
1705   if (_workers == NULL) {
1706     return JNI_ENOMEM;
1707   }
1708   _workers->initialize_workers();
1709 
1710   _numa->set_region_info(HeapRegion::GrainBytes, page_size);
1711 
1712   // Create the G1ConcurrentMark data structure and thread.
1713   // (Must do this late, so that "max_regions" is defined.)
1714   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1715   _cm_thread = _cm->cm_thread();
1716 
1717   // Now expand into the initial heap size.
1718   if (!expand(init_byte_size, _workers)) {
1719     vm_shutdown_during_initialization("Failed to allocate initial heap.");


2252     VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
2253     VMThread::execute(&op);
2254     return op.gc_succeeded();
2255   }
2256 }
2257 
// Coarse heap membership test: a range check against the reserved
// space, then a precise query on the covering region.
2258 bool G1CollectedHeap::is_in(const void* p) const {
2259   if (_hrm->reserved().contains(p)) {
2260     // Given that we know that p is in the reserved space,
2261     // heap_region_containing() should successfully
2262     // return the containing region.
2263     HeapRegion* hr = heap_region_containing(p);
2264     return hr->is_in(p);
2265   } else {
2266     return false;
2267   }
2268 }
2269 
2270 #ifdef ASSERT
// Debug-only, stricter variant of is_in(): p must lie inside the
// reserved region AND the covering region must be reported available by
// the HeapRegionManager.  NOTE(review): "available" presumably means
// committed/usable — confirm against HeapRegionManager::is_available().
2271 bool G1CollectedHeap::is_in_exact(const void* p) const {
2272   bool contains = reserved().contains(p);
2273   bool available = _hrm->is_available(addr_to_region((HeapWord*)p));
2274   if (contains && available) {
2275     return true;
2276   } else {
2277     return false;
2278   }
2279 }
2280 #endif
2281 
2282 // Iteration functions.
2283 
2284 // Iterates an ObjectClosure over all objects within a HeapRegion.
2285 
2286 class IterateObjectClosureRegionClosure: public HeapRegionClosure {
2287   ObjectClosure* _cl;
2288 public:
2289   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2290   bool do_heap_region(HeapRegion* r) {
2291     if (!r->is_continues_humongous()) {
2292       r->object_iterate(_cl);


// Space nominally available for TLAB allocation: the policy's young-list
// target length minus regions already taken by survivors, scaled by the
// fixed region size.  The Thread argument is intentionally unused.
2372 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2373   return (_policy->young_list_target_length() - _survivor.length()) * HeapRegion::GrainBytes;
2374 }
2375 
// TLAB space already consumed: the current eden region count times the
// fixed region size.  The Thread argument is intentionally unused.
2376 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2377   return _eden.length() * HeapRegion::GrainBytes;
2378 }
2379 
2380 // For G1 TLABs should not contain humongous objects, so the maximum TLAB size
2381 // must be equal to the humongous object limit.
// The threshold is rounded down so the result stays object-aligned.
2382 size_t G1CollectedHeap::max_tlab_size() const {
2383   return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
2384 }
2385 
// Delegates to the allocator.  NOTE(review): per the CollectedHeap
// contract "unsafe" presumably means the value is computed without
// synchronization and may be stale — confirm against
// G1Allocator::unsafe_max_tlab_alloc().
2386 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2387   return _allocator->unsafe_max_tlab_alloc();
2388 }
2389 
// Largest size the heap can grow to: the maximum expandable region
// count times the fixed region size.
2390 size_t G1CollectedHeap::max_capacity() const {
2391   return _hrm->max_expandable_length() * HeapRegion::GrainBytes;




2392 }
2393 
// Hands str to the G1 string-deduplication facility when it is enabled;
// a no-op otherwise.  Precondition (asserted): str is a java.lang.String.
2394 void G1CollectedHeap::deduplicate_string(oop str) {
2395   assert(java_lang_String::is_instance(str), "invariant");
2396 
2397   if (G1StringDedup::is_enabled()) {
2398     G1StringDedup::deduplicate(str);
2399   }
2400 }
2401 
// Verification setup — forwards to the heap verifier.
2402 void G1CollectedHeap::prepare_for_verify() {
2403   _verifier->prepare_for_verify();
2404 }
2405 
// Heap verification entry point — forwards to the heap verifier.
2406 void G1CollectedHeap::verify(VerifyOption vo) {
2407   _verifier->verify(vo);
2408 }
2409 
2410 bool G1CollectedHeap::supports_concurrent_gc_breakpoints() const {
2411   return true;


< prev index next >