src/hotspot/share/gc/z/zHeap.cpp

--- old/src/hotspot/share/gc/z/zHeap.cpp

  59     _object_allocator(),
  60     _page_allocator(&_workers, heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
  61     _page_table(),
  62     _forwarding_table(),
  63     _mark(&_workers, &_page_table),
  64     _reference_processor(&_workers),
  65     _weak_roots_processor(&_workers),
  66     _relocate(&_workers),
  67     _relocation_set(),
  68     _unload(&_workers),
  69     _serviceability(heap_min_size(), heap_max_size()) {
  70   // Install global heap instance
  71   assert(_heap == NULL, "Already initialized");
  72   _heap = this;
  73 
  74   // Update statistics
  75   ZStatHeap::set_at_initialize(heap_min_size(), heap_max_size(), heap_max_reserve_size());
  76 }
  77 
  78 size_t ZHeap::heap_min_size() const {
  79   return MinHeapSize;
  80 }
  81 
  82 size_t ZHeap::heap_initial_size() const {
  83   return InitialHeapSize;
  84 }
  85 
  86 size_t ZHeap::heap_max_size() const {
  87   return MaxHeapSize;
  88 }
  89 
  90 size_t ZHeap::heap_max_reserve_size() const {
  91   // Reserve one small page per worker plus one shared medium page. This is still just
  92   // an estimate and doesn't guarantee that we can't run out of memory during relocation.
  93   const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  94   return MIN2(max_reserve_size, heap_max_size());
  95 }
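
For a sense of scale, here is a minimal standalone sketch of that reserve computation. The 2M small-page and 32M medium-page sizes are common ZGC values, but both are derived at runtime (ZPageSizeSmall, ZPageSizeMedium), so treat the numbers below as illustrative only:

    #include <algorithm>
    #include <cstddef>

    // Illustrative stand-ins for ZPageSizeSmall/ZPageSizeMedium; the real
    // values are computed at runtime (2M/32M are typical defaults).
    const size_t M = 1024 * 1024;
    const size_t page_size_small  = 2 * M;
    const size_t page_size_medium = 32 * M;

    size_t max_reserve_size(size_t nworkers, size_t max_heap_size) {
      // One small page per worker plus one shared medium page,
      // capped at the max heap size.
      return std::min(nworkers * page_size_small + page_size_medium,
                      max_heap_size);
    }
    // e.g. 8 workers: (8 * 2M) + 32M = 48M
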
  96 
  97 bool ZHeap::is_initialized() const {
  98   return _page_allocator.is_initialized() && _mark.is_initialized();
  99 }
 100 
 101 size_t ZHeap::min_capacity() const {
 102   return _page_allocator.min_capacity();
 103 }


 224 }
 225 
 226 void ZHeap::undo_alloc_page(ZPage* page) {
 227   assert(page->is_allocating(), "Invalid page state");
 228 
 229   ZStatInc(ZCounterUndoPageAllocation);
 230   log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
 231                 ZThread::id(), ZThread::name(), p2i(page), page->size());
 232 
 233   free_page(page, false /* reclaimed */);
 234 }
 235 
 236 void ZHeap::free_page(ZPage* page, bool reclaimed) {
 237   // Remove page table entry
 238   _page_table.remove(page);
 239 
 240   // Free page
 241   _page_allocator.free_page(page, reclaimed);
 242 }
 243 
 244 uint64_t ZHeap::uncommit(uint64_t delay) {
 245   return _page_allocator.uncommit(delay);
 246 }
 247 
 248 void ZHeap::flip_to_marked() {
 249   ZVerifyViewsFlip flip(&_page_allocator);
 250   ZAddress::flip_to_marked();
 251 }
 252 
 253 void ZHeap::flip_to_remapped() {
 254   ZVerifyViewsFlip flip(&_page_allocator);
 255   ZAddress::flip_to_remapped();
 256 }
 257 
 258 void ZHeap::mark_start() {
 259   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 260 
 261   // Update statistics
 262   ZStatSample(ZSamplerHeapUsedBeforeMark, used());
 263 
 264   // Flip address view
 265   flip_to_marked();

+++ new/src/hotspot/share/gc/z/zHeap.cpp

  59     _object_allocator(),
  60     _page_allocator(&_workers, heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
  61     _page_table(),
  62     _forwarding_table(),
  63     _mark(&_workers, &_page_table),
  64     _reference_processor(&_workers),
  65     _weak_roots_processor(&_workers),
  66     _relocate(&_workers),
  67     _relocation_set(),
  68     _unload(&_workers),
  69     _serviceability(heap_min_size(), heap_max_size()) {
  70   // Install global heap instance
  71   assert(_heap == NULL, "Already initialized");
  72   _heap = this;
  73 
  74   // Update statistics
  75   ZStatHeap::set_at_initialize(heap_min_size(), heap_max_size(), heap_max_reserve_size());
  76 }
  77 
  78 size_t ZHeap::heap_min_size() const {
  79   return MAX2(MinHeapSize, heap_max_reserve_size());
  80 }
  81 
  82 size_t ZHeap::heap_initial_size() const {
  83   return MAX2(InitialHeapSize, heap_max_reserve_size());
  84 }
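
The substantive change in the new version is these MAX2 clamps: the minimum and initial heap sizes can no longer be configured below the relocation reserve, so the reserve is always guaranteed to fit in the committed heap. A minimal sketch with hypothetical values:

    #include <algorithm>
    #include <cstddef>

    // Hypothetical numbers: -Xms8M with a 48M reserve. MAX2 in HotSpot
    // behaves like std::max, so the effective minimum becomes 48M and
    // heap_min_size() >= heap_max_reserve_size() always holds.
    size_t clamped_min(size_t min_heap_size, size_t max_reserve_size) {
      return std::max(min_heap_size, max_reserve_size);  // max(8M, 48M) = 48M
    }
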
  85 
  86 size_t ZHeap::heap_max_size() const {
  87   return MaxHeapSize;
  88 }
  89 
  90 size_t ZHeap::heap_max_reserve_size() const {
  91   // Reserve one small page per worker plus one shared medium page. This is still just
  92   // an estimate and doesn't guarantee that we can't run out of memory during relocation.
  93   const size_t max_reserve_size = (_workers.nworkers() * ZPageSizeSmall) + ZPageSizeMedium;
  94   return MIN2(max_reserve_size, heap_max_size());
  95 }
  96 
  97 bool ZHeap::is_initialized() const {
  98   return _page_allocator.is_initialized() && _mark.is_initialized();
  99 }
 100 
 101 size_t ZHeap::min_capacity() const {
 102   return _page_allocator.min_capacity();
 103 }


 224 }
 225 
 226 void ZHeap::undo_alloc_page(ZPage* page) {
 227   assert(page->is_allocating(), "Invalid page state");
 228 
 229   ZStatInc(ZCounterUndoPageAllocation);
 230   log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
 231                 ZThread::id(), ZThread::name(), p2i(page), page->size());
 232 
 233   free_page(page, false /* reclaimed */);
 234 }
 235 
 236 void ZHeap::free_page(ZPage* page, bool reclaimed) {
 237   // Remove page table entry
 238   _page_table.remove(page);
 239 
 240   // Free page
 241   _page_allocator.free_page(page, reclaimed);
 242 }
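
The reclaimed flag is an accounting hint to the page allocator: undo_alloc_page() passes false above because an undone allocation frees memory that never held live data, whereas pages freed after relocation would be credited as memory reclaimed by the collector. An illustration of the two intents (the second call site is paraphrased from general ZGC behavior, not shown on this page):

    // Undo of a failed or raced allocation: not GC progress.
    free_page(page, false /* reclaimed */);

    // Hypothetical relocation call site: the page's memory counts as
    // garbage reclaimed during this cycle.
    free_page(page, true /* reclaimed */);
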
 243 
 244 uint64_t ZHeap::uncommit() {
 245   return _page_allocator.uncommit();
 246 }
 247 
 248 void ZHeap::uncommit_cancel() {
 249   return _page_allocator.uncommit_cancel();
 250 }
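
In the old version, ZHeap::uncommit(delay) took the uncommit delay and returned the time until the next uncommit was due; the new split into uncommit() and uncommit_cancel() moves that timing policy into the page allocator and allows an in-progress uncommit to be aborted. A hypothetical driver loop for the new interface (stop_requested() and sleep_until_next() are assumed helper names, not part of this patch):

    // Hypothetical service-thread loop driving the new interface.
    while (!stop_requested()) {
      // The allocator decides internally what is due for uncommit.
      ZHeap::heap()->uncommit();
      sleep_until_next();  // timing policy no longer lives in ZHeap
    }
    // On shutdown, or when the memory is needed again, abort any
    // uncommit still in flight:
    ZHeap::heap()->uncommit_cancel();
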
 251 
 252 void ZHeap::flip_to_marked() {
 253   ZVerifyViewsFlip flip(&_page_allocator);
 254   ZAddress::flip_to_marked();
 255 }
 256 
 257 void ZHeap::flip_to_remapped() {
 258   ZVerifyViewsFlip flip(&_page_allocator);
 259   ZAddress::flip_to_remapped();
 260 }
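
Both flips switch ZGC's global address view, bracketed by ZVerifyViewsFlip so the page allocator's verification observes a consistent transition. Conceptually, marking alternates between two mark colors on successive cycles while relocation restores the remapped view; a toy model of that state machine (the names are illustrative, not the real metadata-bit layout):

    // Conceptual only: flipping the view changes which pointer color
    // is currently considered "good".
    enum class ZColor { Marked0, Marked1, Remapped };

    struct ZViewModel {
      ZColor view      = ZColor::Remapped;
      bool   odd_cycle = false;

      void flip_to_marked() {
        // Alternate mark colors so "good" bits from the previous
        // cycle are never accidentally good in the current one.
        odd_cycle = !odd_cycle;
        view = odd_cycle ? ZColor::Marked0 : ZColor::Marked1;
      }

      void flip_to_remapped() {
        view = ZColor::Remapped;
      }
    };
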
 261 
 262 void ZHeap::mark_start() {
 263   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 264 
 265   // Update statistics
 266   ZStatSample(ZSamplerHeapUsedBeforeMark, used());
 267 
 268   // Flip address view
 269   flip_to_marked();

