< prev index next >

src/hotspot/share/gc/z/zHeap.cpp

Print this page




// Undo a page allocation, handing the page back to the allocator.
// The page must still be in the allocating state. The page is not
// accounted as reclaimed, since it never held live objects.
void ZHeap::undo_alloc_page(ZPage* page) {
  assert(page->is_allocating(), "Invalid page state");

  // Count and trace the undo for diagnostics
  ZStatInc(ZCounterUndoPageAllocation);
  log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
                ZThread::id(), ZThread::name(), p2i(page), page->size());

  // Drop our reference; not counted as reclaimed memory
  release_page(page, false /* reclaimed */);
}
 239 
// Try to take a reference on the page so it stays alive while in use.
// Returns the result of ZPage::inc_refcount() — presumably true if the
// reference was taken, false if the page could no longer be retained.
bool ZHeap::retain_page(ZPage* page) {
  return page->inc_refcount();
}
 243 
// Drop a reference on the page. When dec_refcount() reports that the
// page should be freed (presumably the last reference was dropped), the
// page is handed back to the page allocator. The reclaimed flag tells
// the allocator whether to account the memory as reclaimed.
void ZHeap::release_page(ZPage* page, bool reclaimed) {
  if (page->dec_refcount()) {
    _page_allocator.free_page(page, reclaimed);
  }
}
 249 
 250 void ZHeap::flip_views() {
 251   // For debugging only
 252   if (ZUnmapBadViews) {
 253     // Flip pages






 254     ZPageTableIterator iter(&_pagetable);
 255     for (ZPage* page; iter.next(&page);) {
 256       if (!page->is_detached()) {
 257         _page_allocator.flip_page(page);
 258       }
 259     }
 260 
 261     // Flip pre-mapped memory
 262     _page_allocator.flip_pre_mapped();
 263   }
 264 }
 265 












// Prepare for and enter the mark phase. Must run at a safepoint, since
// it flips the global address view and transitions ZGlobalPhase. The
// statement order below is significant.
void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics (heap usage sampled before marking starts)
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Flip address view to the marked view
  ZAddressMasks::flip_to_marked();
  flip_views();

  // Retire allocating pages
  _object_allocator.retire_pages();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics now that the mark phase has started
  ZStatHeap::set_at_mark_start(capacity(), used());
}
 294 


 449 }
 450 
 451 void ZHeap::reset_relocation_set() {
 452   ZRelocationSetIterator iter(&_relocation_set);
 453   for (ZPage* page; iter.next(&page);) {
 454     // Reset relocation information
 455     page->reset_forwarding();
 456 
 457     // Update pagetable
 458     _pagetable.clear_relocating(page);
 459   }
 460 }
 461 
// Prepare for and enter the relocate phase. Must run at a safepoint,
// since it flips the global address view and transitions ZGlobalPhase.
void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Finish unloading of classes and code
  _unload.finish();

  // Flip address view to the remapped view
  ZAddressMasks::flip_to_remapped();
  flip_views();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}
 482 
 483 uintptr_t ZHeap::relocate_object(uintptr_t addr) {
 484   assert(ZGlobalPhase == ZPhaseRelocate, "Relocate not allowed");
 485   ZPage* const page = _pagetable.get(addr);
 486   const bool retained = retain_page(page);
 487   const uintptr_t new_addr = page->relocate_object(addr);
 488   if (retained) {
 489     release_page(page, true /* reclaimed */);
 490   }




 230 void ZHeap::undo_alloc_page(ZPage* page) {
 231   assert(page->is_allocating(), "Invalid page state");
 232 
 233   ZStatInc(ZCounterUndoPageAllocation);
 234   log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: " SIZE_FORMAT,
 235                 ZThread::id(), ZThread::name(), p2i(page), page->size());
 236 
 237   release_page(page, false /* reclaimed */);
 238 }
 239 
 240 bool ZHeap::retain_page(ZPage* page) {
 241   return page->inc_refcount();
 242 }
 243 
 244 void ZHeap::release_page(ZPage* page, bool reclaimed) {
 245   if (page->dec_refcount()) {
 246     _page_allocator.free_page(page, reclaimed);
 247   }
 248 }
 249 
 250 void ZHeap::before_flip() {
 251   if (ZVerifyViews) {
 252     // Unmap all pages
 253     _page_allocator.unmap_all_pages();
 254   }
 255 }
 256 
 257 void ZHeap::after_flip() {
 258   if (ZVerifyViews) {
 259     // Map all pages
 260     ZPageTableIterator iter(&_pagetable);
 261     for (ZPage* page; iter.next(&page);) {
 262       if (!page->is_detached()) {
 263         _page_allocator.map_page(page);
 264       }
 265     }



 266   }
 267 }
 268 
// Flip the global address view to the marked view, bracketed by the
// ZVerifyViews unmap/map hooks.
void ZHeap::flip_to_marked() {
  before_flip();
  ZAddressMasks::flip_to_marked();
  after_flip();
}
 274 
// Flip the global address view to the remapped view, bracketed by the
// ZVerifyViews unmap/map hooks.
void ZHeap::flip_to_remapped() {
  before_flip();
  ZAddressMasks::flip_to_remapped();
  after_flip();
}
 280 
// Prepare for and enter the mark phase. Must run at a safepoint, since
// it flips the global address view and transitions ZGlobalPhase. The
// statement order below is significant.
void ZHeap::mark_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Update statistics (heap usage sampled before marking starts)
  ZStatSample(ZSamplerHeapUsedBeforeMark, used());

  // Flip address view to the marked view
  flip_to_marked();

  // Retire allocating pages
  _object_allocator.retire_pages();

  // Reset allocated/reclaimed/used statistics
  _page_allocator.reset_statistics();

  // Reset encountered/dropped/enqueued statistics
  _reference_processor.reset_statistics();

  // Enter mark phase
  ZGlobalPhase = ZPhaseMark;

  // Reset marking information and mark roots
  _mark.start();

  // Update statistics now that the mark phase has started
  ZStatHeap::set_at_mark_start(capacity(), used());
}
 308 


 463 }
 464 
 465 void ZHeap::reset_relocation_set() {
 466   ZRelocationSetIterator iter(&_relocation_set);
 467   for (ZPage* page; iter.next(&page);) {
 468     // Reset relocation information
 469     page->reset_forwarding();
 470 
 471     // Update pagetable
 472     _pagetable.clear_relocating(page);
 473   }
 474 }
 475 
// Prepare for and enter the relocate phase. Must run at a safepoint,
// since it flips the global address view and transitions ZGlobalPhase.
void ZHeap::relocate_start() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  // Finish unloading of classes and code
  _unload.finish();

  // Flip address view to the remapped view
  flip_to_remapped();

  // Enter relocate phase
  ZGlobalPhase = ZPhaseRelocate;

  // Update statistics
  ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
  ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());

  // Remap/Relocate roots
  _relocate.start();
}
 495 
 496 uintptr_t ZHeap::relocate_object(uintptr_t addr) {
 497   assert(ZGlobalPhase == ZPhaseRelocate, "Relocate not allowed");
 498   ZPage* const page = _pagetable.get(addr);
 499   const bool retained = retain_page(page);
 500   const uintptr_t new_addr = page->relocate_object(addr);
 501   if (retained) {
 502     release_page(page, true /* reclaimed */);
 503   }


< prev index next >