< prev index next >

src/hotspot/share/gc/z/zHeap.cpp

Print this page




  23 
  24 #include "precompiled.hpp"
  25 #include "gc/shared/locationPrinter.hpp"
  26 #include "gc/z/zAddress.inline.hpp"
  27 #include "gc/z/zGlobals.hpp"
  28 #include "gc/z/zHeap.inline.hpp"
  29 #include "gc/z/zHeapIterator.hpp"
  30 #include "gc/z/zMark.inline.hpp"
  31 #include "gc/z/zPage.inline.hpp"
  32 #include "gc/z/zPageTable.inline.hpp"
  33 #include "gc/z/zRelocationSet.inline.hpp"
  34 #include "gc/z/zRelocationSetSelector.hpp"
  35 #include "gc/z/zResurrection.hpp"
  36 #include "gc/z/zStat.hpp"
  37 #include "gc/z/zThread.inline.hpp"
  38 #include "gc/z/zVerify.hpp"
  39 #include "gc/z/zWorkers.inline.hpp"
  40 #include "logging/log.hpp"
  41 #include "memory/iterator.hpp"
  42 #include "memory/resourceArea.hpp"

  43 #include "runtime/safepoint.hpp"
  44 #include "runtime/thread.hpp"
  45 #include "utilities/debug.hpp"
  46 
  47 static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
  48 static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
  49 static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
  50 static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
  51 static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
  52 static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);
  53 
  54 ZHeap* ZHeap::_heap = NULL;
  55 
  56 ZHeap::ZHeap() :
  57     _workers(),
  58     _object_allocator(),
  59     _page_allocator(heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
  60     _page_table(),
  61     _forwarding_table(),
  62     _mark(&_workers, &_page_table),


 298     // Marking not completed, continue concurrent mark
 299     return false;
 300   }
 301 
 302   // Enter mark completed phase
 303   ZGlobalPhase = ZPhaseMarkCompleted;
 304 
 305   // Verify after mark
 306   ZVerify::after_mark();
 307 
 308   // Update statistics
 309   ZStatSample(ZSamplerHeapUsedAfterMark, used());
 310   ZStatHeap::set_at_mark_end(capacity(), allocated(), used());
 311 
 312   // Block resurrection of weak/phantom references
 313   ZResurrection::block();
 314 
 315   // Process weak roots
 316   _weak_roots_processor.process_weak_roots();
 317 
 318   // Prepare to unload unused classes and code
 319   _unload.prepare();
 320 
 321   return true;
 322 }
 323 
      // Forward the soft-reference clearing policy to the reference processor.
      // clear == true presumably selects the aggressive "clear all softly
      // reachable referents" policy — confirm in ZReferenceProcessor.
  324 void ZHeap::set_soft_reference_policy(bool clear) {
  325   _reference_processor.set_soft_reference_policy(clear);
  326 }
 327 





      // Concurrent processing of all non-strong references and class/code
      // unloading. Step order is significant: references are processed and
      // classes unloaded while resurrection is still blocked (blocked at mark
      // end, see ZResurrection::block()); unblocking must precede enqueueing —
      // see the comment above enqueue_references().
  328 void ZHeap::process_non_strong_references() {
  329   // Process Soft/Weak/Final/PhantomReferences
  330   _reference_processor.process_references();
  331 
  332   // Process concurrent weak roots
  333   _weak_roots_processor.process_concurrent_weak_roots();
  334 
  335   // Unload unused classes and code
  336   _unload.unload();














  337 
  338   // Unblock resurrection of weak/phantom references
  339   ZResurrection::unblock();
  340 
  341   // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  342   // must be done after unblocking resurrection. Otherwise the
  343   // Finalizer thread could call Reference.get() on the Finalizers
  344   // that were just enqueued, which would incorrectly return null
  345   // during the resurrection block window, since such referents
  346   // are only Finalizable marked.
  347   _reference_processor.enqueue_references();
  348 }
 349 
 350 void ZHeap::select_relocation_set() {
 351   // Do not allow pages to be deleted
 352   _page_allocator.enable_deferred_delete();
 353 
 354   // Register relocatable pages with selector
 355   ZRelocationSetSelector selector;
 356   ZPageTableIterator pt_iter(&_page_table);


 388   ZStatRelocation::set_at_select_relocation_set(selector.relocating());
 389   ZStatHeap::set_at_select_relocation_set(selector.live(),
 390                                           selector.garbage(),
 391                                           reclaimed());
 392 }
 393 
      // Clear all per-cycle relocation state: every forwarding registered for
      // the current relocation set is removed from the forwarding table before
      // the set itself is reset for reuse in the next cycle.
  394 void ZHeap::reset_relocation_set() {
  395   // Reset forwarding table
  396   ZRelocationSetIterator iter(&_relocation_set);
  397   for (ZForwarding* forwarding; iter.next(&forwarding);) {
  398     _forwarding_table.remove(forwarding);
  399   }
  400 
  401   // Reset relocation set
  402   _relocation_set.reset();
  403 }
 404 
      // Safepoint operation (asserted below) that transitions the cycle into
      // the relocate phase: finishes class/code unloading, flips the global
      // address view to remapped, records heap statistics, and starts root
      // remapping/relocation.
  405 void ZHeap::relocate_start() {
  406   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  407 
  408   // Finish unloading of classes and code
  409   _unload.finish();
  410 
  411   // Flip address view
  412   flip_to_remapped();
  413 
  414   // Enter relocate phase
  415   ZGlobalPhase = ZPhaseRelocate;
  416 
  417   // Update statistics
  418   ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
  419   ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());
  420 
  421   // Remap/Relocate roots
  422   _relocate.start();
  423 }
 424 
 425 void ZHeap::relocate() {
 426   // Relocate relocation set
 427   const bool success = _relocate.relocate(&_relocation_set);
 428 




  23 
  24 #include "precompiled.hpp"
  25 #include "gc/shared/locationPrinter.hpp"
  26 #include "gc/z/zAddress.inline.hpp"
  27 #include "gc/z/zGlobals.hpp"
  28 #include "gc/z/zHeap.inline.hpp"
  29 #include "gc/z/zHeapIterator.hpp"
  30 #include "gc/z/zMark.inline.hpp"
  31 #include "gc/z/zPage.inline.hpp"
  32 #include "gc/z/zPageTable.inline.hpp"
  33 #include "gc/z/zRelocationSet.inline.hpp"
  34 #include "gc/z/zRelocationSetSelector.hpp"
  35 #include "gc/z/zResurrection.hpp"
  36 #include "gc/z/zStat.hpp"
  37 #include "gc/z/zThread.inline.hpp"
  38 #include "gc/z/zVerify.hpp"
  39 #include "gc/z/zWorkers.inline.hpp"
  40 #include "logging/log.hpp"
  41 #include "memory/iterator.hpp"
  42 #include "memory/resourceArea.hpp"
  43 #include "runtime/handshake.hpp"
  44 #include "runtime/safepoint.hpp"
  45 #include "runtime/thread.hpp"
  46 #include "utilities/debug.hpp"
  47 
  48 static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
  49 static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
  50 static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
  51 static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
  52 static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
  53 static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);
  54 
  55 ZHeap* ZHeap::_heap = NULL;
  56 
  57 ZHeap::ZHeap() :
  58     _workers(),
  59     _object_allocator(),
  60     _page_allocator(heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
  61     _page_table(),
  62     _forwarding_table(),
  63     _mark(&_workers, &_page_table),


 299     // Marking not completed, continue concurrent mark
 300     return false;
 301   }
 302 
 303   // Enter mark completed phase
 304   ZGlobalPhase = ZPhaseMarkCompleted;
 305 
 306   // Verify after mark
 307   ZVerify::after_mark();
 308 
 309   // Update statistics
 310   ZStatSample(ZSamplerHeapUsedAfterMark, used());
 311   ZStatHeap::set_at_mark_end(capacity(), allocated(), used());
 312 
 313   // Block resurrection of weak/phantom references
 314   ZResurrection::block();
 315 
 316   // Process weak roots
 317   _weak_roots_processor.process_weak_roots();
 318 
 319   // Prepare to unload stale metadata and nmethods
 320   _unload.prepare();
 321 
 322   return true;
 323 }
 324 
      // Forward the soft-reference clearing policy to the reference processor.
      // clear == true presumably selects the aggressive "clear all softly
      // reachable referents" policy — confirm in ZReferenceProcessor.
  325 void ZHeap::set_soft_reference_policy(bool clear) {
  326   _reference_processor.set_soft_reference_policy(clear);
  327 }
 328 
      // Thread closure with an intentionally empty do_thread(). It is passed
      // to Handshake::execute() purely for the rendezvous side effect — every
      // JavaThread must pass a handshake-safe point — with no per-thread work.
      // See the comment in process_non_strong_references() for why the
      // rendezvous is needed.
  329 class ZRendezvousClosure : public ThreadClosure {
  330 public:
  331   virtual void do_thread(Thread* thread) {}
  332 };
 333 
      // Concurrent processing of all non-strong references and metadata/nmethod
      // unloading. Step order is load-bearing: unlink happens before the
      // thread handshake, purge and resurrection-unblock after it, and
      // enqueueing last — see the inline comments below.
  334 void ZHeap::process_non_strong_references() {
  335   // Process Soft/Weak/Final/PhantomReferences
  336   _reference_processor.process_references();
  337 
  338   // Process concurrent weak roots
  339   _weak_roots_processor.process_concurrent_weak_roots();
  340 
  341   // Unlink stale metadata and nmethods
  342   _unload.unlink();
  343 
  344   // Perform a handshake. This is needed 1) to make sure that stale
  345   // metadata and nmethods are no longer observable. And 2), to
  346   // prevent the race where a mutator first loads an oop, which is
  347   // logically null but not yet cleared. Then this oop gets cleared
  348   // by the reference processor and resurrection is unblocked. At
  349   // this point the mutator could see the unblocked state and pass
  350   // this invalid oop through the normal barrier path, which would
  351   // incorrectly try to mark the oop.
  352   ZRendezvousClosure cl;
  353   Handshake::execute(&cl);
  354 
  355   // Purge stale metadata and nmethods that were unlinked
  356   _unload.purge();
  357 
  358   // Unblock resurrection of weak/phantom references
  359   ZResurrection::unblock();
  360 
  361   // Enqueue Soft/Weak/Final/PhantomReferences. Note that this
  362   // must be done after unblocking resurrection. Otherwise the
  363   // Finalizer thread could call Reference.get() on the Finalizers
  364   // that were just enqueued, which would incorrectly return null
  365   // during the resurrection block window, since such referents
  366   // are only Finalizable marked.
  367   _reference_processor.enqueue_references();
  368 }
 369 
 370 void ZHeap::select_relocation_set() {
 371   // Do not allow pages to be deleted
 372   _page_allocator.enable_deferred_delete();
 373 
 374   // Register relocatable pages with selector
 375   ZRelocationSetSelector selector;
 376   ZPageTableIterator pt_iter(&_page_table);


 408   ZStatRelocation::set_at_select_relocation_set(selector.relocating());
 409   ZStatHeap::set_at_select_relocation_set(selector.live(),
 410                                           selector.garbage(),
 411                                           reclaimed());
 412 }
 413 
      // Clear all per-cycle relocation state: every forwarding registered for
      // the current relocation set is removed from the forwarding table before
      // the set itself is reset for reuse in the next cycle.
  414 void ZHeap::reset_relocation_set() {
  415   // Reset forwarding table
  416   ZRelocationSetIterator iter(&_relocation_set);
  417   for (ZForwarding* forwarding; iter.next(&forwarding);) {
  418     _forwarding_table.remove(forwarding);
  419   }
  420 
  421   // Reset relocation set
  422   _relocation_set.reset();
  423 }
 424 
      // Safepoint operation (asserted below) that transitions the cycle into
      // the relocate phase: finishes metadata/nmethod unloading, flips the
      // global address view to remapped, records heap statistics, and starts
      // root remapping/relocation.
  425 void ZHeap::relocate_start() {
  426   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  427 
  428   // Finish unloading stale metadata and nmethods
  429   _unload.finish();
  430 
  431   // Flip address view
  432   flip_to_remapped();
  433 
  434   // Enter relocate phase
  435   ZGlobalPhase = ZPhaseRelocate;
  436 
  437   // Update statistics
  438   ZStatSample(ZSamplerHeapUsedBeforeRelocation, used());
  439   ZStatHeap::set_at_relocate_start(capacity(), allocated(), used());
  440 
  441   // Remap/Relocate roots
  442   _relocate.start();
  443 }
 444 
 445 void ZHeap::relocate() {
 446   // Relocate relocation set
 447   const bool success = _relocate.relocate(&_relocation_set);
 448 


< prev index next >