src/hotspot/share/gc/z/zHeap.cpp

   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/z/zAddress.hpp"
  26 #include "gc/z/zGlobals.hpp"
  27 #include "gc/z/zHeap.inline.hpp"
  28 #include "gc/z/zHeapIterator.hpp"
  29 #include "gc/z/zList.inline.hpp"
  30 #include "gc/z/zLock.inline.hpp"
  31 #include "gc/z/zMark.inline.hpp"
  32 #include "gc/z/zOopClosures.inline.hpp"
  33 #include "gc/z/zPage.inline.hpp"
  34 #include "gc/z/zPageTable.inline.hpp"
  35 #include "gc/z/zRelocationSet.inline.hpp"
  36 #include "gc/z/zResurrection.hpp"
  37 #include "gc/z/zRootsIterator.hpp"
  38 #include "gc/z/zStat.hpp"
  39 #include "gc/z/zTask.hpp"
  40 #include "gc/z/zThread.hpp"
  41 #include "gc/z/zTracer.inline.hpp"
  42 #include "gc/z/zVirtualMemory.inline.hpp"
  43 #include "gc/z/zWorkers.inline.hpp"
  44 #include "logging/log.hpp"
  45 #include "oops/oop.inline.hpp"
  46 #include "runtime/safepoint.hpp"
  47 #include "runtime/thread.hpp"
  48 #include "utilities/align.hpp"
  49 #include "utilities/debug.hpp"
  50 
  51 static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
  52 static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
  53 static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
  54 static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
  55 static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
  56 static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);
  57 
  58 ZHeap* ZHeap::_heap = NULL;
  59 
  60 ZHeap::ZHeap() :
  61     _initialize(),
  62     _workers(),
  63     _object_allocator(_workers.nworkers()),
  64     _page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
  65     _pagetable(),
  66     _mark(&_workers, &_pagetable),
  67     _reference_processor(&_workers),
  68     _relocate(&_workers),
  69     _relocation_set(),
  70     _serviceability(heap_min_size(), heap_max_size()) {
  71   // Install global heap instance
  72   assert(_heap == NULL, "Already initialized");
  73   _heap = this;
  74 
  75   // Update statistics
  76   ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
  77 }
  78 
  79 size_t ZHeap::heap_min_size() const {
  80   const size_t aligned_min_size = align_up(InitialHeapSize, ZPageSizeMin);
  81   return MIN2(aligned_min_size, heap_max_size());
  82 }
  83 
  84 size_t ZHeap::heap_max_size() const {
  85   const size_t aligned_max_size = align_up(MaxHeapSize, ZPageSizeMin);
  86   return MIN2(aligned_max_size, ZAddressOffsetMax);
  87 }


 297 class ZFixupPartialLoadsTask : public ZTask {
 298 private:
 299   ZThreadRootsIterator _thread_roots;
 300 
 301 public:
 302   ZFixupPartialLoadsTask() :
 303       ZTask("ZFixupPartialLoadsTask"),
 304       _thread_roots() {}
 305 
 306   virtual void work() {
 307     ZMarkRootOopClosure cl;
 308     _thread_roots.oops_do(&cl);
 309   }
 310 };
 311 
 312 void ZHeap::fixup_partial_loads() {
 313   ZFixupPartialLoadsTask task;
 314   _workers.run_parallel(&task);
 315 }
 316 
 317 class ZProcessWeakRootsTask : public ZTask {
 318 private:
 319   ZWeakRootsIterator _weak_roots;
 320 
 321 public:
 322   ZProcessWeakRootsTask() :
 323       ZTask("ZProcessWeakRootsTask"),
 324       _weak_roots() {}
 325 
 326   virtual void work() {
 327     ZPhantomIsAliveObjectClosure is_alive;
 328     ZPhantomKeepAliveOopClosure keep_alive;
 329     _weak_roots.unlink_or_oops_do(&is_alive, &keep_alive);
 330   }
 331 };
 332 
 333 void ZHeap::process_weak_roots() {
 334   ZProcessWeakRootsTask task;
 335   _workers.run_parallel(&task);
 336 }
 337 
 338 bool ZHeap::mark_end() {
 339   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 340 
 341   // C2 can generate code where a safepoint poll is inserted
 342   // between a load and the associated load barrier. To handle
 343   // this case we need to rescan the thread stack here to make
 344   // sure such oops are marked.
 345   fixup_partial_loads();
 346 
 347   // Try end marking
 348   if (!_mark.end()) {
 349     // Marking not completed, continue concurrent mark
 350     return false;
 351   }
 352 
 353   // Enter mark completed phase
 354   ZGlobalPhase = ZPhaseMarkCompleted;
 355 
 356   // Resize metaspace
 357   MetaspaceGC::compute_new_size();
 358 
 359   // Update statistics
 360   ZStatSample(ZSamplerHeapUsedAfterMark, used());
 361   ZStatHeap::set_at_mark_end(capacity(), allocated(), used());
 362 
 363   // Block resurrection of weak/phantom references
 364   ZResurrection::block();
 365 
 366   // Clean weak roots
 367   process_weak_roots();
 368 
 369   // Verification
 370   if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
 371     Universe::verify();
 372   }
 373 
 374   return true;
 375 }
 376 
 377 void ZHeap::set_soft_reference_policy(bool clear) {
 378   _reference_processor.set_soft_reference_policy(clear);
 379 }
 380 
 381 void ZHeap::process_and_enqueue_references() {
 382   // Process and enqueue discovered references
 383   _reference_processor.process_and_enqueue_references();
 384 
 385   // Unblock resurrection of weak/phantom references
 386   ZResurrection::unblock();
 387 }
 388 
 389 void ZHeap::destroy_detached_pages() {
 390   ZList<ZPage> list;
 391 
 392   _page_allocator.flush_detached_pages(&list);
 393 
 394   for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
 395     // Remove pagetable entry
 396     _pagetable.remove(page);
 397 
 398     // Delete the page
 399     _page_allocator.destroy_page(page);
 400   }
 401 }
 402 
 403 void ZHeap::select_relocation_set() {




   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/shared/oopStorage.hpp"
  26 #include "gc/z/zAddress.hpp"
  27 #include "gc/z/zGlobals.hpp"
  28 #include "gc/z/zHeap.inline.hpp"
  29 #include "gc/z/zHeapIterator.hpp"
  30 #include "gc/z/zList.inline.hpp"
  31 #include "gc/z/zLock.inline.hpp"
  32 #include "gc/z/zMark.inline.hpp"
  33 #include "gc/z/zOopClosures.inline.hpp"
  34 #include "gc/z/zPage.inline.hpp"
  35 #include "gc/z/zPageTable.inline.hpp"
  36 #include "gc/z/zRelocationSet.inline.hpp"
  37 #include "gc/z/zResurrection.hpp"
  38 #include "gc/z/zRootsIterator.hpp"
  39 #include "gc/z/zStat.hpp"
  40 #include "gc/z/zTask.hpp"
  41 #include "gc/z/zThread.hpp"
  42 #include "gc/z/zTracer.inline.hpp"
  43 #include "gc/z/zVirtualMemory.inline.hpp"
  44 #include "gc/z/zWorkers.inline.hpp"
  45 #include "logging/log.hpp"
  46 #include "oops/oop.inline.hpp"
  47 #include "runtime/safepoint.hpp"
  48 #include "runtime/thread.hpp"
  49 #include "utilities/align.hpp"
  50 #include "utilities/debug.hpp"
  51 
  52 static const ZStatSampler  ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
  53 static const ZStatSampler  ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
  54 static const ZStatSampler  ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
  55 static const ZStatSampler  ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
  56 static const ZStatCounter  ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
  57 static const ZStatCounter  ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);
  58 static const ZStatSubPhase ZPhaseConcurrentReferencesProcessing("Concurrent References Processing");
  59 static const ZStatSubPhase ZPhaseConcurrentWeakRootsProcessing("Concurrent Weak Roots Processing");
  60 
  61 ZHeap* ZHeap::_heap = NULL;
  62 
  63 ZHeap::ZHeap() :
  64     _initialize(),
  65     _workers(),
  66     _object_allocator(_workers.nworkers()),
  67     _page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
  68     _pagetable(),
  69     _mark(&_workers, &_pagetable),
  70     _reference_processor(&_workers),
  71     _weak_roots_processor(&_workers),
  72     _relocate(&_workers),
  73     _relocation_set(),
  74     _serviceability(heap_min_size(), heap_max_size()) {
  75   // Install global heap instance
  76   assert(_heap == NULL, "Already initialized");
  77   _heap = this;
  78 
  79   // Update statistics
  80   ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
  81 }
  82 
  83 size_t ZHeap::heap_min_size() const {
  84   const size_t aligned_min_size = align_up(InitialHeapSize, ZPageSizeMin);
  85   return MIN2(aligned_min_size, heap_max_size());
  86 }
  87 
  88 size_t ZHeap::heap_max_size() const {
  89   const size_t aligned_max_size = align_up(MaxHeapSize, ZPageSizeMin);
  90   return MIN2(aligned_max_size, ZAddressOffsetMax);
  91 }


 301 class ZFixupPartialLoadsTask : public ZTask {
 302 private:
 303   ZThreadRootsIterator _thread_roots;
 304 
 305 public:
 306   ZFixupPartialLoadsTask() :
 307       ZTask("ZFixupPartialLoadsTask"),
 308       _thread_roots() {}
 309 
 310   virtual void work() {
 311     ZMarkRootOopClosure cl;
 312     _thread_roots.oops_do(&cl);
 313   }
 314 };
 315 
 316 void ZHeap::fixup_partial_loads() {
 317   ZFixupPartialLoadsTask task;
 318   _workers.run_parallel(&task);
 319 }
 320 
 321 bool ZHeap::mark_end() {
 322   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 323 
 324   // C2 can generate code where a safepoint poll is inserted
 325   // between a load and the associated load barrier. To handle
 326   // this case we need to rescan the thread stack here to make
 327   // sure such oops are marked.
 328   fixup_partial_loads();
 329 
 330   // Try end marking
 331   if (!_mark.end()) {
 332     // Marking not completed, continue concurrent mark
 333     return false;
 334   }
 335 
 336   // Enter mark completed phase
 337   ZGlobalPhase = ZPhaseMarkCompleted;
 338 
 339   // Resize metaspace
 340   MetaspaceGC::compute_new_size();
 341 
 342   // Update statistics
 343   ZStatSample(ZSamplerHeapUsedAfterMark, used());
 344   ZStatHeap::set_at_mark_end(capacity(), allocated(), used());
 345 
 346   // Block resurrection of weak/phantom references
 347   ZResurrection::block();
 348 
 349   // Clean weak roots
 350   _weak_roots_processor.process_weak_roots();
 351 
 352   // Verification
 353   if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
 354     Universe::verify();
 355   }
 356 
 357   return true;
 358 }
 359 
 360 void ZHeap::set_soft_reference_policy(bool clear) {
 361   _reference_processor.set_soft_reference_policy(clear);
 362 }
 363 
 364 void ZHeap::concurrent_weak_processing() {
 365   {
 366     ZStatTimer timer(ZPhaseConcurrentReferencesProcessing);
 367     _reference_processor.process_and_enqueue_references();
 368   }
 369 
 370   {
 371     ZStatTimer timer(ZPhaseConcurrentWeakRootsProcessing);
 372     _weak_roots_processor.process_concurrent_weak_roots();
 373   }
 374 
 375   // Unblock resurrection of weak/phantom references
 376   ZResurrection::unblock();
 377 }
 378 
 379 void ZHeap::destroy_detached_pages() {
 380   ZList<ZPage> list;
 381 
 382   _page_allocator.flush_detached_pages(&list);
 383 
 384   for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
 385     // Remove pagetable entry
 386     _pagetable.remove(page);
 387 
 388     // Delete the page
 389     _page_allocator.destroy_page(page);
 390   }
 391 }
 392 
 393 void ZHeap::select_relocation_set() {

