< prev index next >

src/hotspot/share/gc/z/zCollectedHeap.cpp

Print this page




  31 #include "gc/z/zServiceability.hpp"
  32 #include "gc/z/zStat.hpp"
  33 #include "gc/z/zUtils.inline.hpp"
  34 #include "runtime/mutexLocker.hpp"
  35 
  36 ZCollectedHeap* ZCollectedHeap::heap() {
  37   CollectedHeap* heap = Universe::heap();
  38   assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  39   assert(heap->kind() == CollectedHeap::Z, "Invalid name");
  40   return (ZCollectedHeap*)heap;
  41 }
  42 
// Constructs the ZGC collected heap.
// NOTE(review): member initialization order matters here — _initialize
// receives the address of _barrier_set (so _barrier_set must be constructed
// first) and runs before _heap is constructed.
ZCollectedHeap::ZCollectedHeap(ZCollectorPolicy* policy) :
    _collector_policy(policy),
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),  // GC thread, see gc_threads_do()/stop()
    _driver(new ZDriver()),      // GC thread, see gc_threads_do()/stop()
    _stat(new ZStat()),          // GC thread, see gc_threads_do()/stop()
    _runtime_workers() {}
  53 
// Identifies this heap as the Z (ZGC) collected heap.
CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}
  57 
// Returns the human-readable name of this GC (ZName).
const char* ZCollectedHeap::name() const {
  return ZName;
}
  61 
  62 jint ZCollectedHeap::initialize() {
  63   if (!_heap.is_initialized()) {
  64     return JNI_ENOMEM;
  65   }
  66 
  67   initialize_reserved_region((HeapWord*)ZAddressReservedStart,
  68                              (HeapWord*)ZAddressReservedEnd);
  69 
  70   return JNI_OK;
  71 }
  72 
// Initializes serviceability support; delegates to ZHeap.
void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}
  76 
// Stops the ZGC-owned threads (the same threads visited by gc_threads_do()).
void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _stat->stop();
}
  82 
// Returns the collector policy supplied at construction time.
CollectorPolicy* ZCollectedHeap::collector_policy() const {
  return _collector_policy;
}
  86 
// Returns this heap's soft reference clearing policy.
SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}
  90 
// Maximum heap capacity; delegates to ZHeap.
size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}
  94 
// Current heap capacity; delegates to ZHeap.
size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}
  98 
  99 size_t ZCollectedHeap::used() const {


 259 
// Forwards nmethod flushing to ZGC's nmethod bookkeeping (ZNMethod).
void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}
 263 
// No nmethod verification is performed for ZGC.
void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}
 267 
// Exposes the runtime worker gang for use during safepoint operations.
WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}
 271 
// Time since the last GC cycle, converted to milliseconds.
jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}
 275 
// Applies the closure to every GC-owned thread: the director, driver and
// stat threads, the heap's worker threads, and the runtime workers.
void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}
 283 
 284 VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
 285   const size_t capacity_in_words = capacity() / HeapWordSize;
 286   const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
 287   return VirtualSpaceSummary(reserved_region().start(),
 288                              reserved_region().start() + capacity_in_words,
 289                              reserved_region().start() + max_capacity_in_words);
 290 }
 291 
// Safepoint begin hook; synchronizes the suspendible thread set.
void ZCollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}
 295 
// Safepoint end hook; desynchronizes the suspendible thread set.
void ZCollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}


 317   st->print_cr( "     GlobalSeqNum:      %u", ZGlobalSeqNum);
 318   st->print_cr( "     Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
 319   st->print_cr( "     Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
 320   st->print_cr( "     Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
 321   st->print_cr( "Metadata Bits");
 322   st->print_cr( "     Good:              " PTR_FORMAT, ZAddressGoodMask);
 323   st->print_cr( "     Bad:               " PTR_FORMAT, ZAddressBadMask);
 324   st->print_cr( "     WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
 325   st->print_cr( "     Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
 326   st->print_cr( "     Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
 327 }
 328 
// Prints extended heap state; delegates to ZHeap.
void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}
 332 
// Prints each GC-owned thread (director, driver, stat, heap workers,
// runtime workers) on the given stream, blank-line separated.
void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
  _runtime_workers.print_threads_on(st);
}
 343 
// Intentionally empty; ZGC has no additional tracing info to print here.
void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}
 347 
// Verifies heap consistency; the VerifyOption argument is ignored.
void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}
 351 
 352 bool ZCollectedHeap::is_oop(oop object) const {
 353   return CollectedHeap::is_oop(object) && _heap.is_oop(object);
 354 }


  31 #include "gc/z/zServiceability.hpp"
  32 #include "gc/z/zStat.hpp"
  33 #include "gc/z/zUtils.inline.hpp"
  34 #include "runtime/mutexLocker.hpp"
  35 
  36 ZCollectedHeap* ZCollectedHeap::heap() {
  37   CollectedHeap* heap = Universe::heap();
  38   assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  39   assert(heap->kind() == CollectedHeap::Z, "Invalid name");
  40   return (ZCollectedHeap*)heap;
  41 }
  42 
// Constructs the ZGC collected heap.
// NOTE(review): member initialization order matters here — _initialize
// receives the address of _barrier_set (so _barrier_set must be constructed
// first) and runs before _heap is constructed.
ZCollectedHeap::ZCollectedHeap(ZCollectorPolicy* policy) :
    _collector_policy(policy),
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),        // GC thread, see gc_threads_do()/stop()
    _driver(new ZDriver()),            // GC thread, see gc_threads_do()/stop()
    _uncommitter(new ZUncommitter()),  // GC thread, see gc_threads_do()/stop()
    _stat(new ZStat()),                // GC thread, see gc_threads_do()/stop()
    _runtime_workers() {}
  54 
// Identifies this heap as the Z (ZGC) collected heap.
CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}
  58 
// Returns the human-readable name of this GC (ZName).
const char* ZCollectedHeap::name() const {
  return ZName;
}
  62 
  63 jint ZCollectedHeap::initialize() {
  64   if (!_heap.is_initialized()) {
  65     return JNI_ENOMEM;
  66   }
  67 
  68   initialize_reserved_region((HeapWord*)ZAddressReservedStart,
  69                              (HeapWord*)ZAddressReservedEnd);
  70 
  71   return JNI_OK;
  72 }
  73 
// Initializes serviceability support; delegates to ZHeap.
void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}
  77 
// Stops the ZGC-owned threads (the same threads visited by gc_threads_do()).
void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _uncommitter->stop();
  _stat->stop();
}
  84 
// Returns the collector policy supplied at construction time.
CollectorPolicy* ZCollectedHeap::collector_policy() const {
  return _collector_policy;
}
  88 
// Returns this heap's soft reference clearing policy.
SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}
  92 
// Maximum heap capacity; delegates to ZHeap.
size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}
  96 
// Current heap capacity; delegates to ZHeap.
size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}
 100 
 101 size_t ZCollectedHeap::used() const {


 261 
// Forwards nmethod flushing to ZGC's nmethod bookkeeping (ZNMethod).
void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}
 265 
// No nmethod verification is performed for ZGC.
void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}
 269 
// Exposes the runtime worker gang for use during safepoint operations.
WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}
 273 
// Time since the last GC cycle, converted to milliseconds.
jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}
 277 
// Applies the closure to every GC-owned thread: the director, driver,
// uncommitter and stat threads, the heap's worker threads, and the
// runtime workers.
void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_uncommitter);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}
 286 
 287 VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
 288   const size_t capacity_in_words = capacity() / HeapWordSize;
 289   const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
 290   return VirtualSpaceSummary(reserved_region().start(),
 291                              reserved_region().start() + capacity_in_words,
 292                              reserved_region().start() + max_capacity_in_words);
 293 }
 294 
// Safepoint begin hook; synchronizes the suspendible thread set.
void ZCollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}
 298 
// Safepoint end hook; desynchronizes the suspendible thread set.
void ZCollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}


 320   st->print_cr( "     GlobalSeqNum:      %u", ZGlobalSeqNum);
 321   st->print_cr( "     Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
 322   st->print_cr( "     Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
 323   st->print_cr( "     Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
 324   st->print_cr( "Metadata Bits");
 325   st->print_cr( "     Good:              " PTR_FORMAT, ZAddressGoodMask);
 326   st->print_cr( "     Bad:               " PTR_FORMAT, ZAddressBadMask);
 327   st->print_cr( "     WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
 328   st->print_cr( "     Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
 329   st->print_cr( "     Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
 330 }
 331 
// Prints extended heap state; delegates to ZHeap.
void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}
 335 
// Prints each GC-owned thread (director, driver, uncommitter, stat,
// heap workers, runtime workers) on the given stream, blank-line separated.
void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _uncommitter->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
  _runtime_workers.print_threads_on(st);
}
 348 
// Intentionally empty; ZGC has no additional tracing info to print here.
void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}
 352 
// Verifies heap consistency; the VerifyOption argument is ignored.
void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}
 356 
 357 bool ZCollectedHeap::is_oop(oop object) const {
 358   return CollectedHeap::is_oop(object) && _heap.is_oop(object);
 359 }
< prev index next >