src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

--- old version ---

 584   MutexLocker ml(Threads_lock);
 585 
 586   ShenandoahInitWorkerGCLABClosure init_gclabs;
 587   _workers->threads_do(&init_gclabs);
 588 
 589   // The gclab cannot be initialized early during VM startup, because its max_size cannot be determined yet.
 590   // Instead, let the WorkGang initialize the gclab when a new worker is created.
 591   _workers->set_initialize_gclab();
 592 
 593   _scm->initialize(_max_workers);
 594   _full_gc->initialize(_gc_timer);
 595 
 596   ref_processing_init();
 597 
 598   _heuristics->initialize();
 599 
 600   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 601 }
 602 
 603 size_t ShenandoahHeap::used() const {
 604   return OrderAccess::load_acquire(&_used);
 605 }
 606 
 607 size_t ShenandoahHeap::committed() const {
 608   OrderAccess::acquire();
 609   return _committed;
 610 }
 611 
 612 void ShenandoahHeap::increase_committed(size_t bytes) {
 613   assert_heaplock_or_safepoint();
 614   _committed += bytes;
 615 }
 616 
 617 void ShenandoahHeap::decrease_committed(size_t bytes) {
 618   assert_heaplock_or_safepoint();
 619   _committed -= bytes;
 620 }
 621 
 622 void ShenandoahHeap::increase_used(size_t bytes) {
 623   Atomic::add(bytes, &_used);
 624 }
 625 
 626 void ShenandoahHeap::set_used(size_t bytes) {
 627   OrderAccess::release_store_fence(&_used, bytes);
 628 }
 629 
 630 void ShenandoahHeap::decrease_used(size_t bytes) {
 631   assert(used() >= bytes, "never decrease heap size by more than we've left");
 632   Atomic::sub(bytes, &_used);
 633 }
 634 
 635 void ShenandoahHeap::increase_allocated(size_t bytes) {
 636   Atomic::add(bytes, &_bytes_allocated_since_gc_start);
 637 }
 638 
 639 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 640   size_t bytes = words * HeapWordSize;
 641   if (!waste) {
 642     increase_used(bytes);
 643   }
 644   increase_allocated(bytes);
 645   if (ShenandoahPacing) {
 646     control_thread()->pacing_notify_alloc(words);
 647     if (waste) {


2097 
2098 bool ShenandoahHeap::unload_classes() const {
2099   return _unload_classes.is_set();
2100 }
2101 
2102 address ShenandoahHeap::in_cset_fast_test_addr() {
2103   ShenandoahHeap* heap = ShenandoahHeap::heap();
2104   assert(heap->collection_set() != NULL, "Sanity");
2105   return (address) heap->collection_set()->biased_map_address();
2106 }
2107 
2108 address ShenandoahHeap::cancelled_gc_addr() {
2109   return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
2110 }
2111 
2112 address ShenandoahHeap::gc_state_addr() {
2113   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2114 }
2115 
2116 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
2117   return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
2118 }
2119 
2120 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2121   OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
2122 }
2123 
2124 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2125   _degenerated_gc_in_progress.set_cond(in_progress);
2126 }
2127 
2128 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2129   _full_gc_in_progress.set_cond(in_progress);
2130 }
2131 
2132 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2133   assert (is_full_gc_in_progress(), "should be");
2134   _full_gc_move_in_progress.set_cond(in_progress);
2135 }
2136 
2137 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2138   set_gc_state_mask(UPDATEREFS, in_progress);
2139 }
2140 
2141 void ShenandoahHeap::register_nmethod(nmethod* nm) {


--- new version ---

 584   MutexLocker ml(Threads_lock);
 585 
 586   ShenandoahInitWorkerGCLABClosure init_gclabs;
 587   _workers->threads_do(&init_gclabs);
 588 
 589   // The gclab cannot be initialized early during VM startup, because its max_size cannot be determined yet.
 590   // Instead, let the WorkGang initialize the gclab when a new worker is created.
 591   _workers->set_initialize_gclab();
 592 
 593   _scm->initialize(_max_workers);
 594   _full_gc->initialize(_gc_timer);
 595 
 596   ref_processing_init();
 597 
 598   _heuristics->initialize();
 599 
 600   JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
 601 }
 602 
 603 size_t ShenandoahHeap::used() const {
 604   return Atomic::load_acquire(&_used);
 605 }
 606 
 607 size_t ShenandoahHeap::committed() const {
 608   OrderAccess::acquire();
 609   return _committed;
 610 }
 611 
 612 void ShenandoahHeap::increase_committed(size_t bytes) {
 613   assert_heaplock_or_safepoint();
 614   _committed += bytes;
 615 }
 616 
 617 void ShenandoahHeap::decrease_committed(size_t bytes) {
 618   assert_heaplock_or_safepoint();
 619   _committed -= bytes;
 620 }
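Note that _committed above is updated only under the heap lock or at a safepoint (assert_heaplock_or_safepoint), so the writers use a plain +=; only the lock-free reader needs ordering, which the code expresses as a standalone acquire fence before the plain load. Below is a minimal standalone sketch of the same discipline, using std::mutex as a stand-in for the heap lock / safepoint protocol (the names here are hypothetical, for illustration only):

#include <atomic>
#include <cstddef>
#include <mutex>

static std::mutex          g_heap_lock;      // stand-in for the Shenandoah heap lock
static std::atomic<size_t> g_committed{0};

// Lock-free reader: an acquire load orders this read against subsequent
// accesses, mirroring the acquire fence in ShenandoahHeap::committed().
size_t committed() {
  return g_committed.load(std::memory_order_acquire);
}

// Writers are serialized by the lock, so a relaxed read-modify-write
// is sufficient here; no atomic add is needed.
void increase_committed(size_t bytes) {
  std::lock_guard<std::mutex> guard(g_heap_lock);
  g_committed.store(g_committed.load(std::memory_order_relaxed) + bytes,
                    std::memory_order_relaxed);
}

void decrease_committed(size_t bytes) {
  std::lock_guard<std::mutex> guard(g_heap_lock);
  g_committed.store(g_committed.load(std::memory_order_relaxed) - bytes,
                    std::memory_order_relaxed);
}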
 621 
 622 void ShenandoahHeap::increase_used(size_t bytes) {
 623   Atomic::add(bytes, &_used);
 624 }
 625 
 626 void ShenandoahHeap::set_used(size_t bytes) {
 627   Atomic::release_store_fence(&_used, bytes);
 628 }
 629 
 630 void ShenandoahHeap::decrease_used(size_t bytes) {
 631   assert(used() >= bytes, "never decrease heap size by more than we've left");
 632   Atomic::sub(bytes, &_used);
 633 }
 634 
 635 void ShenandoahHeap::increase_allocated(size_t bytes) {
 636   Atomic::add(bytes, &_bytes_allocated_since_gc_start);
 637 }
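The used()/set_used() pair above is the heart of this patch: the acquire load and the release-store-fence move from OrderAccess to the Atomic class, with unchanged semantics. Below is a minimal standalone sketch of the same publication pattern written against std::atomic (an assumption for illustration; HotSpot's Atomic is not a standard API), where the seq_cst operations approximate HotSpot's conservatively fenced Atomic::add/Atomic::sub:

#include <atomic>
#include <cassert>
#include <cstddef>

static std::atomic<size_t> g_used{0};

size_t used() {
  // ~ Atomic::load_acquire(&_used)
  return g_used.load(std::memory_order_acquire);
}

void set_used(size_t bytes) {
  // ~ Atomic::release_store_fence(&_used, bytes): a release store
  // followed by a full fence.
  g_used.store(bytes, std::memory_order_release);
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

void increase_used(size_t bytes) {
  // ~ Atomic::add(bytes, &_used); HotSpot's default is fully fenced,
  // which a seq_cst fetch_add approximates.
  g_used.fetch_add(bytes, std::memory_order_seq_cst);
}

void decrease_used(size_t bytes) {
  assert(used() >= bytes && "never decrease used by more than is left");
  g_used.fetch_sub(bytes, std::memory_order_seq_cst);
}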
 638 
 639 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 640   size_t bytes = words * HeapWordSize;
 641   if (!waste) {
 642     increase_used(bytes);
 643   }
 644   increase_allocated(bytes);
 645   if (ShenandoahPacing) {
 646     control_thread()->pacing_notify_alloc(words);
 647     if (waste) {


2097 
2098 bool ShenandoahHeap::unload_classes() const {
2099   return _unload_classes.is_set();
2100 }
2101 
2102 address ShenandoahHeap::in_cset_fast_test_addr() {
2103   ShenandoahHeap* heap = ShenandoahHeap::heap();
2104   assert(heap->collection_set() != NULL, "Sanity");
2105   return (address) heap->collection_set()->biased_map_address();
2106 }
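in_cset_fast_test_addr() hands the JIT the biased base of the collection-set byte map, so compiled code can test cset membership with a single shift-and-load, without subtracting the heap base first. Below is a minimal sketch of that biasing trick (the names and the one-byte-per-region layout are assumptions for illustration):

#include <cstdint>

struct CsetFastTest {
  // biased_map = map - (heap_base >> region_shift), so that indexing
  // with (addr >> region_shift) lands on the right map entry directly.
  const uint8_t* biased_map;
  unsigned       region_shift;

  bool is_in_cset(const void* obj) const {
    // One shift and one byte load; no heap-base subtraction needed.
    return biased_map[reinterpret_cast<uintptr_t>(obj) >> region_shift] != 0;
  }
};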
2107 
2108 address ShenandoahHeap::cancelled_gc_addr() {
2109   return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
2110 }
2111 
2112 address ShenandoahHeap::gc_state_addr() {
2113   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2114 }
2115 
2116 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
2117   return Atomic::load_acquire(&_bytes_allocated_since_gc_start);
2118 }
2119 
2120 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2121   Atomic::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
2122 }
2123 
2124 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2125   _degenerated_gc_in_progress.set_cond(in_progress);
2126 }
2127 
2128 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2129   _full_gc_in_progress.set_cond(in_progress);
2130 }
2131 
2132 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2133   assert (is_full_gc_in_progress(), "should be");
2134   _full_gc_move_in_progress.set_cond(in_progress);
2135 }
2136 
2137 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2138   set_gc_state_mask(UPDATEREFS, in_progress);
2139 }
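set_update_refs_in_progress() differs from the neighboring setters: instead of a private flag it toggles one bit of the shared, packed gc-state that the barriers read. The body of set_gc_state_mask() is not part of this hunk, so the following is only a sketch of the set-or-clear-a-bit pattern it names, with hypothetical bit assignments:

#include <cstdint>

// Hypothetical bit layout; the real values live elsewhere in the heap class.
enum GCStateBits : uint8_t {
  HAS_FORWARDED = 1u << 0,
  MARKING       = 1u << 1,
  EVACUATION    = 1u << 2,
  UPDATEREFS    = 1u << 3
};

static uint8_t g_gc_state = 0;

void set_gc_state_mask(uint8_t mask, bool value) {
  if (value) {
    g_gc_state |= mask;   // enter the phase: set its bit
  } else {
    g_gc_state &= ~mask;  // leave the phase: clear its bit
  }
}

// Usage, mirroring the setter above:
//   set_gc_state_mask(UPDATEREFS, true);   // update-refs begins
//   set_gc_state_mask(UPDATEREFS, false);  // update-refs ends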
2140 
2141 void ShenandoahHeap::register_nmethod(nmethod* nm) {