
src/share/vm/gc/shenandoah/shenandoahHeap.hpp

rev 14451 : imported patch bitmap_uncommit.patch
rev 14452 : imported patch onebitmap.patch


 118     CANCELLED,
 119 
 120     // GC has not been cancelled and must not be cancelled. At least
 121     // one worker thread checks for a pending safepoint and may suspend
 122     // if one is pending.
 123     NOT_CANCELLED
 124 
 125   };
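The three cancellation states above pair with the queries declared later in this class (cancelled_concgc(), check_cancelled_concgc_and_yield(), try_cancel_concgc()). A minimal sketch, assuming the usual polling pattern, of how a concurrent GC worker might react to cancellation; the loop and its work placeholder are hypothetical, only the ShenandoahHeap call is taken from this header:

  // Hypothetical worker loop: bail out as soon as the concurrent GC is cancelled.
  void concurrent_work_loop(ShenandoahHeap* heap) {
    for (;;) {
      // Yields to a pending safepoint and reports whether cancellation
      // was requested in the meantime.
      if (heap->check_cancelled_concgc_and_yield()) {
        return;   // abandon the cycle; the caller handles recovery
      }
      // ... process one chunk of concurrent GC work here ...
    }
  }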
 126 
 127 public:
 128   enum ShenandoahCancelCause {
 129     _oom_evacuation,
 130     _vm_stop,
 131   };
 132 private:
 133   ShenandoahHeapLock _lock;
 134   ShenandoahCollectorPolicy* _shenandoah_policy;
 135   size_t _bitmap_size;
 136   size_t _bitmap_words_per_region;
 137   MemRegion _heap_region;
 138   MemRegion _bitmap0_region;
 139   MemRegion _bitmap1_region;
 140 
 141   // Sortable array of regions
 142   ShenandoahHeapRegionSet* _ordered_regions;
 143   ShenandoahFreeSet* _free_regions;
 144   ShenandoahCollectionSet* _collection_set;
 145 
 146   ShenandoahConcurrentMark* _scm;
 147   ShenandoahPartialGC* _partial_gc;
 148   ShenandoahVerifier*  _verifier;
 149 
 150   ShenandoahConcurrentThread* _concurrent_gc_thread;
 151 
 152   ShenandoahMonitoringSupport* _monitoring_support;
 153 
 154   ShenandoahPhaseTimings*      _phase_timings;
 155   ShenandoahAllocTracker*      _alloc_tracker;
 156 
 157   size_t _num_regions;
 158   size_t _initial_size;
 159 
 160   uint _max_workers;
 161   ShenandoahWorkGang* _workers;
 162   ShenandoahWorkGang* _safepoint_workers;
 163 
 164   volatile size_t _used;
 165   volatile size_t _committed;
 166 
 167   MarkBitMap _verification_bit_map;
 168   MarkBitMap _mark_bit_map0;
 169   MarkBitMap _mark_bit_map1;
 170   MarkBitMap* _complete_mark_bit_map;
 171   MarkBitMap* _next_mark_bit_map;
 172 
 173   HeapWord** _complete_top_at_mark_starts;
 174   HeapWord** _complete_top_at_mark_starts_base;
 175 
 176   HeapWord** _next_top_at_mark_starts;
 177   HeapWord** _next_top_at_mark_starts_base;
 178 
 179   volatile jbyte _cancelled_concgc;
 180 
 181   size_t _bytes_allocated_since_cm;
 182   size_t _bytes_allocated_during_cm;
 183   size_t _allocated_last_gc;
 184   size_t _used_start_gc;
 185 
 186   unsigned int _concurrent_mark_in_progress;
 187 
 188   bool _full_gc_in_progress;
 189   bool _update_refs_in_progress;
 190 
 191   unsigned int _evacuation_in_progress;
 192   bool _need_update_refs;
 193   bool _need_reset_bitmaps;
 194 
 195   ReferenceProcessor* _ref_processor;
 196 
 197   ShenandoahForwardedIsAliveClosure _forwarded_is_alive;
 198   ShenandoahIsAliveClosure _is_alive;
 199 
 200   ConcurrentGCTimer* _gc_timer;
 201 
 202   // See allocate_memory()
 203   volatile jbyte _heap_lock;
 204 
 205   ShenandoahConnectionMatrix* _connection_matrix;
 206 
 207 #ifdef ASSERT
 208   Thread* volatile _heap_lock_owner;
 209   int     _heap_expansion_count;
 210 #endif
 211 
 212 public:
 213   ShenandoahHeap(ShenandoahCollectorPolicy* policy);


 323 
 324   void set_full_gc_in_progress(bool in_progress);
 325   bool is_full_gc_in_progress() const;
 326 
 327   void set_update_refs_in_progress(bool in_progress);
 328   bool is_update_refs_in_progress() const;
 329   static address update_refs_in_progress_addr();
 330 
 331   inline bool need_update_refs() const;
 332   void set_need_update_refs(bool update_refs);
 333 
 334   inline bool region_in_collection_set(size_t region_index) const;
 335 
 336   // Mainly there to avoid accidentally calling the templated
 337   // method below with ShenandoahHeapRegion* which would be *wrong*.
 338   inline bool in_collection_set(ShenandoahHeapRegion* r) const;
 339 
 340   template <class T>
 341   inline bool in_collection_set(T obj) const;
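The non-template overload above exists to catch ShenandoahHeapRegion* arguments at compile time, while the template is meant for heap addresses and oops. A hedged illustration of the intended call sites; the helper and its parameters are hypothetical:

  // Illustrative only: 'obj', 'r' and 'idx' stand for values obtained elsewhere.
  bool any_in_cset(ShenandoahHeap* heap, oop obj, ShenandoahHeapRegion* r, size_t idx) {
    bool obj_in_cset    = heap->in_collection_set(obj);        // templated overload, keyed on the object's address
    bool region_in_cset = heap->in_collection_set(r);          // region overload, avoids misusing the template
    bool index_in_cset  = heap->region_in_collection_set(idx); // lookup by region index
    return obj_in_cset || region_in_cset || index_in_cset;
  }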
 342 
 343   inline bool allocated_after_next_mark_start(HeapWord* addr) const;
 344   void set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr);
 345   HeapWord* next_top_at_mark_start(HeapWord* region_base);
 346 
 347   inline bool allocated_after_complete_mark_start(HeapWord* addr) const;
 348   void set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr);
 349   HeapWord* complete_top_at_mark_start(HeapWord* region_base);
 350 
 351   // Evacuates object src. Returns the evacuated object if this thread
 352   // succeeded, otherwise rolls back the evacuation and returns the
 353   // object evacuated by the competing thread. 'evacuated' is an out
 354   // param, set to true if this thread succeeded, false otherwise.
 355   inline oop  evacuate_object(oop src, Thread* thread, bool& evacuated);
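A minimal sketch of the calling convention described in the comment above; the wrapper is hypothetical, only evacuate_object() and in_collection_set() come from this header:

  // Hypothetical helper: copy 'obj' out of the collection set and report the winner.
  oop evacuate_if_needed(ShenandoahHeap* heap, oop obj, Thread* thread) {
    if (!heap->in_collection_set(obj)) {
      return obj;                            // nothing to do outside the cset
    }
    bool evacuated = false;                  // out parameter, see comment above
    oop copy = heap->evacuate_object(obj, thread, evacuated);
    if (!evacuated) {
      // Another thread won the race: 'copy' is that thread's version and
      // this thread's speculative copy has been rolled back.
    }
    return copy;
  }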
 356   inline bool cancelled_concgc() const;
 357   inline bool check_cancelled_concgc_and_yield(bool sts_active = true);
 358   inline bool try_cancel_concgc();
 359   inline void clear_cancelled_concgc();
 360 
 361   ShenandoahHeapRegionSet* regions() const { return _ordered_regions;}
 362   ShenandoahFreeSet* free_regions() const  { return _free_regions; }
 363   ShenandoahCollectionSet* collection_set() const { return _collection_set; }
 364   void clear_free_regions();
 365   void add_free_region(ShenandoahHeapRegion* r);
 366 
 367   ShenandoahConnectionMatrix* connection_matrix() const;
 368 
 369   void increase_used(size_t bytes);
 370   void decrease_used(size_t bytes);
 371 
 372   void set_used(size_t bytes);
 373 
 374   void increase_committed(size_t bytes);
 375   void decrease_committed(size_t bytes);
 376 
 377   void handle_heap_shrinkage();
 378 
 379   size_t garbage();
 380 
 381   void reset_next_mark_bitmap(WorkGang* gang);
 382   void reset_complete_mark_bitmap(WorkGang* gang);





 383 
 384   MarkBitMap* complete_mark_bit_map();
 385   MarkBitMap* next_mark_bit_map();
 386   inline bool is_marked_complete(oop obj) const;
 387   inline bool mark_next(oop obj) const;
 388   inline bool is_marked_next(oop obj) const;
 389   bool is_next_bitmap_clear();
 390   bool is_next_bitmap_clear_range(HeapWord* start, HeapWord* end);
 391   bool is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end);
 392 
 393   bool commit_bitmaps(ShenandoahHeapRegion* r);
 394   bool uncommit_bitmaps(ShenandoahHeapRegion* r);
 395 
 396   template <class T>
 397   inline oop update_oop_ref_not_null(T* p, oop obj);
 398 
 399   template <class T>
 400   inline oop maybe_update_oop_ref_not_null(T* p, oop obj);
 401 
 402   void print_heap_regions_on(outputStream* st) const;
 403 
 404   size_t bytes_allocated_since_cm();
 405   void set_bytes_allocated_since_cm(size_t bytes);
 406 
 407   size_t trash_humongous_region_at(ShenandoahHeapRegion *r);
 408 
 409   ShenandoahMonitoringSupport* monitoring_support();
 410   ShenandoahConcurrentMark* concurrentMark() { return _scm;}
 411   ShenandoahPartialGC* partial_gc();


 437   inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
 438 
 439   template<class T>
 440   inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
 441 
 442 public:
 443   template<class T>
 444   inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);
 445 
 446   template<class T>
 447   inline void marked_object_safe_iterate(ShenandoahHeapRegion* region, T* cl);
 448 
 449   template<class T>
 450   inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl);
 451 
 452   template<class T>
 453   inline void marked_object_oop_safe_iterate(ShenandoahHeapRegion* region, T* cl);
 454 
 455   GCTimer* gc_timer() const;
 456 
 457   void swap_mark_bitmaps();
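In this pre-onebitmap version marking works against the "next" bitmap and swap_mark_bitmaps() promotes it to the "complete" bitmap once marking finishes. A hedged sketch of that cycle; the driver function and the ordering are assumptions, only the two ShenandoahHeap calls are from this header:

  // Hypothetical end-of-mark driver for the two-bitmap scheme.
  void finish_marking(ShenandoahHeap* heap, WorkGang* workers) {
    heap->swap_mark_bitmaps();               // 'next' results become 'complete'
    heap->reset_next_mark_bitmap(workers);   // clear the new 'next' for the following cycle
  }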
 458 
 459   void cancel_concgc(GCCause::Cause cause);
 460   void cancel_concgc(ShenandoahCancelCause cause);
 461 
 462   ShenandoahHeapLock* lock() { return &_lock; }
 463   void assert_heaplock_owned_by_current_thread() PRODUCT_RETURN;
 464   void assert_heaplock_not_owned_by_current_thread() PRODUCT_RETURN;
 465   void assert_heaplock_or_safepoint() PRODUCT_RETURN;
 466 
 467 public:
 468   typedef enum {
 469     _alloc_shared,      // Allocate common, outside of TLAB
 470     _alloc_shared_gc,   // Allocate common, outside of GCLAB
 471     _alloc_tlab,        // Allocate TLAB
 472     _alloc_gclab,       // Allocate GCLAB
 473     _ALLOC_LIMIT,
 474   } AllocType;
 475 
 476   static const char* alloc_type_to_string(AllocType type) {
 477     switch (type) {
 478       case _alloc_shared:


 485         return "GCLAB";
 486       default:
 487         ShouldNotReachHere();
 488         return "";
 489     }
 490   }
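A small usage sketch for the allocation-type enum; the tracing hook is hypothetical and alloc_type_to_string() is assumed only to return a short human-readable name such as "GCLAB":

  // Hypothetical tracing hook: report which allocation path was taken.
  void trace_allocation(ShenandoahHeap::AllocType type, size_t word_size) {
    tty->print_cr("Shenandoah alloc: %s, " SIZE_FORMAT " words",
                  ShenandoahHeap::alloc_type_to_string(type), word_size);
  }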
 491 private:
 492   HeapWord* allocate_new_lab(size_t word_size, AllocType type);
 493   HeapWord* allocate_memory_under_lock(size_t word_size, AllocType type, bool &new_region);
 494   HeapWord* allocate_memory(size_t word_size, AllocType type);
 495   // Shenandoah functionality.
 496   inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
 497   HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
 498   HeapWord* allocate_new_gclab(size_t word_size);
 499 
 500   template<class T>
 501   inline void do_marked_object(MarkBitMap* bitmap, T* cl, oop obj);
 502 
 503   ShenandoahConcurrentThread* concurrent_thread() { return _concurrent_gc_thread; }
 504 
 505   inline bool mark_next_no_checks(oop obj) const;
 506 
 507 public:
 508   inline oop atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c);
 509   inline oop atomic_compare_exchange_oop(oop n, oop* addr, oop c);
 510 
 511 private:
 512   void ref_processing_init();
 513 
 514   GCTracer* tracer();
 515 
 516   void set_concurrent_mark_in_progress(bool in_progress);
 517 
 518   void oom_during_evacuation();
 519 
 520   HeapWord* allocate_large_memory(size_t word_size);
 521 
 522   const char* cancel_cause_to_string(ShenandoahCancelCause cause);
 523 
 524 private:
 525   size_t* _recycled_regions;


 118     CANCELLED,
 119 
 120     // GC has not been cancelled and must not be cancelled. At least
 121     // one worker thread checks for a pending safepoint and may suspend
 122     // if one is pending.
 123     NOT_CANCELLED
 124 
 125   };
 126 
 127 public:
 128   enum ShenandoahCancelCause {
 129     _oom_evacuation,
 130     _vm_stop,
 131   };
 132 private:
 133   ShenandoahHeapLock _lock;
 134   ShenandoahCollectorPolicy* _shenandoah_policy;
 135   size_t _bitmap_size;
 136   size_t _bitmap_words_per_region;
 137   MemRegion _heap_region;
 138   MemRegion _bitmap_region;

 139 
 140   // Sortable array of regions
 141   ShenandoahHeapRegionSet* _ordered_regions;
 142   ShenandoahFreeSet* _free_regions;
 143   ShenandoahCollectionSet* _collection_set;
 144 
 145   ShenandoahConcurrentMark* _scm;
 146   ShenandoahPartialGC* _partial_gc;
 147   ShenandoahVerifier*  _verifier;
 148 
 149   ShenandoahConcurrentThread* _concurrent_gc_thread;
 150 
 151   ShenandoahMonitoringSupport* _monitoring_support;
 152 
 153   ShenandoahPhaseTimings*      _phase_timings;
 154   ShenandoahAllocTracker*      _alloc_tracker;
 155 
 156   size_t _num_regions;
 157   size_t _initial_size;
 158 
 159   uint _max_workers;
 160   ShenandoahWorkGang* _workers;
 161   ShenandoahWorkGang* _safepoint_workers;
 162 
 163   volatile size_t _used;
 164   volatile size_t _committed;
 165 
 166   MarkBitMap _verification_bit_map;
 167   MarkBitMap __mark_bit_map;
 168   MarkBitMap* _mark_bit_map;
 169   bool _bitmap_valid;

 170 
 171   HeapWord** _top_at_mark_starts;
 172   HeapWord** _top_at_mark_starts_base;



 173 
 174   volatile jbyte _cancelled_concgc;
 175 
 176   size_t _bytes_allocated_since_cm;
 177   size_t _bytes_allocated_during_cm;
 178   size_t _allocated_last_gc;
 179   size_t _used_start_gc;
 180 
 181   unsigned int _concurrent_mark_in_progress;
 182 
 183   bool _full_gc_in_progress;
 184   bool _update_refs_in_progress;
 185 
 186   unsigned int _evacuation_in_progress;
 187   bool _need_update_refs;
 188   bool _need_reset_bitmap;
 189 
 190   ReferenceProcessor* _ref_processor;
 191 
 192   ShenandoahForwardedIsAliveClosure _forwarded_is_alive;
 193   ShenandoahIsAliveClosure _is_alive;
 194 
 195   ConcurrentGCTimer* _gc_timer;
 196 
 197   // See allocate_memory()
 198   volatile jbyte _heap_lock;
 199 
 200   ShenandoahConnectionMatrix* _connection_matrix;
 201 
 202 #ifdef ASSERT
 203   Thread* volatile _heap_lock_owner;
 204   int     _heap_expansion_count;
 205 #endif
 206 
 207 public:
 208   ShenandoahHeap(ShenandoahCollectorPolicy* policy);


 318 
 319   void set_full_gc_in_progress(bool in_progress);
 320   bool is_full_gc_in_progress() const;
 321 
 322   void set_update_refs_in_progress(bool in_progress);
 323   bool is_update_refs_in_progress() const;
 324   static address update_refs_in_progress_addr();
 325 
 326   inline bool need_update_refs() const;
 327   void set_need_update_refs(bool update_refs);
 328 
 329   inline bool region_in_collection_set(size_t region_index) const;
 330 
 331   // Mainly there to avoid accidentally calling the templated
 332   // method below with ShenandoahHeapRegion* which would be *wrong*.
 333   inline bool in_collection_set(ShenandoahHeapRegion* r) const;
 334 
 335   template <class T>
 336   inline bool in_collection_set(T obj) const;
 337 
 338   inline bool allocated_after_mark_start(HeapWord* addr) const;
 339   void set_top_at_mark_start(HeapWord* region_base, HeapWord* addr);
 340   HeapWord* top_at_mark_start(HeapWord* region_base);
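The top-at-mark-start (TAMS) queries above support the usual implicit-liveness rule for objects allocated while marking is running. A minimal sketch of that rule, assuming objects above TAMS need no bitmap bit; only the two ShenandoahHeap calls come from this header:

  // Hypothetical liveness query during concurrent mark: objects allocated
  // above their region's TAMS are implicitly live.
  bool is_live_during_mark(ShenandoahHeap* heap, oop obj) {
    HeapWord* addr = cast_from_oop<HeapWord*>(obj);
    if (heap->allocated_after_mark_start(addr)) {
      return true;                     // allocated since marking started
    }
    return heap->is_marked(obj);       // otherwise consult the mark bitmap
  }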




 341 
 342   // Evacuates object src. Returns the evacuated object if this thread
 343   // succeeded, otherwise rolls back the evacuation and returns the
 344   // object evacuated by the competing thread. 'evacuated' is an out
 345   // param, set to true if this thread succeeded, false otherwise.
 346   inline oop  evacuate_object(oop src, Thread* thread, bool& evacuated);
 347   inline bool cancelled_concgc() const;
 348   inline bool check_cancelled_concgc_and_yield(bool sts_active = true);
 349   inline bool try_cancel_concgc();
 350   inline void clear_cancelled_concgc();
 351 
 352   ShenandoahHeapRegionSet* regions() const { return _ordered_regions;}
 353   ShenandoahFreeSet* free_regions() const  { return _free_regions; }
 354   ShenandoahCollectionSet* collection_set() const { return _collection_set; }
 355   void clear_free_regions();
 356   void add_free_region(ShenandoahHeapRegion* r);
 357 
 358   ShenandoahConnectionMatrix* connection_matrix() const;
 359 
 360   void increase_used(size_t bytes);
 361   void decrease_used(size_t bytes);
 362 
 363   void set_used(size_t bytes);
 364 
 365   void increase_committed(size_t bytes);
 366   void decrease_committed(size_t bytes);
 367 
 368   void handle_heap_shrinkage();
 369 
 370   size_t garbage();
 371 
 372   void reset_mark_bitmap(WorkGang* gang);
 373 
 374   MarkBitMap* mark_bit_map();
 375   inline bool is_marked(oop obj) const;
 376   inline bool mark(oop obj) const;
 377   bool is_bitmap_clear();
 378   bool is_bitmap_clear_range(HeapWord* start, HeapWord* end);
 379 
 380   bool is_bitmap_valid() const { return _bitmap_valid; }
 381   void set_bitmap_valid(bool valid) { _bitmap_valid = valid; }
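With the onebitmap patch there is a single mark bitmap plus a validity flag instead of the complete/next pair. A hedged sketch of the mark-then-test pattern, assuming mark() returns true when the calling thread set the bit first; the helper itself is hypothetical:

  // Hypothetical marking step: set the bit for 'obj' and report whether this
  // thread marked it first.
  bool try_mark(ShenandoahHeap* heap, oop obj) {
    assert(heap->is_bitmap_valid(), "bitmap must be valid before marking");
    if (heap->mark(obj)) {
      // Newly marked by this thread; typically pushed onto a task queue here.
      return true;
    }
    // The bit was already set: another thread marked the object first.
    return false;
  }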






 382 
 383   bool commit_bitmaps(ShenandoahHeapRegion* r);
 384   bool uncommit_bitmaps(ShenandoahHeapRegion* r);
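commit_bitmaps()/uncommit_bitmaps() stem from the bitmap_uncommit patch and manage the bitmap slice backing a single region. A hedged sketch of how callers might pair them with the region life-cycle; both wrapper functions are hypothetical and the failure handling is an assumption:

  // Hypothetical: release the bitmap memory behind a fully emptied region.
  void on_region_trashed(ShenandoahHeap* heap, ShenandoahHeapRegion* r) {
    if (!heap->uncommit_bitmaps(r)) {
      // Uncommit may fail for implementation-specific reasons; the region
      // simply keeps its bitmap slice committed in that case.
    }
  }

  // Hypothetical: make sure the bitmap slice is committed before the region is reused.
  void on_region_activated(ShenandoahHeap* heap, ShenandoahHeapRegion* r) {
    bool ok = heap->commit_bitmaps(r);
    assert(ok, "bitmap commit must succeed before the region is used");
  }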
 385 
 386   template <class T>
 387   inline oop update_oop_ref_not_null(T* p, oop obj);
 388 
 389   template <class T>
 390   inline oop maybe_update_oop_ref_not_null(T* p, oop obj);
 391 
 392   void print_heap_regions_on(outputStream* st) const;
 393 
 394   size_t bytes_allocated_since_cm();
 395   void set_bytes_allocated_since_cm(size_t bytes);
 396 
 397   size_t trash_humongous_region_at(ShenandoahHeapRegion *r);
 398 
 399   ShenandoahMonitoringSupport* monitoring_support();
 400   ShenandoahConcurrentMark* concurrentMark() { return _scm;}
 401   ShenandoahPartialGC* partial_gc();


 427   inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
 428 
 429   template<class T>
 430   inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
 431 
 432 public:
 433   template<class T>
 434   inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);
 435 
 436   template<class T>
 437   inline void marked_object_safe_iterate(ShenandoahHeapRegion* region, T* cl);
 438 
 439   template<class T>
 440   inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl);
 441 
 442   template<class T>
 443   inline void marked_object_oop_safe_iterate(ShenandoahHeapRegion* region, T* cl);
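The marked_object_iterate() family visits only objects that are live according to the mark bitmap. A hedged sketch of a caller-supplied closure; ObjectClosure is just one plausible shape for the template parameter T, and the counting logic is made up:

  // Hypothetical closure: count live objects and their total size in a region.
  class CountLiveClosure : public ObjectClosure {
   public:
    size_t _count;
    size_t _words;
    CountLiveClosure() : _count(0), _words(0) {}
    void do_object(oop obj) {
      _count++;
      _words += obj->size();             // object size in words
    }
  };

  void count_live(ShenandoahHeap* heap, ShenandoahHeapRegion* region) {
    CountLiveClosure cl;
    heap->marked_object_iterate(region, &cl);   // visits only marked (live) objects
    // cl._count and cl._words now describe the region's live set.
  }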
 444 
 445   GCTimer* gc_timer() const;
 446 


 447   void cancel_concgc(GCCause::Cause cause);
 448   void cancel_concgc(ShenandoahCancelCause cause);
 449 
 450   ShenandoahHeapLock* lock() { return &_lock; }
 451   void assert_heaplock_owned_by_current_thread() PRODUCT_RETURN;
 452   void assert_heaplock_not_owned_by_current_thread() PRODUCT_RETURN;
 453   void assert_heaplock_or_safepoint() PRODUCT_RETURN;
 454 
 455 public:
 456   typedef enum {
 457     _alloc_shared,      // Allocate common, outside of TLAB
 458     _alloc_shared_gc,   // Allocate common, outside of GCLAB
 459     _alloc_tlab,        // Allocate TLAB
 460     _alloc_gclab,       // Allocate GCLAB
 461     _ALLOC_LIMIT,
 462   } AllocType;
 463 
 464   static const char* alloc_type_to_string(AllocType type) {
 465     switch (type) {
 466       case _alloc_shared:


 473         return "GCLAB";
 474       default:
 475         ShouldNotReachHere();
 476         return "";
 477     }
 478   }
 479 private:
 480   HeapWord* allocate_new_lab(size_t word_size, AllocType type);
 481   HeapWord* allocate_memory_under_lock(size_t word_size, AllocType type, bool &new_region);
 482   HeapWord* allocate_memory(size_t word_size, AllocType type);
 483   // Shenandoah functionality.
 484   inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
 485   HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
 486   HeapWord* allocate_new_gclab(size_t word_size);
 487 
 488   template<class T>
 489   inline void do_marked_object(MarkBitMap* bitmap, T* cl, oop obj);
 490 
 491   ShenandoahConcurrentThread* concurrent_thread() { return _concurrent_gc_thread; }
 492 
 493   inline bool mark_no_checks(oop obj) const;
 494 
 495 public:
 496   inline oop atomic_compare_exchange_oop(oop n, narrowOop* addr, oop c);
 497   inline oop atomic_compare_exchange_oop(oop n, oop* addr, oop c);
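The two atomic_compare_exchange_oop() overloads cover compressed and uncompressed oop fields. A minimal sketch of an update-refs style use, assuming the usual compare-and-swap convention of returning the value previously held by the field; the wrapper is hypothetical:

  // Hypothetical field update: install the forwarded copy only if the field
  // still holds the reference that was evacuated.
  bool update_field(ShenandoahHeap* heap, oop* field, oop expected, oop forwarded) {
    oop witness = heap->atomic_compare_exchange_oop(forwarded, field, expected);
    // Equality with 'expected' means this thread's update went in; anything
    // else means another thread updated the field first.
    return witness == expected;
  }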
 498 
 499 private:
 500   void ref_processing_init();
 501 
 502   GCTracer* tracer();
 503 
 504   void set_concurrent_mark_in_progress(bool in_progress);
 505 
 506   void oom_during_evacuation();
 507 
 508   HeapWord* allocate_large_memory(size_t word_size);
 509 
 510   const char* cancel_cause_to_string(ShenandoahCancelCause cause);
 511 
 512 private:
 513   size_t* _recycled_regions;