< prev index next >

src/share/vm/gc_implementation/shenandoah/shenandoahHeap.hpp

Print this page
rev 11461 : [backport] 8226757: Shenandoah: Make traversal and passive modes explicit
rev 11463 : Backport Traversal GC


  32 #include "gc_implementation/shenandoah/shenandoahSharedVariables.hpp"
  33 
  34 class ConcurrentGCTimer;
  35 
  36 class ShenandoahAllocTracker;
  37 class ShenandoahCollectionSet;
  38 class ShenandoahCollectorPolicy;
  39 class ShenandoahConcurrentMark;
  40 class ShenandoahControlThread;
  41 class ShenandoahGCSession;
  42 class ShenandoahFreeSet;
  43 class ShenandoahHeapRegion;
  44 class ShenandoahHeapRegionClosure;
  45 class ShenandoahMarkCompact;
  46 class ShenandoahMonitoringSupport;
  47 class ShenandoahHeuristics;
  48 class ShenandoahMarkingContext;
  49 class ShenandoahMode;
  50 class ShenandoahPhaseTimings;
  51 class ShenandoahPacer;

  52 class ShenandoahVerifier;
  53 class ShenandoahWorkGang;
  54 class VMStructs;
  55 
  56 class ShenandoahRegionIterator : public StackObj {
  57 private:
  58   ShenandoahHeap* _heap;
  59 
  60   char _pad0[DEFAULT_CACHE_LINE_SIZE];
  61   volatile jint _index;
  62   char _pad1[DEFAULT_CACHE_LINE_SIZE];
  63 
  64   // No implicit copying: iterators should be passed by reference to capture the state
  65   ShenandoahRegionIterator(const ShenandoahRegionIterator& that);
  66   ShenandoahRegionIterator& operator=(const ShenandoahRegionIterator& o);
  67 
  68 public:
  69   ShenandoahRegionIterator();
  70   ShenandoahRegionIterator(ShenandoahHeap* heap);
  71 


 218 //
 219 // GC state describes the important parts of collector state, that may be
 220 // used to make barrier selection decisions in the native and generated code.
 221 // Multiple bits can be set at once.
 222 //
 223 // Important invariant: when GC state is zero, the heap is stable, and no barriers
 224 // are required.
 225 //
 226 public:
 227   enum GCStateBitPos {
       // Bit positions within the shared _gc_state bitmap (one flag per bit);
       // the GCState enum below derives the corresponding single-bit masks.
 228     // Heap has forwarded objects: need RB, ACMP, CAS barriers.
 229     HAS_FORWARDED_BITPOS   = 0,
 230 
 231     // Heap is under marking: needs SATB barriers.
 232     MARKING_BITPOS    = 1,
 233 
 234     // Heap is under evacuation: needs WB barriers. (Set together with UNSTABLE)
 235     EVACUATION_BITPOS = 2,
 236 
 237     // Heap is under updating: needs SVRB/SVWB barriers.
 238     UPDATEREFS_BITPOS = 3



 239   };
 240 
 241   enum GCState {
       // Single-bit masks derived from GCStateBitPos; combined values live in
       // the _gc_state shared bitmap (see set_gc_state_mask and
       // is_gc_in_progress_mask). STABLE (all bits clear) means no barriers
       // are required, per the invariant documented above this section.
 242     STABLE        = 0,
 243     HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
 244     MARKING       = 1 << MARKING_BITPOS,
 245     EVACUATION    = 1 << EVACUATION_BITPOS,
 246     UPDATEREFS    = 1 << UPDATEREFS_BITPOS

 247   };
 248 
 249 private:
 250   ShenandoahSharedBitmap _gc_state;
 251   ShenandoahSharedFlag   _degenerated_gc_in_progress;
 252   ShenandoahSharedFlag   _full_gc_in_progress;
 253   ShenandoahSharedFlag   _full_gc_move_in_progress;
 254   ShenandoahSharedFlag   _progress_last_gc;
 255 
 256   void set_gc_state_mask(uint mask, bool value);
 257 
 258 public:
 259   char gc_state();
 260   static address gc_state_addr();
 261 
 262   void set_concurrent_mark_in_progress(bool in_progress);
 263   void set_evacuation_in_progress(bool in_progress);
 264   void set_update_refs_in_progress(bool in_progress);
 265   void set_degenerated_gc_in_progress(bool in_progress);
 266   void set_full_gc_in_progress(bool in_progress);
 267   void set_full_gc_move_in_progress(bool in_progress);

 268   void set_has_forwarded_objects(bool cond);
 269 
 270   inline bool is_stable() const;
 271   inline bool is_idle() const;
 272   inline bool is_concurrent_mark_in_progress() const;
 273   inline bool is_update_refs_in_progress() const;
 274   inline bool is_evacuation_in_progress() const;
 275   inline bool is_degenerated_gc_in_progress() const;
 276   inline bool is_full_gc_in_progress() const;
 277   inline bool is_full_gc_move_in_progress() const;

 278   inline bool has_forwarded_objects() const;
 279   inline bool is_gc_in_progress_mask(uint mask) const;
 280 
 281 // ---------- GC cancellation and degeneration machinery
 282 //
 283 // Cancelled GC flag is used to notify concurrent phases that they should terminate.
 284 //
 285 public:
 286   enum ShenandoahDegenPoint {
       // Point in the concurrent cycle at which a Degenerated GC takes over.
       // Entries appear to follow cycle order (outside-cycle, then
       // mark -> evac -> updaterefs); consumed by vmop_degenerated /
       // op_degenerated. "unset" maps to "<UNSET>" in degen_point_to_string.
 287     _degenerated_unset,

 288     _degenerated_outside_cycle,
 289     _degenerated_mark,
 290     _degenerated_evac,
 291     _degenerated_updaterefs,
       // Sentinel -- presumably an element count for bounds/array sizing
       // (TODO confirm); not a valid point: degen_point_to_string routes it
       // to ShouldNotReachHere().
 292     _DEGENERATED_LIMIT
 293   };
 294 
 295   static const char* degen_point_to_string(ShenandoahDegenPoint point) {
       // Human-readable label for a degeneration point, for logging/tracing.
       // Returns string literals (static storage), which satisfies the
       // "immortal messages" requirement noted near degen_event_message.
       // NOTE(review): the trailing ';' after this function body is redundant.
 296     switch (point) {
 297       case _degenerated_unset:
 298         return "<UNSET>";


 299       case _degenerated_outside_cycle:
 300         return "Outside of Cycle";
 301       case _degenerated_mark:
 302         return "Mark";
 303       case _degenerated_evac:
 304         return "Evacuation";
 305       case _degenerated_updaterefs:
 306         return "Update Refs";
 307       default:
         // _DEGENERATED_LIMIT (or a corrupt value) must never reach here.
 308         ShouldNotReachHere();
 309         return "ERROR";
 310     }
 311   };
 312 
 313 private:
 314   ShenandoahSharedFlag _cancelled_gc;
 315   inline bool try_cancel_gc();
 316 
 317 public:
 318   static address cancelled_gc_addr();
 319 
 320   inline bool cancelled_gc() const;
 321 
 322   inline void clear_cancelled_gc();
 323 
 324   void cancel_gc(GCCause::Cause cause);
 325 
 326 // ---------- GC operations entry points
 327 //
 328 public:
 329   // Entry points to STW GC operations, these cause a related safepoint, that then
 330   // call the entry method below
 331   void vmop_entry_init_mark();
 332   void vmop_entry_final_mark();
 333   void vmop_entry_final_evac();
 334   void vmop_entry_init_updaterefs();
 335   void vmop_entry_final_updaterefs();


 336   void vmop_entry_full(GCCause::Cause cause);
 337   void vmop_degenerated(ShenandoahDegenPoint point);
 338 
 339   // Entry methods to normally STW GC operations. These set up logging, monitoring
 340   // and workers for net VM operation
 341   void entry_init_mark();
 342   void entry_final_mark();
 343   void entry_final_evac();
 344   void entry_init_updaterefs();
 345   void entry_final_updaterefs();


 346   void entry_full(GCCause::Cause cause);
 347   void entry_degenerated(int point);
 348 
 349   // Entry methods to normally concurrent GC operations. These set up logging, monitoring
 350   // for concurrent operation.
 351   void entry_reset();
 352   void entry_mark();
 353   void entry_preclean();
 354   void entry_cleanup();
 355   void entry_evac();
 356   void entry_updaterefs();

 357   void entry_uncommit(double shrink_before);
 358 
 359 private:
 360   // Actual work for the phases
 361   void op_init_mark();
 362   void op_final_mark();
 363   void op_final_evac();
 364   void op_init_updaterefs();
 365   void op_final_updaterefs();


 366   void op_full(GCCause::Cause cause);
 367   void op_degenerated(ShenandoahDegenPoint point);
 368   void op_degenerated_fail();
 369   void op_degenerated_futile();
 370 
 371   void op_reset();
 372   void op_mark();
 373   void op_preclean();
 374   void op_cleanup();
 375   void op_conc_evac();
 376   void op_stw_evac();
 377   void op_updaterefs();

 378   void op_uncommit(double shrink_before);
 379 
 380   // Messages for GC trace event, they have to be immortal for
 381   // passing around the logging/tracing systems
 382   const char* init_mark_event_message() const;
 383   const char* final_mark_event_message() const;
 384   const char* conc_mark_event_message() const;
 385   const char* degen_event_message(ShenandoahDegenPoint point) const;
 386 
 387 // ---------- GC subsystems
 388 //
 389 private:
 390   ShenandoahControlThread*   _control_thread;
 391   ShenandoahCollectorPolicy* _shenandoah_policy;
 392   ShenandoahMode*            _gc_mode;
 393   ShenandoahHeuristics*      _heuristics;
 394   ShenandoahFreeSet*         _free_set;
 395   ShenandoahConcurrentMark*  _scm;

 396   ShenandoahMarkCompact*     _full_gc;
 397   ShenandoahPacer*           _pacer;
 398   ShenandoahVerifier*        _verifier;
 399 
 400   ShenandoahAllocTracker*    _alloc_tracker;
 401   ShenandoahPhaseTimings*    _phase_timings;
 402 
 403   ShenandoahControlThread*   control_thread()          { return _control_thread;    }
 404   ShenandoahMarkCompact*     full_gc()                 { return _full_gc;           }
 405 
 406 public:
 407   ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
 408   ShenandoahHeuristics*      heuristics()        const { return _heuristics;        }
 409   ShenandoahFreeSet*         free_set()          const { return _free_set;          }
 410   ShenandoahConcurrentMark*  concurrent_mark()         { return _scm;               }


 411   ShenandoahPacer*           pacer()             const { return _pacer;             }
 412 
 413   ShenandoahPhaseTimings*    phase_timings()     const { return _phase_timings;     }
 414   ShenandoahAllocTracker*    alloc_tracker()     const { return _alloc_tracker;     }
 415 
 416   ShenandoahVerifier*        verifier();
 417 
 418 // ---------- VM subsystem bindings
 419 //
 420 private:
 421   ShenandoahMonitoringSupport* _monitoring_support;
 422   ConcurrentGCTimer* _gc_timer;
 423 
 424 public:
 425   ShenandoahMonitoringSupport* monitoring_support() { return _monitoring_support; }
 426 
 427   GCTracer* tracer();
 428   GCTimer* gc_timer() const;
 429   CollectorPolicy* collector_policy() const;
 430 


 619 
 620   ShenandoahCollectionSet* collection_set() const { return _collection_set; }
 621 
 622   template <class T>
 623   inline bool in_collection_set(T obj) const;
 624 
 625   // Avoid accidentally calling the method above with ShenandoahHeapRegion*, which would be *wrong*.
 626   inline bool in_collection_set(ShenandoahHeapRegion* r) shenandoah_not_implemented_return(false);
 627 
 628   // Evacuates object src. Returns the evacuated object, either evacuated
 629   // by this thread, or by some other thread.
 630   inline oop  evacuate_object(oop src, Thread* thread);
 631 
 632   // Call before/after evacuation.
 633   void enter_evacuation();
 634   void leave_evacuation();
 635 
 636 // ---------- Helper functions
 637 //
 638 public:



 639   template <class T>
 640   inline oop maybe_update_with_forwarded(T* p);
 641 
 642   template <class T>
 643   inline oop maybe_update_with_forwarded_not_null(T* p, oop obj);
 644 
 645   template <class T>
 646   inline oop update_with_forwarded_not_null(T* p, oop obj);
 647 
 648   static inline oop cas_oop(oop n, narrowOop* addr, oop c);
 649   static inline oop cas_oop(oop n, oop* addr, oop c);
 650 
 651   void trash_humongous_region_at(ShenandoahHeapRegion *r);
 652 
 653   void stop_concurrent_marking();
 654 
 655   void roots_iterate(OopClosure* cl);
 656 
 657 private:
 658   void trash_cset_regions();


  32 #include "gc_implementation/shenandoah/shenandoahSharedVariables.hpp"
  33 
  34 class ConcurrentGCTimer;
  35 
  36 class ShenandoahAllocTracker;
  37 class ShenandoahCollectionSet;
  38 class ShenandoahCollectorPolicy;
  39 class ShenandoahConcurrentMark;
  40 class ShenandoahControlThread;
  41 class ShenandoahGCSession;
  42 class ShenandoahFreeSet;
  43 class ShenandoahHeapRegion;
  44 class ShenandoahHeapRegionClosure;
  45 class ShenandoahMarkCompact;
  46 class ShenandoahMonitoringSupport;
  47 class ShenandoahHeuristics;
  48 class ShenandoahMarkingContext;
  49 class ShenandoahMode;
  50 class ShenandoahPhaseTimings;
  51 class ShenandoahPacer;
  52 class ShenandoahTraversalGC;
  53 class ShenandoahVerifier;
  54 class ShenandoahWorkGang;
  55 class VMStructs;
  56 
  57 class ShenandoahRegionIterator : public StackObj {
  58 private:
  59   ShenandoahHeap* _heap;
  60 
  61   char _pad0[DEFAULT_CACHE_LINE_SIZE];
  62   volatile jint _index;
  63   char _pad1[DEFAULT_CACHE_LINE_SIZE];
  64 
  65   // No implicit copying: iterators should be passed by reference to capture the state
  66   ShenandoahRegionIterator(const ShenandoahRegionIterator& that);
  67   ShenandoahRegionIterator& operator=(const ShenandoahRegionIterator& o);
  68 
  69 public:
  70   ShenandoahRegionIterator();
  71   ShenandoahRegionIterator(ShenandoahHeap* heap);
  72 


 219 //
 220 // GC state describes the important parts of collector state, that may be
 221 // used to make barrier selection decisions in the native and generated code.
 222 // Multiple bits can be set at once.
 223 //
 224 // Important invariant: when GC state is zero, the heap is stable, and no barriers
 225 // are required.
 226 //
 227 public:
 228   enum GCStateBitPos {
       // Bit positions within the shared _gc_state bitmap (one flag per bit);
       // the GCState enum below derives the corresponding single-bit masks.
 229     // Heap has forwarded objects: need RB, ACMP, CAS barriers.
 230     HAS_FORWARDED_BITPOS   = 0,
 231 
 232     // Heap is under marking: needs SATB barriers.
 233     MARKING_BITPOS    = 1,
 234 
 235     // Heap is under evacuation: needs WB barriers. (Set together with UNSTABLE)
 236     EVACUATION_BITPOS = 2,
 237 
 238     // Heap is under updating: needs SVRB/SVWB barriers.
 239     UPDATEREFS_BITPOS = 3,
 240 
 241     // Heap is under traversal collection
 242     TRAVERSAL_BITPOS  = 4
 243   };
 244 
 245   enum GCState {
       // Single-bit masks derived from GCStateBitPos; combined values live in
       // the _gc_state shared bitmap (see set_gc_state_mask and
       // is_gc_in_progress_mask). STABLE (all bits clear) means no barriers
       // are required, per the invariant documented above this section.
 246     STABLE        = 0,
 247     HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
 248     MARKING       = 1 << MARKING_BITPOS,
 249     EVACUATION    = 1 << EVACUATION_BITPOS,
 250     UPDATEREFS    = 1 << UPDATEREFS_BITPOS,
 251     TRAVERSAL     = 1 << TRAVERSAL_BITPOS
 252   };
 253 
 254 private:
 255   ShenandoahSharedBitmap _gc_state;
 256   ShenandoahSharedFlag   _degenerated_gc_in_progress;
 257   ShenandoahSharedFlag   _full_gc_in_progress;
 258   ShenandoahSharedFlag   _full_gc_move_in_progress;
 259   ShenandoahSharedFlag   _progress_last_gc;
 260 
 261   void set_gc_state_mask(uint mask, bool value);
 262 
 263 public:
 264   char gc_state();
 265   static address gc_state_addr();
 266 
 267   void set_concurrent_mark_in_progress(bool in_progress);
 268   void set_evacuation_in_progress(bool in_progress);
 269   void set_update_refs_in_progress(bool in_progress);
 270   void set_degenerated_gc_in_progress(bool in_progress);
 271   void set_full_gc_in_progress(bool in_progress);
 272   void set_full_gc_move_in_progress(bool in_progress);
 273   void set_concurrent_traversal_in_progress(bool in_progress);
 274   void set_has_forwarded_objects(bool cond);
 275 
 276   inline bool is_stable() const;
 277   inline bool is_idle() const;
 278   inline bool is_concurrent_mark_in_progress() const;
 279   inline bool is_update_refs_in_progress() const;
 280   inline bool is_evacuation_in_progress() const;
 281   inline bool is_degenerated_gc_in_progress() const;
 282   inline bool is_full_gc_in_progress() const;
 283   inline bool is_full_gc_move_in_progress() const;
 284   inline bool is_concurrent_traversal_in_progress() const;
 285   inline bool has_forwarded_objects() const;
 286   inline bool is_gc_in_progress_mask(uint mask) const;
 287 
 288 // ---------- GC cancellation and degeneration machinery
 289 //
 290 // Cancelled GC flag is used to notify concurrent phases that they should terminate.
 291 //
 292 public:
 293   enum ShenandoahDegenPoint {
       // Point in the concurrent cycle at which a Degenerated GC takes over.
       // Entries appear to follow cycle order (traversal / outside-cycle,
       // then mark -> evac -> updaterefs); consumed by vmop_degenerated /
       // op_degenerated. "unset" maps to "<UNSET>" in degen_point_to_string.
 294     _degenerated_unset,
       // Degeneration out of a (cancelled) traversal cycle; see
       // entry_traversal / op_traversal and the traversal_gc() subsystem.
 295     _degenerated_traversal,
 296     _degenerated_outside_cycle,
 297     _degenerated_mark,
 298     _degenerated_evac,
 299     _degenerated_updaterefs,
       // Sentinel -- presumably an element count for bounds/array sizing
       // (TODO confirm); not a valid point: degen_point_to_string routes it
       // to ShouldNotReachHere().
 300     _DEGENERATED_LIMIT
 301   };
 302 
 303   static const char* degen_point_to_string(ShenandoahDegenPoint point) {
       // Human-readable label for a degeneration point, for logging/tracing.
       // Returns string literals (static storage), which satisfies the
       // "immortal messages" requirement noted near degen_event_message.
       // NOTE(review): the trailing ';' after this function body is redundant.
 304     switch (point) {
 305       case _degenerated_unset:
 306         return "<UNSET>";
 307       case _degenerated_traversal:
 308         return "Traversal";
 309       case _degenerated_outside_cycle:
 310         return "Outside of Cycle";
 311       case _degenerated_mark:
 312         return "Mark";
 313       case _degenerated_evac:
 314         return "Evacuation";
 315       case _degenerated_updaterefs:
 316         return "Update Refs";
 317       default:
         // _DEGENERATED_LIMIT (or a corrupt value) must never reach here.
 318         ShouldNotReachHere();
 319         return "ERROR";
 320     }
 321   };
 322 
 323 private:
 324   ShenandoahSharedFlag _cancelled_gc;
 325   inline bool try_cancel_gc();
 326 
 327 public:
 328   static address cancelled_gc_addr();
 329 
 330   inline bool cancelled_gc() const;
 331 
 332   inline void clear_cancelled_gc();
 333 
 334   void cancel_gc(GCCause::Cause cause);
 335 
 336 // ---------- GC operations entry points
 337 //
 338 public:
 339   // Entry points to STW GC operations, these cause a related safepoint, that then
 340   // call the entry method below
 341   void vmop_entry_init_mark();
 342   void vmop_entry_final_mark();
 343   void vmop_entry_final_evac();
 344   void vmop_entry_init_updaterefs();
 345   void vmop_entry_final_updaterefs();
 346   void vmop_entry_init_traversal();
 347   void vmop_entry_final_traversal();
 348   void vmop_entry_full(GCCause::Cause cause);
 349   void vmop_degenerated(ShenandoahDegenPoint point);
 350 
 351   // Entry methods to normally STW GC operations. These set up logging, monitoring
 352   // and workers for net VM operation
 353   void entry_init_mark();
 354   void entry_final_mark();
 355   void entry_final_evac();
 356   void entry_init_updaterefs();
 357   void entry_final_updaterefs();
 358   void entry_init_traversal();
 359   void entry_final_traversal();
 360   void entry_full(GCCause::Cause cause);
 361   void entry_degenerated(int point);
 362 
 363   // Entry methods to normally concurrent GC operations. These set up logging, monitoring
 364   // for concurrent operation.
 365   void entry_reset();
 366   void entry_mark();
 367   void entry_preclean();
 368   void entry_cleanup();
 369   void entry_evac();
 370   void entry_updaterefs();
 371   void entry_traversal();
 372   void entry_uncommit(double shrink_before);
 373 
 374 private:
 375   // Actual work for the phases
 376   void op_init_mark();
 377   void op_final_mark();
 378   void op_final_evac();
 379   void op_init_updaterefs();
 380   void op_final_updaterefs();
 381   void op_init_traversal();
 382   void op_final_traversal();
 383   void op_full(GCCause::Cause cause);
 384   void op_degenerated(ShenandoahDegenPoint point);
 385   void op_degenerated_fail();
 386   void op_degenerated_futile();
 387 
 388   void op_reset();
 389   void op_mark();
 390   void op_preclean();
 391   void op_cleanup();
 392   void op_conc_evac();
 393   void op_stw_evac();
 394   void op_updaterefs();
 395   void op_traversal();
 396   void op_uncommit(double shrink_before);
 397 
 398   // Messages for GC trace event, they have to be immortal for
 399   // passing around the logging/tracing systems
 400   const char* init_mark_event_message() const;
 401   const char* final_mark_event_message() const;
 402   const char* conc_mark_event_message() const;
 403   const char* degen_event_message(ShenandoahDegenPoint point) const;
 404 
 405 // ---------- GC subsystems
 406 //
 407 private:
 408   ShenandoahControlThread*   _control_thread;
 409   ShenandoahCollectorPolicy* _shenandoah_policy;
 410   ShenandoahMode*            _gc_mode;
 411   ShenandoahHeuristics*      _heuristics;
 412   ShenandoahFreeSet*         _free_set;
 413   ShenandoahConcurrentMark*  _scm;
 414   ShenandoahTraversalGC*     _traversal_gc;
 415   ShenandoahMarkCompact*     _full_gc;
 416   ShenandoahPacer*           _pacer;
 417   ShenandoahVerifier*        _verifier;
 418 
 419   ShenandoahAllocTracker*    _alloc_tracker;
 420   ShenandoahPhaseTimings*    _phase_timings;
 421 
 422   ShenandoahControlThread*   control_thread()          { return _control_thread;    }
 423   ShenandoahMarkCompact*     full_gc()                 { return _full_gc;           }
 424 
 425 public:
 426   ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
 427   ShenandoahHeuristics*      heuristics()        const { return _heuristics;        }
 428   ShenandoahFreeSet*         free_set()          const { return _free_set;          }
 429   ShenandoahConcurrentMark*  concurrent_mark()         { return _scm;               }
 430   ShenandoahTraversalGC*     traversal_gc()      const { return _traversal_gc;      }
 431   bool                       is_traversal_mode() const { return _traversal_gc != NULL; }
 432   ShenandoahPacer*           pacer()             const { return _pacer;             }
 433 
 434   ShenandoahPhaseTimings*    phase_timings()     const { return _phase_timings;     }
 435   ShenandoahAllocTracker*    alloc_tracker()     const { return _alloc_tracker;     }
 436 
 437   ShenandoahVerifier*        verifier();
 438 
 439 // ---------- VM subsystem bindings
 440 //
 441 private:
 442   ShenandoahMonitoringSupport* _monitoring_support;
 443   ConcurrentGCTimer* _gc_timer;
 444 
 445 public:
 446   ShenandoahMonitoringSupport* monitoring_support() { return _monitoring_support; }
 447 
 448   GCTracer* tracer();
 449   GCTimer* gc_timer() const;
 450   CollectorPolicy* collector_policy() const;
 451 


 640 
 641   ShenandoahCollectionSet* collection_set() const { return _collection_set; }
 642 
 643   template <class T>
 644   inline bool in_collection_set(T obj) const;
 645 
 646   // Avoid accidentally calling the method above with ShenandoahHeapRegion*, which would be *wrong*.
 647   inline bool in_collection_set(ShenandoahHeapRegion* r) shenandoah_not_implemented_return(false);
 648 
 649   // Evacuates object src. Returns the evacuated object, either evacuated
 650   // by this thread, or by some other thread.
 651   inline oop  evacuate_object(oop src, Thread* thread);
 652 
 653   // Call before/after evacuation.
 654   void enter_evacuation();
 655   void leave_evacuation();
 656 
 657 // ---------- Helper functions
 658 //
 659 public:
 660   template <class T>
 661   inline oop evac_update_with_forwarded(T* p);
 662 
 663   template <class T>
 664   inline oop maybe_update_with_forwarded(T* p);
 665 
 666   template <class T>
 667   inline oop maybe_update_with_forwarded_not_null(T* p, oop obj);
 668 
 669   template <class T>
 670   inline oop update_with_forwarded_not_null(T* p, oop obj);
 671 
 672   static inline oop cas_oop(oop n, narrowOop* addr, oop c);
 673   static inline oop cas_oop(oop n, oop* addr, oop c);
 674 
 675   void trash_humongous_region_at(ShenandoahHeapRegion *r);
 676 
 677   void stop_concurrent_marking();
 678 
 679   void roots_iterate(OopClosure* cl);
 680 
 681 private:
 682   void trash_cset_regions();
< prev index next >