/*
 * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP

#include "gc_implementation/shared/markBitMap.hpp"
#include "gc_implementation/shenandoah/shenandoahAsserts.hpp"
#include "gc_implementation/shenandoah/shenandoahAllocRequest.hpp"
#include "gc_implementation/shenandoah/shenandoahLock.hpp"
#include "gc_implementation/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc_implementation/shenandoah/shenandoahSharedVariables.hpp"

class ConcurrentGCTimer;

class ShenandoahAllocTracker;
class ShenandoahCollectionSet;
class ShenandoahCollectorPolicy;
class ShenandoahConcurrentMark;
class ShenandoahControlThread;
class ShenandoahGCSession;
class ShenandoahFreeSet;
class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;
class ShenandoahMarkCompact;
class ShenandoahMonitoringSupport;
class ShenandoahHeuristics;
class ShenandoahMarkingContext;
class ShenandoahMode;
class ShenandoahPhaseTimings;
class ShenandoahPacer;
class ShenandoahVerifier;
class ShenandoahWorkGang;
class VMStructs;

class ShenandoahRegionIterator : public StackObj {
private:
  ShenandoahHeap* _heap;

  char _pad0[DEFAULT_CACHE_LINE_SIZE];
  volatile jint _index;
  char _pad1[DEFAULT_CACHE_LINE_SIZE];

  // No implicit copying: iterators should be passed by reference to capture the state
  ShenandoahRegionIterator(const ShenandoahRegionIterator& that);
  ShenandoahRegionIterator& operator=(const ShenandoahRegionIterator& o);

public:
  ShenandoahRegionIterator();
  ShenandoahRegionIterator(ShenandoahHeap* heap);

  // Reset iterator to default state
  void reset();

  // Returns next region, or NULL if there are no more regions.
  // This is multi-thread-safe.
  inline ShenandoahHeapRegion* next();

  // This is *not* MT safe. However, in the absence of multithreaded access, it
  // can be used to determine if there is more work to do.
  bool has_next() const;
};
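
// A minimal usage sketch (illustrative only, assuming the default constructor
// binds to ShenandoahHeap::heap()): multiple workers may share one iterator,
// since next() hands out each region exactly once across all threads:
//
//   ShenandoahRegionIterator it;
//   for (ShenandoahHeapRegion* r = it.next(); r != NULL; r = it.next()) {
//     // ...process region r; another worker never sees the same r...
//   }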

class ShenandoahHeapRegionClosure : public StackObj {
public:
  virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
  virtual bool is_thread_safe() { return false; }
};
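
// A hypothetical closure sketch (illustrative only): subclasses override
// heap_region_do(), and may report is_thread_safe() = true to permit use
// with ShenandoahHeap::parallel_heap_region_iterate() declared below.
// The is_humongous() call on the region is an assumption for this example.
//
//   class CountHumongousClosure : public ShenandoahHeapRegionClosure {
//     size_t _count;
//   public:
//     CountHumongousClosure() : _count(0) {}
//     void heap_region_do(ShenandoahHeapRegion* r) {
//       if (r->is_humongous()) _count++;
//     }
//   };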

#ifdef ASSERT
class ShenandoahAssertToSpaceClosure : public OopClosure {
private:
  template <class T>
  void do_oop_nv(T* p);
public:
  void do_oop(narrowOop* p);
  void do_oop(oop* p);
};
#endif

typedef ShenandoahLock    ShenandoahHeapLock;
typedef ShenandoahLocker  ShenandoahHeapLocker;

// Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers
// to encode forwarding data. See BrooksPointer for details on the forwarding data
// encoding. See ShenandoahControlThread for the GC cycle structure.
//
class ShenandoahHeap : public SharedHeap {
  friend class ShenandoahAsserts;
  friend class VMStructs;
  friend class ShenandoahGCSession;

// ---------- Locks that guard important data structures in the heap
//
private:
  ShenandoahHeapLock _lock;

public:
  ShenandoahHeapLock* lock() {
    return &_lock;
  }

  void assert_heaplock_owned_by_current_thread()     NOT_DEBUG_RETURN;
  void assert_heaplock_not_owned_by_current_thread() NOT_DEBUG_RETURN;
  void assert_heaplock_or_safepoint()                NOT_DEBUG_RETURN;
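
  // A minimal sketch (illustrative only): ShenandoahHeapLocker (the typedef
  // above) is the RAII guard for this lock, so code touching the guarded
  // structures typically brackets itself as:
  //
  //   ShenandoahHeapLocker locker(lock());
  //   // ...mutate region states, free set, etc. under the lock...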

// ---------- Initialization, termination, identification, printing routines
//
public:
  static ShenandoahHeap* heap();
  static ShenandoahHeap* heap_no_check();
  static size_t conservative_max_heap_alignment();

  const char* name()          const { return "Shenandoah"; }
  ShenandoahHeap::Name kind() const { return CollectedHeap::ShenandoahHeap; }

  ShenandoahHeap(ShenandoahCollectorPolicy* policy);
  jint initialize();
  void post_initialize();
  void initialize_heuristics();

  void print_on(outputStream* st)               const;
  void print_extended_on(outputStream *st)      const;
  void print_tracing_info()                     const;
  void print_gc_threads_on(outputStream* st)    const;
  void print_heap_regions_on(outputStream* st)  const;

  void stop();

  void prepare_for_verify();
  void verify(bool silent, VerifyOption vo);

// ---------- Heap counters and metrics
//
private:
           size_t _initial_size;
           size_t _minimum_size;
  char _pad0[DEFAULT_CACHE_LINE_SIZE];
  volatile jlong  _used;
  volatile size_t _committed;
  volatile jlong  _bytes_allocated_since_gc_start;
  char _pad1[DEFAULT_CACHE_LINE_SIZE];

public:
  void increase_used(size_t bytes);
  void decrease_used(size_t bytes);
  void set_used(size_t bytes);

  void increase_committed(size_t bytes);
  void decrease_committed(size_t bytes);
  void increase_allocated(size_t bytes);

  size_t bytes_allocated_since_gc_start();
  void reset_bytes_allocated_since_gc_start();

  size_t min_capacity()     const;
  size_t max_capacity()     const;
  size_t initial_capacity() const;
  size_t capacity()         const;
  size_t used()             const;
  size_t committed()        const;

// ---------- Workers handling
//
private:
  uint _max_workers;

public:
  uint max_workers();
  void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;

  ShenandoahWorkGang* workers() const;

  void gc_threads_do(ThreadClosure* tcl) const;

// ---------- Heap regions handling machinery
//
private:
  MemRegion _heap_region;
  bool      _heap_region_special;
  size_t    _num_regions;
  ShenandoahHeapRegion** _regions;
  ShenandoahRegionIterator _update_refs_iterator;

public:
  inline size_t num_regions() const { return _num_regions; }
  inline bool is_heap_region_special() { return _heap_region_special; }

  inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
  inline size_t heap_region_index_containing(const void* addr) const;

  inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;

  void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
  void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;

// ---------- GC state machinery
//
// GC state describes the important parts of the collector state that may be
// used to make barrier selection decisions in the native and generated code.
// Multiple bits can be set at once.
//
// Important invariant: when GC state is zero, the heap is stable, and no barriers
// are required.
//
public:
  enum GCStateBitPos {
    // Heap has forwarded objects: need RB, ACMP, CAS barriers.
    HAS_FORWARDED_BITPOS   = 0,

    // Heap is under marking: needs SATB barriers.
    MARKING_BITPOS    = 1,

    // Heap is under evacuation: needs WB barriers. (Set together with HAS_FORWARDED)
    EVACUATION_BITPOS = 2,

    // Heap is under updating: needs SVRB/SVWB barriers.
    UPDATEREFS_BITPOS = 3
  };

  enum GCState {
    STABLE        = 0,
    HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
    MARKING       = 1 << MARKING_BITPOS,
    EVACUATION    = 1 << EVACUATION_BITPOS,
    UPDATEREFS    = 1 << UPDATEREFS_BITPOS
  };
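
  // A minimal sketch (illustrative only) of how the bits compose: during
  // concurrent evacuation the state is HAS_FORWARDED | EVACUATION, so both
  // of the following hold:
  //
  //   heap->is_gc_in_progress_mask(EVACUATION);  // true
  //   heap->has_forwarded_objects();             // true
  //
  // while gc_state() == STABLE means no barriers are required at all.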

private:
  ShenandoahSharedBitmap _gc_state;
  ShenandoahSharedFlag   _degenerated_gc_in_progress;
  ShenandoahSharedFlag   _full_gc_in_progress;
  ShenandoahSharedFlag   _full_gc_move_in_progress;
  ShenandoahSharedFlag   _progress_last_gc;

  void set_gc_state_mask(uint mask, bool value);

public:
  char gc_state();
  static address gc_state_addr();

  void set_concurrent_mark_in_progress(bool in_progress);
  void set_evacuation_in_progress(bool in_progress);
  void set_update_refs_in_progress(bool in_progress);
  void set_degenerated_gc_in_progress(bool in_progress);
  void set_full_gc_in_progress(bool in_progress);
  void set_full_gc_move_in_progress(bool in_progress);
  void set_has_forwarded_objects(bool cond);

  inline bool is_stable() const;
  inline bool is_idle() const;
  inline bool is_concurrent_mark_in_progress() const;
  inline bool is_update_refs_in_progress() const;
  inline bool is_evacuation_in_progress() const;
  inline bool is_degenerated_gc_in_progress() const;
  inline bool is_full_gc_in_progress() const;
  inline bool is_full_gc_move_in_progress() const;
  inline bool has_forwarded_objects() const;
  inline bool is_gc_in_progress_mask(uint mask) const;

// ---------- GC cancellation and degeneration machinery
//
// Cancelled GC flag is used to notify concurrent phases that they should terminate.
//
public:
  enum ShenandoahDegenPoint {
    _degenerated_unset,
    _degenerated_outside_cycle,
    _degenerated_mark,
    _degenerated_evac,
    _degenerated_updaterefs,
    _DEGENERATED_LIMIT
  };

  static const char* degen_point_to_string(ShenandoahDegenPoint point) {
    switch (point) {
      case _degenerated_unset:
        return "<UNSET>";
      case _degenerated_outside_cycle:
        return "Outside of Cycle";
      case _degenerated_mark:
        return "Mark";
      case _degenerated_evac:
        return "Evacuation";
      case _degenerated_updaterefs:
        return "Update Refs";
      default:
        ShouldNotReachHere();
        return "ERROR";
    }
  }

private:
  ShenandoahSharedFlag _cancelled_gc;
  inline bool try_cancel_gc();

public:
  static address cancelled_gc_addr();

  inline bool cancelled_gc() const;

  inline void clear_cancelled_gc();

  void cancel_gc(GCCause::Cause cause);
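
  // A minimal sketch (illustrative only) of the cancellation protocol:
  // concurrent phases poll the flag and bail out early, leaving the control
  // thread to degenerate the cycle. has_more_work()/do_some_work() are
  // hypothetical stand-ins for the phase's actual work loop:
  //
  //   while (has_more_work()) {
  //     if (cancelled_gc()) return;  // concurrent phase terminates early
  //     do_some_work();
  //   }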

// ---------- GC operations entry points
//
public:
  // Entry points to STW GC operations. These cause the related safepoint, which
  // then calls the matching entry method below.
  void vmop_entry_init_mark();
  void vmop_entry_final_mark();
  void vmop_entry_final_evac();
  void vmop_entry_init_updaterefs();
  void vmop_entry_final_updaterefs();
  void vmop_entry_full(GCCause::Cause cause);
  void vmop_degenerated(ShenandoahDegenPoint point);

  // Entry methods for normally STW GC operations. These set up logging, monitoring
  // and workers for the net VM operation.
  void entry_init_mark();
  void entry_final_mark();
  void entry_final_evac();
  void entry_init_updaterefs();
  void entry_final_updaterefs();
  void entry_full(GCCause::Cause cause);
  void entry_degenerated(int point);

  // Entry methods for normally concurrent GC operations. These set up logging
  // and monitoring for the concurrent operation.
  void entry_reset();
  void entry_mark();
  void entry_preclean();
  void entry_cleanup();
  void entry_evac();
  void entry_updaterefs();
  void entry_uncommit(double shrink_before);
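
  // A minimal sketch (illustrative only) of how the three layers compose for
  // one phase: the control thread calls vmop_entry_init_mark(), which submits
  // a VM operation; at the resulting safepoint that operation calls
  // entry_init_mark(), which sets up logging/monitoring/workers and then runs
  // op_init_mark(), the actual phase work declared below.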

private:
  // Actual work for the phases
  void op_init_mark();
  void op_final_mark();
  void op_final_evac();
  void op_init_updaterefs();
  void op_final_updaterefs();
  void op_full(GCCause::Cause cause);
  void op_degenerated(ShenandoahDegenPoint point);
  void op_degenerated_fail();
  void op_degenerated_futile();

  void op_reset();
  void op_mark();
  void op_preclean();
  void op_cleanup();
  void op_conc_evac();
  void op_stw_evac();
  void op_updaterefs();
  void op_uncommit(double shrink_before);

  // Messages for GC trace events; they have to be immortal because they are
  // passed around the logging/tracing systems.
  const char* init_mark_event_message() const;
  const char* final_mark_event_message() const;
  const char* conc_mark_event_message() const;
  const char* degen_event_message(ShenandoahDegenPoint point) const;

// ---------- GC subsystems
//
private:
  ShenandoahControlThread*   _control_thread;
  ShenandoahCollectorPolicy* _shenandoah_policy;
  ShenandoahMode*            _gc_mode;
  ShenandoahHeuristics*      _heuristics;
  ShenandoahFreeSet*         _free_set;
  ShenandoahConcurrentMark*  _scm;
  ShenandoahMarkCompact*     _full_gc;
  ShenandoahPacer*           _pacer;
  ShenandoahVerifier*        _verifier;

  ShenandoahAllocTracker*    _alloc_tracker;
  ShenandoahPhaseTimings*    _phase_timings;

  ShenandoahControlThread*   control_thread()          { return _control_thread;    }
  ShenandoahMarkCompact*     full_gc()                 { return _full_gc;           }

public:
  ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
  ShenandoahHeuristics*      heuristics()        const { return _heuristics;        }
  ShenandoahFreeSet*         free_set()          const { return _free_set;          }
  ShenandoahConcurrentMark*  concurrent_mark()         { return _scm;               }
  ShenandoahPacer*           pacer()             const { return _pacer;             }

  ShenandoahPhaseTimings*    phase_timings()     const { return _phase_timings;     }
  ShenandoahAllocTracker*    alloc_tracker()     const { return _alloc_tracker;     }

  ShenandoahVerifier*        verifier();

// ---------- VM subsystem bindings
//
private:
  ShenandoahMonitoringSupport* _monitoring_support;
  ConcurrentGCTimer* _gc_timer;

public:
  ShenandoahMonitoringSupport* monitoring_support() { return _monitoring_support; }

  GCTracer* tracer();
  GCTimer* gc_timer() const;
  CollectorPolicy* collector_policy() const;

// ---------- Reference processing
//
private:
  ReferenceProcessor*  _ref_processor;
  ShenandoahSharedFlag _process_references;

  void ref_processing_init();

public:
  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void set_process_references(bool pr);
  bool process_references() const;

// ---------- Class Unloading
//
private:
  ShenandoahSharedFlag _unload_classes;

public:
  void set_unload_classes(bool uc);
  bool unload_classes() const;

  // Delete entries for dead interned strings and clean up unreferenced symbols
  // in the symbol table, possibly in parallel.
  void unload_classes_and_cleanup_tables(bool full_gc);

// ---------- Generic interface hooks
// Minor things that the super-interface expects us to implement to play nice with
// the rest of the runtime. Some of the things here are not required to be
// implemented, and can be stubbed out.
//
public:
  AdaptiveSizePolicy* size_policy() shenandoah_not_implemented_return(NULL);
  bool is_maximal_no_gc() const shenandoah_not_implemented_return(false);

  bool is_in(const void* p) const;

  // All objects can potentially move
  bool is_scavengable(const void* addr) { return true; }

  void collect(GCCause::Cause cause);
  void do_full_collection(bool clear_all_soft_refs);

  // Used for parsing heap during error printing
  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  // Used for native heap walkers: heap dumpers, mostly
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);
  void space_iterate(SpaceClosure* scl);
  void oop_iterate(ExtendedOopClosure* cl);
  Space* space_containing(const void* oop) const;

  // Used by RMI
  jlong millis_since_last_gc();

  bool can_elide_tlab_store_barriers() const                  { return true;    }
  oop new_store_pre_barrier(JavaThread* thread, oop new_obj)  { return new_obj; }
  bool can_elide_initializing_store_barrier(oop new_obj)      { return true;    }
  bool card_mark_must_follow_store() const                    { return false;   }

  bool is_in_partial_collection(const void* p) shenandoah_not_implemented_return(false);
  bool supports_heap_inspection() const { return true; }

  void gc_prologue(bool b);
  void gc_epilogue(bool b);

  void acquire_pending_refs_lock();
  void release_pending_refs_lock();

// ---------- Code roots handling hooks
//
public:
  void register_nmethod(nmethod* nm);
  void unregister_nmethod(nmethod* nm);

// ---------- Pinning hooks
//
public:
  // Shenandoah supports per-object (per-region) pinning
  bool supports_object_pinning() const { return true; }

  oop pin_object(JavaThread* thread, oop obj);
  void unpin_object(JavaThread* thread, oop obj);

  void sync_pinned_region_status();
  void assert_pinned_region_status() NOT_DEBUG_RETURN;

// ---------- Allocation support
//
private:
  HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region);
  inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
  HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);

public:
  HeapWord* allocate_memory(ShenandoahAllocRequest& request);
  HeapWord* mem_allocate(size_t size, bool* what);

  void notify_mutator_alloc_words(size_t words, bool waste);

  // Shenandoah supports TLAB allocation
  bool supports_tlab_allocation() const { return true; }

  HeapWord* allocate_new_tlab(size_t word_size);
  size_t tlab_capacity(Thread *thr) const;
  size_t unsafe_max_tlab_alloc(Thread *thread) const;
  size_t max_tlab_size() const;
  size_t tlab_used(Thread* ignored) const;

  void resize_tlabs();
  void resize_all_tlabs();

  void accumulate_statistics_tlabs();
  void accumulate_statistics_all_gclabs();

  void make_parsable(bool retire_tlabs);
  void ensure_parsability(bool retire_tlabs);

// ---------- Marking support
//
private:
  ShenandoahMarkingContext* _marking_context;
  MemRegion _bitmap_region;
  MemRegion _aux_bitmap_region;
  MarkBitMap _verification_bit_map;
  MarkBitMap _aux_bit_map;

  size_t _bitmap_size;
  size_t _bitmap_regions_per_slice;
  size_t _bitmap_bytes_per_slice;

  bool _bitmap_region_special;
  bool _aux_bitmap_region_special;

  // Used for buffering per-region liveness data.
  // Needed since ShenandoahHeapRegion uses atomics to update liveness.
  //
  // The array has max-workers elements, each of which is an array of
  // max_regions jushorts. The choice of jushort is not accidental:
  // there is a tradeoff between static/dynamic footprint that translates
  // into cache pressure (which is already high during marking), and
  // too many atomic updates. size_t/jint is too large, jbyte is too small.
  jushort** _liveness_cache;
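
  // A minimal sketch (illustrative only) of the intended access pattern,
  // using the accessors declared below: each marking worker accumulates
  // liveness into its private row without atomics, then flushes the row
  // into the regions, which does the atomic updates:
  //
  //   jushort* ld = get_liveness_cache(worker_id);
  //   ld[region_index] += (jushort) live_words;  // private, no atomics
  //   // ...more marking work...
  //   flush_liveness_cache(worker_id);           // atomic updates per region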

public:
  inline ShenandoahMarkingContext* complete_marking_context() const;
  inline ShenandoahMarkingContext* marking_context() const;
  inline void mark_complete_marking_context();
  inline void mark_incomplete_marking_context();

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  template<class T>
  inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  void reset_mark_bitmap();

  // SATB barrier hooks
  inline bool requires_marking(const void* entry) const;
  void force_satb_flush_all_threads();

  // Support for bitmap uncommits
  bool commit_bitmap_slice(ShenandoahHeapRegion *r);
  bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
  bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);

  // Liveness caching support
  jushort* get_liveness_cache(uint worker_id);
  void flush_liveness_cache(uint worker_id);

// ---------- Evacuation support
//
private:
  ShenandoahCollectionSet* _collection_set;
  ShenandoahEvacOOMHandler _oom_evac_handler;

  void evacuate_and_update_roots();

public:
  static address in_cset_fast_test_addr();

  ShenandoahCollectionSet* collection_set() const { return _collection_set; }

  template <class T>
  inline bool in_collection_set(T obj) const;

  // Avoid accidentally calling the method above with ShenandoahHeapRegion*, which would be *wrong*.
  inline bool in_collection_set(ShenandoahHeapRegion* r) shenandoah_not_implemented_return(false);

  // Evacuates object src. Returns the evacuated object, either evacuated
  // by this thread, or by some other thread.
  inline oop  evacuate_object(oop src, Thread* thread);

  // Call before/after evacuation.
  void enter_evacuation();
  void leave_evacuation();
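
  // A minimal sketch (illustrative only) of how an evacuating thread brackets
  // its work with the calls above, so the OOM-during-evac handler can account
  // for it:
  //
  //   heap->enter_evacuation();
  //   oop copy = heap->evacuate_object(obj, Thread::current());
  //   // copy is the canonical forwardee, whether this thread won the race
  //   // to evacuate obj or some other thread did
  //   heap->leave_evacuation();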

// ---------- Helper functions
//
public:
  template <class T>
  inline oop maybe_update_with_forwarded(T* p);

  template <class T>
  inline oop maybe_update_with_forwarded_not_null(T* p, oop obj);

  template <class T>
  inline oop update_with_forwarded_not_null(T* p, oop obj);

  static inline oop cas_oop(oop n, narrowOop* addr, oop c);
  static inline oop cas_oop(oop n, oop* addr, oop c);
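
  // A minimal sketch (illustrative only) of what these helpers do: a stale
  // reference to an evacuated object is healed in place, with racing updates
  // resolved by cas_oop (which installs n at addr only if addr still holds c):
  //
  //   oop obj = /* load from p */;               // may point into the cset
  //   if (in_collection_set(obj)) {
  //     oop fwd = /* resolve forwardee of obj */;
  //     cas_oop(fwd, p, obj);                    // heal slot p: obj -> fwd
  //   }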

  void trash_humongous_region_at(ShenandoahHeapRegion *r);

  void stop_concurrent_marking();

  void roots_iterate(OopClosure* cl);

private:
  void trash_cset_regions();
  void update_heap_references(bool concurrent);

// ---------- Testing helper functions
//
private:
  ShenandoahSharedFlag _inject_alloc_failure;

  void try_inject_alloc_failure();
  bool should_inject_alloc_failure();
};

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP