/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP

#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentThread.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"

#include "gc/shared/cmBitMap.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"

#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/space.hpp"
#include "oops/oop.hpp"
#include "oops/markOop.hpp"


class SpaceClosure;
class GCTracer;

class ShenandoahJNICritical;

class ShenandoahAlwaysTrueClosure : public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};


class ShenandoahIsAliveClosure: public BoolObjectClosure {

public:
  bool do_object_b(oop obj);
};


class ShenandoahHeapRegionClosure : public StackObj {
  bool _complete;
  void incomplete() { _complete = false; }

public:
  ShenandoahHeapRegionClosure() : _complete(true) {}

  // Called on each heap region in turn; iteration stops as soon as this returns true.
  virtual bool doHeapRegion(ShenandoahHeapRegion* r) = 0;

  bool complete() { return _complete; }
};

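// Illustrative example (an assumption, not code from this file): a minimal
// closure that could be passed to ShenandoahHeap::heap_region_iterate(),
// declared further below.  The name CountRegionsClosure is hypothetical.
//
//   class CountRegionsClosure : public ShenandoahHeapRegionClosure {
//     size_t _count;
//   public:
//     CountRegionsClosure() : _count(0) {}
//     bool doHeapRegion(ShenandoahHeapRegion* r) {
//       _count++;
//       return false;  // returning false keeps the iteration going
//     }
//     size_t count() const { return _count; }
//   };
//
//   CountRegionsClosure cl;
//   ShenandoahHeap::heap()->heap_region_iterate(&cl);
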
// A "ShenandoahHeap" is an implementation of a Java heap for HotSpot.
// It uses a new pauseless GC algorithm based on Brooks pointers.
// Derived from G1.
//
// The class hierarchy is:
//
// CollectedHeap
//    SharedHeap
//      ShenandoahHeap

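// Illustrative sketch (an assumption for explanation, not code from this
// file): with Brooks pointers, every object carries one extra heap word
// immediately before its regular header.  That word points either at the
// object itself or, once the object has been evacuated, at its new copy.
// A read barrier can therefore resolve any oop with a single dependent
// load, roughly:
//
//   inline oop brooks_resolve(oop obj) {
//     // The forwarding word lives one HeapWord before the object start.
//     HeapWord** brooks_slot = (HeapWord**) (((HeapWord*) obj) - 1);
//     return (oop) *brooks_slot;
//   }
//
// Evacuation only ever swings that one word over to the new copy, which is
// what allows compaction to run concurrently with the mutators.
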
class ShenandoahHeap : public CollectedHeap {

private:

  static ShenandoahHeap* _pgc;
  ShenandoahCollectorPolicy* _shenandoah_policy;
  VirtualSpace _storage;
  ShenandoahHeapRegion* _first_region;
  HeapWord* _first_region_bottom;
  // Ordered array of regions (name confusing with _regions)
  ShenandoahHeapRegion** _ordered_regions;

  // Sortable array of regions
  ShenandoahHeapRegionSet* _free_regions;
  ShenandoahHeapRegionSet* _collection_set;
  ShenandoahHeapRegion* _currentAllocationRegion;
  ShenandoahConcurrentMark* _scm;



  ShenandoahConcurrentThread* _concurrent_gc_thread;

  size_t _num_regions;
  size_t _max_regions;
  size_t _initialSize;
#ifndef NDEBUG
  uint _numAllocs;
#endif
  WorkGangBarrierSync barrierSync;
  int _max_parallel_workers;
  int _max_conc_workers;
  int _max_workers;

  WorkGang* _conc_workers;
  WorkGang* _workers;


  volatile size_t _used;

  CMBitMap _mark_bit_map;
  CMBitMap* _next_mark_bit_map;

  bool* _in_cset_fast_test;
  bool* _in_cset_fast_test_base;
  uint _in_cset_fast_test_length;

  bool _cancelled_concgc;

  ShenandoahJNICritical* _jni_critical;

public:
  size_t _bytesAllocSinceCM;
  size_t _bytes_allocated_during_cm;
  size_t _bytes_allocated_during_cm_start;
  size_t _max_allocated_gc;
  size_t _allocated_last_gc;
  size_t _used_start_gc;

public:
  ShenandoahHeap(ShenandoahCollectorPolicy* policy);
  HeapWord* allocate_from_gclab(Thread* thread, size_t size);
  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
  HeapWord* allocate_new_tlab(size_t word_size);
  HeapWord* allocate_new_gclab(size_t word_size);
private:
  HeapWord* allocate_new_tlab(size_t word_size, bool mark);
public:
  HeapWord* allocate_memory(size_t word_size);

  bool find_contiguous_free_regions(uint num_free_regions, ShenandoahHeapRegion** free_regions);
  bool allocate_contiguous_free_regions(uint num_free_regions, ShenandoahHeapRegion** free_regions);

  // For now we are ignoring eden.
  inline bool should_alloc_in_eden(size_t size) { return false; }
  void print_on(outputStream* st) const;

  ShenandoahHeap::Name kind() const {
    return CollectedHeap::ShenandoahHeap;
  }

  static ShenandoahHeap* heap();

  ShenandoahCollectorPolicy* shenandoahPolicy() { return _shenandoah_policy; }

  jint initialize();
  static size_t conservative_max_heap_alignment() {
    return HeapRegionBounds::max_size();
  }

  void post_initialize();
  size_t capacity() const;
  size_t used() const;
  bool is_maximal_no_gc() const;
  size_t max_capacity() const;
  virtual bool is_in(const void* p) const;
  bool is_in_partial_collection(const void* p);
  bool is_scavengable(const void* addr);
  virtual HeapWord* mem_allocate(size_t size, bool* what);
  HeapWord* mem_allocate_locked(size_t size, bool* what);
  virtual size_t unsafe_max_alloc();
  bool can_elide_tlab_store_barriers() const;
  virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj);
  bool can_elide_initializing_store_barrier(oop new_obj);
  bool card_mark_must_follow_store() const;
  bool supports_heap_inspection() const;
  void collect(GCCause::Cause);
  void do_full_collection(bool clear_all_soft_refs);
  AdaptiveSizePolicy* size_policy();
  ShenandoahCollectorPolicy* collector_policy() const;

  void ensure_parsability(bool retire_tlabs);

  void add_free_region(ShenandoahHeapRegion* r) { _free_regions->append(r); }
  void clear_free_regions() { _free_regions->clear(); }

  void oop_iterate(ExtendedOopClosure* cl, bool skip_dirty_regions,
                   bool skip_unreachable_objects);
  void oop_iterate(ExtendedOopClosure* cl) {
    oop_iterate(cl, false, false);
  }

  void roots_iterate(ExtendedOopClosure* cl);
  void weak_roots_iterate(ExtendedOopClosure* cl);

  void object_iterate(ObjectClosure* cl);
  void object_iterate_careful(ObjectClosureCareful* cl);
  void object_iterate_no_from_space(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);

  void marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl);
  void marked_object_iterate_careful(ShenandoahHeapRegion* region, ObjectClosure* cl);
private:
  void marked_object_iterate(ShenandoahHeapRegion* region, ObjectClosure* cl, HeapWord* start, HeapWord* limit);

public:
  HeapWord* block_start(const void* addr) const;
  size_t block_size(const HeapWord* addr) const;
  bool block_is_obj(const HeapWord* addr) const;
  jlong millis_since_last_gc();
  void prepare_for_verify();
  void print_gc_threads_on(outputStream* st) const;
  void gc_threads_do(ThreadClosure* tcl) const;
  void print_tracing_info() const;
  void verify(bool silent, VerifyOption vo);
  bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread *thr) const;
  void oop_iterate(MemRegion mr, ExtendedOopClosure* ecl);
  void object_iterate_since_last_GC(ObjectClosure* cl);
  void space_iterate(SpaceClosure* scl);
  virtual size_t unsafe_max_tlab_alloc(Thread *thread) const;
  virtual size_t max_tlab_size() const;

  void resize_all_tlabs();
  void accumulate_statistics_all_gclabs();

  HeapWord* tlab_post_allocation_setup(HeapWord* obj);

  uint oop_extra_words();

#ifndef CC_INTERP
  void compile_prepare_oop(MacroAssembler* masm, Register obj = rax);
#endif

  Space* space_containing(const void* oop) const;
  void gc_prologue(bool b);
  void gc_epilogue(bool b);

  void heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_dirty_regions = false, bool skip_humongous_continuation = false) const;
  ShenandoahHeapRegion* heap_region_containing(const void* addr) const;
  inline uint heap_region_index_containing(const void* addr) const;

/**
 * Maybe we need that at some point...

  oop* resolve_oop_ptr(oop* p);

  oop oop_containing_oop_ptr(oop* p);

*/

  void temp();

  volatile unsigned int _concurrent_mark_in_progress;

  volatile unsigned int _evacuation_in_progress;
  volatile bool _update_references_in_progress;
  bool _need_update_refs;
  bool _need_reset_bitmaps;

  void start_concurrent_marking();
  void stop_concurrent_marking();
  ShenandoahConcurrentMark* concurrentMark() { return _scm; }
  ShenandoahConcurrentThread* concurrent_thread() { return _concurrent_gc_thread; }

  ShenandoahJNICritical* jni_critical();

  size_t bump_object_age(HeapWord* start, HeapWord* end);

  inline bool mark_current(oop obj) const;
  inline bool mark_current_no_checks(oop obj) const;
  inline bool is_marked_current(oop obj) const;

  ReferenceProcessor* _ref_processor;
  bool is_marked_prev(oop obj) const;

  bool is_obj_ill(const oop obj) const;

  void reset_mark_bitmap();
  void reset_mark_bitmap_range(HeapWord* from, HeapWord* to);

  bool is_bitmap_clear();

  void mark_object_live(oop obj, bool enqueue);

  void prepare_for_concurrent_evacuation();
  void do_evacuation();
  void parallel_evacuate();

  void initialize_brooks_ptr(HeapWord* brooks_ptr, HeapWord* object, bool new_obj = true);
  void initialize_brooks_ptr(oop p);

  inline oop maybe_update_oop_ref(oop* p);
  void evacuate_region(ShenandoahHeapRegion* from_region, ShenandoahHeapRegion* to_region);
  void parallel_evacuate_region(ShenandoahHeapRegion* from_region);
  void verify_evacuated_region(ShenandoahHeapRegion* from_region);

  void print_heap_regions(outputStream* st = tty) const;

  void print_all_refs(const char* prefix);

  void print_heap_objects(HeapWord* start, HeapWord* end);
  void print_heap_locations(HeapWord* start, HeapWord* end);
  void print_heap_object(oop p);

  oop evacuate_object(oop src, Thread* thread);
  bool is_in_collection_set(const void* p) {
    return heap_region_containing(p)->is_in_collection_set();
  }

  void copy_object(oop p, HeapWord* s);
  void verify_copy(oop p, oop c);
  //  void assign_brooks_pointer(oop p, HeapWord* filler, HeapWord* copy);
  void verify_heap_size_consistency();
  void verify_heap_after_marking();
  void verify_heap_after_evacuation();
  void verify_heap_after_update_refs();
  void verify_regions_after_update_refs();

  static ByteSize ordered_regions_offset() { return byte_offset_of(ShenandoahHeap, _ordered_regions); }
  static ByteSize first_region_bottom_offset() { return byte_offset_of(ShenandoahHeap, _first_region_bottom); }

  void cleanup_after_cancelconcgc();
  void increase_used(size_t bytes);
  void decrease_used(size_t bytes);
  void set_used(size_t bytes);

  int ensure_new_regions(int num_new_regions);

  void set_evacuation_in_progress(bool in_progress);
  bool is_evacuation_in_progress();

  bool is_update_references_in_progress();
  void set_update_references_in_progress(bool update_refs_in_progress);

  inline bool need_update_refs() const;
  void set_need_update_refs(bool update_refs);

  ReferenceProcessor* ref_processor() { return _ref_processor; }
  virtual void ref_processing_init();
  ShenandoahIsAliveClosure isAlive;
  void evacuate_and_update_roots();
  void prepare_for_update_references();

  void update_references();

  ShenandoahHeapRegionSet* free_regions();

  void update_roots();

  void acquire_pending_refs_lock();
  void release_pending_refs_lock();

  int max_workers();
  int max_conc_workers();
  int max_parallel_workers();
  WorkGang* conc_workers() const { return _conc_workers; }
  WorkGang* workers() const { return _workers; }

  ShenandoahHeapRegion** heap_regions();
  size_t num_regions();
  size_t max_regions();

  ShenandoahHeapRegion* next_compaction_region(const ShenandoahHeapRegion* r);

  void recycle_dirty_regions();

  void register_region_with_in_cset_fast_test(ShenandoahHeapRegion* r) {
    assert(_in_cset_fast_test_base != NULL, "sanity");
    assert(r->is_in_collection_set(), "invariant");
    uint index = r->region_number();
    assert(index < _in_cset_fast_test_length, "invariant");
    assert(!_in_cset_fast_test_base[index], "invariant");
    _in_cset_fast_test_base[index] = true;
  }
  bool in_cset_fast_test(HeapWord* obj) {
    assert(_in_cset_fast_test != NULL, "sanity");
    if (is_in(obj)) {
      // No need to subtract the bottom of the heap from obj:
      // _in_cset_fast_test is biased.
      uintx index = ((uintx) obj) >> ShenandoahHeapRegion::RegionSizeShift;
      bool ret = _in_cset_fast_test[index];
      // Let's make sure the result is consistent with what the slower
      // test returns.
      assert( ret || !is_in_collection_set(obj), "sanity");
      assert(!ret ||  is_in_collection_set(obj), "sanity");
      return ret;
    } else {
      return false;
    }
  }
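
  // Illustrative note (an assumption for explanation, not part of the
  // original interface): the fast test indexes a bool array by
  // (address >> RegionSizeShift).  For that to work without subtracting the
  // heap base on every query, _in_cset_fast_test is the base array pointer
  // shifted ("biased") down by the index of the first region.  Setup would
  // look roughly like this; the exact allocation call is an assumption:
  //
  //   _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length, mtGC);
  //   clear_cset_fast_test();
  //   _in_cset_fast_test = _in_cset_fast_test_base -
  //       (((uintx) _first_region_bottom) >> ShenandoahHeapRegion::RegionSizeShift);
  //
  // With that bias in place, in_cset_fast_test() above can shift a raw heap
  // address directly and land on the entry for its region.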

  static address in_cset_fast_test_addr() {
    return (address) (ShenandoahHeap::heap()->_in_cset_fast_test);
  }

  void clear_cset_fast_test() {
    assert(_in_cset_fast_test_base != NULL, "sanity");
    memset(_in_cset_fast_test_base, false,
           (size_t) _in_cset_fast_test_length * sizeof(bool));
  }

  GCTracer* tracer();
  ShenandoahHeapRegionSet* collection_set() { return _collection_set; }
  size_t tlab_used(Thread* ignored) const;

private:

  bool grow_heap_by();

  void verify_evacuation(ShenandoahHeapRegion* from_region);
  void set_concurrent_mark_in_progress(bool in_progress);

  void oom_during_evacuation();
  void cancel_concgc();
public:
  bool cancelled_concgc();
  void clear_cancelled_concgc();

  void shutdown();

  bool concurrent_mark_in_progress();
  size_t calculateUsed();
  size_t calculateFree();

private:
  void verify_live();
  void verify_liveness_after_concurrent_mark();

  HeapWord* allocate_memory_with_lock(size_t word_size);
  HeapWord* allocate_memory_heap_lock(size_t word_size);
  HeapWord* allocate_memory_shenandoah_lock(size_t word_size);
  HeapWord* allocate_memory_work(size_t word_size);
  HeapWord* allocate_large_memory(size_t word_size);
  ShenandoahHeapRegion* check_skip_humongous(ShenandoahHeapRegion* region) const;
  ShenandoahHeapRegion* get_next_region_skip_humongous() const;
  ShenandoahHeapRegion* get_current_region_skip_humongous() const;
  ShenandoahHeapRegion* check_grow_heap(ShenandoahHeapRegion* current);
  ShenandoahHeapRegion* get_next_region();
  ShenandoahHeapRegion* get_current_region();

  void set_from_region_protection(bool protect);

public:
  // Delete entries for dead interned strings and clean up unreferenced symbols
  // in the symbol table, possibly in parallel.
  void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
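
  // Illustrative usage (an assumption, not code from this file): once
  // concurrent marking has determined liveness, the collector would
  // typically pass its is-alive closure here, e.g.
  //
  //   ShenandoahIsAliveClosure is_alive;
  //   ShenandoahHeap::heap()->unlink_string_and_symbol_table(&is_alive);
  //
  // so that entries whose referents are no longer marked are removed before
  // the mark bitmap is reset for the next cycle.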

};

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP