
src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

rev 55608 : Rename ShenandoahBrooksPointer to ShenandoahForwarding
rev 55609 : Eliminate extra forwarding pointer per object


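The theme of rev 55609, visible in every hunk below: the old code reserves one extra heap word in front of each object for the Brooks forwarding pointer (hence all the ShenandoahForwarding::word_size() adjustments), while the new code keeps forwarding information in the object's mark word, so the extra word and its bookkeeping disappear. A minimal standalone model of mark-word forwarding, assuming the usual two-bit "forwarded" tag; the names here are illustrative, not HotSpot API:

    #include <atomic>
    #include <cstdint>

    // Model oop: the mark word either holds normal header bits or, when the
    // low two bits are 0b11, a tagged pointer to the forwarded copy. The tag
    // value is an assumption borrowed from HotSpot's "marked" mark-word pattern.
    struct ModelOop {
      std::atomic<uintptr_t> mark;
      static const uintptr_t FWD_TAG = 0x3;

      bool is_forwarded() const {
        return (mark.load(std::memory_order_acquire) & FWD_TAG) == FWD_TAG;
      }
      ModelOop* forwardee() const {
        return reinterpret_cast<ModelOop*>(mark.load(std::memory_order_acquire) & ~FWD_TAG);
      }
    };

Under this encoding, "not forwarded" needs no self-referential slot, which is why the new allocation paths below stop over-allocating by one word.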
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 
  27 #include "gc/shared/gcTimer.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/memAllocator.hpp"
  30 #include "gc/shared/parallelCleaning.hpp"
  31 #include "gc/shared/plab.hpp"
  32 
  33 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
  34 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  35 #include "gc/shenandoah/shenandoahForwarding.hpp"
  36 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  37 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  38 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  39 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  40 #include "gc/shenandoah/shenandoahControlThread.hpp"
  41 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  42 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  43 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  44 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  45 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  46 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  47 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  48 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  49 #include "gc/shenandoah/shenandoahMetrics.hpp"
  50 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  51 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  52 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  53 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  54 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  55 #include "gc/shenandoah/shenandoahUtils.hpp"


 114     AbstractGangTask("Shenandoah Pretouch Bitmap"),
 115     _bitmap_base(bitmap_base),
 116     _bitmap_size(bitmap_size),
 117     _page_size(page_size) {}
 118 
 119   virtual void work(uint worker_id) {
 120     ShenandoahHeapRegion* r = _regions.next();
 121     while (r != NULL) {
 122       size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 123       size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 124       assert(end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
 125 
 126       os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 127 
 128       r = _regions.next();
 129     }
 130   }
 131 };
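The slice arithmetic in work() maps each region's heap range onto its slice of the mark bitmap, so every worker pretouches a disjoint piece. A worked example, assuming a 32 MB region and MarkBitMap::heap_map_factor() == 64 (one bitmap bit per 8-byte heap word; both values are assumptions for illustration):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t region_size_bytes = 32u * 1024 * 1024; // assumed region size
      const size_t heap_map_factor   = 64;                // assumed heap bytes per bitmap byte
      for (size_t region = 0; region < 3; region++) {
        size_t start = region       * region_size_bytes / heap_map_factor;
        size_t end   = (region + 1) * region_size_bytes / heap_map_factor;
        printf("region %zu -> bitmap bytes [%zu, %zu), %zu KB\n",
               region, start, end, (end - start) / 1024);
      }
      return 0;
    }

Each 32 MB region thus owns a contiguous 512 KB bitmap slice, and the assert checks that the last slice stays within _bitmap_size.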
 132 
 133 jint ShenandoahHeap::initialize() {
 134   ShenandoahForwarding::initial_checks();
 135 
 136   initialize_heuristics();
 137 
 138   //
 139   // Figure out heap sizing
 140   //
 141 
 142   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 143   size_t min_byte_size  = collector_policy()->min_heap_byte_size();
 144   size_t max_byte_size  = collector_policy()->max_heap_byte_size();
 145   size_t heap_alignment = collector_policy()->heap_alignment();
 146 
 147   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 148 
 149   if (ShenandoahAlwaysPreTouch) {
 150     // When pre-touch is enabled, the entire heap is committed right away.
 151     init_byte_size = max_byte_size;
 152   }
 153 
 154   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");


 837 
 838       // If we requested more than we were granted, give the rest back to pacer.
 839       // This only matters if we are in the same pacing epoch: do not try to unpace
 840       // over the budget for the other phase.
 841       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
 842         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
 843       }
 844     } else {
 845       increase_used(actual*HeapWordSize);
 846     }
 847   }
 848 
 849   return result;
 850 }
 851 
 852 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
 853   ShenandoahHeapLocker locker(lock());
 854   return _free_set->allocate(req, in_new_region);
 855 }
 856 
 857 class ShenandoahMemAllocator : public MemAllocator {
 858 private:
 859   MemAllocator& _initializer;
 860 public:
 861   ShenandoahMemAllocator(MemAllocator& initializer, Klass* klass, size_t word_size, Thread* thread) :
 862   MemAllocator(klass, word_size + ShenandoahForwarding::word_size(), thread),
 863     _initializer(initializer) {}
 864 
 865 protected:
 866   virtual HeapWord* mem_allocate(Allocation& allocation) const {
 867     HeapWord* result = MemAllocator::mem_allocate(allocation);
 868     // Initialize the Brooks forwarding pointer
 869     if (result != NULL) {
 870       result += ShenandoahForwarding::word_size();
 871       ShenandoahForwarding::initialize(oop(result));
 872       assert(!ShenandoahHeap::heap()->in_collection_set(result), "never allocate in targeted region");
 873     }
 874     return result;
 875   }
 876 
 877   virtual oop initialize(HeapWord* mem) const {
 878     return _initializer.initialize(mem);
 879   }
 880 };
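What the old ShenandoahMemAllocator does: over-allocate by ShenandoahForwarding::word_size(), return the address one forwarding word in, and prime the slot in front so the object starts out "forwarded to itself". A crude standalone model, assuming the forwarding slot is one machine word (plain malloc stands in for the real heap paths):

    #include <cstdint>
    #include <cstdlib>

    typedef uintptr_t HeapWord; // model: one machine word

    // Model of the old allocation path: reserve word_size + 1 words, shift the
    // object start past the forwarding slot, and point the slot at the object.
    HeapWord* brooks_allocate(size_t word_size) {
      HeapWord* raw = static_cast<HeapWord*>(malloc((word_size + 1) * sizeof(HeapWord)));
      if (raw == NULL) return NULL;
      HeapWord* obj = raw + 1;                    // skip the forwarding slot
      obj[-1] = reinterpret_cast<HeapWord>(obj);  // "forwarded to itself"
      return obj;
    }

This is exactly the per-object word that rev 55609 removes; note the line-number gap on the new side where this class and the obj/array/class_allocate overrides used to be.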
 881 
 882 oop ShenandoahHeap::obj_allocate(Klass* klass, int size, TRAPS) {
 883   ObjAllocator initializer(klass, size, THREAD);
 884   ShenandoahMemAllocator allocator(initializer, klass, size, THREAD);
 885   return allocator.allocate();
 886 }
 887 
 888 oop ShenandoahHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
 889   ObjArrayAllocator initializer(klass, size, length, do_zero, THREAD);
 890   ShenandoahMemAllocator allocator(initializer, klass, size, THREAD);
 891   return allocator.allocate();
 892 }
 893 
 894 oop ShenandoahHeap::class_allocate(Klass* klass, int size, TRAPS) {
 895   ClassAllocator initializer(klass, size, THREAD);
 896   ShenandoahMemAllocator allocator(initializer, klass, size, THREAD);
 897   return allocator.allocate();
 898 }
 899 
 900 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
 901                                         bool*  gc_overhead_limit_was_exceeded) {
 902   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
 903   return allocate_memory(req);
 904 }
 905 
 906 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
 907                                                              size_t size,
 908                                                              Metaspace::MetadataType mdtype) {
 909   MetaWord* result;
 910 
 911   // Inform GC heuristics of metaspace OOM if class unloading is possible.
 912   if (heuristics()->can_unload_classes()) {
 913     ShenandoahHeuristics* h = heuristics();
 914     h->record_metaspace_oom();
 915   }
 916 
 917   // Expand and retry allocation
 918   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 919   if (result != NULL) {


 922 
 923   // Start full GC
 924   collect(GCCause::_metadata_GC_clear_soft_refs);
 925 
 926   // Retry allocation
 927   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
 928   if (result != NULL) {
 929     return result;
 930   }
 931 
 932   // Expand and retry allocation
 933   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 934   if (result != NULL) {
 935     return result;
 936   }
 937 
 938   // Out of memory
 939   return NULL;
 940 }
 941 
 942 void ShenandoahHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
 943   HeapWord* obj = tlab_post_allocation_setup(start);
 944   CollectedHeap::fill_with_object(obj, end);
 945 }
 946 
 947 size_t ShenandoahHeap::min_dummy_object_size() const {
 948   return CollectedHeap::min_dummy_object_size() + ShenandoahForwarding::word_size();
 949 }
 950 
 951 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
 952 private:
 953   ShenandoahHeap* const _heap;
 954   Thread* const _thread;
 955 public:
 956   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 957     _heap(heap), _thread(Thread::current()) {}
 958 
 959   void do_object(oop p) {
 960     shenandoah_assert_marked(NULL, p);
 961     if (oopDesc::equals_raw(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
 962       _heap->evacuate_object(p, _thread);
 963     }
 964   }
 965 };
 966 
 967 class ShenandoahEvacuationTask : public AbstractGangTask {
 968 private:
 969   ShenandoahHeap* const _sh;
 970   ShenandoahCollectionSet* const _cs;
 971   bool _concurrent;
 972 public:
 973   ShenandoahEvacuationTask(ShenandoahHeap* sh,
 974                            ShenandoahCollectionSet* cs,
 975                            bool concurrent) :
 976     AbstractGangTask("Parallel Evacuation Task"),
 977     _sh(sh),
 978     _cs(cs),
 979     _concurrent(concurrent)
 980   {}
 981 


1021     r->make_trash();
1022   }
1023   collection_set()->clear();
1024 }
1025 
1026 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1027   st->print_cr("Heap Regions:");
1028   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
1029   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
1030   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
1031   st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)");
1032 
1033   for (size_t i = 0; i < num_regions(); i++) {
1034     get_region(i)->print_on(st);
1035   }
1036 }
1037 
1038 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1039   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1040 
1041   oop humongous_obj = oop(start->bottom() + ShenandoahForwarding::word_size());
1042   size_t size = humongous_obj->size() + ShenandoahForwarding::word_size();
1043   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1044   size_t index = start->region_number() + required_regions - 1;
1045 
1046   assert(!start->has_live(), "liveness must be zero");
1047 
1048   for (size_t i = 0; i < required_regions; i++) {
1049     // Reclaim from the tail. Otherwise, the assertion in region printing fails,
1050     // because it expects every humongous continuation to follow its humongous start region.
1051     ShenandoahHeapRegion* region = get_region(index--);
1052 
1053     assert(region->is_humongous(), "expect correct humongous start or continuation");
1054     assert(!region->is_cset(), "Humongous region should not be in collection set");
1055 
1056     region->make_trash_immediate();
1057   }
1058 }
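required_regions() rounds the humongous byte size up to whole regions, and the loop then trashes them tail-first so region printing never sees a continuation without its start. A sketch of the rounding, with an assumed region size (the real value comes from ShenandoahHeapRegion):

    #include <cstddef>
    #include <cstdio>

    static const size_t REGION_BYTES = 1024 * 1024; // assumed 1 MB regions

    // Model of ShenandoahHeapRegion::required_regions: round up to whole regions.
    size_t required_regions(size_t bytes) {
      return (bytes + REGION_BYTES - 1) / REGION_BYTES;
    }

    int main() {
      printf("%zu\n", required_regions(1));                // 1
      printf("%zu\n", required_regions(REGION_BYTES));     // 1: exact fit
      printf("%zu\n", required_regions(REGION_BYTES + 1)); // 2: needs a continuation
      return 0;
    }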
1059 
1060 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1061 public:
1062   void do_thread(Thread* thread) {


1831 
1832 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1833   if (has_forwarded_objects()) {
1834     set_gc_state_mask(MARKING | UPDATEREFS, in_progress);
1835   } else {
1836     set_gc_state_mask(MARKING, in_progress);
1837   }
1838   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1839 }
1840 
1841 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
1842   set_gc_state_mask(TRAVERSAL | HAS_FORWARDED | UPDATEREFS, in_progress);
1843   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1844 }
1845 
1846 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1847   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1848   set_gc_state_mask(EVACUATION, in_progress);
1849 }
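These setters all funnel into set_gc_state_mask(), which flips bits in a single gc-state word that the barriers consult; the callers only decide which bits travel together (e.g. MARKING plus UPDATEREFS while forwarded objects may still exist). A sketch of the set/clear semantics with assumed bit values (the real constants and the safepoint/thread-local handling live in ShenandoahHeap):

    #include <atomic>
    #include <cstdint>

    // Assumed bit assignments, for illustration only.
    enum GCStateBits {
      HAS_FORWARDED = 1 << 0,
      MARKING       = 1 << 1,
      EVACUATION    = 1 << 2,
      UPDATEREFS    = 1 << 3,
      TRAVERSAL     = 1 << 4
    };

    static std::atomic<uint8_t> g_gc_state(0);

    // Model of set_gc_state_mask: set all mask bits on entry to a phase,
    // clear them all on exit; barriers read g_gc_state to pick their fast path.
    void set_gc_state_mask(uint8_t mask, bool value) {
      if (value) {
        g_gc_state.fetch_or(mask, std::memory_order_release);
      } else {
        g_gc_state.fetch_and(static_cast<uint8_t>(~mask), std::memory_order_release);
      }
    }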
1850 
1851 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1852   // Initialize Brooks pointer for the next object
1853   HeapWord* result = obj + ShenandoahForwarding::word_size();
1854   ShenandoahForwarding::initialize(oop(result));
1855   return result;
1856 }
1857 
1858 void ShenandoahHeap::ref_processing_init() {
1859   assert(_max_workers > 0, "Sanity");
1860 
1861   _ref_processor =
1862     new ReferenceProcessor(&_subject_to_discovery,  // is_subject_to_discovery
1863                            ParallelRefProcEnabled,  // MT processing
1864                            _max_workers,            // Degree of MT processing
1865                            true,                    // MT discovery
1866                            _max_workers,            // Degree of MT discovery
1867                            false,                   // Reference discovery is not atomic
1868                            NULL,                    // No closure, should be installed before use
1869                            true);                   // Scale worker threads
1870 
1871   shenandoah_assert_rp_isalive_not_installed();
1872 }
1873 
1874 GCTracer* ShenandoahHeap::tracer() {
1875   return shenandoah_policy()->tracer();
1876 }
1877 


2799   assert(worker_id < _max_workers, "sanity");
2800   for (uint i = 0; i < num_regions(); i++) {
2801     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2802   }
2803 #endif
2804   return _liveness_cache[worker_id];
2805 }
2806 
2807 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2808   assert(worker_id < _max_workers, "sanity");
2809   assert(_liveness_cache != NULL, "sanity");
2810   jushort* ld = _liveness_cache[worker_id];
2811   for (uint i = 0; i < num_regions(); i++) {
2812     ShenandoahHeapRegion* r = get_region(i);
2813     jushort live = ld[i];
2814     if (live > 0) {
2815       r->increase_live_data_gc_words(live);
2816       ld[i] = 0;
2817     }
2818   }
2819 }
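The liveness cache gives each worker a private jushort-per-region array, so marking can bump live counts without atomic traffic; flush_liveness_cache() then folds one worker's counts into the shared region totals and zeroes the cache for reuse. A single-threaded model of the pattern (hypothetical types; a counter this narrow only works because it is flushed before it can overflow):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct LivenessModel {
      // [worker][region]: private, uncontended counters (like the jushort cache).
      std::vector<std::vector<uint16_t> > cache;
      // [region]: shared totals (the region's live data, in words).
      std::vector<size_t> region_live;

      LivenessModel(size_t workers, size_t regions)
        : cache(workers, std::vector<uint16_t>(regions, 0)),
          region_live(regions, 0) {}

      // Hot path: a plain private increment, no synchronization.
      void mark_live(size_t worker, size_t region, uint16_t words) {
        cache[worker][region] += words;
      }

      // Cold path: fold one worker's counts into the totals, then reset.
      void flush(size_t worker) {
        for (size_t r = 0; r < region_live.size(); r++) {
          uint16_t live = cache[worker][r];
          if (live > 0) {
            region_live[r] += live;
            cache[worker][r] = 0;
          }
        }
      }
    };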
2820 
2821 size_t ShenandoahHeap::obj_size(oop obj) const {
2822   return CollectedHeap::obj_size(obj) + ShenandoahForwarding::word_size();
2823 }
2824 
2825 ptrdiff_t ShenandoahHeap::cell_header_size() const {
2826   return ShenandoahForwarding::byte_size();
2827 }


  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 
  27 #include "gc/shared/gcTimer.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/memAllocator.hpp"
  30 #include "gc/shared/parallelCleaning.hpp"
  31 #include "gc/shared/plab.hpp"
  32 
  33 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
  34 #include "gc/shenandoah/shenandoahBarrierSet.hpp"

  35 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  36 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  37 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  38 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  39 #include "gc/shenandoah/shenandoahControlThread.hpp"
  40 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  41 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  42 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  44 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  45 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  46 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  47 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  48 #include "gc/shenandoah/shenandoahMetrics.hpp"
  49 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  50 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  51 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  52 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  53 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  54 #include "gc/shenandoah/shenandoahUtils.hpp"


 113     AbstractGangTask("Shenandoah Pretouch Bitmap"),
 114     _bitmap_base(bitmap_base),
 115     _bitmap_size(bitmap_size),
 116     _page_size(page_size) {}
 117 
 118   virtual void work(uint worker_id) {
 119     ShenandoahHeapRegion* r = _regions.next();
 120     while (r != NULL) {
 121       size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 122       size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 123       assert(end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
 124 
 125       os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 126 
 127       r = _regions.next();
 128     }
 129   }
 130 };
 131 
 132 jint ShenandoahHeap::initialize() {

 133 
 134   initialize_heuristics();
 135 
 136   //
 137   // Figure out heap sizing
 138   //
 139 
 140   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 141   size_t min_byte_size  = collector_policy()->min_heap_byte_size();
 142   size_t max_byte_size  = collector_policy()->max_heap_byte_size();
 143   size_t heap_alignment = collector_policy()->heap_alignment();
 144 
 145   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 146 
 147   if (ShenandoahAlwaysPreTouch) {
 148     // When pre-touch is enabled, the entire heap is committed right away.
 149     init_byte_size = max_byte_size;
 150   }
 151 
 152   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");


 835 
 836       // If we requested more than we were granted, give the rest back to pacer.
 837       // This only matters if we are in the same pacing epoch: do not try to unpace
 838       // over the budget for the other phase.
 839       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
 840         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
 841       }
 842     } else {
 843       increase_used(actual*HeapWordSize);
 844     }
 845   }
 846 
 847   return result;
 848 }
 849 
 850 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
 851   ShenandoahHeapLocker locker(lock());
 852   return _free_set->allocate(req, in_new_region);
 853 }
 854 


 855 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
 856                                         bool*  gc_overhead_limit_was_exceeded) {
 857   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
 858   return allocate_memory(req);
 859 }
 860 
 861 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
 862                                                              size_t size,
 863                                                              Metaspace::MetadataType mdtype) {
 864   MetaWord* result;
 865 
 866   // Inform GC heuristics of metaspace OOM if class unloading is possible.
 867   if (heuristics()->can_unload_classes()) {
 868     ShenandoahHeuristics* h = heuristics();
 869     h->record_metaspace_oom();
 870   }
 871 
 872   // Expand and retry allocation
 873   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 874   if (result != NULL) {


 877 
 878   // Start full GC
 879   collect(GCCause::_metadata_GC_clear_soft_refs);
 880 
 881   // Retry allocation
 882   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
 883   if (result != NULL) {
 884     return result;
 885   }
 886 
 887   // Expand and retry allocation
 888   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 889   if (result != NULL) {
 890     return result;
 891   }
 892 
 893   // Out of memory
 894   return NULL;
 895 }
 896 


 897 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
 898 private:
 899   ShenandoahHeap* const _heap;
 900   Thread* const _thread;
 901 public:
 902   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 903     _heap(heap), _thread(Thread::current()) {}
 904 
 905   void do_object(oop p) {
 906     shenandoah_assert_marked(NULL, p);
 907     if (!p->is_forwarded()) {
 908       _heap->evacuate_object(p, _thread);
 909     }
 910   }
 911 };
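The guard changed from resolving the Brooks pointer and comparing it to the object (old side) to a direct mark-word test, p->is_forwarded() (new side); unforwarded objects then go through evacuate_object(). The body of evacuate_object() is not in this hunk, but concurrent evacuation in this style is conventionally a copy-then-CAS race; a standalone model under that assumption (illustrative names, same 0b11 tag as before):

    #include <atomic>
    #include <cstdint>

    struct Obj {
      std::atomic<uintptr_t> mark;
      uint64_t payload;
    };
    static const uintptr_t FWD_TAG = 0x3; // assumed forwarded tag

    // Model: copy the object, then try to CAS the tagged forwardee into the
    // mark word. Exactly one thread wins; losers discard their speculative
    // copy and use the winner's. (Model assumption: only evacuation mutates
    // the mark, so a failed CAS means the object is now forwarded.)
    Obj* evacuate(Obj* from) {
      uintptr_t old_mark = from->mark.load(std::memory_order_acquire);
      if ((old_mark & FWD_TAG) == FWD_TAG) {
        return reinterpret_cast<Obj*>(old_mark & ~FWD_TAG); // already forwarded
      }
      Obj* copy = new Obj();
      copy->payload = from->payload;                        // copy the body
      copy->mark.store(old_mark, std::memory_order_relaxed);
      uintptr_t fwd = reinterpret_cast<uintptr_t>(copy) | FWD_TAG;
      if (from->mark.compare_exchange_strong(old_mark, fwd,
                                             std::memory_order_acq_rel)) {
        return copy;                                        // we won the race
      }
      delete copy;                                          // somebody else won
      return reinterpret_cast<Obj*>(old_mark & ~FWD_TAG);   // CAS refreshed old_mark
    }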
 912 
 913 class ShenandoahEvacuationTask : public AbstractGangTask {
 914 private:
 915   ShenandoahHeap* const _sh;
 916   ShenandoahCollectionSet* const _cs;
 917   bool _concurrent;
 918 public:
 919   ShenandoahEvacuationTask(ShenandoahHeap* sh,
 920                            ShenandoahCollectionSet* cs,
 921                            bool concurrent) :
 922     AbstractGangTask("Parallel Evacuation Task"),
 923     _sh(sh),
 924     _cs(cs),
 925     _concurrent(concurrent)
 926   {}
 927 


 967     r->make_trash();
 968   }
 969   collection_set()->clear();
 970 }
 971 
 972 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
 973   st->print_cr("Heap Regions:");
 974   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
 975   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
 976   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
 977   st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)");
 978 
 979   for (size_t i = 0; i < num_regions(); i++) {
 980     get_region(i)->print_on(st);
 981   }
 982 }
 983 
 984 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
 985   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
 986 
 987   oop humongous_obj = oop(start->bottom());
 988   size_t size = humongous_obj->size();
 989   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
 990   size_t index = start->region_number() + required_regions - 1;
 991 
 992   assert(!start->has_live(), "liveness must be zero");
 993 
 994   for (size_t i = 0; i < required_regions; i++) {
 995     // Reclaim from the tail. Otherwise, the assertion in region printing fails,
 996     // because it expects every humongous continuation to follow its humongous start region.
 997     ShenandoahHeapRegion* region = get_region(index--);
 998 
 999     assert(region->is_humongous(), "expect correct humongous start or continuation");
1000     assert(!region->is_cset(), "Humongous region should not be in collection set");
1001 
1002     region->make_trash_immediate();
1003   }
1004 }
1005 
1006 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1007 public:
1008   void do_thread(Thread* thread) {


1777 
1778 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1779   if (has_forwarded_objects()) {
1780     set_gc_state_mask(MARKING | UPDATEREFS, in_progress);
1781   } else {
1782     set_gc_state_mask(MARKING, in_progress);
1783   }
1784   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1785 }
1786 
1787 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
1788   set_gc_state_mask(TRAVERSAL | HAS_FORWARDED | UPDATEREFS, in_progress);
1789   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1790 }
1791 
1792 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1793   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1794   set_gc_state_mask(EVACUATION, in_progress);
1795 }
1796 


1797 void ShenandoahHeap::ref_processing_init() {
1798   assert(_max_workers > 0, "Sanity");
1799 
1800   _ref_processor =
1801     new ReferenceProcessor(&_subject_to_discovery,  // is_subject_to_discovery
1802                            ParallelRefProcEnabled,  // MT processing
1803                            _max_workers,            // Degree of MT processing
1804                            true,                    // MT discovery
1805                            _max_workers,            // Degree of MT discovery
1806                            false,                   // Reference discovery is not atomic
1807                            NULL,                    // No closure, should be installed before use
1808                            true);                   // Scale worker threads
1809 
1810   shenandoah_assert_rp_isalive_not_installed();
1811 }
1812 
1813 GCTracer* ShenandoahHeap::tracer() {
1814   return shenandoah_policy()->tracer();
1815 }
1816 


2738   assert(worker_id < _max_workers, "sanity");
2739   for (uint i = 0; i < num_regions(); i++) {
2740     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2741   }
2742 #endif
2743   return _liveness_cache[worker_id];
2744 }
2745 
2746 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2747   assert(worker_id < _max_workers, "sanity");
2748   assert(_liveness_cache != NULL, "sanity");
2749   jushort* ld = _liveness_cache[worker_id];
2750   for (uint i = 0; i < num_regions(); i++) {
2751     ShenandoahHeapRegion* r = get_region(i);
2752     jushort live = ld[i];
2753     if (live > 0) {
2754       r->increase_live_data_gc_words(live);
2755       ld[i] = 0;
2756     }
2757   }


2758 }