
src/share/vm/gc/shenandoah/shenandoahHeap.cpp

rev 13055 : Implement barriers for maintaining connection matrix.
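
The hunks below wire a ShenandoahConnectionMatrix into heap initialization, the start of concurrent marking, and collection-set preparation, replacing the previous approach of recomputing region connectivity from scratch in prepare_for_concurrent_evacuation(). As a rough mental model only (an illustrative sketch, not the class added by this patch: is_connected(), clear_all() and the NEW_C_HEAP_ARRAY/mtGC allocation style are taken from the hunks, while set_connected() and the flat byte-array layout are assumptions about what the store barrier would update):

// Illustrative sketch of a region-to-region connection matrix.
// A cell (from, to) is set once a reference from region 'from' into
// region 'to' has been observed by the (hypothetical) store barrier.
class ConnectionMatrixSketch : public CHeapObj<mtGC> {
private:
  size_t _num_regions;
  char*  _matrix;   // _num_regions * _num_regions cells, row-major

public:
  ConnectionMatrixSketch(size_t num_regions) :
    _num_regions(num_regions),
    _matrix(NEW_C_HEAP_ARRAY(char, num_regions * num_regions, mtGC)) {
    clear_all();
  }

  // Reset at the start of concurrent marking (see start_concurrent_marking()).
  void clear_all() {
    memset(_matrix, 0, _num_regions * _num_regions * sizeof(char));
  }

  // Assumed barrier hook: record that 'from_idx' now references 'to_idx'.
  void set_connected(size_t from_idx, size_t to_idx) {
    _matrix[from_idx * _num_regions + to_idx] = 1;
  }

  // Query used by the matrix verifier and collection-set heuristics.
  bool is_connected(size_t from_idx, size_t to_idx) const {
    return _matrix[from_idx * _num_regions + to_idx] != 0;
  }
};

The verification closures added further down walk all marked objects after marking and guarantee that every observed inter-region reference has its matrix cell set.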


 246 
 247   if (ShenandoahAlwaysPreTouch) {
 248     assert (!AlwaysPreTouch, "Should have been overridden");
 249 
 250     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 251     // before initialize() below zeroes it with initializing thread. For any given region,
 252     // we touch the region and the corresponding bitmaps from the same thread.
 253 
 254     log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
 255                        _ordered_regions->count(), page_size);
 256     ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), bitmap_size, page_size);
 257     _workers->run_task(&cl);
 258   }
 259 
 260   _mark_bit_map0.initialize(heap_region, bitmap_region0);
 261   _complete_mark_bit_map = &_mark_bit_map0;
 262 
 263   _mark_bit_map1.initialize(heap_region, bitmap_region1);
 264   _next_mark_bit_map = &_mark_bit_map1;
 265 


 266   _monitoring_support = new ShenandoahMonitoringSupport(this);
 267 
 268   _concurrent_gc_thread = new ShenandoahConcurrentThread();
 269 
 270   ShenandoahMarkCompact::initialize();
 271 
 272   return JNI_OK;
 273 }
 274 
 275 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 276   CollectedHeap(),
 277   _shenandoah_policy(policy),
 278   _concurrent_mark_in_progress(0),
 279   _evacuation_in_progress(0),
 280   _full_gc_in_progress(false),
 281   _free_regions(NULL),
 282   _collection_set(NULL),
 283   _bytes_allocated_since_cm(0),
 284   _bytes_allocated_during_cm(0),
 285   _max_allocated_gc(0),
 286   _allocated_last_gc(0),
 287   _used_start_gc(0),
 288   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 289   _ref_processor(NULL),
 290   _in_cset_fast_test(NULL),
 291   _in_cset_fast_test_base(NULL),
 292   _next_top_at_mark_starts(NULL),
 293   _next_top_at_mark_starts_base(NULL),
 294   _complete_top_at_mark_starts(NULL),
 295   _complete_top_at_mark_starts_base(NULL),
 296   _mark_bit_map0(),
 297   _mark_bit_map1(),

 298   _cancelled_concgc(false),
 299   _need_update_refs(false),
 300   _need_reset_bitmaps(false),
 301   _heap_lock(0),
 302 #ifdef ASSERT
 303   _heap_lock_owner(NULL),
 304 #endif
 305   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
 306 
 307 {
 308   log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
 309   log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
 310   log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
 311 
 312   _scm = new ShenandoahConcurrentMark();
 313   _used = 0;
 314 
 315   _max_workers = MAX2(_max_workers, 1U);
 316   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 317                             /* are_GC_task_threads */true,


1118     return false;
1119   }
1120 };
1121 #endif
1122 
1123 void ShenandoahHeap::prepare_for_concurrent_evacuation() {
1124   assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");
1125 
1126   log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());
1127 
1128   if (!cancelled_concgc()) {
1129 
1130     recycle_dirty_regions();
1131 
1132     ensure_parsability(true);
1133 
1134 #ifdef ASSERT
1135     if (ShenandoahVerify) {
1136       verify_heap_after_marking();
1137     }
1138 #endif
1139 
1140     // NOTE: This needs to be done during a stop the world pause, because
1141     // putting regions into the collection set concurrently with Java threads
1142     // will create a race. In particular, acmp could fail because when we
1143     // resolve the first operand, the containing region might not yet be in
1144     // the collection set, and thus return the original oop. When the 2nd
1145     // operand gets resolved, the region could be in the collection set
1146     // and the oop gets evacuated. If both operands have originally been
1147     // the same, we get false negatives.
1148 
1149     {
1150       ShenandoahHeapLock lock(this);
1151       _collection_set->clear();
1152       _free_regions->clear();
1153 
1154       ShenandoahReclaimHumongousRegionsClosure reclaim;
1155       heap_region_iterate(&reclaim);
1156 
1157 #ifdef ASSERT
1158       CheckCollectionSetClosure ccsc;
1159       _ordered_regions->heap_region_iterate(&ccsc);
1160 #endif
1161 
1162     if (UseShenandoahMatrix) {
1163       int num = num_regions();
1164       int *connections = NEW_C_HEAP_ARRAY(int, num * num, mtGC);
1165       calculate_matrix(connections);
1166       print_matrix(connections);
1167       _shenandoah_policy->choose_collection_set(_collection_set, connections);
1168       FREE_C_HEAP_ARRAY(int,connections);
1169     } else {
1170       _shenandoah_policy->choose_collection_set(_collection_set);
1171     }
1172 
1173     _shenandoah_policy->choose_free_set(_free_regions);
1174     }
1175 
1176     if (UseShenandoahMatrix) {
1177       _collection_set->print();
1178     }
1179 
1180     _bytes_allocated_since_cm = 0;
1181 
1182     Universe::update_heap_info_at_gc();
1183   }
1184 }
1185 
1186 
1187 class RetireTLABClosure : public ThreadClosure {
1188 private:
1189   bool _retire;
1190 
1191 public:
1192   RetireTLABClosure(bool retire) : _retire(retire) {
1193   }
1194 
1195   void do_thread(Thread* thread) {
1196     thread->gclab().make_parsable(_retire);
1197   }
1198 };
1199 


1771       continue;
1772     }
1773     if (blk->doHeapRegion(current)) {
1774       return;
1775     }
1776   }
1777 }
1778 
1779 class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
1780   ShenandoahHeap* sh;
1781 public:
1782   ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
1783 
1784   bool doHeapRegion(ShenandoahHeapRegion* r) {
1785     r->clear_live_data();
1786     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1787     return false;
1788   }
1789 };
1790 
1791 
1792 void ShenandoahHeap::start_concurrent_marking() {
1793 
1794   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
1795   accumulate_statistics_all_tlabs();
1796   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);
1797 
1798   set_concurrent_mark_in_progress(true);
1799   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1800   if (UseTLAB) {
1801     shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
1802     ensure_parsability(true);
1803     shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
1804   }
1805 
1806   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1807   _used_start_gc = used();
1808 
1809 #ifdef ASSERT
1810   if (ShenandoahDumpHeapBeforeConcurrentMark) {
1811     ensure_parsability(false);
1812     print_all_refs("pre-mark");
1813   }
1814 #endif
1815 
1816   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
1817   ClearLivenessClosure clc(this);
1818   heap_region_iterate(&clc);
1819   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);
1820 



1821   // print_all_refs("pre -mark");
1822 
1823   // oopDesc::_debug = true;
1824 
1825   // Make above changes visible to worker threads
1826   OrderAccess::fence();
1827 
1828   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
1829   concurrentMark()->init_mark_roots();
1830   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);
1831 
1832   //  print_all_refs("pre-mark2");
1833 }
1834 
1835 class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
1836 
1837   ShenandoahHeap* _sh;
1838 
1839 public:
1840   VerifyAfterEvacuationClosure() : _sh ( ShenandoahHeap::heap() ) {}


1883     return false;
1884   }
1885 };
1886 
1887 void ShenandoahHeap::swap_mark_bitmaps() {
1888   // Swap bitmaps.
1889   CMBitMap* tmp1 = _complete_mark_bit_map;
1890   _complete_mark_bit_map = _next_mark_bit_map;
1891   _next_mark_bit_map = tmp1;
1892 
1893   // Swap top-at-mark-start pointers
1894   HeapWord** tmp2 = _complete_top_at_mark_starts;
1895   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1896   _next_top_at_mark_starts = tmp2;
1897 
1898   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1899   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1900   _next_top_at_mark_starts_base = tmp3;
1901 }
1902 
1903 void ShenandoahHeap::stop_concurrent_marking() {
1904   assert(concurrent_mark_in_progress(), "How else could we get here?");
1905   if (! cancelled_concgc()) {
1906     // If we needed to update refs, and concurrent marking has been cancelled,
1907     // we need to finish updating references.
1908     set_need_update_refs(false);
1909     swap_mark_bitmaps();
1910   }
1911   set_concurrent_mark_in_progress(false);
1912 
1913   if (log_is_enabled(Trace, gc, region)) {
1914     ResourceMark rm;
1915     outputStream* out = Log(gc, region)::trace_stream();
1916     print_heap_regions(out);
1917   }
1918 
1919 }
1920 
1921 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1922   _concurrent_mark_in_progress = in_progress ? 1 : 0;


2423 public:
2424   ShenandoahCountGarbageClosure() : _garbage(0) {
2425   }
2426 
2427   bool doHeapRegion(ShenandoahHeapRegion* r) {
2428     if (! r->is_humongous() && ! r->is_pinned() && ! r->in_collection_set()) {
2429       _garbage += r->garbage();
2430     }
2431     return false;
2432   }
2433 
2434   size_t garbage() {
2435     return _garbage;
2436   }
2437 };
2438 
2439 size_t ShenandoahHeap::garbage() {
2440   ShenandoahCountGarbageClosure cl;
2441   heap_region_iterate(&cl);
2442   return cl.garbage();




2443 }
2444 
2445 #ifdef ASSERT
2446 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2447   assert(_heap_lock == locked, "must be locked");
2448   assert(_heap_lock_owner == Thread::current(), "must be owned by current thread");
2449 }
2450 
2451 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2452   Thread* thr = Thread::current();
2453   assert((_heap_lock == locked && _heap_lock_owner == thr) ||
2454          (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()),
2455   "must own heap lock or be VM thread at safepoint");
2456 }
2457 
2458 #endif


 246 
 247   if (ShenandoahAlwaysPreTouch) {
 248     assert (!AlwaysPreTouch, "Should have been overridden");
 249 
 250     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 251     // before initialize() below zeroes it with initializing thread. For any given region,
 252     // we touch the region and the corresponding bitmaps from the same thread.
 253 
 254     log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
 255                        _ordered_regions->count(), page_size);
 256     ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), bitmap_size, page_size);
 257     _workers->run_task(&cl);
 258   }
 259 
 260   _mark_bit_map0.initialize(heap_region, bitmap_region0);
 261   _complete_mark_bit_map = &_mark_bit_map0;
 262 
 263   _mark_bit_map1.initialize(heap_region, bitmap_region1);
 264   _next_mark_bit_map = &_mark_bit_map1;
 265 
 266   _connection_matrix = new ShenandoahConnectionMatrix(_max_regions);
 267 
 268   _monitoring_support = new ShenandoahMonitoringSupport(this);
 269 
 270   _concurrent_gc_thread = new ShenandoahConcurrentThread();
 271 
 272   ShenandoahMarkCompact::initialize();
 273 
 274   return JNI_OK;
 275 }
 276 
 277 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 278   CollectedHeap(),
 279   _shenandoah_policy(policy),
 280   _concurrent_mark_in_progress(0),
 281   _evacuation_in_progress(0),
 282   _full_gc_in_progress(false),
 283   _free_regions(NULL),
 284   _collection_set(NULL),
 285   _bytes_allocated_since_cm(0),
 286   _bytes_allocated_during_cm(0),
 287   _max_allocated_gc(0),
 288   _allocated_last_gc(0),
 289   _used_start_gc(0),
 290   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 291   _ref_processor(NULL),
 292   _in_cset_fast_test(NULL),
 293   _in_cset_fast_test_base(NULL),
 294   _next_top_at_mark_starts(NULL),
 295   _next_top_at_mark_starts_base(NULL),
 296   _complete_top_at_mark_starts(NULL),
 297   _complete_top_at_mark_starts_base(NULL),
 298   _mark_bit_map0(),
 299   _mark_bit_map1(),
 300   _connection_matrix(NULL),
 301   _cancelled_concgc(false),
 302   _need_update_refs(false),
 303   _need_reset_bitmaps(false),
 304   _heap_lock(0),
 305 #ifdef ASSERT
 306   _heap_lock_owner(NULL),
 307 #endif
 308   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer())
 309 
 310 {
 311   log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
 312   log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
 313   log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
 314 
 315   _scm = new ShenandoahConcurrentMark();
 316   _used = 0;
 317 
 318   _max_workers = MAX2(_max_workers, 1U);
 319   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 320                             /* are_GC_task_threads */true,


1121     return false;
1122   }
1123 };
1124 #endif
1125 
1126 void ShenandoahHeap::prepare_for_concurrent_evacuation() {
1127   assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");
1128 
1129   log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());
1130 
1131   if (!cancelled_concgc()) {
1132 
1133     recycle_dirty_regions();
1134 
1135     ensure_parsability(true);
1136 
1137 #ifdef ASSERT
1138     if (ShenandoahVerify) {
1139       verify_heap_after_marking();
1140     }
1141 
1142     if (UseShenandoahMatrix) {
1143       if (PrintShenandoahMatrix) {
1144         connection_matrix()->print_on(tty);
1145       }
1146       if (VerifyShenandoahMatrix) {
1147         verify_matrix();
1148       }
1149     }
1150 #endif
1151 
1152     // NOTE: This needs to be done during a stop the world pause, because
1153     // putting regions into the collection set concurrently with Java threads
1154     // will create a race. In particular, acmp could fail because when we
1155     // resolve the first operand, the containing region might not yet be in
1156     // the collection set, and thus return the original oop. When the 2nd
1157     // operand gets resolved, the region could be in the collection set
1158     // and the oop gets evacuated. If both operands have originally been
1159     // the same, we get false negatives.
1160 
1161     {
1162       ShenandoahHeapLock lock(this);
1163       _collection_set->clear();
1164       _free_regions->clear();
1165 
1166       ShenandoahReclaimHumongousRegionsClosure reclaim;
1167       heap_region_iterate(&reclaim);
1168 
1169 #ifdef ASSERT
1170       CheckCollectionSetClosure ccsc;
1171       _ordered_regions->heap_region_iterate(&ccsc);
1172 #endif
1173 
1174       _shenandoah_policy->choose_collection_set(_collection_set);

1175 
1176       _shenandoah_policy->choose_free_set(_free_regions);
1177     }
1178 




1179     _bytes_allocated_since_cm = 0;
1180 
1181     Universe::update_heap_info_at_gc();
1182   }
1183 }
1184 
1185 
1186 class RetireTLABClosure : public ThreadClosure {
1187 private:
1188   bool _retire;
1189 
1190 public:
1191   RetireTLABClosure(bool retire) : _retire(retire) {
1192   }
1193 
1194   void do_thread(Thread* thread) {
1195     thread->gclab().make_parsable(_retire);
1196   }
1197 };
1198 


1770       continue;
1771     }
1772     if (blk->doHeapRegion(current)) {
1773       return;
1774     }
1775   }
1776 }
1777 
1778 class ClearLivenessClosure : public ShenandoahHeapRegionClosure {
1779   ShenandoahHeap* sh;
1780 public:
1781   ClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) { }
1782 
1783   bool doHeapRegion(ShenandoahHeapRegion* r) {
1784     r->clear_live_data();
1785     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1786     return false;
1787   }
1788 };
1789 

1790 void ShenandoahHeap::start_concurrent_marking() {
1791 
1792   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::accumulate_stats);
1793   accumulate_statistics_all_tlabs();
1794   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::accumulate_stats);
1795 
1796   set_concurrent_mark_in_progress(true);
1797   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1798   if (UseTLAB) {
1799     shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::make_parsable);
1800     ensure_parsability(true);
1801     shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::make_parsable);
1802   }
1803 
1804   _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
1805   _used_start_gc = used();
1806 
1807 #ifdef ASSERT
1808   if (ShenandoahDumpHeapBeforeConcurrentMark) {
1809     ensure_parsability(false);
1810     print_all_refs("pre-mark");
1811   }
1812 #endif
1813 
1814   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::clear_liveness);
1815   ClearLivenessClosure clc(this);
1816   heap_region_iterate(&clc);
1817   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::clear_liveness);
1818 
1819   if (UseShenandoahMatrix) {
1820     connection_matrix()->clear_all();
1821   }
1822   // print_all_refs("pre -mark");
1823 
1824   // oopDesc::_debug = true;
1825 
1826   // Make above changes visible to worker threads
1827   OrderAccess::fence();
1828 
1829   shenandoahPolicy()->record_phase_start(ShenandoahCollectorPolicy::scan_roots);
1830   concurrentMark()->init_mark_roots();
1831   shenandoahPolicy()->record_phase_end(ShenandoahCollectorPolicy::scan_roots);
1832 
1833   //  print_all_refs("pre-mark2");
1834 }
1835 
1836 class VerifyAfterEvacuationClosure : public ExtendedOopClosure {
1837 
1838   ShenandoahHeap* _sh;
1839 
1840 public:
1841   VerifyAfterEvacuationClosure() : _sh ( ShenandoahHeap::heap() ) {}


1884     return false;
1885   }
1886 };
1887 
1888 void ShenandoahHeap::swap_mark_bitmaps() {
1889   // Swap bitmaps.
1890   CMBitMap* tmp1 = _complete_mark_bit_map;
1891   _complete_mark_bit_map = _next_mark_bit_map;
1892   _next_mark_bit_map = tmp1;
1893 
1894   // Swap top-at-mark-start pointers
1895   HeapWord** tmp2 = _complete_top_at_mark_starts;
1896   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1897   _next_top_at_mark_starts = tmp2;
1898 
1899   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1900   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1901   _next_top_at_mark_starts_base = tmp3;
1902 }
1903 
1904 class ShenandoahVerifyMatrixOopClosure : public ExtendedOopClosure {
1905 private:
1906   oop _obj;
1907 
1908   template <class T>
1909   inline void do_oop_nv(T* p) {
1910     T o = oopDesc::load_heap_oop(p);
1911     if (! oopDesc::is_null(o)) {
1912       oop obj = oopDesc::decode_heap_oop_not_null(o);
1913       ShenandoahHeap* heap = ShenandoahHeap::heap();
1914       guarantee(heap->is_marked_complete(obj), "must be marked");
1915 
1916       uint from_idx = heap->heap_region_index_containing(p);
1917       uint to_idx = heap->heap_region_index_containing(obj);
1918       if (!heap->connection_matrix()->is_connected(from_idx, to_idx)) {
1919         tty->print_cr("from-obj: ");
1920         _obj->print_on(tty);
1921         tty->print_cr("to-obj:");
1922         obj->print_on(tty);
1923         tty->print_cr("from-obj allocated after mark: %s", BOOL_TO_STR(heap->allocated_after_complete_mark_start((HeapWord*) _obj)));
1924         tty->print_cr("to-obj allocated after mark: %s", BOOL_TO_STR(heap->allocated_after_complete_mark_start((HeapWord*) obj)));
1925         tty->print_cr("from-obj marked: %s", BOOL_TO_STR(heap->is_marked_complete(_obj)));
1926         tty->print_cr("to-obj marked: %s", BOOL_TO_STR(heap->is_marked_complete(obj)));
1927         tty->print_cr("from-idx: %u, to-idx: %u", from_idx, to_idx);
1928 
1929         oop fwd_from = BrooksPointer::forwardee(_obj);
1930         oop fwd_to = BrooksPointer::forwardee(obj);
1931         tty->print_cr("from-obj forwardee: " PTR_FORMAT, p2i(fwd_from));
1932         tty->print_cr("to-obj forwardee: " PTR_FORMAT, p2i(fwd_to));
1933         tty->print_cr("forward(from-obj) marked: %s", BOOL_TO_STR(heap->is_marked_complete(fwd_from)));
1934         tty->print_cr("forward(to-obj) marked: %s", BOOL_TO_STR(heap->is_marked_complete(fwd_to)));
1935         uint fwd_from_idx = heap->heap_region_index_containing(fwd_from);
1936         uint fwd_to_idx = heap->heap_region_index_containing(fwd_to);
1937         tty->print_cr("forward(from-idx): %u, forward(to-idx): %u", fwd_from_idx, fwd_to_idx);
1938         tty->print_cr("forward(from) connected with forward(to)? %s", BOOL_TO_STR(heap->connection_matrix()->is_connected(fwd_from_idx, fwd_to_idx)));
1939         tty->print_cr("sizeof(bool): %lu", sizeof(bool));
1940       }
1941       guarantee(oopDesc::unsafe_equals(ShenandoahBarrierSet::resolve_oop_static_not_null(obj), obj), "must not be forwarded");
1942       guarantee(heap->connection_matrix()->is_connected(from_idx, to_idx), "must be connected");
1943     }
1944   }
1945 
1946 public:
1947   ShenandoahVerifyMatrixOopClosure(oop obj) : _obj(obj) {}
1948 
1949   void do_oop(oop* o) {
1950     do_oop_nv(o);
1951   }
1952 
1953   void do_oop(narrowOop* o) {
1954     do_oop_nv(o);
1955   }
1956 };
1957 
1958 class ShenandoahVerifyMatrixObjectClosure : public ObjectClosure {
1959 public:
1960   void do_object(oop obj) {
1961     guarantee(ShenandoahHeap::heap()->is_marked_complete(obj), "must be marked");
1962     ShenandoahVerifyMatrixOopClosure cl(obj);
1963     obj->oop_iterate(&cl);
1964   }
1965 
1966 };
1967 
1968 class ShenandoahVerifyMatrixRegionClosure : public ShenandoahHeapRegionClosure {
1969   bool doHeapRegion(ShenandoahHeapRegion* r) {
1970     ShenandoahVerifyMatrixObjectClosure cl;
1971     ShenandoahHeap::heap()->marked_object_iterate(r, &cl);
1972     return false;
1973   }
1974 };
1975 
1976 void ShenandoahHeap::verify_matrix() {
1977   OrderAccess::fence();
1978   ensure_parsability(false);
1979   ShenandoahVerifyMatrixRegionClosure cl;
1980   heap_region_iterate(&cl, true, true);
1981 }
1982 
1983 void ShenandoahHeap::stop_concurrent_marking() {
1984   assert(concurrent_mark_in_progress(), "How else could we get here?");
1985   if (! cancelled_concgc()) {
1986     // If we needed to update refs, and concurrent marking has been cancelled,
1987     // we need to finish updating references.
1988     set_need_update_refs(false);
1989     swap_mark_bitmaps();
1990   }
1991   set_concurrent_mark_in_progress(false);
1992 
1993   if (log_is_enabled(Trace, gc, region)) {
1994     ResourceMark rm;
1995     outputStream* out = Log(gc, region)::trace_stream();
1996     print_heap_regions(out);
1997   }
1998 
1999 }
2000 
2001 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
2002   _concurrent_mark_in_progress = in_progress ? 1 : 0;


2503 public:
2504   ShenandoahCountGarbageClosure() : _garbage(0) {
2505   }
2506 
2507   bool doHeapRegion(ShenandoahHeapRegion* r) {
2508     if (! r->is_humongous() && ! r->is_pinned() && ! r->in_collection_set()) {
2509       _garbage += r->garbage();
2510     }
2511     return false;
2512   }
2513 
2514   size_t garbage() {
2515     return _garbage;
2516   }
2517 };
2518 
2519 size_t ShenandoahHeap::garbage() {
2520   ShenandoahCountGarbageClosure cl;
2521   heap_region_iterate(&cl);
2522   return cl.garbage();
2523 }
2524 
2525 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() {
2526   return _connection_matrix;
2527 }
2528 
2529 #ifdef ASSERT
2530 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2531   assert(_heap_lock == locked, "must be locked");
2532   assert(_heap_lock_owner == Thread::current(), "must be owned by current thread");
2533 }
2534 
2535 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2536   Thread* thr = Thread::current();
2537   assert((_heap_lock == locked && _heap_lock_owner == thr) ||
2538          (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()),
2539   "must own heap lock or be VM thread at safepoint");
2540 }
2541 
2542 #endif
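
To exercise the new paths, the flags referenced in these hunks are UseShenandoahMatrix (gates clearing the matrix at the start of marking and the debug-only printing/verification in prepare_for_concurrent_evacuation), plus PrintShenandoahMatrix and VerifyShenandoahMatrix, which are only consulted inside an #ifdef ASSERT block, so a debug build is likely required for them. A hedged example command line (flag names are from the hunks above; whether any of them additionally need an unlock flag is not shown in this diff, and YourApp is a placeholder):

  java -XX:+UseShenandoahGC -XX:+UseShenandoahMatrix \
       -XX:+PrintShenandoahMatrix -XX:+VerifyShenandoahMatrix \
       -Xlog:gc+region=trace YourApp

The -Xlog:gc+region=trace selector corresponds to the log_is_enabled(Trace, gc, region) check in stop_concurrent_marking(), which prints the heap regions after each marking cycle.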