65 oop obj = oopDesc::decode_heap_oop_not_null(o);
66 assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
67 "need to-space object here obj: "PTR_FORMAT" , rb(obj): "PTR_FORMAT", p: "PTR_FORMAT,
68 p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
69 }
70 }
71
// Virtual dispatch entry points; both delegate to the non-virtual template worker.
void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void ShenandoahAssertToSpaceClosure::do_oop(oop* p) { do_oop_nv(p); }
74 #endif
75
// Human-readable collector name, reported via the CollectedHeap interface.
const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}
79
80 class ShenandoahPretouchTask : public AbstractGangTask {
81 private:
82 ShenandoahHeapRegionSet* _regions;
83 const size_t _bitmap_size;
84 const size_t _page_size;
85 char* _bitmap0_base;
86 char* _bitmap1_base;
87 public:
88 ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
89 char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
90 size_t page_size) :
91 AbstractGangTask("Shenandoah PreTouch",
92 Universe::is_fully_initialized() ? GCId::current_raw() :
93 // During VM initialization there is
94 // no GC cycle that this task can be
95 // associated with.
96 GCId::undefined()),
97 _bitmap0_base(bitmap0_base),
98 _bitmap1_base(bitmap1_base),
99 _regions(regions),
100 _bitmap_size(bitmap_size),
101 _page_size(page_size) {
102 _regions->clear_current_index();
103 };
104
105 virtual void work(uint worker_id) {
106 ShenandoahHeapRegion* r = _regions->claim_next();
107 while (r != NULL) {
108 log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
109 r->region_number(), p2i(r->bottom()), p2i(r->end()));
110 os::pretouch_memory(r->bottom(), r->end(), _page_size);
111
112 size_t start = r->region_number() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
113 size_t end = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
114 assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
115
116 log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
117 r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
118 os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);
119
120 log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
121 r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
122 os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);
123
124 r = _regions->claim_next();
125 }
126 }
127 };
128
129 jint ShenandoahHeap::initialize() {
130 CollectedHeap::pre_initialize();
131
132 BrooksPointer::initial_checks();
133
134 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
135 size_t max_byte_size = collector_policy()->max_heap_byte_size();
136 size_t heap_alignment = collector_policy()->heap_alignment();
137
138 if (ShenandoahAlwaysPreTouch) {
139 // Enabled pre-touch means the entire heap is committed right away.
140 init_byte_size = max_byte_size;
141 }
142
154 set_barrier_set(new ShenandoahBarrierSet(this));
155 ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
156
157 _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
158 size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
159 _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
160 _committed = _initial_size;
161
162 log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
163 if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
164 vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
165 }
166
167 size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
168
169 _ordered_regions = new ShenandoahHeapRegionSet(_num_regions);
170 _free_regions = new ShenandoahFreeSet(_ordered_regions, _num_regions);
171
172 _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());
173
174 _next_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
175 _next_top_at_mark_starts = _next_top_at_mark_starts_base -
176 ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
177
178 _complete_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
179 _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
180 ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
181
182 {
183 ShenandoahHeapLocker locker(lock());
184 for (size_t i = 0; i < _num_regions; i++) {
185 ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
186 (HeapWord*) pgc_rs.base() + reg_size_words * i,
187 reg_size_words,
188 i,
189 i < num_committed_regions);
190
191 _complete_top_at_mark_starts_base[i] = r->bottom();
192 _next_top_at_mark_starts_base[i] = r->bottom();
193
194 // Add to ordered regions first.
195 // We use the active size of ordered regions as the number of active regions in heap,
196 // free set and collection set use the number to assert the correctness of incoming regions.
197 _ordered_regions->add_region(r);
198 _free_regions->add_region(r);
199 assert(!collection_set()->is_in(i), "New region should not be in collection set");
200 }
201 }
202
203 assert(_ordered_regions->active_regions() == _num_regions, "Must match");
204 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
205 "misaligned heap: "PTR_FORMAT, p2i(base()));
206
207 LogTarget(Trace, gc, region) lt;
208 if (lt.is_enabled()) {
209 ResourceMark rm;
210 LogStream ls(lt);
211 log_trace(gc, region)("All Regions");
212 _ordered_regions->print_on(&ls);
226 // Reserve space for prev and next bitmap.
227 _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
228 _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
229
230 size_t bitmap_bytes_per_region = _bitmap_size / _num_regions;
231 _bitmap_words_per_region = bitmap_bytes_per_region / HeapWordSize;
232
233 guarantee(bitmap_bytes_per_region != 0,
234 "Bitmap bytes per region should not be zero");
235 guarantee(is_power_of_2(bitmap_bytes_per_region),
236 "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
237 guarantee((bitmap_bytes_per_region % os::vm_page_size()) == 0,
238 "Bitmap bytes per region should be page-granular: bpr = " SIZE_FORMAT ", page size = %d",
239 bitmap_bytes_per_region, os::vm_page_size());
240 guarantee(is_power_of_2(_bitmap_words_per_region),
241 "Bitmap words per region Should be power of two: " SIZE_FORMAT, _bitmap_words_per_region);
242
243 size_t bitmap_page_size = UseLargePages && (bitmap_bytes_per_region >= (size_t)os::large_page_size()) ?
244 (size_t)os::large_page_size() : (size_t)os::vm_page_size();
245
246 ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
247 MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
248 _bitmap0_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);
249
250 ReservedSpace bitmap1(_bitmap_size, bitmap_page_size);
251 MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
252 _bitmap1_region = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);
253
254 {
255 ShenandoahHeapLocker locker(lock());
256 for (size_t i = 0; i < _num_regions; i++) {
257 ShenandoahHeapRegion* r = _ordered_regions->get(i);
258 if (r->is_committed()) {
259 commit_bitmaps(r);
260 }
261 }
262 }
263
264 size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
265
266 if (ShenandoahVerify) {
267 ReservedSpace verify_bitmap(_bitmap_size, page_size);
268 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
269 "couldn't allocate verification bitmap");
270 MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
271 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
272 _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
273 _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
274 }
275
276 if (ShenandoahAlwaysPreTouch) {
277 assert (!AlwaysPreTouch, "Should have been overridden");
278
279 // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
280 // before initialize() below zeroes it with initializing thread. For any given region,
281 // we touch the region and the corresponding bitmaps from the same thread.
282
283 log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
284 _ordered_regions->count(), page_size);
285 ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
286 _workers->run_task(&cl);
287 }
288
289 _mark_bit_map0.initialize(_heap_region, _bitmap0_region);
290 _complete_mark_bit_map = &_mark_bit_map0;
291
292 _mark_bit_map1.initialize(_heap_region, _bitmap1_region);
293 _next_mark_bit_map = &_mark_bit_map1;
294
295 if (UseShenandoahMatrix) {
296 _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
297 } else {
298 _connection_matrix = NULL;
299 }
300
301 _partial_gc = _shenandoah_policy->can_do_partial_gc() ?
302 new ShenandoahPartialGC(this, _num_regions) :
303 NULL;
304
305 _monitoring_support = new ShenandoahMonitoringSupport(this);
306
307 _phase_timings = new ShenandoahPhaseTimings();
308
309 if (ShenandoahAllocationTrace) {
310 _alloc_tracker = new ShenandoahAllocTracker();
311 }
312
313 ShenandoahStringDedup::initialize();
320
321 return JNI_OK;
322 }
323
324 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
325 CollectedHeap(),
326 _shenandoah_policy(policy),
327 _concurrent_mark_in_progress(0),
328 _evacuation_in_progress(0),
329 _full_gc_in_progress(false),
330 _update_refs_in_progress(false),
331 _concurrent_partial_in_progress(false),
332 _free_regions(NULL),
333 _collection_set(NULL),
334 _bytes_allocated_since_cm(0),
335 _bytes_allocated_during_cm(0),
336 _allocated_last_gc(0),
337 _used_start_gc(0),
338 _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
339 _ref_processor(NULL),
340 _next_top_at_mark_starts(NULL),
341 _next_top_at_mark_starts_base(NULL),
342 _complete_top_at_mark_starts(NULL),
343 _complete_top_at_mark_starts_base(NULL),
344 _mark_bit_map0(),
345 _mark_bit_map1(),
346 _connection_matrix(NULL),
347 _cancelled_concgc(0),
348 _need_update_refs(false),
349 _need_reset_bitmaps(false),
350 _verifier(NULL),
351 _heap_lock(0),
352 _used_at_last_gc(0),
353 _alloc_seq_at_last_gc_start(0),
354 _alloc_seq_at_last_gc_end(0),
355 _safepoint_workers(NULL),
356 #ifdef ASSERT
357 _heap_lock_owner(NULL),
358 _heap_expansion_count(0),
359 #endif
360 _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
361 _phase_timings(NULL),
362 _alloc_tracker(NULL)
363 {
364 log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
365 log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
366 log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
367
368 _scm = new ShenandoahConcurrentMark();
369 _used = 0;
370
371 _max_workers = MAX2(_max_workers, 1U);
372 _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
373 /* are_GC_task_threads */true,
374 /* are_ConcurrentGC_threads */false);
375 if (_workers == NULL) {
376 vm_exit_during_initialization("Failed necessary allocation.");
377 } else {
378 _workers->initialize_workers();
379 }
380
381 if (ParallelSafepointCleanupThreads > 1) {
382 _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
383 ParallelSafepointCleanupThreads,
384 false, false);
385 _safepoint_workers->initialize_workers();
386 }
387 }
388
389 class ShenandoahResetNextBitmapTask : public AbstractGangTask {
390 private:
391 ShenandoahHeapRegionSet* _regions;
392
393 public:
394 ShenandoahResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
395 AbstractGangTask("Parallel Reset Bitmap Task"),
396 _regions(regions) {
397 _regions->clear_current_index();
398 }
399
400 void work(uint worker_id) {
401 ShenandoahHeapRegion* region = _regions->claim_next();
402 ShenandoahHeap* heap = ShenandoahHeap::heap();
403 while (region != NULL) {
404 if (region->is_committed()) {
405 HeapWord* bottom = region->bottom();
406 HeapWord* top = heap->next_top_at_mark_start(region->bottom());
407 if (top > bottom) {
408 heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
409 }
410 assert(heap->is_next_bitmap_clear_range(bottom, region->end()), "must be clear");
411 }
412 region = _regions->claim_next();
413 }
414 }
415 };
416
417 void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
418 assert_gc_workers(workers->active_workers());
419
420 ShenandoahResetNextBitmapTask task = ShenandoahResetNextBitmapTask(_ordered_regions);
421 workers->run_task(&task);
422 }
423
424 class ShenandoahResetCompleteBitmapTask : public AbstractGangTask {
425 private:
426 ShenandoahHeapRegionSet* _regions;
427
428 public:
429 ShenandoahResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
430 AbstractGangTask("Parallel Reset Bitmap Task"),
431 _regions(regions) {
432 _regions->clear_current_index();
433 }
434
435 void work(uint worker_id) {
436 ShenandoahHeapRegion* region = _regions->claim_next();
437 ShenandoahHeap* heap = ShenandoahHeap::heap();
438 while (region != NULL) {
439 if (region->is_committed()) {
440 HeapWord* bottom = region->bottom();
441 HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
442 if (top > bottom) {
443 heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
444 }
445 assert(heap->is_complete_bitmap_clear_range(bottom, region->end()), "must be clear");
446 }
447 region = _regions->claim_next();
448 }
449 }
450 };
451
452 void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
453 assert_gc_workers(workers->active_workers());
454
455 ShenandoahResetCompleteBitmapTask task = ShenandoahResetCompleteBitmapTask(_ordered_regions);
456 workers->run_task(&task);
457 }
458
459 bool ShenandoahHeap::is_next_bitmap_clear() {
460 for (size_t idx = 0; idx < _num_regions; idx++) {
461 ShenandoahHeapRegion* r = _ordered_regions->get(idx);
462 if (r->is_committed() && !is_next_bitmap_clear_range(r->bottom(), r->end())) {
463 return false;
464 }
465 }
466 return true;
467 }
468
// A [start, end) range is clear iff the search for the next marked word
// address finds nothing before 'end'.
bool ShenandoahHeap::is_next_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

// Same check, against the "complete" bitmap.
bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}
476
477 void ShenandoahHeap::print_on(outputStream* st) const {
478 st->print_cr("Shenandoah Heap");
479 st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
480 capacity() / K, committed() / K, used() / K);
481 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
482 num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
483
484 st->print("Status: ");
485 if (concurrent_mark_in_progress()) {
486 st->print("marking ");
487 } else if (is_evacuation_in_progress()) {
488 st->print("evacuating ");
489 } else if (is_update_refs_in_progress()) {
490 st->print("updating refs ");
491 } else {
492 st->print("idle ");
493 }
494 if (cancelled_concgc()) {
770 }
771
// Root closure used during evacuation: for each root slot pointing into
// the collection set, evacuates the object if it has not been copied yet,
// and updates the slot with the to-space reference.
class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        // If obj resolves to itself, it has no to-space copy yet: evacuate.
        // NOTE(review): presumably evacuate_object returns the winning copy
        // when another thread races us — confirm against its contract.
        if (oopDesc::unsafe_equals(resolved, obj)) {
          bool evac;
          resolved = _heap->evacuate_object(obj, _thread, evac);
        }
        // Install the to-space reference into the root slot.
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};
810
811 class ShenandoahEvacuateRootsClosure: public ExtendedOopClosure {
834 }
835
836 public:
837 void do_oop(oop* p) {
838 do_oop_work(p);
839 }
840 void do_oop(narrowOop* p) {
841 do_oop_work(p);
842 }
843 };
844
// Evacuates each (marked) object of a region, unless another thread has
// already installed a forwarding for it.
class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    assert(_heap->is_marked_complete(p), "expect only marked objects");
    // Only evacuate if p still resolves to itself (not yet forwarded).
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      // Result and 'evac' out-flag are intentionally ignored here.
      bool evac;
      _heap->evacuate_object(p, _thread, evac);
    }
  }
};
861
862 class ShenandoahParallelEvacuationTask : public AbstractGangTask {
863 private:
864 ShenandoahHeap* const _sh;
865 ShenandoahCollectionSet* const _cs;
866 volatile jbyte _claimed_codecache;
867
868 bool claim_codecache() {
869 jbyte old = Atomic::cmpxchg((jbyte)1, &_claimed_codecache, (jbyte)0);
870 return old == 0;
871 }
872 public:
873 ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
874 ShenandoahCollectionSet* cs) :
964 }
965
966 #ifdef ASSERT
// Debug-only closure: verifies no region is still flagged as part of the
// collection set. Used after the cset should have been fully retired.
class ShenandoahCheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool heap_region_do(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false; // visit every region
  }
};
973 #endif
974
975 void ShenandoahHeap::prepare_for_concurrent_evacuation() {
976 assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");
977
978 log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());
979
980 if (!cancelled_concgc()) {
981 // Allocations might have happened before we STWed here, record peak:
982 shenandoahPolicy()->record_peak_occupancy();
983
984 ensure_parsability(true);
985
986 if (ShenandoahVerify) {
987 verifier()->verify_after_concmark();
988 }
989
990 trash_cset_regions();
991
992 // NOTE: This needs to be done during a stop the world pause, because
993 // putting regions into the collection set concurrently with Java threads
994 // will create a race. In particular, acmp could fail because when we
995 // resolve the first operand, the containing region might not yet be in
996 // the collection set, and thus return the original oop. When the 2nd
997 // operand gets resolved, the region could be in the collection set
998 // and the oop gets evacuated. If both operands have originally been
999 // the same, we get false negatives.
1000
1001 {
1002 ShenandoahHeapLocker locker(lock());
1003 _collection_set->clear();
1004 _free_regions->clear();
1020 if (ShenandoahVerify) {
1021 verifier()->verify_before_evacuation();
1022 }
1023 }
1024 }
1025
1026
// Makes each thread's GCLAB parsable, optionally retiring it.
class ShenandoahRetireTLABClosure : public ThreadClosure {
private:
  bool _retire;  // true: retire the GCLAB; false: only make it parsable

public:
  ShenandoahRetireTLABClosure(bool retire) : _retire(retire) {}

  void do_thread(Thread* thread) {
    assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
    thread->gclab().make_parsable(_retire);
  }
};
1039
1040 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1041 if (UseTLAB) {
1042 CollectedHeap::ensure_parsability(retire_tlabs);
1043 ShenandoahRetireTLABClosure cl(retire_tlabs);
1044 Threads::java_threads_do(&cl);
1045 gc_threads_do(&cl);
1046 }
1047 }
1048
1049
1050 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1051 ShenandoahRootEvacuator* _rp;
1052 public:
1053
1054 ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1055 AbstractGangTask("Shenandoah evacuate and update roots"),
1056 _rp(rp)
1057 {
1058 // Nothing else to do.
1059 }
1060
1286 return NULL;
1287 }
1288
1289 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1290 Space* sp = heap_region_containing(addr);
1291 assert(sp != NULL, "block_size of address outside of heap");
1292 return sp->block_size(addr);
1293 }
1294
1295 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1296 Space* sp = heap_region_containing(addr);
1297 return sp->block_is_obj(addr);
1298 }
1299
// Stub: always reports 0 ms since the last GC.
// NOTE(review): callers see "a GC just happened" — confirm this is intended.
jlong ShenandoahHeap::millis_since_last_gc() {
  return 0;
}
1303
1304 void ShenandoahHeap::prepare_for_verify() {
1305 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1306 ensure_parsability(false);
1307 }
1308 }
1309
// Prints the GC worker threads to the given stream.
void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
  workers()->print_worker_threads_on(st);
}

// Applies the closure to every GC worker thread.
void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
}
1317
1318 void ShenandoahHeap::print_tracing_info() const {
1319 LogTarget(Info, gc, stats) lt;
1320 if (lt.is_enabled()) {
1321 ResourceMark rm;
1322 LogStream ls(lt);
1323
1324 phase_timings()->print_on(&ls);
1325
1326 ls.cr();
1347 } else {
1348 // TODO: Consider allocating verification bitmaps on demand,
1349 // and turn this on unconditionally.
1350 }
1351 }
1352 }
// TLAB capacity: the capacity of the free set (same for every thread).
size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_regions->capacity();
}
1356
1357 class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
1358 ObjectClosure* _cl;
1359 public:
1360 ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
1361 bool heap_region_do(ShenandoahHeapRegion* r) {
1362 ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
1363 return false;
1364 }
1365 };
1366
// Iterates all marked objects in the heap, including collection set regions.
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  ShenandoahIterateObjectClosureRegionClosure blk(cl);
  heap_region_iterate(&blk, /* skip_cset_regions = */ false, /* skip_humongous_continuations = */ true);
}
1371
1372 class ShenandoahSafeObjectIterateAdjustPtrsClosure : public MetadataAwareOopClosure {
1373 private:
1374 ShenandoahHeap* _heap;
1375
1376 public:
1377 ShenandoahSafeObjectIterateAdjustPtrsClosure() : _heap(ShenandoahHeap::heap()) {}
1378
1379 private:
1380 template <class T>
1381 inline void do_oop_work(T* p) {
1382 T o = oopDesc::load_heap_oop(p);
1383 if (!oopDesc::is_null(o)) {
1384 oop obj = oopDesc::decode_heap_oop_not_null(o);
1385 oopDesc::encode_store_heap_oop(p, BrooksPointer::forwardee(obj));
1386 }
1387 }
1388 public:
1389 void do_oop(oop* p) {
1390 do_oop_work(p);
1391 }
1392 void do_oop(narrowOop* p) {
1393 do_oop_work(p);
1394 }
1395 };
1396
// Wraps a client ObjectClosure: fixes up all references inside the object
// first, then passes the object on. Expects only non-forwarded (to-space)
// objects.
class ShenandoahSafeObjectIterateAndUpdate : public ObjectClosure {
private:
  ObjectClosure* _cl;
public:
  ShenandoahSafeObjectIterateAndUpdate(ObjectClosure *cl) : _cl(cl) {}

  virtual void do_object(oop obj) {
    assert (oopDesc::unsafe_equals(obj, BrooksPointer::forwardee(obj)),
            "avoid double-counting: only non-forwarded objects here");

    // Fix up the ptrs.
    ShenandoahSafeObjectIterateAdjustPtrsClosure adjust_ptrs;
    obj->oop_iterate(&adjust_ptrs);

    // With its references fixed up, hand the object to the client closure.
    _cl->do_object(obj);
  }
};
1415
1416 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1417 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1418
1419 // Safe iteration does objects only with correct references.
1420 // This is why we skip collection set regions that have stale copies of objects,
1421 // and fix up the pointers in the returned objects.
1422
1423 ShenandoahSafeObjectIterateAndUpdate safe_cl(cl);
1424 ShenandoahIterateObjectClosureRegionClosure blk(&safe_cl);
1425 heap_region_iterate(&blk,
1426 /* skip_cset_regions = */ true,
1427 /* skip_humongous_continuations = */ true);
1428
1429 _need_update_refs = false; // already updated the references
1430 }
1431
// Apply blk->heap_region_do() to regions in address order, terminating the
// iteration early if heap_region_do() returns true. Optionally skips
// collection set regions and humongous continuation regions.
// (Note: no committed-ness filtering happens here.)
void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions, bool skip_humongous_continuation) const {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* current = _ordered_regions->get(i);
    if (skip_humongous_continuation && current->is_humongous_continuation()) {
      continue;
    }
    if (skip_cset_regions && in_collection_set(current)) {
      continue;
    }
    if (blk->heap_region_do(current)) {
      return;
    }
  }
}
1448
1449 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1450 private:
1451 ShenandoahHeap* sh;
1452 public:
1453 ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}
1454
1455 bool heap_region_do(ShenandoahHeapRegion* r) {
1456 r->clear_live_data();
1457 sh->set_next_top_at_mark_start(r->bottom(), r->top());
1458 return false;
1459 }
1460 };
1461
// Initial-mark pause work: verifies (optionally), retires TLABs, resets
// per-region liveness/TAMS, publishes the state change, and scans the roots.
void ShenandoahHeap::start_concurrent_marking() {
  if (ShenandoahVerify) {
    verifier()->verify_before_concmark();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
    accumulate_statistics_all_tlabs();
  }

  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
    ensure_parsability(true);
  }

  _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
  _used_start_gc = used();

  // Reset liveness data and TAMS for every region before marking begins.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
    ShenandoahClearLivenessClosure clc(this);
    heap_region_iterate(&clc);
  }

  // Make above changes visible to worker threads
  OrderAccess::fence();

  concurrentMark()->init_mark_roots();

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
    resize_all_tlabs();
  }
}
1498
1499 void ShenandoahHeap::swap_mark_bitmaps() {
1500 // Swap bitmaps.
1501 MarkBitMap* tmp1 = _complete_mark_bit_map;
1502 _complete_mark_bit_map = _next_mark_bit_map;
1503 _next_mark_bit_map = tmp1;
1504
1505 // Swap top-at-mark-start pointers
1506 HeapWord** tmp2 = _complete_top_at_mark_starts;
1507 _complete_top_at_mark_starts = _next_top_at_mark_starts;
1508 _next_top_at_mark_starts = tmp2;
1509
1510 HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1511 _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1512 _next_top_at_mark_starts_base = tmp3;
1513 }
1514
1515
// Finishes (or aborts) concurrent marking. On a clean finish, the bitmaps
// are swapped so the fresh marking becomes the "complete" one; on
// cancellation, state is left so references can still be finished later.
void ShenandoahHeap::stop_concurrent_marking() {
  assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // If we needed to update refs, and concurrent marking has been cancelled,
    // we need to finish updating references.
    set_need_update_refs(false);
    swap_mark_bitmaps();
  }
  set_concurrent_mark_in_progress(false);

  LogTarget(Trace, gc, region) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    ls.print_cr("Regions at stopping the concurrent mark:");
    print_heap_regions_on(&ls);
  }
}
1534
// Flips the concurrent-mark flag and (de)activates SATB queues for all
// Java threads accordingly.
void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  _concurrent_mark_in_progress = in_progress ? 1 : 0;
  JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}
1539
1540 void ShenandoahHeap::set_concurrent_partial_in_progress(bool in_progress) {
1541 _concurrent_partial_in_progress = in_progress;
1542 JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1590 return result;
1591 }
1592
// Extra words each object carries: the Brooks forwarding pointer.
uint ShenandoahHeap::oop_extra_words() {
  return BrooksPointer::word_size();
}

// heap_no_check() is used because this closure may be constructed before
// the heap singleton is fully published; do_object_b asserts non-NULL.
ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}
1600
// Liveness query that tolerates from-space references: resolves the
// forwarding pointer first, then checks the "next" mark bitmap.
bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
  obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
#ifdef ASSERT
  if (_heap->concurrent_mark_in_progress()) {
    // NOTE(review): obj was resolved just above, so this can only fire if
    // resolution is not idempotent — confirm the intent of this check.
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  }
#endif
  assert(!oopDesc::is_null(obj), "null");
  return _heap->is_marked_next(obj);
}
1612
// heap_no_check() is used because this closure may be constructed before
// the heap singleton is fully published; do_object_b asserts non-NULL.
ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}

// Liveness query for to-space references only: callers must not pass
// from-space oops (asserted), and liveness is the "next" mark bit.
bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
  assert(!oopDesc::is_null(obj), "null");
  assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  return _heap->is_marked_next(obj);
}
1623
1624 BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
1625 return need_update_refs() ?
1626 (BoolObjectClosure*) &_forwarded_is_alive :
1627 (BoolObjectClosure*) &_is_alive;
1628 }
1629
1630 void ShenandoahHeap::ref_processing_init() {
1631 MemRegion mr = reserved_region();
1632
1633 _forwarded_is_alive.init(ShenandoahHeap::heap());
1634 _is_alive.init(ShenandoahHeap::heap());
1635 assert(_max_workers > 0, "Sanity");
1636
1637 _ref_processor =
1638 new ReferenceProcessor(mr, // span
1639 ParallelRefProcEnabled, // MT processing
1640 _max_workers, // Degree of MT processing
1641 true, // MT discovery
1788 _need_update_refs = need_update_refs;
1789 }
1790
// FIXME: this should live in the heap region set.
// Returns the first non-humongous region after r, for sliding compaction.
// NOTE(review): assumes such a region exists after r — if r (or the trailing
// run) is at the end of the heap, get() is called past the last index;
// confirm callers guarantee a successor.
ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
  size_t region_idx = r->region_number() + 1;
  ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
  guarantee(next->region_number() == region_idx, "region number must match");
  // Skip over humongous regions (both starts and continuations).
  while (next->is_humongous()) {
    region_idx = next->region_number() + 1;
    next = _ordered_regions->get(region_idx);
    guarantee(next->region_number() == region_idx, "region number must match");
  }
  return next;
}
1803
// Accessor for the monitoring support object (created in initialize()).
ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
  return _monitoring_support;
}

// Bitmap holding the results of the last completed marking.
MarkBitMap* ShenandoahHeap::complete_mark_bit_map() {
  return _complete_mark_bit_map;
}

// Bitmap being populated by the in-progress (next) marking.
MarkBitMap* ShenandoahHeap::next_mark_bit_map() {
  return _next_mark_bit_map;
}
1815
// Makes the region available for allocation via the free set.
void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
  _free_regions->add_region(r);
}

// Empties the free set; regions must be re-added before allocation resumes.
void ShenandoahHeap::clear_free_regions() {
  _free_regions->clear();
}
1823
// Address of the biased collection set membership map, used by generated
// code for fast in-cset tests.
address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != NULL, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}

// Address of the cancellation flag, used by generated code / stubs.
address ShenandoahHeap::cancelled_concgc_addr() {
  return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
}
1833
1834
// Largest alignment the heap may require: the maximum region size.
size_t ShenandoahHeap::conservative_max_heap_alignment() {
  return ShenandoahMaxRegionSize;
}

// Bytes allocated since the last concurrent mark cycle started.
size_t ShenandoahHeap::bytes_allocated_since_cm() {
  return _bytes_allocated_since_cm;
}

void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
  _bytes_allocated_since_cm = bytes;
}
1846
// Records the next-cycle top-at-mark-start (TAMS) for the region that starts
// at region_base. The table is indexed by region_base >> region-size shift,
// i.e. effectively by region number (the table pointer is pre-biased).
void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  _next_top_at_mark_starts[index] = addr;
}
1851
// Returns the next-cycle TAMS recorded for the region starting at region_base.
HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  return _next_top_at_mark_starts[index];
}
1856
// Records the completed-cycle TAMS for the region starting at region_base.
void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  _complete_top_at_mark_starts[index] = addr;
}
1861
// Returns the completed-cycle TAMS recorded for the region starting at region_base.
HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  return _complete_top_at_mark_starts[index];
}
1866
// Flags whether a full (stop-the-world) GC is currently running.
void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress = in_progress;
}
1870
// True while a full (stop-the-world) GC is running.
bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress;
}
1874
// Flags whether the update-references phase is currently running.
void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  _update_refs_in_progress = in_progress;
}
1878
// True while the update-references phase is running.
bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _update_refs_in_progress;
}
1882
1883 void ShenandoahHeap::register_nmethod(nmethod* nm) {
1884 ShenandoahCodeRoots::add_nmethod(nm);
1970 private:
1971 T cl;
1972 ShenandoahHeap* _heap;
1973 ShenandoahHeapRegionSet* _regions;
1974 bool _concurrent;
1975 public:
  // T is the per-worker oop-updating closure type; the shared region set's
  // claim cursor is reset here so workers start from the first region.
  ShenandoahUpdateHeapRefsTask(ShenandoahHeapRegionSet* regions, bool concurrent) :
    AbstractGangTask("Concurrent Update References Task"),
    cl(T()),
    _heap(ShenandoahHeap::heap()),
    _regions(regions),
    _concurrent(concurrent) {
  }
1983
  // Worker entry point: claims regions one at a time from the shared set.
  // Collection-set regions only get their completed-mark bitmap wiped (up to
  // the recorded TAMS); other active regions have their oops updated via the
  // closure. Bails out early if the concurrent GC gets cancelled.
  void work(uint worker_id) {
    // Join the suspendible set only for the concurrent (non-safepoint) case.
    SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      if (_heap->in_collection_set(r)) {
        HeapWord* bottom = r->bottom();
        HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
        if (top > bottom) {
          _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
        }
      } else {
        if (r->is_active()) {
          _heap->marked_object_oop_safe_iterate(r, &cl);
        }
      }
      // Check for cancellation (and possibly yield to a safepoint) between regions.
      if (_heap->check_cancelled_concgc_and_yield(_concurrent)) {
        return;
      }
      r = _regions->claim_next();
    }
  }
2005 };
2006
// Updates all heap references in the given regions using the worker gang.
// Dispatches on UseShenandoahMatrix to pick the closure that also maintains
// the connection matrix.
void ShenandoahHeap::update_heap_references(ShenandoahHeapRegionSet* update_regions, bool concurrent) {
  if (UseShenandoahMatrix) {
    ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(update_regions, concurrent);
    workers()->run_task(&task);
  } else {
    ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(update_regions, concurrent);
    workers()->run_task(&task);
  }
}
2016
// Concurrent phase of update-references: walks all regions, updating oops,
// under the conc_update_refs timing phase.
void ShenandoahHeap::concurrent_update_heap_references() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
  ShenandoahHeapRegionSet* update_regions = regions();
  update_regions->clear_current_index();
  update_heap_references(update_regions, true);
}
2023
2024 void ShenandoahHeap::prepare_update_refs() {
2025 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2026
2027 if (ShenandoahVerify) {
2028 verifier()->verify_before_updaterefs();
2029 }
2030
2031 set_evacuation_in_progress_at_safepoint(false);
2032 set_update_refs_in_progress(true);
2033 ensure_parsability(true);
2034 if (UseShenandoahMatrix) {
2035 connection_matrix()->clear_all();
2036 }
2037 for (uint i = 0; i < num_regions(); i++) {
2038 ShenandoahHeapRegion* r = _ordered_regions->get(i);
2039 r->set_concurrent_iteration_safe_limit(r->top());
2040 }
2041 }
2042
2043 void ShenandoahHeap::finish_update_refs() {
2044 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2045
2046 if (cancelled_concgc()) {
2047 ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
2048
2049 // Finish updating references where we left off.
2050 clear_cancelled_concgc();
2051 ShenandoahHeapRegionSet* update_regions = regions();
2052 update_heap_references(update_regions, false);
2053 }
2178 }
2179 SpinPause(); // allow allocators to barge the lock
2180 }
2181 }
2182
2183 _shenandoah_policy->record_bytes_reclaimed(bytes_reclaimed);
2184 }
2185
// Extended printout: the regular summary plus a per-region dump.
void ShenandoahHeap::print_extended_on(outputStream *st) const {
  print_on(st);
  print_heap_regions_on(st);
}
2190
// Raw address of the concurrent-marking flag, exposed so external code can
// test it directly.
address ShenandoahHeap::concurrent_mark_in_progress_addr() {
  return (address) &(ShenandoahHeap::heap()->_concurrent_mark_in_progress);
}
2194
2195 bool ShenandoahHeap::commit_bitmaps(ShenandoahHeapRegion* r) {
2196 size_t len = _bitmap_words_per_region * HeapWordSize;
2197 size_t off = r->region_number() * _bitmap_words_per_region;
2198 if (!os::commit_memory((char*)(_bitmap0_region.start() + off), len, false)) {
2199 return false;
2200 }
2201 if (!os::commit_memory((char*)(_bitmap1_region.start() + off), len, false)) {
2202 return false;
2203 }
2204 return true;
2205 }
2206
2207 bool ShenandoahHeap::uncommit_bitmaps(ShenandoahHeapRegion* r) {
2208 size_t len = _bitmap_words_per_region * HeapWordSize;
2209 size_t off = r->region_number() * _bitmap_words_per_region;
2210 if (!os::uncommit_memory((char*)(_bitmap0_region.start() + off), len)) {
2211 return false;
2212 }
2213 if (!os::uncommit_memory((char*)(_bitmap1_region.start() + off), len)) {
2214 return false;
2215 }
2216 return true;
2217 }
|
65 oop obj = oopDesc::decode_heap_oop_not_null(o);
66 assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
67 "need to-space object here obj: "PTR_FORMAT" , rb(obj): "PTR_FORMAT", p: "PTR_FORMAT,
68 p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
69 }
70 }
71
// Virtual entry points simply forward to the non-virtual worker above.
void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void ShenandoahAssertToSpaceClosure::do_oop(oop* p) { do_oop_nv(p); }
74 #endif
75
// Human-readable collector name used in logs and tooling.
const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}
79
80 class ShenandoahPretouchTask : public AbstractGangTask {
81 private:
82 ShenandoahHeapRegionSet* _regions;
83 const size_t _bitmap_size;
84 const size_t _page_size;
85 char* _bitmap_base;
86 public:
87 ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
88 char* bitmap_base, size_t bitmap_size,
89 size_t page_size) :
90 AbstractGangTask("Shenandoah PreTouch",
91 Universe::is_fully_initialized() ? GCId::current_raw() :
92 // During VM initialization there is
93 // no GC cycle that this task can be
94 // associated with.
95 GCId::undefined()),
96 _bitmap_base(bitmap_base),
97 _regions(regions),
98 _bitmap_size(bitmap_size),
99 _page_size(page_size) {
100 _regions->clear_current_index();
101 };
102
103 virtual void work(uint worker_id) {
104 ShenandoahHeapRegion* r = _regions->claim_next();
105 while (r != NULL) {
106 log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
107 r->region_number(), p2i(r->bottom()), p2i(r->end()));
108 os::pretouch_memory(r->bottom(), r->end(), _page_size);
109
110 size_t start = r->region_number() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
111 size_t end = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
112 assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
113
114 log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
115 r->region_number(), p2i(_bitmap_base + start), p2i(_bitmap_base + end));
116 os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
117
118 r = _regions->claim_next();
119 }
120 }
121 };
122
123 jint ShenandoahHeap::initialize() {
124 CollectedHeap::pre_initialize();
125
126 BrooksPointer::initial_checks();
127
128 size_t init_byte_size = collector_policy()->initial_heap_byte_size();
129 size_t max_byte_size = collector_policy()->max_heap_byte_size();
130 size_t heap_alignment = collector_policy()->heap_alignment();
131
132 if (ShenandoahAlwaysPreTouch) {
133 // Enabled pre-touch means the entire heap is committed right away.
134 init_byte_size = max_byte_size;
135 }
136
148 set_barrier_set(new ShenandoahBarrierSet(this));
149 ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
150
151 _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
152 size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
153 _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
154 _committed = _initial_size;
155
156 log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
157 if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
158 vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
159 }
160
161 size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
162
163 _ordered_regions = new ShenandoahHeapRegionSet(_num_regions);
164 _free_regions = new ShenandoahFreeSet(_ordered_regions, _num_regions);
165
166 _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());
167
168 _top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
169 _top_at_mark_starts = _top_at_mark_starts_base -
170 ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
171
172
173 {
174 ShenandoahHeapLocker locker(lock());
175 for (size_t i = 0; i < _num_regions; i++) {
176 ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
177 (HeapWord*) pgc_rs.base() + reg_size_words * i,
178 reg_size_words,
179 i,
180 i < num_committed_regions);
181
182 _top_at_mark_starts_base[i] = r->bottom();
183
184 // Add to ordered regions first.
185 // We use the active size of ordered regions as the number of active regions in heap,
186 // free set and collection set use the number to assert the correctness of incoming regions.
187 _ordered_regions->add_region(r);
188 _free_regions->add_region(r);
189 assert(!collection_set()->is_in(i), "New region should not be in collection set");
190 }
191 }
192
193 assert(_ordered_regions->active_regions() == _num_regions, "Must match");
194 assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
195 "misaligned heap: "PTR_FORMAT, p2i(base()));
196
197 LogTarget(Trace, gc, region) lt;
198 if (lt.is_enabled()) {
199 ResourceMark rm;
200 LogStream ls(lt);
201 log_trace(gc, region)("All Regions");
202 _ordered_regions->print_on(&ls);
216 // Reserve space for prev and next bitmap.
217 _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
218 _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
219
220 size_t bitmap_bytes_per_region = _bitmap_size / _num_regions;
221 _bitmap_words_per_region = bitmap_bytes_per_region / HeapWordSize;
222
223 guarantee(bitmap_bytes_per_region != 0,
224 "Bitmap bytes per region should not be zero");
225 guarantee(is_power_of_2(bitmap_bytes_per_region),
226 "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
227 guarantee((bitmap_bytes_per_region % os::vm_page_size()) == 0,
228 "Bitmap bytes per region should be page-granular: bpr = " SIZE_FORMAT ", page size = %d",
229 bitmap_bytes_per_region, os::vm_page_size());
230 guarantee(is_power_of_2(_bitmap_words_per_region),
231 "Bitmap words per region Should be power of two: " SIZE_FORMAT, _bitmap_words_per_region);
232
233 size_t bitmap_page_size = UseLargePages && (bitmap_bytes_per_region >= (size_t)os::large_page_size()) ?
234 (size_t)os::large_page_size() : (size_t)os::vm_page_size();
235
236 ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
237 MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
238 _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
239
240 {
241 ShenandoahHeapLocker locker(lock());
242 for (size_t i = 0; i < _num_regions; i++) {
243 ShenandoahHeapRegion* r = _ordered_regions->get(i);
244 if (r->is_committed()) {
245 commit_bitmaps(r);
246 }
247 }
248 }
249
250 size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
251
252 if (ShenandoahVerify) {
253 ReservedSpace verify_bitmap(_bitmap_size, page_size);
254 os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
255 "couldn't allocate verification bitmap");
256 MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
257 MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
258 _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
259 _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
260 }
261
262 if (ShenandoahAlwaysPreTouch) {
263 assert (!AlwaysPreTouch, "Should have been overridden");
264
265 // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
266 // before initialize() below zeroes it with initializing thread. For any given region,
267 // we touch the region and the corresponding bitmaps from the same thread.
268
269 log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
270 _ordered_regions->count(), page_size);
271 ShenandoahPretouchTask cl(_ordered_regions, bitmap.base(), _bitmap_size, page_size);
272 _workers->run_task(&cl);
273 }
274
275 _mark_bit_map.initialize(_heap_region, _bitmap_region);
276
277 // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
278 ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
279 MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
280 _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
281 _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
282
283 if (UseShenandoahMatrix) {
284 _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
285 } else {
286 _connection_matrix = NULL;
287 }
288
289 _partial_gc = _shenandoah_policy->can_do_partial_gc() ?
290 new ShenandoahPartialGC(this, _num_regions) :
291 NULL;
292
293 _monitoring_support = new ShenandoahMonitoringSupport(this);
294
295 _phase_timings = new ShenandoahPhaseTimings();
296
297 if (ShenandoahAllocationTrace) {
298 _alloc_tracker = new ShenandoahAllocTracker();
299 }
300
301 ShenandoahStringDedup::initialize();
308
309 return JNI_OK;
310 }
311
// Constructs the heap object itself; heavy setup (region allocation, bitmap
// reservation, etc.) happens later in initialize(). Creates the worker gangs
// and the concurrent mark subsystem here.
ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _update_refs_in_progress(false),
  _concurrent_partial_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _top_at_mark_starts(NULL),
  _top_at_mark_starts_base(NULL),
  _mark_bit_map(),
  _aux_bit_map(),
  _connection_matrix(NULL),
  _cancelled_concgc(0),
  _need_update_refs(false),
  _need_reset_bitmap(false),
  _bitmap_valid(true),
  _verifier(NULL),
  _heap_lock(0),
  _used_at_last_gc(0),
  _alloc_seq_at_last_gc_start(0),
  _alloc_seq_at_last_gc_end(0),
  _safepoint_workers(NULL),
#ifdef ASSERT
  _heap_lock_owner(NULL),
  _heap_expansion_count(0),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _phase_timings(NULL),
  _alloc_tracker(NULL)
{
  log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  // Always have at least one GC worker.
  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  // Optional gang for parallel safepoint cleanup work.
  if (ParallelSafepointCleanupThreads > 1) {
    _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
                                                ParallelSafepointCleanupThreads,
                                                false, false);
    _safepoint_workers->initialize_workers();
  }
}
375
376 class ShenandoahResetBitmapTask : public AbstractGangTask {
377 private:
378 ShenandoahHeapRegionSet* _regions;
379
380 public:
381 ShenandoahResetBitmapTask(ShenandoahHeapRegionSet* regions) :
382 AbstractGangTask("Parallel Reset Bitmap Task"),
383 _regions(regions) {
384 _regions->clear_current_index();
385 }
386
387 void work(uint worker_id) {
388 ShenandoahHeapRegion* region = _regions->claim_next();
389 ShenandoahHeap* heap = ShenandoahHeap::heap();
390 while (region != NULL) {
391 if (region->is_committed()) {
392 HeapWord* bottom = region->bottom();
393 HeapWord* top = heap->top_at_mark_start(region->bottom());
394 if (top > bottom) {
395 heap->mark_bit_map()->clear_range_large(MemRegion(bottom, top));
396 }
397 assert(heap->is_bitmap_clear_range(bottom, region->end()), "must be clear");
398 heap->set_top_at_mark_start(region->bottom(), region->bottom());
399 }
400 region = _regions->claim_next();
401 }
402 }
403 };
404
405 void ShenandoahHeap::reset_mark_bitmap(WorkGang* workers) {
406 assert_gc_workers(workers->active_workers());
407
408 ShenandoahResetBitmapTask task = ShenandoahResetBitmapTask(_ordered_regions);
409 workers->run_task(&task);
410 }
411
412 bool ShenandoahHeap::is_bitmap_clear() {
413 for (size_t idx = 0; idx < _num_regions; idx++) {
414 ShenandoahHeapRegion* r = _ordered_regions->get(idx);
415 if (r->is_committed() && !is_bitmap_clear_range(r->bottom(), r->end())) {
416 return false;
417 }
418 }
419 return true;
420 }
421
// True iff no bit is set in the mark bitmap within [start, end).
bool ShenandoahHeap::is_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _mark_bit_map.getNextMarkedWordAddress(start, end) == end;
}
425
426 void ShenandoahHeap::print_on(outputStream* st) const {
427 st->print_cr("Shenandoah Heap");
428 st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
429 capacity() / K, committed() / K, used() / K);
430 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
431 num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
432
433 st->print("Status: ");
434 if (concurrent_mark_in_progress()) {
435 st->print("marking ");
436 } else if (is_evacuation_in_progress()) {
437 st->print("evacuating ");
438 } else if (is_update_refs_in_progress()) {
439 st->print("updating refs ");
440 } else {
441 st->print("idle ");
442 }
443 if (cancelled_concgc()) {
719 }
720
// Root closure for evacuation: objects in the collection set are evacuated on
// first touch, and the root slot is updated to point at the to-space copy.
class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;  // captured at construction time
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked(obj), "only evacuate marked objects %d %d",
               _heap->is_marked(obj), _heap->is_marked(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          // Object not yet forwarded: evacuate it ourselves.
          bool evac;
          resolved = _heap->evacuate_object(obj, _thread, evac);
        }
        // Store the to-space address back into the root slot.
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};
759
760 class ShenandoahEvacuateRootsClosure: public ExtendedOopClosure {
783 }
784
785 public:
786 void do_oop(oop* p) {
787 do_oop_work(p);
788 }
789 void do_oop(narrowOop* p) {
790 do_oop_work(p);
791 }
792 };
793
// Object closure used during parallel evacuation: evacuates each marked
// object that has not already been forwarded to to-space.
class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;  // captured at construction time
public:
  ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    assert(_heap->is_marked(p), "expect only marked objects");
    // Only evacuate if p still resolves to itself (i.e. not yet forwarded).
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
      bool evac;
      _heap->evacuate_object(p, _thread, evac);
    }
  }
};
810
811 class ShenandoahParallelEvacuationTask : public AbstractGangTask {
812 private:
813 ShenandoahHeap* const _sh;
814 ShenandoahCollectionSet* const _cs;
815 volatile jbyte _claimed_codecache;
816
817 bool claim_codecache() {
818 jbyte old = Atomic::cmpxchg((jbyte)1, &_claimed_codecache, (jbyte)0);
819 return old == 0;
820 }
821 public:
822 ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
823 ShenandoahCollectionSet* cs) :
913 }
914
915 #ifdef ASSERT
// Debug-only check: asserts that no region is still flagged as being in the
// collection set.
class ShenandoahCheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool heap_region_do(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
922 #endif
923
924 void ShenandoahHeap::prepare_for_concurrent_evacuation() {
925 assert(_ordered_regions->get(0)->region_number() == 0, "FIXME CHF. FIXME CHF!");
926
927 log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());
928
929 if (!cancelled_concgc()) {
930 // Allocations might have happened before we STWed here, record peak:
931 shenandoahPolicy()->record_peak_occupancy();
932
933 make_tlabs_parsable(true);
934
935 if (ShenandoahVerify) {
936 verifier()->verify_after_concmark();
937 }
938
939 trash_cset_regions();
940
941 // NOTE: This needs to be done during a stop the world pause, because
942 // putting regions into the collection set concurrently with Java threads
943 // will create a race. In particular, acmp could fail because when we
944 // resolve the first operand, the containing region might not yet be in
945 // the collection set, and thus return the original oop. When the 2nd
946 // operand gets resolved, the region could be in the collection set
947 // and the oop gets evacuated. If both operands have originally been
948 // the same, we get false negatives.
949
950 {
951 ShenandoahHeapLocker locker(lock());
952 _collection_set->clear();
953 _free_regions->clear();
969 if (ShenandoahVerify) {
970 verifier()->verify_before_evacuation();
971 }
972 }
973 }
974
975
// Thread closure that makes each thread's GCLAB parsable, optionally
// retiring it.
class ShenandoahRetireTLABClosure : public ThreadClosure {
private:
  bool _retire;  // whether to retire (rather than just make parsable)

public:
  ShenandoahRetireTLABClosure(bool retire) : _retire(retire) {}

  void do_thread(Thread* thread) {
    assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
    thread->gclab().make_parsable(_retire);
  }
};
988
989 void ShenandoahHeap::make_tlabs_parsable(bool retire_tlabs) {
990 if (UseTLAB) {
991 CollectedHeap::ensure_parsability(retire_tlabs);
992 ShenandoahRetireTLABClosure cl(retire_tlabs);
993 Threads::java_threads_do(&cl);
994 gc_threads_do(&cl);
995 }
996 }
997
998
999 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1000 ShenandoahRootEvacuator* _rp;
1001 public:
1002
1003 ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1004 AbstractGangTask("Shenandoah evacuate and update roots"),
1005 _rp(rp)
1006 {
1007 // Nothing else to do.
1008 }
1009
1235 return NULL;
1236 }
1237
// Delegates the block-size query to the region (space) containing addr.
size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}
1243
1244 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1245 Space* sp = heap_region_containing(addr);
1246 return sp->block_is_obj(addr);
1247 }
1248
// NOTE(review): always reports 0, so callers get no "time since last GC"
// signal; presumably a stub — confirm whether real tracking is intended.
jlong ShenandoahHeap::millis_since_last_gc() {
  return 0;
}
1252
// Makes the heap verifiable: TLABs are made parsable (without retiring) when
// it is safe to do so — at a safepoint, or trivially when TLABs are disabled.
void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    make_tlabs_parsable(false);
  }
}
1258
// Prints the GC worker threads to the given stream.
void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
  workers()->print_worker_threads_on(st);
}
1262
// Applies tcl to every GC worker thread.
void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
}
1266
1267 void ShenandoahHeap::print_tracing_info() const {
1268 LogTarget(Info, gc, stats) lt;
1269 if (lt.is_enabled()) {
1270 ResourceMark rm;
1271 LogStream ls(lt);
1272
1273 phase_timings()->print_on(&ls);
1274
1275 ls.cr();
1296 } else {
1297 // TODO: Consider allocating verification bitmaps on demand,
1298 // and turn this on unconditionally.
1299 }
1300 }
1301 }
// TLAB capacity is bounded by the current free-set capacity.
size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_regions->capacity();
}
1305
// Region closure adapter: applies an ObjectClosure to the marked objects of
// each region via marked_object_iterate().
class ShenandoahIterateObjectClosureRegionClosure: public ShenandoahHeapRegionClosure {
  ObjectClosure* _cl;
public:
  ShenandoahIterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  bool heap_region_do(ShenandoahHeapRegion* r) {
    ShenandoahHeap::heap()->marked_object_iterate(r, _cl);
    return false;
  }
};
1315
1316 class ObjectIterateScanRootClosure : public ExtendedOopClosure {
1317 private:
1318 MarkBitMap* _bitmap;
1319 Stack<oop,mtGC>* _oop_stack;
1320
1321 template <class T>
1322 void do_oop_work(T* p) {
1323 T o = oopDesc::load_heap_oop(p);
1324 if (!oopDesc::is_null(o)) {
1325 oop obj = oopDesc::decode_heap_oop_not_null(o);
1326 obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
1327 assert(oopDesc::is_oop(obj), "must be a valid oop");
1328 if (!_bitmap->isMarked((HeapWord*) obj)) {
1329 _bitmap->mark((HeapWord*) obj);
1330 _oop_stack->push(obj);
1331 }
1332 }
1333 }
1334
1335 public:
1336 ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1337 _bitmap(bitmap), _oop_stack(oop_stack) {}
1338 void do_oop(oop* p) { do_oop_work(p); }
1339 void do_oop(narrowOop* p) { do_oop_work(p); }
1340 };
1341
1342 /*
1343 * This is public API, used in preparation of object_iterate().
1344 * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1345 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1346 * control, we call SH::make_tlabs_parsable().
1347 */
1348 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1349 // No-op.
1350 }
1351
1352
1353 /*
1354 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1355 *
1356 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1357 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1358 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1359 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1360 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1361 * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1362 * wiped the bitmap in preparation for next marking).
1363 *
1364 * For all those reasons, we implement object iteration as a single marking traversal, reporting
1365 * objects as we mark+traverse through the heap, starting from GC roots. This is ok. JVMTI
1366 * IterateThroughHeap is allowed to report dead objects, but is not required to do so.
1367 */
1368 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1369 assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1370 if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1371 log_warning(gc)("Hold my beer, we are about to crash this VM.");
1372 log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1373 return;
1374 }
1375
1376 Stack<oop,mtGC> oop_stack;
1377
1378 // First, we process all GC roots. This populates the work stack with initial objects.
1379 ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1380 ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1381 CLDToOopClosure clds(&oops, false);
1382 CodeBlobToOopClosure blobs(&oops, false);
1383 rp.process_all_roots(&oops, &oops, &clds, &blobs, 0);
1384
1385 // Work through the oop stack to traverse heap.
1386 while (! oop_stack.is_empty()) {
1387 oop obj = oop_stack.pop();
1388 assert(oopDesc::is_oop(obj), "must be a valid oop");
1389 cl->do_object(obj);
1390 obj->oop_iterate(&oops);
1391 }
1392
1393 assert(oop_stack.is_empty(), "should be empty");
1394
1395 if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1396 log_warning(gc)("Hold my beer, we are about to crash this VM.");
1397 log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1398 }
1399 }
1400
// "Safe" variant is identical here: object_iterate() is already only legal at
// a safepoint.
void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
  object_iterate(cl);
}
1405
1406 // Apply blk->heap_region_do() on all committed regions in address order,
1407 // terminating the iteration early if heap_region_do() returns true.
1408 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions, bool skip_humongous_continuation) const {
1409 for (size_t i = 0; i < num_regions(); i++) {
1410 ShenandoahHeapRegion* current = _ordered_regions->get(i);
1411 if (skip_humongous_continuation && current->is_humongous_continuation()) {
1412 continue;
1413 }
1414 if (skip_cset_regions && in_collection_set(current)) {
1415 continue;
1416 }
1417 if (blk->heap_region_do(current)) {
1418 return;
1419 }
1420 }
1421 }
1422
// Region closure run at the start of marking: wipes per-region liveness data
// and records current top as the top-at-mark-start (TAMS).
class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* sh;
public:
  ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}

  bool heap_region_do(ShenandoahHeapRegion* r) {
    r->clear_live_data();
    sh->set_top_at_mark_start(r->bottom(), r->top());
    return false;
  }
};
1435
// Safepoint prologue of a marking cycle: verifies (if requested), accumulates
// TLAB stats, raises the marking flag (which also activates SATB barriers),
// retires TLABs, wipes per-region liveness, then scans the marking roots.
void ShenandoahHeap::start_concurrent_marking() {
  if (ShenandoahVerify) {
    verifier()->verify_before_concmark();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
    accumulate_statistics_all_tlabs();
  }

  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
    make_tlabs_parsable(true);
  }

  _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
  _used_start_gc = used();

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
    ShenandoahClearLivenessClosure clc(this);
    heap_region_iterate(&clc);
  }

  // Make above changes visible to worker threads
  OrderAccess::fence();

  concurrentMark()->init_mark_roots();

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
    resize_all_tlabs();
  }
}
1472
// Wind down the concurrent marking phase: clear the marking flag
// (deactivating SATB queues) and optionally dump region states.
void ShenandoahHeap::stop_concurrent_marking() {
  assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // Marking completed without cancellation: clear the update-refs request.
    // When marking IS cancelled, the flag is deliberately left set so that
    // reference updating can be finished later.
    set_need_update_refs(false);
  }
  set_concurrent_mark_in_progress(false);

  LogTarget(Trace, gc, region) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    ls.print_cr("Regions at stopping the concurrent mark:");
    print_heap_regions_on(&ls);
  }
}
1490
1491 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1492 _concurrent_mark_in_progress = in_progress ? 1 : 0;
1493 JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1494 }
1495
1496 void ShenandoahHeap::set_concurrent_partial_in_progress(bool in_progress) {
1497 _concurrent_partial_in_progress = in_progress;
1498 JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1546 return result;
1547 }
1548
// Extra words allocated in front of every object: the Brooks forwarding
// pointer.
uint ShenandoahHeap::oop_extra_words() {
  return BrooksPointer::word_size();
}
1552
// Caches the heap pointer. heap_no_check() skips the initialization
// assert; do_object_b() asserts _heap != NULL before use instead.
ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}
1556
1557 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
1558 assert(_heap != NULL, "sanity");
1559 obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
1560 #ifdef ASSERT
1561 if (_heap->concurrent_mark_in_progress()) {
1562 assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
1563 }
1564 #endif
1565 assert(!oopDesc::is_null(obj), "null");
1566 return _heap->is_marked(obj);
1567 }
1568
// Caches the heap pointer. heap_no_check() skips the initialization
// assert; do_object_b() asserts _heap != NULL before use instead.
ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}
1572
// Liveness query for oops that are already to-space objects: the asserts
// check non-null and not-forwarded, then the mark bitmap decides.
bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
  assert(!oopDesc::is_null(obj), "null");
  assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  return _heap->is_marked(obj);
}
1579
1580 BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
1581 return need_update_refs() ?
1582 (BoolObjectClosure*) &_forwarded_is_alive :
1583 (BoolObjectClosure*) &_is_alive;
1584 }
1585
1586 void ShenandoahHeap::ref_processing_init() {
1587 MemRegion mr = reserved_region();
1588
1589 _forwarded_is_alive.init(ShenandoahHeap::heap());
1590 _is_alive.init(ShenandoahHeap::heap());
1591 assert(_max_workers > 0, "Sanity");
1592
1593 _ref_processor =
1594 new ReferenceProcessor(mr, // span
1595 ParallelRefProcEnabled, // MT processing
1596 _max_workers, // Degree of MT processing
1597 true, // MT discovery
1744 _need_update_refs = need_update_refs;
1745 }
1746
1747 //fixme this should be in heapregionset
1748 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
1749 size_t region_idx = r->region_number() + 1;
1750 ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
1751 guarantee(next->region_number() == region_idx, "region number must match");
1752 while (next->is_humongous()) {
1753 region_idx = next->region_number() + 1;
1754 next = _ordered_regions->get(region_idx);
1755 guarantee(next->region_number() == region_idx, "region number must match");
1756 }
1757 return next;
1758 }
1759
// Accessor for the monitoring support object.
ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
  return _monitoring_support;
}
1763
// Accessor for the marking bitmap.
MarkBitMap* ShenandoahHeap::mark_bit_map() {
  return &_mark_bit_map;
}
1767
// Add r to the free-region set.
void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
  _free_regions->add_region(r);
}
1771
// Empty the free-region set.
void ShenandoahHeap::clear_free_regions() {
  _free_regions->clear();
}
1775
// Address of the biased collection-set membership map, for code that
// performs the in-cset test directly against memory.
address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != NULL, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}
1781
// Address of the _cancelled_concgc flag, for code that tests it directly.
address ShenandoahHeap::cancelled_concgc_addr() {
  return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
}
1785
1786
// Heap alignment never needs to exceed the maximum region size.
size_t ShenandoahHeap::conservative_max_heap_alignment() {
  return ShenandoahMaxRegionSize;
}
1790
// Bytes allocated since the last concurrent mark cycle.
size_t ShenandoahHeap::bytes_allocated_since_cm() {
  return _bytes_allocated_since_cm;
}
1794
// Setter for the allocated-since-concurrent-mark counter.
void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
  _bytes_allocated_since_cm = bytes;
}
1798
1799 void ShenandoahHeap::set_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
1800 uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
1801 _top_at_mark_starts[index] = addr;
1802 }
1803
1804 HeapWord* ShenandoahHeap::top_at_mark_start(HeapWord* region_base) {
1805 uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
1806 return _top_at_mark_starts[index];
1807 }
1808
// Setter for the full-GC-in-progress flag.
void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress = in_progress;
}
1812
// True while a full GC is in progress.
bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress;
}
1816
// Setter for the update-refs-in-progress flag.
void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  _update_refs_in_progress = in_progress;
}
1820
// True while the update-references phase is in progress.
bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _update_refs_in_progress;
}
1824
1825 void ShenandoahHeap::register_nmethod(nmethod* nm) {
1826 ShenandoahCodeRoots::add_nmethod(nm);
1912 private:
1913 T cl;
1914 ShenandoahHeap* _heap;
1915 ShenandoahHeapRegionSet* _regions;
1916 bool _concurrent;
1917 public:
  // regions:    region set the workers claim work from
  // concurrent: true when running concurrently with mutators; enables
  //             suspendible-thread-set joining in work()
  ShenandoahUpdateHeapRefsTask(ShenandoahHeapRegionSet* regions, bool concurrent) :
    AbstractGangTask("Concurrent Update References Task"),
    cl(T()),
    _heap(ShenandoahHeap::heap()),
    _regions(regions),
    _concurrent(concurrent) {
  }
1925
1926 void work(uint worker_id) {
1927 SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
1928 ShenandoahHeapRegion* r = _regions->claim_next();
1929 while (r != NULL) {
1930 if (_heap->in_collection_set(r)) {
1931 HeapWord* bottom = r->bottom();
1932 HeapWord* top = _heap->top_at_mark_start(r->bottom());
1933 if (top > bottom) {
1934 _heap->mark_bit_map()->clear_range_large(MemRegion(bottom, top));
1935 }
1936 } else {
1937 if (r->is_active()) {
1938 _heap->marked_object_oop_safe_iterate(r, &cl);
1939 }
1940 }
1941 if (_heap->check_cancelled_concgc_and_yield(_concurrent)) {
1942 return;
1943 }
1944 r = _regions->claim_next();
1945 }
1946 }
1947 };
1948
// Run the update-references task over the given regions with the worker
// gang, choosing the matrix-aware closure when UseShenandoahMatrix is set.
void ShenandoahHeap::update_heap_references(ShenandoahHeapRegionSet* update_regions, bool concurrent) {
  if (UseShenandoahMatrix) {
    ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(update_regions, concurrent);
    workers()->run_task(&task);
  } else {
    ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(update_regions, concurrent);
    workers()->run_task(&task);
  }
}
1958
// Concurrent update-references phase: reset the region cursor and update
// references in all regions, concurrently with mutators.
void ShenandoahHeap::concurrent_update_heap_references() {
  ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
  ShenandoahHeapRegionSet* update_regions = regions();
  update_regions->clear_current_index();
  update_heap_references(update_regions, true);
}
1965
1966 void ShenandoahHeap::prepare_update_refs() {
1967 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1968
1969 if (ShenandoahVerify) {
1970 verifier()->verify_before_updaterefs();
1971 }
1972
1973 set_evacuation_in_progress_at_safepoint(false);
1974 set_update_refs_in_progress(true);
1975 make_tlabs_parsable(true);
1976 if (UseShenandoahMatrix) {
1977 connection_matrix()->clear_all();
1978 }
1979 for (uint i = 0; i < num_regions(); i++) {
1980 ShenandoahHeapRegion* r = _ordered_regions->get(i);
1981 r->set_concurrent_iteration_safe_limit(r->top());
1982 }
1983 }
1984
1985 void ShenandoahHeap::finish_update_refs() {
1986 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1987
1988 if (cancelled_concgc()) {
1989 ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
1990
1991 // Finish updating references where we left off.
1992 clear_cancelled_concgc();
1993 ShenandoahHeapRegionSet* update_regions = regions();
1994 update_heap_references(update_regions, false);
1995 }
2120 }
2121 SpinPause(); // allow allocators to barge the lock
2122 }
2123 }
2124
2125 _shenandoah_policy->record_bytes_reclaimed(bytes_reclaimed);
2126 }
2127
// Extended printout: the regular heap summary followed by per-region state.
void ShenandoahHeap::print_extended_on(outputStream *st) const {
  print_on(st);
  print_heap_regions_on(st);
}
2132
// Address of the _concurrent_mark_in_progress flag, for code that tests
// it directly.
address ShenandoahHeap::concurrent_mark_in_progress_addr() {
  return (address) &(ShenandoahHeap::heap()->_concurrent_mark_in_progress);
}
2136
2137 bool ShenandoahHeap::commit_bitmaps(ShenandoahHeapRegion* r) {
2138 size_t len = _bitmap_words_per_region * HeapWordSize;
2139 size_t off = r->region_number() * _bitmap_words_per_region;
2140 if (!os::commit_memory((char*)(_bitmap_region.start() + off), len, false)) {
2141 return false;
2142 }
2143 return true;
2144 }
2145
2146 bool ShenandoahHeap::uncommit_bitmaps(ShenandoahHeapRegion* r) {
2147 size_t len = _bitmap_words_per_region * HeapWordSize;
2148 size_t off = r->region_number() * _bitmap_words_per_region;
2149 if (!os::uncommit_memory((char*)(_bitmap_region.start() + off), len)) {
2150 return false;
2151 }
2152 return true;
2153 }
|