65 oop obj = oopDesc::decode_heap_oop_not_null(o);
66 assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
67 "need to-space object here obj: "PTR_FORMAT" , rb(obj): "PTR_FORMAT", p: "PTR_FORMAT,
68 p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
69 }
70 }
71
// Out-of-line virtual entry points; both forward to the non-virtual
// template implementation do_oop_nv().
void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void ShenandoahAssertToSpaceClosure::do_oop(oop* p) { do_oop_nv(p); }
74 #endif
75
// Human-readable GC name, used in logging and monitoring output.
const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}
79
80 class ShenandoahPretouchTask : public AbstractGangTask {
81 private:
82 ShenandoahHeapRegionSet* _regions;
83 const size_t _bitmap_size;
84 const size_t _page_size;
85 char* _bitmap0_base;
86 char* _bitmap1_base;
87 public:
88 ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
89 char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
90 size_t page_size) :
91 AbstractGangTask("Shenandoah PreTouch",
92 Universe::is_fully_initialized() ? GCId::current_raw() :
93 // During VM initialization there is
94 // no GC cycle that this task can be
95 // associated with.
96 GCId::undefined()),
97 _bitmap0_base(bitmap0_base),
98 _bitmap1_base(bitmap1_base),
99 _regions(regions),
100 _bitmap_size(bitmap_size),
101 _page_size(page_size) {
102 _regions->clear_current_index();
103 };
104
105 virtual void work(uint worker_id) {
106 ShenandoahHeapRegion* r = _regions->claim_next();
107 while (r != NULL) {
108 log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
109 r->region_number(), p2i(r->bottom()), p2i(r->end()));
110 os::pretouch_memory(r->bottom(), r->end(), _page_size);
111
112 size_t start = r->region_number() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
113 size_t end = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
114 assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
115
116 log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
117 r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
118 os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);
119
120 log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
121 r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
122 os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);
123
124 r = _regions->claim_next();
125 }
126 }
127 };
128
// One-time heap setup: reserves and commits initial heap storage, creates
// region bookkeeping, mark bitmaps, verification support, and auxiliary
// GC subsystems. Returns JNI_OK on success; commit failures exit the VM.
jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Sanity-check Brooks forwarding-pointer assumptions before heap layout.
  BrooksPointer::initial_checks();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();
  size_t heap_alignment = collector_policy()->heap_alignment();

  if (ShenandoahAlwaysPreTouch) {
    // Enabled pre-touch means the entire heap is committed right away.
    init_byte_size = max_byte_size;
  }

  set_barrier_set(new ShenandoahBarrierSet(this));
  // NOTE(review): heap_rs is reserved in code elided from this chunk — confirm.
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);

  _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
  size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
  // Round initial size down to a whole number of regions.
  _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
  _committed = _initial_size;

  log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
  if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
    vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
  }

  size_t reg_size_words = ShenandoahHeapRegion::region_size_words();

  _ordered_regions = new ShenandoahHeapRegionSet(_num_regions);
  _free_regions = new ShenandoahFreeSet(_ordered_regions, _num_regions);

  _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());

  // Top-at-mark-start arrays are biased by the heap base so they can be
  // indexed directly with (addr >> region-size shift).
  _next_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
                             ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());

  _complete_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
                                 ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());

  // Create all region objects; only the first num_committed_regions are
  // backed by committed memory.
  {
    ShenandoahHeapLocker locker(lock());
    for (size_t i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
                                                         (HeapWord*) pgc_rs.base() + reg_size_words * i,
                                                         reg_size_words,
                                                         i,
                                                         i < num_committed_regions);

      _complete_top_at_mark_starts_base[i] = r->bottom();
      _next_top_at_mark_starts_base[i] = r->bottom();

      // Add to ordered regions first.
      // We use the active size of ordered regions as the number of active regions in heap,
      // free set and collection set use the number to assert the correctness of incoming regions.
      _ordered_regions->add_region(r);
      _free_regions->add_region(r);
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }
  }

  assert(_ordered_regions->active_regions() == _num_regions, "Must match");
  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "misaligned heap: "PTR_FORMAT, p2i(base()));

  LogTarget(Trace, gc, region) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    log_trace(gc, region)("All Regions");
    _ordered_regions->print_on(&ls);
  // NOTE(review): intervening lines (including the close of this log block)
  // are elided from this chunk.
  _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t bitmap_bytes_per_region = _bitmap_size / _num_regions;
  _bitmap_words_per_region = bitmap_bytes_per_region / HeapWordSize;

  // Bitmap slices must be page-granular powers of two so that per-region
  // commit/uncommit of bitmap backing memory is possible.
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
  guarantee((bitmap_bytes_per_region % os::vm_page_size()) == 0,
            "Bitmap bytes per region should be page-granular: bpr = " SIZE_FORMAT ", page size = %d",
            bitmap_bytes_per_region, os::vm_page_size());
  guarantee(is_power_of_2(_bitmap_words_per_region),
            "Bitmap words per region Should be power of two: " SIZE_FORMAT, _bitmap_words_per_region);
  guarantee(bitmap_bytes_per_region >= (size_t)os::vm_page_size(),
            "Bitmap slice per region (" SIZE_FORMAT ") should be larger than page size (%d)",
            bitmap_bytes_per_region, os::vm_page_size());

  // Use large pages for the bitmaps only if a region's slice spans at least
  // one large page.
  size_t bitmap_page_size = UseLargePages && (bitmap_bytes_per_region >= (size_t)os::large_page_size()) ?
                            (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  _bitmap0_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  ReservedSpace bitmap1(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  _bitmap1_region = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);

  // Commit bitmap slices for the regions that are committed up-front.
  {
    ShenandoahHeapLocker locker(lock());
    for (size_t i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* r = _ordered_regions->get(i);
      if (r->is_committed()) {
        commit_bitmaps(r);
      }
    }
  }

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  if (ShenandoahVerify) {
    // A third bitmap is reserved and fully committed for the verifier.
    ReservedSpace verify_bitmap(_bitmap_size, page_size);
    os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
                              "couldn't allocate verification bitmap");
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _ordered_regions->count(), page_size);
    ShenandoahPretouchTask cl(_ordered_regions, bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  // Bitmap 0 starts as the "complete" bitmap, bitmap 1 as the "next" one;
  // the roles are swapped after each successful mark cycle.
  _mark_bit_map0.initialize(_heap_region, _bitmap0_region);
  _complete_mark_bit_map = &_mark_bit_map0;

  _mark_bit_map1.initialize(_heap_region, _bitmap1_region);
  _next_mark_bit_map = &_mark_bit_map1;

  if (UseShenandoahMatrix) {
    _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
  } else {
    _connection_matrix = NULL;
  }

  // Partial GC is optional, gated by the policy.
  _partial_gc = _shenandoah_policy->can_do_partial_gc() ?
                new ShenandoahPartialGC(this, _num_regions) :
                NULL;

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _phase_timings = new ShenandoahPhaseTimings();

  if (ShenandoahAllocationTrace) {
    _alloc_tracker = new ShenandoahAllocTracker();
  }

  ShenandoahStringDedup::initialize();
  ShenandoahCodeRoots::initialize();

  return JNI_OK;
}
324
// Constructor: records the policy, zero/NULL-initializes GC state, and
// creates the concurrent mark engine and worker gangs. Heavy-weight setup
// (regions, bitmaps) happens later in initialize().
ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _update_refs_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _connection_matrix(NULL),
  _cancelled_concgc(0),
  _need_update_refs(false),
  _need_reset_bitmaps(false),
  _verifier(NULL),
  _heap_lock(0),
  _used_at_last_gc(0),
  _alloc_seq_at_last_gc_start(0),
  _alloc_seq_at_last_gc_end(0),
  _safepoint_workers(NULL),
#ifdef ASSERT
  _heap_lock_owner(NULL),
  _heap_expansion_count(0),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _phase_timings(NULL),
  _alloc_tracker(NULL)
{
  log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  // Need at least one worker even if both thread-count flags are zero.
  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  // Optional extra gang for parallelizing safepoint cleanup.
  if (ParallelSafepointCleanupThreads > 1) {
    _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
                                                ParallelSafepointCleanupThreads,
                                                false, false);
    _safepoint_workers->initialize_workers();
  }
}
388
389 class ShenandoahResetNextBitmapTask : public AbstractGangTask {
390 private:
391 ShenandoahHeapRegionSet* _regions;
392
393 public:
394 ShenandoahResetNextBitmapTask(ShenandoahHeapRegionSet* regions) :
395 AbstractGangTask("Parallel Reset Bitmap Task"),
396 _regions(regions) {
397 _regions->clear_current_index();
398 }
399
400 void work(uint worker_id) {
401 ShenandoahHeapRegion* region = _regions->claim_next();
402 ShenandoahHeap* heap = ShenandoahHeap::heap();
403 while (region != NULL) {
404 if (region->is_committed()) {
405 HeapWord* bottom = region->bottom();
406 HeapWord* top = heap->next_top_at_mark_start(region->bottom());
407 if (top > bottom) {
408 heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
409 }
410 assert(heap->is_next_bitmap_clear_range(bottom, region->end()), "must be clear");
411 }
412 region = _regions->claim_next();
413 }
414 }
415 };
416
417 void ShenandoahHeap::reset_next_mark_bitmap(WorkGang* workers) {
418 assert_gc_workers(workers->active_workers());
419
420 ShenandoahResetNextBitmapTask task = ShenandoahResetNextBitmapTask(_ordered_regions);
421 workers->run_task(&task);
422 }
423
424 class ShenandoahResetCompleteBitmapTask : public AbstractGangTask {
425 private:
426 ShenandoahHeapRegionSet* _regions;
427
428 public:
429 ShenandoahResetCompleteBitmapTask(ShenandoahHeapRegionSet* regions) :
430 AbstractGangTask("Parallel Reset Bitmap Task"),
431 _regions(regions) {
432 _regions->clear_current_index();
433 }
434
435 void work(uint worker_id) {
436 ShenandoahHeapRegion* region = _regions->claim_next();
437 ShenandoahHeap* heap = ShenandoahHeap::heap();
438 while (region != NULL) {
439 if (region->is_committed()) {
440 HeapWord* bottom = region->bottom();
441 HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
442 if (top > bottom) {
443 heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
444 }
445 assert(heap->is_complete_bitmap_clear_range(bottom, region->end()), "must be clear");
446 }
447 region = _regions->claim_next();
448 }
449 }
450 };
451
452 void ShenandoahHeap::reset_complete_mark_bitmap(WorkGang* workers) {
453 assert_gc_workers(workers->active_workers());
454
455 ShenandoahResetCompleteBitmapTask task = ShenandoahResetCompleteBitmapTask(_ordered_regions);
456 workers->run_task(&task);
457 }
458
459 bool ShenandoahHeap::is_next_bitmap_clear() {
460 for (size_t idx = 0; idx < _num_regions; idx++) {
461 ShenandoahHeapRegion* r = _ordered_regions->get(idx);
462 if (r->is_committed() && !is_next_bitmap_clear_range(r->bottom(), r->end())) {
463 return false;
464 }
465 }
466 return true;
467 }
468
// True iff no bit is set in the "next" bitmap within [start, end).
bool ShenandoahHeap::is_next_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}
472
// True iff no bit is set in the "complete" bitmap within [start, end).
bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}
476
477 void ShenandoahHeap::print_on(outputStream* st) const {
478 st->print_cr("Shenandoah Heap");
479 st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
480 capacity() / K, committed() / K, used() / K);
481 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
482 num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
483
484 st->print("Status: ");
485 if (concurrent_mark_in_progress()) {
486 st->print("marking ");
487 } else if (is_evacuation_in_progress()) {
488 st->print("evacuating ");
489 } else if (is_update_refs_in_progress()) {
490 st->print("updating refs ");
491 } else {
492 st->print("idle ");
493 }
494 if (cancelled_concgc()) {
770 }
771
// Root closure used during evacuation: for each root slot pointing into the
// collection set, evacuates the object if it has not been copied yet and
// rewrites the slot with the to-space address.
class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked_complete(obj), "only evacuate marked objects %d %d",
               _heap->is_marked_complete(obj), _heap->is_marked_complete(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        // Object still forwards to itself: nobody evacuated it yet, do it now.
        if (oopDesc::unsafe_equals(resolved, obj)) {
          bool evac;
          resolved = _heap->evacuate_object(obj, _thread, evac);
        }
        // Point the root slot at the to-space copy.
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};
810
811 class ShenandoahEvacuateRootsClosure: public ExtendedOopClosure {
834 }
835
836 public:
837 void do_oop(oop* p) {
838 do_oop_work(p);
839 }
840 void do_oop(narrowOop* p) {
841 do_oop_work(p);
842 }
843 };
844
845 class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
846 private:
847 ShenandoahHeap* const _heap;
848 Thread* const _thread;
849 public:
850 ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
851 _heap(heap), _thread(Thread::current()) {}
852
853 void do_object(oop p) {
854 assert(_heap->is_marked_complete(p), "expect only marked objects");
855 if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
856 bool evac;
857 _heap->evacuate_object(p, _thread, evac);
858 }
859 }
860 };
861
862 class ShenandoahParallelEvacuationTask : public AbstractGangTask {
863 private:
864 ShenandoahHeap* const _sh;
865 ShenandoahCollectionSet* const _cs;
866 volatile jbyte _claimed_codecache;
867
868 bool claim_codecache() {
869 jbyte old = Atomic::cmpxchg((jbyte)1, &_claimed_codecache, (jbyte)0);
870 return old == 0;
871 }
872 public:
873 ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
874 ShenandoahCollectionSet* cs) :
1437 if (skip_humongous_continuation && current->is_humongous_continuation()) {
1438 continue;
1439 }
1440 if (skip_cset_regions && in_collection_set(current)) {
1441 continue;
1442 }
1443 if (blk->heap_region_do(current)) {
1444 return;
1445 }
1446 }
1447 }
1448
1449 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1450 private:
1451 ShenandoahHeap* sh;
1452 public:
1453 ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}
1454
1455 bool heap_region_do(ShenandoahHeapRegion* r) {
1456 r->clear_live_data();
1457 sh->set_next_top_at_mark_start(r->bottom(), r->top());
1458 return false;
1459 }
1460 };
1461
// Prepares the heap for a concurrent mark cycle: verification, TLAB
// retirement, liveness reset, and root marking initialization.
void ShenandoahHeap::start_concurrent_marking() {
  if (ShenandoahVerify) {
    verifier()->verify_before_concmark();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
    accumulate_statistics_all_tlabs();
  }

  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
    ensure_parsability(true);
  }
  _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
  _used_start_gc = used();

  // Clear per-region liveness and set next-TAMS to current top everywhere.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
    ShenandoahClearLivenessClosure clc(this);
    heap_region_iterate(&clc);
  }

  // Make above changes visible to worker threads
  OrderAccess::fence();

  concurrentMark()->init_mark_roots();

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
    resize_all_tlabs();
  }
}
1498
1499 void ShenandoahHeap::swap_mark_bitmaps() {
1500 // Swap bitmaps.
1501 MarkBitMap* tmp1 = _complete_mark_bit_map;
1502 _complete_mark_bit_map = _next_mark_bit_map;
1503 _next_mark_bit_map = tmp1;
1504
1505 // Swap top-at-mark-start pointers
1506 HeapWord** tmp2 = _complete_top_at_mark_starts;
1507 _complete_top_at_mark_starts = _next_top_at_mark_starts;
1508 _next_top_at_mark_starts = tmp2;
1509
1510 HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1511 _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1512 _next_top_at_mark_starts_base = tmp3;
1513 }
1514
1515
// Finishes a concurrent mark cycle. On normal (non-cancelled) completion,
// promotes the "next" marking state to "complete" via swap_mark_bitmaps().
void ShenandoahHeap::stop_concurrent_marking() {
  assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // If we needed to update refs, and concurrent marking has been cancelled,
    // we need to finish updating references.
    set_need_update_refs(false);
    swap_mark_bitmaps();
  }
  set_concurrent_mark_in_progress(false);

  LogTarget(Trace, gc, region) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    ls.print_cr("Regions at stopping the concurrent mark:");
    print_heap_regions_on(&ls);
  }
}
1534
// Sets the global concurrent-mark flag and activates/deactivates the SATB
// mark queues of all Java threads to match.
void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
  _concurrent_mark_in_progress = in_progress ? 1 : 0;
  JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
}
1539
1540 void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
1541 // Note: it is important to first release the _evacuation_in_progress flag here,
1542 // so that Java threads can get out of oom_during_evacuation() and reach a safepoint,
1584 return result;
1585 }
1586
// Extra words required per object for the Brooks forwarding pointer.
uint ShenandoahHeap::oop_extra_words() {
  return BrooksPointer::word_size();
}
1590
// heap_no_check(): the heap may not be fully set up yet; do_object_b asserts
// non-NULL before use.
ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}
1594
// Liveness query that first resolves obj to its to-space copy, then consults
// the "next" mark bitmap.
bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
  // Follow the forwarding pointer before checking marks.
  obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
#ifdef ASSERT
  if (_heap->concurrent_mark_in_progress()) {
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  }
#endif
  assert(!oopDesc::is_null(obj), "null");
  return _heap->is_marked_next(obj);
}
1606
// heap_no_check(): the heap may not be fully set up yet; do_object_b asserts
// non-NULL before use.
ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}
1610
// Liveness query for objects already known to be in to-space: consults the
// "next" mark bitmap directly, without resolving forwardees.
bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
  assert(!oopDesc::is_null(obj), "null");
  assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  return _heap->is_marked_next(obj);
}
1617
1618 BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
1619 return need_update_refs() ?
1620 (BoolObjectClosure*) &_forwarded_is_alive :
1621 (BoolObjectClosure*) &_is_alive;
1622 }
1623
1624 void ShenandoahHeap::ref_processing_init() {
1625 MemRegion mr = reserved_region();
1626
1627 _forwarded_is_alive.init(ShenandoahHeap::heap());
1628 _is_alive.init(ShenandoahHeap::heap());
1629 assert(_max_workers > 0, "Sanity");
1630
1631 _ref_processor =
1632 new ReferenceProcessor(mr, // span
1633 ParallelRefProcEnabled, // MT processing
1634 _max_workers, // Degree of MT processing
1635 true, // MT discovery
1782 _need_update_refs = need_update_refs;
1783 }
1784
1785 //fixme this should be in heapregionset
1786 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
1787 size_t region_idx = r->region_number() + 1;
1788 ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
1789 guarantee(next->region_number() == region_idx, "region number must match");
1790 while (next->is_humongous()) {
1791 region_idx = next->region_number() + 1;
1792 next = _ordered_regions->get(region_idx);
1793 guarantee(next->region_number() == region_idx, "region number must match");
1794 }
1795 return next;
1796 }
1797
// Accessor for the monitoring/management support object.
ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
  return _monitoring_support;
}
1801
// Bitmap holding marks of the last completed mark cycle.
MarkBitMap* ShenandoahHeap::complete_mark_bit_map() {
  return _complete_mark_bit_map;
}
1805
// Bitmap being populated by the current/upcoming mark cycle.
MarkBitMap* ShenandoahHeap::next_mark_bit_map() {
  return _next_mark_bit_map;
}
1809
// Makes region r available for allocation via the free set.
void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
  _free_regions->add_region(r);
}
1813
// Removes all regions from the free set.
void ShenandoahHeap::clear_free_regions() {
  _free_regions->clear();
}
1817
// Address of the biased collection-set membership map, used by generated
// code for fast in-cset tests.
address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != NULL, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}
1823
// Address of the cancellation flag, used by generated code to poll for
// GC cancellation.
address ShenandoahHeap::cancelled_concgc_addr() {
  return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
}
1827
1828
// Largest alignment the heap may require, used before region size is known.
size_t ShenandoahHeap::conservative_max_heap_alignment() {
  return ShenandoahMaxRegionSize;
}
1832
// Bytes allocated since the last concurrent mark started.
size_t ShenandoahHeap::bytes_allocated_since_cm() {
  return _bytes_allocated_since_cm;
}
1836
// Resets the allocation counter tracked since the last concurrent mark.
void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
  _bytes_allocated_since_cm = bytes;
}
1840
// Records the "next" top-at-mark-start for the region containing
// region_base. The array is biased by heap base, so the shifted address
// indexes it directly.
void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  _next_top_at_mark_starts[index] = addr;
}
1845
// Returns the "next" top-at-mark-start for the region containing region_base.
HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  return _next_top_at_mark_starts[index];
}
1850
// Records the "complete" top-at-mark-start for the region containing
// region_base (biased-array indexing, as above).
void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  _complete_top_at_mark_starts[index] = addr;
}
1855
// Returns the "complete" top-at-mark-start for the region containing region_base.
HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
  uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
  return _complete_top_at_mark_starts[index];
}
1860
// Toggles the full-GC-in-progress flag.
void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress = in_progress;
}
1864
// True while a full (stop-the-world) GC is running.
bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress;
}
1868
// Toggles the update-references-in-progress flag.
void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  _update_refs_in_progress = in_progress;
}
1872
// True while the update-references phase is running.
bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _update_refs_in_progress;
}
1876
1877 void ShenandoahHeap::register_nmethod(nmethod* nm) {
1878 ShenandoahCodeRoots::add_nmethod(nm);
1968 private:
1969 T cl;
1970 ShenandoahHeap* _heap;
1971 ShenandoahHeapRegionSet* _regions;
1972 bool _concurrent;
1973 public:
  // regions: the set of regions to process; concurrent: whether the task
  // runs concurrently with mutators (enables STS joining and yielding).
  ShenandoahUpdateHeapRefsTask(ShenandoahHeapRegionSet* regions, bool concurrent) :
    AbstractGangTask("Concurrent Update References Task"),
    cl(T()),
    _heap(ShenandoahHeap::heap()),
    _regions(regions),
    _concurrent(concurrent) {
  }
1981
  void work(uint worker_id) {
    // Join the suspendible thread set when concurrent, so workers yield to
    // safepoints.
    SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      if (_heap->in_collection_set(r)) {
        // Collection-set regions are evacuated, not updated: just clear the
        // marked portion of the complete bitmap for this region.
        HeapWord* bottom = r->bottom();
        HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
        if (top > bottom) {
          _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
        }
      } else {
        if (r->is_active()) {
          // Update all references in the live objects of this region.
          _heap->marked_object_oop_safe_iterate(r, &cl);
        }
      }
      // Bail out early if the GC cycle was cancelled.
      if (_heap->check_cancelled_concgc_and_yield(_concurrent)) {
        return;
      }
      r = _regions->claim_next();
    }
  }
2003 };
2004
2005 void ShenandoahHeap::update_heap_references(ShenandoahHeapRegionSet* update_regions, bool concurrent) {
2006 if (UseShenandoahMatrix) {
2007 ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(update_regions, concurrent);
2008 workers()->run_task(&task);
2009 } else {
2010 ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(update_regions, concurrent);
2172 decrease_used(r->used());
2173 r->recycle();
2174 _free_regions->add_region(r);
2175 }
2176 }
2177 SpinPause(); // allow allocators to barge the lock
2178 }
2179 }
2180
2181 _shenandoah_policy->record_bytes_reclaimed(bytes_reclaimed);
2182 }
2183
// Prints the heap summary followed by per-region details.
void ShenandoahHeap::print_extended_on(outputStream *st) const {
  print_on(st);
  print_heap_regions_on(st);
}
2188
2189 bool ShenandoahHeap::commit_bitmaps(ShenandoahHeapRegion* r) {
2190 size_t len = _bitmap_words_per_region * HeapWordSize;
2191 size_t off = r->region_number() * _bitmap_words_per_region;
2192 if (!os::commit_memory((char*)(_bitmap0_region.start() + off), len, false)) {
2193 return false;
2194 }
2195 if (!os::commit_memory((char*)(_bitmap1_region.start() + off), len, false)) {
2196 return false;
2197 }
2198 return true;
2199 }
2200
2201 bool ShenandoahHeap::uncommit_bitmaps(ShenandoahHeapRegion* r) {
2202 size_t len = _bitmap_words_per_region * HeapWordSize;
2203 size_t off = r->region_number() * _bitmap_words_per_region;
2204 if (!os::uncommit_memory((char*)(_bitmap0_region.start() + off), len)) {
2205 return false;
2206 }
2207 if (!os::uncommit_memory((char*)(_bitmap1_region.start() + off), len)) {
2208 return false;
2209 }
2210 return true;
2211 }
|
65 oop obj = oopDesc::decode_heap_oop_not_null(o);
66 assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)),
67 "need to-space object here obj: "PTR_FORMAT" , rb(obj): "PTR_FORMAT", p: "PTR_FORMAT,
68 p2i(obj), p2i(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), p2i(p));
69 }
70 }
71
// Out-of-line virtual entry points; both forward to the non-virtual
// template implementation do_oop_nv().
void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void ShenandoahAssertToSpaceClosure::do_oop(oop* p) { do_oop_nv(p); }
74 #endif
75
// Human-readable GC name, used in logging and monitoring output.
const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}
79
80 class ShenandoahPretouchTask : public AbstractGangTask {
81 private:
82 ShenandoahHeapRegionSet* _regions;
83 const size_t _bitmap_size;
84 const size_t _page_size;
85 char* _bitmap_base;
86 public:
87 ShenandoahPretouchTask(ShenandoahHeapRegionSet* regions,
88 char* bitmap_base, size_t bitmap_size,
89 size_t page_size) :
90 AbstractGangTask("Shenandoah PreTouch",
91 Universe::is_fully_initialized() ? GCId::current_raw() :
92 // During VM initialization there is
93 // no GC cycle that this task can be
94 // associated with.
95 GCId::undefined()),
96 _bitmap_base(bitmap_base),
97 _regions(regions),
98 _bitmap_size(bitmap_size),
99 _page_size(page_size) {
100 _regions->clear_current_index();
101 };
102
103 virtual void work(uint worker_id) {
104 ShenandoahHeapRegion* r = _regions->claim_next();
105 while (r != NULL) {
106 log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
107 r->region_number(), p2i(r->bottom()), p2i(r->end()));
108 os::pretouch_memory(r->bottom(), r->end(), _page_size);
109
110 size_t start = r->region_number() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
111 size_t end = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
112 assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
113
114 log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
115 r->region_number(), p2i(_bitmap_base + start), p2i(_bitmap_base + end));
116 os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
117
118 r = _regions->claim_next();
119 }
120 }
121 };
122
// One-time heap setup: commits the initial heap, builds the region sets,
// reserves and (partially) commits the mark bitmap, optionally pretouches,
// and creates the auxiliary GC subsystems. Returns JNI_OK on success.
jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  // Sanity checks on the Brooks forwarding-pointer layout before any allocation.
  BrooksPointer::initial_checks();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();
  size_t heap_alignment = collector_policy()->heap_alignment();

  if (ShenandoahAlwaysPreTouch) {
    // Enabled pre-touch means the entire heap is committed right away.
    init_byte_size = max_byte_size;
  }

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);

  // Total region count comes from the max heap size; only the initial
  // portion of regions is committed up front.
  _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
  size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
  _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
  _committed = _initial_size;

  log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
  if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
    vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
  }

  size_t reg_size_words = ShenandoahHeapRegion::region_size_words();

  _ordered_regions = new ShenandoahHeapRegionSet(_num_regions);
  _free_regions = new ShenandoahFreeSet(_ordered_regions, _num_regions);

  _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());

  // Top-at-mark-start (TAMS) array; the pointer is biased by the heap base so
  // it can be indexed directly by (region base address >> region size shift).
  _top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
  _top_at_mark_starts = _top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());


  {
    ShenandoahHeapLocker locker(lock());
    for (size_t i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
                                                         (HeapWord*) pgc_rs.base() + reg_size_words * i,
                                                         reg_size_words,
                                                         i,
                                                         i < num_committed_regions);

      _top_at_mark_starts_base[i] = r->bottom();

      // Add to ordered regions first.
      // We use the active size of ordered regions as the number of active regions in heap,
      // free set and collection set use the number to assert the correctness of incoming regions.
      _ordered_regions->add_region(r);
      _free_regions->add_region(r);
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }
  }

  assert(_ordered_regions->active_regions() == _num_regions, "Must match");
  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "misaligned heap: "PTR_FORMAT, p2i(base()));

  LogTarget(Trace, gc, region) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    log_trace(gc, region)("All Regions");
    _ordered_regions->print_on(&ls);
  // Mark bitmap sizing: one slice of the bitmap covers exactly one region.
  _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t bitmap_bytes_per_region = _bitmap_size / _num_regions;
  _bitmap_words_per_region = bitmap_bytes_per_region / HeapWordSize;

  // Slices must be power-of-two sized and page-granular so they can be
  // committed/uncommitted independently per region (see commit_bitmaps()).
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
  guarantee((bitmap_bytes_per_region % os::vm_page_size()) == 0,
            "Bitmap bytes per region should be page-granular: bpr = " SIZE_FORMAT ", page size = %d",
            bitmap_bytes_per_region, os::vm_page_size());
  guarantee(is_power_of_2(_bitmap_words_per_region),
            "Bitmap words per region Should be power of two: " SIZE_FORMAT, _bitmap_words_per_region);
  guarantee(bitmap_bytes_per_region >= (size_t)os::vm_page_size(),
            "Bitmap slice per region (" SIZE_FORMAT ") should be larger than page size (%d)",
            bitmap_bytes_per_region, os::vm_page_size());

  // Use large pages for the bitmap only when a slice spans at least one large page.
  size_t bitmap_page_size = UseLargePages && (bitmap_bytes_per_region >= (size_t)os::large_page_size()) ?
                            (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);

  {
    // Commit bitmap slices for the regions that are committed up front.
    ShenandoahHeapLocker locker(lock());
    for (size_t i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* r = _ordered_regions->get(i);
      if (r->is_committed()) {
        commit_bitmaps(r);
      }
    }
  }

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  if (ShenandoahVerify) {
    // A second, fully-committed bitmap used only by the heap verifier.
    ReservedSpace verify_bitmap(_bitmap_size, page_size);
    os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
                              "couldn't allocate verification bitmap");
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _ordered_regions->count(), page_size);
    ShenandoahPretouchTask cl(_ordered_regions, bitmap.base(), _bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  __mark_bit_map.initialize(_heap_region, _bitmap_region);
  _mark_bit_map = & __mark_bit_map;

  if (UseShenandoahMatrix) {
    _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
  } else {
    _connection_matrix = NULL;
  }

  // Partial GC is optional; the policy decides whether it is available.
  _partial_gc = _shenandoah_policy->can_do_partial_gc() ?
                new ShenandoahPartialGC(this, _num_regions) :
                NULL;

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _phase_timings = new ShenandoahPhaseTimings();

  if (ShenandoahAllocationTrace) {
    _alloc_tracker = new ShenandoahAllocTracker();
  }

  ShenandoahStringDedup::initialize();
  ShenandoahCodeRoots::initialize();

  return JNI_OK;
}
307
// Constructor: only initializes fields and creates the worker gangs.
// Heap memory reservation and region setup happen later in initialize().
ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _concurrent_mark_in_progress(0),
  _evacuation_in_progress(0),
  _full_gc_in_progress(false),
  _update_refs_in_progress(false),
  _free_regions(NULL),
  _collection_set(NULL),
  _bytes_allocated_since_cm(0),
  _bytes_allocated_during_cm(0),
  _allocated_last_gc(0),
  _used_start_gc(0),
  // Sized for whichever mode needs more threads; clamped to >= 1 below.
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _top_at_mark_starts(NULL),
  _top_at_mark_starts_base(NULL),
  __mark_bit_map(),
  _mark_bit_map(NULL),
  _connection_matrix(NULL),
  _cancelled_concgc(0),
  _need_update_refs(false),
  _need_reset_bitmap(false),
  _bitmap_valid(true),
  _verifier(NULL),
  _heap_lock(0),
  _used_at_last_gc(0),
  _alloc_seq_at_last_gc_start(0),
  _alloc_seq_at_last_gc_end(0),
  _safepoint_workers(NULL),
#ifdef ASSERT
  _heap_lock_owner(NULL),
  _heap_expansion_count(0),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _phase_timings(NULL),
  _alloc_tracker(NULL)
{
  log_info(gc, init)("Parallel GC threads: "UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: "UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _used = 0;

  // Need at least one worker, even if both thread flags are zero.
  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  // Optional gang for parallel safepoint cleanup.
  if (ParallelSafepointCleanupThreads > 1) {
    _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
                                                ParallelSafepointCleanupThreads,
                                                false, false);
    _safepoint_workers->initialize_workers();
  }
}
370
371 class ShenandoahResetBitmapTask : public AbstractGangTask {
372 private:
373 ShenandoahHeapRegionSet* _regions;
374
375 public:
376 ShenandoahResetBitmapTask(ShenandoahHeapRegionSet* regions) :
377 AbstractGangTask("Parallel Reset Bitmap Task"),
378 _regions(regions) {
379 _regions->clear_current_index();
380 }
381
382 void work(uint worker_id) {
383 ShenandoahHeapRegion* region = _regions->claim_next();
384 ShenandoahHeap* heap = ShenandoahHeap::heap();
385 while (region != NULL) {
386 if (region->is_committed()) {
387 HeapWord* bottom = region->bottom();
388 HeapWord* top = heap->top_at_mark_start(region->bottom());
389 if (top > bottom) {
390 heap->mark_bit_map()->clear_range_large(MemRegion(bottom, top));
391 }
392 assert(heap->is_bitmap_clear_range(bottom, region->end()), "must be clear");
393 }
394 region = _regions->claim_next();
395 }
396 }
397 };
398
399 void ShenandoahHeap::reset_mark_bitmap(WorkGang* workers) {
400 assert_gc_workers(workers->active_workers());
401
402 ShenandoahResetBitmapTask task = ShenandoahResetBitmapTask(_ordered_regions);
403 workers->run_task(&task);
404 }
405
406 bool ShenandoahHeap::is_bitmap_clear() {
407 for (size_t idx = 0; idx < _num_regions; idx++) {
408 ShenandoahHeapRegion* r = _ordered_regions->get(idx);
409 if (r->is_committed() && !is_bitmap_clear_range(r->bottom(), r->end())) {
410 return false;
411 }
412 }
413 return true;
414 }
415
416 bool ShenandoahHeap::is_bitmap_clear_range(HeapWord* start, HeapWord* end) {
417 return _mark_bit_map->getNextMarkedWordAddress(start, end) == end;
418 }
419
420 void ShenandoahHeap::print_on(outputStream* st) const {
421 st->print_cr("Shenandoah Heap");
422 st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
423 capacity() / K, committed() / K, used() / K);
424 st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
425 num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
426
427 st->print("Status: ");
428 if (concurrent_mark_in_progress()) {
429 st->print("marking ");
430 } else if (is_evacuation_in_progress()) {
431 st->print("evacuating ");
432 } else if (is_update_refs_in_progress()) {
433 st->print("updating refs ");
434 } else {
435 st->print("idle ");
436 }
437 if (cancelled_concgc()) {
713 }
714
// Oop closure for root scanning during evacuation: evacuates collection-set
// objects (if not already copied) and updates the slot to the to-space copy.
class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;      // cached current thread, used for evacuation allocation
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        assert(_heap->is_marked(obj), "only evacuate marked objects %d %d",
               _heap->is_marked(obj), _heap->is_marked(ShenandoahBarrierSet::resolve_oop_static_not_null(obj)));
        oop resolved = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
        // If resolution returned the object itself, it has no to-space copy
        // yet: evacuate it now (evacuate_object returns the copy to use).
        if (oopDesc::unsafe_equals(resolved, obj)) {
          bool evac;  // out-flag of evacuate_object; value not consulted here
          resolved = _heap->evacuate_object(obj, _thread, evac);
        }
        // Store the to-space reference back into the root slot.
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};
753
754 class ShenandoahEvacuateRootsClosure: public ExtendedOopClosure {
777 }
778
779 public:
780 void do_oop(oop* p) {
781 do_oop_work(p);
782 }
783 void do_oop(narrowOop* p) {
784 do_oop_work(p);
785 }
786 };
787
788 class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
789 private:
790 ShenandoahHeap* const _heap;
791 Thread* const _thread;
792 public:
793 ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
794 _heap(heap), _thread(Thread::current()) {}
795
796 void do_object(oop p) {
797 assert(_heap->is_marked(p), "expect only marked objects");
798 if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p))) {
799 bool evac;
800 _heap->evacuate_object(p, _thread, evac);
801 }
802 }
803 };
804
805 class ShenandoahParallelEvacuationTask : public AbstractGangTask {
806 private:
807 ShenandoahHeap* const _sh;
808 ShenandoahCollectionSet* const _cs;
809 volatile jbyte _claimed_codecache;
810
811 bool claim_codecache() {
812 jbyte old = Atomic::cmpxchg((jbyte)1, &_claimed_codecache, (jbyte)0);
813 return old == 0;
814 }
815 public:
816 ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
817 ShenandoahCollectionSet* cs) :
1380 if (skip_humongous_continuation && current->is_humongous_continuation()) {
1381 continue;
1382 }
1383 if (skip_cset_regions && in_collection_set(current)) {
1384 continue;
1385 }
1386 if (blk->heap_region_do(current)) {
1387 return;
1388 }
1389 }
1390 }
1391
1392 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1393 private:
1394 ShenandoahHeap* sh;
1395 public:
1396 ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}
1397
1398 bool heap_region_do(ShenandoahHeapRegion* r) {
1399 r->clear_live_data();
1400 sh->set_top_at_mark_start(r->bottom(), r->top());
1401 return false;
1402 }
1403 };
1404
// Prologue of a concurrent-mark cycle: verifies (optionally), snapshots
// allocation stats, activates SATB, resets liveness/TAMS, and scans roots.
void ShenandoahHeap::start_concurrent_marking() {
  if (ShenandoahVerify) {
    verifier()->verify_before_concmark();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
    accumulate_statistics_all_tlabs();
  }

  // Activates SATB barriers on all threads (see set_concurrent_mark_in_progress).
  set_concurrent_mark_in_progress(true);
  // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
    ensure_parsability(true);
  }
  _shenandoah_policy->record_bytes_allocated(_bytes_allocated_since_cm);
  _used_start_gc = used();

  {
    // Clear per-region liveness and set TAMS = top for every region.
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
    ShenandoahClearLivenessClosure clc(this);
    heap_region_iterate(&clc);
  }

  // Make above changes visible to worker threads
  OrderAccess::fence();

  concurrentMark()->init_mark_roots();

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
    resize_all_tlabs();
  }
}
1441
// Epilogue of the concurrent-mark phase: clears the mark-in-progress state
// and, on a clean finish, drops the update-refs requirement.
void ShenandoahHeap::stop_concurrent_marking() {
  assert(concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // Marking finished normally, so the update-refs flag can be cleared.
    // On cancellation the flag is deliberately kept, so that a later phase
    // can finish updating references.
    set_need_update_refs(false);
  }
  set_concurrent_mark_in_progress(false);

  LogTarget(Trace, gc, region) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    ls.print_cr("Regions at stopping the concurrent mark:");
    print_heap_regions_on(&ls);
  }
}
1459
1460 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1461 _concurrent_mark_in_progress = in_progress ? 1 : 0;
1462 JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1463 }
1464
1465 void ShenandoahHeap::set_evacuation_in_progress_concurrently(bool in_progress) {
1466 // Note: it is important to first release the _evacuation_in_progress flag here,
1467 // so that Java threads can get out of oom_during_evacuation() and reach a safepoint,
1509 return result;
1510 }
1511
// Extra words required per object for the Brooks forwarding pointer.
uint ShenandoahHeap::oop_extra_words() {
  return BrooksPointer::word_size();
}
1515
// Uses heap_no_check() because this closure may be constructed before the
// heap singleton is fully published; _heap is asserted non-NULL on use.
ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}
1519
// Liveness query that first forwards 'obj' to its to-space copy, then
// consults the mark bitmap. Used while references may still point to from-space.
bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
  obj = ShenandoahBarrierSet::resolve_oop_static_not_null(obj);
#ifdef ASSERT
  if (_heap->concurrent_mark_in_progress()) {
    // During marking the resolved oop must already be stable in to-space:
    // resolving it a second time must be a no-op.
    assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  }
#endif
  assert(!oopDesc::is_null(obj), "null");
  return _heap->is_marked(obj);
}
1531
// Uses heap_no_check() because this closure may be constructed before the
// heap singleton is fully published; _heap is asserted non-NULL on use.
ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
  _heap(ShenandoahHeap::heap_no_check()) {
}
1535
// Liveness query for to-space-only oops: no forwarding, just the mark bitmap.
// Asserts that 'obj' already resolves to itself (i.e. is in to-space).
bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
  assert(_heap != NULL, "sanity");
  assert(!oopDesc::is_null(obj), "null");
  assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), "only query to-space");
  return _heap->is_marked(obj);
}
1542
1543 BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
1544 return need_update_refs() ?
1545 (BoolObjectClosure*) &_forwarded_is_alive :
1546 (BoolObjectClosure*) &_is_alive;
1547 }
1548
1549 void ShenandoahHeap::ref_processing_init() {
1550 MemRegion mr = reserved_region();
1551
1552 _forwarded_is_alive.init(ShenandoahHeap::heap());
1553 _is_alive.init(ShenandoahHeap::heap());
1554 assert(_max_workers > 0, "Sanity");
1555
1556 _ref_processor =
1557 new ReferenceProcessor(mr, // span
1558 ParallelRefProcEnabled, // MT processing
1559 _max_workers, // Degree of MT processing
1560 true, // MT discovery
1707 _need_update_refs = need_update_refs;
1708 }
1709
1710 //fixme this should be in heapregionset
1711 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
1712 size_t region_idx = r->region_number() + 1;
1713 ShenandoahHeapRegion* next = _ordered_regions->get(region_idx);
1714 guarantee(next->region_number() == region_idx, "region number must match");
1715 while (next->is_humongous()) {
1716 region_idx = next->region_number() + 1;
1717 next = _ordered_regions->get(region_idx);
1718 guarantee(next->region_number() == region_idx, "region number must match");
1719 }
1720 return next;
1721 }
1722
// Accessor for the monitoring support object created in initialize().
ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
  return _monitoring_support;
}
1726
// Accessor for the (single, visible-here) mark bitmap.
MarkBitMap* ShenandoahHeap::mark_bit_map() {
  return _mark_bit_map;
}
1730
// Hands a region to the free set, making it available for allocation.
void ShenandoahHeap::add_free_region(ShenandoahHeapRegion* r) {
  _free_regions->add_region(r);
}
1734
// Empties the free set (regions themselves are untouched).
void ShenandoahHeap::clear_free_regions() {
  _free_regions->clear();
}
1738
// Raw address of the collection set's biased membership map; presumably
// consumed by generated code for fast in-cset tests — verify with callers.
address ShenandoahHeap::in_cset_fast_test_addr() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  assert(heap->collection_set() != NULL, "Sanity");
  return (address) heap->collection_set()->biased_map_address();
}
1744
// Raw address of the GC-cancellation flag; presumably polled directly by
// generated code — verify with callers.
address ShenandoahHeap::cancelled_concgc_addr() {
  return (address) &(ShenandoahHeap::heap()->_cancelled_concgc);
}
1748
1749
// Upper bound on required heap alignment: the largest possible region size.
size_t ShenandoahHeap::conservative_max_heap_alignment() {
  return ShenandoahMaxRegionSize;
}
1753
// Bytes allocated since the last concurrent-mark cycle started.
size_t ShenandoahHeap::bytes_allocated_since_cm() {
  return _bytes_allocated_since_cm;
}
1757
// Resets/overrides the since-concurrent-mark allocation counter.
void ShenandoahHeap::set_bytes_allocated_since_cm(size_t bytes) {
  _bytes_allocated_since_cm = bytes;
}
1761
1762 void ShenandoahHeap::set_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
1763 uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
1764 _top_at_mark_starts[index] = addr;
1765 }
1766
1767 HeapWord* ShenandoahHeap::top_at_mark_start(HeapWord* region_base) {
1768 uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
1769 return _top_at_mark_starts[index];
1770 }
1771
// Flags that a full (stop-the-world) GC is running.
void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
  _full_gc_in_progress = in_progress;
}
1775
// True while a full (stop-the-world) GC is running.
bool ShenandoahHeap::is_full_gc_in_progress() const {
  return _full_gc_in_progress;
}
1779
// Flags that the update-references phase is running.
void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
  _update_refs_in_progress = in_progress;
}
1783
// True while the update-references phase is running.
bool ShenandoahHeap::is_update_refs_in_progress() const {
  return _update_refs_in_progress;
}
1787
1788 void ShenandoahHeap::register_nmethod(nmethod* nm) {
1789 ShenandoahCodeRoots::add_nmethod(nm);
1879 private:
1880 T cl;
1881 ShenandoahHeap* _heap;
1882 ShenandoahHeapRegionSet* _regions;
1883 bool _concurrent;
1884 public:
  // 'concurrent' selects whether workers join the suspendible thread set in
  // work(), so they can yield to safepoints while updating references.
  ShenandoahUpdateHeapRefsTask(ShenandoahHeapRegionSet* regions, bool concurrent) :
    AbstractGangTask("Concurrent Update References Task"),
    cl(T()),
    _heap(ShenandoahHeap::heap()),
    _regions(regions),
    _concurrent(concurrent) {
  }
1892
  // Claims regions and updates references in each; collection-set regions
  // only get their mark bitmap cleared below TAMS instead.
  void work(uint worker_id) {
    // Joining the suspendible set (when concurrent) lets this worker yield
    // at safepoints.
    SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
    ShenandoahHeapRegion* r = _regions->claim_next();
    while (r != NULL) {
      if (_heap->in_collection_set(r)) {
        // No reference updates inside cset regions; reset marks under TAMS.
        HeapWord* bottom = r->bottom();
        HeapWord* top = _heap->top_at_mark_start(r->bottom());
        if (top > bottom) {
          _heap->mark_bit_map()->clear_range_large(MemRegion(bottom, top));
        }
      } else {
        if (r->is_active()) {
          // Walk marked objects and let the closure rewrite their references.
          _heap->marked_object_oop_safe_iterate(r, &cl);
        }
      }
      // Bail out early if the GC has been cancelled (also yields when concurrent).
      if (_heap->check_cancelled_concgc_and_yield(_concurrent)) {
        return;
      }
      r = _regions->claim_next();
    }
  }
1914 };
1915
1916 void ShenandoahHeap::update_heap_references(ShenandoahHeapRegionSet* update_regions, bool concurrent) {
1917 if (UseShenandoahMatrix) {
1918 ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(update_regions, concurrent);
1919 workers()->run_task(&task);
1920 } else {
1921 ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(update_regions, concurrent);
2083 decrease_used(r->used());
2084 r->recycle();
2085 _free_regions->add_region(r);
2086 }
2087 }
2088 SpinPause(); // allow allocators to barge the lock
2089 }
2090 }
2091
2092 _shenandoah_policy->record_bytes_reclaimed(bytes_reclaimed);
2093 }
2094
// Extended printout: heap summary followed by the per-region listing.
void ShenandoahHeap::print_extended_on(outputStream *st) const {
  print_on(st);
  print_heap_regions_on(st);
}
2099
2100 bool ShenandoahHeap::commit_bitmaps(ShenandoahHeapRegion* r) {
2101 size_t len = _bitmap_words_per_region * HeapWordSize;
2102 size_t off = r->region_number() * _bitmap_words_per_region;
2103 if (!os::commit_memory((char*)(_bitmap_region.start() + off), len, false)) {
2104 return false;
2105 }
2106 return true;
2107 }
2108
2109 bool ShenandoahHeap::uncommit_bitmaps(ShenandoahHeapRegion* r) {
2110 size_t len = _bitmap_words_per_region * HeapWordSize;
2111 size_t off = r->region_number() * _bitmap_words_per_region;
2112 if (!os::uncommit_memory((char*)(_bitmap_region.start() + off), len)) {
2113 return false;
2114 }
2115 return true;
2116 }
|