277 _next_mark_bit_map = &_mark_bit_map1;
278
279 _connection_matrix = new ShenandoahConnectionMatrix(_max_regions);
280 _partial_gc = new ShenandoahPartialGC(this, _max_regions);
281
282 _monitoring_support = new ShenandoahMonitoringSupport(this);
283
284 _concurrent_gc_thread = new ShenandoahConcurrentThread();
285
286 ShenandoahMarkCompact::initialize();
287
288 return JNI_OK;
289 }
290
291 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
292 CollectedHeap(),
293 _shenandoah_policy(policy),
294 _concurrent_mark_in_progress(0),
295 _evacuation_in_progress(0),
296 _full_gc_in_progress(false),
297 _free_regions(NULL),
298 _collection_set(NULL),
299 _bytes_allocated_since_cm(0),
300 _bytes_allocated_during_cm(0),
301 _max_allocated_gc(0),
302 _allocated_last_gc(0),
303 _used_start_gc(0),
304 _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
305 _ref_processor(NULL),
306 _in_cset_fast_test(NULL),
307 _in_cset_fast_test_base(NULL),
308 _next_top_at_mark_starts(NULL),
309 _next_top_at_mark_starts_base(NULL),
310 _complete_top_at_mark_starts(NULL),
311 _complete_top_at_mark_starts_base(NULL),
312 _mark_bit_map0(),
313 _mark_bit_map1(),
314 _connection_matrix(NULL),
315 _cancelled_concgc(false),
316 _need_update_refs(false),
2364 }
2365
2366 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2367 uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2368 _complete_top_at_mark_starts[index] = addr;
2369 }
2370
2371 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2372 uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2373 return _complete_top_at_mark_starts[index];
2374 }
2375
// Set or clear the flag indicating a (stop-the-world) Full GC is running.
2376 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2377 _full_gc_in_progress = in_progress;
2378 }
2379
// Returns true while a Full GC is in progress.
2380 bool ShenandoahHeap::is_full_gc_in_progress() const {
2381 return _full_gc_in_progress;
2382 }
2383
2384 class NMethodOopInitializer : public OopClosure {
2385 private:
2386 ShenandoahHeap* _heap;
2387 public:
2388 NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
2389 }
2390
2391 private:
2392 template <class T>
2393 inline void do_oop_work(T* p) {
2394 T o = oopDesc::load_heap_oop(p);
2395 if (! oopDesc::is_null(o)) {
2396 oop obj1 = oopDesc::decode_heap_oop_not_null(o);
2397 oop obj2 = oopDesc::bs()->write_barrier(obj1);
2398 if (! oopDesc::unsafe_equals(obj1, obj2)) {
2399 oopDesc::encode_store_heap_oop(p, obj2);
2400 }
2401 }
2402 }
2403
2562 size_t ShenandoahHeap::garbage() {
2563 ShenandoahCountGarbageClosure cl;
2564 heap_region_iterate(&cl);
2565 return cl.garbage();
2566 }
2567
// Accessor for the inter-region connection matrix (allocated during heap
// initialization).
2568 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() {
2569 return _connection_matrix;
2570 }
2571
// Accessor for the partial GC driver (allocated during heap initialization).
2572 ShenandoahPartialGC* ShenandoahHeap::partial_gc() {
2573 return _partial_gc;
2574 }
2575
2576 void ShenandoahHeap::do_partial_collection() {
2577 {
2578 ShenandoahHeapLock lock(this);
2579 partial_gc()->prepare();
2580 }
2581 partial_gc()->do_partial_collection();
2582 }
2583
2584 #ifdef ASSERT
// Debug-only check: the heap lock must be held, and held by the calling
// thread.
2585 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2586 assert(_heap_lock == locked, "must be locked");
2587 assert(_heap_lock_owner == Thread::current(), "must be owned by current thread");
2588 }
2589
// Debug-only check: the caller either owns the heap lock, or is the VM
// thread executing at a safepoint.
2590 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2591 Thread* thr = Thread::current();
2592 assert((_heap_lock == locked && _heap_lock_owner == thr) ||
2593 (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()),
2594 "must own heap lock or by VM thread at safepoint");
2595 }
2596
2597 #endif
|
277 _next_mark_bit_map = &_mark_bit_map1;
278
279 _connection_matrix = new ShenandoahConnectionMatrix(_max_regions);
280 _partial_gc = new ShenandoahPartialGC(this, _max_regions);
281
282 _monitoring_support = new ShenandoahMonitoringSupport(this);
283
284 _concurrent_gc_thread = new ShenandoahConcurrentThread();
285
286 ShenandoahMarkCompact::initialize();
287
288 return JNI_OK;
289 }
290
291 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
292 CollectedHeap(),
293 _shenandoah_policy(policy),
294 _concurrent_mark_in_progress(0),
295 _evacuation_in_progress(0),
296 _full_gc_in_progress(false),
297 _update_refs_in_progress(false),
298 _free_regions(NULL),
299 _collection_set(NULL),
300 _bytes_allocated_since_cm(0),
301 _bytes_allocated_during_cm(0),
302 _max_allocated_gc(0),
303 _allocated_last_gc(0),
304 _used_start_gc(0),
305 _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
306 _ref_processor(NULL),
307 _in_cset_fast_test(NULL),
308 _in_cset_fast_test_base(NULL),
309 _next_top_at_mark_starts(NULL),
310 _next_top_at_mark_starts_base(NULL),
311 _complete_top_at_mark_starts(NULL),
312 _complete_top_at_mark_starts_base(NULL),
313 _mark_bit_map0(),
314 _mark_bit_map1(),
315 _connection_matrix(NULL),
316 _cancelled_concgc(false),
317 _need_update_refs(false),
2365 }
2366
2367 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2368 uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2369 _complete_top_at_mark_starts[index] = addr;
2370 }
2371
2372 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2373 uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_shift();
2374 return _complete_top_at_mark_starts[index];
2375 }
2376
// Set or clear the flag indicating a (stop-the-world) Full GC is running.
2377 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2378 _full_gc_in_progress = in_progress;
2379 }
2380
// Returns true while a Full GC is in progress.
2381 bool ShenandoahHeap::is_full_gc_in_progress() const {
2382 return _full_gc_in_progress;
2383 }
2384
// Set or clear the flag indicating the update-references phase is running.
2385 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2386 _update_refs_in_progress = in_progress;
2387 }
2388
// Returns true while the update-references phase is in progress.
2389 bool ShenandoahHeap::is_update_refs_in_progress() const {
2390 return _update_refs_in_progress;
2391 }
2392
2393 class NMethodOopInitializer : public OopClosure {
2394 private:
2395 ShenandoahHeap* _heap;
2396 public:
2397 NMethodOopInitializer() : _heap(ShenandoahHeap::heap()) {
2398 }
2399
2400 private:
2401 template <class T>
2402 inline void do_oop_work(T* p) {
2403 T o = oopDesc::load_heap_oop(p);
2404 if (! oopDesc::is_null(o)) {
2405 oop obj1 = oopDesc::decode_heap_oop_not_null(o);
2406 oop obj2 = oopDesc::bs()->write_barrier(obj1);
2407 if (! oopDesc::unsafe_equals(obj1, obj2)) {
2408 oopDesc::encode_store_heap_oop(p, obj2);
2409 }
2410 }
2411 }
2412
2571 size_t ShenandoahHeap::garbage() {
2572 ShenandoahCountGarbageClosure cl;
2573 heap_region_iterate(&cl);
2574 return cl.garbage();
2575 }
2576
// Accessor for the inter-region connection matrix (allocated during heap
// initialization).
2577 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() {
2578 return _connection_matrix;
2579 }
2580
// Accessor for the partial GC driver (allocated during heap initialization).
2581 ShenandoahPartialGC* ShenandoahHeap::partial_gc() {
2582 return _partial_gc;
2583 }
2584
2585 void ShenandoahHeap::do_partial_collection() {
2586 {
2587 ShenandoahHeapLock lock(this);
2588 partial_gc()->prepare();
2589 }
2590 partial_gc()->do_partial_collection();
2591 }
2592
// Oop closure used during the update-references phase.  Each visited slot
// is handed to ShenandoahHeap::maybe_update_oop_ref(), which presumably
// rewrites the slot if its referent has moved — confirm in ShenandoahHeap.
2593 class ShenandoahUpdateHeapRefsClosure : public ExtendedOopClosure {
2594 ShenandoahHeap* _heap;
2595 public:
2596 ShenandoahUpdateHeapRefsClosure() :
2597 _heap(ShenandoahHeap::heap()) {}
2598
// Non-virtual worker shared by both narrow and wide oop entry points.
2599 template <class T>
2600 inline void do_oop_nv(T* p) {
2601 // tty->print_cr("updating: "PTR_FORMAT, p2i(p));
2602 _heap->maybe_update_oop_ref(p);
2603 }
2604
2605 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
2606 virtual void do_oop(oop* p) { do_oop_nv(p); }
2607 };
2608
// Object closure: applies a ShenandoahUpdateHeapRefsClosure to every
// reference field of each visited object.
2609 class ShenandoahUpdateHeapObjectsClosure : public ObjectClosure {
2610 private:
2611 ShenandoahUpdateHeapRefsClosure _cl;
2612 public:
2613 void do_object(oop obj) {
2614 obj->oop_iterate(&_cl);
2615 }
2616 };
2617
2618 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2619 private:
2620 ShenandoahHeap* _heap;
2621 ShenandoahHeapRegionSet* _regions;
2622
2623 public:
2624 ShenandoahUpdateHeapRefsTask() :
2625 AbstractGangTask("Concurrent Update References Task"),
2626 _heap(ShenandoahHeap::heap()),
2627 _regions(ShenandoahHeap::heap()->regions()) {
2628 _regions->clear_current_index();
2629 }
2630
2631 void work(uint worker_id) {
2632 ShenandoahUpdateHeapObjectsClosure cl;
2633 ShenandoahHeapRegion* r = _regions->claim_next();
2634 while (r != NULL) {
2635 if (! r->is_humongous_continuation() &&
2636 ! _heap->in_collection_set(r) &&
2637 ! r->is_empty()) {
2638 HeapWord* limit = r->concurrent_iteration_safe_limit();
2639 _heap->marked_object_iterate(r, &cl, limit);
2640 } else if (_heap->in_collection_set(r)) {
2641 HeapWord* bottom = r->bottom();
2642 HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
2643 if (top > bottom) {
2644 _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
2645 }
2646 }
2647 r = _regions->claim_next();
2648 }
2649 }
2650 };
2651
2652 void ShenandoahHeap::concurrent_update_heap_references() {
2653 _shenandoah_policy->record_phase_start(ShenandoahCollectorPolicy::conc_update_refs);
2654 ShenandoahUpdateHeapRefsTask task;
2655 workers()->run_task(&task);
2656 _shenandoah_policy->record_phase_end(ShenandoahCollectorPolicy::conc_update_refs);
2657 }
2658
2659 void ShenandoahHeap::prepare_update_refs() {
2660 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2661 set_evacuation_in_progress_at_safepoint(false);
2662 set_update_refs_in_progress(true);
2663 ensure_parsability(true);
2664 for (uint i = 0; i < _num_regions; i++) {
2665 ShenandoahHeapRegion* r = _ordered_regions->get(i);
2666 r->set_concurrent_iteration_safe_limit(r->top());
2667 }
2668 }
2669
// Safepoint epilogue for the update-references phase: update references in
// roots, recycle dirty regions, then drop both update-refs flags.
2670 void ShenandoahHeap::finish_update_refs() {
2671 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2672 concurrentMark()->update_roots();
2673 recycle_dirty_regions();
2674 set_need_update_refs(false);
2675 set_update_refs_in_progress(false);
2676 }
2677
2678 #ifdef ASSERT
// Debug-only check: the heap lock must be held, and held by the calling
// thread.
2679 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2680 assert(_heap_lock == locked, "must be locked");
2681 assert(_heap_lock_owner == Thread::current(), "must be owned by current thread");
2682 }
2683
// Debug-only check: the caller either owns the heap lock, or is the VM
// thread executing at a safepoint.
2684 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2685 Thread* thr = Thread::current();
2686 assert((_heap_lock == locked && _heap_lock_owner == thr) ||
2687 (SafepointSynchronize::is_at_safepoint() && thr->is_VM_thread()),
2688 "must own heap lock or by VM thread at safepoint");
2689 }
2690
2691 #endif
|