--- old/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp 2015-03-31 14:52:48.387189477 +0200
+++ new/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp 2015-03-31 14:52:48.271189482 +0200
@@ -673,10 +673,10 @@
                                                        HeapWord* bottom,              \
                                                        HeapWord* top,                 \
                                                        ClosureType* cl) {             \
-  bool is_par = SharedHeap::heap()->n_par_threads() > 0;                              \
+  bool is_par = GenCollectedHeap::heap()->n_par_threads() > 0;                        \
   if (is_par) {                                                                       \
-    assert(SharedHeap::heap()->n_par_threads() ==                                     \
-           SharedHeap::heap()->workers()->active_workers(), "Mismatch");              \
+    assert(GenCollectedHeap::heap()->n_par_threads() ==                               \
+           GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch");        \
     walk_mem_region_with_cl_par(mr, bottom, top, cl);                                  \
   } else {                                                                             \
     walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                                \
@@ -1907,11 +1907,11 @@
   assert(chunk->is_free() && ffc->is_free(), "Error");
   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
   if (rem_sz < SmallForDictionary) {
-    bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
+    bool is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
     assert(!is_par ||
-           (SharedHeap::heap()->n_par_threads() ==
-            SharedHeap::heap()->workers()->active_workers()), "Mismatch");
+           (GenCollectedHeap::heap()->n_par_threads() ==
+            GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
     returnChunkToFreeList(ffc);
     split(size, rem_sz);
     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
@@ -1982,7 +1982,7 @@
 
 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
   assert(_promoInfo.tracking(), "No preceding save_marks?");
-  assert(SharedHeap::heap()->n_par_threads() == 0,
+  assert(GenCollectedHeap::heap()->n_par_threads() == 0,
          "Shouldn't be called if using parallel gc.");
   return _promoInfo.noPromotions();
 }
@@ -1991,7 +1991,7 @@
                                                                             \
 void CompactibleFreeListSpace::                                             \
 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
-  assert(SharedHeap::heap()->n_par_threads() == 0,                          \
+  assert(GenCollectedHeap::heap()->n_par_threads() == 0,                    \
          "Shouldn't be called (yet) during parallel part of gc.");          \
   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
   /*                                                                        \
--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp 2015-03-31 14:52:48.639189466 +0200
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp 2015-03-31 14:52:48.515189471 +0200
@@ -2128,6 +2128,7 @@
 }
 
 #ifndef PRODUCT
+
 class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
  private:
   unsigned _gc_time_stamp;
@@ -2156,6 +2157,14 @@
   heap_region_iterate(&cl);
   guarantee(!cl.failures(), "all GC time stamps should have been reset");
 }
+
+bool G1CollectedHeap::heap_lock_held_for_gc() {
+  Thread* t = Thread::current();
+  return Heap_lock->owned_by_self()
+      || ((t->is_GC_task_thread() || t->is_VM_thread())
+          && _thread_holds_heap_lock_for_gc);
+}
+
 #endif // PRODUCT
 
 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
@@ -3336,8 +3345,6 @@
 #endif // PRODUCT
 
 G1CollectedHeap* G1CollectedHeap::heap() {
-  assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
-         "not a garbage-first heap");
   return _g1h;
 }
 
--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp 2015-03-31 14:52:48.907189455 +0200
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp 2015-03-31 14:52:48.791189460 +0200
@@ -1106,6 +1106,17 @@
   // region has no marks. Return true if all is well, false if errors
   // are detected.
   bool verify_bitmaps(const char* caller, HeapRegion* hr);
+
+  // True if the heap_lock is held by a non-gc thread invoking a gc
+  // operation.
+  bool _thread_holds_heap_lock_for_gc;
+  // Returns true if the calling thread holds the heap lock,
+  // or the calling thread is a par gc thread and the heap_lock is held
+  // by the vm thread doing a gc operation.
+  bool heap_lock_held_for_gc();
+
+  void set_heap_lock_held_for_gc(bool value) { _thread_holds_heap_lock_for_gc = value; }
+
 #endif // PRODUCT
 
   // If G1VerifyBitmaps is set, verify that the marking bitmaps for
--- old/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp 2015-03-31 14:52:49.123189446 +0200
+++ new/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp 2015-03-31 14:52:49.007189451 +0200
@@ -1460,7 +1460,7 @@
   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
 
   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
-      HeapRegion::GrainWords * _max_survivor_regions);
+      HeapRegion::GrainWords * _max_survivor_regions, counters());
 }
 
 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
--- old/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp 2015-03-31 14:52:49.343189437 +0200
+++ new/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp 2015-03-31 14:52:49.235189441 +0200
@@ -61,9 +61,8 @@
                                       bool clear_all_softrefs) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 
-  SharedHeap* sh = SharedHeap::heap();
 #ifdef ASSERT
-  if (sh->collector_policy()->should_clear_all_soft_refs()) {
+  if (G1CollectedHeap::heap()->collector_policy()->should_clear_all_soft_refs()) {
     assert(clear_all_softrefs, "Policy should have been checked earler");
   }
 #endif
--- old/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp 2015-03-31 14:52:49.543189428 +0200
+++ new/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp 2015-03-31 14:52:49.431189433 +0200
@@ -253,7 +253,8 @@
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
-    Threads::possibly_parallel_oops_do(strong_roots, thread_stack_clds, strong_code);
+    bool is_par = _g1h->n_par_threads() > 0;
+    Threads::possibly_parallel_oops_do(is_par, strong_roots, thread_stack_clds, strong_code);
   }
 }
 
--- old/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp 2015-03-31 14:52:49.735189420 +0200
+++ new/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp 2015-03-31 14:52:49.627189425 +0200
@@ -34,6 +34,19 @@
 #include "gc_implementation/g1/vm_operations_g1.hpp"
 #include "runtime/interfaceSupport.hpp"
 
+bool VM_G1OperationWithAllocRequest::doit_prologue() {
+  bool succeeded = VM_CollectForAllocation::doit_prologue();
+  if (succeeded) {
+    G1CollectedHeap::heap()->set_heap_lock_held_for_gc(true);
+  }
+  return succeeded;
+}
+
+void VM_G1OperationWithAllocRequest::doit_epilogue() {
+  G1CollectedHeap::heap()->set_heap_lock_held_for_gc(false);
+  VM_CollectForAllocation::doit_epilogue();
+}
+
 VM_G1CollectForAllocation::VM_G1CollectForAllocation(uint gc_count_before,
                                                      size_t word_size)
   : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
@@ -225,15 +238,10 @@
 
 void VM_CGC_Operation::doit() {
   TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
-  GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
-  SharedHeap* sh = SharedHeap::heap();
-  // This could go away if CollectedHeap gave access to _gc_is_active...
-  if (sh != NULL) {
-    IsGCActiveMark x;
-    _cl->do_void();
-  } else {
-    _cl->do_void();
-  }
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  GCTraceTime t(_printGCMessage, G1Log::fine(), true, g1h->gc_timer_cm(), g1h->concurrent_mark()->concurrent_gc_id());
+  IsGCActiveMark x;
+  _cl->do_void();
 }
 
 bool VM_CGC_Operation::doit_prologue() {
@@ -244,14 +252,14 @@
   }
   Heap_lock->lock();
-  SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
+  G1CollectedHeap::heap()->set_heap_lock_held_for_gc(true);
   return true;
 }
 
 void VM_CGC_Operation::doit_epilogue() {
   // Note the relative order of the unlocks must match that in
   // VM_GC_Operation::doit_epilogue()
-  SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
+  G1CollectedHeap::heap()->set_heap_lock_held_for_gc(false);
   Heap_lock->unlock();
   if (_needs_pll) {
     release_and_notify_pending_list_lock();
--- old/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp 2015-03-31 14:52:49.931189412 +0200
+++ new/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp 2015-03-31 14:52:49.823189417 +0200
@@ -47,6 +47,11 @@
                                  GCCause::Cause gc_cause)
     : VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
       _pause_succeeded(false) {}
+
+  // override to handle G1 specific locking asserts
+  virtual bool doit_prologue();
+  virtual void doit_epilogue();
+
   bool pause_succeeded() { return _pause_succeeded; }
   void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
   AllocationContext_t allocation_context() { return _allocation_context; }
--- old/src/share/vm/gc_implementation/shared/ageTable.cpp 2015-03-31 14:52:50.127189404 +0200
+++ new/src/share/vm/gc_implementation/shared/ageTable.cpp 2015-03-31 14:52:50.015189409 +0200
@@ -79,7 +79,7 @@
   }
 }
 
-uint ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
+uint ageTable::compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters) {
   size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
   uint result;
 
@@ -126,9 +126,6 @@
     age++;
   }
   if (UsePerfData) {
-    SharedHeap* sh = SharedHeap::heap();
-    CollectorPolicy* policy = sh->collector_policy();
-    GCPolicyCounters* gc_counters = policy->counters();
     gc_counters->tenuring_threshold()->set_value(result);
     gc_counters->desired_survivor_size()->set_value(
       desired_survivor_size*oopSize);
--- old/src/share/vm/gc_implementation/shared/ageTable.hpp 2015-03-31 14:52:50.327189396 +0200
+++ new/src/share/vm/gc_implementation/shared/ageTable.hpp 2015-03-31 14:52:50.215189400 +0200
@@ -29,6 +29,8 @@
 #include "oops/oop.hpp"
 #include "runtime/perfData.hpp"
 
+class GCPolicyCounters;
+
 /* Copyright (c) 1992-2009 Oracle and/or its affiliates, and Stanford University.
    See the LICENSE file for license information.
  */
 
@@ -69,7 +71,7 @@
   void merge_par(ageTable* subTable);
 
   // calculate new tenuring threshold based on age information
-  uint compute_tenuring_threshold(size_t survivor_capacity);
+  uint compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters);
 
  private:
   PerfVariable* _perf_sizes[table_size];
--- old/src/share/vm/gc_implementation/shared/vmGCOperations.cpp 2015-03-31 14:52:50.519189388 +0200
+++ new/src/share/vm/gc_implementation/shared/vmGCOperations.cpp 2015-03-31 14:52:50.411189392 +0200
@@ -116,8 +116,6 @@
     _prologue_succeeded = false;
   } else {
     _prologue_succeeded = true;
-    SharedHeap* sh = SharedHeap::heap();
-    if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = true;
   }
   return _prologue_succeeded;
 }
@@ -126,8 +124,6 @@
 void VM_GC_Operation::doit_epilogue() {
   assert(Thread::current()->is_Java_thread(), "just checking");
   // Release the Heap_lock first.
-  SharedHeap* sh = SharedHeap::heap();
-  if (sh != NULL) sh->_thread_holds_heap_lock_for_gc = false;
   Heap_lock->unlock();
   release_and_notify_pending_list_lock();
 }
--- old/src/share/vm/memory/cardTableModRefBS.cpp 2015-03-31 14:52:50.727189379 +0200
+++ new/src/share/vm/memory/cardTableModRefBS.cpp 2015-03-31 14:52:50.615189384 +0200
@@ -459,12 +459,12 @@
   // equal to active_workers. When a different mechanism for shutting
   // off parallelism is used, then active_workers can be used in
   // place of n_par_threads.
-  int n_threads = SharedHeap::heap()->n_par_threads();
+  int n_threads = GenCollectedHeap::heap()->n_par_threads();
   bool is_par = n_threads > 0;
   if (is_par) {
 #if INCLUDE_ALL_GCS
-    assert(SharedHeap::heap()->n_par_threads() ==
-           SharedHeap::heap()->workers()->active_workers(), "Mismatch");
+    assert(GenCollectedHeap::heap()->n_par_threads() ==
+           GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch");
     non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
 #else  // INCLUDE_ALL_GCS
     fatal("Parallel gc not supported here.");
--- old/src/share/vm/memory/cardTableRS.cpp 2015-03-31 14:52:50.943189370 +0200
+++ new/src/share/vm/memory/cardTableRS.cpp 2015-03-31 14:52:50.827189375 +0200
@@ -167,10 +167,10 @@
   // Cannot yet substitute active_workers for n_par_threads
   // in the case where parallelism is being turned off by
   // setting n_par_threads to 0.
-  _is_par = (SharedHeap::heap()->n_par_threads() > 0);
+  _is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
   assert(!_is_par ||
-         (SharedHeap::heap()->n_par_threads() ==
-          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
+         (GenCollectedHeap::heap()->n_par_threads() ==
+          GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
 }
 
 bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
--- old/src/share/vm/memory/defNewGeneration.cpp 2015-03-31 14:52:51.151189361 +0200
+++ new/src/share/vm/memory/defNewGeneration.cpp 2015-03-31 14:52:51.039189366 +0200
@@ -550,8 +550,9 @@
 
 void DefNewGeneration::adjust_desired_tenuring_threshold() {
   // Set the desired survivor size to half the real survivor space
+  GCPolicyCounters* gc_counters = GenCollectedHeap::heap()->collector_policy()->counters();
   _tenuring_threshold =
-    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize);
+    age_table()->compute_tenuring_threshold(to()->capacity()/HeapWordSize, gc_counters);
 }
 
 void DefNewGeneration::collect(bool full,
--- old/src/share/vm/memory/genCollectedHeap.cpp 2015-03-31 14:52:51.367189352 +0200
+++ new/src/share/vm/memory/genCollectedHeap.cpp 2015-03-31 14:52:51.251189357 +0200
@@ -606,7 +606,8 @@
     // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
     CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;
 
-    Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);
+    bool is_par = n_par_threads() > 0;
+    Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_clds_p, roots_from_code_p);
 
   if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
     Universe::oops_do(strong_roots);
--- old/src/share/vm/memory/sharedHeap.cpp 2015-03-31 14:52:51.583189343 +0200
+++ new/src/share/vm/memory/sharedHeap.cpp 2015-03-31 14:52:51.467189348 +0200
@@ -35,13 +35,10 @@
 #include "utilities/copy.hpp"
 #include "utilities/workgroup.hpp"
 
-SharedHeap* SharedHeap::_sh;
-
 SharedHeap::SharedHeap() :
   CollectedHeap(),
   _workers(NULL)
 {
-  _sh = this;  // ch is static, should be set only once.
   if (UseConcMarkSweepGC || UseG1GC) {
     _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
                             /* are_GC_task_threads */true,
@@ -54,13 +51,6 @@
   }
 }
 
-bool SharedHeap::heap_lock_held_for_gc() {
-  Thread* t = Thread::current();
-  return Heap_lock->owned_by_self()
-      || ((t->is_GC_task_thread() || t->is_VM_thread())
-          && _thread_holds_heap_lock_for_gc);
-}
-
 void SharedHeap::set_par_threads(uint t) {
   assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
   _n_par_threads = t;
--- old/src/share/vm/memory/sharedHeap.hpp 2015-03-31 14:52:51.783189335 +0200
+++ new/src/share/vm/memory/sharedHeap.hpp 2015-03-31 14:52:51.667189340 +0200
@@ -105,11 +105,6 @@
   friend class VM_CGC_Operation;
 
  protected:
-  // There should be only a single instance of "SharedHeap" in a program.
-  // This is enforced with the protected constructor below, which will also
-  // set the static pointer "_sh" to that instance.
-  static SharedHeap* _sh;
-
   // If we're doing parallel GC, use this gang of threads.
   FlexibleWorkGang* _workers;
 
@@ -117,17 +112,7 @@
   // function.
   SharedHeap();
 
-  // Returns true if the calling thread holds the heap lock,
-  // or the calling thread is a par gc thread and the heap_lock is held
-  // by the vm thread doing a gc operation.
-  bool heap_lock_held_for_gc();
-  // True if the heap_lock is held by the a non-gc thread invoking a gc
-  // operation.
-  bool _thread_holds_heap_lock_for_gc;
-
  public:
-  static SharedHeap* heap() { return _sh; }
-
   void set_barrier_set(BarrierSet* bs);
 
   // Does operations required after initialization has been done.
--- old/src/share/vm/runtime/thread.cpp 2015-03-31 14:52:51.991189326 +0200
+++ new/src/share/vm/runtime/thread.cpp 2015-03-31 14:52:51.871189331 +0200
@@ -754,13 +754,9 @@
       return true;
     } else {
       guarantee(res == strong_roots_parity, "Or else what?");
-      assert(SharedHeap::heap()->workers()->active_workers() > 0,
-             "Should only fail when parallel.");
       return false;
     }
   }
-  assert(SharedHeap::heap()->workers()->active_workers() > 0,
-         "Should only fail when parallel.");
   return false;
 }
 
@@ -4066,20 +4062,7 @@
 }
 #endif // PRODUCT
 
-void Threads::possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
-  // Introduce a mechanism allowing parallel threads to claim threads as
-  // root groups. Overhead should be small enough to use all the time,
-  // even in sequential code.
-  SharedHeap* sh = SharedHeap::heap();
-  // Cannot yet substitute active_workers for n_par_threads
-  // because of G1CollectedHeap::verify() use of
-  // SharedHeap::process_roots(). n_par_threads == 0 will
-  // turn off parallelism in process_roots while active_workers
-  // is being used for parallelism elsewhere.
-  bool is_par = sh->n_par_threads() > 0;
-  assert(!is_par ||
-         (SharedHeap::heap()->n_par_threads() ==
-          SharedHeap::heap()->workers()->active_workers()), "Mismatch");
+void Threads::possibly_parallel_oops_do(bool is_par, OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) {
   int cp = Threads::thread_claim_parity();
   ALL_JAVA_THREADS(p) {
     if (p->claim_oops_do(is_par, cp)) {
--- old/src/share/vm/runtime/thread.hpp 2015-03-31 14:52:52.255189315 +0200
+++ new/src/share/vm/runtime/thread.hpp 2015-03-31 14:52:52.135189320 +0200
@@ -1894,7 +1894,7 @@
   // This version may only be called by sequential code.
   static void oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
   // This version may be called by sequential or parallel code.
-  static void possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
+  static void possibly_parallel_oops_do(bool is_par, OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf);
   // This creates a list of GCTasks, one per thread.
   static void create_thread_roots_tasks(GCTaskQueue* q);
   // This creates a list of GCTasks, one per thread, for marking objects.