--- old/src/share/vm/gc/g1/g1CollectedHeap.cpp 2015-07-16 10:36:11.825613357 +0200
+++ new/src/share/vm/gc/g1/g1CollectedHeap.cpp 2015-07-16 10:36:11.727610453 +0200
@@ -1917,7 +1917,6 @@
   _ref_processor_cm(NULL),
   _ref_processor_stw(NULL),
   _bot_shared(NULL),
-  _evac_failure_scan_stack(NULL),
   _cg1r(NULL),
   _g1mm(NULL),
   _refine_cte_cl(NULL),
@@ -2204,6 +2203,11 @@
   G1StringDedup::initialize();

+  _preserved_objs = NEW_C_HEAP_ARRAY(OopAndMarkOopStack, ParallelGCThreads, mtGC);
+  for (uint i = 0; i < ParallelGCThreads; i++) {
+    new (&_preserved_objs[i]) OopAndMarkOopStack();
+  }
+
   return JNI_OK;
 }

@@ -4255,21 +4259,6 @@
   return true;
 }

-void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
-  _drain_in_progress = false;
-  set_evac_failure_closure(cl);
-  _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
-}
-
-void G1CollectedHeap::finalize_for_evac_failure() {
-  assert(_evac_failure_scan_stack != NULL &&
-         _evac_failure_scan_stack->length() == 0,
-         "Postcondition");
-  assert(!_drain_in_progress, "Postcondition");
-  delete _evac_failure_scan_stack;
-  _evac_failure_scan_stack = NULL;
-}
-
 void G1CollectedHeap::remove_self_forwarding_pointers() {
   double remove_self_forwards_start = os::elapsedTime();

@@ -4277,104 +4266,30 @@
   workers()->run_task(&rsfp_task);

   // Now restore saved marks, if any.
-  assert(_objs_with_preserved_marks.size() ==
-         _preserved_marks_of_objs.size(), "Both or none.");
-  while (!_objs_with_preserved_marks.is_empty()) {
-    oop obj = _objs_with_preserved_marks.pop();
-    markOop m = _preserved_marks_of_objs.pop();
-    obj->set_mark(m);
+  for (uint i = 0; i < ParallelGCThreads; i++) {
+    OopAndMarkOopStack& cur = _preserved_objs[i];
+    while (!cur.is_empty()) {
+      OopAndMarkOop elem = cur.pop();
+      elem.set_mark();
+    }
+    cur.clear(true);
   }
-  _objs_with_preserved_marks.clear(true);
-  _preserved_marks_of_objs.clear(true);

   g1_policy()->phase_times()->record_evac_fail_remove_self_forwards((os::elapsedTime() - remove_self_forwards_start) * 1000.0);
 }

-void G1CollectedHeap::push_on_evac_failure_scan_stack(oop obj) {
-  _evac_failure_scan_stack->push(obj);
-}
-
-void G1CollectedHeap::drain_evac_failure_scan_stack() {
-  assert(_evac_failure_scan_stack != NULL, "precondition");
-
-  while (_evac_failure_scan_stack->length() > 0) {
-    oop obj = _evac_failure_scan_stack->pop();
-    _evac_failure_closure->set_region(heap_region_containing(obj));
-    obj->oop_iterate_backwards(_evac_failure_closure);
-  }
-}
-
-oop
-G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,
-                                               oop old) {
-  assert(obj_in_cs(old),
-         err_msg("obj: " PTR_FORMAT " should still be in the CSet",
-                 p2i(old)));
-  markOop m = old->mark();
-  oop forward_ptr = old->forward_to_atomic(old);
-  if (forward_ptr == NULL) {
-    // Forward-to-self succeeded.
-    assert(_par_scan_state != NULL, "par scan state");
-    OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
-    uint queue_num = _par_scan_state->queue_num();
-
+void G1CollectedHeap::preserve_mark_during_evac_failure(uint queue_num, oop obj, markOop m) {
+  if (!_evacuation_failed) {
     _evacuation_failed = true;
-    _evacuation_failed_info_array[queue_num].register_copy_failure(old->size());
-    if (_evac_failure_closure != cl) {
-      MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
-      assert(!_drain_in_progress,
-             "Should only be true while someone holds the lock.");
-      // Set the global evac-failure closure to the current thread's.
-      assert(_evac_failure_closure == NULL, "Or locking has failed.");
-      set_evac_failure_closure(cl);
-      // Now do the common part.
-      handle_evacuation_failure_common(old, m);
-      // Reset to NULL.
-      set_evac_failure_closure(NULL);
-    } else {
-      // The lock is already held, and this is recursive.
-      assert(_drain_in_progress, "This should only be the recursive case.");
-      handle_evacuation_failure_common(old, m);
-    }
-    return old;
-  } else {
-    // Forward-to-self failed. Either someone else managed to allocate
-    // space for this object (old != forward_ptr) or they beat us in
-    // self-forwarding it (old == forward_ptr).
-    assert(old == forward_ptr || !obj_in_cs(forward_ptr),
-           err_msg("obj: " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
-                   "should not be in the CSet",
-                   p2i(old), p2i(forward_ptr)));
-    return forward_ptr;
-  }
-}
-
-void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
-  preserve_mark_if_necessary(old, m);
-
-  HeapRegion* r = heap_region_containing(old);
-  if (!r->evacuation_failed()) {
-    r->set_evacuation_failed(true);
-    _hr_printer.evac_failure(r);
   }

-  push_on_evac_failure_scan_stack(old);
+  _evacuation_failed_info_array[queue_num].register_copy_failure(obj->size());

-  if (!_drain_in_progress) {
-    // prevent recursion in copy_to_survivor_space()
-    _drain_in_progress = true;
-    drain_evac_failure_scan_stack();
-    _drain_in_progress = false;
-  }
-}
-
-void G1CollectedHeap::preserve_mark_if_necessary(oop obj, markOop m) {
-  assert(evacuation_failed(), "Oversaving!");

   // We want to call the "for_promotion_failure" version only in the
   // case of a promotion failure.
   if (m->must_be_preserved_for_promotion_failure(obj)) {
-    _objs_with_preserved_marks.push(obj);
-    _preserved_marks_of_objs.push(m);
+    OopAndMarkOop elem(obj, m);
+    _preserved_objs[queue_num].push(elem);
   }
 }
@@ -4597,9 +4512,6 @@
       ReferenceProcessor* rp = _g1h->ref_processor_stw();

       G1ParScanThreadState pss(_g1h, worker_id, rp);
-      G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
-
-      pss.set_evac_failure_closure(&evac_failure_cl);

       bool only_young = _g1h->collector_state()->gcs_are_young();

@@ -5269,9 +5181,6 @@
     G1STWIsAliveClosure is_alive(_g1h);

     G1ParScanThreadState pss(_g1h, worker_id, NULL);
-    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
-
-    pss.set_evac_failure_closure(&evac_failure_cl);

     G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);

@@ -5368,10 +5277,6 @@
     HandleMark hm;

     G1ParScanThreadState pss(_g1h, worker_id, NULL);
-    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
-
-    pss.set_evac_failure_closure(&evac_failure_cl);
-
     assert(pss.queue_is_empty(), "both queue and overflow should be empty");

     G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);

@@ -5476,15 +5381,11 @@

   // Use only a single queue for this PSS.
   G1ParScanThreadState pss(this, 0, NULL);
+  assert(pss.queue_is_empty(), "pre-condition");

   // We do not embed a reference processor in the copying/scanning
   // closures while we're actually processing the discovered
   // reference objects.
-  G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
-
-  pss.set_evac_failure_closure(&evac_failure_cl);
-
-  assert(pss.queue_is_empty(), "pre-condition");

   G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);

@@ -5590,8 +5491,6 @@
   const uint n_workers = workers()->active_workers();

-  init_for_evac_failure(NULL);
-
   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");

   double start_par_time_sec = os::elapsedTime();
   double end_par_time_sec;
@@ -5655,8 +5554,6 @@

   purge_code_root_memory();

-  finalize_for_evac_failure();
-
   if (evacuation_failed()) {
     remove_self_forwarding_pointers();

--- old/src/share/vm/gc/g1/g1CollectedHeap.hpp 2015-07-16 10:36:12.447631791 +0200
+++ new/src/share/vm/gc/g1/g1CollectedHeap.hpp 2015-07-16 10:36:12.351628946 +0200
@@ -858,44 +858,27 @@
   // forwarding pointers to themselves. Reset them.
   void remove_self_forwarding_pointers();

-  // Together, these store an object with a preserved mark, and its mark value.
-  Stack<oop, mtGC> _objs_with_preserved_marks;
-  Stack<markOop, mtGC> _preserved_marks_of_objs;
+  struct OopAndMarkOop {
+   private:
+    oop _o;
+    markOop _m;
+   public:
+    OopAndMarkOop(oop obj, markOop m) : _o(obj), _m(m) {
+    }
+
+    void set_mark() {
+      _o->set_mark(_m);
+    }
+  };
+
+  typedef Stack<OopAndMarkOop, mtGC> OopAndMarkOopStack;
+  // Stores marks with the corresponding oop that we need to preserve during evacuation
+  // failure.
+  OopAndMarkOopStack* _preserved_objs;

   // Preserve the mark of "obj", if necessary, in preparation for its mark
   // word being overwritten with a self-forwarding-pointer.
-  void preserve_mark_if_necessary(oop obj, markOop m);
-
-  // The stack of evac-failure objects left to be scanned.
-  GrowableArray<oop>* _evac_failure_scan_stack;
-  // The closure to apply to evac-failure objects.
-
-  OopsInHeapRegionClosure* _evac_failure_closure;
-  // Set the field above.
-  void
-  set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) {
-    _evac_failure_closure = evac_failure_closure;
-  }
-
-  // Push "obj" on the scan stack.
-  void push_on_evac_failure_scan_stack(oop obj);
-  // Process scan stack entries until the stack is empty.
-  void drain_evac_failure_scan_stack();
-  // True iff an invocation of "drain_scan_stack" is in progress; to
-  // prevent unnecessary recursion.
-  bool _drain_in_progress;
-
-  // Do any necessary initialization for evacuation-failure handling.
-  // "cl" is the closure that will be used to process evac-failure
-  // objects.
-  void init_for_evac_failure(OopsInHeapRegionClosure* cl);
-  // Do any necessary cleanup for evacuation-failure handling data
-  // structures.
-  void finalize_for_evac_failure();
-
-  // An attempt to evacuate "obj" has failed; take necessary steps.
-  oop handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state, oop obj);
-  void handle_evacuation_failure_common(oop obj, markOop m);
+  void preserve_mark_during_evac_failure(uint queue, oop obj, markOop m);

 #ifndef PRODUCT
   // Support for forcing evacuation failures. Analogous to
--- old/src/share/vm/gc/g1/g1OopClosures.hpp 2015-07-16 10:36:12.986647765 +0200
+++ new/src/share/vm/gc/g1/g1OopClosures.hpp 2015-07-16 10:36:12.890644920 +0200
@@ -148,8 +148,6 @@

 // We use a separate closure to handle references during evacuation
 // failure processing.
-typedef G1ParCopyClosure<G1BarrierEvac, G1MarkNone> G1ParScanHeapEvacFailureClosure;
-
 class FilterIntoCSClosure: public ExtendedOopClosure {
   G1CollectedHeap* _g1;
   OopClosure* _oc;
--- old/src/share/vm/gc/g1/g1ParScanThreadState.cpp 2015-07-16 10:36:13.506663175 +0200
+++ new/src/share/vm/gc/g1/g1ParScanThreadState.cpp 2015-07-16 10:36:13.410660330 +0200
@@ -144,8 +144,6 @@
 #endif // ASSERT

 void G1ParScanThreadState::trim_queue() {
-  assert(_evac_failure_cl != NULL, "not set");
-
   StarTask ref;
   do {
     // Drain the overflow stack first, so other threads can steal.
@@ -222,7 +220,7 @@
       if (obj_ptr == NULL) {
         // This will either forward-to-self, or detect that someone else has
         // installed a forwarding pointer.
-        return _g1h->handle_evacuation_failure_par(this, old);
+        return handle_evacuation_failure_par(old, old_mark);
       }
     }
   }
@@ -236,7 +234,7 @@
     // Doing this after all the allocation attempts also tests the
     // undo_allocation() method too.
     _g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
-    return _g1h->handle_evacuation_failure_par(this, old);
+    return handle_evacuation_failure_par(old, old_mark);
   }
 #endif // !PRODUCT

@@ -301,3 +299,35 @@
     return forward_ptr;
   }
 }
+
+oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
+  assert(_g1h->obj_in_cs(old),
+         err_msg("Object " PTR_FORMAT " should be in the CSet", p2i(old)));
+
+  oop forward_ptr = old->forward_to_atomic(old);
+  if (forward_ptr == NULL) {
+    // Forward-to-self succeeded. We are the "owner" of the object.
+    HeapRegion* r = _g1h->heap_region_containing(old);
+
+    if (!r->evacuation_failed()) {
+      r->set_evacuation_failed(true);
+      _g1h->hr_printer()->evac_failure(r);
+    }
+
+    _g1h->preserve_mark_during_evac_failure(_queue_num, old, m);
+
+    _scanner.set_region(r);
+    old->oop_iterate_backwards(&_scanner);
+
+    return old;
+  } else {
+    // Forward-to-self failed. Either someone else managed to allocate
+    // space for this object (old != forward_ptr) or they beat us in
+    // self-forwarding it (old == forward_ptr).
+    assert(old == forward_ptr || !_g1h->obj_in_cs(forward_ptr),
+           err_msg("Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
+                   "should not be in the CSet",
+                   p2i(old), p2i(forward_ptr)));
+    return forward_ptr;
+  }
+}
--- old/src/share/vm/gc/g1/g1ParScanThreadState.hpp 2015-07-16 10:36:14.025678556 +0200
+++ new/src/share/vm/gc/g1/g1ParScanThreadState.hpp 2015-07-16 10:36:13.930675741 +0200
@@ -54,8 +54,6 @@
   uint _tenuring_threshold;
   G1ParScanClosure _scanner;

-  OopsInHeapRegionClosure* _evac_failure_cl;
-
   int _hash_seed;
   uint _queue_num;

@@ -114,12 +112,6 @@
     }
   }

-  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
-    _evac_failure_cl = evac_failure_cl;
-  }
-
-  OopsInHeapRegionClosure* evac_failure_closure() { return _evac_failure_cl; }
-
   int* hash_seed() { return &_hash_seed; }
   uint queue_num() { return _queue_num; }

@@ -211,6 +203,9 @@
   void trim_queue();

   inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
+
+  // An attempt to evacuate "obj" has failed; take necessary steps.
+  oop handle_evacuation_failure_par(oop obj, markOop m);
 };

 #endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
--- old/src/share/vm/runtime/mutexLocker.cpp 2015-07-16 10:36:14.544693938 +0200
+++ new/src/share/vm/runtime/mutexLocker.cpp 2015-07-16 10:36:14.447691063 +0200
@@ -82,7 +82,6 @@
 Monitor* DirtyCardQ_CBL_mon = NULL;
 Mutex*   Shared_DirtyCardQ_lock = NULL;
 Mutex*   ParGCRareEvent_lock = NULL;
-Mutex*   EvacFailureStack_lock = NULL;
 Mutex*   DerivedPointerTableGC_lock = NULL;
 Mutex*   Compile_lock = NULL;
 Monitor* MethodCompileQueue_lock = NULL;
@@ -200,7 +199,6 @@
   def(OldSets_lock          , Mutex  , leaf    , true, Monitor::_safepoint_check_never);
   def(RootRegionScan_lock   , Monitor, leaf    , true, Monitor::_safepoint_check_never);
   def(MMUTracker_lock       , Mutex  , leaf    , true, Monitor::_safepoint_check_never);
-  def(EvacFailureStack_lock , Mutex  , nonleaf , true, Monitor::_safepoint_check_never);
   def(StringDedupQueue_lock , Monitor, leaf, true, Monitor::_safepoint_check_never);
   def(StringDedupTable_lock , Mutex  , leaf, true, Monitor::_safepoint_check_never);
--- old/src/share/vm/runtime/mutexLocker.hpp 2015-07-16 10:36:15.063709319 +0200
+++ new/src/share/vm/runtime/mutexLocker.hpp 2015-07-16 10:36:14.967706474 +0200
@@ -86,7 +86,6 @@
                                                  // non-Java threads.
                                                  // (see option ExplicitGCInvokesConcurrent)
 extern Mutex*   ParGCRareEvent_lock;             // Synchronizes various (rare) parallel GC ops.
-extern Mutex*   EvacFailureStack_lock;           // guards the evac failure scan stack
 extern Mutex*   Compile_lock;                    // a lock held when Compilation is updating code (used to block CodeCache traversal, CHA updates, etc)
 extern Monitor* MethodCompileQueue_lock;         // a lock held when method compilations are enqueued, dequeued
 extern Monitor* CompileThread_lock;              // a lock held by compile threads during compilation system initialization