--- old/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp 2012-02-10 12:58:05.398980000 -0800
+++ new/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp 2012-02-10 12:58:05.157436000 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -135,7 +135,8 @@
   template <class T> static inline bool should_scavenge(T* p, MutableSpace* to_space);
   template <class T> static inline bool should_scavenge(T* p, bool check_to_space);
 
-  template <class T> inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p);
+  template <class T, bool promote_immediately>
+  inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p);
 
   // Is an object in the young generation
   // This assumes that the HeapWord argument is in the heap,
--- old/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp 2012-02-10 12:58:05.399118000 -0800
+++ new/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp 2012-02-10 12:58:05.146094000 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -171,7 +171,7 @@
   void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }
 
   // Promotion methods
-  oop copy_to_survivor_space(oop o);
+  template<bool promote_immediately> oop copy_to_survivor_space(oop o);
   oop oop_promotion_failed(oop obj, markOop obj_mark);
 
   void reset();
--- old/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp 2012-02-10 12:58:05.399134000 -0800
+++ new/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp 2012-02-10 12:58:05.143159000 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,8 @@
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
-  PSScavengeRootsClosure roots_closure(pm);
+  PSScavengeRootsClosure</*promote_immediately=*/false> roots_closure(pm);
+  PSScavengeRootsClosure</*promote_immediately=*/true> roots_to_old_closure(pm);
 
   switch (_root_type) {
     case universe:
@@ -91,7 +92,7 @@
 
     case code_cache:
       {
-        CodeBlobToOopClosure each_scavengable_code_blob(&roots_closure, /*do_marking=*/ true);
+        CodeBlobToOopClosure each_scavengable_code_blob(&roots_to_old_closure, /*do_marking=*/ true);
         CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob);
       }
       break;
@@ -112,7 +113,7 @@
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
-  PSScavengeRootsClosure roots_closure(pm);
+  PSScavengeRootsClosure</*promote_immediately=*/false> roots_closure(pm);
   CodeBlobToOopClosure roots_in_blobs(&roots_closure, /*do_marking=*/ true);
 
   if (_java_thread != NULL)
--- old/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp 2012-02-10 12:58:05.399034000 -0800
+++ new/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp 2012-02-10 12:58:05.145666000 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
+#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 
 inline void PSScavenge::save_to_space_top_before_gc() {
@@ -65,7 +66,7 @@
 // Attempt to "claim" oop at p via CAS, push the new obj if successful
 // This version tests the oop* to make sure it is within the heap before
 // attempting marking.
-template <class T>
+template <class T, bool promote_immediately>
 inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
                                                    T*                  p) {
   assert(should_scavenge(p, true), "revisiting object?");
@@ -73,7 +74,7 @@
   oop o = oopDesc::load_decode_heap_oop_not_null(p);
   oop new_obj = o->is_forwarded()
         ? o->forwardee()
-        : pm->copy_to_survivor_space(o);
+        : pm->copy_to_survivor_space<promote_immediately>(o);
   oopDesc::encode_store_heap_oop_not_null(p, new_obj);
 
   // We cannot mark without test, as some code passes us pointers
@@ -86,6 +87,7 @@
   }
 }
 
+template<bool promote_immediately>
 class PSScavengeRootsClosure: public OopClosure {
  private:
   PSPromotionManager* _promotion_manager;
@@ -94,7 +96,7 @@
   template <class T> void do_oop_work(T *p) {
     if (PSScavenge::should_scavenge(p)) {
       // We never card mark roots, maybe call a func without test?
-      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
+      PSScavenge::copy_and_push_safe_barrier<T, promote_immediately>(_promotion_manager, p);
     }
   }
  public:
@@ -103,4 +105,5 @@
   void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); }
 };
+
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSSCAVENGE_INLINE_HPP
--- old/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp 2012-02-10 12:58:05.401656000 -0800
+++ new/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp 2012-02-10 12:58:05.158909000 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,6 +61,170 @@
   claim_or_forward_internal_depth(p);
 }
 
+//
+// This method is pretty bulky. It would be nice to split it up
+// into smaller submethods, but we need to be careful not to hurt
+// performance.
+//
+template<bool promote_immediately>
+oop PSPromotionManager::copy_to_survivor_space(oop o) {
+  assert(PSScavenge::should_scavenge(&o), "Sanity");
+
+  oop new_obj = NULL;
+
+  // NOTE! We must be very careful with any methods that access the mark
+  // in o. There may be multiple threads racing on it, and it may be forwarded
+  // at any time. Do not use oop methods for accessing the mark!
+  markOop test_mark = o->mark();
+
+  // The same test as "o->is_forwarded()"
+  if (!test_mark->is_marked()) {
+    bool new_obj_is_tenured = false;
+    size_t new_obj_size = o->size();
+
+    if (!promote_immediately) {
+      // Find the objects age, MT safe.
+      int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
+        test_mark->displaced_mark_helper()->age() : test_mark->age();
+
+      // Try allocating obj in to-space (unless too old)
+      if (age < PSScavenge::tenuring_threshold()) {
+        new_obj = (oop) _young_lab.allocate(new_obj_size);
+        if (new_obj == NULL && !_young_gen_is_full) {
+          // Do we allocate directly, or flush and refill?
+          if (new_obj_size > (YoungPLABSize / 2)) {
+            // Allocate this object directly
+            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
+          } else {
+            // Flush and fill
+            _young_lab.flush();
+
+            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
+            if (lab_base != NULL) {
+              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
+              // Try the young lab allocation again.
+              new_obj = (oop) _young_lab.allocate(new_obj_size);
+            } else {
+              _young_gen_is_full = true;
+            }
+          }
+        }
+      }
+    }
+
+    // Otherwise try allocating obj tenured
+    if (new_obj == NULL) {
+#ifndef PRODUCT
+      if (Universe::heap()->promotion_should_fail()) {
+        return oop_promotion_failed(o, test_mark);
+      }
+#endif // #ifndef PRODUCT
+
+      new_obj = (oop) _old_lab.allocate(new_obj_size);
+      new_obj_is_tenured = true;
+
+      if (new_obj == NULL) {
+        if (!_old_gen_is_full) {
+          // Do we allocate directly, or flush and refill?
+          if (new_obj_size > (OldPLABSize / 2)) {
+            // Allocate this object directly
+            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
+          } else {
+            // Flush and fill
+            _old_lab.flush();
+
+            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
+            if(lab_base != NULL) {
+              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
+              // Try the old lab allocation again.
+              new_obj = (oop) _old_lab.allocate(new_obj_size);
+            }
+          }
+        }
+
+        // This is the promotion failed test, and code handling.
+        // The code belongs here for two reasons. It is slightly
+        // different than the code below, and cannot share the
+        // CAS testing code. Keeping the code here also minimizes
+        // the impact on the common case fast path code.
+
+        if (new_obj == NULL) {
+          _old_gen_is_full = true;
+          return oop_promotion_failed(o, test_mark);
+        }
+      }
+    }
+
+    assert(new_obj != NULL, "allocation should have succeeded");
+
+    // Copy obj
+    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
+
+    // Now we have to CAS in the header.
+    if (o->cas_forward_to(new_obj, test_mark)) {
+      // We won any races, we "own" this object.
+      assert(new_obj == o->forwardee(), "Sanity");
+
+      // Increment age if obj still in new generation. Now that
+      // we're dealing with a markOop that cannot change, it is
+      // okay to use the non mt safe oop methods.
+      if (!new_obj_is_tenured) {
+        new_obj->incr_age();
+        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
+      }
+
+      // Do the size comparison first with new_obj_size, which we
+      // already have. Hopefully, only a few objects are larger than
+      // _min_array_size_for_chunking, and most of them will be arrays.
+      // So, the is->objArray() test would be very infrequent.
+      if (new_obj_size > _min_array_size_for_chunking &&
+          new_obj->is_objArray() &&
+          PSChunkLargeArrays) {
+        // we'll chunk it
+        oop* const masked_o = mask_chunked_array_oop(o);
+        push_depth(masked_o);
+        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
+      } else {
+        // we'll just push its contents
+        new_obj->push_contents(this);
+      }
+    } else {
+      // We lost, someone else "owns" this object
+      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
+
+      // Try to deallocate the space. If it was directly allocated we cannot
+      // deallocate it, so we have to test. If the deallocation fails,
+      // overwrite with a filler object.
+      if (new_obj_is_tenured) {
+        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
+          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
+        }
+      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
+        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
+      }
+
+      // don't update this before the unallocation!
+      new_obj = o->forwardee();
+    }
+  } else {
+    assert(o->is_forwarded(), "Sanity");
+    new_obj = o->forwardee();
+  }
+
+#ifdef DEBUG
+  // This code must come after the CAS test, or it will print incorrect
+  // information.
+  if (TraceScavenge) {
+    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
+                           PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
+                           new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
+  }
+#endif
+
+  return new_obj;
+}
+
+
 inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
   if (is_oop_masked(p)) {
     assert(PSChunkLargeArrays, "invariant");
@@ -69,9 +233,9 @@
   } else {
     if (p.is_narrow()) {
       assert(UseCompressedOops, "Error");
-      PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p);
+      PSScavenge::copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(this, p);
     } else {
-      PSScavenge::copy_and_push_safe_barrier(this, (oop*)p);
+      PSScavenge::copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(this, p);
     }
   }
 }
--- old/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp 2012-02-10 12:58:05.400339000 -0800
+++ new/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp 2012-02-10 12:58:05.149595000 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -247,167 +247,6 @@
   }
 }
 
-//
-// This method is pretty bulky. It would be nice to split it up
-// into smaller submethods, but we need to be careful not to hurt
-// performance.
-//
-
-oop PSPromotionManager::copy_to_survivor_space(oop o) {
-  assert(PSScavenge::should_scavenge(&o), "Sanity");
-
-  oop new_obj = NULL;
-
-  // NOTE! We must be very careful with any methods that access the mark
-  // in o. There may be multiple threads racing on it, and it may be forwarded
-  // at any time. Do not use oop methods for accessing the mark!
-  markOop test_mark = o->mark();
-
-  // The same test as "o->is_forwarded()"
-  if (!test_mark->is_marked()) {
-    bool new_obj_is_tenured = false;
-    size_t new_obj_size = o->size();
-
-    // Find the objects age, MT safe.
-    int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
-      test_mark->displaced_mark_helper()->age() : test_mark->age();
-
-    // Try allocating obj in to-space (unless too old)
-    if (age < PSScavenge::tenuring_threshold()) {
-      new_obj = (oop) _young_lab.allocate(new_obj_size);
-      if (new_obj == NULL && !_young_gen_is_full) {
-        // Do we allocate directly, or flush and refill?
-        if (new_obj_size > (YoungPLABSize / 2)) {
-          // Allocate this object directly
-          new_obj = (oop)young_space()->cas_allocate(new_obj_size);
-        } else {
-          // Flush and fill
-          _young_lab.flush();
-
-          HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
-          if (lab_base != NULL) {
-            _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
-            // Try the young lab allocation again.
-            new_obj = (oop) _young_lab.allocate(new_obj_size);
-          } else {
-            _young_gen_is_full = true;
-          }
-        }
-      }
-    }
-
-    // Otherwise try allocating obj tenured
-    if (new_obj == NULL) {
-#ifndef PRODUCT
-      if (Universe::heap()->promotion_should_fail()) {
-        return oop_promotion_failed(o, test_mark);
-      }
-#endif // #ifndef PRODUCT
-
-      new_obj = (oop) _old_lab.allocate(new_obj_size);
-      new_obj_is_tenured = true;
-
-      if (new_obj == NULL) {
-        if (!_old_gen_is_full) {
-          // Do we allocate directly, or flush and refill?
-          if (new_obj_size > (OldPLABSize / 2)) {
-            // Allocate this object directly
-            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
-          } else {
-            // Flush and fill
-            _old_lab.flush();
-
-            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
-            if(lab_base != NULL) {
-              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
-              // Try the old lab allocation again.
-              new_obj = (oop) _old_lab.allocate(new_obj_size);
-            }
-          }
-        }
-
-        // This is the promotion failed test, and code handling.
-        // The code belongs here for two reasons. It is slightly
-        // different thatn the code below, and cannot share the
-        // CAS testing code. Keeping the code here also minimizes
-        // the impact on the common case fast path code.
-
-        if (new_obj == NULL) {
-          _old_gen_is_full = true;
-          return oop_promotion_failed(o, test_mark);
-        }
-      }
-    }
-
-    assert(new_obj != NULL, "allocation should have succeeded");
-
-    // Copy obj
-    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
-
-    // Now we have to CAS in the header.
-    if (o->cas_forward_to(new_obj, test_mark)) {
-      // We won any races, we "own" this object.
-      assert(new_obj == o->forwardee(), "Sanity");
-
-      // Increment age if obj still in new generation. Now that
Now that - // we're dealing with a markOop that cannot change, it is - // okay to use the non mt safe oop methods. - if (!new_obj_is_tenured) { - new_obj->incr_age(); - assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj"); - } - - // Do the size comparison first with new_obj_size, which we - // already have. Hopefully, only a few objects are larger than - // _min_array_size_for_chunking, and most of them will be arrays. - // So, the is->objArray() test would be very infrequent. - if (new_obj_size > _min_array_size_for_chunking && - new_obj->is_objArray() && - PSChunkLargeArrays) { - // we'll chunk it - oop* const masked_o = mask_chunked_array_oop(o); - push_depth(masked_o); - TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes); - } else { - // we'll just push its contents - new_obj->push_contents(this); - } - } else { - // We lost, someone else "owns" this object - guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed."); - - // Try to deallocate the space. If it was directly allocated we cannot - // deallocate it, so we have to test. If the deallocation fails, - // overwrite with a filler object. - if (new_obj_is_tenured) { - if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) { - CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size); - } - } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) { - CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size); - } - - // don't update this before the unallocation! - new_obj = o->forwardee(); - } - } else { - assert(o->is_forwarded(), "Sanity"); - new_obj = o->forwardee(); - } - -#ifdef DEBUG - // This code must come after the CAS test, or it will print incorrect - // information. - if (TraceScavenge) { - gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}", - PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring", - new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size()); - } -#endif - - return new_obj; -} - template void PSPromotionManager::process_array_chunk_work( oop obj, int start, int end) { --- old/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp 2012-02-10 12:58:05.399908000 -0800 +++ new/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp 2012-02-10 12:58:05.151949000 -0800 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "classfile/symbolTable.hpp" +#include "code/codeCache.hpp" #include "gc_implementation/parallelScavenge/cardTableExtension.hpp" #include "gc_implementation/parallelScavenge/gcTaskManager.hpp" #include "gc_implementation/parallelScavenge/generationSizer.hpp" @@ -100,7 +101,7 @@ // Weak refs may be visited more than once. if (PSScavenge::should_scavenge(p, _to_space)) { - PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p); + PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p); } } virtual void do_oop(oop* p) { PSKeepAliveClosure::do_oop_work(p); } @@ -452,7 +453,7 @@ // Unlink any dead interned Strings StringTable::unlink(&_is_alive_closure); // Process the remaining live ones - PSScavengeRootsClosure root_closure(promotion_manager); + PSScavengeRootsClosure root_closure(promotion_manager); StringTable::oops_do(&root_closure); } @@ -602,6 +603,8 @@ NOT_PRODUCT(reference_processor()->verify_no_references_recorded()); + CodeCache::prune_scavenge_root_nmethods(); + // Re-verify object start arrays if (VerifyObjectStartArray && VerifyAfterGC) {