# HG changeset patch # User stefank # Date 1374950028 -7200 # Sat Jul 27 20:33:48 2013 +0200 # Node ID 153f6ae9dabcd0194eb7af8528898f107978b040 # Parent 8429693b8c5c65f714cebf0dc6027deefcb4b900 [mq]: templateOopIterate diff --git a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp --- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp +++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp @@ -33,6 +33,7 @@ #include "runtime/basicLock.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/os.hpp" +#include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) { diff --git a/src/share/vm/c1/c1_Compilation.cpp b/src/share/vm/c1/c1_Compilation.cpp --- a/src/share/vm/c1/c1_Compilation.cpp +++ b/src/share/vm/c1/c1_Compilation.cpp @@ -34,6 +34,7 @@ #include "code/debugInfoRec.hpp" #include "compiler/compileLog.hpp" #include "c1/c1_RangeCheckElimination.hpp" +#include "runtime/sharedRuntime.hpp" typedef enum { diff --git a/src/share/vm/ci/bcEscapeAnalyzer.cpp b/src/share/vm/ci/bcEscapeAnalyzer.cpp --- a/src/share/vm/ci/bcEscapeAnalyzer.cpp +++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp @@ -30,7 +30,7 @@ #include "ci/ciStreams.hpp" #include "interpreter/bytecode.hpp" #include "utilities/bitMap.inline.hpp" - +#include "utilities/copy.hpp" #ifndef PRODUCT diff --git a/src/share/vm/classfile/classFileParser.hpp b/src/share/vm/classfile/classFileParser.hpp --- a/src/share/vm/classfile/classFileParser.hpp +++ b/src/share/vm/classfile/classFileParser.hpp @@ -27,7 +27,6 @@ #include "classfile/classFileStream.hpp" #include "memory/resourceArea.hpp" -#include "oops/oop.inline.hpp" #include "oops/typeArrayOop.hpp" #include "runtime/handles.inline.hpp" #include "utilities/accessFlags.hpp" diff --git a/src/share/vm/classfile/systemDictionary.cpp b/src/share/vm/classfile/systemDictionary.cpp --- a/src/share/vm/classfile/systemDictionary.cpp +++ b/src/share/vm/classfile/systemDictionary.cpp @@ -35,6 +35,7 @@ #include "compiler/compileBroker.hpp" #include "interpreter/bytecodeStream.hpp" #include "interpreter/interpreter.hpp" +#include "interpreter/linkResolver.hpp" #include "memory/gcLocker.hpp" #include "memory/oopFactory.hpp" #include "oops/instanceKlass.hpp" diff --git a/src/share/vm/code/oopRecorder.cpp b/src/share/vm/code/oopRecorder.cpp --- a/src/share/vm/code/oopRecorder.cpp +++ b/src/share/vm/code/oopRecorder.cpp @@ -29,6 +29,7 @@ #include "code/oopRecorder.hpp" #include "memory/allocation.inline.hpp" #include "oops/oop.inline.hpp" +#include "utilities/copy.hpp" #ifdef ASSERT template int ValueRecorder::_find_index_calls = 0; diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp @@ -39,7 +39,7 @@ "only grey objects on this stack"); // iterate over the oops in this oop, marking and pushing // the ones in CMS heap (i.e. in _span). 
- newOop->oop_iterate(&_par_pushAndMarkClosure); + newOop->oop_iterate(&_par_pushAndMarkClosure); } } } diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp @@ -27,6 +27,7 @@ #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp" #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp" #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp" +#include "gc_implementation/concurrentMarkSweep/promotionInfo.inline.hpp" #include "gc_implementation/shared/liveRange.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" #include "gc_interface/collectedHeap.inline.hpp" @@ -675,7 +676,7 @@ // We de-virtualize the block-related calls below, since we know that our // space is a CompactibleFreeListSpace. -#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \ +#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType, nv) \ void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr, \ HeapWord* bottom, \ HeapWord* top, \ @@ -709,7 +710,7 @@ !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \ oop(bottom)) && \ !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \ - size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \ + size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \ bottom += _cfls->adjustObjectSize(word_sz); \ } else { \ bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \ @@ -736,7 +737,7 @@ !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \ oop(bottom)) && \ !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \ - size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \ + size_t word_sz = oop(bottom)->oop_iterate(cl, mr); \ bottom += _cfls->adjustObjectSize(word_sz); \ } else { \ bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \ @@ -747,8 +748,8 @@ // (There are only two of these, rather than N, because the split is due // only to the introduction of the FilteringClosure, a local part of the // impl of this abstraction.) -FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure) -FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure) +FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure, false) +FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure, true) DirtyCardToOopClosure* CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl, @@ -790,7 +791,7 @@ cur += curSize) { curSize = block_size(cur); if (block_is_obj(cur)) { - oop(cur)->oop_iterate(cl); + oop(cur)->oop_iterate(cl); } } } @@ -1998,24 +1999,6 @@ return _promoInfo.noPromotions(); } -#define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ - \ -void CompactibleFreeListSpace:: \ -oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \ - assert(SharedHeap::heap()->n_par_threads() == 0, \ - "Shouldn't be called (yet) during parallel part of gc."); \ - _promoInfo.promoted_oops_iterate##nv_suffix(blk); \ - /* \ - * This also restores any displaced headers and removes the elements from \ - * the iteration set as they are processed, so that we have a clean slate \ - * at the end of the iteration. 
Note, thus, that if new objects are \ - * promoted as a result of the iteration they are iterated over as well. \ - */ \ - assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); \ -} - -ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN) - bool CompactibleFreeListSpace::linearAllocationWouldFail() const { return _smallLinearAllocBlock._word_size == 0; } diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp @@ -415,10 +415,8 @@ // Fields in objects allocated by applications of the closure // *are* included in the iteration. Thus, when the iteration completes // there should be no further such objects remaining. - #define CFLS_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ - void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk); - ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DECL) - #undef CFLS_OOP_SINCE_SAVE_MARKS_DECL + template + void cfls_oop_since_save_marks_iterate(OopClosureType* blk); // Allocation support HeapWord* allocate(size_t size); diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.inline-disp.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.inline-disp.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.inline-disp.hpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_INLINE_DISP_HPP +#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_INLINE_DISP_HPP + +#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp" +#include "gc_implementation/concurrentMarkSweep/promotionInfo.inline.hpp" + +template +void CompactibleFreeListSpace:: +cfls_oop_since_save_marks_iterate(OopClosureType* blk) { + assert(SharedHeap::heap()->n_par_threads() == 0, + "Shouldn't be called (yet) during parallel part of gc."); + _promoInfo.promoted_oops_iterate(blk); + /* + * This also restores any displaced headers and removes the elements from + * the iteration set as they are processed, so that we have a clean slate + * at the end of the iteration. 
Note, thus, that if new objects are + * promoted as a result of the iteration they are iterated over as well. + */ + assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); +} + +#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_COMPACTIBLEFREELISTSPACE_INLINE_DISP_HPP diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp @@ -33,6 +33,7 @@ #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp" #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp" #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp" +#include "gc_implementation/concurrentMarkSweep/promotionInfo.inline.hpp" #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp" #include "gc_implementation/parNew/parNewGeneration.hpp" #include "gc_implementation/shared/collectorCounters.hpp" @@ -52,6 +53,7 @@ #include "memory/padded.hpp" #include "memory/referencePolicy.hpp" #include "memory/resourceArea.hpp" +#include "memory/space.inline.hpp" #include "memory/tenuredGeneration.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiExport.hpp" @@ -200,7 +202,7 @@ ReservedSpace rs, size_t initial_byte_size, int level, CardTableRS* ct, bool use_adaptive_freelists, FreeBlockDictionary::DictionaryChoice dictionaryChoice) : - CardGeneration(rs, initial_byte_size, level, ct), + CardGeneration(rs, initial_byte_size, level, ct, _dispatch_index_generation_cms), _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))), _debug_collection_type(Concurrent_collection_type), _did_compact(false) @@ -1440,12 +1442,15 @@ ps->lab.retire(thread_num); } +// SSS: Temporary include +#include "gc_implementation/parNew/parOopClosures.inline.hpp" + void ConcurrentMarkSweepGeneration:: par_oop_since_save_marks_iterate_done(int thread_num) { CMSParGCThreadState* ps = _par_gc_thread_states[thread_num]; ParScanWithoutBarrierClosure* dummy_cl = NULL; - ps->promo.promoted_oops_iterate_nv(dummy_cl); + ps->promo.promoted_oops_iterate(dummy_cl); } bool ConcurrentMarkSweepGeneration::should_collect(bool full, @@ -3120,18 +3125,6 @@ return cmsSpace()->no_allocs_since_save_marks(); } -#define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ - \ -void ConcurrentMarkSweepGeneration:: \ -oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ - cl->set_generation(this); \ - cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \ - cl->reset_generation(); \ - save_marks(); \ -} - -ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN) - void ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) { cl->set_generation(this); @@ -4213,7 +4206,8 @@ assert(new_oop->is_oop(), "Should be an oop"); assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object"); assert(_span.contains((HeapWord*)new_oop), "Not in span"); - new_oop->oop_iterate(this); // do_oop() above + // SSS: Opportunity to devirtualize + new_oop->oop_iterate(this); // do_oop() above do_yield_check(); } } @@ -4253,7 +4247,8 @@ } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) { assert(obj_to_scan->is_oop(), "Should be an oop"); assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object"); - 
obj_to_scan->oop_iterate(&cl); + // SSS: Opportunity to devirtualize + obj_to_scan->oop_iterate(&cl); } else if (terminator()->offer_termination(&_term_term)) { assert(work_q->size() == 0, "Impossible!"); break; @@ -5434,7 +5429,7 @@ // Verify that "start" is an object boundary assert(mr.is_empty() || oop(mr.start())->is_oop(), "Should be an oop"); - space->par_oop_iterate(mr, cl); + space->par_oop_iterate(mr, cl); } pst->all_tasks_completed(); } @@ -5572,7 +5567,7 @@ assert(obj_to_scan->is_oop(), "Oops, not an oop!"); assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?"); // Do scanning work - obj_to_scan->oop_iterate(cl); + obj_to_scan->oop_iterate(cl); // Loop around, finish this work, and try to steal some more } else if (terminator()->offer_termination()) { break; // nirvana from the infinite cycle @@ -6070,7 +6065,8 @@ assert(obj_to_scan->is_oop(), "Oops, not an oop!"); assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?"); // Do scanning work - obj_to_scan->oop_iterate(keep_alive); + // SSS: Opportunity to devirtualize + obj_to_scan->oop_iterate(keep_alive); // Loop around, finish this work, and try to steal some more } else if (terminator()->offer_termination()) { break; // nirvana from the infinite cycle @@ -6928,7 +6924,7 @@ "only grey objects on this stack"); // iterate over the oops in this oop, marking and pushing // the ones in CMS heap (i.e. in _span). - new_oop->oop_iterate(&_pushAndMarkClosure); + new_oop->oop_iterate(&_pushAndMarkClosure); // check if it's time to yield do_yield_check(); } while (!_mark_stack->isEmpty() || @@ -7069,12 +7065,12 @@ // objArrays are precisely marked; restrict scanning // to dirty cards only. size = CompactibleFreeListSpace::adjustObjectSize( - p->oop_iterate(_scanningClosure, mr)); + p->oop_iterate(_scanningClosure, mr)); } else { // A non-array may have been imprecisely marked; we need // to scan object in its entirety. size = CompactibleFreeListSpace::adjustObjectSize( - p->oop_iterate(_scanningClosure)); + p->oop_iterate(_scanningClosure)); } #ifdef ASSERT size_t direct_size = @@ -7165,7 +7161,7 @@ // Note that we do not yield while we iterate over // the interior oops of p, pushing the relevant ones // on our marking stack. - size_t size = p->oop_iterate(_scanning_closure); + size_t size = p->oop_iterate(_scanning_closure); do_yield_check(); // Observe that below, we do not abandon the preclean // phase as soon as we should; rather we empty the @@ -7180,7 +7176,7 @@ "only grey objects on this stack"); // iterate over the oops in this oop, marking and pushing // the ones in CMS heap (i.e. in _span). - new_oop->oop_iterate(_scanning_closure); + new_oop->oop_iterate(_scanning_closure); // check if it's time to yield do_yield_check(); } @@ -7243,15 +7239,15 @@ if (p->is_objArray()) { is_obj_array = true; if (_parallel) { - p->oop_iterate(_par_scan_closure, mr); + p->oop_iterate(_par_scan_closure, mr); } else { - p->oop_iterate(_scan_closure, mr); + p->oop_iterate(_scan_closure, mr); } } else { if (_parallel) { - p->oop_iterate(_par_scan_closure); + p->oop_iterate(_par_scan_closure); } else { - p->oop_iterate(_scan_closure); + p->oop_iterate(_scan_closure); } } } @@ -7435,7 +7431,7 @@ // running concurrent with mutators. assert(new_oop->is_oop(true), "Oops! 
expected to pop an oop"); // now scan this oop's oops - new_oop->oop_iterate(&pushOrMarkClosure); + new_oop->oop_iterate(&pushOrMarkClosure); do_yield_check(); } assert(_markStack->isEmpty(), "tautology, emphasizing post-condition"); @@ -7566,7 +7562,7 @@ // running concurrent with mutators. assert(new_oop->is_oop(true), "Oops! expected to pop an oop"); // now scan this oop's oops - new_oop->oop_iterate(&pushOrMarkClosure); + new_oop->oop_iterate(&pushOrMarkClosure); do_yield_check(); } assert(_work_queue->size() == 0, "tautology, emphasizing post-condition"); @@ -7631,7 +7627,8 @@ oop new_oop = _mark_stack->pop(); assert(new_oop->is_oop(), "Oops! expected to pop an oop"); // now scan this oop's oops - new_oop->oop_iterate(&_pam_verify_closure); + // SSS: This is not devirtualized, opportunity to fix? + new_oop->oop_iterate(&_pam_verify_closure); } assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition"); return true; @@ -8813,7 +8810,7 @@ assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop"); // iterate over the oops in this oop, marking and pushing // the ones in CMS heap (i.e. in _span). - new_oop->oop_iterate(&_mark_and_push); + new_oop->oop_iterate(&_mark_and_push); } } } @@ -8888,7 +8885,7 @@ assert(_span.contains(addr), "Should be within span"); assert(_bit_map->isMarked(addr), "Should be marked"); assert(obj->is_oop(), "Should be an oop"); - obj->oop_iterate(_keep_alive); + obj->oop_iterate(_keep_alive); } } @@ -8908,7 +8905,7 @@ assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop"); // iterate over the oops in this oop, marking and pushing // the ones in CMS heap (i.e. in _span). - new_oop->oop_iterate(&_mark_and_push); + new_oop->oop_iterate(&_mark_and_push); } } } diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp @@ -1288,9 +1288,8 @@ // Need to declare the full complement of closures, whether we'll // override them or not, or get message from the compiler: // oop_since_save_marks_iterate_nv hides virtual function... - #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ - void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); - ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL) + template + void oop_since_save_marks_iterate(OopClosureType* cl); // Smart allocation XXX -- move to CFLSpace? void setNearLargestChunk(); diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline-disp.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline-disp.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline-disp.hpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_DISP_HPP +#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_DISP_HPP + +#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.inline-disp.hpp" +#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp" + +template +void ConcurrentMarkSweepGeneration:: +oop_since_save_marks_iterate(OopClosureType* cl) { + cl->set_generation(this); + cmsSpace()->cfls_oop_since_save_marks_iterate(cl); + cl->reset_generation(); + save_marks(); +} + +#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_DISP_HPP diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp @@ -34,88 +34,6 @@ //// PromotionInfo ///////////////////////////////////////////////////////////////////////// - -////////////////////////////////////////////////////////////////////////////// -// We go over the list of promoted objects, removing each from the list, -// and applying the closure (this may, in turn, add more elements to -// the tail of the promoted list, and these newly added objects will -// also be processed) until the list is empty. -// To aid verification and debugging, in the non-product builds -// we actually forward _promoHead each time we process a promoted oop. -// Note that this is not necessary in general (i.e. when we don't need to -// call PromotionInfo::verify()) because oop_iterate can only add to the -// end of _promoTail, and never needs to look at _promoHead. - -#define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) { \ - NOT_PRODUCT(verify()); \ - PromotedObject *curObj, *nextObj; \ - for (curObj = _promoHead; curObj != NULL; curObj = nextObj) { \ - if ((nextObj = curObj->next()) == NULL) { \ - /* protect ourselves against additions due to closure application \ - below by resetting the list. 
*/ \ - assert(_promoTail == curObj, "Should have been the tail"); \ - _promoHead = _promoTail = NULL; \ - } \ - if (curObj->hasDisplacedMark()) { \ - /* restore displaced header */ \ - oop(curObj)->set_mark(nextDisplacedHeader()); \ - } else { \ - /* restore prototypical header */ \ - oop(curObj)->init_mark(); \ - } \ - /* The "promoted_mark" should now not be set */ \ - assert(!curObj->hasPromotedMark(), \ - "Should have been cleared by restoring displaced mark-word"); \ - NOT_PRODUCT(_promoHead = nextObj); \ - if (cl != NULL) oop(curObj)->oop_iterate(cl); \ - if (nextObj == NULL) { /* start at head of list reset above */ \ - nextObj = _promoHead; \ - } \ - } \ - assert(noPromotions(), "post-condition violation"); \ - assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\ - assert(_spoolHead == _spoolTail, "emptied spooling buffers"); \ - assert(_firstIndex == _nextIndex, "empty buffer"); \ -} - -// This should have been ALL_SINCE_...() just like the others, -// but, because the body of the method above is somehwat longer, -// the MSVC compiler cannot cope; as a workaround, we split the -// macro into its 3 constituent parts below (see original macro -// definition in specializedOopClosures.hpp). -SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN) -PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v) - - -// Return the next displaced header, incrementing the pointer and -// recycling spool area as necessary. -markOop PromotionInfo::nextDisplacedHeader() { - assert(_spoolHead != NULL, "promotionInfo inconsistency"); - assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex, - "Empty spool space: no displaced header can be fetched"); - assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?"); - markOop hdr = _spoolHead->displacedHdr[_firstIndex]; - // Spool forward - if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block - // forward to next block, recycling this block into spare spool buffer - SpoolBlock* tmp = _spoolHead->nextSpoolBlock; - assert(_spoolHead != _spoolTail, "Spooling storage mix-up"); - _spoolHead->nextSpoolBlock = _spareSpool; - _spareSpool = _spoolHead; - _spoolHead = tmp; - _firstIndex = 1; - NOT_PRODUCT( - if (_spoolHead == NULL) { // all buffers fully consumed - assert(_spoolTail == NULL && _nextIndex == 1, - "spool buffers processing inconsistency"); - } - ) - } - return hdr; -} - void PromotionInfo::track(PromotedObject* trackOop) { track(trackOop, oop(trackOop)->klass()); } diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp --- a/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.hpp @@ -187,13 +187,10 @@ bool ensure_spooling_space() { return has_spooling_space() || ensure_spooling_space_work(); } - #define PROMOTED_OOPS_ITERATE_DECL(OopClosureType, nv_suffix) \ - void promoted_oops_iterate##nv_suffix(OopClosureType* cl); - ALL_SINCE_SAVE_MARKS_CLOSURES(PROMOTED_OOPS_ITERATE_DECL) - #undef PROMOTED_OOPS_ITERATE_DECL - void promoted_oops_iterate(OopsInGenClosure* cl) { - promoted_oops_iterate_v(cl); - } + + template + void promoted_oops_iterate(OopClosureType* cl); + void verify() const; void reset() { _promoHead = NULL; diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.inline.hpp b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.inline.hpp new file mode 
100644 --- /dev/null +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.inline.hpp @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_PROMOTIONINFO_INLINE_HPP +#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_PROMOTIONINFO_INLINE_HPP + +#include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp" +#include "oops/oop.inline.hpp" + +////////////////////////////////////////////////////////////////////////////// +// We go over the list of promoted objects, removing each from the list, +// and applying the closure (this may, in turn, add more elements to +// the tail of the promoted list, and these newly added objects will +// also be processed) until the list is empty. +// To aid verification and debugging, in the non-product builds +// we actually forward _promoHead each time we process a promoted oop. +// Note that this is not necessary in general (i.e. when we don't need to +// call PromotionInfo::verify()) because oop_iterate can only add to the +// end of _promoTail, and never needs to look at _promoHead. + +// Return the next displaced header, incrementing the pointer and +// recycling spool area as necessary. 
+inline markOop PromotionInfo::nextDisplacedHeader() { + assert(_spoolHead != NULL, "promotionInfo inconsistency"); + assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex, + "Empty spool space: no displaced header can be fetched"); + assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?"); + markOop hdr = _spoolHead->displacedHdr[_firstIndex]; + // Spool forward + if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block + // forward to next block, recycling this block into spare spool buffer + SpoolBlock* tmp = _spoolHead->nextSpoolBlock; + assert(_spoolHead != _spoolTail, "Spooling storage mix-up"); + _spoolHead->nextSpoolBlock = _spareSpool; + _spareSpool = _spoolHead; + _spoolHead = tmp; + _firstIndex = 1; + NOT_PRODUCT( + if (_spoolHead == NULL) { // all buffers fully consumed + assert(_spoolTail == NULL && _nextIndex == 1, + "spool buffers processing inconsistency"); + } + ) + } + return hdr; +} + +template +void PromotionInfo::promoted_oops_iterate(OopClosureType* cl) { + NOT_PRODUCT(verify()); + PromotedObject *curObj, *nextObj; + for (curObj = _promoHead; curObj != NULL; curObj = nextObj) { + if ((nextObj = curObj->next()) == NULL) { + /* protect ourselves against additions due to closure application + below by resetting the list. */ + assert(_promoTail == curObj, "Should have been the tail"); + _promoHead = _promoTail = NULL; + } + if (curObj->hasDisplacedMark()) { + /* restore displaced header */ + oop(curObj)->set_mark(nextDisplacedHeader()); + } else { + /* restore prototypical header */ + oop(curObj)->init_mark(); + } + /* The "promoted_mark" should now not be set */ + assert(!curObj->hasPromotedMark(), + "Should have been cleared by restoring displaced mark-word"); + NOT_PRODUCT(_promoHead = nextObj); + if (cl != NULL) oop(curObj)->oop_iterate(cl); + if (nextObj == NULL) { /* start at head of list reset above */ + nextObj = _promoHead; + } + } + assert(noPromotions(), "post-condition violation"); + assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list"); + assert(_spoolHead == _spoolTail, "emptied spooling buffers"); + assert(_firstIndex == _nextIndex, "empty buffer"); +} + +#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_PROMOTIONINFO_INLINE_HPP diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.cpp b/src/share/vm/gc_implementation/g1/concurrentMark.cpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp @@ -344,7 +344,7 @@ } } -template +template bool CMMarkStack::drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after) { assert(!_drain_in_progress || !_drain_in_progress_yields || yield_after || SafepointSynchronize::is_at_safepoint(), @@ -358,7 +358,7 @@ assert(newOop->is_oop(), "Expected an oop"); assert(bm == NULL || bm->isMarked((HeapWord*)newOop), "only grey objects on this stack"); - newOop->oop_iterate(cl); + newOop->oop_iterate(cl); if (yield_after && _cm->do_yield_check()) { res = false; break; @@ -1201,7 +1201,7 @@ while (curr < end) { Prefetch::read(curr, interval); oop obj = oop(curr); - int size = obj->oop_iterate(&cl); + int size = obj->oop_iterate(&cl); assert(size == obj->size(), "sanity"); curr += size; } @@ -3509,7 +3509,7 @@ size_t obj_size = obj->size(); _words_scanned += obj_size; - obj->oop_iterate(_cm_oop_closure); + obj->oop_iterate(_cm_oop_closure); statsOnly( ++_objs_scanned ); check_limits(); } diff --git a/src/share/vm/gc_implementation/g1/concurrentMark.hpp 
b/src/share/vm/gc_implementation/g1/concurrentMark.hpp --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp @@ -241,7 +241,7 @@ // concurrent marker performing the drain offers to yield after // processing each object. If a yield occurs, stops the drain operation // and returns false. Otherwise, returns true. - template + template bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false); bool isEmpty() { return _index == 0; } diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -62,6 +62,8 @@ #include "memory/generationSpec.hpp" #include "memory/iterator.hpp" #include "memory/referenceProcessor.hpp" +#include "oops/objArrayKlass.hpp" +#include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/oop.pcgc.inline.hpp" #include "runtime/atomic.inline.hpp" @@ -4525,7 +4527,7 @@ while (_evac_failure_scan_stack->length() > 0) { oop obj = _evac_failure_scan_stack->pop(); _evac_failure_closure->set_region(heap_region_containing(obj)); - obj->oop_iterate_backwards(_evac_failure_closure); + obj->oop_iterate_backwards(_evac_failure_closure); } } diff --git a/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp b/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp --- a/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp +++ b/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp @@ -157,7 +157,7 @@ // The problem is that, if evacuation fails, we might have // remembered set entries missing given that we skipped cards on // the collection set. So, we'll recreate such entries now. - obj->oop_iterate(_update_rset_cl); + obj->oop_iterate(_update_rset_cl); } else { // The object has been either evacuated or is dead. Fill it with a diff --git a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp --- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp +++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp @@ -243,7 +243,7 @@ // No point in using the slower heap_region_containing() method, // given that we know obj is in the heap. _scanner.set_region(_g1h->heap_region_containing_raw(obj)); - obj->oop_iterate_backwards(&_scanner); + obj->oop_iterate_backwards(&_scanner); } } else { undo_allocation(alloc_purpose, obj_ptr, word_sz); diff --git a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp --- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp +++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp @@ -114,11 +114,11 @@ // along with the first chunk (i.e., the chunk with start == 0). // Note that at this point the length field of to_obj_array is not // correct given that we are using it to keep track of the next - // start index. oop_iterate_range() (thankfully!) ignores the length + // start index. oop_iterate_range_t() (thankfully!) ignores the length // field and only relies on the start / end parameters. It does // however return the size of the object which will be incorrect. So // we have to ignore it even if we wanted to use it. 
- to_obj_array->oop_iterate_range(&_scanner, start, end); + to_obj_array->oop_iterate_range_t(&_scanner, start, end); } template inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) { diff --git a/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp b/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp --- a/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp +++ b/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "classfile/javaClasses.hpp" +#include "gc_implementation/g1/g1StringDedup.hpp" #include "gc_implementation/g1/g1StringDedupQueue.hpp" #include "memory/gcLocker.hpp" #include "runtime/atomic.inline.hpp" diff --git a/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp b/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp --- a/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp +++ b/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp @@ -61,26 +61,4 @@ class G1InvokeIfNotTriggeredClosure; class G1UpdateRSOrPushRefOopClosure; -#ifdef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES -#error "FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES already defined." -#endif - -#define FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) \ - f(G1ParScanClosure,_nv) \ - f(G1ParPushHeapRSClosure,_nv) \ - f(FilterIntoCSClosure,_nv) \ - f(FilterOutOfRegionClosure,_nv) \ - f(G1CMOopClosure,_nv) \ - f(G1RootRegionScanClosure,_nv) \ - f(G1Mux2Closure,_nv) \ - f(G1TriggerClosure,_nv) \ - f(G1InvokeIfNotTriggeredClosure,_nv) \ - f(G1UpdateRSOrPushRefOopClosure,_nv) - -#ifdef FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES -#error "FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES already defined." -#endif - -#define FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(f) - #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1_SPECIALIZED_OOP_CLOSURES_HPP diff --git a/src/share/vm/gc_implementation/g1/heapRegion.cpp b/src/share/vm/gc_implementation/g1/heapRegion.cpp --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp @@ -57,7 +57,7 @@ OopClosure* oc) : _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { } -template +template HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h, HeapRegion* hr, HeapWord* cur, HeapWord* top) { @@ -69,7 +69,7 @@ if (!g1h->is_obj_dead(cur_oop, hr)) { // Bottom lies entirely below top, so we can call the // non-memRegion version of oop_iterate below. - cur_oop->oop_iterate(cl); + cur_oop->oop_iterate(cl); } cur = next_obj; cur_oop = oop(cur); @@ -101,7 +101,7 @@ // or it was allocated after marking finished, then we add it. Otherwise // we can safely ignore the object. if (!g1h->is_obj_dead(oop(bottom), _hr)) { - oop_size = oop(bottom)->oop_iterate(cl2, mr); + oop_size = oop(bottom)->oop_iterate(cl2, mr); } else { oop_size = _hr->block_size(bottom); } @@ -112,18 +112,18 @@ // We replicate the loop below for several kinds of possible filters. 
switch (_fk) { case NoFilterKind: - bottom = walk_mem_region_loop(_cl, g1h, _hr, bottom, top); + bottom = walk_mem_region_loop(_cl, g1h, _hr, bottom, top); break; case IntoCSFilterKind: { FilterIntoCSClosure filt(this, g1h, _cl); - bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); + bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); break; } case OutOfRegionFilterKind: { FilterOutOfRegionClosure filt(_hr, _cl); - bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); + bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top); break; } @@ -133,7 +133,7 @@ // Last object. Need to do dead-obj filtering here too. if (!g1h->is_obj_dead(oop(bottom), _hr)) { - oop(bottom)->oop_iterate(cl2, mr); + oop(bottom)->oop_iterate(cl2, mr); } } } @@ -525,7 +525,7 @@ assert((cur + block_size(cur)) > start, "Loop postcondition"); if (!g1h->is_obj_dead(obj)) { - obj->oop_iterate(cl, mr); + obj->oop_iterate(cl, mr); } while (cur < end) { @@ -543,11 +543,11 @@ // This object either does not span the MemRegion // boundary, or if it does it's not an array. // Apply closure to whole object. - obj->oop_iterate(cl); + obj->oop_iterate(cl); } else { // This obj is an array that spans the boundary. // Stop at the boundary. - obj->oop_iterate(cl, mr); + obj->oop_iterate(cl, mr); } } cur = next; diff --git a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp @@ -35,8 +35,9 @@ #include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/parGCAllocBuffer.inline.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" +#include "gc_interface/collectedHeap.inline.hpp" #include "memory/defNewGeneration.inline.hpp" -#include "memory/genCollectedHeap.hpp" +#include "memory/genCollectedHeap.inline.hpp" #include "memory/genOopClosures.inline.hpp" #include "memory/generation.hpp" #include "memory/generation.inline.hpp" @@ -44,7 +45,7 @@ #include "memory/resourceArea.hpp" #include "memory/sharedHeap.hpp" #include "memory/space.hpp" -#include "oops/objArrayOop.hpp" +#include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/oop.pcgc.inline.hpp" #include "runtime/atomic.inline.hpp" @@ -148,10 +149,10 @@ // should make sure end is even (aligned to HeapWord in case of compressed oops) if ((HeapWord *)obj < young_old_boundary()) { // object is in to_space - obj->oop_iterate_range(&_to_space_closure, start, end); + obj->oop_iterate_range_t(&_to_space_closure, start, end); } else { // object is in old generation - obj->oop_iterate_range(&_old_gen_closure, start, end); + obj->oop_iterate_range_t(&_old_gen_closure, start, end); } } @@ -169,11 +170,11 @@ scan_partial_array_and_push_remainder(obj_to_scan); } else { // object is in to_space - obj_to_scan->oop_iterate(&_to_space_closure); + obj_to_scan->oop_iterate(&_to_space_closure); } } else { // object is in old generation - obj_to_scan->oop_iterate(&_old_gen_closure); + obj_to_scan->oop_iterate(&_old_gen_closure); } } } @@ -875,7 +876,7 @@ do { // Beware: this call will lead to closure applications via virtual // calls. 
- _gch->oop_since_save_marks_iterate(_level, + _gch->gch_oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, _scan_older); } while (!_gch->no_allocs_since_save_marks(_level)); diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp @@ -29,6 +29,7 @@ #include "gc_implementation/parallelScavenge/psPromotionManager.hpp" #include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp" #include "gc_implementation/parallelScavenge/psScavenge.hpp" +#include "gc_interface/collectedHeap.inline.hpp" #include "oops/oop.psgc.inline.hpp" inline PSPromotionManager* PSPromotionManager::manager_array(int index) { diff --git a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp @@ -40,6 +40,7 @@ #include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/isGCActiveMark.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" +#include "gc_interface/collectedHeap.inline.hpp" #include "gc_interface/gcCause.hpp" #include "memory/collectorPolicy.hpp" #include "memory/gcLocker.inline.hpp" diff --git a/src/share/vm/gc_implementation/shared/immutableSpace.cpp b/src/share/vm/gc_implementation/shared/immutableSpace.cpp --- a/src/share/vm/gc_implementation/shared/immutableSpace.cpp +++ b/src/share/vm/gc_implementation/shared/immutableSpace.cpp @@ -46,7 +46,7 @@ HeapWord* t = end(); // Could call objects iterate, but this is easier. while (obj_addr < t) { - obj_addr += oop(obj_addr)->oop_iterate(cl); + obj_addr += oop(obj_addr)->oop_iterate(cl); } } diff --git a/src/share/vm/gc_implementation/shared/markSweep.inline.hpp b/src/share/vm/gc_implementation/shared/markSweep.inline.hpp --- a/src/share/vm/gc_implementation/shared/markSweep.inline.hpp +++ b/src/share/vm/gc_implementation/shared/markSweep.inline.hpp @@ -27,6 +27,7 @@ #include "gc_implementation/shared/markSweep.hpp" #include "gc_interface/collectedHeap.hpp" +#include "oops/markOop.inline.hpp" #include "utilities/stack.inline.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS diff --git a/src/share/vm/gc_implementation/shared/mutableSpace.cpp b/src/share/vm/gc_implementation/shared/mutableSpace.cpp --- a/src/share/vm/gc_implementation/shared/mutableSpace.cpp +++ b/src/share/vm/gc_implementation/shared/mutableSpace.cpp @@ -225,7 +225,7 @@ HeapWord* t = top(); // Could call objects iterate, but this is easier. 
while (obj_addr < t) { - obj_addr += oop(obj_addr)->oop_iterate(cl); + obj_addr += oop(obj_addr)->oop_iterate(cl); } } diff --git a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp --- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp +++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp @@ -24,6 +24,7 @@ #ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP + #include "gc_interface/collectedHeap.hpp" #include "memory/allocation.hpp" #include "memory/blockOffsetTable.hpp" diff --git a/src/share/vm/gc_interface/collectedHeap.hpp b/src/share/vm/gc_interface/collectedHeap.hpp --- a/src/share/vm/gc_interface/collectedHeap.hpp +++ b/src/share/vm/gc_interface/collectedHeap.hpp @@ -614,8 +614,8 @@ // Support for PromotionFailureALot. Return true if it's time to cause a // promotion failure. The no-argument version uses // this->_promotion_failure_alot_count as the counter. - inline bool promotion_should_fail(volatile size_t* count); - inline bool promotion_should_fail(); + bool promotion_should_fail(volatile size_t* count); + bool promotion_should_fail(); // Reset the PromotionFailureALot counters. Should be called at the end of a // GC in which promotion failure occurred. diff --git a/src/share/vm/memory/defNewGeneration.cpp b/src/share/vm/memory/defNewGeneration.cpp --- a/src/share/vm/memory/defNewGeneration.cpp +++ b/src/share/vm/memory/defNewGeneration.cpp @@ -30,9 +30,10 @@ #include "gc_implementation/shared/gcTraceTime.hpp" #include "gc_implementation/shared/gcTrace.hpp" #include "gc_implementation/shared/spaceDecorator.hpp" +#include "gc_interface/collectedHeap.inline.hpp" #include "memory/defNewGeneration.inline.hpp" #include "memory/gcLocker.inline.hpp" -#include "memory/genCollectedHeap.hpp" +#include "memory/genCollectedHeap.inline.hpp" #include "memory/genOopClosures.inline.hpp" #include "memory/genRemSet.hpp" #include "memory/generationSpec.hpp" @@ -91,7 +92,7 @@ void DefNewGeneration::EvacuateFollowersClosure::do_void() { do { - _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, + _gch->gch_oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, _scan_older); } while (!_gch->no_allocs_since_save_marks(_level)); } @@ -106,7 +107,7 @@ void DefNewGeneration::FastEvacuateFollowersClosure::do_void() { do { - _gch->oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, + _gch->gch_oop_since_save_marks_iterate(_level, _scan_cur_or_nonheap, _scan_older); } while (!_gch->no_allocs_since_save_marks(_level)); guarantee(_gen->promo_failure_scan_is_complete(), "Failed to finish scan"); @@ -186,7 +187,7 @@ size_t initial_size, int level, const char* policy) - : Generation(rs, initial_size, level), + : Generation(rs, initial_size, level, _dispatch_index_generation_def_new), _promo_failure_drain_in_progress(false), _should_allocate_from_space(false) { @@ -822,7 +823,7 @@ void DefNewGeneration::drain_promo_failure_scan_stack() { while (!_promo_failure_scan_stack.is_empty()) { oop obj = _promo_failure_scan_stack.pop(); - obj->oop_iterate(_promo_failure_scan_stack_closure); + obj->oop_iterate(_promo_failure_scan_stack_closure); } } @@ -846,22 +847,6 @@ return to()->saved_mark_at_top(); } -#define DefNew_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ - \ -void DefNewGeneration:: \ -oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ - cl->set_generation(this); \ - 
eden()->oop_since_save_marks_iterate##nv_suffix(cl); \ - to()->oop_since_save_marks_iterate##nv_suffix(cl); \ - from()->oop_since_save_marks_iterate##nv_suffix(cl); \ - cl->reset_generation(); \ - save_marks(); \ -} - -ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DEFN) - -#undef DefNew_SINCE_SAVE_MARKS_DEFN - void DefNewGeneration::contribute_scratch(ScratchBlock*& list, Generation* requestor, size_t max_alloc_words) { if (requestor == this || _promotion_failed) return; diff --git a/src/share/vm/memory/defNewGeneration.hpp b/src/share/vm/memory/defNewGeneration.hpp --- a/src/share/vm/memory/defNewGeneration.hpp +++ b/src/share/vm/memory/defNewGeneration.hpp @@ -36,6 +36,7 @@ class ContiguousSpace; class ScanClosure; class STWGCTimer; +class CSpaceCounters; // DefNewGeneration is a young generation containing eden, from- and // to-space. @@ -300,12 +301,8 @@ // Need to declare the full complement of closures, whether we'll // override them or not, or get message from the compiler: // oop_since_save_marks_iterate_nv hides virtual function... -#define DefNew_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ - void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); - - ALL_SINCE_SAVE_MARKS_CLOSURES(DefNew_SINCE_SAVE_MARKS_DECL) - -#undef DefNew_SINCE_SAVE_MARKS_DECL + template + void oop_since_save_marks_iterate(OopClosureType* cl); // For non-youngest collection, the DefNewGeneration can contribute // "to-space". diff --git a/src/share/vm/memory/defNewGeneration.inline-disp.hpp b/src/share/vm/memory/defNewGeneration.inline-disp.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/memory/defNewGeneration.inline-disp.hpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_MEMORY_DEFNEWGENERATION_INLINE_DISP_HPP +#define SHARE_VM_MEMORY_DEFNEWGENERATION_INLINE_DISP_HPP + +#include "memory/defNewGeneration.hpp" +#include "memory/space.inline-disp.hpp" + +template +void DefNewGeneration:: +oop_since_save_marks_iterate(OopClosureType* cl) { + cl->set_generation(this); + eden()->cspace_oop_since_save_marks_iterate(cl); + to()->cspace_oop_since_save_marks_iterate(cl); + from()->cspace_oop_since_save_marks_iterate(cl); + cl->reset_generation(); + save_marks(); +} + +#endif // SHARE_VM_MEMORY_DEFNEWGENERATION_INLINE_DISP_HPP diff --git a/src/share/vm/memory/genCollectedHeap.cpp b/src/share/vm/memory/genCollectedHeap.cpp --- a/src/share/vm/memory/genCollectedHeap.cpp +++ b/src/share/vm/memory/genCollectedHeap.cpp @@ -664,21 +664,6 @@ } } -#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \ -void GenCollectedHeap:: \ -oop_since_save_marks_iterate(int level, \ - OopClosureType* cur, \ - OopClosureType* older) { \ - _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur); \ - for (int i = level+1; i < n_gens(); i++) { \ - _gens[i]->oop_since_save_marks_iterate##nv_suffix(older); \ - } \ -} - -ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN) - -#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN - bool GenCollectedHeap::no_allocs_since_save_marks(int level) { for (int i = level; i < _n_gens; i++) { if (!_gens[i]->no_allocs_since_save_marks()) return false; diff --git a/src/share/vm/memory/genCollectedHeap.hpp b/src/share/vm/memory/genCollectedHeap.hpp --- a/src/share/vm/memory/genCollectedHeap.hpp +++ b/src/share/vm/memory/genCollectedHeap.hpp @@ -443,15 +443,11 @@ // "level". The "cur" closure is // applied to references in the generation at "level", and the "older" // closure to older generations. -#define GCH_SINCE_SAVE_MARKS_ITERATE_DECL(OopClosureType, nv_suffix) \ - void oop_since_save_marks_iterate(int level, \ - OopClosureType* cur, \ + template + void gch_oop_since_save_marks_iterate(int level, + OopClosureType* cur, OopClosureType* older); - ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DECL) - -#undef GCH_SINCE_SAVE_MARKS_ITERATE_DECL - // Returns "true" iff no allocations have occurred in any generation at // "level" or above since the last // call to "save_marks". diff --git a/src/share/vm/memory/genCollectedHeap.inline.hpp b/src/share/vm/memory/genCollectedHeap.inline.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/memory/genCollectedHeap.inline.hpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_MEMORY_GENCOLLECTEDHEAP_INLINE_HPP +#define SHARE_VM_MEMORY_GENCOLLECTEDHEAP_INLINE_HPP + +#include "memory/genCollectedHeap.hpp" +#include "memory/generation.inline-disp.hpp" + +template +void GenCollectedHeap:: +gch_oop_since_save_marks_iterate(int level, + OopClosureType* cur, + OopClosureType* older) { + _gens[level]->oop_since_save_marks_iterate_disp(cur); + for (int i = level+1; i < n_gens(); i++) { + _gens[i]->oop_since_save_marks_iterate_disp(older); + } +} + +#endif // SHARE_VM_MEMORY_GENCOLLECTEDHEAP_INLINE_HPP diff --git a/src/share/vm/memory/generation.cpp b/src/share/vm/memory/generation.cpp --- a/src/share/vm/memory/generation.cpp +++ b/src/share/vm/memory/generation.cpp @@ -45,9 +45,10 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC -Generation::Generation(ReservedSpace rs, size_t initial_size, int level) : +Generation::Generation(ReservedSpace rs, size_t initial_size, int level, jbyte dispatch_index) : _level(level), - _ref_processor(NULL) { + _ref_processor(NULL), + _dispatch_index(dispatch_index) { if (!_virtual_space.initialize(rs, initial_size)) { vm_exit_during_initialization("Could not reserve enough space for " "object heap"); @@ -378,8 +379,9 @@ CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level, - GenRemSet* remset) : - Generation(rs, initial_byte_size, level), _rs(remset), + GenRemSet* remset, + jbyte dispatch_index) : + Generation(rs, initial_byte_size, level, dispatch_index), _rs(remset), _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(), _used_at_prologue() { @@ -826,21 +828,6 @@ return _the_space->saved_mark_at_top(); } -#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -void OneContigSpaceCardGeneration:: \ -oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \ - blk->set_generation(this); \ - _the_space->oop_since_save_marks_iterate##nv_suffix(blk); \ - blk->reset_generation(); \ - save_marks(); \ -} - -ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN) - -#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN - - void OneContigSpaceCardGeneration::gc_epilogue(bool full) { _last_gc = WaterMark(the_space(), the_space()->top()); diff --git a/src/share/vm/memory/generation.hpp b/src/share/vm/memory/generation.hpp --- a/src/share/vm/memory/generation.hpp +++ b/src/share/vm/memory/generation.hpp @@ -114,12 +114,19 @@ // Statistics for garbage collection GCStats* _gc_stats; + enum { + _dispatch_index_generation_cms, + _dispatch_index_generation_def_new, + _dispatch_index_generation_one_contig + }; + const jbyte _dispatch_index; + // Returns the next generation in the configuration, or else NULL if this // is the highest generation. Generation* next_gen() const; // Initialize the generation. - Generation(ReservedSpace rs, size_t initial_byte_size, int level); + Generation(ReservedSpace rs, size_t initial_byte_size, int level, jbyte dispatch_index); // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in // "sp" that point into younger generations. @@ -479,24 +486,15 @@ // generation since the last call to "save_marks". virtual bool no_allocs_since_save_marks() = 0; - // Apply "cl->apply" to (the addresses of) all reference fields in objects - // allocated in the current generation since the last call to "save_marks". 
- // If more objects are allocated in this generation as a result of applying - // the closure, iterates over reference fields in those objects as well. - // Calls "save_marks" at the end of the iteration. - // General signature... - virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0; // ...and specializations for de-virtualization. (The general // implementation of the _nv versions call the virtual version. // Note that the _nv suffix is not really semantically necessary, // but it avoids some not-so-useful warnings on Solaris.) -#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ - virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ - oop_since_save_marks_iterate_v((OopsInGenClosure*)cl); \ - } - SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL) + template + void oop_since_save_marks_iterate(OopClosureType* cl); -#undef Generation_SINCE_SAVE_MARKS_DECL + template + void oop_since_save_marks_iterate_disp(OopClosureType* cl); // The "requestor" generation is performing some garbage collection // action for which it would be useful to have scratch space. If @@ -639,7 +637,7 @@ size_t _used_at_prologue; CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level, - GenRemSet* remset); + GenRemSet* remset, jbyte dispatch_index); public: @@ -698,7 +696,7 @@ OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size, int level, GenRemSet* remset, ContiguousSpace* space) : - CardGeneration(rs, initial_byte_size, level, remset), + CardGeneration(rs, initial_byte_size, level, remset, _dispatch_index_generation_one_contig), _the_space(space) {} @@ -729,10 +727,8 @@ inline WaterMark top_mark(); inline WaterMark bottom_mark(); -#define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ - void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl); - OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v) - SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL) + template + void oop_since_save_marks_iterate(OopClosureType* cl); void save_marks(); void reset_saved_marks(); diff --git a/src/share/vm/memory/generation.inline-disp.hpp b/src/share/vm/memory/generation.inline-disp.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/memory/generation.inline-disp.hpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_MEMORY_GENERATION_INLINE_DISP_HPP +#define SHARE_VM_MEMORY_GENERATION_INLINE_DISP_HPP + +#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline-disp.hpp" +#include "memory/defNewGeneration.inline-disp.hpp" +#include "memory/space.inline-disp.hpp" + +template +void OneContigSpaceCardGeneration:: +oop_since_save_marks_iterate(OopClosureType* blk) { + blk->set_generation(this); + _the_space->cspace_oop_since_save_marks_iterate(blk); + blk->reset_generation(); + save_marks(); +} + +template +void Generation::oop_since_save_marks_iterate_disp(OopClosureType* cl) { + switch (_dispatch_index) { + case _dispatch_index_generation_cms: ((ConcurrentMarkSweepGeneration*)this)->oop_since_save_marks_iterate(cl); break; + case _dispatch_index_generation_one_contig: ((OneContigSpaceCardGeneration*)this)->oop_since_save_marks_iterate(cl); break; + case _dispatch_index_generation_def_new: ((DefNewGeneration*)this)->oop_since_save_marks_iterate(cl); break; + default: ShouldNotReachHere(); break; + } +} + +#endif // SHARE_VM_MEMORY_GENERATION_INLINE_DISP_HPP diff --git a/src/share/vm/memory/iterator.cpp b/src/share/vm/memory/iterator.cpp --- a/src/share/vm/memory/iterator.cpp +++ b/src/share/vm/memory/iterator.cpp @@ -40,7 +40,7 @@ } void ObjectToOopClosure::do_object(oop obj) { - obj->oop_iterate(_cl); + obj->oop_iterate(_cl); } void VoidClosure::do_void() { diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp --- a/src/share/vm/memory/space.cpp +++ b/src/share/vm/memory/space.cpp @@ -96,7 +96,7 @@ // block alignment or minimum block size restrictions. XXX if (_sp->block_is_obj(bottom) && !_sp->obj_allocated_since_save_marks(oop(bottom))) { - oop(bottom)->oop_iterate(_cl, mr); + oop(bottom)->oop_iterate(_cl, mr); } } } @@ -234,31 +234,31 @@ // We must replicate this so that the static type of "FilteringClosure" // (see above) is apparent at the oop_iterate calls. -#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \ +#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType, nv) \ void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr, \ HeapWord* bottom, \ HeapWord* top, \ ClosureType* cl) { \ - bottom += oop(bottom)->oop_iterate(cl, mr); \ + bottom += oop(bottom)->oop_iterate(cl, mr); \ if (bottom < top) { \ HeapWord* next_obj = bottom + oop(bottom)->size(); \ while (next_obj < top) { \ /* Bottom lies entirely below top, so we can call the */ \ /* non-memRegion version of oop_iterate below. */ \ - oop(bottom)->oop_iterate(cl); \ + oop(bottom)->oop_iterate(cl); \ bottom = next_obj; \ next_obj = bottom + oop(bottom)->size(); \ } \ /* Last object. */ \ - oop(bottom)->oop_iterate(cl, mr); \ + oop(bottom)->oop_iterate(cl, mr); \ } \ } // (There are only two of these, rather than N, because the split is due // only to the introduction of the FilteringClosure, a local part of the // impl of this abstraction.) 
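The second argument added to the two DCTOC instantiations just below is the nv ("non-virtual") flag that all of the new templated iterators thread through to a Devirtualizer helper. That helper is not defined in this hunk, so the following is only a sketch, under that assumption, of the compile-time selection it is expected to perform:

// Sketch only: illustrates the assumed mechanism behind the nv flag.
// nv == true binds do_oop to the closure's static type (a direct, inlinable
// call); nv == false falls back to the ordinary virtual call.
template <bool nv> class Devirtualizer;

template <> class Devirtualizer<true> {
 public:
  template <typename OopClosureType, typename T>
  static void do_oop(OopClosureType* closure, T* p) {
    closure->OopClosureType::do_oop(p);  // qualified call, no vtable lookup
  }
};

template <> class Devirtualizer<false> {
 public:
  template <typename OopClosureType, typename T>
  static void do_oop(OopClosureType* closure, T* p) {
    closure->do_oop(p);                  // normal virtual dispatch
  }
};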
-ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure) -ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure) +ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure, false) +ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure, true) DirtyCardToOopClosure* ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl, @@ -554,30 +554,13 @@ return true; } -#if INCLUDE_ALL_GCS -#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ - void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\ - HeapWord* obj_addr = mr.start(); \ - HeapWord* t = mr.end(); \ - while (obj_addr < t) { \ - assert(oop(obj_addr)->is_oop(), "Should be an oop"); \ - obj_addr += oop(obj_addr)->oop_iterate(blk); \ - } \ - } - - ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN) - -#undef ContigSpace_PAR_OOP_ITERATE_DEFN -#endif // INCLUDE_ALL_GCS - void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) { if (is_empty()) return; HeapWord* obj_addr = bottom(); HeapWord* t = top(); // Could call objects iterate, but this is easier. while (obj_addr < t) { - obj_addr += oop(obj_addr)->oop_iterate(blk); + obj_addr += oop(obj_addr)->oop_iterate(blk); } } @@ -617,32 +600,27 @@ return NULL; // all done } -#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ - \ -void ContiguousSpace:: \ -oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \ - HeapWord* t; \ - HeapWord* p = saved_mark_word(); \ - assert(p != NULL, "expected saved mark"); \ - \ - const intx interval = PrefetchScanIntervalInBytes; \ - do { \ - t = top(); \ - while (p < t) { \ - Prefetch::write(p, interval); \ - debug_only(HeapWord* prev = p); \ - oop m = oop(p); \ - p += m->oop_iterate(blk); \ - } \ - } while (t < top()); \ - \ - set_saved_mark_word(p); \ +template +void ContiguousSpace:: +cspace_oop_since_save_marks_iterate(OopClosureType* blk) { + HeapWord* t; + HeapWord* p = saved_mark_word(); + assert(p != NULL, "expected saved mark"); + + const intx interval = PrefetchScanIntervalInBytes; + do { + t = top(); + while (p < t) { + Prefetch::write(p, interval); + debug_only(HeapWord* prev = p); + oop m = oop(p); + p += m->oop_iterate(blk); + } + } while (t < top()); + + set_saved_mark_word(p); } -ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN) - -#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN - // Very general, slow implementation. HeapWord* ContiguousSpace::block_start_const(const void* p) const { assert(MemRegion(bottom(), end()).contains(p), diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp --- a/src/share/vm/memory/space.hpp +++ b/src/share/vm/memory/space.hpp @@ -558,11 +558,8 @@ #if INCLUDE_ALL_GCS // In support of parallel oop_iterate. - #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - void par_oop_iterate(MemRegion mr, OopClosureType* blk); - - ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL) - #undef ContigSpace_PAR_OOP_ITERATE_DECL + template + void par_oop_iterate(MemRegion mr, OopClosureType* blk); #endif // INCLUDE_ALL_GCS // Compaction support @@ -585,11 +582,8 @@ // *are* included in the iteration. // Updates _saved_mark_word to point to just after the last object // iterated over. 
-#define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \ - void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk); - - ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL) -#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL + template + void cspace_oop_since_save_marks_iterate(OopClosureType* blk); // Same as object_iterate, but starting from "mark", which is required // to denote the start of an object. Objects allocated by diff --git a/src/share/vm/memory/space.inline-disp.hpp b/src/share/vm/memory/space.inline-disp.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/memory/space.inline-disp.hpp @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_MEMORY_SPACE_INLINE_DISP_HPP +#define SHARE_VM_MEMORY_SPACE_INLINE_DISP_HPP + +#include "memory/space.hpp" +#include "oops/oop.inline.hpp" + +template +void ContiguousSpace:: +cspace_oop_since_save_marks_iterate(OopClosureType* blk) { + HeapWord* t; + HeapWord* p = saved_mark_word(); + assert(p != NULL, "expected saved mark"); + + const intx interval = PrefetchScanIntervalInBytes; + do { + t = top(); + while (p < t) { + Prefetch::write(p, interval); + debug_only(HeapWord* prev = p); + oop m = oop(p); + p += m->oop_iterate(blk); + } + } while (t < top()); + + set_saved_mark_word(p); +} + +#endif // SHARE_VM_MEMORY_SPACE_INLINE_DISP_HPP diff --git a/src/share/vm/memory/space.inline.hpp b/src/share/vm/memory/space.inline.hpp --- a/src/share/vm/memory/space.inline.hpp +++ b/src/share/vm/memory/space.inline.hpp @@ -26,6 +26,7 @@ #define SHARE_VM_MEMORY_SPACE_INLINE_HPP #include "gc_interface/collectedHeap.hpp" +#include "oops/oop.inline.hpp" #include "memory/space.hpp" #include "memory/universe.hpp" #include "runtime/prefetch.inline.hpp" @@ -334,4 +335,16 @@ return _offsets.block_start(p); } +#ifndef SERIALGC +template +void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) { + HeapWord* obj_addr = mr.start(); + HeapWord* t = mr.end(); + while (obj_addr < t) { + assert(oop(obj_addr)->is_oop(), "Should be an oop"); + obj_addr += oop(obj_addr)->oop_iterate(blk); + } +} +#endif // SERIALGC + #endif // SHARE_VM_MEMORY_SPACE_INLINE_HPP diff --git a/src/share/vm/memory/specialized_oop_closures.hpp b/src/share/vm/memory/specialized_oop_closures.hpp --- a/src/share/vm/memory/specialized_oop_closures.hpp +++ b/src/share/vm/memory/specialized_oop_closures.hpp @@ -30,171 +30,9 @@ #include "gc_implementation/g1/g1_specialized_oop_closures.hpp" #endif // 
INCLUDE_ALL_GCS -// The following OopClosure types get specialized versions of -// "oop_oop_iterate" that invoke the closures' do_oop methods -// non-virtually, using a mechanism defined in this file. Extend these -// macros in the obvious way to add specializations for new closures. - -// Forward declarations. -class OopClosure; -class OopsInGenClosure; -// DefNew -class ScanClosure; -class FastScanClosure; -class FilteringClosure; -// ParNew -class ParScanWithBarrierClosure; -class ParScanWithoutBarrierClosure; -// CMS -class MarkRefsIntoAndScanClosure; -class Par_MarkRefsIntoAndScanClosure; -class PushAndMarkClosure; -class Par_PushAndMarkClosure; -class PushOrMarkClosure; -class Par_PushOrMarkClosure; -class CMSKeepAliveClosure; -class CMSInnerParMarkAndPushClosure; -// Misc -class NoHeaderExtendedOopClosure; - -// This macro applies an argument macro to all OopClosures for which we -// want specialized bodies of "oop_oop_iterate". The arguments to "f" are: -// "f(closureType, non_virtual)" -// where "closureType" is the name of the particular subclass of OopClosure, -// and "non_virtual" will be the string "_nv" if the closure type should -// have its "do_oop" method invoked non-virtually, or else the -// string "_v". ("OopClosure" itself will be the only class in the latter -// category.) - -// This is split into several because of a Visual C++ 6.0 compiler bug -// where very long macros cause the compiler to crash - -// Some other heap might define further specialized closures. -#ifndef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES -#define FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) \ - /* None */ -#endif - -#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(f) \ - f(ScanClosure,_nv) \ - f(FastScanClosure,_nv) \ - f(FilteringClosure,_nv) - -#if INCLUDE_ALL_GCS -#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f) \ - f(ParScanWithBarrierClosure,_nv) \ - f(ParScanWithoutBarrierClosure,_nv) -#else // INCLUDE_ALL_GCS -#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f) -#endif // INCLUDE_ALL_GCS - -#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_1(f) \ - f(NoHeaderExtendedOopClosure,_nv) \ - SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(f) \ - SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(f) - -#if INCLUDE_ALL_GCS -#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f) \ - f(MarkRefsIntoAndScanClosure,_nv) \ - f(Par_MarkRefsIntoAndScanClosure,_nv) \ - f(PushAndMarkClosure,_nv) \ - f(Par_PushAndMarkClosure,_nv) \ - f(PushOrMarkClosure,_nv) \ - f(Par_PushOrMarkClosure,_nv) \ - f(CMSKeepAliveClosure,_nv) \ - f(CMSInnerParMarkAndPushClosure,_nv) \ - FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) -#else // INCLUDE_ALL_GCS -#define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f) -#endif // INCLUDE_ALL_GCS - - -// We separate these out, because sometime the general one has -// a different definition from the specialized ones, and sometimes it -// doesn't. - -#define ALL_OOP_OOP_ITERATE_CLOSURES_1(f) \ - f(ExtendedOopClosure,_v) \ - SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_1(f) - -#define ALL_OOP_OOP_ITERATE_CLOSURES_2(f) \ - SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(f) - -#if INCLUDE_ALL_GCS -// This macro applies an argument macro to all OopClosures for which we -// want specialized bodies of a family of methods related to -// "par_oop_iterate". The arguments to f are the same as above. -// The "root_class" is the most general class to define; this may be -// "OopClosure" in some applications and "OopsInGenClosure" in others. 
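The registry deleted here existed only so the macro machinery could stamp out one specialized oop_oop_iterate body per registered closure type. With the templated iterators, an equally specialized body is produced by ordinary template instantiation at each call site, so no central list of closure types is needed. A condensed before/after sketch (signatures illustrative, not copied from the patch):

// Before: per-Klass overload set generated from the closure registry, e.g.
//   int oop_oop_iterate_nv(oop obj, ScanClosure* cl);
//   int oop_oop_iterate_nv(oop obj, FastScanClosure* cl);
//   int oop_oop_iterate_v (oop obj, ExtendedOopClosure* cl);   // virtual fallback
//
// After: a single member template; each closure type used at a call site gets
// its own instantiation on demand, with nv deciding how do_oop is invoked.
template <bool nv, typename OopClosureType>
int oop_oop_iterate(oop obj, OopClosureType* closure);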
- -#define SPECIALIZED_PAR_OOP_ITERATE_CLOSURES(f) \ - f(MarkRefsIntoAndScanClosure,_nv) \ - f(PushAndMarkClosure,_nv) \ - f(Par_MarkRefsIntoAndScanClosure,_nv) \ - f(Par_PushAndMarkClosure,_nv) - -#define ALL_PAR_OOP_ITERATE_CLOSURES(f) \ - f(ExtendedOopClosure,_v) \ - SPECIALIZED_PAR_OOP_ITERATE_CLOSURES(f) -#endif // INCLUDE_ALL_GCS - -// This macro applies an argument macro to all OopClosures for which we -// want specialized bodies of a family of methods related to -// "oops_since_save_marks_do". The arguments to f are the same as above. -// The "root_class" is the most general class to define; this may be -// "OopClosure" in some applications and "OopsInGenClosure" in others. - - -// Some other heap might define further specialized closures. -#ifndef FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES -#define FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(f) \ - /* None */ -#endif - -#define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_S(f) \ - f(ScanClosure,_nv) \ - f(FastScanClosure,_nv) - -#if INCLUDE_ALL_GCS -#define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_P(f) \ - f(ParScanWithBarrierClosure,_nv) \ - f(ParScanWithoutBarrierClosure,_nv) \ - FURTHER_SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(f) -#else // INCLUDE_ALL_GCS -#define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_P(f) -#endif // INCLUDE_ALL_GCS - -#define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(f) \ - SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_S(f) \ - SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG_P(f) - -#define SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(f) \ - SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(f) - -// We separate these out, because sometime the general one has -// a different definition from the specialized ones, and sometimes it -// doesn't. -// NOTE: One of the valid criticisms of this -// specialize-oop_oop_iterate-for-specific-closures idiom is that it is -// easy to have a silent performance bug: if you fail to de-virtualize, -// things still work, just slower. The "SpecializationStats" mode is -// intended to at least make such a failure easy to detect. -// *Not* using the ALL_SINCE_SAVE_MARKS_CLOSURES(f) macro defined -// below means that *only* closures for which oop_oop_iterate specializations -// exist above may be applied to "oops_since_save_marks". That is, -// this form of the performance bug is caught statically. When you add -// a definition for the general type, this property goes away. -// Make sure you test with SpecializationStats to find such bugs -// when introducing a new closure where you don't want virtual dispatch. - -#define ALL_SINCE_SAVE_MARKS_CLOSURES(f) \ - f(OopsInGenClosure,_v) \ - SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(f) - // For keeping stats on effectiveness. #define ENABLE_SPECIALIZATION_STATS 0 - class SpecializationStats { public: enum Kind { diff --git a/src/share/vm/memory/universe.cpp b/src/share/vm/memory/universe.cpp --- a/src/share/vm/memory/universe.cpp +++ b/src/share/vm/memory/universe.cpp @@ -1497,5 +1497,4 @@ } return true; } - -#endif // ASSERT +#endif diff --git a/src/share/vm/oops/arrayKlass.cpp b/src/share/vm/oops/arrayKlass.cpp --- a/src/share/vm/oops/arrayKlass.cpp +++ b/src/share/vm/oops/arrayKlass.cpp @@ -70,7 +70,7 @@ return super()->uncached_lookup_method(name, signature, mode); } -ArrayKlass::ArrayKlass(Symbol* name) { +ArrayKlass::ArrayKlass(Symbol* name, jbyte dispatch_index) : Klass(dispatch_index) { set_name(name); set_super(Universe::is_bootstrapping() ? 
(Klass*)NULL : SystemDictionary::Object_klass()); diff --git a/src/share/vm/oops/arrayKlass.hpp b/src/share/vm/oops/arrayKlass.hpp --- a/src/share/vm/oops/arrayKlass.hpp +++ b/src/share/vm/oops/arrayKlass.hpp @@ -45,8 +45,8 @@ // Constructors // The constructor with the Symbol argument does the real array // initialization, the other is a dummy - ArrayKlass(Symbol* name); - ArrayKlass() { assert(DumpSharedSpaces || UseSharedSpaces, "only for cds"); } + ArrayKlass(Symbol* name, jbyte dispatch_index); + ArrayKlass(jbyte dispatch_index) : Klass(dispatch_index, true) { assert(DumpSharedSpaces || UseSharedSpaces, "only for cds"); } public: // Testing operation diff --git a/src/share/vm/oops/instanceClassLoaderKlass.cpp b/src/share/vm/oops/instanceClassLoaderKlass.cpp --- a/src/share/vm/oops/instanceClassLoaderKlass.cpp +++ b/src/share/vm/oops/instanceClassLoaderKlass.cpp @@ -34,6 +34,7 @@ #include "oops/instanceClassLoaderKlass.hpp" #include "oops/instanceMirrorKlass.hpp" #include "oops/instanceOop.hpp" +#include "oops/klass.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" #include "runtime/handles.inline.hpp" @@ -45,74 +46,8 @@ #include "oops/oop.pcgc.inline.hpp" #endif // INCLUDE_ALL_GCS -// Macro to define InstanceClassLoaderKlass::oop_oop_iterate for virtual/nonvirtual for -// all closures. Macros calling macros above for each oop size. -// Since ClassLoader objects have only a pointer to the loader_data, they are not -// compressed nor does the pointer move. - -#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)\ - \ -int InstanceClassLoaderKlass:: \ -oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ - /* Get size before changing pointers */ \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\ - int size = InstanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \ - \ - if_do_metadata_checked(closure, nv_suffix) { \ - ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj); \ - /* cld can be null if we have a non-registered class loader. */ \ - if (cld != NULL) { \ - closure->do_class_loader_data(cld); \ - } \ - } \ - \ - return size; \ -} - -#if INCLUDE_ALL_GCS -#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceClassLoaderKlass:: \ -oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \ - /* Get size before changing pointers */ \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\ - int size = InstanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure); \ - return size; \ -} -#endif // INCLUDE_ALL_GCS - - -#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ - \ -int InstanceClassLoaderKlass:: \ -oop_oop_iterate##nv_suffix##_m(oop obj, \ - OopClosureType* closure, \ - MemRegion mr) { \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\ - \ - int size = InstanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr); \ - \ - if_do_metadata_checked(closure, nv_suffix) { \ - if (mr.contains(obj)) { \ - ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj); \ - /* cld can be null if we have a non-registered class loader. 
*/ \ - if (cld != NULL) { \ - closure->do_class_loader_data(cld); \ - } \ - } \ - } \ - \ - return size; \ -} - -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN) -#if INCLUDE_ALL_GCS -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) -#endif // INCLUDE_ALL_GCS -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m) +InstanceClassLoaderKlass::InstanceClassLoaderKlass(int vtable_len, int itable_len, int static_field_size, int nonstatic_oop_map_size, ReferenceType rt, AccessFlags access_flags, bool is_anonymous) + : InstanceKlass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_size, rt, access_flags, is_anonymous, _instance_class_loader) {} void InstanceClassLoaderKlass::oop_follow_contents(oop obj) { InstanceKlass::oop_follow_contents(obj); diff --git a/src/share/vm/oops/instanceClassLoaderKlass.hpp b/src/share/vm/oops/instanceClassLoaderKlass.hpp --- a/src/share/vm/oops/instanceClassLoaderKlass.hpp +++ b/src/share/vm/oops/instanceClassLoaderKlass.hpp @@ -39,35 +39,28 @@ friend class InstanceKlass; // Constructor - InstanceClassLoaderKlass(int vtable_len, int itable_len, int static_field_size, int nonstatic_oop_map_size, ReferenceType rt, AccessFlags access_flags, bool is_anonymous) - : InstanceKlass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_size, rt, access_flags, is_anonymous) {} + InstanceClassLoaderKlass(int vtable_len, int itable_len, int static_field_size, int nonstatic_oop_map_size, ReferenceType rt, AccessFlags access_flags, bool is_anonymous); public: virtual bool oop_is_instanceClassLoader() const { return true; } InstanceClassLoaderKlass() { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); } - // Iterators - int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) { - return oop_oop_iterate_v(obj, blk); - } - int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) { - return oop_oop_iterate_v_m(obj, blk, mr); + static InstanceClassLoaderKlass* cast(Klass* k) { + assert(k->is_klass(), "must be"); + assert(k->oop_is_instanceClassLoader(), "cast to instanceClassLoaderKlass"); + return (InstanceClassLoaderKlass*) k; } -#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \ - int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, MemRegion mr); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL) - + template + int oop_oop_iterate(oop obj, OopClosureType* blk); + + template + int oop_oop_iterate_m(oop obj, OopClosureType* blk, MemRegion mr); + #if INCLUDE_ALL_GCS -#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) + template + int oop_oop_iterate_backwards(oop obj, OopClosureType* blk); #endif // INCLUDE_ALL_GCS // Garbage collection diff --git 
a/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp b/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP +#define SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP + +#include "classfile/javaClasses.hpp" +#include "oops/instanceClassLoaderKlass.hpp" +#include "oops/oop.inline2.hpp" + +// Macro to define instanceClassLoaderKlass::oop_oop_iterate for virtual/nonvirtual for +// all closures. Macros calling macros above for each oop size. +// Since ClassLoader objects have only a pointer to the loader_data, they are not +// compressed nor does the pointer move. + +template +int InstanceClassLoaderKlass:: +oop_oop_iterate(oop obj, OopClosureType* closure) { + /* Get size before changing pointers */ + int size = InstanceKlass::oop_oop_iterate(obj, closure); + + if (Devirtualizer::do_metadata(closure)) { + ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj); + /* cld can be null if we have a non-registered class loader. */ + if (cld != NULL) { + closure->do_class_loader_data(cld); + } + } + + return size; +} + +#ifndef SERIALGC +template +int InstanceClassLoaderKlass:: +oop_oop_iterate_backwards(oop obj, OopClosureType* closure) { + /* Get size before changing pointers */ + int size = InstanceKlass::oop_oop_iterate_backwards(obj, closure); + return size; +} +#endif // !SERIALGC + + +template +int InstanceClassLoaderKlass:: +oop_oop_iterate_m(oop obj, + OopClosureType* closure, + MemRegion mr) { + + int size = InstanceKlass::oop_oop_iterate_m(obj, closure, mr); + + if (Devirtualizer::do_metadata(closure)) { + if (mr.contains(obj)) { + ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj); + /* cld can be null if we have a non-registered class loader. 
*/ + if (cld != NULL) { + closure->do_class_loader_data(cld); + } + } + } + + return size; +} + +#endif // SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP diff --git a/src/share/vm/oops/instanceKlass.cpp b/src/share/vm/oops/instanceKlass.cpp --- a/src/share/vm/oops/instanceKlass.cpp +++ b/src/share/vm/oops/instanceKlass.cpp @@ -40,7 +40,7 @@ #include "memory/oopFactory.hpp" #include "oops/fieldStreams.hpp" #include "oops/instanceClassLoaderKlass.hpp" -#include "oops/instanceKlass.hpp" +#include "oops/instanceKlass.inline.hpp" #include "oops/instanceMirrorKlass.hpp" #include "oops/instanceOop.hpp" #include "oops/klass.inline.hpp" @@ -212,7 +212,9 @@ int nonstatic_oop_map_size, ReferenceType rt, AccessFlags access_flags, - bool is_anonymous) { + bool is_anonymous, + jbyte dispatch_index) + : Klass(dispatch_index) { No_Safepoint_Verifier no_safepoint; // until k becomes parsable int iksize = InstanceKlass::size(vtable_len, itable_len, nonstatic_oop_map_size, @@ -1964,173 +1966,6 @@ // Garbage collection -#ifdef ASSERT -template void assert_is_in(T *p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop o = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(Universe::heap()->is_in(o), "should be in heap"); - } -} -template void assert_is_in_closed_subset(T *p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop o = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(Universe::heap()->is_in_closed_subset(o), - err_msg("should be in closed *p " INTPTR_FORMAT " " INTPTR_FORMAT, (address)p, (address)o)); - } -} -template void assert_is_in_reserved(T *p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop o = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(Universe::heap()->is_in_reserved(o), "should be in reserved"); - } -} -template void assert_nothing(T *p) {} - -#else -template void assert_is_in(T *p) {} -template void assert_is_in_closed_subset(T *p) {} -template void assert_is_in_reserved(T *p) {} -template void assert_nothing(T *p) {} -#endif // ASSERT - -// -// Macros that iterate over areas of oops which are specialized on type of -// oop pointer either narrow or wide, depending on UseCompressedOops -// -// Parameters are: -// T - type of oop to point to (either oop or narrowOop) -// start_p - starting pointer for region to iterate over -// count - number of oops or narrowOops to iterate over -// do_oop - action to perform on each oop (it's arbitrary C code which -// makes it more efficient to put in a macro rather than making -// it a template function) -// assert_fn - assert function which is template function because performance -// doesn't matter when enabled. 
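The assert_is_in* checkers removed above are still what the iteration macros name through their assert_fn parameter, and the new instanceKlass.inline.hpp continues to pass assert_is_in_closed_subset, so an equivalent definition presumably has to stay visible to that header. Reconstructed from the deleted code (template parameter restored; where it ends up living is an assumption):

#ifdef ASSERT
template <typename T> void assert_is_in_closed_subset(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in_closed_subset(o),
           err_msg("should be in closed *p " INTPTR_FORMAT " " INTPTR_FORMAT,
                   (address)p, (address)o));
  }
}
#else
template <typename T> void assert_is_in_closed_subset(T* p) {}
#endif // ASSERT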
-#define InstanceKlass_SPECIALIZED_OOP_ITERATE( \ - T, start_p, count, do_oop, \ - assert_fn) \ -{ \ - T* p = (T*)(start_p); \ - T* const end = p + (count); \ - while (p < end) { \ - (assert_fn)(p); \ - do_oop; \ - ++p; \ - } \ -} - -#define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \ - T, start_p, count, do_oop, \ - assert_fn) \ -{ \ - T* const start = (T*)(start_p); \ - T* p = start + (count); \ - while (start < p) { \ - --p; \ - (assert_fn)(p); \ - do_oop; \ - } \ -} - -#define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \ - T, start_p, count, low, high, \ - do_oop, assert_fn) \ -{ \ - T* const l = (T*)(low); \ - T* const h = (T*)(high); \ - assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \ - mask_bits((intptr_t)h, sizeof(T)-1) == 0, \ - "bounded region must be properly aligned"); \ - T* p = (T*)(start_p); \ - T* end = p + (count); \ - if (p < l) p = l; \ - if (end > h) end = h; \ - while (p < end) { \ - (assert_fn)(p); \ - do_oop; \ - ++p; \ - } \ -} - - -// The following macros call specialized macros, passing either oop or -// narrowOop as the specialization type. These test the UseCompressedOops -// flag. -#define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \ -{ \ - /* Compute oopmap block range. The common case \ - is nonstatic_oop_map_size == 1. */ \ - OopMapBlock* map = start_of_nonstatic_oop_maps(); \ - OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \ - if (UseCompressedOops) { \ - while (map < end_map) { \ - InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ - obj->obj_field_addr(map->offset()), map->count(), \ - do_oop, assert_fn) \ - ++map; \ - } \ - } else { \ - while (map < end_map) { \ - InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \ - obj->obj_field_addr(map->offset()), map->count(), \ - do_oop, assert_fn) \ - ++map; \ - } \ - } \ -} - -#define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \ -{ \ - OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); \ - OopMapBlock* map = start_map + nonstatic_oop_map_count(); \ - if (UseCompressedOops) { \ - while (start_map < map) { \ - --map; \ - InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop, \ - obj->obj_field_addr(map->offset()), map->count(), \ - do_oop, assert_fn) \ - } \ - } else { \ - while (start_map < map) { \ - --map; \ - InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop, \ - obj->obj_field_addr(map->offset()), map->count(), \ - do_oop, assert_fn) \ - } \ - } \ -} - -#define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop, \ - assert_fn) \ -{ \ - /* Compute oopmap block range. The common case is \ - nonstatic_oop_map_size == 1, so we accept the \ - usually non-existent extra overhead of examining \ - all the maps. 
*/ \ - OopMapBlock* map = start_of_nonstatic_oop_maps(); \ - OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \ - if (UseCompressedOops) { \ - while (map < end_map) { \ - InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ - obj->obj_field_addr(map->offset()), map->count(), \ - low, high, \ - do_oop, assert_fn) \ - ++map; \ - } \ - } else { \ - while (map < end_map) { \ - InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ - obj->obj_field_addr(map->offset()), map->count(), \ - low, high, \ - do_oop, assert_fn) \ - ++map; \ - } \ - } \ -} - void InstanceKlass::oop_follow_contents(oop obj) { assert(obj != NULL, "can't follow the content of NULL object"); MarkSweep::follow_klass(obj->klass()); @@ -2154,72 +1989,6 @@ } #endif // INCLUDE_ALL_GCS -// closure's do_metadata() method dictates whether the given closure should be -// applied to the klass ptr in the object header. - -#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\ - /* header */ \ - if_do_metadata_checked(closure, nv_suffix) { \ - closure->do_klass##nv_suffix(obj->klass()); \ - } \ - InstanceKlass_OOP_MAP_ITERATE( \ - obj, \ - SpecializationStats:: \ - record_do_oop_call##nv_suffix(SpecializationStats::ik); \ - (closure)->do_oop##nv_suffix(p), \ - assert_is_in_closed_subset) \ - return size_helper(); \ -} - -#if INCLUDE_ALL_GCS -#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, \ - OopClosureType* closure) { \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \ - \ - assert_should_ignore_metadata(closure, nv_suffix); \ - \ - /* instance variables */ \ - InstanceKlass_OOP_MAP_REVERSE_ITERATE( \ - obj, \ - SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik);\ - (closure)->do_oop##nv_suffix(p), \ - assert_is_in_closed_subset) \ - return size_helper(); \ -} -#endif // INCLUDE_ALL_GCS - -#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ - \ -int InstanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \ - OopClosureType* closure, \ - MemRegion mr) { \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\ - if_do_metadata_checked(closure, nv_suffix) { \ - if (mr.contains(obj)) { \ - closure->do_klass##nv_suffix(obj->klass()); \ - } \ - } \ - InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \ - obj, mr.start(), mr.end(), \ - (closure)->do_oop##nv_suffix(p), \ - assert_is_in_closed_subset) \ - return size_helper(); \ -} - -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m) -#if INCLUDE_ALL_GCS -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) -#endif // INCLUDE_ALL_GCS - int InstanceKlass::oop_adjust_pointers(oop obj) { int size = size_helper(); InstanceKlass_OOP_MAP_ITERATE( \ diff --git a/src/share/vm/oops/instanceKlass.hpp b/src/share/vm/oops/instanceKlass.hpp --- a/src/share/vm/oops/instanceKlass.hpp +++ b/src/share/vm/oops/instanceKlass.hpp @@ -119,7 +119,8 @@ int nonstatic_oop_map_size, ReferenceType rt, 
AccessFlags access_flags, - bool is_anonymous); + bool is_anonymous, + jbyte dispatch_index = _instance); public: static InstanceKlass* allocate_instance_klass( ClassLoaderData* loader_data, @@ -134,7 +135,7 @@ bool is_anonymous, TRAPS); - InstanceKlass() { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); } + InstanceKlass(jbyte dispatch_index = _instance) : Klass(dispatch_index) { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); } // See "The Java Virtual Machine Specification" section 2.16.2-5 for a detailed description // of the class loading & initialization procedure, and the use of the states. @@ -953,29 +954,15 @@ // Naming const char* signature_name() const; - // Iterators - int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) { - return oop_oop_iterate_v(obj, blk); - } + template + int oop_oop_iterate(oop obj, OopClosureType* blk); - int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) { - return oop_oop_iterate_v_m(obj, blk, mr); - } - -#define InstanceKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \ - int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, \ - MemRegion mr); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DECL) + template + int oop_oop_iterate_m(oop obj, OopClosureType* blk, MemRegion mr); #if INCLUDE_ALL_GCS -#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) + template + int oop_oop_iterate_backwards(oop obj, OopClosureType* blk); #endif // INCLUDE_ALL_GCS u2 idnum_allocated_count() const { return _idnum_allocated_count; } diff --git a/src/share/vm/oops/instanceKlass.inline.hpp b/src/share/vm/oops/instanceKlass.inline.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/oops/instanceKlass.inline.hpp @@ -0,0 +1,214 @@ +/* + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ *
+ */
+
+#ifndef SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
+#define SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
+
+#include "oops/instanceKlass.hpp"
+#include "oops/oop.inline2.hpp"
+
+//
+// Macros that iterate over areas of oops which are specialized on type of
+// oop pointer either narrow or wide, depending on UseCompressedOops
+//
+// Parameters are:
+//   T         - type of oop to point to (either oop or narrowOop)
+//   start_p   - starting pointer for region to iterate over
+//   count     - number of oops or narrowOops to iterate over
+//   do_oop    - action to perform on each oop (it's arbitrary C code which
+//               makes it more efficient to put in a macro rather than making
+//               it a template function)
+//   assert_fn - assert function which is template function because performance
+//               doesn't matter when enabled.
+#define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
+  T, start_p, count, do_oop, \
+  assert_fn) \
+{ \
+  T* p = (T*)(start_p); \
+  T* const end = p + (count); \
+  while (p < end) { \
+    (assert_fn)(p); \
+    do_oop; \
+    ++p; \
+  } \
+}
+
+// The following macros call specialized macros, passing either oop or
+// narrowOop as the specialization type. These test the UseCompressedOops
+// flag.
+#define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \
+{ \
+  /* Compute oopmap block range. The common case \
+     is nonstatic_oop_map_size == 1. */ \
+  OopMapBlock* map = start_of_nonstatic_oop_maps(); \
+  OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \
+  if (UseCompressedOops) { \
+    while (map < end_map) { \
+      InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
+        obj->obj_field_addr(map->offset()), map->count(), \
+        do_oop, assert_fn) \
+      ++map; \
+    } \
+  } else { \
+    while (map < end_map) { \
+      InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \
+        obj->obj_field_addr(map->offset()), map->count(), \
+        do_oop, assert_fn) \
+      ++map; \
+    } \
+  } \
+}
+
+// closure's do_metadata() method dictates whether the given closure should be
+// applied to the klass ptr in the object header.
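The comment above describes the metadata gate that the definition below applies through the Devirtualizer helper. A hedged sketch of the overall pattern, with the template-argument syntax written out explicitly (the helper's real interface is assumed, not shown in this hunk):

// Sketch of the shape InstanceKlass::oop_oop_iterate takes: visit the header
// klass only if the closure asks for metadata, then apply the closure to every
// oop field found through the nonstatic oop maps.
template <bool nv, typename OopClosureType>
int oop_oop_iterate_sketch(InstanceKlass* ik, oop obj, OopClosureType* closure) {
  if (Devirtualizer<nv>::do_metadata(closure)) {
    Devirtualizer<nv>::do_klass(closure, obj->klass());
  }
  // ... walk the oop maps with InstanceKlass_OOP_MAP_ITERATE, calling
  //     Devirtualizer<nv>::do_oop(closure, p) for each field address ...
  return ik->size_helper();
}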
+ +template +int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) { + /* header */ + if (Devirtualizer::do_metadata(closure)) { + Devirtualizer::do_klass(closure, obj->klass()); + } + InstanceKlass_OOP_MAP_ITERATE( + obj, + (Devirtualizer::do_oop(closure, p)), + assert_is_in_closed_subset) + return size_helper(); +} + +#define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \ + T, start_p, count, do_oop, \ + assert_fn) \ +{ \ + T* const start = (T*)(start_p); \ + T* p = start + (count); \ + while (start < p) { \ + --p; \ + (assert_fn)(p); \ + do_oop; \ + } \ +} + +#define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \ +{ \ + OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); \ + OopMapBlock* map = start_map + nonstatic_oop_map_count(); \ + if (UseCompressedOops) { \ + while (start_map < map) { \ + --map; \ + InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop, \ + obj->obj_field_addr(map->offset()), map->count(), \ + do_oop, assert_fn) \ + } \ + } else { \ + while (start_map < map) { \ + --map; \ + InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop, \ + obj->obj_field_addr(map->offset()), map->count(), \ + do_oop, assert_fn) \ + } \ + } \ +} + +#ifndef SERIALGC +template +int InstanceKlass::oop_oop_iterate_backwards(oop obj, + OopClosureType* closure) { + /* header */ + if (Devirtualizer::do_metadata(closure)) { + Devirtualizer::do_klass(closure, obj->klass()); + } + /* instance variables */ + InstanceKlass_OOP_MAP_REVERSE_ITERATE( + obj, + (Devirtualizer::do_oop(closure, p)), + assert_is_in_closed_subset) + return size_helper(); +} +#endif // !SERIALGC + +#define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \ + T, start_p, count, low, high, \ + do_oop, assert_fn) \ +{ \ + T* const l = (T*)(low); \ + T* const h = (T*)(high); \ + assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \ + mask_bits((intptr_t)h, sizeof(T)-1) == 0, \ + "bounded region must be properly aligned"); \ + T* p = (T*)(start_p); \ + T* end = p + (count); \ + if (p < l) p = l; \ + if (end > h) end = h; \ + while (p < end) { \ + (assert_fn)(p); \ + do_oop; \ + ++p; \ + } \ +} + +#define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop, \ + assert_fn) \ +{ \ + /* Compute oopmap block range. The common case is \ + nonstatic_oop_map_size == 1, so we accept the \ + usually non-existent extra overhead of examining \ + all the maps. 
*/ \ + OopMapBlock* map = start_of_nonstatic_oop_maps(); \ + OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \ + if (UseCompressedOops) { \ + while (map < end_map) { \ + InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ + obj->obj_field_addr(map->offset()), map->count(), \ + low, high, \ + do_oop, assert_fn) \ + ++map; \ + } \ + } else { \ + while (map < end_map) { \ + InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ + obj->obj_field_addr(map->offset()), map->count(), \ + low, high, \ + do_oop, assert_fn) \ + ++map; \ + } \ + } \ +} + +template +int InstanceKlass::oop_oop_iterate_m(oop obj, + OopClosureType* closure, + MemRegion mr) { + if (Devirtualizer::do_metadata(closure)) { + if (mr.contains(obj)) { + Devirtualizer::do_klass(closure, obj->klass()); + } + } + InstanceKlass_BOUNDED_OOP_MAP_ITERATE( + obj, mr.start(), mr.end(), + (Devirtualizer::do_oop(closure, p)), + assert_is_in_closed_subset) + return size_helper(); +} + +#endif // SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP diff --git a/src/share/vm/oops/instanceMirrorKlass.cpp b/src/share/vm/oops/instanceMirrorKlass.cpp --- a/src/share/vm/oops/instanceMirrorKlass.cpp +++ b/src/share/vm/oops/instanceMirrorKlass.cpp @@ -31,7 +31,7 @@ #include "memory/iterator.inline.hpp" #include "memory/oopFactory.hpp" #include "oops/instanceKlass.hpp" -#include "oops/instanceMirrorKlass.hpp" +#include "oops/instanceMirrorKlass.inline.hpp" #include "oops/instanceOop.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" @@ -51,104 +51,9 @@ int InstanceMirrorKlass::_offset_of_static_fields = 0; -#ifdef ASSERT -template void assert_is_in(T *p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop o = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(Universe::heap()->is_in(o), "should be in heap"); - } +InstanceMirrorKlass::InstanceMirrorKlass(int vtable_len, int itable_len, int static_field_size, int nonstatic_oop_map_size, ReferenceType rt, AccessFlags access_flags, bool is_anonymous) + : InstanceKlass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_size, rt, access_flags, is_anonymous, _instance_mirror) { } -template void assert_is_in_closed_subset(T *p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop o = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(Universe::heap()->is_in_closed_subset(o), "should be in closed"); - } -} -template void assert_is_in_reserved(T *p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop o = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(Universe::heap()->is_in_reserved(o), "should be in reserved"); - } -} -template void assert_nothing(T *p) {} - -#else -template void assert_is_in(T *p) {} -template void assert_is_in_closed_subset(T *p) {} -template void assert_is_in_reserved(T *p) {} -template void assert_nothing(T *p) {} -#endif // ASSERT - -#define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE( \ - T, start_p, count, do_oop, \ - assert_fn) \ -{ \ - T* p = (T*)(start_p); \ - T* const end = p + (count); \ - while (p < end) { \ - (assert_fn)(p); \ - do_oop; \ - ++p; \ - } \ -} - -#define InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \ - T, start_p, count, low, high, \ - do_oop, assert_fn) \ -{ \ - T* const l = (T*)(low); \ - T* const h = (T*)(high); \ - assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \ - mask_bits((intptr_t)h, sizeof(T)-1) == 0, \ - "bounded region must be properly aligned"); \ - T* p = (T*)(start_p); \ - T* end = p + (count); \ - 
if (p < l) p = l; \ - if (end > h) end = h; \ - while (p < end) { \ - (assert_fn)(p); \ - do_oop; \ - ++p; \ - } \ -} - - -#define InstanceMirrorKlass_OOP_ITERATE(start_p, count, \ - do_oop, assert_fn) \ -{ \ - if (UseCompressedOops) { \ - InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ - start_p, count, \ - do_oop, assert_fn) \ - } else { \ - InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE(oop, \ - start_p, count, \ - do_oop, assert_fn) \ - } \ -} - -// The following macros call specialized macros, passing either oop or -// narrowOop as the specialization type. These test the UseCompressedOops -// flag. -#define InstanceMirrorKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \ - do_oop, assert_fn) \ -{ \ - if (UseCompressedOops) { \ - InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ - start_p, count, \ - low, high, \ - do_oop, assert_fn) \ - } else { \ - InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ - start_p, count, \ - low, high, \ - do_oop, assert_fn) \ - } \ -} - void InstanceMirrorKlass::oop_follow_contents(oop obj) { InstanceKlass::oop_follow_contents(obj); @@ -226,104 +131,6 @@ return size; } -#define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(T, nv_suffix) \ - InstanceMirrorKlass_OOP_ITERATE( \ - start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ - (closure)->do_oop##nv_suffix(p), \ - assert_is_in_closed_subset) \ - return oop_size(obj); \ - -#define InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(T, nv_suffix, mr) \ - InstanceMirrorKlass_BOUNDED_OOP_ITERATE( \ - start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ - mr.start(), mr.end(), \ - (closure)->do_oop##nv_suffix(p), \ - assert_is_in_closed_subset) \ - return oop_size(obj); \ - - -// Macro to define InstanceMirrorKlass::oop_oop_iterate for virtual/nonvirtual for -// all closures. Macros calling macros above for each oop size. - -#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceMirrorKlass:: \ -oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ - /* Get size before changing pointers */ \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk); \ - \ - InstanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \ - \ - if_do_metadata_checked(closure, nv_suffix) { \ - Klass* klass = java_lang_Class::as_Klass(obj); \ - /* We'll get NULL for primitive mirrors. 
*/ \ - if (klass != NULL) { \ - closure->do_klass##nv_suffix(klass); \ - } \ - } \ - \ - if (UseCompressedOops) { \ - InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(narrowOop, nv_suffix); \ - } else { \ - InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv_suffix); \ - } \ -} - -#if INCLUDE_ALL_GCS -#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceMirrorKlass:: \ -oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \ - /* Get size before changing pointers */ \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk); \ - \ - InstanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure); \ - \ - if (UseCompressedOops) { \ - InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(narrowOop, nv_suffix); \ - } else { \ - InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv_suffix); \ - } \ -} -#endif // INCLUDE_ALL_GCS - - -#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ - \ -int InstanceMirrorKlass:: \ -oop_oop_iterate##nv_suffix##_m(oop obj, \ - OopClosureType* closure, \ - MemRegion mr) { \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk); \ - \ - InstanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr); \ - \ - if_do_metadata_checked(closure, nv_suffix) { \ - if (mr.contains(obj)) { \ - Klass* klass = java_lang_Class::as_Klass(obj); \ - /* We'll get NULL for primitive mirrors. */ \ - if (klass != NULL) { \ - closure->do_klass##nv_suffix(klass); \ - } \ - } \ - } \ - \ - if (UseCompressedOops) { \ - InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, mr); \ - } else { \ - InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, mr); \ - } \ -} - -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN) -#if INCLUDE_ALL_GCS -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) -#endif // INCLUDE_ALL_GCS -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m) - #if INCLUDE_ALL_GCS void InstanceMirrorKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { // Note that we don't have to follow the mirror -> klass pointer, since all diff --git a/src/share/vm/oops/instanceMirrorKlass.hpp b/src/share/vm/oops/instanceMirrorKlass.hpp --- a/src/share/vm/oops/instanceMirrorKlass.hpp +++ b/src/share/vm/oops/instanceMirrorKlass.hpp @@ -46,8 +46,7 @@ static int _offset_of_static_fields; // Constructor - InstanceMirrorKlass(int vtable_len, int itable_len, int static_field_size, int nonstatic_oop_map_size, ReferenceType rt, AccessFlags access_flags, bool is_anonymous) - : InstanceKlass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_size, rt, access_flags, is_anonymous) {} + InstanceMirrorKlass(int vtable_len, int itable_len, int static_field_size, int nonstatic_oop_map_size, ReferenceType rt, AccessFlags access_flags, bool is_anonymous); public: InstanceMirrorKlass() { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); } @@ -94,26 +93,15 @@ // Parallel Scavenge and Parallel Old PARALLEL_GC_DECLS - int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) { - return oop_oop_iterate_v(obj, blk); - } - int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) { - return 
oop_oop_iterate_v_m(obj, blk, mr); - } + template + int oop_oop_iterate(oop obj, OopClosureType* blk); -#define InstanceMirrorKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \ - int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, MemRegion mr); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DECL) + template + int oop_oop_iterate_m(oop obj, OopClosureType* blk, MemRegion mr); #if INCLUDE_ALL_GCS -#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) + template + int oop_oop_iterate_backwards(oop obj, OopClosureType* blk); #endif // INCLUDE_ALL_GCS }; diff --git a/src/share/vm/oops/instanceMirrorKlass.inline.hpp b/src/share/vm/oops/instanceMirrorKlass.inline.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/oops/instanceMirrorKlass.inline.hpp @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_OOPS_INSTANCEMIRRORKLASS_INLINE_HPP +#define SHARE_VM_OOPS_INSTANCEMIRRORKLASS_INLINE_HPP + +#include "oops/instanceMirrorKlass.hpp" +#include "oops/oop.inline2.hpp" + +#define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE( \ + T, start_p, count, do_oop, \ + assert_fn) \ +{ \ + T* p = (T*)(start_p); \ + T* const end = p + (count); \ + while (p < end) { \ + (assert_fn)(p); \ + do_oop; \ + ++p; \ + } \ +} + +#define InstanceMirrorKlass_OOP_ITERATE(start_p, count, \ + do_oop, assert_fn) \ +{ \ + if (UseCompressedOops) { \ + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ + start_p, count, \ + do_oop, assert_fn) \ + } else { \ + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE(oop, \ + start_p, count, \ + do_oop, assert_fn) \ + } \ +} + +#define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(T, nv) \ + InstanceMirrorKlass_OOP_ITERATE( \ + start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ + (Devirtualizer::do_oop(closure, p)), \ + assert_is_in_closed_subset) \ + return oop_size(obj); \ + +// Macro to define instanceMirrorKlass::oop_oop_iterate for virtual/nonvirtual for +// all closures. Macros calling macros above for each oop size. 
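The macros above reduce to a simple loop over the static-field area embedded in the java.lang.Class instance; the template that follows layers the instance-field walk and the mirrored-klass visit on top of that loop. Written out for the uncompressed case only (a sketch; the real code stays in the macro so it can switch on UseCompressedOops):

// What InstanceMirrorKlass_OOP_ITERATE amounts to when UseCompressedOops is
// off; the narrowOop branch is identical apart from the pointer type.
oop* p   = (oop*) start_of_static_fields(obj);
oop* end = p + java_lang_Class::static_oop_field_count(obj);
while (p < end) {
  Devirtualizer<nv>::do_oop(closure, p);  // nv and closure as in the enclosing template
  ++p;
}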
+template +int InstanceMirrorKlass:: +oop_oop_iterate(oop obj, OopClosureType* closure) { + /* Get size before changing pointers */ + + InstanceKlass::oop_oop_iterate(obj, closure); + + + if (Devirtualizer::do_metadata(closure)) { + Klass* klass = java_lang_Class::as_Klass(obj); + /* We'll get NULL for primitive mirrors. */ + if (klass != NULL) { + Devirtualizer::do_klass(closure, klass); + } + } + + if (UseCompressedOops) { + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(narrowOop, nv); + } else { + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv); + } +} + +#if INCLUDE_ALL_GCS +template +int InstanceMirrorKlass:: +oop_oop_iterate_backwards(oop obj, OopClosureType* closure) { + /* Get size before changing pointers */ + + InstanceKlass::oop_oop_iterate_backwards(obj, closure); + + if (UseCompressedOops) { + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(narrowOop, nv); + } else { + InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv); + } +} +#endif // INCLUDE_ALL_GCS + +#define InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \ + T, start_p, count, low, high, \ + do_oop, assert_fn) \ +{ \ + T* const l = (T*)(low); \ + T* const h = (T*)(high); \ + assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \ + mask_bits((intptr_t)h, sizeof(T)-1) == 0, \ + "bounded region must be properly aligned"); \ + T* p = (T*)(start_p); \ + T* end = p + (count); \ + if (p < l) p = l; \ + if (end > h) end = h; \ + while (p < end) { \ + (assert_fn)(p); \ + do_oop; \ + ++p; \ + } \ +} + +// The following macros call specialized macros, passing either oop or +// narrowOop as the specialization type. These test the UseCompressedOops +// flag. +#define InstanceMirrorKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \ + do_oop, assert_fn) \ +{ \ + if (UseCompressedOops) { \ + InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ + start_p, count, \ + low, high, \ + do_oop, assert_fn) \ + } else { \ + InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ + start_p, count, \ + low, high, \ + do_oop, assert_fn) \ + } \ +} + +#define InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(T, mr) \ + InstanceMirrorKlass_BOUNDED_OOP_ITERATE( \ + start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ + mr.start(), mr.end(), \ + (closure)->do_oop(p), \ + assert_is_in_closed_subset) \ + return oop_size(obj); \ + +template +int InstanceMirrorKlass:: +oop_oop_iterate_m(oop obj, + OopClosureType* closure, + MemRegion mr) { + + InstanceKlass::oop_oop_iterate_m(obj, closure, mr); + + if (Devirtualizer::do_metadata(closure)) { + if (mr.contains(obj)) { + Klass* klass = java_lang_Class::as_Klass(obj); + /* We'll get NULL for primitive mirrors. 
*/ + if (klass != NULL) { + Devirtualizer::do_klass(closure, klass); + } + } + } + + if (UseCompressedOops) { + InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(narrowOop, mr); + } else { + InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(oop, mr); + } +} + +#endif // SHARE_VM_OOPS_INSTANCEMIRRORKLASS_INLINE_HPP diff --git a/src/share/vm/oops/instanceRefKlass.cpp b/src/share/vm/oops/instanceRefKlass.cpp --- a/src/share/vm/oops/instanceRefKlass.cpp +++ b/src/share/vm/oops/instanceRefKlass.cpp @@ -47,6 +47,9 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC +InstanceRefKlass::InstanceRefKlass(int vtable_len, int itable_len, int static_field_size, int nonstatic_oop_map_size, ReferenceType rt, AccessFlags access_flags, bool is_anonymous) + : InstanceKlass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_size, rt, access_flags, is_anonymous, _instance_ref) {} + template void specialized_oop_follow_contents(InstanceRefKlass* ref, oop obj) { T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); @@ -244,126 +247,6 @@ return size; } -#define InstanceRefKlass_SPECIALIZED_OOP_ITERATE(T, nv_suffix, contains) \ - T* disc_addr = (T*)java_lang_ref_Reference::discovered_addr(obj); \ - if (closure->apply_to_weak_ref_discovered_field()) { \ - closure->do_oop##nv_suffix(disc_addr); \ - } \ - \ - T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); \ - T heap_oop = oopDesc::load_heap_oop(referent_addr); \ - ReferenceProcessor* rp = closure->_ref_processor; \ - if (!oopDesc::is_null(heap_oop)) { \ - oop referent = oopDesc::decode_heap_oop_not_null(heap_oop); \ - if (!referent->is_gc_marked() && (rp != NULL) && \ - rp->discover_reference(obj, reference_type())) { \ - return size; \ - } else if (contains(referent_addr)) { \ - /* treat referent as normal oop */ \ - SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\ - closure->do_oop##nv_suffix(referent_addr); \ - } \ - } \ - T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); \ - if (ReferenceProcessor::pending_list_uses_discovered_field()) { \ - T next_oop = oopDesc::load_heap_oop(next_addr); \ - /* Treat discovered as normal oop, if ref is not "active" (next non-NULL) */\ - if (!oopDesc::is_null(next_oop) && contains(disc_addr)) { \ - /* i.e. ref is not "active" */ \ - debug_only( \ - if(TraceReferenceGC && PrintGCDetails) { \ - gclog_or_tty->print_cr(" Process discovered as normal " \ - INTPTR_FORMAT, disc_addr); \ - } \ - ) \ - SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\ - closure->do_oop##nv_suffix(disc_addr); \ - } \ - } else { \ - /* In the case of older JDKs which do not use the discovered field for */ \ - /* the pending list, an inactive ref (next != NULL) must always have a */ \ - /* NULL discovered field. */ \ - debug_only( \ - T next_oop = oopDesc::load_heap_oop(next_addr); \ - T disc_oop = oopDesc::load_heap_oop(disc_addr); \ - assert(oopDesc::is_null(next_oop) || oopDesc::is_null(disc_oop), \ - err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL" \ - "discovered field", (oopDesc*)obj)); \ - ) \ - } \ - /* treat next as normal oop */ \ - if (contains(next_addr)) { \ - SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk); \ - closure->do_oop##nv_suffix(next_addr); \ - } \ - return size; \ - - -template bool contains(T *t) { return true; } - -// Macro to define InstanceRefKlass::oop_oop_iterate for virtual/nonvirtual for -// all closures. Macros calling macros above for each oop size. 
- -#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceRefKlass:: \ -oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ - /* Get size before changing pointers */ \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\ - \ - int size = InstanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \ - \ - if (UseCompressedOops) { \ - InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, contains); \ - } else { \ - InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, contains); \ - } \ -} - -#if INCLUDE_ALL_GCS -#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceRefKlass:: \ -oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \ - /* Get size before changing pointers */ \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\ - \ - int size = InstanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure); \ - \ - if (UseCompressedOops) { \ - InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, contains); \ - } else { \ - InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, contains); \ - } \ -} -#endif // INCLUDE_ALL_GCS - - -#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ - \ -int InstanceRefKlass:: \ -oop_oop_iterate##nv_suffix##_m(oop obj, \ - OopClosureType* closure, \ - MemRegion mr) { \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);\ - \ - int size = InstanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr); \ - if (UseCompressedOops) { \ - InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, mr.contains); \ - } else { \ - InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, mr.contains); \ - } \ -} - -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DEFN) -#if INCLUDE_ALL_GCS -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) -#endif // INCLUDE_ALL_GCS -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m) - #if INCLUDE_ALL_GCS template void specialized_oop_push_contents(InstanceRefKlass *ref, diff --git a/src/share/vm/oops/instanceRefKlass.hpp b/src/share/vm/oops/instanceRefKlass.hpp --- a/src/share/vm/oops/instanceRefKlass.hpp +++ b/src/share/vm/oops/instanceRefKlass.hpp @@ -49,8 +49,7 @@ friend class InstanceKlass; // Constructor - InstanceRefKlass(int vtable_len, int itable_len, int static_field_size, int nonstatic_oop_map_size, ReferenceType rt, AccessFlags access_flags, bool is_anonymous) - : InstanceKlass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_size, rt, access_flags, is_anonymous) {} + InstanceRefKlass(int vtable_len, int itable_len, int static_field_size, int nonstatic_oop_map_size, ReferenceType rt, AccessFlags access_flags, bool is_anonymous); public: InstanceRefKlass() { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); } @@ -70,27 +69,19 @@ // Parallel Scavenge and Parallel Old PARALLEL_GC_DECLS - int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) { - return oop_oop_iterate_v(obj, blk); - } - int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) { - return oop_oop_iterate_v_m(obj, blk, mr); - } + template + int oop_oop_iterate(oop obj, OopClosureType* blk); 
-#define InstanceRefKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \ - int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, MemRegion mr); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DECL) + template + int oop_oop_iterate_m(oop obj, OopClosureType* blk, MemRegion mr); #if INCLUDE_ALL_GCS -#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk); + template + int oop_oop_iterate_backwards(oop obj, OopClosureType* blk); +#endif // INCLUDE_ALL_GCS - ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL) -#endif // INCLUDE_ALL_GCS + template + void specialized_oop_iterate(oop obj, OopClosureType* closure, MemRegion* mr); static void release_and_notify_pending_list_lock(BasicLock *pending_list_basic_lock); static void acquire_pending_list_lock(BasicLock *pending_list_basic_lock); diff --git a/src/share/vm/oops/instanceRefKlass.inline.hpp b/src/share/vm/oops/instanceRefKlass.inline.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/oops/instanceRefKlass.inline.hpp @@ -0,0 +1,142 @@ +/* + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_OOPS_INSTANCEREFKLASS_INLINE_HPP +#define SHARE_VM_OOPS_INSTANCEREFKLASS_INLINE_HPP + +#include "oops/instanceRefKlass.hpp" + +PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC + +template +void InstanceRefKlass::specialized_oop_iterate(oop obj, + OopClosureType* closure, + MemRegion* mr) { + T* disc_addr = (T*)java_lang_ref_Reference::discovered_addr(obj); + if (closure->apply_to_weak_ref_discovered_field()) { + Devirtualizer::do_oop(closure, disc_addr); + } + + T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); + T heap_oop = oopDesc::load_heap_oop(referent_addr); + ReferenceProcessor* rp = closure->_ref_processor; + if (!oopDesc::is_null(heap_oop)) { + oop referent = oopDesc::decode_heap_oop_not_null(heap_oop); + if (!referent->is_gc_marked() && (rp != NULL) && + rp->discover_reference(obj, reference_type())) { + return; + } else if (mr == NULL || mr->contains(referent_addr)) { + /* treat referent as normal oop */ + Devirtualizer::do_oop(closure, referent_addr); + } + } + T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); + if (ReferenceProcessor::pending_list_uses_discovered_field()) { + T next_oop = oopDesc::load_heap_oop(next_addr); + /* Treat discovered as normal oop, if ref is not "active" (next non-NULL) */ + if (!oopDesc::is_null(next_oop) && (mr == NULL || mr->contains(disc_addr))) { + /* i.e. ref is not "active" */ + debug_only( + if(TraceReferenceGC && PrintGCDetails) { + gclog_or_tty->print_cr(" Process discovered as normal " + INTPTR_FORMAT, disc_addr); + } + ) + Devirtualizer::do_oop(closure, disc_addr); + } + } else { + /* In the case of older JDKs which do not use the discovered field for */ + /* the pending list, an inactive ref (next != NULL) must always have a */ + /* NULL discovered field. */ + debug_only( + T next_oop = oopDesc::load_heap_oop(next_addr); + T disc_oop = oopDesc::load_heap_oop(disc_addr); + assert(oopDesc::is_null(next_oop) || oopDesc::is_null(disc_oop), + err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL" + "discovered field", (oopDesc*)obj)); + ) + } + /* treat next as normal oop */ + if (mr == NULL || mr->contains(next_addr)) { + Devirtualizer::do_oop(closure, next_addr); + } +} + +// Macro to define InstanceRefKlass::oop_oop_iterate for virtual/nonvirtual for +// all closures. Macros calling macros above for each oop size. 
+ +template +int InstanceRefKlass:: +oop_oop_iterate(oop obj, OopClosureType* closure) { + /* Get size before changing pointers */ + + int size = InstanceKlass::oop_oop_iterate(obj, closure); + + if (UseCompressedOops) { + specialized_oop_iterate(obj, closure, NULL); + } else { + specialized_oop_iterate(obj, closure, NULL); + } + + return size; +} + +#if INCLUDE_ALL_GCS + +template +int InstanceRefKlass:: +oop_oop_iterate_backwards(oop obj, OopClosureType* closure) { + /* Get size before changing pointers */ + + int size = InstanceKlass::oop_oop_iterate_backwards(obj, closure); + + if (UseCompressedOops) { + specialized_oop_iterate(obj, closure, NULL); + } else { + specialized_oop_iterate(obj, closure, NULL); + } + + return size; +} +#endif // INCLUDE_ALL_GCS + + +template +int InstanceRefKlass:: +oop_oop_iterate_m(oop obj, + OopClosureType* closure, + MemRegion mr) { + + int size = InstanceKlass::oop_oop_iterate_m(obj, closure, mr); + + if (UseCompressedOops) { + specialized_oop_iterate(obj, closure, &mr); + } else { + specialized_oop_iterate(obj, closure, &mr); + } + + return size; +} + +#endif // SHARE_VM_OOPS_INSTANCEREFKLASS_INLINE_HPP diff --git a/src/share/vm/oops/klass.cpp b/src/share/vm/oops/klass.cpp --- a/src/share/vm/oops/klass.cpp +++ b/src/share/vm/oops/klass.cpp @@ -146,7 +146,7 @@ MetaspaceObj::ClassType, CHECK_NULL); } -Klass::Klass() { +Klass::Klass(jbyte dispatch_index) : _dispatch_index(dispatch_index) { Klass* k = this; // Preinitialize supertype information. diff --git a/src/share/vm/oops/klass.hpp b/src/share/vm/oops/klass.hpp --- a/src/share/vm/oops/klass.hpp +++ b/src/share/vm/oops/klass.hpp @@ -69,6 +69,14 @@ // note: put frequently-used fields together at start of klass structure // for better cache behavior (may not make much of a difference but sure won't hurt) enum { _primary_super_limit = 8 }; + enum DispatchIndex { + _instance, + _instance_ref, + _instance_mirror, + _instance_class_loader, + _obj_array, + _type_array, + }; // The "layout helper" is a combined descriptor of object layout. // For klasses which are neither instance nor array, the value is zero. @@ -98,6 +106,8 @@ // because it is frequently queried. jint _layout_helper; + const jbyte _dispatch_index; + // The fields _super_check_offset, _secondary_super_cache, _secondary_supers // and _primary_supers all help make fast subtype checks. See big discussion // in doc/server_compiler/checktype.txt @@ -148,7 +158,8 @@ jbyte _accumulated_modified_oops; // Mod Union Equivalent (CMS support) // Constructor - Klass(); + Klass(jbyte dispatch_index, bool dummy /*ignored*/) : _dispatch_index(_instance) {} // SSS: For Dummy objects + Klass(jbyte dispatch_index); void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw(); @@ -226,6 +237,9 @@ int layout_helper() const { return _layout_helper; } void set_layout_helper(int lh) { _layout_helper = lh; } + // dispatch index + jbyte dispatch_index() const { return _dispatch_index; } + // Note: for instances layout_helper() may include padding. // Use InstanceKlass::contains_field_offset to classify field offsets. 
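The dispatch-index scheme introduced above stores a small constant tag in Klass at construction time and switches on it in klass.inline.hpp instead of making a virtual oop_oop_iterate call. A self-contained sketch of that shape follows; every name in it is invented for illustration and none of it is HotSpot code: the base class records which concrete subclass it is, and a single templated dispatcher casts and calls the subclass's templated iterator, so the closure callback can be inlined rather than reached through two virtual calls per object.

// Stand-alone illustration (invented names, not HotSpot code) of the
// dispatch-index technique: a tag stored by the base-class constructor plus a
// switch replaces virtual dispatch, so the closure call below can be inlined.
#include <cassert>
#include <cstdio>

struct Closure {
  long sum = 0;
  void do_value(long v) { sum += v; }   // analogue of a closure's do_oop
};

class Node {
 public:
  enum DispatchIndex { _leaf, _pair };  // analogue of Klass::DispatchIndex

  explicit Node(signed char index) : _dispatch_index(index) {}
  signed char dispatch_index() const { return _dispatch_index; }

  // Single templated entry point; no virtual call on Node is needed.
  template <typename ClosureType>
  long iterate_dispatch(ClosureType* cl);

 private:
  const signed char _dispatch_index;    // analogue of Klass::_dispatch_index
};

class Leaf : public Node {
 public:
  explicit Leaf(long v) : Node(_leaf), _value(v) {}
  template <typename ClosureType>
  long iterate(ClosureType* cl) { cl->do_value(_value); return 1; }
 private:
  long _value;
};

class Pair : public Node {
 public:
  Pair(long a, long b) : Node(_pair), _a(a), _b(b) {}
  template <typename ClosureType>
  long iterate(ClosureType* cl) { cl->do_value(_a); cl->do_value(_b); return 2; }
 private:
  long _a, _b;
};

// Analogue of Klass::oop_oop_iterate_disp in klass.inline.hpp.
template <typename ClosureType>
long Node::iterate_dispatch(ClosureType* cl) {
  switch (dispatch_index()) {
    case _leaf: return static_cast<Leaf*>(this)->iterate(cl);
    case _pair: return static_cast<Pair*>(this)->iterate(cl);
    default:    assert(false && "unknown dispatch index"); return 0;
  }
}

int main() {
  Leaf leaf(40);
  Pair pair(1, 1);
  Closure cl;
  Node* objs[] = { &leaf, &pair };
  for (Node* n : objs) {
    n->iterate_dispatch(&cl);
  }
  std::printf("sum = %ld\n", cl.sum);   // prints: sum = 42
  return 0;
}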
@@ -558,61 +572,23 @@ clean_weak_klass_links(is_alive, false /* clean_alive_klasses */); } - // iterators - virtual int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) = 0; - virtual int oop_oop_iterate_v(oop obj, ExtendedOopClosure* blk) { - return oop_oop_iterate(obj, blk); - } + template + int oop_oop_iterate_disp(oop obj, OopClosureType* blk); + + template + int oop_oop_iterate_disp(oop obj, OopClosureType* blk, MemRegion mr); #if INCLUDE_ALL_GCS // In case we don't have a specialized backward scanner use forward // iteration. - virtual int oop_oop_iterate_backwards_v(oop obj, ExtendedOopClosure* blk) { - return oop_oop_iterate_v(obj, blk); - } + template + int oop_oop_iterate_backwards(oop obj, OopClosureType* blk) { + return oop_oop_iterate(obj, blk); + } #endif // INCLUDE_ALL_GCS - // Iterates "blk" over all the oops in "obj" (of type "this") within "mr". - // (I don't see why the _m should be required, but without it the Solaris - // C++ gives warning messages about overridings of the "oop_oop_iterate" - // defined above "hiding" this virtual function. (DLD, 6/20/00)) */ - virtual int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) = 0; - virtual int oop_oop_iterate_v_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) { - return oop_oop_iterate_m(obj, blk, mr); - } - - // Versions of the above iterators specialized to particular subtypes - // of OopClosure, to avoid closure virtual calls. -#define Klass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - virtual int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk) { \ - /* Default implementation reverts to general version. */ \ - return oop_oop_iterate(obj, blk); \ - } \ - \ - /* Iterates "blk" over all the oops in "obj" (of type "this") within "mr". \ - (I don't see why the _m should be required, but without it the Solaris \ - C++ gives warning messages about overridings of the "oop_oop_iterate" \ - defined above "hiding" this virtual function. (DLD, 6/20/00)) */ \ - virtual int oop_oop_iterate##nv_suffix##_m(oop obj, \ - OopClosureType* blk, \ - MemRegion mr) { \ - return oop_oop_iterate_m(obj, blk, mr); \ - } - - SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL) - SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL) - -#if INCLUDE_ALL_GCS -#define Klass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ - virtual int oop_oop_iterate_backwards##nv_suffix(oop obj, \ - OopClosureType* blk) { \ - /* Default implementation reverts to general version. 
*/ \ - return oop_oop_iterate_backwards_v(obj, blk); \ - } - - SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL) - SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL) -#endif // INCLUDE_ALL_GCS + template + int oop_oop_iterate_disp_backwards(oop obj, OopClosureType* blk); virtual void array_klasses_do(void f(Klass* k)) {} diff --git a/src/share/vm/oops/klass.inline.hpp b/src/share/vm/oops/klass.inline.hpp --- a/src/share/vm/oops/klass.inline.hpp +++ b/src/share/vm/oops/klass.inline.hpp @@ -26,8 +26,13 @@ #define SHARE_VM_OOPS_KLASS_INLINE_HPP #include "memory/universe.hpp" +#include "oops/instanceClassLoaderKlass.inline.hpp" +#include "oops/instanceKlass.inline.hpp" +#include "oops/instanceMirrorKlass.inline.hpp" +#include "oops/instanceRefKlass.inline.hpp" #include "oops/klass.hpp" #include "oops/markOop.hpp" +#include "oops/objArrayKlass.inline.hpp" inline void Klass::set_prototype_header(markOop header) { assert(!header->has_bias_pattern() || oop_is_instance(), "biased locking currently only supported for Java instances"); @@ -43,6 +48,19 @@ return (intptr_t)obj % KlassAlignmentInBytes == 0; } + template +int Klass::oop_oop_iterate_disp(oop obj, OopClosureType* blk) { + switch (dispatch_index()) { + case _instance: return InstanceKlass::cast(this)->oop_oop_iterate(obj, blk); break; + case _instance_ref: return InstanceRefKlass::cast(this)->oop_oop_iterate(obj, blk); break; + case _instance_mirror: return InstanceMirrorKlass::cast(this)->oop_oop_iterate(obj, blk); break; + case _instance_class_loader: return InstanceClassLoaderKlass::cast(this)->oop_oop_iterate(obj, blk); break; + case _obj_array: return ObjArrayKlass::cast(this)->oop_oop_iterate(obj, blk); break; + case _type_array: return TypeArrayKlass::cast(this)->oop_oop_iterate(obj, blk); break; + default: ShouldNotReachHere(); return 0; break; + } +} + inline narrowKlass Klass::encode_klass_not_null(Klass* v) { assert(!is_null(v), "klass value can never be zero"); assert(check_klass_alignment(v), "Address not aligned"); @@ -55,10 +73,35 @@ return (narrowKlass)result; } +template +int Klass::oop_oop_iterate_disp(oop obj, OopClosureType* blk, MemRegion mr) { + switch (dispatch_index()) { + case _instance: return InstanceKlass::cast(this)->oop_oop_iterate_m(obj, blk, mr); break; + case _instance_ref: return InstanceRefKlass::cast(this)->oop_oop_iterate_m(obj, blk, mr); break; + case _instance_mirror: return InstanceMirrorKlass::cast(this)->oop_oop_iterate_m(obj, blk, mr); break; + case _instance_class_loader: return InstanceClassLoaderKlass::cast(this)->oop_oop_iterate_m(obj, blk, mr); break; + case _obj_array: return ObjArrayKlass::cast(this)->oop_oop_iterate_m(obj, blk, mr); break; + case _type_array: return TypeArrayKlass::cast(this)->oop_oop_iterate_m(obj, blk, mr); break; + default: ShouldNotReachHere(); return 0; break; + } +} + inline narrowKlass Klass::encode_klass(Klass* v) { return is_null(v) ? 
(narrowKlass)0 : encode_klass_not_null(v); } +template +int Klass::oop_oop_iterate_disp_backwards(oop obj, OopClosureType* blk) { + switch (dispatch_index()) { + case _instance: return InstanceKlass::cast(this)->oop_oop_iterate_backwards(obj, blk); break; + case _instance_ref: return InstanceRefKlass::cast(this)->oop_oop_iterate_backwards(obj, blk); break; + case _instance_mirror: return InstanceMirrorKlass::cast(this)->oop_oop_iterate_backwards(obj, blk); break; + case _instance_class_loader: return InstanceClassLoaderKlass::cast(this)->oop_oop_iterate_backwards(obj, blk); break; + /* Not implemented for the arrays. */ + default: ShouldNotReachHere(); return 0; break; + } +} + inline Klass* Klass::decode_klass_not_null(narrowKlass v) { assert(!is_null(v), "narrow klass value can never be zero"); int shift = Universe::narrow_klass_shift(); diff --git a/src/share/vm/oops/klassVtable.cpp b/src/share/vm/oops/klassVtable.cpp --- a/src/share/vm/oops/klassVtable.cpp +++ b/src/share/vm/oops/klassVtable.cpp @@ -26,6 +26,7 @@ #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" #include "gc_implementation/shared/markSweep.inline.hpp" +#include "interpreter/linkResolver.hpp" #include "memory/gcLocker.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" diff --git a/src/share/vm/oops/methodData.cpp b/src/share/vm/oops/methodData.cpp --- a/src/share/vm/oops/methodData.cpp +++ b/src/share/vm/oops/methodData.cpp @@ -35,6 +35,7 @@ #include "runtime/deoptimization.hpp" #include "runtime/handles.inline.hpp" #include "runtime/orderAccess.inline.hpp" +#include "utilities/copy.hpp" PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC diff --git a/src/share/vm/oops/objArrayKlass.cpp b/src/share/vm/oops/objArrayKlass.cpp --- a/src/share/vm/oops/objArrayKlass.cpp +++ b/src/share/vm/oops/objArrayKlass.cpp @@ -156,7 +156,7 @@ return oak; } -ObjArrayKlass::ObjArrayKlass(int n, KlassHandle element_klass, Symbol* name) : ArrayKlass(name) { +ObjArrayKlass::ObjArrayKlass(int n, KlassHandle element_klass, Symbol* name) : ArrayKlass(name, _obj_array) { this->set_dimension(n); this->set_element_klass(element_klass()); // decrement refcount because object arrays are not explicitly freed. 
The @@ -412,48 +412,6 @@ bottom_klass()->initialize(THREAD); // dispatches to either InstanceKlass or TypeArrayKlass } -#define ObjArrayKlass_SPECIALIZED_OOP_ITERATE(T, a, p, do_oop) \ -{ \ - T* p = (T*)(a)->base(); \ - T* const end = p + (a)->length(); \ - while (p < end) { \ - do_oop; \ - p++; \ - } \ -} - -#define ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(T, a, p, low, high, do_oop) \ -{ \ - T* const l = (T*)(low); \ - T* const h = (T*)(high); \ - T* p = (T*)(a)->base(); \ - T* end = p + (a)->length(); \ - if (p < l) p = l; \ - if (end > h) end = h; \ - while (p < end) { \ - do_oop; \ - ++p; \ - } \ -} - -#define ObjArrayKlass_OOP_ITERATE(a, p, do_oop) \ - if (UseCompressedOops) { \ - ObjArrayKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ - a, p, do_oop) \ - } else { \ - ObjArrayKlass_SPECIALIZED_OOP_ITERATE(oop, \ - a, p, do_oop) \ - } - -#define ObjArrayKlass_BOUNDED_OOP_ITERATE(a, p, low, high, do_oop) \ - if (UseCompressedOops) { \ - ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ - a, p, low, high, do_oop) \ - } else { \ - ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ - a, p, low, high, do_oop) \ - } - void ObjArrayKlass::oop_follow_contents(oop obj) { assert (obj->is_array(), "obj must be array"); MarkSweep::follow_klass(obj->klass()); @@ -477,88 +435,6 @@ } #endif // INCLUDE_ALL_GCS -#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -int ObjArrayKlass::oop_oop_iterate##nv_suffix(oop obj, \ - OopClosureType* closure) { \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::oa); \ - assert (obj->is_array(), "obj must be array"); \ - objArrayOop a = objArrayOop(obj); \ - /* Get size before changing pointers. */ \ - /* Don't call size() or oop_size() since that is a virtual call. */ \ - int size = a->object_size(); \ - if_do_metadata_checked(closure, nv_suffix) { \ - closure->do_klass##nv_suffix(obj->klass()); \ - } \ - ObjArrayKlass_OOP_ITERATE(a, p, (closure)->do_oop##nv_suffix(p)) \ - return size; \ -} - -#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ - \ -int ObjArrayKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \ - OopClosureType* closure, \ - MemRegion mr) { \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::oa); \ - assert(obj->is_array(), "obj must be array"); \ - objArrayOop a = objArrayOop(obj); \ - /* Get size before changing pointers. */ \ - /* Don't call size() or oop_size() since that is a virtual call */ \ - int size = a->object_size(); \ - if_do_metadata_checked(closure, nv_suffix) { \ - /* SSS: Do we need to pass down mr here? */ \ - closure->do_klass##nv_suffix(a->klass()); \ - } \ - ObjArrayKlass_BOUNDED_OOP_ITERATE( \ - a, p, mr.start(), mr.end(), (closure)->do_oop##nv_suffix(p)) \ - return size; \ -} - -// Like oop_oop_iterate but only iterates over a specified range and only used -// for objArrayOops. -#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r(OopClosureType, nv_suffix) \ - \ -int ObjArrayKlass::oop_oop_iterate_range##nv_suffix(oop obj, \ - OopClosureType* closure, \ - int start, int end) { \ - SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::oa); \ - assert(obj->is_array(), "obj must be array"); \ - objArrayOop a = objArrayOop(obj); \ - /* Get size before changing pointers. */ \ - /* Don't call size() or oop_size() since that is a virtual call */ \ - int size = a->object_size(); \ - if (UseCompressedOops) { \ - HeapWord* low = start == 0 ? 
(HeapWord*)a : (HeapWord*)a->obj_at_addr(start);\ - /* this might be wierd if end needs to be aligned on HeapWord boundary */ \ - HeapWord* high = (HeapWord*)((narrowOop*)a->base() + end); \ - MemRegion mr(low, high); \ - if_do_metadata_checked(closure, nv_suffix) { \ - /* SSS: Do we need to pass down mr here? */ \ - closure->do_klass##nv_suffix(a->klass()); \ - } \ - ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ - a, p, low, high, (closure)->do_oop##nv_suffix(p)) \ - } else { \ - HeapWord* low = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr(start); \ - HeapWord* high = (HeapWord*)((oop*)a->base() + end); \ - MemRegion mr(low, high); \ - if_do_metadata_checked(closure, nv_suffix) { \ - /* SSS: Do we need to pass down mr here? */ \ - closure->do_klass##nv_suffix(a->klass()); \ - } \ - ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ - a, p, low, high, (closure)->do_oop##nv_suffix(p)) \ - } \ - return size; \ -} - -ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m) -ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m) -ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r) -ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r) - int ObjArrayKlass::oop_adjust_pointers(oop obj) { assert(obj->is_objArray(), "obj must be obj array"); objArrayOop a = objArrayOop(obj); diff --git a/src/share/vm/oops/objArrayKlass.hpp b/src/share/vm/oops/objArrayKlass.hpp --- a/src/share/vm/oops/objArrayKlass.hpp +++ b/src/share/vm/oops/objArrayKlass.hpp @@ -42,8 +42,7 @@ ObjArrayKlass(int n, KlassHandle element_klass, Symbol* name); static ObjArrayKlass* allocate(ClassLoaderData* loader_data, int n, KlassHandle klass_handle, Symbol* name, TRAPS); public: - // For dummy objects - ObjArrayKlass() {} + ObjArrayKlass() : ArrayKlass(_obj_array) {} // For dummy objects. 
// Instance variables Klass* element_klass() const { return _element_klass; } @@ -118,22 +117,14 @@ objarray_follow_contents(ParCompactionManager* cm, oop obj, int index); #endif // INCLUDE_ALL_GCS - // Iterators - int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) { - return oop_oop_iterate_v(obj, blk); - } - int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) { - return oop_oop_iterate_v_m(obj, blk, mr); - } -#define ObjArrayKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \ - int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, \ - MemRegion mr); \ - int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* blk, \ - int start, int end); + template + int oop_oop_iterate(oop obj, OopClosureType* closure); - ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DECL) + template + int oop_oop_iterate_m(oop obj, OopClosureType* blk, MemRegion mr); + + template + int oop_oop_iterate_range_t(oop obj, OopClosureType* blk, int start, int end); // JVM support jint compute_modifier_flags(TRAPS) const; diff --git a/src/share/vm/oops/objArrayKlass.inline.hpp b/src/share/vm/oops/objArrayKlass.inline.hpp --- a/src/share/vm/oops/objArrayKlass.inline.hpp +++ b/src/share/vm/oops/objArrayKlass.inline.hpp @@ -27,12 +27,126 @@ #include "gc_implementation/shared/markSweep.inline.hpp" #include "oops/objArrayKlass.hpp" +#include "oops/oop.inline2.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/parallelScavenge/psCompactionManager.inline.hpp" #include "gc_implementation/parallelScavenge/psParallelCompact.hpp" #endif // INCLUDE_ALL_GCS +#define ObjArrayKlass_SPECIALIZED_OOP_ITERATE(T, a, p, do_oop) \ +{ \ + T* p = (T*)(a)->base(); \ + T* const end = p + (a)->length(); \ + while (p < end) { \ + do_oop; \ + p++; \ + } \ +} + +#define ObjArrayKlass_OOP_ITERATE(a, p, do_oop) \ + if (UseCompressedOops) { \ + ObjArrayKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ + a, p, do_oop) \ + } else { \ + ObjArrayKlass_SPECIALIZED_OOP_ITERATE(oop, \ + a, p, do_oop) \ + } + +template +int ObjArrayKlass::oop_oop_iterate(oop obj, + OopClosureType* closure) { + assert (obj->is_array(), "obj must be array"); + objArrayOop a = objArrayOop(obj); + /* Get size before changing pointers. */ + /* Don't call size() or oop_size() since that is a virtual call. */ + int size = a->object_size(); + if (Devirtualizer::do_metadata(closure)) { + Devirtualizer::do_klass(closure, obj->klass()); + } + ObjArrayKlass_OOP_ITERATE(a, p, (Devirtualizer::do_oop(closure, p))) + return size; +} + +#define ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(T, a, p, low, high, do_oop) \ +{ \ + T* const l = (T*)(low); \ + T* const h = (T*)(high); \ + T* p = (T*)(a)->base(); \ + T* end = p + (a)->length(); \ + if (p < l) p = l; \ + if (end > h) end = h; \ + while (p < end) { \ + do_oop; \ + ++p; \ + } \ +} + +#define ObjArrayKlass_BOUNDED_OOP_ITERATE(a, p, low, high, do_oop) \ + if (UseCompressedOops) { \ + ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ + a, p, low, high, do_oop) \ + } else { \ + ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ + a, p, low, high, do_oop) \ + } + +template +int ObjArrayKlass::oop_oop_iterate_m(oop obj, + OopClosureType* closure, + MemRegion mr) { + assert(obj->is_array(), "obj must be array"); + objArrayOop a = objArrayOop(obj); + /* Get size before changing pointers. 
*/ + /* Don't call size() or oop_size() since that is a virtual call */ + int size = a->object_size(); + if (Devirtualizer::do_metadata(closure)) { + /* SSS: Do we need to pass down mr here? */ + Devirtualizer::do_klass(closure, obj->klass()); + } + ObjArrayKlass_BOUNDED_OOP_ITERATE( + a, p, mr.start(), mr.end(), (Devirtualizer::do_oop(closure, p))) + return size; +} + +// Like oop_oop_iterate but only iterates over a specified range and only used +// for objArrayOops. + +template +int ObjArrayKlass::oop_oop_iterate_range_t(oop obj, + OopClosureType* closure, + int start, int end) { + assert(obj->is_array(), "obj must be array"); + objArrayOop a = objArrayOop(obj); + /* Get size before changing pointers. */ + /* Don't call size() or oop_size() since that is a virtual call */ + int size = a->object_size(); + if (UseCompressedOops) { + HeapWord* low = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr(start); + /* this might be wierd if end needs to be aligned on HeapWord boundary */ + HeapWord* high = (HeapWord*)((narrowOop*)a->base() + end); + MemRegion mr(low, high); + if (Devirtualizer::do_metadata(closure)) { + /* SSS: Do we need to pass down mr here? */ + Devirtualizer::do_klass(closure, obj->klass()); + } + ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, + a, p, low, high, (Devirtualizer::do_oop(closure, p))) + } else { + HeapWord* low = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr(start); + HeapWord* high = (HeapWord*)((oop*)a->base() + end); + MemRegion mr(low, high); + if (Devirtualizer::do_metadata(closure)) { + /* SSS: Do we need to pass down mr here? */ + Devirtualizer::do_klass(closure, obj->klass()); + } + ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, + a, p, low, high, (Devirtualizer::do_oop(closure, p))) + } + return size; +} + + void ObjArrayKlass::oop_follow_contents(oop obj, int index) { if (UseCompressedOops) { objarray_follow_contents(obj, index); diff --git a/src/share/vm/oops/objArrayOop.cpp b/src/share/vm/oops/objArrayOop.cpp --- a/src/share/vm/oops/objArrayOop.cpp +++ b/src/share/vm/oops/objArrayOop.cpp @@ -42,13 +42,3 @@ } return res; } - -#define ObjArrayOop_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -int objArrayOopDesc::oop_iterate_range(OopClosureType* blk, int start, int end) { \ - SpecializationStats::record_call(); \ - return ((ObjArrayKlass*)klass())->oop_oop_iterate_range##nv_suffix(this, blk, start, end); \ -} - -ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayOop_OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayOop_OOP_ITERATE_DEFN) diff --git a/src/share/vm/oops/objArrayOop.hpp b/src/share/vm/oops/objArrayOop.hpp --- a/src/share/vm/oops/objArrayOop.hpp +++ b/src/share/vm/oops/objArrayOop.hpp @@ -112,11 +112,8 @@ } // special iterators for index ranges, returns size of object -#define ObjArrayOop_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - int oop_iterate_range(OopClosureType* blk, int start, int end); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayOop_OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayOop_OOP_ITERATE_DECL) + template + int oop_iterate_range_t(OopClosureType* blk, int start, int end); }; #endif // SHARE_VM_OOPS_OBJARRAYOOP_HPP diff --git a/src/share/vm/oops/objArrayOop.inline.hpp b/src/share/vm/oops/objArrayOop.inline.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/oops/objArrayOop.inline.hpp @@ -0,0 +1,36 @@ +/* + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_OOPS_OBJARRAYOOP_INLINE_HPP +#define SHARE_VM_OOPS_OBJARRAYOOP_INLINE_HPP + +#include "oops/objArrayOop.hpp" + +template +int objArrayOopDesc::oop_iterate_range_t(OopClosureType* blk, int start, int end) { + SpecializationStats::record_call(); + return ObjArrayKlass::cast(klass())->oop_oop_iterate_range_t(this, blk, start, end); +} + +#endif // SHARE_VM_OOPS_OBJARRAYOOP_INLINE_HPP diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp --- a/src/share/vm/oops/oop.hpp +++ b/src/share/vm/oops/oop.hpp @@ -336,21 +336,15 @@ static BarrierSet* bs() { return _bs; } static void set_bs(BarrierSet* bs) { _bs = bs; } - // iterators, returns size of object -#define OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - int oop_iterate(OopClosureType* blk); \ - int oop_iterate(OopClosureType* blk, MemRegion mr); // Only in mr. 
- - ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DECL) + template + int oop_iterate(OopClosureType* blk); + + template + int oop_iterate(OopClosureType* blk, MemRegion mr); #if INCLUDE_ALL_GCS - -#define OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \ + template int oop_iterate_backwards(OopClosureType* blk); - - ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DECL) - ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DECL) #endif int oop_iterate_no_header(OopClosure* bk); @@ -376,4 +370,16 @@ static int klass_gap_offset_in_bytes(); }; + +#ifdef ASSERT +template void assert_is_in(T *p); +template void assert_is_in_closed_subset(T *p); +template void assert_is_in_reserved(T *p); +#else +template void assert_is_in(T *p) {} +template void assert_is_in_closed_subset(T *p) {} +template void assert_is_in_reserved(T *p) {} +#endif // ASSERT +template void assert_nothing(T *p) {} + #endif // SHARE_VM_OOPS_OOP_HPP diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp --- a/src/share/vm/oops/oop.inline.hpp +++ b/src/share/vm/oops/oop.inline.hpp @@ -26,8 +26,6 @@ #define SHARE_VM_OOPS_OOP_INLINE_HPP #include "gc_implementation/shared/ageTable.hpp" -#include "gc_implementation/shared/markSweep.inline.hpp" -#include "gc_interface/collectedHeap.inline.hpp" #include "memory/barrierSet.inline.hpp" #include "memory/cardTableModRefBS.hpp" #include "memory/genCollectedHeap.hpp" @@ -691,44 +689,35 @@ return s; } -#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -inline int oopDesc::oop_iterate(OopClosureType* blk) { \ - SpecializationStats::record_call(); \ - return klass()->oop_oop_iterate##nv_suffix(this, blk); \ -} \ - \ -inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) { \ - SpecializationStats::record_call(); \ - return klass()->oop_oop_iterate##nv_suffix##_m(this, blk, mr); \ +template +inline int oopDesc::oop_iterate(OopClosureType* blk) { + return klass()->oop_oop_iterate_disp(this, blk); } +template +inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) { + return klass()->oop_oop_iterate_disp(this, blk, mr); +} inline int oopDesc::oop_iterate_no_header(OopClosure* blk) { // The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all // the do_oop calls, but turns off all other features in ExtendedOopClosure. 
NoHeaderExtendedOopClosure cl(blk); - return oop_iterate(&cl); + return oop_iterate(&cl); } inline int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) { NoHeaderExtendedOopClosure cl(blk); - return oop_iterate(&cl, mr); + return oop_iterate(&cl, mr); } -ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN) #if INCLUDE_ALL_GCS -#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ - \ -inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) { \ - SpecializationStats::record_call(); \ - return klass()->oop_oop_iterate_backwards##nv_suffix(this, blk); \ +template +inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) { + SpecializationStats::record_call(); + return klass()->oop_oop_iterate_disp_backwards(this, blk); } - -ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN) #endif // INCLUDE_ALL_GCS #endif // SHARE_VM_OOPS_OOP_INLINE_HPP diff --git a/src/share/vm/oops/oop.inline2.hpp b/src/share/vm/oops/oop.inline2.hpp --- a/src/share/vm/oops/oop.inline2.hpp +++ b/src/share/vm/oops/oop.inline2.hpp @@ -36,4 +36,56 @@ inline bool oopDesc::is_scavengable() const { return Universe::heap()->is_scavengable(this); } + +#ifdef ASSERT +template void assert_is_in(T *p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop o = oopDesc::decode_heap_oop_not_null(heap_oop); + assert(Universe::heap()->is_in(o), "should be in heap"); + } +} +template void assert_is_in_closed_subset(T *p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop o = oopDesc::decode_heap_oop_not_null(heap_oop); + assert(Universe::heap()->is_in_closed_subset(o), "should be in closed"); + } +} +template void assert_is_in_reserved(T *p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop o = oopDesc::decode_heap_oop_not_null(heap_oop); + assert(Universe::heap()->is_in_reserved(o), "should be in reserved"); + } +} +#endif // ASSERT + +template +class Devirtualizer { + public: + static bool do_metadata(OopClosureType* closure) { ShouldNotReachHere(); return false; } + static void do_klass(OopClosureType* closure, Klass* k) { ShouldNotReachHere(); } + template + static void do_oop(OopClosureType* closure, T* p) { ShouldNotReachHere(); } +}; + +template +class Devirtualizer { + public: + static bool do_metadata(OopClosureType* closure) { return closure->do_metadata_nv(); } + static void do_klass(OopClosureType* closure, Klass* k) { closure->do_klass_nv(k); } + template + static void do_oop(OopClosureType* closure, T* p) { closure->do_oop_nv(p); } +}; + +template +class Devirtualizer { +public: + static bool do_metadata(OopClosureType* closure) { return closure->do_metadata(); } + static void do_klass(OopClosureType* closure, Klass* k) { closure->do_klass(k); } + template + static void do_oop(OopClosureType* closure, T* p) { closure->do_oop(p); } +}; + #endif // SHARE_VM_OOPS_OOP_INLINE2_HPP diff --git a/src/share/vm/oops/typeArrayKlass.cpp b/src/share/vm/oops/typeArrayKlass.cpp --- a/src/share/vm/oops/typeArrayKlass.cpp +++ b/src/share/vm/oops/typeArrayKlass.cpp @@ -84,7 +84,7 @@ return new (loader_data, size, THREAD) TypeArrayKlass(type, name); } -TypeArrayKlass::TypeArrayKlass(BasicType type, Symbol* name) : ArrayKlass(name) { +TypeArrayKlass::TypeArrayKlass(BasicType type, Symbol* name) : ArrayKlass(name, _type_array) { set_layout_helper(array_layout_helper(type)); assert(oop_is_array(), 
"sanity"); assert(oop_is_typeArray(), "sanity"); diff --git a/src/share/vm/oops/typeArrayKlass.hpp b/src/share/vm/oops/typeArrayKlass.hpp --- a/src/share/vm/oops/typeArrayKlass.hpp +++ b/src/share/vm/oops/typeArrayKlass.hpp @@ -40,7 +40,7 @@ TypeArrayKlass(BasicType type, Symbol* name); static TypeArrayKlass* allocate(ClassLoaderData* loader_data, BasicType type, Symbol* name, TRAPS); public: - TypeArrayKlass() {} // For dummy objects. + TypeArrayKlass() : ArrayKlass(_type_array) {} // For dummy objects. // instance variables jint max_length() { return _max_length; } diff --git a/src/share/vm/opto/callGenerator.cpp b/src/share/vm/opto/callGenerator.cpp --- a/src/share/vm/opto/callGenerator.cpp +++ b/src/share/vm/opto/callGenerator.cpp @@ -39,6 +39,7 @@ #include "opto/rootnode.hpp" #include "opto/runtime.hpp" #include "opto/subnode.hpp" +#include "runtime/sharedRuntime.hpp" // Utility function. diff --git a/src/share/vm/opto/compile.cpp b/src/share/vm/opto/compile.cpp --- a/src/share/vm/opto/compile.cpp +++ b/src/share/vm/opto/compile.cpp @@ -64,6 +64,7 @@ #include "opto/type.hpp" #include "opto/vectornode.hpp" #include "runtime/arguments.hpp" +#include "runtime/sharedRuntime.hpp" #include "runtime/signature.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/timer.hpp" diff --git a/src/share/vm/opto/convertnode.cpp b/src/share/vm/opto/convertnode.cpp --- a/src/share/vm/opto/convertnode.cpp +++ b/src/share/vm/opto/convertnode.cpp @@ -28,6 +28,7 @@ #include "opto/matcher.hpp" #include "opto/phaseX.hpp" #include "opto/subnode.hpp" +#include "runtime/sharedRuntime.hpp" //============================================================================= //------------------------------Identity--------------------------------------- diff --git a/src/share/vm/opto/lcm.cpp b/src/share/vm/opto/lcm.cpp --- a/src/share/vm/opto/lcm.cpp +++ b/src/share/vm/opto/lcm.cpp @@ -31,6 +31,7 @@ #include "opto/cfgnode.hpp" #include "opto/machnode.hpp" #include "opto/runtime.hpp" +#include "runtime/sharedRuntime.hpp" // Optimization - Graph Style diff --git a/src/share/vm/opto/matcher.cpp b/src/share/vm/opto/matcher.cpp --- a/src/share/vm/opto/matcher.cpp +++ b/src/share/vm/opto/matcher.cpp @@ -38,6 +38,7 @@ #include "opto/type.hpp" #include "opto/vectornode.hpp" #include "runtime/os.hpp" +#include "runtime/sharedRuntime.hpp" OptoReg::Name OptoReg::c_frame_pointer; diff --git a/src/share/vm/opto/memnode.cpp b/src/share/vm/opto/memnode.cpp --- a/src/share/vm/opto/memnode.cpp +++ b/src/share/vm/opto/memnode.cpp @@ -40,6 +40,7 @@ #include "opto/narrowptrnode.hpp" #include "opto/phaseX.hpp" #include "opto/regmask.hpp" +#include "utilities/copy.hpp" // Portions of code courtesy of Clifford Click diff --git a/src/share/vm/opto/stringopts.cpp b/src/share/vm/opto/stringopts.cpp --- a/src/share/vm/opto/stringopts.cpp +++ b/src/share/vm/opto/stringopts.cpp @@ -34,6 +34,7 @@ #include "opto/runtime.hpp" #include "opto/stringopts.hpp" #include "opto/subnode.hpp" +#include "runtime/sharedRuntime.hpp" #define __ kit. 
diff --git a/src/share/vm/prims/jvmtiGetLoadedClasses.cpp b/src/share/vm/prims/jvmtiGetLoadedClasses.cpp --- a/src/share/vm/prims/jvmtiGetLoadedClasses.cpp +++ b/src/share/vm/prims/jvmtiGetLoadedClasses.cpp @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" +#include "gc_interface/collectedHeap.inline.hpp" #include "memory/universe.inline.hpp" #include "prims/jvmtiGetLoadedClasses.hpp" #include "runtime/thread.hpp" diff --git a/src/share/vm/prims/methodHandles.cpp b/src/share/vm/prims/methodHandles.cpp --- a/src/share/vm/prims/methodHandles.cpp +++ b/src/share/vm/prims/methodHandles.cpp @@ -26,6 +26,7 @@ #include "classfile/stringTable.hpp" #include "compiler/compileBroker.hpp" #include "interpreter/interpreter.hpp" +#include "interpreter/linkResolver.hpp" #include "interpreter/oopMapCache.hpp" #include "memory/allocation.inline.hpp" #include "memory/oopFactory.hpp" diff --git a/src/share/vm/prims/methodHandles.hpp b/src/share/vm/prims/methodHandles.hpp --- a/src/share/vm/prims/methodHandles.hpp +++ b/src/share/vm/prims/methodHandles.hpp @@ -31,6 +31,7 @@ #include "runtime/globals.hpp" #include "runtime/interfaceSupport.hpp" +class FieldAccessInfo; class MacroAssembler; class Label; diff --git a/src/share/vm/runtime/objectMonitor.cpp b/src/share/vm/runtime/objectMonitor.cpp --- a/src/share/vm/runtime/objectMonitor.cpp +++ b/src/share/vm/runtime/objectMonitor.cpp @@ -35,6 +35,7 @@ #include "runtime/objectMonitor.inline.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/osThread.hpp" +#include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/thread.inline.hpp" #include "services/threadService.hpp" diff --git a/src/share/vm/runtime/os.cpp b/src/share/vm/runtime/os.cpp --- a/src/share/vm/runtime/os.cpp +++ b/src/share/vm/runtime/os.cpp @@ -48,6 +48,7 @@ #include "runtime/javaCalls.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/os.inline.hpp" +#include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/thread.inline.hpp" #include "runtime/vm_version.hpp" diff --git a/src/share/vm/runtime/serviceThread.cpp b/src/share/vm/runtime/serviceThread.cpp --- a/src/share/vm/runtime/serviceThread.cpp +++ b/src/share/vm/runtime/serviceThread.cpp @@ -32,6 +32,7 @@ #include "services/gcNotifier.hpp" #include "services/diagnosticArgument.hpp" #include "services/diagnosticFramework.hpp" +#include "services/lowMemoryDetector.hpp" ServiceThread* ServiceThread::_instance = NULL;
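Finally, the bounded (_m) iterators the patch keeps, such as InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE and ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE, all reduce to clamping an object's field range to a MemRegion before scanning, so only the fields inside the region (for example a dirty-card span) are visited. A small stand-alone sketch of that clamping, with invented names:

// Stand-alone sketch of bounded iteration (invented names): the field range of
// an object is clamped to a [low, high) region before scanning, so only the
// fields inside the region are passed to the closure.
#include <algorithm>
#include <cstdio>

struct PrintClosure {
  void do_value(const int* p) { std::printf("%d ", *p); }
};

template <typename T, typename ClosureType>
void iterate_bounded(T* start, int count, T* low, T* high, ClosureType* cl) {
  T* p   = std::max(start, low);           // clamp the start of the field range
  T* end = std::min(start + count, high);  // clamp the end of the field range
  while (p < end) {
    cl->do_value(p);
    ++p;
  }
}

int main() {
  int fields[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
  PrintClosure cl;
  // The region covers elements 2..5 only; everything outside it is skipped.
  iterate_bounded(fields, 8, fields + 2, fields + 6, &cl);
  std::printf("\n");                       // prints: 2 3 4 5
  return 0;
}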