--- old/src/share/vm/gc/parallel/parallelScavengeHeap.hpp	2017-04-25 16:44:51.559174616 +0200
+++ new/src/share/vm/gc/parallel/parallelScavengeHeap.hpp	2017-04-25 16:44:51.395174622 +0200
@@ -25,11 +25,13 @@
 #ifndef SHARE_VM_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP
 #define SHARE_VM_GC_PARALLEL_PARALLELSCAVENGEHEAP_HPP
 
+#include "gc/parallel/psCardTable.hpp"
 #include "gc/parallel/generationSizer.hpp"
 #include "gc/parallel/objectStartArray.hpp"
 #include "gc/parallel/psGCAdaptivePolicyCounters.hpp"
 #include "gc/parallel/psOldGen.hpp"
 #include "gc/parallel/psYoungGen.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/collectorPolicy.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
@@ -188,21 +190,6 @@
   size_t tlab_used(Thread* thr) const;
   size_t unsafe_max_tlab_alloc(Thread* thr) const;
 
-  // Can a compiler initialize a new object without store barriers?
-  // This permission only extends from the creation of a new object
-  // via a TLAB up to the first subsequent safepoint.
-  virtual bool can_elide_tlab_store_barriers() const {
-    return true;
-  }
-
-  virtual bool card_mark_must_follow_store() const {
-    return false;
-  }
-
-  // Return true if we don't we need a store barrier for
-  // initializing stores to an object at this address.
-  virtual bool can_elide_initializing_store_barrier(oop new_obj);
-
   void object_iterate(ObjectClosure* cl);
   void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
 
@@ -236,6 +223,14 @@
   // Mangle the unused parts of all spaces in the heap
   void gen_mangle_unused_area() PRODUCT_RETURN;
 
+  CardTableModRefBS* barrier_set() {
+    return barrier_set_cast<CardTableModRefBS>(CollectedHeap::barrier_set());
+  }
+
+  PSCardTable* card_table() {
+    return static_cast<PSCardTable*>(barrier_set()->card_table());
+  }
+
   // Call these in sequential code around the processing of strong roots.
   class ParStrongRootsScope : public MarkScope {
   public: