44 #include "memory/resourceArea.hpp"
45 #include "oops/oop.inline.hpp"
46 #include "oops/oop.psgc.inline.hpp"
47 #include "runtime/biasedLocking.hpp"
48 #include "runtime/fprofiler.hpp"
49 #include "runtime/handles.inline.hpp"
50 #include "runtime/threadCritical.hpp"
51 #include "runtime/vmThread.hpp"
52 #include "runtime/vm_operations.hpp"
53 #include "services/memoryService.hpp"
54 #include "utilities/stack.inline.hpp"
55
56
// Definitions and initial values for PSScavenge's static state.
HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;     // to-space top recorded before a scavenge
int                        PSScavenge::_consecutive_skipped_scavenges = 0; // scavenges skipped back-to-back
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;              // reference processing support (created in init)
CardTableExtension*        PSScavenge::_card_table = NULL;                 // cached card-table barrier set
bool                       PSScavenge::_survivor_overflow = false;         // did survivor space overflow?
uint                       PSScavenge::_tenuring_threshold = 0;            // age at which survivors are tenured
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;  // boundary between old gen (below) and young gen (above)
elapsedTimer               PSScavenge::_accumulated_time;                  // accumulated scavenge time
Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;              // marks saved when headers are clobbered
Stack<oop, mtGC>           PSScavenge::_preserved_oop_stack;               // oops whose marks were preserved
CollectorCounters*         PSScavenge::_counters = NULL;                   // perf counters (created in init)
bool                       PSScavenge::_promotion_failed = false;          // promotion failure flag for the last scavenge
69
70 // Define before use
71 class PSIsAliveClosure: public BoolObjectClosure {
72 public:
73 bool do_object_b(oop p) {
74 return (!PSScavenge::is_obj_in_young((HeapWord*) p)) || p->is_forwarded();
75 }
76 };
77
78 PSIsAliveClosure PSScavenge::_is_alive_closure;
79
// Closure used by reference processing to keep a referent alive by
// scavenging (copying/promoting) it.
// NOTE(review): the class body continues beyond this chunk
// (do_oop_work and the do_oop overrides are not visible here).
class PSKeepAliveClosure: public OopClosure {
 protected:
  MutableSpace* _to_space;                 // young-gen to-space, cached from the heap at construction
  PSPromotionManager* _promotion_manager;  // promotion manager that performs the copying

 public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    // Only valid under the ParallelScavenge heap; cache to-space for fast checks.
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }
93
94 template <class T> void do_oop_work(T* p) {
798
799 if (AlwaysTenure) {
800 _tenuring_threshold = 0;
801 } else if (NeverTenure) {
802 _tenuring_threshold = markOopDesc::max_age + 1;
803 } else {
804 // We want to smooth out our startup times for the AdaptiveSizePolicy
805 _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
806 MaxTenuringThreshold;
807 }
808
809 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
810 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
811
812 PSYoungGen* young_gen = heap->young_gen();
813 PSOldGen* old_gen = heap->old_gen();
814
815 // Set boundary between young_gen and old_gen
816 assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
817 "old above young");
818 _young_generation_boundary = young_gen->eden_space()->bottom();
819
820 // Initialize ref handling object for scavenging.
821 MemRegion mr = young_gen->reserved();
822
823 _ref_processor =
824 new ReferenceProcessor(mr, // span
825 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
826 (int) ParallelGCThreads, // mt processing degree
827 true, // mt discovery
828 (int) ParallelGCThreads, // mt discovery degree
829 true, // atomic_discovery
830 NULL, // header provides liveness info
831 false); // next field updates do not need write barrier
832
833 // Cache the cardtable
834 BarrierSet* bs = Universe::heap()->barrier_set();
835 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
836 _card_table = (CardTableExtension*)bs;
837
838 _counters = new CollectorCounters("PSScavenge", 0);
|
44 #include "memory/resourceArea.hpp"
45 #include "oops/oop.inline.hpp"
46 #include "oops/oop.psgc.inline.hpp"
47 #include "runtime/biasedLocking.hpp"
48 #include "runtime/fprofiler.hpp"
49 #include "runtime/handles.inline.hpp"
50 #include "runtime/threadCritical.hpp"
51 #include "runtime/vmThread.hpp"
52 #include "runtime/vm_operations.hpp"
53 #include "services/memoryService.hpp"
54 #include "utilities/stack.inline.hpp"
55
56
// Definitions and initial values for PSScavenge's static state.
HeapWord*                  PSScavenge::_to_space_top_before_gc = NULL;     // to-space top recorded before a scavenge
int                        PSScavenge::_consecutive_skipped_scavenges = 0; // scavenges skipped back-to-back
ReferenceProcessor*        PSScavenge::_ref_processor = NULL;              // reference processing support (created in init)
CardTableExtension*        PSScavenge::_card_table = NULL;                 // cached card-table barrier set
bool                       PSScavenge::_survivor_overflow = false;         // did survivor space overflow?
uint                       PSScavenge::_tenuring_threshold = 0;            // age at which survivors are tenured
HeapWord*                  PSScavenge::_young_generation_boundary = NULL;  // boundary between old gen (below) and young gen (above)
// Presumably the same boundary pre-encoded in compressed-oop form for fast
// in-young tests with narrow oops — TODO(review): confirm against the header.
uintptr_t                  PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer               PSScavenge::_accumulated_time;                  // accumulated scavenge time
Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;              // marks saved when headers are clobbered
Stack<oop, mtGC>           PSScavenge::_preserved_oop_stack;               // oops whose marks were preserved
CollectorCounters*         PSScavenge::_counters = NULL;                   // perf counters (created in init)
bool                       PSScavenge::_promotion_failed = false;          // promotion failure flag for the last scavenge
70
71 // Define before use
72 class PSIsAliveClosure: public BoolObjectClosure {
73 public:
74 bool do_object_b(oop p) {
75 return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
76 }
77 };
78
79 PSIsAliveClosure PSScavenge::_is_alive_closure;
80
// Closure used by reference processing to keep a referent alive by
// scavenging (copying/promoting) it.
// NOTE(review): the class body continues beyond this chunk
// (do_oop_work and the do_oop overrides are not visible here).
class PSKeepAliveClosure: public OopClosure {
 protected:
  MutableSpace* _to_space;                 // young-gen to-space, cached from the heap at construction
  PSPromotionManager* _promotion_manager;  // promotion manager that performs the copying

 public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    // Only valid under the ParallelScavenge heap; cache to-space for fast checks.
    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != NULL, "Sanity");
  }
94
95 template <class T> void do_oop_work(T* p) {
799
800 if (AlwaysTenure) {
801 _tenuring_threshold = 0;
802 } else if (NeverTenure) {
803 _tenuring_threshold = markOopDesc::max_age + 1;
804 } else {
805 // We want to smooth out our startup times for the AdaptiveSizePolicy
806 _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
807 MaxTenuringThreshold;
808 }
809
810 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
811 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
812
813 PSYoungGen* young_gen = heap->young_gen();
814 PSOldGen* old_gen = heap->old_gen();
815
816 // Set boundary between young_gen and old_gen
817 assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
818 "old above young");
819 set_young_generation_boundary(young_gen->eden_space()->bottom());
820
821 // Initialize ref handling object for scavenging.
822 MemRegion mr = young_gen->reserved();
823
824 _ref_processor =
825 new ReferenceProcessor(mr, // span
826 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
827 (int) ParallelGCThreads, // mt processing degree
828 true, // mt discovery
829 (int) ParallelGCThreads, // mt discovery degree
830 true, // atomic_discovery
831 NULL, // header provides liveness info
832 false); // next field updates do not need write barrier
833
834 // Cache the cardtable
835 BarrierSet* bs = Universe::heap()->barrier_set();
836 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
837 _card_table = (CardTableExtension*)bs;
838
839 _counters = new CollectorCounters("PSScavenge", 0);
|