#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psTasks.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"
58
// Definitions of PSScavenge's static fields (declared in psScavenge.hpp).
HeapWord* PSScavenge::_to_space_top_before_gc = NULL;
// Count of back-to-back scavenges that were skipped (incremented where a
// scavenge is rejected, e.g. when promotion looked too large -- see below).
int PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor* PSScavenge::_ref_processor = NULL;
PSCardTable* PSScavenge::_card_table = NULL;
// NOTE(review): presumably set when to-space fills during a scavenge -- the
// code that writes it is outside this view; confirm before relying on it.
bool PSScavenge::_survivor_overflow = false;
// Object age at which survivors are tenured; set in initialize() from
// MaxTenuringThreshold / InitialTenuringThreshold.
uint PSScavenge::_tenuring_threshold = 0;
// Young/old generation boundary address; reset by set_young_generation_boundary()
// whenever the boundary moves.
HeapWord* PSScavenge::_young_generation_boundary = NULL;
// Compressed-oop encoding of the boundary; only maintained when UseCompressedOops.
uintptr_t PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer PSScavenge::_accumulated_time;
STWGCTimer PSScavenge::_gc_timer;
ParallelScavengeTracer PSScavenge::_gc_tracer;
76 return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
77 }
78 };
79
// The single shared instance of the is-alive closure defined above.
PSIsAliveClosure PSScavenge::_is_alive_closure;
81
82 class PSKeepAliveClosure: public OopClosure {
83 protected:
84 MutableSpace* _to_space;
85 PSPromotionManager* _promotion_manager;
86
87 public:
88 PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
89 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
90 _to_space = heap->young_gen()->to_space();
91
92 assert(_promotion_manager != NULL, "Sanity");
93 }
94
95 template <class T> void do_oop_work(T* p) {
96 assert (!oopDesc::is_null(*p), "expected non-null ref");
97 assert (oopDesc::is_oop(oopDesc::load_decode_heap_oop_not_null(p)),
98 "expected an oop while scanning weak refs");
99
100 // Weak refs may be visited more than once.
101 if (PSScavenge::should_scavenge(p, _to_space)) {
102 _promotion_manager->copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(p);
103 }
104 }
105 virtual void do_oop(oop* p) { PSKeepAliveClosure::do_oop_work(p); }
106 virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
107 };
108
109 class PSEvacuateFollowersClosure: public VoidClosure {
110 private:
111 PSPromotionManager* _promotion_manager;
112 public:
113 PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}
114
115 virtual void do_void() {
116 assert(_promotion_manager != NULL, "Sanity");
117 _promotion_manager->drain_stacks(true);
721 _consecutive_skipped_scavenges++;
722 if (UsePerfData) {
723 counters->update_scavenge_skipped(promoted_too_large);
724 }
725 }
726 return result;
727 }
728
729 // Used to add tasks
730 GCTaskManager* const PSScavenge::gc_task_manager() {
731 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
732 "shouldn't return NULL");
733 return ParallelScavengeHeap::gc_task_manager();
734 }
735
736 // Adaptive size policy support. When the young generation/old generation
737 // boundary moves, _young_generation_boundary must be reset
738 void PSScavenge::set_young_generation_boundary(HeapWord* v) {
739 _young_generation_boundary = v;
740 if (UseCompressedOops) {
741 _young_generation_boundary_compressed = (uintptr_t)oopDesc::encode_heap_oop((oop)v);
742 }
743 }
744
745 void PSScavenge::initialize() {
746 // Arguments must have been parsed
747
748 if (AlwaysTenure || NeverTenure) {
749 assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
750 "MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold);
751 _tenuring_threshold = MaxTenuringThreshold;
752 } else {
753 // We want to smooth out our startup times for the AdaptiveSizePolicy
754 _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
755 MaxTenuringThreshold;
756 }
757
758 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
759 PSYoungGen* young_gen = heap->young_gen();
760 PSOldGen* old_gen = heap->old_gen();
761
|
30 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
31 #include "gc/parallel/psMarkSweep.hpp"
32 #include "gc/parallel/psParallelCompact.inline.hpp"
33 #include "gc/parallel/psScavenge.inline.hpp"
34 #include "gc/parallel/psTasks.hpp"
35 #include "gc/shared/collectorPolicy.hpp"
36 #include "gc/shared/gcCause.hpp"
37 #include "gc/shared/gcHeapSummary.hpp"
38 #include "gc/shared/gcId.hpp"
39 #include "gc/shared/gcLocker.inline.hpp"
40 #include "gc/shared/gcTimer.hpp"
41 #include "gc/shared/gcTrace.hpp"
42 #include "gc/shared/gcTraceTime.inline.hpp"
43 #include "gc/shared/isGCActiveMark.hpp"
44 #include "gc/shared/referencePolicy.hpp"
45 #include "gc/shared/referenceProcessor.hpp"
46 #include "gc/shared/spaceDecorator.hpp"
47 #include "gc/shared/weakProcessor.hpp"
48 #include "memory/resourceArea.hpp"
49 #include "logging/log.hpp"
50 #include "oops/access.inline.hpp"
51 #include "oops/compressedOops.inline.hpp"
52 #include "oops/oop.inline.hpp"
53 #include "runtime/biasedLocking.hpp"
54 #include "runtime/handles.inline.hpp"
55 #include "runtime/threadCritical.hpp"
56 #include "runtime/vmThread.hpp"
57 #include "runtime/vm_operations.hpp"
58 #include "services/memoryService.hpp"
59 #include "utilities/stack.inline.hpp"
60
// Definitions of PSScavenge's static fields (declared in psScavenge.hpp).
HeapWord* PSScavenge::_to_space_top_before_gc = NULL;
// Count of back-to-back scavenges that were skipped (incremented where a
// scavenge is rejected, e.g. when promotion looked too large -- see below).
int PSScavenge::_consecutive_skipped_scavenges = 0;
ReferenceProcessor* PSScavenge::_ref_processor = NULL;
PSCardTable* PSScavenge::_card_table = NULL;
// NOTE(review): presumably set when to-space fills during a scavenge -- the
// code that writes it is outside this view; confirm before relying on it.
bool PSScavenge::_survivor_overflow = false;
// Object age at which survivors are tenured; set in initialize() from
// MaxTenuringThreshold / InitialTenuringThreshold.
uint PSScavenge::_tenuring_threshold = 0;
// Young/old generation boundary address; reset by set_young_generation_boundary()
// whenever the boundary moves.
HeapWord* PSScavenge::_young_generation_boundary = NULL;
// Compressed-oop encoding of the boundary; only maintained when UseCompressedOops.
uintptr_t PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer PSScavenge::_accumulated_time;
STWGCTimer PSScavenge::_gc_timer;
ParallelScavengeTracer PSScavenge::_gc_tracer;
78 return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
79 }
80 };
81
// The single shared instance of the is-alive closure defined above.
PSIsAliveClosure PSScavenge::_is_alive_closure;
83
84 class PSKeepAliveClosure: public OopClosure {
85 protected:
86 MutableSpace* _to_space;
87 PSPromotionManager* _promotion_manager;
88
89 public:
90 PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
91 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
92 _to_space = heap->young_gen()->to_space();
93
94 assert(_promotion_manager != NULL, "Sanity");
95 }
96
97 template <class T> void do_oop_work(T* p) {
98 assert (oopDesc::is_oop(RawAccess<OOP_NOT_NULL>::oop_load(p)),
99 "expected an oop while scanning weak refs");
100
101 // Weak refs may be visited more than once.
102 if (PSScavenge::should_scavenge(p, _to_space)) {
103 _promotion_manager->copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(p);
104 }
105 }
106 virtual void do_oop(oop* p) { PSKeepAliveClosure::do_oop_work(p); }
107 virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
108 };
109
110 class PSEvacuateFollowersClosure: public VoidClosure {
111 private:
112 PSPromotionManager* _promotion_manager;
113 public:
114 PSEvacuateFollowersClosure(PSPromotionManager* pm) : _promotion_manager(pm) {}
115
116 virtual void do_void() {
117 assert(_promotion_manager != NULL, "Sanity");
118 _promotion_manager->drain_stacks(true);
722 _consecutive_skipped_scavenges++;
723 if (UsePerfData) {
724 counters->update_scavenge_skipped(promoted_too_large);
725 }
726 }
727 return result;
728 }
729
730 // Used to add tasks
731 GCTaskManager* const PSScavenge::gc_task_manager() {
732 assert(ParallelScavengeHeap::gc_task_manager() != NULL,
733 "shouldn't return NULL");
734 return ParallelScavengeHeap::gc_task_manager();
735 }
736
737 // Adaptive size policy support. When the young generation/old generation
738 // boundary moves, _young_generation_boundary must be reset
739 void PSScavenge::set_young_generation_boundary(HeapWord* v) {
740 _young_generation_boundary = v;
741 if (UseCompressedOops) {
742 _young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode((oop)v);
743 }
744 }
745
746 void PSScavenge::initialize() {
747 // Arguments must have been parsed
748
749 if (AlwaysTenure || NeverTenure) {
750 assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markOopDesc::max_age + 1,
751 "MaxTenuringThreshold should be 0 or markOopDesc::max_age + 1, but is %d", (int) MaxTenuringThreshold);
752 _tenuring_threshold = MaxTenuringThreshold;
753 } else {
754 // We want to smooth out our startup times for the AdaptiveSizePolicy
755 _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
756 MaxTenuringThreshold;
757 }
758
759 ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
760 PSYoungGen* young_gen = heap->young_gen();
761 PSOldGen* old_gen = heap->old_gen();
762
|