54 double vtime_start = os::elapsedVTime();
55
56 while (!should_terminate()) {
57 sample_young_list_rs_lengths();
58
59 if (os::supports_vtime()) {
60 _vtime_accum = (os::elapsedVTime() - vtime_start);
61 } else {
62 _vtime_accum = 0.0;
63 }
64
65 sleep_before_next_cycle();
66 }
67 }
68
69 void G1YoungRemSetSamplingThread::stop_service() {
70 MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag);
71 _monitor.notify();
72 }
73
74 void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
75 SuspendibleThreadSetJoiner sts;
76 G1CollectedHeap* g1h = G1CollectedHeap::heap();
77 G1Policy* g1p = g1h->g1_policy();
78 G1CollectionSet* g1cs = g1h->collection_set();
79 if (g1p->adaptive_young_list_length()) {
80 int regions_visited = 0;
81 HeapRegion* hr = g1cs->inc_head();
82 size_t sampled_rs_lengths = 0;
83
84 while (hr != NULL) {
85 size_t rs_length = hr->rem_set()->occupied();
86 sampled_rs_lengths += rs_length;
87
88 // Update the collection set policy information for this region
89 g1cs->update_young_region_prediction(hr, rs_length);
90
91 ++regions_visited;
92
93 // we try to yield every time we visit 10 regions
94 if (regions_visited == 10) {
95 if (sts.should_yield()) {
96 sts.yield();
97 // A gc may have occurred and our sampling data is stale and further
98 // traversal of the collection set is unsafe
99 return;
100 }
101 regions_visited = 0;
102 }
103 assert(hr == g1cs->inc_tail() || hr->next_in_collection_set() != NULL, "next should only be null at tail of icset");
104 hr = hr->next_in_collection_set();
105 }
106 g1p->revise_young_list_target_length_if_necessary(sampled_rs_lengths);
107 }
108 }
|
54 double vtime_start = os::elapsedVTime();
55
56 while (!should_terminate()) {
57 sample_young_list_rs_lengths();
58
59 if (os::supports_vtime()) {
60 _vtime_accum = (os::elapsedVTime() - vtime_start);
61 } else {
62 _vtime_accum = 0.0;
63 }
64
65 sleep_before_next_cycle();
66 }
67 }
68
// Asks the sampling thread to stop: notifies _monitor so a thread blocked
// in sleep_before_next_cycle() wakes up and can observe termination.
void G1YoungRemSetSamplingThread::stop_service() {
  // NOTE(review): no-safepoint-check lock — presumably required because this
  // can run from a thread/state where a safepoint check is not allowed; confirm.
  MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag);
  _monitor.notify();
}
73
74 class G1YoungRemSetSamplingClosure : public HeapRegionClosure {
75 SuspendibleThreadSetJoiner* _sts;
76 size_t _regions_visited;
77 size_t _sampled_rs_lengths;
78 public:
79 G1YoungRemSetSamplingClosure(SuspendibleThreadSetJoiner* sts) :
80 HeapRegionClosure(), _sts(sts), _regions_visited(0), _sampled_rs_lengths(0) { }
81
82 virtual bool doHeapRegion(HeapRegion* r) {
83 size_t rs_length = r->rem_set()->occupied();
84 _sampled_rs_lengths += rs_length;
85
86 // Update the collection set policy information for this region
87 G1CollectedHeap::heap()->collection_set()->update_young_region_prediction(r, rs_length);
88
89 _regions_visited++;
90
91 if (_regions_visited == 10) {
92 if (_sts->should_yield()) {
93 _sts->yield();
94 // A gc may have occurred and our sampling data is stale and further
95 // traversal of the collection set is unsafe
96 return true;
97 }
98 _regions_visited = 0;
99 }
100 return false;
101 }
102
103 size_t sampled_rs_lengths() const { return _sampled_rs_lengths; }
104 };
105
106 void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
107 SuspendibleThreadSetJoiner sts;
108 G1CollectedHeap* g1h = G1CollectedHeap::heap();
109 G1Policy* g1p = g1h->g1_policy();
110
111 if (g1p->adaptive_young_list_length()) {
112 G1YoungRemSetSamplingClosure cl(&sts);
113
114 G1CollectionSet* g1cs = g1h->collection_set();
115 g1cs->iterate(&cl);
116
117 if (cl.complete()) {
118 g1p->revise_young_list_target_length_if_necessary(cl.sampled_rs_lengths());
119 }
120 }
121 }
|