rev 6902 : 8062036: ConcurrentMarkThread::slt may be invoked before ConcurrentMarkThread::makeSurrogateLockerThread causing intermittent crashes
Summary: Suppress gc_alot during VM init, improve error for SLT uninitialized.
Reviewed-by: jmasa, brutisso, tschatzl
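
The hunk below makes VM_CGC_Operation::acquire_pending_list_lock() tolerate a not-yet-created surrogate locker thread: ConcurrentMarkThread::slt() is now checked for NULL, and when the SLT is missing the code calls SurrogateLockerThread::report_missing_slt() instead of dereferencing a NULL pointer. The helper itself is defined elsewhere in the changeset and is not part of this file; the following is a minimal illustrative sketch of what such a reporting routine could look like, assuming it simply terminates VM initialization with a descriptive error (the message text and the use of vm_exit_during_initialization() are assumptions, not the actual body):

    // Illustrative sketch only -- not the body from this changeset.
    // Assumes HotSpot's standard VM-exit helpers are in scope.
    void SurrogateLockerThread::report_missing_slt() {
      // A VM operation that needs the pending list lock was scheduled
      // before makeSurrogateLockerThread() ran, i.e. before VM init
      // finished creating the SLT. Fail with a clear message instead
      // of crashing on a NULL ConcurrentMarkThread::slt().
      vm_exit_during_initialization(
        "GC request before the SurrogateLockerThread has been created");
    }
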
--- old/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
+++ new/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
1 1 /*
2 2 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
27 27 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
28 28 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
29 29 #include "gc_implementation/g1/g1Log.hpp"
30 30 #include "gc_implementation/g1/vm_operations_g1.hpp"
31 31 #include "gc_implementation/shared/gcTimer.hpp"
32 32 #include "gc_implementation/shared/gcTraceTime.hpp"
33 33 #include "gc_implementation/shared/isGCActiveMark.hpp"
34 34 #include "gc_implementation/g1/vm_operations_g1.hpp"
35 35 #include "runtime/interfaceSupport.hpp"
36 36
37 37 VM_G1CollectForAllocation::VM_G1CollectForAllocation(
38 38 unsigned int gc_count_before,
39 39 size_t word_size)
40 40 : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
41 41 GCCause::_allocation_failure) {
42 42 guarantee(word_size > 0, "an allocation should always be requested");
43 43 }
44 44
45 45 void VM_G1CollectForAllocation::doit() {
46 46 G1CollectedHeap* g1h = G1CollectedHeap::heap();
47 47 GCCauseSetter x(g1h, _gc_cause);
48 48
49 49 _result = g1h->satisfy_failed_allocation(_word_size, allocation_context(), &_pause_succeeded);
50 50 assert(_result == NULL || _pause_succeeded,
51 51 "if we get back a result, the pause should have succeeded");
52 52 }
53 53
54 54 void VM_G1CollectFull::doit() {
55 55 G1CollectedHeap* g1h = G1CollectedHeap::heap();
56 56 GCCauseSetter x(g1h, _gc_cause);
57 57 g1h->do_full_collection(false /* clear_all_soft_refs */);
58 58 }
59 59
60 60 VM_G1IncCollectionPause::VM_G1IncCollectionPause(
61 61 unsigned int gc_count_before,
62 62 size_t word_size,
63 63 bool should_initiate_conc_mark,
64 64 double target_pause_time_ms,
65 65 GCCause::Cause gc_cause)
66 66 : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
67 67 _should_initiate_conc_mark(should_initiate_conc_mark),
68 68 _target_pause_time_ms(target_pause_time_ms),
69 69 _should_retry_gc(false),
70 70 _old_marking_cycles_completed_before(0) {
71 71 guarantee(target_pause_time_ms > 0.0,
72 72 err_msg("target_pause_time_ms = %1.6lf should be positive",
73 73 target_pause_time_ms));
74 74 _gc_cause = gc_cause;
75 75 }
76 76
77 77 bool VM_G1IncCollectionPause::doit_prologue() {
78 78 bool res = VM_GC_Operation::doit_prologue();
79 79 if (!res) {
80 80 if (_should_initiate_conc_mark) {
81 81 // The prologue can fail for a couple of reasons. The first is that another GC
82 82 // got scheduled and prevented the scheduling of the initial mark GC. The
83 83 // second is that the GC locker may be active and the heap can't be expanded.
84 84 // In both cases we want to retry the GC so that the initial mark pause is
85 85 // actually scheduled. In the second case, however, we should stall
86 86 // until the GC locker is no longer active and then retry the initial mark GC.
87 87 _should_retry_gc = true;
88 88 }
89 89 }
90 90 return res;
91 91 }
92 92
93 93 void VM_G1IncCollectionPause::doit() {
94 94 G1CollectedHeap* g1h = G1CollectedHeap::heap();
95 95 assert(!_should_initiate_conc_mark ||
96 96 ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
97 97 (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
98 98 _gc_cause == GCCause::_g1_humongous_allocation ||
99 99 _gc_cause == GCCause::_update_allocation_context_stats_inc),
100 100 "only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle");
101 101
102 102 if (_word_size > 0) {
103 103 // An allocation has been requested. So, try to do that first.
104 104 _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
105 105 false /* expect_null_cur_alloc_region */);
106 106 if (_result != NULL) {
107 107 // If we can successfully allocate before we actually do the
108 108 // pause then we will consider this pause successful.
109 109 _pause_succeeded = true;
110 110 return;
111 111 }
112 112 }
113 113
114 114 GCCauseSetter x(g1h, _gc_cause);
115 115 if (_should_initiate_conc_mark) {
116 116 // It's safer to read old_marking_cycles_completed() here, given
117 117 // that no one else will be updating it concurrently. Since we'll
118 118 // only need it if we're initiating a marking cycle, no point in
119 119 // setting it earlier.
120 120 _old_marking_cycles_completed_before = g1h->old_marking_cycles_completed();
121 121
122 122 // At this point we are supposed to start a concurrent cycle. We
123 123 // will do so if one is not already in progress.
124 124 bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
125 125
126 126 // The above routine returns true if we were able to force the
127 127 // next GC pause to be an initial mark; it returns false if a
128 128 // marking cycle is already in progress.
129 129 //
130 130 // If a marking cycle is already in progress just return and skip the
131 131 // pause below - if the reason for requesting this initial mark pause
132 132 // was due to a System.gc() then the requesting thread should block in
133 133 // doit_epilogue() until the marking cycle is complete.
134 134 //
135 135 // If this initial mark pause was requested as part of a humongous
136 136 // allocation then we know that the marking cycle must just have
137 137 // been started by another thread (possibly also allocating a humongous
138 138 // object) as there was no active marking cycle when the requesting
139 139 // thread checked before calling collect() in
140 140 // attempt_allocation_humongous(). Retrying the GC, in this case,
141 141 // will cause the requesting thread to spin inside collect() until the
142 142 // just started marking cycle is complete - which may be a while. So
143 143 // we do NOT retry the GC.
144 144 if (!res) {
145 145 assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
146 146 if (_gc_cause != GCCause::_g1_humongous_allocation) {
147 147 _should_retry_gc = true;
148 148 }
149 149 return;
150 150 }
151 151 }
152 152
153 153 _pause_succeeded =
154 154 g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
155 155 if (_pause_succeeded && _word_size > 0) {
156 156 // An allocation had been requested.
157 157 _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
158 158 true /* expect_null_cur_alloc_region */);
159 159 } else {
160 160 assert(_result == NULL, "invariant");
161 161 if (!_pause_succeeded) {
162 162 // Another possible reason for the pause to not be successful
163 163 // is that, again, the GC locker is active (and has become active
164 164 // since the prologue was executed). In this case we should retry
165 165 // the pause after waiting for the GC locker to become inactive.
166 166 _should_retry_gc = true;
167 167 }
168 168 }
169 169 }
170 170
171 171 void VM_G1IncCollectionPause::doit_epilogue() {
172 172 VM_GC_Operation::doit_epilogue();
173 173
174 174 // If the pause was initiated by a System.gc() and
175 175 // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
176 176 // that just started (or maybe one that was already in progress) to
177 177 // finish.
178 178 if (_gc_cause == GCCause::_java_lang_system_gc &&
179 179 _should_initiate_conc_mark) {
180 180 assert(ExplicitGCInvokesConcurrent,
181 181 "the only way to be here is if ExplicitGCInvokesConcurrent is set");
182 182
183 183 G1CollectedHeap* g1h = G1CollectedHeap::heap();
184 184
185 185 // In the doit() method we saved g1h->old_marking_cycles_completed()
186 186 // in the _old_marking_cycles_completed_before field. We have to
187 187 // wait until we observe that g1h->old_marking_cycles_completed()
188 188 // has increased by at least one. This can happen if a) we started
189 189 // a cycle and it completes, b) a cycle already in progress
190 190 // completes, or c) a Full GC happens.
191 191
192 192 // If the condition has already been reached, there's no point in
193 193 // actually taking the lock and doing the wait.
194 194 if (g1h->old_marking_cycles_completed() <=
195 195 _old_marking_cycles_completed_before) {
196 196 // The following is largely copied from CMS
197 197
198 198 Thread* thr = Thread::current();
199 199 assert(thr->is_Java_thread(), "invariant");
200 200 JavaThread* jt = (JavaThread*)thr;
201 201 ThreadToNativeFromVM native(jt);
202 202
203 203 MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
204 204 while (g1h->old_marking_cycles_completed() <=
205 205 _old_marking_cycles_completed_before) {
206 206 FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
207 207 }
208 208 }
209 209 }
210 210 }
211 211
212 212 void VM_CGC_Operation::acquire_pending_list_lock() {
213 213 assert(_needs_pll, "don't call this otherwise");
214 214 // The caller may block while communicating
215 215 // with the SLT thread in order to acquire/release the PLL.
216 - ConcurrentMarkThread::slt()->
217 - manipulatePLL(SurrogateLockerThread::acquirePLL);
216 + SurrogateLockerThread* slt = ConcurrentMarkThread::slt();
217 + if (slt != NULL) {
218 + slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
219 + } else {
220 + SurrogateLockerThread::report_missing_slt();
221 + }
218 222 }
219 223
220 224 void VM_CGC_Operation::release_and_notify_pending_list_lock() {
221 225 assert(_needs_pll, "don't call this otherwise");
222 226 // The caller may block while communicating
223 227 // with the SLT thread in order to acquire/release the PLL.
224 228 ConcurrentMarkThread::slt()->
225 229 manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
226 230 }
227 231
228 232 void VM_CGC_Operation::doit() {
229 233 gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
230 234 TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
231 235 GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
232 236 SharedHeap* sh = SharedHeap::heap();
233 237 // This could go away if CollectedHeap gave access to _gc_is_active...
234 238 if (sh != NULL) {
235 239 IsGCActiveMark x;
236 240 _cl->do_void();
237 241 } else {
238 242 _cl->do_void();
239 243 }
240 244 }
241 245
242 246 bool VM_CGC_Operation::doit_prologue() {
243 247 // Note the relative order of the locks must match that in
244 248 // VM_GC_Operation::doit_prologue() or deadlocks can occur
245 249 if (_needs_pll) {
246 250 acquire_pending_list_lock();
247 251 }
248 252
249 253 Heap_lock->lock();
250 254 SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
251 255 return true;
252 256 }
253 257
254 258 void VM_CGC_Operation::doit_epilogue() {
255 259 // Note the relative order of the unlocks must match that in
256 260 // VM_GC_Operation::doit_epilogue()
257 261 SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
258 262 Heap_lock->unlock();
259 263 if (_needs_pll) {
260 264 release_and_notify_pending_list_lock();
261 265 }
262 266 }
35 lines elided