rev 7125 : 7176220: 'Full GC' events miss date stamp information occasionally
Summary: Move date stamp logic into GCTraceTime
Reviewed-by: brutisso, tschatzl
--- old/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
+++ new/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
1 1 /*
2 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "classfile/classLoaderData.hpp"
27 27 #include "classfile/symbolTable.hpp"
28 28 #include "classfile/systemDictionary.hpp"
29 29 #include "code/codeCache.hpp"
30 30 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
31 31 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
32 32 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
33 33 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
34 34 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
35 35 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
36 36 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
37 37 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
38 38 #include "gc_implementation/parNew/parNewGeneration.hpp"
39 39 #include "gc_implementation/shared/collectorCounters.hpp"
40 40 #include "gc_implementation/shared/gcTimer.hpp"
41 41 #include "gc_implementation/shared/gcTrace.hpp"
42 42 #include "gc_implementation/shared/gcTraceTime.hpp"
43 43 #include "gc_implementation/shared/isGCActiveMark.hpp"
44 44 #include "gc_interface/collectedHeap.inline.hpp"
45 45 #include "memory/allocation.hpp"
46 46 #include "memory/cardTableRS.hpp"
47 47 #include "memory/collectorPolicy.hpp"
48 48 #include "memory/gcLocker.inline.hpp"
49 49 #include "memory/genCollectedHeap.hpp"
50 50 #include "memory/genMarkSweep.hpp"
51 51 #include "memory/genOopClosures.inline.hpp"
52 52 #include "memory/iterator.inline.hpp"
53 53 #include "memory/padded.hpp"
54 54 #include "memory/referencePolicy.hpp"
55 55 #include "memory/resourceArea.hpp"
56 56 #include "memory/tenuredGeneration.hpp"
57 57 #include "oops/oop.inline.hpp"
58 58 #include "prims/jvmtiExport.hpp"
59 59 #include "runtime/globals_extension.hpp"
60 60 #include "runtime/handles.inline.hpp"
61 61 #include "runtime/java.hpp"
62 62 #include "runtime/orderAccess.inline.hpp"
63 63 #include "runtime/vmThread.hpp"
64 64 #include "services/memoryService.hpp"
65 65 #include "services/runtimeService.hpp"
66 66
67 67 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
68 68
69 69 // statics
70 70 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
71 71 bool CMSCollector::_full_gc_requested = false;
72 72 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
73 73
74 74 //////////////////////////////////////////////////////////////////
75 75 // In support of CMS/VM thread synchronization
76 76 //////////////////////////////////////////////////////////////////
77 77 // We split use of the CGC_lock into 2 "levels".
78 78 // The low-level locking is of the usual CGC_lock monitor. We introduce
79 79 // a higher level "token" (hereafter "CMS token") built on top of the
80 80 // low level monitor (hereafter "CGC lock").
81 81 // The token-passing protocol gives priority to the VM thread. The
82 82 // CMS-lock doesn't provide any fairness guarantees, but clients
83 83 // should ensure that it is only held for very short, bounded
84 84 // durations.
85 85 //
86 86 // When either of the CMS thread or the VM thread is involved in
87 87 // collection operations during which it does not want the other
88 88 // thread to interfere, it obtains the CMS token.
89 89 //
90 90 // If either thread tries to get the token while the other has
91 91 // it, that thread waits. However, if the VM thread and CMS thread
92 92 // both want the token, then the VM thread gets priority while the
93 93 // CMS thread waits. This ensures, for instance, that the "concurrent"
94 94 // phases of the CMS thread's work do not block out the VM thread
95 95 // for long periods of time as the CMS thread continues to hog
96 96 // the token. (See bug 4616232).
97 97 //
98 98 // The baton-passing functions are, however, controlled by the
99 99 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
100 100 // and here the low-level CMS lock, not the high level token,
101 101 // ensures mutual exclusion.
102 102 //
103 103 // Two important conditions that we have to satisfy:
104 104 // 1. if a thread does a low-level wait on the CMS lock, then it
105 105 // relinquishes the CMS token if it were holding that token
106 106 // when it acquired the low-level CMS lock.
107 107 // 2. any low-level notifications on the low-level lock
108 108 // should only be sent when a thread has relinquished the token.
109 109 //
110 110 // In the absence of either property, we'd have potential deadlock.
111 111 //
112 112 // We protect each of the CMS (concurrent and sequential) phases
113 113 // with the CMS _token_, not the CMS _lock_.
114 114 //
115 115 // The only code protected by CMS lock is the token acquisition code
116 116 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
117 117 // baton-passing code.
118 118 //
119 119 // Unfortunately, I couldn't come up with a good abstraction to factor and
120 120 // hide the naked CGC_lock manipulation in the baton-passing code
121 121 // further below. That's something we should try to do. Also, the proof
122 122 // of correctness of this 2-level locking scheme is far from obvious,
123 123 // and potentially quite slippery. We have an uneasy suspicion, for instance,
124 124 // that there may be a theoretical possibility of delay/starvation in the
125 125 // low-level lock/wait/notify scheme used for the baton-passing because of
126 126 // potential interference with the priority scheme embodied in the
127 127 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
128 128 // invocation further below and marked with "XXX 20011219YSR".
129 129 // Indeed, as we note elsewhere, this may become yet more slippery
130 130 // in the presence of multiple CMS and/or multiple VM threads. XXX
131 131
132 132 class CMSTokenSync: public StackObj {
133 133 private:
134 134 bool _is_cms_thread;
135 135 public:
136 136 CMSTokenSync(bool is_cms_thread):
137 137 _is_cms_thread(is_cms_thread) {
138 138 assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
139 139 "Incorrect argument to constructor");
140 140 ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
141 141 }
142 142
143 143 ~CMSTokenSync() {
144 144 assert(_is_cms_thread ?
145 145 ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
146 146 ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
147 147 "Incorrect state");
148 148 ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
149 149 }
150 150 };
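// Illustrative usage (a hypothetical sketch, not code from this file): a phase
// that must not be interleaved with the other thread simply scopes a
// CMSTokenSync on the stack, e.g.
//
//   {
//     CMSTokenSync ts(true /* running on the CMS thread */);
//     // ... work requiring the CMS token ...
//   }  // token relinquished when ts is destroyed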
151 151
152 152 // Convenience class that does a CMSTokenSync, and then acquires
153 153 // up to three locks.
154 154 class CMSTokenSyncWithLocks: public CMSTokenSync {
155 155 private:
156 156 // Note: locks are acquired in textual declaration order
157 157 // and released in the opposite order
158 158 MutexLockerEx _locker1, _locker2, _locker3;
159 159 public:
160 160 CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
161 161 Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
162 162 CMSTokenSync(is_cms_thread),
163 163 _locker1(mutex1, Mutex::_no_safepoint_check_flag),
164 164 _locker2(mutex2, Mutex::_no_safepoint_check_flag),
165 165 _locker3(mutex3, Mutex::_no_safepoint_check_flag)
166 166 { }
167 167 };
168 168
169 169
170 170 // Wrapper class to temporarily disable icms during a foreground cms collection.
171 171 class ICMSDisabler: public StackObj {
172 172 public:
173 173 // The ctor disables icms and wakes up the thread so it notices the change;
174 174 // the dtor re-enables icms. Note that the CMSCollector methods will check
175 175 // CMSIncrementalMode.
176 176 ICMSDisabler() { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
177 177 ~ICMSDisabler() { CMSCollector::enable_icms(); }
178 178 };
179 179
180 180 //////////////////////////////////////////////////////////////////
181 181 // Concurrent Mark-Sweep Generation /////////////////////////////
182 182 //////////////////////////////////////////////////////////////////
183 183
184 184 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
185 185
186 186 // This struct contains per-thread things necessary to support parallel
187 187 // young-gen collection.
188 188 class CMSParGCThreadState: public CHeapObj<mtGC> {
189 189 public:
190 190 CFLS_LAB lab;
191 191 PromotionInfo promo;
192 192
193 193 // Constructor.
194 194 CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
195 195 promo.setSpace(cfls);
196 196 }
197 197 };
198 198
199 199 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
200 200 ReservedSpace rs, size_t initial_byte_size, int level,
201 201 CardTableRS* ct, bool use_adaptive_freelists,
202 202 FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
203 203 CardGeneration(rs, initial_byte_size, level, ct),
204 204 _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
205 205 _debug_collection_type(Concurrent_collection_type),
206 206 _did_compact(false)
207 207 {
208 208 HeapWord* bottom = (HeapWord*) _virtual_space.low();
209 209 HeapWord* end = (HeapWord*) _virtual_space.high();
210 210
211 211 _direct_allocated_words = 0;
212 212 NOT_PRODUCT(
213 213 _numObjectsPromoted = 0;
214 214 _numWordsPromoted = 0;
215 215 _numObjectsAllocated = 0;
216 216 _numWordsAllocated = 0;
217 217 )
218 218
219 219 _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
220 220 use_adaptive_freelists,
221 221 dictionaryChoice);
222 222 NOT_PRODUCT(debug_cms_space = _cmsSpace;)
223 223 if (_cmsSpace == NULL) {
224 224 vm_exit_during_initialization(
225 225 "CompactibleFreeListSpace allocation failure");
226 226 }
227 227 _cmsSpace->_gen = this;
228 228
229 229 _gc_stats = new CMSGCStats();
230 230
231 231 // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
232 232 // offsets match. The ability to tell free chunks from objects
233 233 // depends on this property.
234 234 debug_only(
235 235 FreeChunk* junk = NULL;
236 236 assert(UseCompressedClassPointers ||
237 237 junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
238 238 "Offset of FreeChunk::_prev within FreeChunk must match"
239 239 " that of OopDesc::_klass within OopDesc");
240 240 )
241 241 if (CollectedHeap::use_parallel_gc_threads()) {
242 242 typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
243 243 _par_gc_thread_states =
244 244 NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
245 245 if (_par_gc_thread_states == NULL) {
246 246 vm_exit_during_initialization("Could not allocate par gc structs");
247 247 }
248 248 for (uint i = 0; i < ParallelGCThreads; i++) {
249 249 _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
250 250 if (_par_gc_thread_states[i] == NULL) {
251 251 vm_exit_during_initialization("Could not allocate par gc structs");
252 252 }
253 253 }
254 254 } else {
255 255 _par_gc_thread_states = NULL;
256 256 }
257 257 _incremental_collection_failed = false;
258 258 // The "dilatation_factor" is the expansion that can occur on
259 259 // account of the fact that the minimum object size in the CMS
260 260 // generation may be larger than that in, say, a contiguous young
261 261 // generation.
262 262 // Ideally, in the calculation below, we'd compute the dilatation
263 263 // factor as: MinChunkSize/(promoting_gen's min object size)
264 264 // Since we do not have such a general query interface for the
265 265 // promoting generation, we'll instead just use the minimum
266 266 // object size (which today is a header's worth of space);
267 267 // note that all arithmetic is in units of HeapWords.
268 268 assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
269 269 assert(_dilatation_factor >= 1.0, "from previous assert");
270 270 }
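// Illustrative example of the dilatation factor computed above (hypothetical
// sizes): if MinChunkSize were 4 HeapWords and the promoting generation's
// minimum fill size were 2 HeapWords, _dilatation_factor would be 4.0/2.0 = 2.0,
// i.e. a minimum-size young gen object could need up to twice the space once
// promoted into this generation.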
271 271
272 272
273 273 // The field "_initiating_occupancy" represents the occupancy percentage
274 274 // at which we trigger a new collection cycle. Unless explicitly specified
275 275 // via CMSInitiatingOccupancyFraction (argument "io" below), it
276 276 // is calculated by:
277 277 //
278 278 // Let "f" be MinHeapFreeRatio in
279 279 //
280 280 // _initiating_occupancy = 100-f +
281 281 // f * (CMSTriggerRatio/100)
282 282 // where CMSTriggerRatio is the argument "tr" below.
283 283 //
284 284 // That is, if we assume the heap is at its desired maximum occupancy at the
285 285 // end of a collection, we let CMSTriggerRatio of the (purported) free
286 286 // space be allocated before initiating a new collection cycle.
287 287 //
288 288 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
289 289 assert(io <= 100 && tr <= 100, "Check the arguments");
290 290 if (io >= 0) {
291 291 _initiating_occupancy = (double)io / 100.0;
292 292 } else {
293 293 _initiating_occupancy = ((100 - MinHeapFreeRatio) +
294 294 (double)(tr * MinHeapFreeRatio) / 100.0)
295 295 / 100.0;
296 296 }
297 297 }
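// Worked example of the computation above (illustrative flag values): with the
// occupancy fraction unset (io < 0), MinHeapFreeRatio = 40 and CMSTriggerRatio
// tr = 80:
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100) / 100
//                         = (60 + 32) / 100 = 0.92
// i.e. a new collection cycle is triggered once the generation is 92% occupied.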
298 298
299 299 void ConcurrentMarkSweepGeneration::ref_processor_init() {
300 300 assert(collector() != NULL, "no collector");
301 301 collector()->ref_processor_init();
302 302 }
303 303
304 304 void CMSCollector::ref_processor_init() {
305 305 if (_ref_processor == NULL) {
306 306 // Allocate and initialize a reference processor
307 307 _ref_processor =
308 308 new ReferenceProcessor(_span, // span
309 309 (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
310 310 (int) ParallelGCThreads, // mt processing degree
311 311 _cmsGen->refs_discovery_is_mt(), // mt discovery
312 312 (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
313 313 _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
314 314 &_is_alive_closure); // closure for liveness info
315 315 // Initialize the _ref_processor field of CMSGen
316 316 _cmsGen->set_ref_processor(_ref_processor);
317 317
318 318 }
319 319 }
320 320
321 321 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
322 322 GenCollectedHeap* gch = GenCollectedHeap::heap();
323 323 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
324 324 "Wrong type of heap");
325 325 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
326 326 gch->gen_policy()->size_policy();
327 327 assert(sp->is_gc_cms_adaptive_size_policy(),
328 328 "Wrong type of size policy");
329 329 return sp;
330 330 }
331 331
332 332 CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
333 333 CMSGCAdaptivePolicyCounters* results =
334 334 (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
335 335 assert(
336 336 results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
337 337 "Wrong gc policy counter kind");
338 338 return results;
339 339 }
340 340
341 341
342 342 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
343 343
344 344 const char* gen_name = "old";
345 345
346 346 // Generation Counters - generation 1, 1 subspace
347 347 _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
348 348
349 349 _space_counters = new GSpaceCounters(gen_name, 0,
350 350 _virtual_space.reserved_size(),
351 351 this, _gen_counters);
352 352 }
353 353
354 354 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
355 355 _cms_gen(cms_gen)
356 356 {
357 357 assert(alpha <= 100, "bad value");
358 358 _saved_alpha = alpha;
359 359
360 360 // Initialize the alphas to the bootstrap value of 100.
361 361 _gc0_alpha = _cms_alpha = 100;
362 362
363 363 _cms_begin_time.update();
364 364 _cms_end_time.update();
365 365
366 366 _gc0_duration = 0.0;
367 367 _gc0_period = 0.0;
368 368 _gc0_promoted = 0;
369 369
370 370 _cms_duration = 0.0;
371 371 _cms_period = 0.0;
372 372 _cms_allocated = 0;
373 373
374 374 _cms_used_at_gc0_begin = 0;
375 375 _cms_used_at_gc0_end = 0;
376 376 _allow_duty_cycle_reduction = false;
377 377 _valid_bits = 0;
378 378 _icms_duty_cycle = CMSIncrementalDutyCycle;
379 379 }
380 380
381 381 double CMSStats::cms_free_adjustment_factor(size_t free) const {
382 382 // TBD: CR 6909490
383 383 return 1.0;
384 384 }
385 385
386 386 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
387 387 }
388 388
389 389 // If promotion failure handling is on use
390 390 // the padded average size of the promotion for each
391 391 // young generation collection.
392 392 double CMSStats::time_until_cms_gen_full() const {
393 393 size_t cms_free = _cms_gen->cmsSpace()->free();
394 394 GenCollectedHeap* gch = GenCollectedHeap::heap();
395 395 size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
396 396 (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
397 397 if (cms_free > expected_promotion) {
398 398 // Start a cms collection if there isn't enough space to promote
399 399 // for the next minor collection. Use the padded average as
400 400 // a safety factor.
401 401 cms_free -= expected_promotion;
402 402
403 403 // Adjust by the safety factor.
404 404 double cms_free_dbl = (double)cms_free;
405 405 double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
406 406 // Apply a further correction factor which tries to adjust
407 407 // for recent occurrence of concurrent mode failures.
408 408 cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
409 409 cms_free_dbl = cms_free_dbl * cms_adjustment;
410 410
411 411 if (PrintGCDetails && Verbose) {
412 412 gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
413 413 SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
414 414 cms_free, expected_promotion);
415 415 gclog_or_tty->print_cr(" cms_free_dbl %f cms_consumption_rate %f",
416 416 cms_free_dbl, cms_consumption_rate() + 1.0);
417 417 }
418 418 // Add 1 in case the consumption rate goes to zero.
419 419 return cms_free_dbl / (cms_consumption_rate() + 1.0);
420 420 }
421 421 return 0.0;
422 422 }
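// Worked example (illustrative values): with cms_free = 200M, an expected padded
// average promotion of 40M, CMSIncrementalSafetyFactor = 10 and a measured
// cms_consumption_rate() of 15M/sec, the code above yields
//   (200M - 40M) * (100 - 10) / 100 = 144M of usable free space, and returns
//   roughly 144M / 15M/sec ~= 9.6 seconds until the CMS generation is full.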
423 423
424 424 // Compare the duration of the cms collection to the
425 425 // time remaining before the cms generation is empty.
426 426 // Note that the time from the start of the cms collection
427 427 // to the start of the cms sweep (less than the total
428 428 // duration of the cms collection) can be used. This
429 429 // has been tried and some applications experienced
430 430 // promotion failures early in execution. This was
431 431 // possibly because the averages were not accurate
432 432 // enough at the beginning.
433 433 double CMSStats::time_until_cms_start() const {
434 434 // We add "gc0_period" to the "work" calculation
435 435 // below because this query is done (mostly) at the
436 436 // end of a scavenge, so we need to conservatively
437 437 // account for that much possible delay
438 438 // in the query so as to avoid concurrent mode failures
439 439 // due to starting the collection just a wee bit too
440 440 // late.
441 441 double work = cms_duration() + gc0_period();
442 442 double deadline = time_until_cms_gen_full();
443 443 // If a concurrent mode failure occurred recently, we want to be
444 444 // more conservative and halve our expected time_until_cms_gen_full()
445 445 if (work > deadline) {
446 446 if (Verbose && PrintGCDetails) {
447 447 gclog_or_tty->print(
448 448 " CMSCollector: collect because of anticipated promotion "
449 449 "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
450 450 gc0_period(), time_until_cms_gen_full());
451 451 }
452 452 return 0.0;
453 453 }
454 454 return work - deadline;
455 455 }
456 456
457 457 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
458 458 // amount of change to prevent wild oscillation.
459 459 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
460 460 unsigned int new_duty_cycle) {
461 461 assert(old_duty_cycle <= 100, "bad input value");
462 462 assert(new_duty_cycle <= 100, "bad input value");
463 463
464 464 // Note: use subtraction with caution since it may underflow (values are
465 465 // unsigned). Addition is safe since we're in the range 0-100.
466 466 unsigned int damped_duty_cycle = new_duty_cycle;
467 467 if (new_duty_cycle < old_duty_cycle) {
468 468 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
469 469 if (new_duty_cycle + largest_delta < old_duty_cycle) {
470 470 damped_duty_cycle = old_duty_cycle - largest_delta;
471 471 }
472 472 } else if (new_duty_cycle > old_duty_cycle) {
473 473 const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
474 474 if (new_duty_cycle > old_duty_cycle + largest_delta) {
475 475 damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
476 476 }
477 477 }
478 478 assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
479 479
480 480 if (CMSTraceIncrementalPacing) {
481 481 gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
482 482 old_duty_cycle, new_duty_cycle, damped_duty_cycle);
483 483 }
484 484 return damped_duty_cycle;
485 485 }
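// Worked example of the damping above (illustrative values): a requested drop
// from old_duty_cycle = 60 to new_duty_cycle = 20 is limited to
// largest_delta = MAX2(60/4, 5) = 15, so the damped result is 60 - 15 = 45.
// A requested rise from 20 to 80 is limited to MAX2(20/4, 15) = 15, giving
// MIN2(20 + 15, 100) = 35.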
486 486
487 487 unsigned int CMSStats::icms_update_duty_cycle_impl() {
488 488 assert(CMSIncrementalPacing && valid(),
489 489 "should be handled in icms_update_duty_cycle()");
490 490
491 491 double cms_time_so_far = cms_timer().seconds();
492 492 double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
493 493 double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
494 494
495 495 // Avoid division by 0.
496 496 double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
497 497 double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
498 498
499 499 unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
500 500 if (new_duty_cycle > _icms_duty_cycle) {
501 501 // Avoid very small duty cycles (1 or 2); 0 is allowed.
502 502 if (new_duty_cycle > 2) {
503 503 _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
504 504 new_duty_cycle);
505 505 }
506 506 } else if (_allow_duty_cycle_reduction) {
507 507 // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
508 508 new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
509 509 // Respect the minimum duty cycle.
510 510 unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
511 511 _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
512 512 }
513 513
514 514 if (PrintGCDetails || CMSTraceIncrementalPacing) {
515 515 gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
516 516 }
517 517
518 518 _allow_duty_cycle_reduction = false;
519 519 return _icms_duty_cycle;
520 520 }
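// Worked example (illustrative values): with cms_duration_per_mb() = 0.02 sec/MB
// and _cms_used_at_gc0_end = 500M, scaled_duration = 10 sec. If 4 sec of the
// current cycle have elapsed, scaled_duration_remaining ~= 6 sec; with
// time_until_cms_gen_full() = 12 sec, duty_cycle_dbl = 100 * 6 / 12 = 50, so a
// 50% duty cycle is requested before the damping and minimum-cycle checks above.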
521 521
522 522 #ifndef PRODUCT
523 523 void CMSStats::print_on(outputStream *st) const {
524 524 st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
525 525 st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
526 526 gc0_duration(), gc0_period(), gc0_promoted());
527 527 st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
528 528 cms_duration(), cms_duration_per_mb(),
529 529 cms_period(), cms_allocated());
530 530 st->print(",cms_since_beg=%g,cms_since_end=%g",
531 531 cms_time_since_begin(), cms_time_since_end());
532 532 st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
533 533 _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
534 534 if (CMSIncrementalMode) {
535 535 st->print(",dc=%d", icms_duty_cycle());
536 536 }
537 537
538 538 if (valid()) {
539 539 st->print(",promo_rate=%g,cms_alloc_rate=%g",
540 540 promotion_rate(), cms_allocation_rate());
541 541 st->print(",cms_consumption_rate=%g,time_until_full=%g",
542 542 cms_consumption_rate(), time_until_cms_gen_full());
543 543 }
544 544 st->print(" ");
545 545 }
546 546 #endif // #ifndef PRODUCT
547 547
548 548 CMSCollector::CollectorState CMSCollector::_collectorState =
549 549 CMSCollector::Idling;
550 550 bool CMSCollector::_foregroundGCIsActive = false;
551 551 bool CMSCollector::_foregroundGCShouldWait = false;
552 552
553 553 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
554 554 CardTableRS* ct,
555 555 ConcurrentMarkSweepPolicy* cp):
556 556 _cmsGen(cmsGen),
557 557 _ct(ct),
558 558 _ref_processor(NULL), // will be set later
559 559 _conc_workers(NULL), // may be set later
560 560 _abort_preclean(false),
561 561 _start_sampling(false),
562 562 _between_prologue_and_epilogue(false),
563 563 _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
564 564 _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
565 565 -1 /* lock-free */, "No_lock" /* dummy */),
566 566 _modUnionClosure(&_modUnionTable),
567 567 _modUnionClosurePar(&_modUnionTable),
568 568 // Adjust my span to cover old (cms) gen
569 569 _span(cmsGen->reserved()),
570 570 // Construct the is_alive_closure with _span & markBitMap
571 571 _is_alive_closure(_span, &_markBitMap),
572 572 _restart_addr(NULL),
573 573 _overflow_list(NULL),
574 574 _stats(cmsGen),
575 575 _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
576 576 _eden_chunk_array(NULL), // may be set in ctor body
577 577 _eden_chunk_capacity(0), // -- ditto --
578 578 _eden_chunk_index(0), // -- ditto --
579 579 _survivor_plab_array(NULL), // -- ditto --
580 580 _survivor_chunk_array(NULL), // -- ditto --
581 581 _survivor_chunk_capacity(0), // -- ditto --
582 582 _survivor_chunk_index(0), // -- ditto --
583 583 _ser_pmc_preclean_ovflw(0),
584 584 _ser_kac_preclean_ovflw(0),
585 585 _ser_pmc_remark_ovflw(0),
586 586 _par_pmc_remark_ovflw(0),
587 587 _ser_kac_ovflw(0),
588 588 _par_kac_ovflw(0),
589 589 #ifndef PRODUCT
590 590 _num_par_pushes(0),
591 591 #endif
592 592 _collection_count_start(0),
593 593 _verifying(false),
594 594 _icms_start_limit(NULL),
595 595 _icms_stop_limit(NULL),
596 596 _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
597 597 _completed_initialization(false),
598 598 _collector_policy(cp),
599 599 _should_unload_classes(CMSClassUnloadingEnabled),
600 600 _concurrent_cycles_since_last_unload(0),
601 601 _roots_scanning_options(SharedHeap::SO_None),
602 602 _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
603 603 _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
604 604 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
605 605 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
606 606 _cms_start_registered(false)
607 607 {
608 608 if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
609 609 ExplicitGCInvokesConcurrent = true;
610 610 }
611 611 // Now expand the span and allocate the collection support structures
612 612 // (MUT, marking bit map etc.) to cover both generations subject to
613 613 // collection.
614 614
615 615 // For use by dirty card to oop closures.
616 616 _cmsGen->cmsSpace()->set_collector(this);
617 617
618 618 // Allocate MUT and marking bit map
619 619 {
620 620 MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
621 621 if (!_markBitMap.allocate(_span)) {
622 622 warning("Failed to allocate CMS Bit Map");
623 623 return;
624 624 }
625 625 assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
626 626 }
627 627 {
628 628 _modUnionTable.allocate(_span);
629 629 assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
630 630 }
631 631
632 632 if (!_markStack.allocate(MarkStackSize)) {
633 633 warning("Failed to allocate CMS Marking Stack");
634 634 return;
635 635 }
636 636
637 637 // Support for multi-threaded concurrent phases
638 638 if (CMSConcurrentMTEnabled) {
639 639 if (FLAG_IS_DEFAULT(ConcGCThreads)) {
640 640 // just for now
641 641 FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
642 642 }
643 643 if (ConcGCThreads > 1) {
644 644 _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
645 645 ConcGCThreads, true);
646 646 if (_conc_workers == NULL) {
647 647 warning("GC/CMS: _conc_workers allocation failure: "
648 648 "forcing -CMSConcurrentMTEnabled");
649 649 CMSConcurrentMTEnabled = false;
650 650 } else {
651 651 _conc_workers->initialize_workers();
652 652 }
653 653 } else {
654 654 CMSConcurrentMTEnabled = false;
655 655 }
656 656 }
657 657 if (!CMSConcurrentMTEnabled) {
658 658 ConcGCThreads = 0;
659 659 } else {
660 660 // Turn off CMSCleanOnEnter optimization temporarily for
661 661 // the MT case where it's not fixed yet; see 6178663.
662 662 CMSCleanOnEnter = false;
663 663 }
664 664 assert((_conc_workers != NULL) == (ConcGCThreads > 1),
665 665 "Inconsistency");
666 666
667 667 // Parallel task queues; these are shared for the
668 668 // concurrent and stop-world phases of CMS, but
669 669 // are not shared with parallel scavenge (ParNew).
670 670 {
671 671 uint i;
672 672 uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
673 673
674 674 if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
675 675 || ParallelRefProcEnabled)
676 676 && num_queues > 0) {
677 677 _task_queues = new OopTaskQueueSet(num_queues);
678 678 if (_task_queues == NULL) {
679 679 warning("task_queues allocation failure.");
680 680 return;
681 681 }
682 682 _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
683 683 if (_hash_seed == NULL) {
684 684 warning("_hash_seed array allocation failure");
685 685 return;
686 686 }
687 687
688 688 typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
689 689 for (i = 0; i < num_queues; i++) {
690 690 PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
691 691 if (q == NULL) {
692 692 warning("work_queue allocation failure.");
693 693 return;
694 694 }
695 695 _task_queues->register_queue(i, q);
696 696 }
697 697 for (i = 0; i < num_queues; i++) {
698 698 _task_queues->queue(i)->initialize();
699 699 _hash_seed[i] = 17; // copied from ParNew
700 700 }
701 701 }
702 702 }
703 703
704 704 _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
705 705
706 706 // Clip CMSBootstrapOccupancy between 0 and 100.
707 707 _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
708 708
709 709 _full_gcs_since_conc_gc = 0;
710 710
711 711 // Now tell CMS generations the identity of their collector
712 712 ConcurrentMarkSweepGeneration::set_collector(this);
713 713
714 714 // Create & start a CMS thread for this CMS collector
715 715 _cmsThread = ConcurrentMarkSweepThread::start(this);
716 716 assert(cmsThread() != NULL, "CMS Thread should have been created");
717 717 assert(cmsThread()->collector() == this,
718 718 "CMS Thread should refer to this gen");
719 719 assert(CGC_lock != NULL, "Where's the CGC_lock?");
720 720
721 721 // Support for parallelizing young gen rescan
722 722 GenCollectedHeap* gch = GenCollectedHeap::heap();
723 723 _young_gen = gch->prev_gen(_cmsGen);
724 724 if (gch->supports_inline_contig_alloc()) {
725 725 _top_addr = gch->top_addr();
726 726 _end_addr = gch->end_addr();
727 727 assert(_young_gen != NULL, "no _young_gen");
728 728 _eden_chunk_index = 0;
729 729 _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
730 730 _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
731 731 if (_eden_chunk_array == NULL) {
732 732 _eden_chunk_capacity = 0;
733 733 warning("GC/CMS: _eden_chunk_array allocation failure");
734 734 }
735 735 }
736 736 assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
737 737
738 738 // Support for parallelizing survivor space rescan
739 739 if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
740 740 const size_t max_plab_samples =
741 741 ((DefNewGeneration*)_young_gen)->max_survivor_size() / plab_sample_minimum_size();
742 742
743 743 _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
744 744 _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
745 745 _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
746 746 if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
747 747 || _cursor == NULL) {
748 748 warning("Failed to allocate survivor plab/chunk array");
749 749 if (_survivor_plab_array != NULL) {
750 750 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
751 751 _survivor_plab_array = NULL;
752 752 }
753 753 if (_survivor_chunk_array != NULL) {
754 754 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
755 755 _survivor_chunk_array = NULL;
756 756 }
757 757 if (_cursor != NULL) {
758 758 FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
759 759 _cursor = NULL;
760 760 }
761 761 } else {
762 762 _survivor_chunk_capacity = 2*max_plab_samples;
763 763 for (uint i = 0; i < ParallelGCThreads; i++) {
764 764 HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
765 765 if (vec == NULL) {
766 766 warning("Failed to allocate survivor plab array");
767 767 for (int j = i; j > 0; j--) {
768 768 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array(), mtGC);
769 769 }
770 770 FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
771 771 FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
772 772 _survivor_plab_array = NULL;
773 773 _survivor_chunk_array = NULL;
774 774 _survivor_chunk_capacity = 0;
775 775 break;
776 776 } else {
777 777 ChunkArray* cur =
778 778 ::new (&_survivor_plab_array[i]) ChunkArray(vec,
779 779 max_plab_samples);
780 780 assert(cur->end() == 0, "Should be 0");
781 781 assert(cur->array() == vec, "Should be vec");
782 782 assert(cur->capacity() == max_plab_samples, "Error");
783 783 }
784 784 }
785 785 }
786 786 }
787 787 assert( ( _survivor_plab_array != NULL
788 788 && _survivor_chunk_array != NULL)
789 789 || ( _survivor_chunk_capacity == 0
790 790 && _survivor_chunk_index == 0),
791 791 "Error");
792 792
793 793 NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
794 794 _gc_counters = new CollectorCounters("CMS", 1);
795 795 _completed_initialization = true;
796 796 _inter_sweep_timer.start(); // start of time
797 797 }
798 798
799 799 size_t CMSCollector::plab_sample_minimum_size() {
800 800 // The default value of MinTLABSize is 2k, but there is
801 801 // no way to get the default value if the flag has been overridden.
802 802 return MAX2(ThreadLocalAllocBuffer::min_size() * HeapWordSize, 2 * K);
803 803 }
804 804
805 805 const char* ConcurrentMarkSweepGeneration::name() const {
806 806 return "concurrent mark-sweep generation";
807 807 }
808 808 void ConcurrentMarkSweepGeneration::update_counters() {
809 809 if (UsePerfData) {
810 810 _space_counters->update_all();
811 811 _gen_counters->update_all();
812 812 }
813 813 }
814 814
815 815 // this is an optimized version of update_counters(). it takes the
816 816 // used value as a parameter rather than computing it.
817 817 //
818 818 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
819 819 if (UsePerfData) {
820 820 _space_counters->update_used(used);
821 821 _space_counters->update_capacity();
822 822 _gen_counters->update_all();
823 823 }
824 824 }
825 825
826 826 void ConcurrentMarkSweepGeneration::print() const {
827 827 Generation::print();
828 828 cmsSpace()->print();
829 829 }
830 830
831 831 #ifndef PRODUCT
832 832 void ConcurrentMarkSweepGeneration::print_statistics() {
833 833 cmsSpace()->printFLCensus(0);
834 834 }
835 835 #endif
836 836
837 837 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
838 838 GenCollectedHeap* gch = GenCollectedHeap::heap();
839 839 if (PrintGCDetails) {
840 840 if (Verbose) {
841 841 gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
842 842 level(), short_name(), s, used(), capacity());
843 843 } else {
844 844 gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
845 845 level(), short_name(), s, used() / K, capacity() / K);
846 846 }
847 847 }
848 848 if (Verbose) {
849 849 gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
850 850 gch->used(), gch->capacity());
851 851 } else {
852 852 gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
853 853 gch->used() / K, gch->capacity() / K);
854 854 }
855 855 }
856 856
857 857 size_t
858 858 ConcurrentMarkSweepGeneration::contiguous_available() const {
859 859 // dld proposes an improvement in precision here. If the committed
860 860 // part of the space ends in a free block we should add that to
861 861 // uncommitted size in the calculation below. Will make this
862 862 // change later, staying with the approximation below for the
863 863 // time being. -- ysr.
864 864 return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
865 865 }
866 866
867 867 size_t
868 868 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
869 869 return _cmsSpace->max_alloc_in_words() * HeapWordSize;
870 870 }
871 871
872 872 size_t ConcurrentMarkSweepGeneration::max_available() const {
873 873 return free() + _virtual_space.uncommitted_size();
874 874 }
875 875
876 876 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
877 877 size_t available = max_available();
878 878 size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
879 879 bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
880 880 if (Verbose && PrintGCDetails) {
881 881 gclog_or_tty->print_cr(
882 882 "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
883 883 "max_promo("SIZE_FORMAT")",
884 884 res? "":" not", available, res? ">=":"<",
885 885 av_promo, max_promotion_in_bytes);
886 886 }
887 887 return res;
888 888 }
889 889
890 890 // At a promotion failure dump information on block layout in heap
891 891 // (cms old generation).
892 892 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
893 893 if (CMSDumpAtPromotionFailure) {
894 894 cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
895 895 }
896 896 }
897 897
898 898 CompactibleSpace*
899 899 ConcurrentMarkSweepGeneration::first_compaction_space() const {
900 900 return _cmsSpace;
901 901 }
902 902
903 903 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
904 904 // Clear the promotion information. These pointers can be adjusted
905 905 // along with all the other pointers into the heap but
906 906 // compaction is expected to be a rare event with
907 907 // a heap using cms so don't do it without seeing the need.
908 908 if (CollectedHeap::use_parallel_gc_threads()) {
909 909 for (uint i = 0; i < ParallelGCThreads; i++) {
910 910 _par_gc_thread_states[i]->promo.reset();
911 911 }
912 912 }
913 913 }
914 914
915 915 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
916 916 blk->do_space(_cmsSpace);
917 917 }
918 918
919 919 void ConcurrentMarkSweepGeneration::compute_new_size() {
920 920 assert_locked_or_safepoint(Heap_lock);
921 921
922 922 // If incremental collection failed, we just want to expand
923 923 // to the limit.
924 924 if (incremental_collection_failed()) {
925 925 clear_incremental_collection_failed();
926 926 grow_to_reserved();
927 927 return;
928 928 }
929 929
930 930 // The heap has been compacted but not reset yet.
931 931 // Any metric such as free() or used() will be incorrect.
932 932
933 933 CardGeneration::compute_new_size();
934 934
935 935 // Reset again after a possible resizing
936 936 if (did_compact()) {
937 937 cmsSpace()->reset_after_compaction();
938 938 }
939 939 }
940 940
941 941 void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
942 942 assert_locked_or_safepoint(Heap_lock);
943 943
944 944 // If incremental collection failed, we just want to expand
945 945 // to the limit.
946 946 if (incremental_collection_failed()) {
947 947 clear_incremental_collection_failed();
948 948 grow_to_reserved();
949 949 return;
950 950 }
951 951
952 952 double free_percentage = ((double) free()) / capacity();
953 953 double desired_free_percentage = (double) MinHeapFreeRatio / 100;
954 954 double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
955 955
956 956 // compute expansion delta needed for reaching desired free percentage
957 957 if (free_percentage < desired_free_percentage) {
958 958 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
959 959 assert(desired_capacity >= capacity(), "invalid expansion size");
960 960 size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
961 961 if (PrintGCDetails && Verbose) {
962 962 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
963 963 gclog_or_tty->print_cr("\nFrom compute_new_size: ");
964 964 gclog_or_tty->print_cr(" Free fraction %f", free_percentage);
965 965 gclog_or_tty->print_cr(" Desired free fraction %f",
966 966 desired_free_percentage);
967 967 gclog_or_tty->print_cr(" Maximum free fraction %f",
968 968 maximum_free_percentage);
969 969 gclog_or_tty->print_cr(" Capactiy "SIZE_FORMAT, capacity()/1000);
970 970 gclog_or_tty->print_cr(" Desired capacity "SIZE_FORMAT,
971 971 desired_capacity/1000);
972 972 int prev_level = level() - 1;
973 973 if (prev_level >= 0) {
974 974 size_t prev_size = 0;
975 975 GenCollectedHeap* gch = GenCollectedHeap::heap();
976 976 Generation* prev_gen = gch->_gens[prev_level];
977 977 prev_size = prev_gen->capacity();
978 978 gclog_or_tty->print_cr(" Younger gen size "SIZE_FORMAT,
979 979 prev_size/1000);
980 980 }
981 981 gclog_or_tty->print_cr(" unsafe_max_alloc_nogc "SIZE_FORMAT,
982 982 unsafe_max_alloc_nogc()/1000);
983 983 gclog_or_tty->print_cr(" contiguous available "SIZE_FORMAT,
984 984 contiguous_available()/1000);
985 985 gclog_or_tty->print_cr(" Expand by "SIZE_FORMAT" (bytes)",
986 986 expand_bytes);
987 987 }
988 988 // safe if expansion fails
989 989 expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
990 990 if (PrintGCDetails && Verbose) {
991 991 gclog_or_tty->print_cr(" Expanded free fraction %f",
992 992 ((double) free()) / capacity());
993 993 }
994 994 } else {
995 995 size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
996 996 assert(desired_capacity <= capacity(), "invalid expansion size");
997 997 size_t shrink_bytes = capacity() - desired_capacity;
998 998 // Don't shrink unless the delta is greater than the minimum shrink we want
999 999 if (shrink_bytes >= MinHeapDeltaBytes) {
1000 1000 shrink_free_list_by(shrink_bytes);
1001 1001 }
1002 1002 }
1003 1003 }
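// Worked example (illustrative values): with used() = 600M, capacity() = 800M
// and MinHeapFreeRatio = 40, free_percentage = 200M/800M = 0.25 is below the
// desired 0.40, so the code above computes desired_capacity = 600M / (1 - 0.40)
// = 1000M and expands by MAX2(1000M - 800M, MinHeapDeltaBytes), i.e. 200M when
// MinHeapDeltaBytes is smaller than that.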
1004 1004
1005 1005 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
1006 1006 return cmsSpace()->freelistLock();
1007 1007 }
1008 1008
1009 1009 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
1010 1010 bool tlab) {
1011 1011 CMSSynchronousYieldRequest yr;
1012 1012 MutexLockerEx x(freelistLock(),
1013 1013 Mutex::_no_safepoint_check_flag);
1014 1014 return have_lock_and_allocate(size, tlab);
1015 1015 }
1016 1016
1017 1017 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
1018 1018 bool tlab /* ignored */) {
1019 1019 assert_lock_strong(freelistLock());
1020 1020 size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
1021 1021 HeapWord* res = cmsSpace()->allocate(adjustedSize);
1022 1022 // Allocate the object live (grey) if the background collector has
1023 1023 // started marking. This is necessary because the marker may
1024 1024 // have passed this address and consequently this object will
1025 1025 // not otherwise be greyed and would be incorrectly swept up.
1026 1026 // Note that if this object contains references, the writing
1027 1027 // of those references will dirty the card containing this object
1028 1028 // allowing the object to be blackened (and its references scanned)
1029 1029 // either during a preclean phase or at the final checkpoint.
1030 1030 if (res != NULL) {
1031 1031 // We may block here with an uninitialized object with
1032 1032 // its mark-bit or P-bits not yet set. Such objects need
1033 1033 // to be safely navigable by block_start().
1034 1034 assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
1035 1035 assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
1036 1036 collector()->direct_allocated(res, adjustedSize);
1037 1037 _direct_allocated_words += adjustedSize;
1038 1038 // allocation counters
1039 1039 NOT_PRODUCT(
1040 1040 _numObjectsAllocated++;
1041 1041 _numWordsAllocated += (int)adjustedSize;
1042 1042 )
1043 1043 }
1044 1044 return res;
1045 1045 }
1046 1046
1047 1047 // In the case of direct allocation by mutators in a generation that
1048 1048 // is being concurrently collected, the object must be allocated
1049 1049 // live (grey) if the background collector has started marking.
1050 1050 // This is necessary because the marker may
1051 1051 // have passed this address and consequently this object will
1052 1052 // not otherwise be greyed and would be incorrectly swept up.
1053 1053 // Note that if this object contains references, the writing
1054 1054 // of those references will dirty the card containing this object
1055 1055 // allowing the object to be blackened (and its references scanned)
1056 1056 // either during a preclean phase or at the final checkpoint.
1057 1057 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
1058 1058 assert(_markBitMap.covers(start, size), "Out of bounds");
1059 1059 if (_collectorState >= Marking) {
1060 1060 MutexLockerEx y(_markBitMap.lock(),
1061 1061 Mutex::_no_safepoint_check_flag);
1062 1062 // [see comments preceding SweepClosure::do_blk() below for details]
1063 1063 //
1064 1064 // Can the P-bits be deleted now? JJJ
1065 1065 //
1066 1066 // 1. need to mark the object as live so it isn't collected
1067 1067 // 2. need to mark the 2nd bit to indicate the object may be uninitialized
1068 1068 // 3. need to mark the end of the object so marking, precleaning or sweeping
1069 1069 // can skip over uninitialized or unparsable objects. An allocated
1070 1070 // object is considered uninitialized for our purposes as long as
1071 1071 // its klass word is NULL. All old gen objects are parsable
1072 1072 // as soon as they are initialized.)
1073 1073 _markBitMap.mark(start); // object is live
1074 1074 _markBitMap.mark(start + 1); // object is potentially uninitialized?
1075 1075 _markBitMap.mark(start + size - 1);
1076 1076 // mark end of object
1077 1077 }
1078 1078 // check that oop looks uninitialized
1079 1079 assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
1080 1080 }
1081 1081
1082 1082 void CMSCollector::promoted(bool par, HeapWord* start,
1083 1083 bool is_obj_array, size_t obj_size) {
1084 1084 assert(_markBitMap.covers(start), "Out of bounds");
1085 1085 // See comment in direct_allocated() about when objects should
1086 1086 // be allocated live.
1087 1087 if (_collectorState >= Marking) {
1088 1088 // we already hold the marking bit map lock, taken in
1089 1089 // the prologue
1090 1090 if (par) {
1091 1091 _markBitMap.par_mark(start);
1092 1092 } else {
1093 1093 _markBitMap.mark(start);
1094 1094 }
1095 1095 // We don't need to mark the object as uninitialized (as
1096 1096 // in direct_allocated above) because this is being done with the
1097 1097 // world stopped and the object will be initialized by the
1098 1098 // time the marking, precleaning or sweeping get to look at it.
1099 1099 // But see the code for copying objects into the CMS generation,
1100 1100 // where we need to ensure that concurrent readers of the
1101 1101 // block offset table are able to safely navigate a block that
1102 1102 // is in flux from being free to being allocated (and in
1103 1103 // transition while being copied into) and subsequently
1104 1104 // becoming a bona-fide object when the copy/promotion is complete.
1105 1105 assert(SafepointSynchronize::is_at_safepoint(),
1106 1106 "expect promotion only at safepoints");
1107 1107
1108 1108 if (_collectorState < Sweeping) {
1109 1109 // Mark the appropriate cards in the modUnionTable, so that
1110 1110 // this object gets scanned before the sweep. If this is
1111 1111 // not done, CMS generation references in the object might
1112 1112 // not get marked.
1113 1113 // For the case of arrays, which are otherwise precisely
1114 1114 // marked, we need to dirty the entire array, not just its head.
1115 1115 if (is_obj_array) {
1116 1116 // The [par_]mark_range() method expects mr.end() below to
1117 1117 // be aligned to the granularity of a bit's representation
1118 1118 // in the heap. In the case of the MUT below, that's a
1119 1119 // card size.
1120 1120 MemRegion mr(start,
1121 1121 (HeapWord*)round_to((intptr_t)(start + obj_size),
1122 1122 CardTableModRefBS::card_size /* bytes */));
1123 1123 if (par) {
1124 1124 _modUnionTable.par_mark_range(mr);
1125 1125 } else {
1126 1126 _modUnionTable.mark_range(mr);
1127 1127 }
1128 1128 } else { // not an obj array; we can just mark the head
1129 1129 if (par) {
1130 1130 _modUnionTable.par_mark(start);
1131 1131 } else {
1132 1132 _modUnionTable.mark(start);
1133 1133 }
1134 1134 }
1135 1135 }
1136 1136 }
1137 1137 }
1138 1138
1139 1139 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1140 1140 {
1141 1141 size_t delta = pointer_delta(addr, space->bottom());
1142 1142 return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1143 1143 }
1144 1144
1145 1145 void CMSCollector::icms_update_allocation_limits()
1146 1146 {
1147 1147 Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
1148 1148 EdenSpace* eden = gen0->as_DefNewGeneration()->eden();
1149 1149
1150 1150 const unsigned int duty_cycle = stats().icms_update_duty_cycle();
1151 1151 if (CMSTraceIncrementalPacing) {
1152 1152 stats().print();
1153 1153 }
1154 1154
1155 1155 assert(duty_cycle <= 100, "invalid duty cycle");
1156 1156 if (duty_cycle != 0) {
1157 1157 // The duty_cycle is a percentage between 0 and 100; convert to words and
1158 1158 // then compute the offset from the endpoints of the space.
1159 1159 size_t free_words = eden->free() / HeapWordSize;
1160 1160 double free_words_dbl = (double)free_words;
1161 1161 size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
1162 1162 size_t offset_words = (free_words - duty_cycle_words) / 2;
1163 1163
1164 1164 _icms_start_limit = eden->top() + offset_words;
1165 1165 _icms_stop_limit = eden->end() - offset_words;
1166 1166
1167 1167 // The limits may be adjusted (shifted to the right) by
1168 1168 // CMSIncrementalOffset, to allow the application more mutator time after a
1169 1169 // young gen gc (when all mutators were stopped) and before CMS starts and
1170 1170 // takes away one or more cpus.
1171 1171 if (CMSIncrementalOffset != 0) {
1172 1172 double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
1173 1173 size_t adjustment = (size_t)adjustment_dbl;
1174 1174 HeapWord* tmp_stop = _icms_stop_limit + adjustment;
1175 1175 if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
1176 1176 _icms_start_limit += adjustment;
1177 1177 _icms_stop_limit = tmp_stop;
1178 1178 }
1179 1179 }
1180 1180 }
1181 1181 if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
1182 1182 _icms_start_limit = _icms_stop_limit = eden->end();
1183 1183 }
1184 1184
1185 1185 // Install the new start limit.
1186 1186 eden->set_soft_end(_icms_start_limit);
1187 1187
1188 1188 if (CMSTraceIncrementalMode) {
1189 1189 gclog_or_tty->print(" icms alloc limits: "
1190 1190 PTR_FORMAT "," PTR_FORMAT
1191 1191 " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
1192 1192 p2i(_icms_start_limit), p2i(_icms_stop_limit),
1193 1193 percent_of_space(eden, _icms_start_limit),
1194 1194 percent_of_space(eden, _icms_stop_limit));
1195 1195 if (Verbose) {
1196 1196 gclog_or_tty->print("eden: ");
1197 1197 eden->print_on(gclog_or_tty);
1198 1198 }
1199 1199 }
1200 1200 }
1201 1201
1202 1202 // Any changes here should try to maintain the invariant
1203 1203 // that if this method is called with _icms_start_limit
1204 1204 // and _icms_stop_limit both NULL, then it should return NULL
1205 1205 // and not notify the icms thread.
1206 1206 HeapWord*
1207 1207 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
1208 1208 size_t word_size)
1209 1209 {
1210 1210 // A start_limit equal to end() means the duty cycle is 0, so treat that as a
1211 1211 // nop.
1212 1212 if (CMSIncrementalMode && _icms_start_limit != space->end()) {
1213 1213 if (top <= _icms_start_limit) {
1214 1214 if (CMSTraceIncrementalMode) {
1215 1215 space->print_on(gclog_or_tty);
1216 1216 gclog_or_tty->stamp();
1217 1217 gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
1218 1218 ", new limit=" PTR_FORMAT
1219 1219 " (" SIZE_FORMAT "%%)",
1220 1220 p2i(top), p2i(_icms_stop_limit),
1221 1221 percent_of_space(space, _icms_stop_limit));
1222 1222 }
1223 1223 ConcurrentMarkSweepThread::start_icms();
1224 1224 assert(top < _icms_stop_limit, "Tautology");
1225 1225 if (word_size < pointer_delta(_icms_stop_limit, top)) {
1226 1226 return _icms_stop_limit;
1227 1227 }
1228 1228
1229 1229 // The allocation will cross both the _start and _stop limits, so do the
1230 1230 // stop notification also and return end().
1231 1231 if (CMSTraceIncrementalMode) {
1232 1232 space->print_on(gclog_or_tty);
1233 1233 gclog_or_tty->stamp();
1234 1234 gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
1235 1235 ", new limit=" PTR_FORMAT
1236 1236 " (" SIZE_FORMAT "%%)",
1237 1237 p2i(top), p2i(space->end()),
1238 1238 percent_of_space(space, space->end()));
1239 1239 }
1240 1240 ConcurrentMarkSweepThread::stop_icms();
1241 1241 return space->end();
1242 1242 }
1243 1243
1244 1244 if (top <= _icms_stop_limit) {
1245 1245 if (CMSTraceIncrementalMode) {
1246 1246 space->print_on(gclog_or_tty);
1247 1247 gclog_or_tty->stamp();
1248 1248 gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
1249 1249 ", new limit=" PTR_FORMAT
1250 1250 " (" SIZE_FORMAT "%%)",
1251 1251 top, space->end(),
1252 1252 percent_of_space(space, space->end()));
1253 1253 }
1254 1254 ConcurrentMarkSweepThread::stop_icms();
1255 1255 return space->end();
1256 1256 }
1257 1257
1258 1258 if (CMSTraceIncrementalMode) {
1259 1259 space->print_on(gclog_or_tty);
1260 1260 gclog_or_tty->stamp();
1261 1261 gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
1262 1262 ", new limit=" PTR_FORMAT,
1263 1263 top, NULL);
1264 1264 }
1265 1265 }
1266 1266
1267 1267 return NULL;
1268 1268 }
1269 1269
1270 1270 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1271 1271 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1272 1272 // allocate, copy and if necessary update promoinfo --
1273 1273 // delegate to underlying space.
1274 1274 assert_lock_strong(freelistLock());
1275 1275
1276 1276 #ifndef PRODUCT
1277 1277 if (Universe::heap()->promotion_should_fail()) {
1278 1278 return NULL;
1279 1279 }
1280 1280 #endif // #ifndef PRODUCT
1281 1281
1282 1282 oop res = _cmsSpace->promote(obj, obj_size);
1283 1283 if (res == NULL) {
1284 1284 // expand and retry
1285 1285 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords
1286 1286 expand(s*HeapWordSize, MinHeapDeltaBytes,
1287 1287 CMSExpansionCause::_satisfy_promotion);
1288 1288 // Since there's currently no next generation, we don't try to promote
1289 1289 // into a more senior generation.
1290 1290 assert(next_gen() == NULL, "assumption, based upon which no attempt "
1291 1291 "is made to pass on a possibly failing "
1292 1292 "promotion to next generation");
1293 1293 res = _cmsSpace->promote(obj, obj_size);
1294 1294 }
1295 1295 if (res != NULL) {
1296 1296 // See comment in allocate() about when objects should
1297 1297 // be allocated live.
1298 1298 assert(obj->is_oop(), "Will dereference klass pointer below");
1299 1299 collector()->promoted(false, // Not parallel
1300 1300 (HeapWord*)res, obj->is_objArray(), obj_size);
1301 1301 // promotion counters
1302 1302 NOT_PRODUCT(
1303 1303 _numObjectsPromoted++;
1304 1304 _numWordsPromoted +=
1305 1305 (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1306 1306 )
1307 1307 }
1308 1308 return res;
1309 1309 }
1310 1310
1311 1311
1312 1312 HeapWord*
1313 1313 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
1314 1314 HeapWord* top,
1315 1315 size_t word_sz)
1316 1316 {
1317 1317 return collector()->allocation_limit_reached(space, top, word_sz);
1318 1318 }
1319 1319
1320 1320 // IMPORTANT: Notes on object size recognition in CMS.
1321 1321 // ---------------------------------------------------
1322 1322 // A block of storage in the CMS generation is always in
1323 1323 // one of three states: a free block (FREE), an allocated
1324 1324 // object (OBJECT) whose size() method reports the correct size,
1325 1325 // or an intermediate state (TRANSIENT) in which its size cannot
1326 1326 // be accurately determined.
1327 1327 // STATE IDENTIFICATION: (32 bit and 64 bit w/o COOPS)
1328 1328 // -----------------------------------------------------
1329 1329 // FREE: klass_word & 1 == 1; mark_word holds block size
1330 1330 //
1331 1331 // OBJECT: klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1332 1332 // obj->size() computes correct size
1333 1333 //
1334 1334 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1335 1335 //
1336 1336 // STATE IDENTIFICATION: (64 bit+COOPS)
1337 1337 // ------------------------------------
1338 1338 // FREE: mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1339 1339 //
1340 1340 // OBJECT: klass_word installed; klass_word != 0;
1341 1341 // obj->size() computes correct size
1342 1342 //
1343 1343 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1344 1344 //
1345 1345 //
1346 1346 // STATE TRANSITION DIAGRAM
1347 1347 //
1348 1348 // mut / parnew mut / parnew
1349 1349 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
1350 1350 // ^ |
1351 1351 // |------------------------ DEAD <------------------------------------|
1352 1352 // sweep mut
1353 1353 //
1354 1354 // While a block is in TRANSIENT state its size cannot be determined
1355 1355 // so readers will either need to come back later or stall until
1356 1356 // the size can be determined. Note that for the case of direct
1357 1357 // allocation, P-bits, when available, may be used to determine the
1358 1358 // size of an object that may not yet have been initialized.
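//
// A minimal sketch of the identification rules above (illustration only, not
// used by the collector; covers the 32 bit and 64 bit w/o COOPS case and uses a
// hypothetical helper operating on the raw klass word):
//
//   enum BlockState { BLOCK_FREE, BLOCK_OBJECT, BLOCK_TRANSIENT };
//
//   BlockState classify_block(intptr_t klass_word) {
//     if ((klass_word & 1) == 1) {
//       return BLOCK_FREE;       // mark_word holds the block size
//     }
//     if (klass_word != 0) {
//       return BLOCK_OBJECT;     // obj->size() now reports the correct size
//     }
//     return BLOCK_TRANSIENT;    // size is indeterminate; retry or stall
//   }
//
// In the 64 bit + COOPS case the FREE test keys off CMS_FREE_BIT in the mark
// word instead of the low bit of the klass word.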
1359 1359
1360 1360 // Things to support parallel young-gen collection.
1361 1361 oop
1362 1362 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1363 1363 oop old, markOop m,
1364 1364 size_t word_sz) {
1365 1365 #ifndef PRODUCT
1366 1366 if (Universe::heap()->promotion_should_fail()) {
1367 1367 return NULL;
1368 1368 }
1369 1369 #endif // #ifndef PRODUCT
1370 1370
1371 1371 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1372 1372 PromotionInfo* promoInfo = &ps->promo;
1373 1373 // if we are tracking promotions, then first ensure space for
1374 1374 // promotion (including spooling space for saving header if necessary).
1375 1375 // then allocate and copy, then track promoted info if needed.
1376 1376 // When tracking (see PromotionInfo::track()), the mark word may
1377 1377 // be displaced and in this case restoration of the mark word
1378 1378 // occurs in the (oop_since_save_marks_)iterate phase.
1379 1379 if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1380 1380 // Out of space for allocating spooling buffers;
1381 1381 // try expanding and allocating spooling buffers.
1382 1382 if (!expand_and_ensure_spooling_space(promoInfo)) {
1383 1383 return NULL;
1384 1384 }
1385 1385 }
1386 1386 assert(promoInfo->has_spooling_space(), "Control point invariant");
1387 1387 const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1388 1388 HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1389 1389 if (obj_ptr == NULL) {
1390 1390 obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1391 1391 if (obj_ptr == NULL) {
1392 1392 return NULL;
1393 1393 }
1394 1394 }
1395 1395 oop obj = oop(obj_ptr);
1396 1396 OrderAccess::storestore();
1397 1397 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1398 1398 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1399 1399 // IMPORTANT: See note on object initialization for CMS above.
1400 1400 // Otherwise, copy the object. Here we must be careful to insert the
1401 1401 // klass pointer last, since this marks the block as an allocated object.
1402 1402 // Except with compressed oops it's the mark word.
1403 1403 HeapWord* old_ptr = (HeapWord*)old;
1404 1404 // Restore the mark word copied above.
1405 1405 obj->set_mark(m);
1406 1406 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1407 1407 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1408 1408 OrderAccess::storestore();
1409 1409
1410 1410 if (UseCompressedClassPointers) {
1411 1411 // Copy gap missed by (aligned) header size calculation below
1412 1412 obj->set_klass_gap(old->klass_gap());
1413 1413 }
1414 1414 if (word_sz > (size_t)oopDesc::header_size()) {
1415 1415 Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1416 1416 obj_ptr + oopDesc::header_size(),
1417 1417 word_sz - oopDesc::header_size());
1418 1418 }
1419 1419
1420 1420 // Now we can track the promoted object, if necessary. We take care
1421 1421 // to delay the transition from uninitialized to full object
1422 1422 // (i.e., insertion of klass pointer) until after, so that it
1423 1423 // atomically becomes a promoted object.
1424 1424 if (promoInfo->tracking()) {
1425 1425 promoInfo->track((PromotedObject*)obj, old->klass());
1426 1426 }
1427 1427 assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1428 1428 assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1429 1429 assert(old->is_oop(), "Will use and dereference old klass ptr below");
1430 1430
1431 1431 // Finally, install the klass pointer (this should be volatile).
1432 1432 OrderAccess::storestore();
1433 1433 obj->set_klass(old->klass());
1434 1434 // We should now be able to calculate the right size for this object
1435 1435 assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1436 1436
1437 1437 collector()->promoted(true, // parallel
1438 1438 obj_ptr, old->is_objArray(), word_sz);
1439 1439
1440 1440 NOT_PRODUCT(
1441 1441 Atomic::inc_ptr(&_numObjectsPromoted);
1442 1442 Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
1443 1443 )
1444 1444
1445 1445 return obj;
1446 1446 }
1447 1447
1448 1448 void
1449 1449 ConcurrentMarkSweepGeneration::
1450 1450 par_promote_alloc_undo(int thread_num,
1451 1451 HeapWord* obj, size_t word_sz) {
1452 1452 // CMS does not support promotion undo.
1453 1453 ShouldNotReachHere();
1454 1454 }
1455 1455
1456 1456 void
1457 1457 ConcurrentMarkSweepGeneration::
1458 1458 par_promote_alloc_done(int thread_num) {
1459 1459 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1460 1460 ps->lab.retire(thread_num);
1461 1461 }
1462 1462
1463 1463 void
1464 1464 ConcurrentMarkSweepGeneration::
1465 1465 par_oop_since_save_marks_iterate_done(int thread_num) {
1466 1466 CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1467 1467 ParScanWithoutBarrierClosure* dummy_cl = NULL;
1468 1468 ps->promo.promoted_oops_iterate_nv(dummy_cl);
1469 1469 }
1470 1470
1471 1471 bool ConcurrentMarkSweepGeneration::should_collect(bool full,
1472 1472 size_t size,
1473 1473 bool tlab)
1474 1474 {
1475 1475 // We allow a STW collection only if a full
1476 1476 // collection was requested.
1477 1477 return full || should_allocate(size, tlab); // FIX ME !!!
1478 1478 // This and promotion failure handling are connected at the
1479 1479 // hip and should be fixed by untying them.
1480 1480 }
1481 1481
1482 1482 bool CMSCollector::shouldConcurrentCollect() {
1483 1483 if (_full_gc_requested) {
1484 1484 if (Verbose && PrintGCDetails) {
1485 1485 gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1486 1486         "gc request (or gc_locker)");
1487 1487 }
1488 1488 return true;
1489 1489 }
1490 1490
1491 1491 // For debugging purposes, change the type of collection.
1492 1492 // If the rotation is not on the concurrent collection
1493 1493 // type, don't start a concurrent collection.
1494 1494 NOT_PRODUCT(
1495 1495 if (RotateCMSCollectionTypes &&
1496 1496 (_cmsGen->debug_collection_type() !=
1497 1497 ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1498 1498 assert(_cmsGen->debug_collection_type() !=
1499 1499 ConcurrentMarkSweepGeneration::Unknown_collection_type,
1500 1500 "Bad cms collection type");
1501 1501 return false;
1502 1502 }
1503 1503 )
1504 1504
1505 1505 FreelistLocker x(this);
1506 1506 // ------------------------------------------------------------------
1507 1507 // Print out lots of information which affects the initiation of
1508 1508 // a collection.
1509 1509 if (PrintCMSInitiationStatistics && stats().valid()) {
1510 1510 gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1511 1511 gclog_or_tty->stamp();
1512 1512 gclog_or_tty->cr();
1513 1513 stats().print_on(gclog_or_tty);
1514 1514 gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1515 1515 stats().time_until_cms_gen_full());
1516 1516     gclog_or_tty->print_cr("free=" SIZE_FORMAT, _cmsGen->free());
1517 1517     gclog_or_tty->print_cr("contiguous_available=" SIZE_FORMAT,
1518 1518 _cmsGen->contiguous_available());
1519 1519 gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1520 1520 gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1521 1521 gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1522 1522 gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1523 1523 gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
1524 1524 gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
1525 1525 gclog_or_tty->print_cr("metadata initialized %d",
1526 1526 MetaspaceGC::should_concurrent_collect());
1527 1527 }
1528 1528 // ------------------------------------------------------------------
1529 1529
1530 1530 // If the estimated time to complete a cms collection (cms_duration())
1531 1531 // is less than the estimated time remaining until the cms generation
1532 1532 // is full, start a collection.
1533 1533 if (!UseCMSInitiatingOccupancyOnly) {
1534 1534 if (stats().valid()) {
1535 1535 if (stats().time_until_cms_start() == 0.0) {
1536 1536 return true;
1537 1537 }
1538 1538 } else {
1539 1539 // We want to conservatively collect somewhat early in order
1540 1540 // to try and "bootstrap" our CMS/promotion statistics;
1541 1541 // this branch will not fire after the first successful CMS
1542 1542 // collection because the stats should then be valid.
1543 1543 if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1544 1544 if (Verbose && PrintGCDetails) {
1545 1545 gclog_or_tty->print_cr(
1546 1546 " CMSCollector: collect for bootstrapping statistics:"
1547 1547 " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1548 1548 _bootstrap_occupancy);
1549 1549 }
1550 1550 return true;
1551 1551 }
1552 1552 }
1553 1553 }
1554 1554
1555 1555   // Otherwise, we start a collection cycle if the
1556 1556   // old gen wants a collection cycle started. It may use
1557 1557   // an appropriate criterion for making this decision.
1558 1558 // XXX We need to make sure that the gen expansion
1559 1559 // criterion dovetails well with this. XXX NEED TO FIX THIS
1560 1560 if (_cmsGen->should_concurrent_collect()) {
1561 1561 if (Verbose && PrintGCDetails) {
1562 1562 gclog_or_tty->print_cr("CMS old gen initiated");
1563 1563 }
1564 1564 return true;
1565 1565 }
1566 1566
1567 1567 // We start a collection if we believe an incremental collection may fail;
1568 1568 // this is not likely to be productive in practice because it's probably too
1569 1569 // late anyway.
1570 1570 GenCollectedHeap* gch = GenCollectedHeap::heap();
1571 1571 assert(gch->collector_policy()->is_two_generation_policy(),
1572 1572 "You may want to check the correctness of the following");
1573 1573 if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1574 1574 if (Verbose && PrintGCDetails) {
1575 1575 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1576 1576 }
1577 1577 return true;
1578 1578 }
1579 1579
1580 1580 if (MetaspaceGC::should_concurrent_collect()) {
1581 1581 if (Verbose && PrintGCDetails) {
1582 1582 gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
1583 1583 }
1584 1584 return true;
1585 1585 }
1586 1586
1587 1587 // CMSTriggerInterval starts a CMS cycle if enough time has passed.
1588 1588 if (CMSTriggerInterval >= 0) {
1589 1589 if (CMSTriggerInterval == 0) {
1590 1590 // Trigger always
1591 1591 return true;
1592 1592 }
1593 1593
1594 1594 // Check the CMS time since begin (we do not check the stats validity
1595 1595 // as we want to be able to trigger the first CMS cycle as well)
1596 1596 if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
1597 1597 if (Verbose && PrintGCDetails) {
1598 1598 if (stats().valid()) {
1599 1599 gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1600 1600 stats().cms_time_since_begin());
1601 1601 } else {
1602 1602 gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
1603 1603 }
1604 1604 }
1605 1605 return true;
1606 1606 }
1607 1607 }
1608 1608
1609 1609 return false;
1610 1610 }
1611 1611
1612 1612 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1613 1613
1614 1614 // Clear _expansion_cause fields of constituent generations
1615 1615 void CMSCollector::clear_expansion_cause() {
1616 1616 _cmsGen->clear_expansion_cause();
1617 1617 }
1618 1618
1619 1619 // We should be conservative in starting a collection cycle. To
1620 1620 // start too eagerly runs the risk of collecting too often in the
1621 1621 // extreme. To collect too rarely falls back on full collections,
1622 1622 // which works, even if not optimal in terms of concurrent work.
1623 1623 // As a workaround for collecting too eagerly, use the flag
1624 1624 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
1625 1625 // giving the user an easily understandable way of controlling the
1626 1626 // collections.
1627 1627 // We want to start a new collection cycle if any of the following
1628 1628 // conditions hold:
1629 1629 // . our current occupancy exceeds the configured initiating occupancy
1630 1630 // for this generation, or
1631 1631 // . we recently needed to expand this space and have not, since that
1632 1632 // expansion, done a collection of this generation, or
1633 1633 // . the underlying space believes that it may be a good idea to initiate
1634 1634 // a concurrent collection (this may be based on criteria such as the
1635 1635 // following: the space uses linear allocation and linear allocation is
1636 1636 // going to fail, or there is believed to be excessive fragmentation in
1637 1637 // the generation, etc... or ...
1638 1638 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1639 1639 // the case of the old generation; see CR 6543076):
1640 1640 // we may be approaching a point at which allocation requests may fail because
1641 1641 // we will be out of sufficient free space given allocation rate estimates.]
1642 1642 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1643 1643
1644 1644 assert_lock_strong(freelistLock());
1645 1645 if (occupancy() > initiating_occupancy()) {
1646 1646 if (PrintGCDetails && Verbose) {
1647 1647 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
1648 1648 short_name(), occupancy(), initiating_occupancy());
1649 1649 }
1650 1650 return true;
1651 1651 }
1652 1652 if (UseCMSInitiatingOccupancyOnly) {
1653 1653 return false;
1654 1654 }
1655 1655 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1656 1656 if (PrintGCDetails && Verbose) {
1657 1657 gclog_or_tty->print(" %s: collect because expanded for allocation ",
1658 1658 short_name());
1659 1659 }
1660 1660 return true;
1661 1661 }
1662 1662 if (_cmsSpace->should_concurrent_collect()) {
1663 1663 if (PrintGCDetails && Verbose) {
1664 1664 gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1665 1665 short_name());
1666 1666 }
1667 1667 return true;
1668 1668 }
1669 1669 return false;
1670 1670 }
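// Tuning sketch (illustration only, based on the criteria above): the occupancy
// criterion is controlled by CMSInitiatingOccupancyFraction, and adding
// UseCMSInitiatingOccupancyOnly restricts initiation to that single criterion,
// e.g. (MyApp is a hypothetical application class):
//
//   java -XX:+UseConcMarkSweepGC \
//        -XX:CMSInitiatingOccupancyFraction=70 \
//        -XX:+UseCMSInitiatingOccupancyOnly MyApp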
1671 1671
1672 1672 void ConcurrentMarkSweepGeneration::collect(bool full,
1673 1673 bool clear_all_soft_refs,
1674 1674 size_t size,
1675 1675 bool tlab)
1676 1676 {
1677 1677 collector()->collect(full, clear_all_soft_refs, size, tlab);
1678 1678 }
1679 1679
1680 1680 void CMSCollector::collect(bool full,
1681 1681 bool clear_all_soft_refs,
1682 1682 size_t size,
1683 1683 bool tlab)
1684 1684 {
1685 1685 if (!UseCMSCollectionPassing && _collectorState > Idling) {
1686 1686 // For debugging purposes skip the collection if the state
1687 1687 // is not currently idle
1688 1688 if (TraceCMSState) {
1689 1689 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1690 1690 Thread::current(), full, _collectorState);
1691 1691 }
1692 1692 return;
1693 1693 }
1694 1694
1695 1695 // The following "if" branch is present for defensive reasons.
1696 1696 // In the current uses of this interface, it can be replaced with:
1697 1697   //  assert(!GC_locker::is_active(), "Can't be called otherwise");
1698 1698 // But I am not placing that assert here to allow future
1699 1699 // generality in invoking this interface.
1700 1700 if (GC_locker::is_active()) {
1701 1701 // A consistency test for GC_locker
1702 1702 assert(GC_locker::needs_gc(), "Should have been set already");
1703 1703 // Skip this foreground collection, instead
1704 1704 // expanding the heap if necessary.
1705 1705 // Need the free list locks for the call to free() in compute_new_size()
1706 1706 compute_new_size();
1707 1707 return;
1708 1708 }
1709 1709 acquire_control_and_collect(full, clear_all_soft_refs);
1710 1710 _full_gcs_since_conc_gc++;
1711 1711 }
1712 1712
1713 1713 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1714 1714 GenCollectedHeap* gch = GenCollectedHeap::heap();
1715 1715 unsigned int gc_count = gch->total_full_collections();
1716 1716 if (gc_count == full_gc_count) {
1717 1717 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1718 1718 _full_gc_requested = true;
1719 1719 _full_gc_cause = cause;
1720 1720 CGC_lock->notify(); // nudge CMS thread
1721 1721 } else {
1722 1722 assert(gc_count > full_gc_count, "Error: causal loop");
1723 1723 }
1724 1724 }
1725 1725
1726 1726 bool CMSCollector::is_external_interruption() {
1727 1727 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1728 1728 return GCCause::is_user_requested_gc(cause) ||
1729 1729 GCCause::is_serviceability_requested_gc(cause);
1730 1730 }
1731 1731
1732 1732 void CMSCollector::report_concurrent_mode_interruption() {
1733 1733 if (is_external_interruption()) {
1734 1734 if (PrintGCDetails) {
1735 1735 gclog_or_tty->print(" (concurrent mode interrupted)");
1736 1736 }
1737 1737 } else {
1738 1738 if (PrintGCDetails) {
1739 1739 gclog_or_tty->print(" (concurrent mode failure)");
1740 1740 }
1741 1741 _gc_tracer_cm->report_concurrent_mode_failure();
1742 1742 }
1743 1743 }
1744 1744
1745 1745
1746 1746 // The foreground and background collectors need to coordinate in order
1747 1747 // to make sure that they do not mutually interfere with CMS collections.
1748 1748 // When a background collection is active,
1749 1749 // the foreground collector may need to take over (preempt) and
1750 1750 // synchronously complete an ongoing collection. Depending on the
1751 1751 // frequency of the background collections and the heap usage
1752 1752 // of the application, this preemption can be seldom or frequent.
1753 1753 // There are only certain points
1754 1754 // in the background collection at which the "collection-baton"
1755 1755 // can be passed to the foreground collector.
1756 1756 //
1757 1757 // The foreground collector will wait for the baton before
1758 1758 // starting any part of the collection. The foreground collector
1759 1759 // will only wait at one location.
1760 1760 //
1761 1761 // The background collector will yield the baton before starting a new
1762 1762 // phase of the collection (e.g., before initial marking, marking from roots,
1763 1763 // precleaning, final re-mark, sweep etc.) This is normally done at the head
1764 1764 // of the loop which switches the phases. The background collector does some
1765 1765 // of the phases (initial mark, final re-mark) with the world stopped.
1766 1766 // Because of locking involved in stopping the world,
1767 1767 // the foreground collector should not block waiting for the background
1768 1768 // collector when it is doing a stop-the-world phase. The background
1769 1769 // collector will yield the baton at an additional point just before
1770 1770 // it enters a stop-the-world phase. Once the world is stopped, the
1771 1771 // background collector checks the phase of the collection. If the
1772 1772 // phase has not changed, it proceeds with the collection. If the
1773 1773 // phase has changed, it skips that phase of the collection. See
1774 1774 // the comments on the use of the Heap_lock in collect_in_background().
1775 1775 //
1776 1776 // Variable used in baton passing.
1777 1777 // _foregroundGCIsActive - Set to true by the foreground collector when
1778 1778 // it wants the baton. The foreground clears it when it has finished
1779 1779 // the collection.
1780 1780 // _foregroundGCShouldWait - Set to true by the background collector
1781 1781 // when it is running. The foreground collector waits while
1782 1782 // _foregroundGCShouldWait is true.
1783 1783 // CGC_lock - monitor used to protect access to the above variables
1784 1784 // and to notify the foreground and background collectors.
1785 1785 // _collectorState - current state of the CMS collection.
1786 1786 //
1787 1787 // The foreground collector
1788 1788 // acquires the CGC_lock
1789 1789 // sets _foregroundGCIsActive
1790 1790 // waits on the CGC_lock for _foregroundGCShouldWait to be false
1791 1791 // various locks acquired in preparation for the collection
1792 1792 // are released so as not to block the background collector
1793 1793 // that is in the midst of a collection
1794 1794 // proceeds with the collection
1795 1795 // clears _foregroundGCIsActive
1796 1796 // returns
1797 1797 //
1798 1798 // The background collector in a loop iterating on the phases of the
1799 1799 // collection
1800 1800 // acquires the CGC_lock
1801 1801 // sets _foregroundGCShouldWait
1802 1802 // if _foregroundGCIsActive is set
1803 1803 //     clears _foregroundGCShouldWait, notifies CGC_lock
1804 1804 //     waits on CGC_lock for _foregroundGCIsActive to become false
1805 1805 // and exits the loop.
1806 1806 // otherwise
1807 1807 // proceed with that phase of the collection
1808 1808 // if the phase is a stop-the-world phase,
1809 1809 // yield the baton once more just before enqueueing
1810 1810 // the stop-world CMS operation (executed by the VM thread).
1811 1811 // returns after all phases of the collection are done
1812 1812 //
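// A condensed sketch of the foreground side of this hand-off (illustration
// only; the real code below also manages the CMS token and reacquires the
// free list and bitmap locks):
//
//   _foregroundGCIsActive = true;
//   {
//     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
//     CGC_lock->notify();                    // wake a blocked CMS thread
//     while (_foregroundGCShouldWait) {
//       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
//     }
//   }
//   ... do the foreground collection ...
//   _foregroundGCIsActive = false;
//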
1813 1813
1814 1814 void CMSCollector::acquire_control_and_collect(bool full,
1815 1815 bool clear_all_soft_refs) {
1816 1816 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1817 1817 assert(!Thread::current()->is_ConcurrentGC_thread(),
1818 1818 "shouldn't try to acquire control from self!");
1819 1819
1820 1820 // Start the protocol for acquiring control of the
1821 1821 // collection from the background collector (aka CMS thread).
1822 1822 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1823 1823 "VM thread should have CMS token");
1824 1824 // Remember the possibly interrupted state of an ongoing
1825 1825 // concurrent collection
1826 1826 CollectorState first_state = _collectorState;
1827 1827
1828 1828 // Signal to a possibly ongoing concurrent collection that
1829 1829 // we want to do a foreground collection.
1830 1830 _foregroundGCIsActive = true;
1831 1831
1832 1832 // Disable incremental mode during a foreground collection.
1833 1833 ICMSDisabler icms_disabler;
1834 1834
1835 1835 // release locks and wait for a notify from the background collector
1836 1836   // releasing the locks is only necessary for phases that yield,
1837 1837   // to improve the granularity of the collection.
1838 1838 assert_lock_strong(bitMapLock());
1839 1839 // We need to lock the Free list lock for the space that we are
1840 1840 // currently collecting.
1841 1841 assert(haveFreelistLocks(), "Must be holding free list locks");
1842 1842 bitMapLock()->unlock();
1843 1843 releaseFreelistLocks();
1844 1844 {
1845 1845 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1846 1846 if (_foregroundGCShouldWait) {
1847 1847       // We are going to be waiting for action from the CMS thread;
1848 1848 // it had better not be gone (for instance at shutdown)!
1849 1849 assert(ConcurrentMarkSweepThread::cmst() != NULL,
1850 1850 "CMS thread must be running");
1851 1851 // Wait here until the background collector gives us the go-ahead
1852 1852 ConcurrentMarkSweepThread::clear_CMS_flag(
1853 1853 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1854 1854 // Get a possibly blocked CMS thread going:
1855 1855 // Note that we set _foregroundGCIsActive true above,
1856 1856 // without protection of the CGC_lock.
1857 1857 CGC_lock->notify();
1858 1858 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1859 1859 "Possible deadlock");
1860 1860 while (_foregroundGCShouldWait) {
1861 1861 // wait for notification
1862 1862 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1863 1863 // Possibility of delay/starvation here, since CMS token does
1864 1864         // not know to give priority to VM thread? Actually, I think
1865 1865 // there wouldn't be any delay/starvation, but the proof of
1866 1866 // that "fact" (?) appears non-trivial. XXX 20011219YSR
1867 1867 }
1868 1868 ConcurrentMarkSweepThread::set_CMS_flag(
1869 1869 ConcurrentMarkSweepThread::CMS_vm_has_token);
1870 1870 }
1871 1871 }
1872 1872 // The CMS_token is already held. Get back the other locks.
1873 1873 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1874 1874 "VM thread should have CMS token");
1875 1875 getFreelistLocks();
1876 1876 bitMapLock()->lock_without_safepoint_check();
1877 1877 if (TraceCMSState) {
1878 1878 gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1879 1879 INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1880 1880 gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
1881 1881 }
1882 1882
1883 1883 // Check if we need to do a compaction, or if not, whether
1884 1884 // we need to start the mark-sweep from scratch.
1885 1885 bool should_compact = false;
1886 1886 bool should_start_over = false;
1887 1887 decide_foreground_collection_type(clear_all_soft_refs,
1888 1888 &should_compact, &should_start_over);
1889 1889
1890 1890 NOT_PRODUCT(
1891 1891 if (RotateCMSCollectionTypes) {
1892 1892 if (_cmsGen->debug_collection_type() ==
1893 1893 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1894 1894 should_compact = true;
1895 1895 } else if (_cmsGen->debug_collection_type() ==
1896 1896 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1897 1897 should_compact = false;
1898 1898 }
1899 1899 }
1900 1900 )
1901 1901
1902 1902 if (first_state > Idling) {
1903 1903 report_concurrent_mode_interruption();
1904 1904 }
1905 1905
1906 1906 set_did_compact(should_compact);
1907 1907 if (should_compact) {
1908 1908 // If the collection is being acquired from the background
1909 1909 // collector, there may be references on the discovered
1910 1910 // references lists that have NULL referents (being those
1911 1911 // that were concurrently cleared by a mutator) or
1912 1912 // that are no longer active (having been enqueued concurrently
1913 1913 // by the mutator).
1914 1914 // Scrub the list of those references because Mark-Sweep-Compact
1915 1915 // code assumes referents are not NULL and that all discovered
1916 1916 // Reference objects are active.
1917 1917 ref_processor()->clean_up_discovered_references();
1918 1918
1919 1919 if (first_state > Idling) {
1920 1920 save_heap_summary();
1921 1921 }
1922 1922
1923 1923 do_compaction_work(clear_all_soft_refs);
1924 1924
1925 1925 // Has the GC time limit been exceeded?
1926 1926 DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
1927 1927 size_t max_eden_size = young_gen->max_capacity() -
1928 1928 young_gen->to()->capacity() -
1929 1929 young_gen->from()->capacity();
1930 1930 GenCollectedHeap* gch = GenCollectedHeap::heap();
1931 1931 GCCause::Cause gc_cause = gch->gc_cause();
1932 1932 size_policy()->check_gc_overhead_limit(_young_gen->used(),
1933 1933 young_gen->eden()->used(),
1934 1934 _cmsGen->max_capacity(),
1935 1935 max_eden_size,
1936 1936 full,
1937 1937 gc_cause,
1938 1938 gch->collector_policy());
1939 1939 } else {
1940 1940 do_mark_sweep_work(clear_all_soft_refs, first_state,
1941 1941 should_start_over);
1942 1942 }
1943 1943 // Reset the expansion cause, now that we just completed
1944 1944 // a collection cycle.
1945 1945 clear_expansion_cause();
1946 1946 _foregroundGCIsActive = false;
1947 1947 return;
1948 1948 }
1949 1949
1950 1950 // Resize the tenured generation
1951 1951 // after obtaining the free list locks for the
1952 1952 // two generations.
1953 1953 void CMSCollector::compute_new_size() {
1954 1954 assert_locked_or_safepoint(Heap_lock);
1955 1955 FreelistLocker z(this);
1956 1956 MetaspaceGC::compute_new_size();
1957 1957 _cmsGen->compute_new_size_free_list();
1958 1958 }
1959 1959
1960 1960 // A work method used by the foreground collector to determine
1961 1961 // what type of collection (compacting or not, continuing or fresh)
1962 1962 // it should do.
1963 1963 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1964 1964 // and CMSCompactWhenClearAllSoftRefs the default in the future
1965 1965 // and do away with the flags after a suitable period.
1966 1966 void CMSCollector::decide_foreground_collection_type(
1967 1967 bool clear_all_soft_refs, bool* should_compact,
1968 1968 bool* should_start_over) {
1969 1969 // Normally, we'll compact only if the UseCMSCompactAtFullCollection
1970 1970 // flag is set, and we have either requested a System.gc() or
1971 1971 // the number of full gc's since the last concurrent cycle
1972 1972 // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
1973 1973 // or if an incremental collection has failed
1974 1974 GenCollectedHeap* gch = GenCollectedHeap::heap();
1975 1975 assert(gch->collector_policy()->is_two_generation_policy(),
1976 1976 "You may want to check the correctness of the following");
1977 1977 // Inform cms gen if this was due to partial collection failing.
1978 1978 // The CMS gen may use this fact to determine its expansion policy.
1979 1979 if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1980 1980 assert(!_cmsGen->incremental_collection_failed(),
1981 1981 "Should have been noticed, reacted to and cleared");
1982 1982 _cmsGen->set_incremental_collection_failed();
1983 1983 }
1984 1984 *should_compact =
1985 1985 UseCMSCompactAtFullCollection &&
1986 1986 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1987 1987 GCCause::is_user_requested_gc(gch->gc_cause()) ||
1988 1988 gch->incremental_collection_will_fail(true /* consult_young */));
1989 1989 *should_start_over = false;
1990 1990 if (clear_all_soft_refs && !*should_compact) {
1991 1991 // We are about to do a last ditch collection attempt
1992 1992 // so it would normally make sense to do a compaction
1993 1993 // to reclaim as much space as possible.
1994 1994 if (CMSCompactWhenClearAllSoftRefs) {
1995 1995 // Default: The rationale is that in this case either
1996 1996 // we are past the final marking phase, in which case
1997 1997 // we'd have to start over, or so little has been done
1998 1998 // that there's little point in saving that work. Compaction
1999 1999 // appears to be the sensible choice in either case.
2000 2000 *should_compact = true;
2001 2001 } else {
2002 2002 // We have been asked to clear all soft refs, but not to
2003 2003 // compact. Make sure that we aren't past the final checkpoint
2004 2004 // phase, for that is where we process soft refs. If we are already
2005 2005 // past that phase, we'll need to redo the refs discovery phase and
2006 2006 // if necessary clear soft refs that weren't previously
2007 2007 // cleared. We do so by remembering the phase in which
2008 2008 // we came in, and if we are past the refs processing
2009 2009 // phase, we'll choose to just redo the mark-sweep
2010 2010 // collection from scratch.
2011 2011 if (_collectorState > FinalMarking) {
2012 2012 // We are past the refs processing phase;
2013 2013 // start over and do a fresh synchronous CMS cycle
2014 2014 _collectorState = Resetting; // skip to reset to start new cycle
2015 2015 reset(false /* == !asynch */);
2016 2016 *should_start_over = true;
2017 2017 } // else we can continue a possibly ongoing current cycle
2018 2018 }
2019 2019 }
2020 2020 }
2021 2021
2022 2022 // A work method used by the foreground collector to do
2023 2023 // a mark-sweep-compact.
2024 2024 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
2025 2025 GenCollectedHeap* gch = GenCollectedHeap::heap();
2026 2026
2027 2027 STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
2028 2028 gc_timer->register_gc_start();
2029 2029
2030 2030 SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
2031 2031 gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
2032 2032
2033 2033 GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
2034 2034 if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
2035 2035 gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
2036 2036 "collections passed to foreground collector", _full_gcs_since_conc_gc);
2037 2037 }
2038 2038
2039 2039 // Sample collection interval time and reset for collection pause.
2040 2040 if (UseAdaptiveSizePolicy) {
2041 2041 size_policy()->msc_collection_begin();
2042 2042 }
2043 2043
2044 2044 // Temporarily widen the span of the weak reference processing to
2045 2045 // the entire heap.
2046 2046 MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
2047 2047 ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
2048 2048 // Temporarily, clear the "is_alive_non_header" field of the
2049 2049 // reference processor.
2050 2050 ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
2051 2051 // Temporarily make reference _processing_ single threaded (non-MT).
2052 2052 ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
2053 2053 // Temporarily make refs discovery atomic
2054 2054 ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
2055 2055 // Temporarily make reference _discovery_ single threaded (non-MT)
2056 2056 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
2057 2057
2058 2058 ref_processor()->set_enqueuing_is_done(false);
2059 2059 ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
2060 2060 ref_processor()->setup_policy(clear_all_soft_refs);
2061 2061 // If an asynchronous collection finishes, the _modUnionTable is
2062 2062 // all clear. If we are assuming the collection from an asynchronous
2063 2063 // collection, clear the _modUnionTable.
2064 2064 assert(_collectorState != Idling || _modUnionTable.isAllClear(),
2065 2065 "_modUnionTable should be clear if the baton was not passed");
2066 2066 _modUnionTable.clear_all();
2067 2067 assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
2068 2068 "mod union for klasses should be clear if the baton was passed");
2069 2069 _ct->klass_rem_set()->clear_mod_union();
2070 2070
2071 2071 // We must adjust the allocation statistics being maintained
2072 2072 // in the free list space. We do so by reading and clearing
2073 2073 // the sweep timer and updating the block flux rate estimates below.
2074 2074 assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
2075 2075 if (_inter_sweep_timer.is_active()) {
2076 2076 _inter_sweep_timer.stop();
2077 2077 // Note that we do not use this sample to update the _inter_sweep_estimate.
2078 2078 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
2079 2079 _inter_sweep_estimate.padded_average(),
2080 2080 _intra_sweep_estimate.padded_average());
2081 2081 }
2082 2082
2083 2083 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
2084 2084 ref_processor(), clear_all_soft_refs);
2085 2085 #ifdef ASSERT
2086 2086 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
2087 2087 size_t free_size = cms_space->free();
2088 2088 assert(free_size ==
2089 2089 pointer_delta(cms_space->end(), cms_space->compaction_top())
2090 2090 * HeapWordSize,
2091 2091 "All the free space should be compacted into one chunk at top");
2092 2092 assert(cms_space->dictionary()->total_chunk_size(
2093 2093 debug_only(cms_space->freelistLock())) == 0 ||
2094 2094 cms_space->totalSizeInIndexedFreeLists() == 0,
2095 2095 "All the free space should be in a single chunk");
2096 2096 size_t num = cms_space->totalCount();
2097 2097 assert((free_size == 0 && num == 0) ||
2098 2098 (free_size > 0 && (num == 1 || num == 2)),
2099 2099 "There should be at most 2 free chunks after compaction");
2100 2100 #endif // ASSERT
2101 2101 _collectorState = Resetting;
2102 2102 assert(_restart_addr == NULL,
2103 2103 "Should have been NULL'd before baton was passed");
2104 2104 reset(false /* == !asynch */);
2105 2105 _cmsGen->reset_after_compaction();
2106 2106 _concurrent_cycles_since_last_unload = 0;
2107 2107
2108 2108 // Clear any data recorded in the PLAB chunk arrays.
2109 2109 if (_survivor_plab_array != NULL) {
2110 2110 reset_survivor_plab_arrays();
2111 2111 }
2112 2112
2113 2113 // Adjust the per-size allocation stats for the next epoch.
2114 2114 _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2115 2115 // Restart the "inter sweep timer" for the next epoch.
2116 2116 _inter_sweep_timer.reset();
2117 2117 _inter_sweep_timer.start();
2118 2118
2119 2119 // Sample collection pause time and reset for collection interval.
2120 2120 if (UseAdaptiveSizePolicy) {
2121 2121 size_policy()->msc_collection_end(gch->gc_cause());
2122 2122 }
2123 2123
2124 2124 gc_timer->register_gc_end();
2125 2125
2126 2126 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
2127 2127
2128 2128 // For a mark-sweep-compact, compute_new_size() will be called
2129 2129 // in the heap's do_collection() method.
2130 2130 }
2131 2131
2132 2132 // A work method used by the foreground collector to do
2133 2133 // a mark-sweep, after taking over from a possibly ongoing
2134 2134 // concurrent mark-sweep collection.
2135 2135 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2136 2136 CollectorState first_state, bool should_start_over) {
2137 2137 if (PrintGC && Verbose) {
2138 2138 gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2139 2139 "collector with count %d",
2140 2140 _full_gcs_since_conc_gc);
2141 2141 }
2142 2142 switch (_collectorState) {
2143 2143 case Idling:
2144 2144 if (first_state == Idling || should_start_over) {
2145 2145         // The background GC was not active, or should be
2146 2146         // restarted from scratch; start the cycle.
2147 2147 _collectorState = InitialMarking;
2148 2148 }
2149 2149 // If first_state was not Idling, then a background GC
2150 2150 // was in progress and has now finished. No need to do it
2151 2151 // again. Leave the state as Idling.
2152 2152 break;
2153 2153 case Precleaning:
2154 2154 // In the foreground case don't do the precleaning since
2155 2155 // it is not done concurrently and there is extra work
2156 2156 // required.
2157 2157 _collectorState = FinalMarking;
2158 2158 }
2159 2159 collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
2160 2160
2161 2161 // For a mark-sweep, compute_new_size() will be called
2162 2162 // in the heap's do_collection() method.
2163 2163 }
2164 2164
2165 2165
2166 2166 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
2167 2167 DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
2168 2168 EdenSpace* eden_space = dng->eden();
2169 2169 ContiguousSpace* from_space = dng->from();
2170 2170 ContiguousSpace* to_space = dng->to();
2171 2171 // Eden
2172 2172 if (_eden_chunk_array != NULL) {
2173 2173 gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2174 2174 eden_space->bottom(), eden_space->top(),
2175 2175 eden_space->end(), eden_space->capacity());
2176 2176 gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
2177 2177 "_eden_chunk_capacity=" SIZE_FORMAT,
2178 2178 _eden_chunk_index, _eden_chunk_capacity);
2179 2179 for (size_t i = 0; i < _eden_chunk_index; i++) {
2180 2180 gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2181 2181 i, _eden_chunk_array[i]);
2182 2182 }
2183 2183 }
2184 2184 // Survivor
2185 2185 if (_survivor_chunk_array != NULL) {
2186 2186 gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
2187 2187 from_space->bottom(), from_space->top(),
2188 2188 from_space->end(), from_space->capacity());
2189 2189 gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
2190 2190 "_survivor_chunk_capacity=" SIZE_FORMAT,
2191 2191 _survivor_chunk_index, _survivor_chunk_capacity);
2192 2192 for (size_t i = 0; i < _survivor_chunk_index; i++) {
2193 2193 gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
2194 2194 i, _survivor_chunk_array[i]);
2195 2195 }
2196 2196 }
2197 2197 }
2198 2198
2199 2199 void CMSCollector::getFreelistLocks() const {
2200 2200 // Get locks for all free lists in all generations that this
2201 2201 // collector is responsible for
2202 2202 _cmsGen->freelistLock()->lock_without_safepoint_check();
2203 2203 }
2204 2204
2205 2205 void CMSCollector::releaseFreelistLocks() const {
2206 2206 // Release locks for all free lists in all generations that this
2207 2207 // collector is responsible for
2208 2208 _cmsGen->freelistLock()->unlock();
2209 2209 }
2210 2210
2211 2211 bool CMSCollector::haveFreelistLocks() const {
2212 2212 // Check locks for all free lists in all generations that this
2213 2213 // collector is responsible for
2214 2214 assert_lock_strong(_cmsGen->freelistLock());
2215 2215 PRODUCT_ONLY(ShouldNotReachHere());
2216 2216 return true;
2217 2217 }
2218 2218
2219 2219 // A utility class that is used by the CMS collector to
2220 2220 // temporarily "release" the foreground collector from its
2221 2221 // usual obligation to wait for the background collector to
2222 2222 // complete an ongoing phase before proceeding.
2223 2223 class ReleaseForegroundGC: public StackObj {
2224 2224 private:
2225 2225 CMSCollector* _c;
2226 2226 public:
2227 2227 ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2228 2228 assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2229 2229 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2230 2230 // allow a potentially blocked foreground collector to proceed
2231 2231 _c->_foregroundGCShouldWait = false;
2232 2232 if (_c->_foregroundGCIsActive) {
2233 2233 CGC_lock->notify();
2234 2234 }
2235 2235 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2236 2236 "Possible deadlock");
2237 2237 }
2238 2238
2239 2239 ~ReleaseForegroundGC() {
2240 2240 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2241 2241 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2242 2242 _c->_foregroundGCShouldWait = true;
2243 2243 }
2244 2244 };
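// Usage sketch (illustration only): a background phase that must not hold up a
// waiting foreground collection brackets its stop-the-world operation with an
// instance of this class, as the InitialMarking case below does:
//
//   {
//     ReleaseForegroundGC x(this);     // lets a blocked foreground GC proceed
//     VM_CMS_Initial_Mark initial_mark_op(this);
//     VMThread::execute(&initial_mark_op);
//   }                                  // destructor re-asserts _foregroundGCShouldWait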
2245 2245
2246 2246 // There are separate collect_in_background and collect_in_foreground because of
2247 2247 // the different locking requirements of the background collector and the
2248 2248 // foreground collector. There was originally an attempt to share
2249 2249 // one "collect" method between the background collector and the foreground
2250 2250 // collector, but the amount of if-then-else logic required made it
2251 2251 // cleaner to keep separate methods.
2252 2252 void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
2253 2253 assert(Thread::current()->is_ConcurrentGC_thread(),
2254 2254 "A CMS asynchronous collection is only allowed on a CMS thread.");
2255 2255
2256 2256 GenCollectedHeap* gch = GenCollectedHeap::heap();
2257 2257 {
2258 2258 bool safepoint_check = Mutex::_no_safepoint_check_flag;
2259 2259 MutexLockerEx hl(Heap_lock, safepoint_check);
2260 2260 FreelistLocker fll(this);
2261 2261 MutexLockerEx x(CGC_lock, safepoint_check);
2262 2262 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2263 2263 // The foreground collector is active or we're
2264 2264 // not using asynchronous collections. Skip this
2265 2265 // background collection.
2266 2266 assert(!_foregroundGCShouldWait, "Should be clear");
2267 2267 return;
2268 2268 } else {
2269 2269 assert(_collectorState == Idling, "Should be idling before start.");
2270 2270 _collectorState = InitialMarking;
2271 2271 register_gc_start(cause);
2272 2272 // Reset the expansion cause, now that we are about to begin
2273 2273 // a new cycle.
2274 2274 clear_expansion_cause();
2275 2275
2276 2276 // Clear the MetaspaceGC flag since a concurrent collection
2277 2277 // is starting but also clear it after the collection.
2278 2278 MetaspaceGC::set_should_concurrent_collect(false);
2279 2279 }
2280 2280 // Decide if we want to enable class unloading as part of the
2281 2281 // ensuing concurrent GC cycle.
2282 2282 update_should_unload_classes();
2283 2283 _full_gc_requested = false; // acks all outstanding full gc requests
2284 2284 _full_gc_cause = GCCause::_no_gc;
2285 2285 // Signal that we are about to start a collection
2286 2286 gch->increment_total_full_collections(); // ... starting a collection cycle
2287 2287 _collection_count_start = gch->total_full_collections();
2288 2288 }
2289 2289
2290 2290 // Used for PrintGC
2291 2291 size_t prev_used;
2292 2292 if (PrintGC && Verbose) {
2293 2293 prev_used = _cmsGen->used(); // XXXPERM
2294 2294 }
2295 2295
2296 2296 // The change of the collection state is normally done at this level;
2297 2297 // the exceptions are phases that are executed while the world is
2298 2298 // stopped. For those phases the change of state is done while the
2299 2299 // world is stopped. For baton passing purposes this allows the
2300 2300 // background collector to finish the phase and change state atomically.
2301 2301 // The foreground collector cannot wait on a phase that is done
2302 2302 // while the world is stopped because the foreground collector already
2303 2303 // has the world stopped and would deadlock.
2304 2304 while (_collectorState != Idling) {
2305 2305 if (TraceCMSState) {
2306 2306 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2307 2307 Thread::current(), _collectorState);
2308 2308 }
2309 2309 // The foreground collector
2310 2310 // holds the Heap_lock throughout its collection.
2311 2311 // holds the CMS token (but not the lock)
2312 2312 // except while it is waiting for the background collector to yield.
2313 2313 //
2314 2314 // The foreground collector should be blocked (not for long)
2315 2315 // if the background collector is about to start a phase
2316 2316 // executed with world stopped. If the background
2317 2317 // collector has already started such a phase, the
2318 2318 // foreground collector is blocked waiting for the
2319 2319 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
2320 2320 // are executed in the VM thread.
2321 2321 //
2322 2322 // The locking order is
2323 2323 // PendingListLock (PLL) -- if applicable (FinalMarking)
2324 2324 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
2325 2325 // CMS token (claimed in
2326 2326 // stop_world_and_do() -->
2327 2327 // safepoint_synchronize() -->
2328 2328 // CMSThread::synchronize())
2329 2329
2330 2330 {
2331 2331 // Check if the FG collector wants us to yield.
2332 2332 CMSTokenSync x(true); // is cms thread
2333 2333 if (waitForForegroundGC()) {
2334 2334 // We yielded to a foreground GC, nothing more to be
2335 2335 // done this round.
2336 2336 assert(_foregroundGCShouldWait == false, "We set it to false in "
2337 2337 "waitForForegroundGC()");
2338 2338 if (TraceCMSState) {
2339 2339 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2340 2340 " exiting collection CMS state %d",
2341 2341 Thread::current(), _collectorState);
2342 2342 }
2343 2343 return;
2344 2344 } else {
2345 2345 // The background collector can run but check to see if the
2346 2346 // foreground collector has done a collection while the
2347 2347 // background collector was waiting to get the CGC_lock
2348 2348 // above. If yes, break so that _foregroundGCShouldWait
2349 2349 // is cleared before returning.
2350 2350 if (_collectorState == Idling) {
2351 2351 break;
2352 2352 }
2353 2353 }
2354 2354 }
2355 2355
2356 2356 assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2357 2357 "should be waiting");
2358 2358
2359 2359 switch (_collectorState) {
2360 2360 case InitialMarking:
2361 2361 {
2362 2362 ReleaseForegroundGC x(this);
2363 2363 stats().record_cms_begin();
2364 2364 VM_CMS_Initial_Mark initial_mark_op(this);
2365 2365 VMThread::execute(&initial_mark_op);
2366 2366 }
2367 2367 // The collector state may be any legal state at this point
2368 2368 // since the background collector may have yielded to the
2369 2369 // foreground collector.
2370 2370 break;
2371 2371 case Marking:
2372 2372 // initial marking in checkpointRootsInitialWork has been completed
2373 2373 if (markFromRoots(true)) { // we were successful
2374 2374 assert(_collectorState == Precleaning, "Collector state should "
2375 2375 "have changed");
2376 2376 } else {
2377 2377 assert(_foregroundGCIsActive, "Internal state inconsistency");
2378 2378 }
2379 2379 break;
2380 2380 case Precleaning:
2381 2381 if (UseAdaptiveSizePolicy) {
2382 2382 size_policy()->concurrent_precleaning_begin();
2383 2383 }
2384 2384 // marking from roots in markFromRoots has been completed
2385 2385 preclean();
2386 2386 if (UseAdaptiveSizePolicy) {
2387 2387 size_policy()->concurrent_precleaning_end();
2388 2388 }
2389 2389 assert(_collectorState == AbortablePreclean ||
2390 2390 _collectorState == FinalMarking,
2391 2391 "Collector state should have changed");
2392 2392 break;
2393 2393 case AbortablePreclean:
2394 2394 if (UseAdaptiveSizePolicy) {
2395 2395 size_policy()->concurrent_phases_resume();
2396 2396 }
2397 2397 abortable_preclean();
2398 2398 if (UseAdaptiveSizePolicy) {
2399 2399 size_policy()->concurrent_precleaning_end();
2400 2400 }
2401 2401 assert(_collectorState == FinalMarking, "Collector state should "
2402 2402 "have changed");
2403 2403 break;
2404 2404 case FinalMarking:
2405 2405 {
2406 2406 ReleaseForegroundGC x(this);
2407 2407
2408 2408 VM_CMS_Final_Remark final_remark_op(this);
2409 2409 VMThread::execute(&final_remark_op);
2410 2410 }
2411 2411 assert(_foregroundGCShouldWait, "block post-condition");
2412 2412 break;
2413 2413 case Sweeping:
2414 2414 if (UseAdaptiveSizePolicy) {
2415 2415 size_policy()->concurrent_sweeping_begin();
2416 2416 }
2417 2417 // final marking in checkpointRootsFinal has been completed
2418 2418 sweep(true);
2419 2419 assert(_collectorState == Resizing, "Collector state change "
2420 2420 "to Resizing must be done under the free_list_lock");
2421 2421 _full_gcs_since_conc_gc = 0;
2422 2422
2423 2423 // Stop the timers for adaptive size policy for the concurrent phases
2424 2424 if (UseAdaptiveSizePolicy) {
2425 2425 size_policy()->concurrent_sweeping_end();
2426 2426 size_policy()->concurrent_phases_end(gch->gc_cause(),
2427 2427 gch->prev_gen(_cmsGen)->capacity(),
2428 2428 _cmsGen->free());
2429 2429 }
2430 2430
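      // Fall through: sweep() has already advanced _collectorState to Resizing
      // (see the assert above), so the Resizing work runs in this same pass.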
2431 2431 case Resizing: {
2432 2432 // Sweeping has been completed...
2433 2433 // At this point the background collection has completed.
2434 2434 // Don't move the call to compute_new_size() down
2435 2435 // into code that might be executed if the background
2436 2436 // collection was preempted.
2437 2437 {
2438 2438 ReleaseForegroundGC x(this); // unblock FG collection
2439 2439 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
2440 2440 CMSTokenSync z(true); // not strictly needed.
2441 2441 if (_collectorState == Resizing) {
2442 2442 compute_new_size();
2443 2443 save_heap_summary();
2444 2444 _collectorState = Resetting;
2445 2445 } else {
2446 2446 assert(_collectorState == Idling, "The state should only change"
2447 2447 " because the foreground collector has finished the collection");
2448 2448 }
2449 2449 }
2450 2450 break;
2451 2451 }
2452 2452 case Resetting:
2453 2453 // CMS heap resizing has been completed
2454 2454 reset(true);
2455 2455 assert(_collectorState == Idling, "Collector state should "
2456 2456 "have changed");
2457 2457
2458 2458 MetaspaceGC::set_should_concurrent_collect(false);
2459 2459
2460 2460 stats().record_cms_end();
2461 2461 // Don't move the concurrent_phases_end() and compute_new_size()
2462 2462 // calls to here because a preempted background collection
2463 2463     // has its state set to "Resetting".
2464 2464 break;
2465 2465 case Idling:
2466 2466 default:
2467 2467 ShouldNotReachHere();
2468 2468 break;
2469 2469 }
2470 2470 if (TraceCMSState) {
2471 2471 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2472 2472 Thread::current(), _collectorState);
2473 2473 }
2474 2474 assert(_foregroundGCShouldWait, "block post-condition");
2475 2475 }
2476 2476
2477 2477 // Should this be in gc_epilogue?
2478 2478 collector_policy()->counters()->update_counters();
2479 2479
2480 2480 {
2481 2481 // Clear _foregroundGCShouldWait and, in the event that the
2482 2482 // foreground collector is waiting, notify it, before
2483 2483 // returning.
2484 2484 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2485 2485 _foregroundGCShouldWait = false;
2486 2486 if (_foregroundGCIsActive) {
2487 2487 CGC_lock->notify();
2488 2488 }
2489 2489 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2490 2490 "Possible deadlock");
2491 2491 }
2492 2492 if (TraceCMSState) {
2493 2493 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2494 2494 " exiting collection CMS state %d",
2495 2495 Thread::current(), _collectorState);
2496 2496 }
2497 2497 if (PrintGC && Verbose) {
2498 2498 _cmsGen->print_heap_change(prev_used);
2499 2499 }
2500 2500 }
2501 2501
2502 2502 void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
2503 2503 if (!_cms_start_registered) {
2504 2504 register_gc_start(cause);
2505 2505 }
2506 2506 }
2507 2507
2508 2508 void CMSCollector::register_gc_start(GCCause::Cause cause) {
2509 2509 _cms_start_registered = true;
2510 2510 _gc_timer_cm->register_gc_start();
2511 2511 _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2512 2512 }
2513 2513
2514 2514 void CMSCollector::register_gc_end() {
2515 2515 if (_cms_start_registered) {
2516 2516 report_heap_summary(GCWhen::AfterGC);
2517 2517
2518 2518 _gc_timer_cm->register_gc_end();
2519 2519 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2520 2520 _cms_start_registered = false;
2521 2521 }
2522 2522 }
2523 2523
2524 2524 void CMSCollector::save_heap_summary() {
2525 2525 GenCollectedHeap* gch = GenCollectedHeap::heap();
2526 2526 _last_heap_summary = gch->create_heap_summary();
2527 2527 _last_metaspace_summary = gch->create_metaspace_summary();
2528 2528 }
2529 2529
2530 2530 void CMSCollector::report_heap_summary(GCWhen::Type when) {
2531 2531 _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
2532 2532 _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
2533 2533 }
2534 2534
2535 2535 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
2536 2536 assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2537 2537 "Foreground collector should be waiting, not executing");
2538 2538   assert(Thread::current()->is_VM_thread(), "A foreground collection "
2539 2539 "may only be done by the VM Thread with the world stopped");
2540 2540 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2541 2541 "VM thread should have CMS token");
2542 2542
2543 2543 // The gc id is created in register_foreground_gc_start if this collection is synchronous
2544 2544 const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
2545 2545 NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2546 2546 true, NULL, gc_id);)
2547 2547 if (UseAdaptiveSizePolicy) {
2548 2548 size_policy()->ms_collection_begin();
2549 2549 }
2550 2550 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2551 2551
2552 2552 HandleMark hm; // Discard invalid handles created during verification
2553 2553
2554 2554 if (VerifyBeforeGC &&
2555 2555 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2556 2556 Universe::verify();
2557 2557 }
2558 2558
2559 2559 // Snapshot the soft reference policy to be used in this collection cycle.
2560 2560 ref_processor()->setup_policy(clear_all_soft_refs);
2561 2561
2562 2562 // Decide if class unloading should be done
2563 2563 update_should_unload_classes();
2564 2564
2565 2565 bool init_mark_was_synchronous = false; // until proven otherwise
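  // Walk the collector's state machine to completion. On this stop-the-world
  // (foreground) path the states are visited in order:
  //   InitialMarking -> Marking -> FinalMarking -> Sweeping ->
  //   Resizing -> Resetting -> Idling
  // with Precleaning and AbortablePreclean elided (see the cases below).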
2566 2566 while (_collectorState != Idling) {
2567 2567 if (TraceCMSState) {
2568 2568 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2569 2569 Thread::current(), _collectorState);
2570 2570 }
2571 2571 switch (_collectorState) {
2572 2572 case InitialMarking:
2573 2573 register_foreground_gc_start(cause);
2574 2574 init_mark_was_synchronous = true; // fact to be exploited in re-mark
2575 2575 checkpointRootsInitial(false);
2576 2576 assert(_collectorState == Marking, "Collector state should have changed"
2577 2577 " within checkpointRootsInitial()");
2578 2578 break;
2579 2579 case Marking:
2580 2580 // initial marking in checkpointRootsInitialWork has been completed
2581 2581 if (VerifyDuringGC &&
2582 2582 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2583 2583 Universe::verify("Verify before initial mark: ");
2584 2584 }
2585 2585 {
2586 2586 bool res = markFromRoots(false);
2587 2587 assert(res && _collectorState == FinalMarking, "Collector state should "
2588 2588 "have changed");
2589 2589 break;
2590 2590 }
2591 2591 case FinalMarking:
2592 2592 if (VerifyDuringGC &&
2593 2593 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2594 2594 Universe::verify("Verify before re-mark: ");
2595 2595 }
2596 2596 checkpointRootsFinal(false, clear_all_soft_refs,
2597 2597 init_mark_was_synchronous);
2598 2598 assert(_collectorState == Sweeping, "Collector state should not "
2599 2599 "have changed within checkpointRootsFinal()");
2600 2600 break;
2601 2601 case Sweeping:
2602 2602 // final marking in checkpointRootsFinal has been completed
2603 2603 if (VerifyDuringGC &&
2604 2604 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2605 2605 Universe::verify("Verify before sweep: ");
2606 2606 }
2607 2607 sweep(false);
2608 2608 assert(_collectorState == Resizing, "Incorrect state");
2609 2609 break;
2610 2610 case Resizing: {
2611 2611 // Sweeping has been completed; the actual resize in this case
2612 2612 // is done separately; nothing to be done in this state.
2613 2613 _collectorState = Resetting;
2614 2614 break;
2615 2615 }
2616 2616 case Resetting:
2617 2617 // The heap has been resized.
2618 2618 if (VerifyDuringGC &&
2619 2619 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2620 2620 Universe::verify("Verify before reset: ");
2621 2621 }
2622 2622 save_heap_summary();
2623 2623 reset(false);
2624 2624 assert(_collectorState == Idling, "Collector state should "
2625 2625 "have changed");
2626 2626 break;
2627 2627 case Precleaning:
2628 2628 case AbortablePreclean:
2629 2629 // Elide the preclean phase
2630 2630 _collectorState = FinalMarking;
2631 2631 break;
2632 2632 default:
2633 2633 ShouldNotReachHere();
2634 2634 }
2635 2635 if (TraceCMSState) {
2636 2636 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2637 2637 Thread::current(), _collectorState);
2638 2638 }
2639 2639 }
2640 2640
2641 2641 if (UseAdaptiveSizePolicy) {
2642 2642 GenCollectedHeap* gch = GenCollectedHeap::heap();
2643 2643 size_policy()->ms_collection_end(gch->gc_cause());
2644 2644 }
2645 2645
2646 2646 if (VerifyAfterGC &&
2647 2647 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2648 2648 Universe::verify();
2649 2649 }
2650 2650 if (TraceCMSState) {
2651 2651 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2652 2652 " exiting collection CMS state %d",
2653 2653 Thread::current(), _collectorState);
2654 2654 }
2655 2655 }
2656 2656
2657 2657 bool CMSCollector::waitForForegroundGC() {
2658 2658 bool res = false;
2659 2659 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2660 2660 "CMS thread should have CMS token");
2661 2661 // Block the foreground collector until the
2662 2662   // background collector decides whether to
2663 2663 // yield.
2664 2664 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2665 2665 _foregroundGCShouldWait = true;
2666 2666 if (_foregroundGCIsActive) {
2667 2667 // The background collector yields to the
2668 2668 // foreground collector and returns a value
2669 2669 // indicating that it has yielded. The foreground
2670 2670 // collector can proceed.
2671 2671 res = true;
2672 2672 _foregroundGCShouldWait = false;
2673 2673 ConcurrentMarkSweepThread::clear_CMS_flag(
2674 2674 ConcurrentMarkSweepThread::CMS_cms_has_token);
2675 2675 ConcurrentMarkSweepThread::set_CMS_flag(
2676 2676 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2677 2677 // Get a possibly blocked foreground thread going
2678 2678 CGC_lock->notify();
2679 2679 if (TraceCMSState) {
2680 2680 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2681 2681 Thread::current(), _collectorState);
2682 2682 }
2683 2683 while (_foregroundGCIsActive) {
2684 2684 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2685 2685 }
2686 2686 ConcurrentMarkSweepThread::set_CMS_flag(
2687 2687 ConcurrentMarkSweepThread::CMS_cms_has_token);
2688 2688 ConcurrentMarkSweepThread::clear_CMS_flag(
2689 2689 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2690 2690 }
2691 2691 if (TraceCMSState) {
2692 2692 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2693 2693 Thread::current(), _collectorState);
2694 2694 }
2695 2695 return res;
2696 2696 }
2697 2697
2698 2698 // Because of the need to lock the free lists and other structures in
2699 2699 // the collector, common to all the generations that the collector is
2700 2700 // collecting, we need the gc_prologues of individual CMS generations
2701 2701 // delegate to their collector. It may have been simpler had the
2702 2702 // to delegate to their collector. It may have been simpler had the
2703 2703 // collector. In the absence of that we have the generation's
2704 2704 // prologue delegate to the collector, which delegates back
2705 2705 // some "local" work to a worker method in the individual generations
2706 2706 // that it's responsible for collecting, while itself doing any
2707 2707 // work common to all generations it's responsible for. A similar
2708 2708 // comment applies to the gc_epilogue()s.
2709 2709 // The role of the variable _between_prologue_and_epilogue is to
2710 2710 // enforce the invocation protocol.
2711 2711 void CMSCollector::gc_prologue(bool full) {
2712 2712 // Call gc_prologue_work() for the CMSGen
2713 2713 // we are responsible for.
2714 2714
2715 2715 // The following locking discipline assumes that we are only called
2716 2716 // when the world is stopped.
2717 2717 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2718 2718
2719 2719 // The CMSCollector prologue must call the gc_prologues for the
2720 2720 // "generations" that it's responsible
2721 2721 // for.
2722 2722
2723 2723 assert( Thread::current()->is_VM_thread()
2724 2724 || ( CMSScavengeBeforeRemark
2725 2725 && Thread::current()->is_ConcurrentGC_thread()),
2726 2726 "Incorrect thread type for prologue execution");
2727 2727
2728 2728 if (_between_prologue_and_epilogue) {
2729 2729 // We have already been invoked; this is a gc_prologue delegation
2730 2730 // from yet another CMS generation that we are responsible for, just
2731 2731 // ignore it since all relevant work has already been done.
2732 2732 return;
2733 2733 }
2734 2734
2735 2735 // set a bit saying prologue has been called; cleared in epilogue
2736 2736 _between_prologue_and_epilogue = true;
2737 2737 // Claim locks for common data structures, then call gc_prologue_work()
2738 2738 // for each CMSGen.
2739 2739
2740 2740 getFreelistLocks(); // gets free list locks on constituent spaces
2741 2741 bitMapLock()->lock_without_safepoint_check();
2742 2742
2743 2743 // Should call gc_prologue_work() for all cms gens we are responsible for
2744 2744 bool duringMarking = _collectorState >= Marking
2745 2745 && _collectorState < Sweeping;
2746 2746
2747 2747 // The young collections clear the modified oops state, which tells if
2748 2748 // there are any modified oops in the class. The remark phase also needs
2749 2749 // that information. Tell the young collection to save the union of all
2750 2750 // modified klasses.
2751 2751 if (duringMarking) {
2752 2752 _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2753 2753 }
2754 2754
2755 2755 bool registerClosure = duringMarking;
2756 2756
2757 2757 ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
2758 2758 &_modUnionClosurePar
2759 2759 : &_modUnionClosure;
2760 2760 _cmsGen->gc_prologue_work(full, registerClosure, muc);
2761 2761
2762 2762 if (!full) {
2763 2763 stats().record_gc0_begin();
2764 2764 }
2765 2765 }
2766 2766
2767 2767 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2768 2768
2769 2769 _capacity_at_prologue = capacity();
2770 2770 _used_at_prologue = used();
2771 2771
2772 2772   // Delegate to the CMSCollector, which knows how to coordinate between
2773 2773 // this and any other CMS generations that it is responsible for
2774 2774 // collecting.
2775 2775 collector()->gc_prologue(full);
2776 2776 }
2777 2777
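// In outline, the prologue delegation described above proceeds as follows
// (a sketch of the call chain, not new code):
//
//   GenCollectedHeap (at a safepoint)
//     -> ConcurrentMarkSweepGeneration::gc_prologue(full)
//          -> CMSCollector::gc_prologue(full)  // claims freelist and bitmap locks
//               -> ConcurrentMarkSweepGeneration::gc_prologue_work(full, ...)
//
// A second delegation arriving while _between_prologue_and_epilogue is set
// returns immediately, since the shared work has already been done.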
2778 2778 // This is a "private" interface for use by this generation's CMSCollector.
2779 2779 // Not to be called directly by any other entity (for instance,
2780 2780 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2781 2781 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2782 2782 bool registerClosure, ModUnionClosure* modUnionClosure) {
2783 2783 assert(!incremental_collection_failed(), "Shouldn't be set yet");
2784 2784 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2785 2785 "Should be NULL");
2786 2786 if (registerClosure) {
2787 2787 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2788 2788 }
2789 2789 cmsSpace()->gc_prologue();
2790 2790 // Clear stat counters
2791 2791 NOT_PRODUCT(
2792 2792 assert(_numObjectsPromoted == 0, "check");
2793 2793 assert(_numWordsPromoted == 0, "check");
2794 2794 if (Verbose && PrintGC) {
2795 2795 gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2796 2796 SIZE_FORMAT" bytes concurrently",
2797 2797 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2798 2798 }
2799 2799 _numObjectsAllocated = 0;
2800 2800 _numWordsAllocated = 0;
2801 2801 )
2802 2802 }
2803 2803
2804 2804 void CMSCollector::gc_epilogue(bool full) {
2805 2805 // The following locking discipline assumes that we are only called
2806 2806 // when the world is stopped.
2807 2807 assert(SafepointSynchronize::is_at_safepoint(),
2808 2808 "world is stopped assumption");
2809 2809
2810 2810 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2811 2811 // if linear allocation blocks need to be appropriately marked to allow the
2812 2812   // blocks to be parsable. We also check here whether we need to nudge the
2813 2813 // CMS collector thread to start a new cycle (if it's not already active).
2814 2814 assert( Thread::current()->is_VM_thread()
2815 2815 || ( CMSScavengeBeforeRemark
2816 2816 && Thread::current()->is_ConcurrentGC_thread()),
2817 2817 "Incorrect thread type for epilogue execution");
2818 2818
2819 2819 if (!_between_prologue_and_epilogue) {
2820 2820 // We have already been invoked; this is a gc_epilogue delegation
2821 2821 // from yet another CMS generation that we are responsible for, just
2822 2822 // ignore it since all relevant work has already been done.
2823 2823 return;
2824 2824 }
2825 2825 assert(haveFreelistLocks(), "must have freelist locks");
2826 2826 assert_lock_strong(bitMapLock());
2827 2827
2828 2828 _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2829 2829
2830 2830 _cmsGen->gc_epilogue_work(full);
2831 2831
2832 2832 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2833 2833 // in case sampling was not already enabled, enable it
2834 2834 _start_sampling = true;
2835 2835 }
2836 2836 // reset _eden_chunk_array so sampling starts afresh
2837 2837 _eden_chunk_index = 0;
2838 2838
2839 2839 size_t cms_used = _cmsGen->cmsSpace()->used();
2840 2840
2841 2841 // update performance counters - this uses a special version of
2842 2842 // update_counters() that allows the utilization to be passed as a
2843 2843 // parameter, avoiding multiple calls to used().
2844 2844 //
2845 2845 _cmsGen->update_counters(cms_used);
2846 2846
2847 2847 if (CMSIncrementalMode) {
2848 2848 icms_update_allocation_limits();
2849 2849 }
2850 2850
2851 2851 bitMapLock()->unlock();
2852 2852 releaseFreelistLocks();
2853 2853
2854 2854 if (!CleanChunkPoolAsync) {
2855 2855 Chunk::clean_chunk_pool();
2856 2856 }
2857 2857
2858 2858 set_did_compact(false);
2859 2859 _between_prologue_and_epilogue = false; // ready for next cycle
2860 2860 }
2861 2861
2862 2862 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2863 2863 collector()->gc_epilogue(full);
2864 2864
2865 2865 // Also reset promotion tracking in par gc thread states.
2866 2866 if (CollectedHeap::use_parallel_gc_threads()) {
2867 2867 for (uint i = 0; i < ParallelGCThreads; i++) {
2868 2868 _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2869 2869 }
2870 2870 }
2871 2871 }
2872 2872
2873 2873 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2874 2874 assert(!incremental_collection_failed(), "Should have been cleared");
2875 2875 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2876 2876 cmsSpace()->gc_epilogue();
2877 2877 // Print stat counters
2878 2878 NOT_PRODUCT(
2879 2879 assert(_numObjectsAllocated == 0, "check");
2880 2880 assert(_numWordsAllocated == 0, "check");
2881 2881 if (Verbose && PrintGC) {
2882 2882 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2883 2883 SIZE_FORMAT" bytes",
2884 2884 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2885 2885 }
2886 2886 _numObjectsPromoted = 0;
2887 2887 _numWordsPromoted = 0;
2888 2888 )
2889 2889
2890 2890 if (PrintGC && Verbose) {
2891 2891     // The call down the chain in contiguous_available() needs the freelistLock,
2892 2892     // so print this out before releasing the freelistLock.
2893 2893 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2894 2894 contiguous_available());
2895 2895 }
2896 2896 }
2897 2897
2898 2898 #ifndef PRODUCT
2899 2899 bool CMSCollector::have_cms_token() {
2900 2900 Thread* thr = Thread::current();
2901 2901 if (thr->is_VM_thread()) {
2902 2902 return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2903 2903 } else if (thr->is_ConcurrentGC_thread()) {
2904 2904 return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2905 2905 } else if (thr->is_GC_task_thread()) {
2906 2906 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2907 2907 ParGCRareEvent_lock->owned_by_self();
2908 2908 }
2909 2909 return false;
2910 2910 }
2911 2911 #endif
2912 2912
2913 2913 // Check reachability of the given heap address in CMS generation,
2914 2914 // treating all other generations as roots.
2915 2915 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2916 2916   // We could "guarantee" below, rather than assert, but I'll
2917 2917 // leave these as "asserts" so that an adventurous debugger
2918 2918 // could try this in the product build provided some subset of
2919 2919   // the conditions were met and they were interested in the
2920 2920 // results and knew that the computation below wouldn't interfere
2921 2921 // with other concurrent computations mutating the structures
2922 2922 // being read or written.
2923 2923 assert(SafepointSynchronize::is_at_safepoint(),
2924 2924 "Else mutations in object graph will make answer suspect");
2925 2925 assert(have_cms_token(), "Should hold cms token");
2926 2926 assert(haveFreelistLocks(), "must hold free list locks");
2927 2927 assert_lock_strong(bitMapLock());
2928 2928
2929 2929 // Clear the marking bit map array before starting, but, just
2930 2930 // for kicks, first report if the given address is already marked
2931 2931 gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2932 2932 _markBitMap.isMarked(addr) ? "" : " not");
2933 2933
2934 2934 if (verify_after_remark()) {
2935 2935 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2936 2936 bool result = verification_mark_bm()->isMarked(addr);
2937 2937 gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2938 2938 result ? "IS" : "is NOT");
2939 2939 return result;
2940 2940 } else {
2941 2941 gclog_or_tty->print_cr("Could not compute result");
2942 2942 return false;
2943 2943 }
2944 2944 }
2945 2945
2946 2946
2947 2947 void
2948 2948 CMSCollector::print_on_error(outputStream* st) {
2949 2949 CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2950 2950 if (collector != NULL) {
2951 2951 CMSBitMap* bitmap = &collector->_markBitMap;
2952 2952 st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, bitmap);
2953 2953 bitmap->print_on_error(st, " Bits: ");
2954 2954
2955 2955 st->cr();
2956 2956
2957 2957 CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2958 2958 st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, mut_bitmap);
2959 2959 mut_bitmap->print_on_error(st, " Bits: ");
2960 2960 }
2961 2961 }
2962 2962
2963 2963 ////////////////////////////////////////////////////////
2964 2964 // CMS Verification Support
2965 2965 ////////////////////////////////////////////////////////
2966 2966 // Following the remark phase, the following invariant
2967 2967 // should hold -- each object in the CMS heap which is
2968 2968 // marked in markBitMap() should be marked in the verification_mark_bm().
2969 2969
2970 2970 class VerifyMarkedClosure: public BitMapClosure {
2971 2971 CMSBitMap* _marks;
2972 2972 bool _failed;
2973 2973
2974 2974 public:
2975 2975 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2976 2976
2977 2977 bool do_bit(size_t offset) {
2978 2978 HeapWord* addr = _marks->offsetToHeapWord(offset);
2979 2979 if (!_marks->isMarked(addr)) {
2980 2980 oop(addr)->print_on(gclog_or_tty);
2981 2981 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2982 2982 _failed = true;
2983 2983 }
2984 2984 return true;
2985 2985 }
2986 2986
2987 2987 bool failed() { return _failed; }
2988 2988 };
2989 2989
2990 2990 bool CMSCollector::verify_after_remark(bool silent) {
2991 2991 if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2992 2992 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2993 2993 static bool init = false;
2994 2994
2995 2995 assert(SafepointSynchronize::is_at_safepoint(),
2996 2996 "Else mutations in object graph will make answer suspect");
2997 2997 assert(have_cms_token(),
2998 2998 "Else there may be mutual interference in use of "
2999 2999 " verification data structures");
3000 3000 assert(_collectorState > Marking && _collectorState <= Sweeping,
3001 3001 "Else marking info checked here may be obsolete");
3002 3002 assert(haveFreelistLocks(), "must hold free list locks");
3003 3003 assert_lock_strong(bitMapLock());
3004 3004
3005 3005
3006 3006 // Allocate marking bit map if not already allocated
3007 3007 if (!init) { // first time
3008 3008 if (!verification_mark_bm()->allocate(_span)) {
3009 3009 return false;
3010 3010 }
3011 3011 init = true;
3012 3012 }
3013 3013
3014 3014 assert(verification_mark_stack()->isEmpty(), "Should be empty");
3015 3015
3016 3016 // Turn off refs discovery -- so we will be tracing through refs.
3017 3017 // This is as intended, because by this time
3018 3018 // GC must already have cleared any refs that need to be cleared,
3019 3019 // and traced those that need to be marked; moreover,
3020 3020   // the marking done here is not going to interfere in any
3021 3021 // way with the marking information used by GC.
3022 3022 NoRefDiscovery no_discovery(ref_processor());
3023 3023
3024 3024 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3025 3025
3026 3026 // Clear any marks from a previous round
3027 3027 verification_mark_bm()->clear_all();
3028 3028 assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
3029 3029 verify_work_stacks_empty();
3030 3030
3031 3031 GenCollectedHeap* gch = GenCollectedHeap::heap();
3032 3032 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3033 3033 // Update the saved marks which may affect the root scans.
3034 3034 gch->save_marks();
3035 3035
3036 3036 if (CMSRemarkVerifyVariant == 1) {
3037 3037 // In this first variant of verification, we complete
3038 3038     // all marking, then check if the new marks-vector is
3039 3039 // a subset of the CMS marks-vector.
3040 3040 verify_after_remark_work_1();
3041 3041 } else if (CMSRemarkVerifyVariant == 2) {
3042 3042 // In this second variant of verification, we flag an error
3043 3043 // (i.e. an object reachable in the new marks-vector not reachable
3044 3044 // in the CMS marks-vector) immediately, also indicating the
3045 3045     // identity of an object (A) that references the unmarked object (B) --
3046 3046 // presumably, a mutation to A failed to be picked up by preclean/remark?
3047 3047 verify_after_remark_work_2();
3048 3048 } else {
3049 3049 warning("Unrecognized value %d for CMSRemarkVerifyVariant",
3050 3050 CMSRemarkVerifyVariant);
3051 3051 }
3052 3052 if (!silent) gclog_or_tty->print(" done] ");
3053 3053 return true;
3054 3054 }
3055 3055
3056 3056 void CMSCollector::verify_after_remark_work_1() {
3057 3057 ResourceMark rm;
3058 3058 HandleMark hm;
3059 3059 GenCollectedHeap* gch = GenCollectedHeap::heap();
3060 3060
3061 3061 // Get a clear set of claim bits for the roots processing to work with.
3062 3062 ClassLoaderDataGraph::clear_claimed_marks();
3063 3063
3064 3064 // Mark from roots one level into CMS
3065 3065 MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
3066 3066 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3067 3067
3068 3068 gch->gen_process_roots(_cmsGen->level(),
3069 3069 true, // younger gens are roots
3070 3070 true, // activate StrongRootsScope
3071 3071 SharedHeap::ScanningOption(roots_scanning_options()),
3072 3072 should_unload_classes(),
3073 3073                          &notOlder,
3074 3074 NULL,
3075 3075 NULL); // SSS: Provide correct closure
3076 3076
3077 3077 // Now mark from the roots
3078 3078 MarkFromRootsClosure markFromRootsClosure(this, _span,
3079 3079 verification_mark_bm(), verification_mark_stack(),
3080 3080 false /* don't yield */, true /* verifying */);
3081 3081 assert(_restart_addr == NULL, "Expected pre-condition");
3082 3082 verification_mark_bm()->iterate(&markFromRootsClosure);
3083 3083 while (_restart_addr != NULL) {
3084 3084 // Deal with stack overflow: by restarting at the indicated
3085 3085 // address.
3086 3086 HeapWord* ra = _restart_addr;
3087 3087 markFromRootsClosure.reset(ra);
3088 3088 _restart_addr = NULL;
3089 3089 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3090 3090 }
3091 3091 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3092 3092 verify_work_stacks_empty();
3093 3093
3094 3094 // Marking completed -- now verify that each bit marked in
3095 3095 // verification_mark_bm() is also marked in markBitMap(); flag all
3096 3096 // errors by printing corresponding objects.
3097 3097 VerifyMarkedClosure vcl(markBitMap());
3098 3098 verification_mark_bm()->iterate(&vcl);
3099 3099 if (vcl.failed()) {
3100 3100 gclog_or_tty->print("Verification failed");
3101 3101 Universe::heap()->print_on(gclog_or_tty);
3102 3102 fatal("CMS: failed marking verification after remark");
3103 3103 }
3104 3104 }
3105 3105
3106 3106 class VerifyKlassOopsKlassClosure : public KlassClosure {
3107 3107 class VerifyKlassOopsClosure : public OopClosure {
3108 3108 CMSBitMap* _bitmap;
3109 3109 public:
3110 3110 VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
3111 3111 void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
3112 3112 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
3113 3113 } _oop_closure;
3114 3114 public:
3115 3115 VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
3116 3116 void do_klass(Klass* k) {
3117 3117 k->oops_do(&_oop_closure);
3118 3118 }
3119 3119 };
3120 3120
3121 3121 void CMSCollector::verify_after_remark_work_2() {
3122 3122 ResourceMark rm;
3123 3123 HandleMark hm;
3124 3124 GenCollectedHeap* gch = GenCollectedHeap::heap();
3125 3125
3126 3126 // Get a clear set of claim bits for the roots processing to work with.
3127 3127 ClassLoaderDataGraph::clear_claimed_marks();
3128 3128
3129 3129 // Mark from roots one level into CMS
3130 3130 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
3131 3131 markBitMap());
3132 3132   CLDToOopClosure cld_closure(&notOlder, true);
3133 3133
3134 3134 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3135 3135
3136 3136 gch->gen_process_roots(_cmsGen->level(),
3137 3137 true, // younger gens are roots
3138 3138 true, // activate StrongRootsScope
3139 3139 SharedHeap::ScanningOption(roots_scanning_options()),
3140 3140 should_unload_classes(),
3141 3141                          &notOlder,
3142 3142 NULL,
3143 3143 &cld_closure);
3144 3144
3145 3145 // Now mark from the roots
3146 3146 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
3147 3147 verification_mark_bm(), markBitMap(), verification_mark_stack());
3148 3148 assert(_restart_addr == NULL, "Expected pre-condition");
3149 3149 verification_mark_bm()->iterate(&markFromRootsClosure);
3150 3150 while (_restart_addr != NULL) {
3151 3151 // Deal with stack overflow: by restarting at the indicated
3152 3152 // address.
3153 3153 HeapWord* ra = _restart_addr;
3154 3154 markFromRootsClosure.reset(ra);
3155 3155 _restart_addr = NULL;
3156 3156 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
3157 3157 }
3158 3158 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
3159 3159 verify_work_stacks_empty();
3160 3160
3161 3161 VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
3162 3162 ClassLoaderDataGraph::classes_do(&verify_klass_oops);
3163 3163
3164 3164 // Marking completed -- now verify that each bit marked in
3165 3165 // verification_mark_bm() is also marked in markBitMap(); flag all
3166 3166 // errors by printing corresponding objects.
3167 3167 VerifyMarkedClosure vcl(markBitMap());
3168 3168 verification_mark_bm()->iterate(&vcl);
3169 3169 assert(!vcl.failed(), "Else verification above should not have succeeded");
3170 3170 }
3171 3171
3172 3172 void ConcurrentMarkSweepGeneration::save_marks() {
3173 3173 // delegate to CMS space
3174 3174 cmsSpace()->save_marks();
3175 3175 for (uint i = 0; i < ParallelGCThreads; i++) {
3176 3176 _par_gc_thread_states[i]->promo.startTrackingPromotions();
3177 3177 }
3178 3178 }
3179 3179
3180 3180 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
3181 3181 return cmsSpace()->no_allocs_since_save_marks();
3182 3182 }
3183 3183
3184 3184 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
3185 3185 \
3186 3186 void ConcurrentMarkSweepGeneration:: \
3187 3187 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
3188 3188 cl->set_generation(this); \
3189 3189 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \
3190 3190 cl->reset_generation(); \
3191 3191 save_marks(); \
3192 3192 }
3193 3193
3194 3194 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
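// For illustration only: assuming ALL_SINCE_SAVE_MARKS_CLOSURES includes the
// pair (OopsInGenClosure, _v), one expansion of the macro above is roughly:
//
//   void ConcurrentMarkSweepGeneration::
//   oop_since_save_marks_iterate_v(OopsInGenClosure* cl) {
//     cl->set_generation(this);
//     cmsSpace()->oop_since_save_marks_iterate_v(cl);
//     cl->reset_generation();
//     save_marks();
//   }
//
// i.e. one such iterator is stamped out per closure type in the list.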
3195 3195
3196 3196 void
3197 3197 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
3198 3198 cl->set_generation(this);
3199 3199 younger_refs_in_space_iterate(_cmsSpace, cl);
3200 3200 cl->reset_generation();
3201 3201 }
3202 3202
3203 3203 void
3204 3204 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
3205 3205 if (freelistLock()->owned_by_self()) {
3206 3206 Generation::oop_iterate(cl);
3207 3207 } else {
3208 3208 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3209 3209 Generation::oop_iterate(cl);
3210 3210 }
3211 3211 }
3212 3212
3213 3213 void
3214 3214 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3215 3215 if (freelistLock()->owned_by_self()) {
3216 3216 Generation::object_iterate(cl);
3217 3217 } else {
3218 3218 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3219 3219 Generation::object_iterate(cl);
3220 3220 }
3221 3221 }
3222 3222
3223 3223 void
3224 3224 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
3225 3225 if (freelistLock()->owned_by_self()) {
3226 3226 Generation::safe_object_iterate(cl);
3227 3227 } else {
3228 3228 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3229 3229 Generation::safe_object_iterate(cl);
3230 3230 }
3231 3231 }
3232 3232
3233 3233 void
3234 3234 ConcurrentMarkSweepGeneration::post_compact() {
3235 3235 }
3236 3236
3237 3237 void
3238 3238 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3239 3239 // Fix the linear allocation blocks to look like free blocks.
3240 3240
3241 3241 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3242 3242 // are not called when the heap is verified during universe initialization and
3243 3243 // at vm shutdown.
3244 3244 if (freelistLock()->owned_by_self()) {
3245 3245 cmsSpace()->prepare_for_verify();
3246 3246 } else {
3247 3247 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3248 3248 cmsSpace()->prepare_for_verify();
3249 3249 }
3250 3250 }
3251 3251
3252 3252 void
3253 3253 ConcurrentMarkSweepGeneration::verify() {
3254 3254 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3255 3255 // are not called when the heap is verified during universe initialization and
3256 3256 // at vm shutdown.
3257 3257 if (freelistLock()->owned_by_self()) {
3258 3258 cmsSpace()->verify();
3259 3259 } else {
3260 3260 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3261 3261 cmsSpace()->verify();
3262 3262 }
3263 3263 }
3264 3264
3265 3265 void CMSCollector::verify() {
3266 3266 _cmsGen->verify();
3267 3267 }
3268 3268
3269 3269 #ifndef PRODUCT
3270 3270 bool CMSCollector::overflow_list_is_empty() const {
3271 3271 assert(_num_par_pushes >= 0, "Inconsistency");
3272 3272 if (_overflow_list == NULL) {
3273 3273 assert(_num_par_pushes == 0, "Inconsistency");
3274 3274 }
3275 3275 return _overflow_list == NULL;
3276 3276 }
3277 3277
3278 3278 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3279 3279 // merely consolidate assertion checks that appear to occur together frequently.
3280 3280 void CMSCollector::verify_work_stacks_empty() const {
3281 3281 assert(_markStack.isEmpty(), "Marking stack should be empty");
3282 3282 assert(overflow_list_is_empty(), "Overflow list should be empty");
3283 3283 }
3284 3284
3285 3285 void CMSCollector::verify_overflow_empty() const {
3286 3286 assert(overflow_list_is_empty(), "Overflow list should be empty");
3287 3287 assert(no_preserved_marks(), "No preserved marks");
3288 3288 }
3289 3289 #endif // PRODUCT
3290 3290
3291 3291 // Decide if we want to enable class unloading as part of the
3292 3292 // ensuing concurrent GC cycle. We will collect and
3293 3293 // unload classes if it's the case that:
3294 3294 // (1) an explicit gc request has been made and the flag
3295 3295 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3296 3296 // (2) (a) class unloading is enabled at the command line, and
3297 3297 // (b) old gen is getting really full
3298 3298 // NOTE: Provided there is no change in the state of the heap between
3299 3299 // calls to this method, it should have idempotent results. Moreover,
3300 3300 // its results should be monotonically increasing (i.e. going from 0 to 1,
3301 3301 // but not 1 to 0) between successive calls between which the heap was
3302 3302 // not collected. For the implementation below, it must thus rely on
3303 3303 // the property that concurrent_cycles_since_last_unload()
3304 3304 // will not decrease unless a collection cycle happened and that
3305 3305 // _cmsGen->is_too_full() is
3306 3306 // itself also monotonic in that sense. See check_monotonicity()
3307 3307 // below.
3308 3308 void CMSCollector::update_should_unload_classes() {
3309 3309 _should_unload_classes = false;
3310 3310 // Condition 1 above
3311 3311 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3312 3312 _should_unload_classes = true;
3313 3313 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3314 3314     // Condition 2.b above
3315 3315 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3316 3316 CMSClassUnloadingMaxInterval)
3317 3317 || _cmsGen->is_too_full();
3318 3318 }
3319 3319 }
3320 3320
3321 3321 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3322 3322 bool res = should_concurrent_collect();
3323 3323 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3324 3324 return res;
3325 3325 }
3326 3326
3327 3327 void CMSCollector::setup_cms_unloading_and_verification_state() {
3328 3328 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3329 3329 || VerifyBeforeExit;
3330 3330 const int rso = SharedHeap::SO_AllCodeCache;
3331 3331
3332 3332 // We set the proper root for this CMS cycle here.
3333 3333 if (should_unload_classes()) { // Should unload classes this cycle
3334 3334 remove_root_scanning_option(rso); // Shrink the root set appropriately
3335 3335 set_verifying(should_verify); // Set verification state for this cycle
3336 3336 return; // Nothing else needs to be done at this time
3337 3337 }
3338 3338
3339 3339 // Not unloading classes this cycle
3340 3340   assert(!should_unload_classes(), "Inconsistency!");
3341 3341
3342 3342 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3343 3343 // Include symbols, strings and code cache elements to prevent their resurrection.
3344 3344 add_root_scanning_option(rso);
3345 3345 set_verifying(true);
3346 3346 } else if (verifying() && !should_verify) {
3347 3347 // We were verifying, but some verification flags got disabled.
3348 3348 set_verifying(false);
3349 3349 // Exclude symbols, strings and code cache elements from root scanning to
3350 3350 // reduce IM and RM pauses.
3351 3351 remove_root_scanning_option(rso);
3352 3352 }
3353 3353 }
3354 3354
3355 3355
3356 3356 #ifndef PRODUCT
3357 3357 HeapWord* CMSCollector::block_start(const void* p) const {
3358 3358 const HeapWord* addr = (HeapWord*)p;
3359 3359 if (_span.contains(p)) {
3360 3360 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3361 3361 return _cmsGen->cmsSpace()->block_start(p);
3362 3362 }
3363 3363 }
3364 3364 return NULL;
3365 3365 }
3366 3366 #endif
3367 3367
3368 3368 HeapWord*
3369 3369 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3370 3370 bool tlab,
3371 3371 bool parallel) {
3372 3372 CMSSynchronousYieldRequest yr;
3373 3373 assert(!tlab, "Can't deal with TLAB allocation");
3374 3374 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3375 3375 expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3376 3376 CMSExpansionCause::_satisfy_allocation);
3377 3377 if (GCExpandToAllocateDelayMillis > 0) {
3378 3378 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3379 3379 }
3380 3380 return have_lock_and_allocate(word_size, tlab);
3381 3381 }
3382 3382
3383 3383 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3384 3384 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3385 3385 // to CardGeneration and share it...
3386 3386 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3387 3387 return CardGeneration::expand(bytes, expand_bytes);
3388 3388 }
3389 3389
3390 3390 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3391 3391 CMSExpansionCause::Cause cause)
3392 3392 {
3393 3393
3394 3394 bool success = expand(bytes, expand_bytes);
3395 3395
3396 3396 // remember why we expanded; this information is used
3397 3397 // by shouldConcurrentCollect() when making decisions on whether to start
3398 3398 // a new CMS cycle.
3399 3399 if (success) {
3400 3400 set_expansion_cause(cause);
3401 3401 if (PrintGCDetails && Verbose) {
3402 3402 gclog_or_tty->print_cr("Expanded CMS gen for %s",
3403 3403 CMSExpansionCause::to_string(cause));
3404 3404 }
3405 3405 }
3406 3406 }
3407 3407
3408 3408 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3409 3409 HeapWord* res = NULL;
3410 3410 MutexLocker x(ParGCRareEvent_lock);
3411 3411 while (true) {
3412 3412 // Expansion by some other thread might make alloc OK now:
3413 3413 res = ps->lab.alloc(word_sz);
3414 3414 if (res != NULL) return res;
3415 3415 // If there's not enough expansion space available, give up.
3416 3416 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3417 3417 return NULL;
3418 3418 }
3419 3419 // Otherwise, we try expansion.
3420 3420 expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3421 3421 CMSExpansionCause::_allocate_par_lab);
3422 3422 // Now go around the loop and try alloc again;
3423 3423 // A competing par_promote might beat us to the expansion space,
3424 3424     // so we may go around the loop again if promotion fails again.
3425 3425 if (GCExpandToAllocateDelayMillis > 0) {
3426 3426 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3427 3427 }
3428 3428 }
3429 3429 }
3430 3430
3431 3431
3432 3432 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3433 3433 PromotionInfo* promo) {
3434 3434 MutexLocker x(ParGCRareEvent_lock);
3435 3435 size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3436 3436 while (true) {
3437 3437 // Expansion by some other thread might make alloc OK now:
3438 3438 if (promo->ensure_spooling_space()) {
3439 3439 assert(promo->has_spooling_space(),
3440 3440 "Post-condition of successful ensure_spooling_space()");
3441 3441 return true;
3442 3442 }
3443 3443 // If there's not enough expansion space available, give up.
3444 3444 if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3445 3445 return false;
3446 3446 }
3447 3447 // Otherwise, we try expansion.
3448 3448 expand(refill_size_bytes, MinHeapDeltaBytes,
3449 3449 CMSExpansionCause::_allocate_par_spooling_space);
3450 3450 // Now go around the loop and try alloc again;
3451 3451 // A competing allocation might beat us to the expansion space,
3452 3452 // so we may go around the loop again if allocation fails again.
3453 3453 if (GCExpandToAllocateDelayMillis > 0) {
3454 3454 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3455 3455 }
3456 3456 }
3457 3457 }
3458 3458
3459 3459
3460 3460 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3461 3461 assert_locked_or_safepoint(ExpandHeap_lock);
3462 3462 // Shrink committed space
3463 3463 _virtual_space.shrink_by(bytes);
3464 3464 // Shrink space; this also shrinks the space's BOT
3465 3465 _cmsSpace->set_end((HeapWord*) _virtual_space.high());
3466 3466 size_t new_word_size = heap_word_size(_cmsSpace->capacity());
3467 3467 // Shrink the shared block offset array
3468 3468 _bts->resize(new_word_size);
3469 3469 MemRegion mr(_cmsSpace->bottom(), new_word_size);
3470 3470 // Shrink the card table
3471 3471 Universe::heap()->barrier_set()->resize_covered_region(mr);
3472 3472
3473 3473 if (Verbose && PrintGC) {
3474 3474 size_t new_mem_size = _virtual_space.committed_size();
3475 3475 size_t old_mem_size = new_mem_size + bytes;
3476 3476 gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
3477 3477 name(), old_mem_size/K, new_mem_size/K);
3478 3478 }
3479 3479 }
3480 3480
3481 3481 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3482 3482 assert_locked_or_safepoint(Heap_lock);
3483 3483 size_t size = ReservedSpace::page_align_size_down(bytes);
3484 3484 // Only shrink if a compaction was done so that all the free space
3485 3485 // in the generation is in a contiguous block at the end.
3486 3486 if (size > 0 && did_compact()) {
3487 3487 shrink_by(size);
3488 3488 }
3489 3489 }
3490 3490
3491 3491 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3492 3492 assert_locked_or_safepoint(Heap_lock);
3493 3493 bool result = _virtual_space.expand_by(bytes);
3494 3494 if (result) {
3495 3495 size_t new_word_size =
3496 3496 heap_word_size(_virtual_space.committed_size());
3497 3497 MemRegion mr(_cmsSpace->bottom(), new_word_size);
3498 3498 _bts->resize(new_word_size); // resize the block offset shared array
3499 3499 Universe::heap()->barrier_set()->resize_covered_region(mr);
3500 3500 // Hmmmm... why doesn't CFLS::set_end verify locking?
3501 3501 // This is quite ugly; FIX ME XXX
3502 3502 _cmsSpace->assert_locked(freelistLock());
3503 3503 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3504 3504
3505 3505 // update the space and generation capacity counters
3506 3506 if (UsePerfData) {
3507 3507 _space_counters->update_capacity();
3508 3508 _gen_counters->update_all();
3509 3509 }
3510 3510
3511 3511 if (Verbose && PrintGC) {
3512 3512 size_t new_mem_size = _virtual_space.committed_size();
3513 3513 size_t old_mem_size = new_mem_size - bytes;
3514 3514 gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
3515 3515 name(), old_mem_size/K, bytes/K, new_mem_size/K);
3516 3516 }
3517 3517 }
3518 3518 return result;
3519 3519 }
3520 3520
3521 3521 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3522 3522 assert_locked_or_safepoint(Heap_lock);
3523 3523 bool success = true;
3524 3524 const size_t remaining_bytes = _virtual_space.uncommitted_size();
3525 3525 if (remaining_bytes > 0) {
3526 3526 success = grow_by(remaining_bytes);
3527 3527 DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3528 3528 }
3529 3529 return success;
3530 3530 }
3531 3531
3532 3532 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
3533 3533 assert_locked_or_safepoint(Heap_lock);
3534 3534 assert_lock_strong(freelistLock());
3535 3535 if (PrintGCDetails && Verbose) {
3536 3536 warning("Shrinking of CMS not yet implemented");
3537 3537 }
3538 3538 return;
3539 3539 }
3540 3540
3541 3541
3542 3542 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3543 3543 // phases.
3544 3544 class CMSPhaseAccounting: public StackObj {
3545 3545 public:
3546 3546 CMSPhaseAccounting(CMSCollector *collector,
3547 3547 const char *phase,
3548 3548 const GCId gc_id,
3549 3549 bool print_cr = true);
3550 3550 ~CMSPhaseAccounting();
3551 3551
3552 3552 private:
3553 3553 CMSCollector *_collector;
3554 3554 const char *_phase;
3555 3555 elapsedTimer _wallclock;
3556 3556 bool _print_cr;
3557 3557 const GCId _gc_id;
3558 3558
3559 3559 public:
3560 3560 // Not MT-safe; so do not pass around these StackObj's
3561 3561 // where they may be accessed by other threads.
3562 3562 jlong wallclock_millis() {
3563 3563 assert(_wallclock.is_active(), "Wall clock should not stop");
3564 3564 _wallclock.stop(); // to record time
3565 3565 jlong ret = _wallclock.milliseconds();
3566 3566 _wallclock.start(); // restart
3567 3567 return ret;
3568 3568 }
3569 3569 };
3570 3570
3571 3571 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3572 3572 const char *phase,
3573 3573 const GCId gc_id,
3574 3574 bool print_cr) :
3575 3575 _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {
3576 3576
3577 3577 if (PrintCMSStatistics != 0) {
3578 3578 _collector->resetYields();
3579 3579 }
3580 3580 if (PrintGCDetails) {
3581 3581 gclog_or_tty->gclog_stamp(_gc_id);
3582 3582 gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
3583 3583 _collector->cmsGen()->short_name(), _phase);
3584 3584 }
3585 3585 _collector->resetTimer();
3586 3586 _wallclock.start();
3587 3587 _collector->startTimer();
3588 3588 }
3589 3589
3590 3590 CMSPhaseAccounting::~CMSPhaseAccounting() {
3591 3591 assert(_wallclock.is_active(), "Wall clock should not have stopped");
3592 3592 _collector->stopTimer();
3593 3593 _wallclock.stop();
3594 3594 if (PrintGCDetails) {
3595 3595 gclog_or_tty->gclog_stamp(_gc_id);
3596 3596 gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3597 3597 _collector->cmsGen()->short_name(),
3598 3598 _phase, _collector->timerValue(), _wallclock.seconds());
3599 3599 if (_print_cr) {
3600 3600 gclog_or_tty->cr();
3601 3601 }
3602 3602 if (PrintCMSStatistics != 0) {
3603 3603 gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3604 3604 _collector->yields());
3605 3605 }
3606 3606 }
3607 3607 }
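// Typical use, as in the concurrent phases further below (see markFromRoots()):
// a CMSPhaseAccounting object is placed on the stack around a concurrent phase
// so that its constructor and destructor bracket the phase with the timer
// starts/stops and the "-concurrent-<phase>-start" / "-concurrent-<phase>:"
// log lines, e.g. (a sketch copied from the marking phase):
//
//   CMSTokenSyncWithLocks ts(true, bitMapLock());
//   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
//   CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
//   ... do the concurrent marking work ...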
3608 3608
3609 3609 // CMS work
3610 3610
3611 3611 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
3612 3612 class CMSParMarkTask : public AbstractGangTask {
3613 3613 protected:
3614 3614 CMSCollector* _collector;
3615 3615 int _n_workers;
3616 3616 CMSParMarkTask(const char* name, CMSCollector* collector, int n_workers) :
3617 3617 AbstractGangTask(name),
3618 3618 _collector(collector),
3619 3619 _n_workers(n_workers) {}
3620 3620 // Work method in support of parallel rescan ... of young gen spaces
3621 3621 void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
3622 3622 ContiguousSpace* space,
3623 3623 HeapWord** chunk_array, size_t chunk_top);
3624 3624 void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
3625 3625 };
3626 3626
3627 3627 // Parallel initial mark task
3628 3628 class CMSParInitialMarkTask: public CMSParMarkTask {
3629 3629 public:
3630 3630 CMSParInitialMarkTask(CMSCollector* collector, int n_workers) :
3631 3631 CMSParMarkTask("Scan roots and young gen for initial mark in parallel",
3632 3632 collector, n_workers) {}
3633 3633 void work(uint worker_id);
3634 3634 };
3635 3635
3636 3636 // Checkpoint the roots into this generation from outside
3637 3637 // this generation. [Note this initial checkpoint need only
3638 3638 // be approximate -- we'll do a catch up phase subsequently.]
3639 3639 void CMSCollector::checkpointRootsInitial(bool asynch) {
3640 3640 assert(_collectorState == InitialMarking, "Wrong collector state");
3641 3641 check_correct_thread_executing();
3642 3642 TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
3643 3643
3644 3644 save_heap_summary();
3645 3645 report_heap_summary(GCWhen::BeforeGC);
3646 3646
3647 3647 ReferenceProcessor* rp = ref_processor();
3648 3648 SpecializationStats::clear();
3649 3649 assert(_restart_addr == NULL, "Control point invariant");
3650 3650 if (asynch) {
3651 3651 // acquire locks for subsequent manipulations
3652 3652 MutexLockerEx x(bitMapLock(),
3653 3653 Mutex::_no_safepoint_check_flag);
3654 3654 checkpointRootsInitialWork(asynch);
3655 3655 // enable ("weak") refs discovery
3656 3656 rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
3657 3657 _collectorState = Marking;
3658 3658 } else {
3659 3659 // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
3660 3660 // which recognizes if we are a CMS generation, and doesn't try to turn on
3661 3661 // discovery; verify that they aren't meddling.
3662 3662 assert(!rp->discovery_is_atomic(),
3663 3663 "incorrect setting of discovery predicate");
3664 3664 assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3665 3665 "ref discovery for this generation kind");
3666 3666 // already have locks
3667 3667 checkpointRootsInitialWork(asynch);
3668 3668 // now enable ("weak") refs discovery
3669 3669 rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
3670 3670 _collectorState = Marking;
3671 3671 }
3672 3672 SpecializationStats::print();
3673 3673 }
3674 3674
3675 3675 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3676 3676 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3677 3677 assert(_collectorState == InitialMarking, "just checking");
3678 3678
3679 3679 // If there has not been a GC[n-1] since last GC[n] cycle completed,
3680 3680 // precede our marking with a collection of all
3681 3681 // younger generations to keep floating garbage to a minimum.
3682 3682 // XXX: we won't do this for now -- it's an optimization to be done later.
3683 3683
3684 3684 // already have locks
3685 3685 assert_lock_strong(bitMapLock());
3686 3686 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3687 3687
3688 3688 // Setup the verification and class unloading state for this
3689 3689 // CMS collection cycle.
3690 3690 setup_cms_unloading_and_verification_state();
3691 3691
3692 3692 NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
3693 3693 PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
3694 3694 if (UseAdaptiveSizePolicy) {
3695 3695 size_policy()->checkpoint_roots_initial_begin();
3696 3696 }
3697 3697
3698 3698 // Reset all the PLAB chunk arrays if necessary.
3699 3699 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3700 3700 reset_survivor_plab_arrays();
3701 3701 }
3702 3702
3703 3703 ResourceMark rm;
3704 3704 HandleMark hm;
3705 3705
3706 3706 MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3707 3707 GenCollectedHeap* gch = GenCollectedHeap::heap();
3708 3708
3709 3709 verify_work_stacks_empty();
3710 3710 verify_overflow_empty();
3711 3711
3712 3712 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
3713 3713 // Update the saved marks which may affect the root scans.
3714 3714 gch->save_marks();
3715 3715
3716 3716 // weak reference processing has not started yet.
3717 3717 ref_processor()->set_enqueuing_is_done(false);
3718 3718
3719 3719 // Need to remember all newly created CLDs,
3720 3720 // so that we can guarantee that the remark finds them.
3721 3721 ClassLoaderDataGraph::remember_new_clds(true);
3722 3722
3723 3723 // Whenever a CLD is found, it will be claimed before proceeding to mark
3724 3724 // the klasses. The claimed marks need to be cleared before marking starts.
3725 3725 ClassLoaderDataGraph::clear_claimed_marks();
3726 3726
3727 3727 if (CMSPrintEdenSurvivorChunks) {
3728 3728 print_eden_and_survivor_chunk_arrays();
3729 3729 }
3730 3730
3731 3731 {
3732 3732 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3733 3733 if (CMSParallelInitialMarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
3734 3734 // The parallel version.
3735 3735 FlexibleWorkGang* workers = gch->workers();
3736 3736 assert(workers != NULL, "Need parallel worker threads.");
3737 3737 int n_workers = workers->active_workers();
3738 3738 CMSParInitialMarkTask tsk(this, n_workers);
3739 3739 gch->set_par_threads(n_workers);
3740 3740 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3741 3741 if (n_workers > 1) {
3742 3742 GenCollectedHeap::StrongRootsScope srs(gch);
3743 3743 workers->run_task(&tsk);
3744 3744 } else {
3745 3745 GenCollectedHeap::StrongRootsScope srs(gch);
3746 3746 tsk.work(0);
3747 3747 }
3748 3748 gch->set_par_threads(0);
3749 3749 } else {
3750 3750 // The serial version.
3751 3751       CLDToOopClosure cld_closure(&notOlder, true);
3752 3752 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3753 3753 gch->gen_process_roots(_cmsGen->level(),
3754 3754 true, // younger gens are roots
3755 3755 true, // activate StrongRootsScope
3756 3756 SharedHeap::ScanningOption(roots_scanning_options()),
3757 3757 should_unload_classes(),
3758 3758                              &notOlder,
3759 3759 NULL,
3760 3760 &cld_closure);
3761 3761 }
3762 3762 }
3763 3763
3764 3764 // Clear mod-union table; it will be dirtied in the prologue of
3765 3765   // the CMS generation on each younger generation collection.
3766 3766
3767 3767 assert(_modUnionTable.isAllClear(),
3768 3768 "Was cleared in most recent final checkpoint phase"
3769 3769 " or no bits are set in the gc_prologue before the start of the next "
3770 3770 "subsequent marking phase.");
3771 3771
3772 3772 assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3773 3773
3774 3774 // Save the end of the used_region of the constituent generations
3775 3775 // to be used to limit the extent of sweep in each generation.
3776 3776 save_sweep_limits();
3777 3777 if (UseAdaptiveSizePolicy) {
3778 3778 size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3779 3779 }
3780 3780 verify_overflow_empty();
3781 3781 }
3782 3782
3783 3783 bool CMSCollector::markFromRoots(bool asynch) {
3784 3784 // we might be tempted to assert that:
3785 3785 // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3786 3786 // "inconsistent argument?");
3787 3787 // However that wouldn't be right, because it's possible that
3788 3788 // a safepoint is indeed in progress as a younger generation
3789 3789 // stop-the-world GC happens even as we mark in this generation.
3790 3790 assert(_collectorState == Marking, "inconsistent state?");
3791 3791 check_correct_thread_executing();
3792 3792 verify_overflow_empty();
3793 3793
3794 3794 bool res;
3795 3795 if (asynch) {
3796 3796
3797 3797 // Start the timers for adaptive size policy for the concurrent phases
3798 3798 // Do it here so that the foreground MS can use the concurrent
3799 3799     // timer since a foreground MS might have the sweep done concurrently
3800 3800 // or STW.
3801 3801 if (UseAdaptiveSizePolicy) {
3802 3802 size_policy()->concurrent_marking_begin();
3803 3803 }
3804 3804
3805 3805 // Weak ref discovery note: We may be discovering weak
3806 3806 // refs in this generation concurrent (but interleaved) with
3807 3807 // weak ref discovery by a younger generation collector.
3808 3808
3809 3809 CMSTokenSyncWithLocks ts(true, bitMapLock());
3810 3810 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3811 3811 CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3812 3812 res = markFromRootsWork(asynch);
3813 3813 if (res) {
3814 3814 _collectorState = Precleaning;
3815 3815 } else { // We failed and a foreground collection wants to take over
3816 3816 assert(_foregroundGCIsActive, "internal state inconsistency");
3817 3817 assert(_restart_addr == NULL, "foreground will restart from scratch");
3818 3818 if (PrintGCDetails) {
3819 3819 gclog_or_tty->print_cr("bailing out to foreground collection");
3820 3820 }
3821 3821 }
3822 3822 if (UseAdaptiveSizePolicy) {
3823 3823 size_policy()->concurrent_marking_end();
3824 3824 }
3825 3825 } else {
3826 3826 assert(SafepointSynchronize::is_at_safepoint(),
3827 3827 "inconsistent with asynch == false");
3828 3828 if (UseAdaptiveSizePolicy) {
3829 3829 size_policy()->ms_collection_marking_begin();
3830 3830 }
3831 3831 // already have locks
3832 3832 res = markFromRootsWork(asynch);
3833 3833 _collectorState = FinalMarking;
3834 3834 if (UseAdaptiveSizePolicy) {
3835 3835 GenCollectedHeap* gch = GenCollectedHeap::heap();
3836 3836 size_policy()->ms_collection_marking_end(gch->gc_cause());
3837 3837 }
3838 3838 }
3839 3839 verify_overflow_empty();
3840 3840 return res;
3841 3841 }
3842 3842
3843 3843 bool CMSCollector::markFromRootsWork(bool asynch) {
3844 3844 // iterate over marked bits in bit map, doing a full scan and mark
3845 3845 // from these roots using the following algorithm:
3846 3846 // . if oop is to the right of the current scan pointer,
3847 3847 // mark corresponding bit (we'll process it later)
3848 3848 // . else (oop is to left of current scan pointer)
3849 3849 // push oop on marking stack
3850 3850 // . drain the marking stack
3851 3851
3852 3852 // Note that when we do a marking step we need to hold the
3853 3853 // bit map lock -- recall that direct allocation (by mutators)
3854 3854 // and promotion (by younger generation collectors) is also
3855 3855 // marking the bit map. [the so-called allocate live policy.]
3856 3856 // Because the implementation of bit map marking is not
3857 3857 // robust wrt simultaneous marking of bits in the same word,
3858 3858 // we need to make sure that there is no such interference
3859 3859 // between concurrent such updates.
3860 3860
3861 3861 // already have locks
3862 3862 assert_lock_strong(bitMapLock());
3863 3863
3864 3864 verify_work_stacks_empty();
3865 3865 verify_overflow_empty();
3866 3866 bool result = false;
3867 3867 if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3868 3868 result = do_marking_mt(asynch);
3869 3869 } else {
3870 3870 result = do_marking_st(asynch);
3871 3871 }
3872 3872 return result;
3873 3873 }
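// For orientation only -- a condensed, single-threaded sketch of the finger
// protocol described at the top of markFromRootsWork() above. This is
// illustrative pseudocode, not part of the collector; next_marked() and the
// per-field loop are hypothetical helpers:
//
//   for (HeapWord* finger = bottom; finger < top; finger = next_marked(finger)) {
//     oop obj = oop(finger);
//     for (each reference field f of obj) {
//       HeapWord* addr = (HeapWord*)f;
//       if (!bit_map.isMarked(addr)) {
//         bit_map.mark(addr);
//         if (addr < finger) {
//           stack.push(f);   // behind the scan point: must be traced explicitly
//         }                  // ahead of the scan point: the scan will reach it
//       }
//     }
//     drain(stack);          // process everything pushed so far
//   }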
3874 3874
3875 3875 // Forward decl
3876 3876 class CMSConcMarkingTask;
3877 3877
3878 3878 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3879 3879 CMSCollector* _collector;
3880 3880 CMSConcMarkingTask* _task;
3881 3881 public:
3882 3882 virtual void yield();
3883 3883
3884 3884 // "n_threads" is the number of threads to be terminated.
3885 3885 // "queue_set" is a set of work queues of other threads.
3886 3886 // "collector" is the CMS collector associated with this task terminator.
3887 3887 // "yield" indicates whether we need the gang as a whole to yield.
3888 3888 CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3889 3889 ParallelTaskTerminator(n_threads, queue_set),
3890 3890 _collector(collector) { }
3891 3891
3892 3892 void set_task(CMSConcMarkingTask* task) {
3893 3893 _task = task;
3894 3894 }
3895 3895 };
3896 3896
3897 3897 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3898 3898 CMSConcMarkingTask* _task;
3899 3899 public:
3900 3900 bool should_exit_termination();
3901 3901 void set_task(CMSConcMarkingTask* task) {
3902 3902 _task = task;
3903 3903 }
3904 3904 };
3905 3905
3906 3906 // MT Concurrent Marking Task
3907 3907 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3908 3908 CMSCollector* _collector;
3909 3909 int _n_workers; // requested/desired # workers
3910 3910 bool _asynch;
3911 3911 bool _result;
3912 3912 CompactibleFreeListSpace* _cms_space;
3913 3913 char _pad_front[64]; // padding to ...
3914 3914 HeapWord* _global_finger; // ... avoid sharing cache line
3915 3915 char _pad_back[64];
3916 3916 HeapWord* _restart_addr;
3917 3917
3918 3918 // Exposed here for yielding support
3919 3919 Mutex* const _bit_map_lock;
3920 3920
3921 3921 // The per thread work queues, available here for stealing
3922 3922 OopTaskQueueSet* _task_queues;
3923 3923
3924 3924 // Termination (and yielding) support
3925 3925 CMSConcMarkingTerminator _term;
3926 3926 CMSConcMarkingTerminatorTerminator _term_term;
3927 3927
3928 3928 public:
3929 3929 CMSConcMarkingTask(CMSCollector* collector,
3930 3930 CompactibleFreeListSpace* cms_space,
3931 3931 bool asynch,
3932 3932 YieldingFlexibleWorkGang* workers,
3933 3933 OopTaskQueueSet* task_queues):
3934 3934 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3935 3935 _collector(collector),
3936 3936 _cms_space(cms_space),
3937 3937 _asynch(asynch), _n_workers(0), _result(true),
3938 3938 _task_queues(task_queues),
3939 3939 _term(_n_workers, task_queues, _collector),
3940 3940 _bit_map_lock(collector->bitMapLock())
3941 3941 {
3942 3942 _requested_size = _n_workers;
3943 3943 _term.set_task(this);
3944 3944 _term_term.set_task(this);
3945 3945 _restart_addr = _global_finger = _cms_space->bottom();
3946 3946 }
3947 3947
3948 3948
3949 3949 OopTaskQueueSet* task_queues() { return _task_queues; }
3950 3950
3951 3951 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3952 3952
3953 3953 HeapWord** global_finger_addr() { return &_global_finger; }
3954 3954
3955 3955 CMSConcMarkingTerminator* terminator() { return &_term; }
3956 3956
3957 3957 virtual void set_for_termination(int active_workers) {
3958 3958 terminator()->reset_for_reuse(active_workers);
3959 3959 }
3960 3960
3961 3961 void work(uint worker_id);
3962 3962 bool should_yield() {
3963 3963 return ConcurrentMarkSweepThread::should_yield()
3964 3964 && !_collector->foregroundGCIsActive()
3965 3965 && _asynch;
3966 3966 }
3967 3967
3968 3968 virtual void coordinator_yield(); // stuff done by coordinator
3969 3969 bool result() { return _result; }
3970 3970
3971 3971 void reset(HeapWord* ra) {
3972 3972 assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
3973 3973 _restart_addr = _global_finger = ra;
3974 3974 _term.reset_for_reuse();
3975 3975 }
3976 3976
3977 3977 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3978 3978 OopTaskQueue* work_q);
3979 3979
3980 3980 private:
3981 3981 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3982 3982 void do_work_steal(int i);
3983 3983 void bump_global_finger(HeapWord* f);
3984 3984 };
3985 3985
3986 3986 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3987 3987 assert(_task != NULL, "Error");
3988 3988 return _task->yielding();
3989 3989 // Note that we do not need the disjunct || _task->should_yield() above
3990 3990 // because we want terminating threads to yield only if the task
3991 3991 // is already in the midst of yielding, which happens only after at least one
3992 3992 // thread has yielded.
3993 3993 }
3994 3994
3995 3995 void CMSConcMarkingTerminator::yield() {
3996 3996 if (_task->should_yield()) {
3997 3997 _task->yield();
3998 3998 } else {
3999 3999 ParallelTaskTerminator::yield();
4000 4000 }
4001 4001 }
4002 4002
4003 4003 ////////////////////////////////////////////////////////////////
4004 4004 // Concurrent Marking Algorithm Sketch
4005 4005 ////////////////////////////////////////////////////////////////
4006 4006 // Until all tasks exhausted (both spaces):
4007 4007 // -- claim next available chunk
4008 4008 // -- bump global finger via CAS
4009 4009 // -- find first object that starts in this chunk
4010 4010 // and start scanning bitmap from that position
4011 4011 // -- scan marked objects for oops
4012 4012 // -- CAS-mark target, and if successful:
4013 4013 // . if target oop is above global finger (volatile read)
4014 4014 // nothing to do
4015 4015 // . if target oop is in chunk and above local finger
4016 4016 // then nothing to do
4017 4017 // . else push on work-queue
4018 4018 // -- Deal with possible overflow issues:
4019 4019 // . local work-queue overflow causes stuff to be pushed on
4020 4020 // global (common) overflow queue
4021 4021 // . always first empty local work queue
4022 4022 // . then get a batch of oops from global work queue if any
4023 4023 // . then do work stealing
4024 4024 // -- When all tasks claimed (both spaces)
4025 4025 // and local work queue empty,
4026 4026 // then in a loop do:
4027 4027 // . check global overflow stack; steal a batch of oops and trace
4028 4028 // . try to steal from other threads if GOS is empty
4029 4029 // . if neither is available, offer termination
4030 4030 // -- Terminate and return result
4031 4031 //
4032 4032 void CMSConcMarkingTask::work(uint worker_id) {
4033 4033 elapsedTimer _timer;
4034 4034 ResourceMark rm;
4035 4035 HandleMark hm;
4036 4036
4037 4037 DEBUG_ONLY(_collector->verify_overflow_empty();)
4038 4038
4039 4039 // Before we begin work, our work queue should be empty
4040 4040 assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
4041 4041 // Scan the bitmap covering _cms_space, tracing through grey objects.
4042 4042 _timer.start();
4043 4043 do_scan_and_mark(worker_id, _cms_space);
4044 4044 _timer.stop();
4045 4045 if (PrintCMSStatistics != 0) {
4046 4046 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
4047 4047 worker_id, _timer.seconds());
4048 4048 // XXX: need xxx/xxx type of notation, two timers
4049 4049 }
4050 4050
4051 4051 // ... do work stealing
4052 4052 _timer.reset();
4053 4053 _timer.start();
4054 4054 do_work_steal(worker_id);
4055 4055 _timer.stop();
4056 4056 if (PrintCMSStatistics != 0) {
4057 4057 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
4058 4058 worker_id, _timer.seconds());
4059 4059 // XXX: need xxx/xxx type of notation, two timers
4060 4060 }
4061 4061 assert(_collector->_markStack.isEmpty(), "Should have been emptied");
4062 4062 assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
4063 4063 // Note that under the current task protocol, the
4064 4064 // following assertion holds even if the spaces have
4065 4065 // expanded since the completion of the concurrent
4066 4066 // marking. XXX This will likely change under a strict
4067 4067 // ABORT semantics.
4068 4068 // After perm removal the comparison was changed to
4069 4069 // greater than or equal to from strictly greater than.
4070 4070 // Before perm removal the highest address sweep would
4071 4071 // have been at the end of perm gen but now is at the
4072 4072 // end of the tenured gen.
4073 4073 assert(_global_finger >= _cms_space->end(),
4074 4074 "All tasks have been completed");
4075 4075 DEBUG_ONLY(_collector->verify_overflow_empty();)
4076 4076 }
4077 4077
4078 4078 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
4079 4079 HeapWord* read = _global_finger;
4080 4080 HeapWord* cur = read;
4081 4081 while (f > read) {
4082 4082 cur = read;
4083 4083 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
4084 4084 if (cur == read) {
4085 4085 // our cas succeeded
4086 4086 assert(_global_finger >= f, "protocol consistency");
4087 4087 break;
4088 4088 }
4089 4089 }
4090 4090 }
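// Illustrative note on the lock-free bump above (not from the original
// comments): this is a standard "monotonically advancing" CAS loop. Example
// interleaving with two workers A (f == 0x1000) and B (f == 0x2000) racing
// while _global_finger == 0x0800:
//   both read 0x0800; B's cmpxchg succeeds, installing 0x2000;
//   A's cmpxchg fails and returns the current value 0x2000, so A re-enters
//   the loop with read == 0x2000; since (f > read) is now false, A exits
//   without writing -- the finger only ever moves forward.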
4091 4091
4092 4092 // This is really inefficient, and should be redone by
4093 4093 // using (not yet available) block-read and -write interfaces to the
4094 4094 // stack and the work_queue. XXX FIX ME !!!
4095 4095 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
4096 4096 OopTaskQueue* work_q) {
4097 4097 // Fast lock-free check
4098 4098 if (ovflw_stk->length() == 0) {
4099 4099 return false;
4100 4100 }
4101 4101 assert(work_q->size() == 0, "Shouldn't steal");
4102 4102 MutexLockerEx ml(ovflw_stk->par_lock(),
4103 4103 Mutex::_no_safepoint_check_flag);
4104 4104 // Grab up to 1/4 the size of the work queue
4105 4105 size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4106 4106 (size_t)ParGCDesiredObjsFromOverflowList);
4107 4107 num = MIN2(num, ovflw_stk->length());
4108 4108 for (int i = (int) num; i > 0; i--) {
4109 4109 oop cur = ovflw_stk->pop();
4110 4110 assert(cur != NULL, "Counted wrong?");
4111 4111 work_q->push(cur);
4112 4112 }
4113 4113 return num > 0;
4114 4114 }
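// Illustrative arithmetic for the batch size computed above (all values are
// invented for the example): with work_q->max_elems() == 1024,
// work_q->size() == 0, ParGCDesiredObjsFromOverflowList == 20 and an
// overflow stack of length 500:
//   num = MIN2((1024 - 0)/4, 20) = 20;   num = MIN2(20, 500) = 20;
// so at most 20 oops are popped from the (locked) overflow stack and pushed
// onto the caller's work queue per call.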
4115 4115
4116 4116 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
4117 4117 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4118 4118 int n_tasks = pst->n_tasks();
4119 4119 // We allow that there may be no tasks to do here because
4120 4120 // we are restarting after a stack overflow.
4121 4121 assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
4122 4122 uint nth_task = 0;
4123 4123
4124 4124 HeapWord* aligned_start = sp->bottom();
4125 4125 if (sp->used_region().contains(_restart_addr)) {
4126 4126 // Align down to a card boundary for the start of 0th task
4127 4127 // for this space.
4128 4128 aligned_start =
4129 4129 (HeapWord*)align_size_down((uintptr_t)_restart_addr,
4130 4130 CardTableModRefBS::card_size);
4131 4131 }
4132 4132
4133 4133 size_t chunk_size = sp->marking_task_size();
4134 4134 while (!pst->is_task_claimed(/* reference */ nth_task)) {
4135 4135 // Having claimed the nth task in this space,
4136 4136 // compute the chunk that it corresponds to:
4137 4137 MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
4138 4138 aligned_start + (nth_task+1)*chunk_size);
4139 4139 // Try and bump the global finger via a CAS;
4140 4140 // note that we need to do the global finger bump
4141 4141 // _before_ taking the intersection below, because
4142 4142 // the task corresponding to that region will be
4143 4143 // deemed done even if the used_region() expands
4144 4144 // because of allocation -- as it almost certainly will
4145 4145 // during start-up while the threads yield in the
4146 4146 // closure below.
4147 4147 HeapWord* finger = span.end();
4148 4148 bump_global_finger(finger); // atomically
4149 4149 // There are null tasks here corresponding to chunks
4150 4150 // beyond the "top" address of the space.
4151 4151 span = span.intersection(sp->used_region());
4152 4152 if (!span.is_empty()) { // Non-null task
4153 4153 HeapWord* prev_obj;
4154 4154 assert(!span.contains(_restart_addr) || nth_task == 0,
4155 4155 "Inconsistency");
4156 4156 if (nth_task == 0) {
4157 4157 // For the 0th task, we'll not need to compute a block_start.
4158 4158 if (span.contains(_restart_addr)) {
4159 4159 // In the case of a restart because of stack overflow,
4160 4160 // we might additionally skip a chunk prefix.
4161 4161 prev_obj = _restart_addr;
4162 4162 } else {
4163 4163 prev_obj = span.start();
4164 4164 }
4165 4165 } else {
4166 4166 // We want to skip the first object because
4167 4167 // the protocol is to scan any object in its entirety
4168 4168 // that _starts_ in this span; a fortiori, any
4169 4169 // object starting in an earlier span is scanned
4170 4170 // as part of an earlier claimed task.
4171 4171 // Below we use the "careful" version of block_start
4172 4172 // so we do not try to navigate uninitialized objects.
4173 4173 prev_obj = sp->block_start_careful(span.start());
4174 4174 // Below we use a variant of block_size that uses the
4175 4175 // Printezis bits to avoid waiting for allocated
4176 4176 // objects to become initialized/parsable.
4177 4177 while (prev_obj < span.start()) {
4178 4178 size_t sz = sp->block_size_no_stall(prev_obj, _collector);
4179 4179 if (sz > 0) {
4180 4180 prev_obj += sz;
4181 4181 } else {
4182 4182 // In this case we may end up doing a bit of redundant
4183 4183 // scanning, but that appears unavoidable, short of
4184 4184 // locking the free list locks; see bug 6324141.
4185 4185 break;
4186 4186 }
4187 4187 }
4188 4188 }
4189 4189 if (prev_obj < span.end()) {
4190 4190 MemRegion my_span = MemRegion(prev_obj, span.end());
4191 4191 // Do the marking work within a non-empty span --
4192 4192 // the last argument to the constructor indicates whether the
4193 4193 // iteration should be incremental with periodic yields.
4194 4194 Par_MarkFromRootsClosure cl(this, _collector, my_span,
4195 4195 &_collector->_markBitMap,
4196 4196 work_queue(i),
4197 4197 &_collector->_markStack,
4198 4198 _asynch);
4199 4199 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
4200 4200 } // else nothing to do for this task
4201 4201 } // else nothing to do for this task
4202 4202 }
4203 4203 // We'd be tempted to assert here that since there are no
4204 4204 // more tasks left to claim in this space, the global_finger
4205 4205 // must exceed space->top() and a fortiori space->end(). However,
4206 4206 // that would not quite be correct because the bumping of
4207 4207 // global_finger occurs strictly after the claiming of a task,
4208 4208 // so by the time we reach here the global finger may not yet
4209 4209 // have been bumped up by the thread that claimed the last
4210 4210 // task.
4211 4211 pst->all_tasks_completed();
4212 4212 }
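// Illustrative example of the task-to-chunk mapping above (numbers invented):
// with chunk_size == 1024 HeapWords and nth_task == 2, the claimed chunk is
// the half-open word range
//   [aligned_start + 2048, aligned_start + 3072).
// The global finger is bumped to the chunk's end *before* the chunk is
// intersected with used_region(), so a chunk lying entirely above top()
// simply degenerates into an empty (null) task.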
4213 4213
4214 4214 class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
4215 4215 private:
4216 4216 CMSCollector* _collector;
4217 4217 CMSConcMarkingTask* _task;
4218 4218 MemRegion _span;
4219 4219 CMSBitMap* _bit_map;
4220 4220 CMSMarkStack* _overflow_stack;
4221 4221 OopTaskQueue* _work_queue;
4222 4222 protected:
4223 4223 DO_OOP_WORK_DEFN
4224 4224 public:
4225 4225 Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
4226 4226 CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
4227 4227 MetadataAwareOopClosure(collector->ref_processor()),
4228 4228 _collector(collector),
4229 4229 _task(task),
4230 4230 _span(collector->_span),
4231 4231 _work_queue(work_queue),
4232 4232 _bit_map(bit_map),
4233 4233 _overflow_stack(overflow_stack)
4234 4234 { }
4235 4235 virtual void do_oop(oop* p);
4236 4236 virtual void do_oop(narrowOop* p);
4237 4237
4238 4238 void trim_queue(size_t max);
4239 4239 void handle_stack_overflow(HeapWord* lost);
4240 4240 void do_yield_check() {
4241 4241 if (_task->should_yield()) {
4242 4242 _task->yield();
4243 4243 }
4244 4244 }
4245 4245 };
4246 4246
4247 4247 // Grey object scanning during work stealing phase --
4248 4248 // the salient assumption here is that any references
4249 4249 // that are in these stolen objects being scanned must
4250 4250 // already have been initialized (else they would not have
4251 4251 // been published), so we do not need to check for
4252 4252 // uninitialized objects before pushing here.
4253 4253 void Par_ConcMarkingClosure::do_oop(oop obj) {
4254 4254 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
4255 4255 HeapWord* addr = (HeapWord*)obj;
4256 4256 // Check if oop points into the CMS generation
4257 4257 // and is not marked
4258 4258 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4259 4259 // a white object ...
4260 4260 // If we manage to "claim" the object, by being the
4261 4261 // first thread to mark it, then we push it on our
4262 4262 // marking stack
4263 4263 if (_bit_map->par_mark(addr)) { // ... now grey
4264 4264 // push on work queue (grey set)
4265 4265 bool simulate_overflow = false;
4266 4266 NOT_PRODUCT(
4267 4267 if (CMSMarkStackOverflowALot &&
4268 4268 _collector->simulate_overflow()) {
4269 4269 // simulate a stack overflow
4270 4270 simulate_overflow = true;
4271 4271 }
4272 4272 )
4273 4273 if (simulate_overflow ||
4274 4274 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
4275 4275 // stack overflow
4276 4276 if (PrintCMSStatistics != 0) {
4277 4277 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
4278 4278 SIZE_FORMAT, _overflow_stack->capacity());
4279 4279 }
4280 4280 // We cannot assert that the overflow stack is full because
4281 4281 // it may have been emptied since.
4282 4282 assert(simulate_overflow ||
4283 4283 _work_queue->size() == _work_queue->max_elems(),
4284 4284 "Else push should have succeeded");
4285 4285 handle_stack_overflow(addr);
4286 4286 }
4287 4287 } // Else, some other thread got there first
4288 4288 do_yield_check();
4289 4289 }
4290 4290 }
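// In outline, the claiming protocol implemented above is (illustrative
// restatement only, overflow simulation omitted):
//   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {   // white
//     if (_bit_map->par_mark(addr)) {                          // we won: now grey
//       if (!_work_queue->push(obj) &&                         // local queue first,
//           !_overflow_stack->par_push(obj)) {                 // then the shared stack,
//         handle_stack_overflow(addr);                         // else arrange a restart
//       }
//     }                                // else some other thread claimed it first
//   }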
4291 4291
4292 4292 void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4293 4293 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4294 4294
4295 4295 void Par_ConcMarkingClosure::trim_queue(size_t max) {
4296 4296 while (_work_queue->size() > max) {
4297 4297 oop new_oop;
4298 4298 if (_work_queue->pop_local(new_oop)) {
4299 4299 assert(new_oop->is_oop(), "Should be an oop");
4300 4300 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4301 4301 assert(_span.contains((HeapWord*)new_oop), "Not in span");
4302 4302 new_oop->oop_iterate(this); // do_oop() above
4303 4303 do_yield_check();
4304 4304 }
4305 4305 }
4306 4306 }
4307 4307
4308 4308 // Upon stack overflow, we discard (part of) the stack,
4309 4309 // remembering the least address amongst those discarded
4310 4310 // in CMSCollector's _restart_address.
4311 4311 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4312 4312 // We need to do this under a mutex to prevent other
4313 4313 // workers from interfering with the work done below.
4314 4314 MutexLockerEx ml(_overflow_stack->par_lock(),
4315 4315 Mutex::_no_safepoint_check_flag);
4316 4316 // Remember the least grey address discarded
4317 4317 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4318 4318 _collector->lower_restart_addr(ra);
4319 4319 _overflow_stack->reset(); // discard stack contents
4320 4320 _overflow_stack->expand(); // expand the stack if possible
4321 4321 }
4322 4322
4323 4323
4324 4324 void CMSConcMarkingTask::do_work_steal(int i) {
4325 4325 OopTaskQueue* work_q = work_queue(i);
4326 4326 oop obj_to_scan;
4327 4327 CMSBitMap* bm = &(_collector->_markBitMap);
4328 4328 CMSMarkStack* ovflw = &(_collector->_markStack);
4329 4329 int* seed = _collector->hash_seed(i);
4330 4330 Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
4331 4331 while (true) {
4332 4332 cl.trim_queue(0);
4333 4333 assert(work_q->size() == 0, "Should have been emptied above");
4334 4334 if (get_work_from_overflow_stack(ovflw, work_q)) {
4335 4335 // Can't assert below because the work obtained from the
4336 4336 // overflow stack may already have been stolen from us.
4337 4337 // assert(work_q->size() > 0, "Work from overflow stack");
4338 4338 continue;
4339 4339 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4340 4340 assert(obj_to_scan->is_oop(), "Should be an oop");
4341 4341 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4342 4342 obj_to_scan->oop_iterate(&cl);
4343 4343 } else if (terminator()->offer_termination(&_term_term)) {
4344 4344 assert(work_q->size() == 0, "Impossible!");
4345 4345 break;
4346 4346 } else if (yielding() || should_yield()) {
4347 4347 yield();
4348 4348 }
4349 4349 }
4350 4350 }
4351 4351
4352 4352 // This is run by the CMS (coordinator) thread.
4353 4353 void CMSConcMarkingTask::coordinator_yield() {
4354 4354 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4355 4355 "CMS thread should hold CMS token");
4356 4356 // First give up the locks, then yield, then re-lock
4357 4357 // We should probably use a constructor/destructor idiom to
4358 4358 // do this unlock/lock or modify the MutexUnlocker class to
4359 4359 // serve our purpose. XXX
4360 4360 assert_lock_strong(_bit_map_lock);
4361 4361 _bit_map_lock->unlock();
4362 4362 ConcurrentMarkSweepThread::desynchronize(true);
4363 4363 ConcurrentMarkSweepThread::acknowledge_yield_request();
4364 4364 _collector->stopTimer();
4365 4365 if (PrintCMSStatistics != 0) {
4366 4366 _collector->incrementYields();
4367 4367 }
4368 4368 _collector->icms_wait();
4369 4369
4370 4370 // It is possible for whichever thread initiated the yield request
4371 4371 // not to get a chance to wake up and take the bitmap lock between
4372 4372 // this thread releasing it and reacquiring it. So, while the
4373 4373 // should_yield() flag is on, let's sleep for a bit to give the
4374 4374 // other thread a chance to wake up. The limit imposed on the number
4375 4375 // of iterations is defensive, to avoid any unforeseen circumstances
4376 4376 // putting us into an infinite loop. Since it's always been this
4377 4377 // (coordinator_yield()) method that was observed to cause the
4378 4378 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4379 4379 // which is by default non-zero. For the other seven methods that
4380 4380 // also perform the yield operation, we are using a different
4381 4381 // parameter (CMSYieldSleepCount) which is by default zero. This way we
4382 4382 // can enable the sleeping for those methods too, if necessary.
4383 4383 // See 6442774.
4384 4384 //
4385 4385 // We really need to reconsider the synchronization between the GC
4386 4386 // thread and the yield-requesting threads in the future and we
4387 4387 // should really use wait/notify, which is the recommended
4388 4388 // way of doing this type of interaction. Additionally, we should
4389 4389 // consolidate the eight methods that do the yield operation, which
4390 4390 // are almost identical, into one for better maintainability and
4391 4391 // readability. See 6445193.
4392 4392 //
4393 4393 // Tony 2006.06.29
4394 4394 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4395 4395 ConcurrentMarkSweepThread::should_yield() &&
4396 4396 !CMSCollector::foregroundGCIsActive(); ++i) {
4397 4397 os::sleep(Thread::current(), 1, false);
4398 4398 ConcurrentMarkSweepThread::acknowledge_yield_request();
4399 4399 }
4400 4400
4401 4401 ConcurrentMarkSweepThread::synchronize(true);
4402 4402 _bit_map_lock->lock_without_safepoint_check();
4403 4403 _collector->startTimer();
4404 4404 }
4405 4405
4406 4406 bool CMSCollector::do_marking_mt(bool asynch) {
4407 4407 assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4408 4408 int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
4409 4409 conc_workers()->total_workers(),
4410 4410 conc_workers()->active_workers(),
4411 4411 Threads::number_of_non_daemon_threads());
4412 4412 conc_workers()->set_active_workers(num_workers);
4413 4413
4414 4414 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
4415 4415
4416 4416 CMSConcMarkingTask tsk(this,
4417 4417 cms_space,
4418 4418 asynch,
4419 4419 conc_workers(),
4420 4420 task_queues());
4421 4421
4422 4422 // Since the actual number of workers we get may be different
4423 4423 // from the number we requested above, do we need to do anything different
4424 4424 // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4425 4425 // class?? XXX
4426 4426 cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
4427 4427
4428 4428 // Refs discovery is already non-atomic.
4429 4429 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4430 4430 assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
4431 4431 conc_workers()->start_task(&tsk);
4432 4432 while (tsk.yielded()) {
4433 4433 tsk.coordinator_yield();
4434 4434 conc_workers()->continue_task(&tsk);
4435 4435 }
4436 4436 // If the task was aborted, _restart_addr will be non-NULL
4437 4437 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4438 4438 while (_restart_addr != NULL) {
4439 4439 // XXX For now we do not make use of ABORTED state and have not
4440 4440 // yet implemented the right abort semantics (even in the original
4441 4441 // single-threaded CMS case). That needs some more investigation
4442 4442 // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4443 4443 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4444 4444 // If _restart_addr is non-NULL, a marking stack overflow
4445 4445 // occurred; we need to do a fresh marking iteration from the
4446 4446 // indicated restart address.
4447 4447 if (_foregroundGCIsActive && asynch) {
4448 4448 // We may be running into repeated stack overflows, having
4449 4449 // reached the limit of the stack size, while making very
4450 4450 // slow forward progress. It may be best to bail out and
4451 4451 // let the foreground collector do its job.
4452 4452 // Clear _restart_addr, so that foreground GC
4453 4453 // works from scratch. This avoids the headache of
4454 4454 // a "rescan" which would otherwise be needed because
4455 4455 // of the dirty mod union table & card table.
4456 4456 _restart_addr = NULL;
4457 4457 return false;
4458 4458 }
4459 4459 // Adjust the task to restart from _restart_addr
4460 4460 tsk.reset(_restart_addr);
4461 4461 cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
4462 4462 _restart_addr);
4463 4463 _restart_addr = NULL;
4464 4464 // Get the workers going again
4465 4465 conc_workers()->start_task(&tsk);
4466 4466 while (tsk.yielded()) {
4467 4467 tsk.coordinator_yield();
4468 4468 conc_workers()->continue_task(&tsk);
4469 4469 }
4470 4470 }
4471 4471 assert(tsk.completed(), "Inconsistency");
4472 4472 assert(tsk.result() == true, "Inconsistency");
4473 4473 return true;
4474 4474 }
4475 4475
4476 4476 bool CMSCollector::do_marking_st(bool asynch) {
4477 4477 ResourceMark rm;
4478 4478 HandleMark hm;
4479 4479
4480 4480 // Temporarily make refs discovery single threaded (non-MT)
4481 4481 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
4482 4482 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4483 4483 &_markStack, CMSYield && asynch);
4484 4484 // the last argument to iterate indicates whether the iteration
4485 4485 // should be incremental with periodic yields.
4486 4486 _markBitMap.iterate(&markFromRootsClosure);
4487 4487 // If _restart_addr is non-NULL, a marking stack overflow
4488 4488 // occurred; we need to do a fresh iteration from the
4489 4489 // indicated restart address.
4490 4490 while (_restart_addr != NULL) {
4491 4491 if (_foregroundGCIsActive && asynch) {
4492 4492 // We may be running into repeated stack overflows, having
4493 4493 // reached the limit of the stack size, while making very
4494 4494 // slow forward progress. It may be best to bail out and
4495 4495 // let the foreground collector do its job.
4496 4496 // Clear _restart_addr, so that foreground GC
4497 4497 // works from scratch. This avoids the headache of
4498 4498 // a "rescan" which would otherwise be needed because
4499 4499 // of the dirty mod union table & card table.
4500 4500 _restart_addr = NULL;
4501 4501 return false; // indicating failure to complete marking
4502 4502 }
4503 4503 // Deal with stack overflow:
4504 4504 // we restart marking from _restart_addr
4505 4505 HeapWord* ra = _restart_addr;
4506 4506 markFromRootsClosure.reset(ra);
4507 4507 _restart_addr = NULL;
4508 4508 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4509 4509 }
4510 4510 return true;
4511 4511 }
4512 4512
4513 4513 void CMSCollector::preclean() {
4514 4514 check_correct_thread_executing();
4515 4515 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4516 4516 verify_work_stacks_empty();
4517 4517 verify_overflow_empty();
4518 4518 _abort_preclean = false;
4519 4519 if (CMSPrecleaningEnabled) {
4520 4520 if (!CMSEdenChunksRecordAlways) {
4521 4521 _eden_chunk_index = 0;
4522 4522 }
4523 4523 size_t used = get_eden_used();
4524 4524 size_t capacity = get_eden_capacity();
4525 4525 // Don't start sampling unless we will get sufficiently
4526 4526 // many samples.
4527 4527 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4528 4528 * CMSScheduleRemarkEdenPenetration)) {
4529 4529 _start_sampling = true;
4530 4530 } else {
4531 4531 _start_sampling = false;
4532 4532 }
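// Worked example of the sampling threshold above (flag values are purely
// illustrative, not asserted defaults): with an eden capacity of 800 MB,
// CMSScheduleRemarkSamplingRatio == 5 and CMSScheduleRemarkEdenPenetration == 50,
// the remark is targeted at 50% eden occupancy (400 MB) and sampling is only
// started if eden currently holds less than
//   800 MB / (5 * 100) * 50 == 80 MB,
// i.e. less than 1/5 of the way to the target occupancy, so roughly
// CMSScheduleRemarkSamplingRatio samples can still be expected before the
// target is reached.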
4533 4533 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4534 4534 CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
4535 4535 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4536 4536 }
4537 4537 CMSTokenSync x(true); // is cms thread
4538 4538 if (CMSPrecleaningEnabled) {
4539 4539 sample_eden();
4540 4540 _collectorState = AbortablePreclean;
4541 4541 } else {
4542 4542 _collectorState = FinalMarking;
4543 4543 }
4544 4544 verify_work_stacks_empty();
4545 4545 verify_overflow_empty();
4546 4546 }
4547 4547
4548 4548 // Try and schedule the remark such that young gen
4549 4549 // occupancy is CMSScheduleRemarkEdenPenetration %.
4550 4550 void CMSCollector::abortable_preclean() {
4551 4551 check_correct_thread_executing();
4552 4552 assert(CMSPrecleaningEnabled, "Inconsistent control state");
4553 4553 assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4554 4554
4555 4555 // If Eden's current occupancy is below this threshold,
4556 4556 // immediately schedule the remark; else preclean
4557 4557 // past the next scavenge in an effort to
4558 4558 // schedule the pause as described above. By choosing
4559 4559 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4560 4560 // we will never do an actual abortable preclean cycle.
4561 4561 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4562 4562 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4563 4563 CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
4564 4564 // We need more smarts in the abortable preclean
4565 4565 // loop below to deal with cases where allocation
4566 4566 // in young gen is very very slow, and our precleaning
4567 4567 // is running a losing race against a horde of
4568 4568 // mutators intent on flooding us with CMS updates
4569 4569 // (dirty cards).
4570 4570 // One, admittedly dumb, strategy is to give up
4571 4571 // after a certain number of abortable precleaning loops
4572 4572 // or after a certain maximum time. We want to make
4573 4573 // this smarter in the next iteration.
4574 4574 // XXX FIX ME!!! YSR
4575 4575 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4576 4576 while (!(should_abort_preclean() ||
4577 4577 ConcurrentMarkSweepThread::should_terminate())) {
4578 4578 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4579 4579 cumworkdone += workdone;
4580 4580 loops++;
4581 4581 // Voluntarily terminate abortable preclean phase if we have
4582 4582 // been at it for too long.
4583 4583 if ((CMSMaxAbortablePrecleanLoops != 0) &&
4584 4584 loops >= CMSMaxAbortablePrecleanLoops) {
4585 4585 if (PrintGCDetails) {
4586 4586 gclog_or_tty->print(" CMS: abort preclean due to loops ");
4587 4587 }
4588 4588 break;
4589 4589 }
4590 4590 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4591 4591 if (PrintGCDetails) {
4592 4592 gclog_or_tty->print(" CMS: abort preclean due to time ");
4593 4593 }
4594 4594 break;
4595 4595 }
4596 4596 // If we are doing little work each iteration, we should
4597 4597 // take a short break.
4598 4598 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4599 4599 // Sleep for some time, waiting for work to accumulate
4600 4600 stopTimer();
4601 4601 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4602 4602 startTimer();
4603 4603 waited++;
4604 4604 }
4605 4605 }
4606 4606 if (PrintCMSStatistics > 0) {
4607 4607 gclog_or_tty->print(" [%d iterations, %d waits, %d cards] ",
4608 4608 loops, waited, cumworkdone);
4609 4609 }
4610 4610 }
4611 4611 CMSTokenSync x(true); // is cms thread
4612 4612 if (_collectorState != Idling) {
4613 4613 assert(_collectorState == AbortablePreclean,
4614 4614 "Spontaneous state transition?");
4615 4615 _collectorState = FinalMarking;
4616 4616 } // Else, a foreground collection completed this CMS cycle.
4617 4617 return;
4618 4618 }
4619 4619
4620 4620 // Respond to an Eden sampling opportunity
4621 4621 void CMSCollector::sample_eden() {
4622 4622 // Make sure a young gc cannot sneak in between our
4623 4623 // reading and recording of a sample.
4624 4624 assert(Thread::current()->is_ConcurrentGC_thread(),
4625 4625 "Only the cms thread may collect Eden samples");
4626 4626 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4627 4627 "Should collect samples while holding CMS token");
4628 4628 if (!_start_sampling) {
4629 4629 return;
4630 4630 }
4631 4631 // When CMSEdenChunksRecordAlways is true, the eden chunk array
4632 4632 // is populated by the young generation.
4633 4633 if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
4634 4634 if (_eden_chunk_index < _eden_chunk_capacity) {
4635 4635 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
4636 4636 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4637 4637 "Unexpected state of Eden");
4638 4638 // We'd like to check that what we just sampled is an oop-start address;
4639 4639 // however, we cannot do that here since the object may not yet have been
4640 4640 // initialized. So we'll instead do the check when we _use_ this sample
4641 4641 // later.
4642 4642 if (_eden_chunk_index == 0 ||
4643 4643 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4644 4644 _eden_chunk_array[_eden_chunk_index-1])
4645 4645 >= CMSSamplingGrain)) {
4646 4646 _eden_chunk_index++; // commit sample
4647 4647 }
4648 4648 }
4649 4649 }
4650 4650 if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4651 4651 size_t used = get_eden_used();
4652 4652 size_t capacity = get_eden_capacity();
4653 4653 assert(used <= capacity, "Unexpected state of Eden");
4654 4654 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4655 4655 _abort_preclean = true;
4656 4656 }
4657 4657 }
4658 4658 }
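// Illustrative arithmetic for the abort test above (values invented for the
// example): with an eden capacity of 800 MB and
// CMSScheduleRemarkEdenPenetration == 50, abortable precleaning is cut short
// as soon as eden usage exceeds
//   800 MB / 100 * 50 == 400 MB,
// i.e. once eden is more than 50% full, so that the remark pause lands close
// to the requested eden penetration.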
4659 4659
4660 4660
4661 4661 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4662 4662 assert(_collectorState == Precleaning ||
4663 4663 _collectorState == AbortablePreclean, "incorrect state");
4664 4664 ResourceMark rm;
4665 4665 HandleMark hm;
4666 4666
4667 4667 // Precleaning is currently not MT but the reference processor
4668 4668 // may be set for MT. Disable it temporarily here.
4669 4669 ReferenceProcessor* rp = ref_processor();
4670 4670 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
4671 4671
4672 4672 // Do one pass of scrubbing the discovered reference lists
4673 4673 // to remove any reference objects with strongly-reachable
4674 4674 // referents.
4675 4675 if (clean_refs) {
4676 4676 CMSPrecleanRefsYieldClosure yield_cl(this);
4677 4677 assert(rp->span().equals(_span), "Spans should be equal");
4678 4678 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4679 4679 &_markStack, true /* preclean */);
4680 4680 CMSDrainMarkingStackClosure complete_trace(this,
4681 4681 _span, &_markBitMap, &_markStack,
4682 4682 &keep_alive, true /* preclean */);
4683 4683
4684 4684 // We don't want this step to interfere with a young
4685 4685 // collection because we don't want to take CPU
4686 4686 // or memory bandwidth away from the young GC threads
4687 4687 // (which may be as many as there are CPUs).
4688 4688 // Note that we don't need to protect ourselves from
4689 4689 // interference with mutators because they can't
4690 4690 // manipulate the discovered reference lists nor affect
4691 4691 // the computed reachability of the referents, the
4692 4692 // only properties manipulated by the precleaning
4693 4693 // of these reference lists.
4694 4694 stopTimer();
4695 4695 CMSTokenSyncWithLocks x(true /* is cms thread */,
4696 4696 bitMapLock());
4697 4697 startTimer();
4698 4698 sample_eden();
4699 4699
4700 4700 // The following will yield to allow foreground
4701 4701 // collection to proceed promptly. XXX YSR:
4702 4702 // The code in this method may need further
4703 4703 // tweaking for better performance and some restructuring
4704 4704 // for cleaner interfaces.
4705 4705 GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
4706 4706 rp->preclean_discovered_references(
4707 4707 rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
4708 4708 gc_timer, _gc_tracer_cm->gc_id());
4709 4709 }
4710 4710
4711 4711 if (clean_survivor) { // preclean the active survivor space(s)
4712 4712 assert(_young_gen->kind() == Generation::DefNew ||
4713 4713 _young_gen->kind() == Generation::ParNew ||
4714 4714 _young_gen->kind() == Generation::ASParNew,
4715 4715 "incorrect type for cast");
4716 4716 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4717 4717 PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4718 4718 &_markBitMap, &_modUnionTable,
4719 4719 &_markStack, true /* precleaning phase */);
4720 4720 stopTimer();
4721 4721 CMSTokenSyncWithLocks ts(true /* is cms thread */,
4722 4722 bitMapLock());
4723 4723 startTimer();
4724 4724 unsigned int before_count =
4725 4725 GenCollectedHeap::heap()->total_collections();
4726 4726 SurvivorSpacePrecleanClosure
4727 4727 sss_cl(this, _span, &_markBitMap, &_markStack,
4728 4728 &pam_cl, before_count, CMSYield);
4729 4729 dng->from()->object_iterate_careful(&sss_cl);
4730 4730 dng->to()->object_iterate_careful(&sss_cl);
4731 4731 }
4732 4732 MarkRefsIntoAndScanClosure
4733 4733 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4734 4734 &_markStack, this, CMSYield,
4735 4735 true /* precleaning phase */);
4736 4736 // CAUTION: The following closure has persistent state that may need to
4737 4737 // be reset upon a decrease in the sequence of addresses it
4738 4738 // processes.
4739 4739 ScanMarkedObjectsAgainCarefullyClosure
4740 4740 smoac_cl(this, _span,
4741 4741 &_markBitMap, &_markStack, &mrias_cl, CMSYield);
4742 4742
4743 4743 // Preclean dirty cards in ModUnionTable and CardTable using
4744 4744 // appropriate convergence criterion;
4745 4745 // repeat CMSPrecleanIter times unless we find that
4746 4746 // we are losing.
4747 4747 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4748 4748 assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4749 4749 "Bad convergence multiplier");
4750 4750 assert(CMSPrecleanThreshold >= 100,
4751 4751 "Unreasonably low CMSPrecleanThreshold");
4752 4752
4753 4753 size_t numIter, cumNumCards, lastNumCards, curNumCards;
4754 4754 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4755 4755 numIter < CMSPrecleanIter;
4756 4756 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4757 4757 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
4758 4758 if (Verbose && PrintGCDetails) {
4759 4759 gclog_or_tty->print(" (modUnionTable: %d cards)", curNumCards);
4760 4760 }
4761 4761 // Either there are very few dirty cards, so re-mark
4762 4762 // pause will be small anyway, or our pre-cleaning isn't
4763 4763 // that much faster than the rate at which cards are being
4764 4764 // dirtied, so we might as well stop and re-mark since
4765 4765 // precleaning won't improve our re-mark time by much.
4766 4766 if (curNumCards <= CMSPrecleanThreshold ||
4767 4767 (numIter > 0 &&
4768 4768 (curNumCards * CMSPrecleanDenominator >
4769 4769 lastNumCards * CMSPrecleanNumerator))) {
4770 4770 numIter++;
4771 4771 cumNumCards += curNumCards;
4772 4772 break;
4773 4773 }
4774 4774 }
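// Worked example of the convergence test above (flag values illustrative):
// with CMSPrecleanThreshold == 1000, CMSPrecleanNumerator == 2 and
// CMSPrecleanDenominator == 3, a per-iteration dirty-card sequence of
//   5000, 2800, 2300
// stops after the third pass: 2300 > 1000, and 2300 * 3 > 2800 * 2
// (6900 > 5600), i.e. the pass failed to shrink the dirty-card count to at
// most 2/3 of the previous pass, so further precleaning is judged a losing
// proposition and we proceed toward the remark.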
4775 4775
4776 4776 preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
4777 4777
4778 4778 curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4779 4779 cumNumCards += curNumCards;
4780 4780 if (PrintGCDetails && PrintCMSStatistics != 0) {
4781 4781 gclog_or_tty->print_cr(" (cardTable: %d cards, re-scanned %d cards, %d iterations)",
4782 4782 curNumCards, cumNumCards, numIter);
4783 4783 }
4784 4784 return cumNumCards; // as a measure of useful work done
4785 4785 }
4786 4786
4787 4787 // PRECLEANING NOTES:
4788 4788 // Precleaning involves:
4789 4789 // . reading the bits of the modUnionTable and clearing the set bits.
4790 4790 // . For the cards corresponding to the set bits, we scan the
4791 4791 // objects on those cards. This means we need the free_list_lock
4792 4792 // so that we can safely iterate over the CMS space when scanning
4793 4793 // for oops.
4794 4794 // . When we scan the objects, we'll be both reading and setting
4795 4795 // marks in the marking bit map, so we'll need the marking bit map.
4796 4796 // . For protecting _collector_state transitions, we take the CGC_lock.
4797 4797 // Note that any races in the reading of card table entries by the
4798 4798 // CMS thread on the one hand and the clearing of those entries by the
4799 4799 // VM thread or the setting of those entries by the mutator threads on the
4800 4800 // other are quite benign. However, for efficiency it makes sense to keep
4801 4801 // the VM thread from racing with the CMS thread while the latter is
4802 4802 // reading and clearing dirty card info in the modUnionTable. We therefore also use the
4803 4803 // CGC_lock to protect the reading of the card table and the mod union
4804 4804 // table by the CMS thread.
4805 4805 // . We run concurrently with mutator updates, so scanning
4806 4806 // needs to be done carefully -- we should not try to scan
4807 4807 // potentially uninitialized objects.
4808 4808 //
4809 4809 // Locking strategy: While holding the CGC_lock, we scan over and
4810 4810 // reset a maximal dirty range of the mod union / card tables, then lock
4811 4811 // the free_list_lock and bitmap lock to do a full marking, then
4812 4812 // release these locks; and repeat the cycle. This allows for a
4813 4813 // certain amount of fairness in the sharing of these locks between
4814 4814 // the CMS collector on the one hand, and the VM thread and the
4815 4815 // mutators on the other.
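// In code-shaped outline, the cycle described above looks roughly like this
// (illustrative only -- see preclean_mod_union_table() below for the real code):
//   while (there are more dirty ranges) {
//     { CMSTokenSync x(true);                       // CGC_lock via the CMS token
//       dirtyRegion = take_and_reset_next_dirty_range(); }
//     { CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
//       scan_objects_on(dirtyRegion); }             // the full marking work
//     // both scopes close -> locks dropped -> VM thread / mutators may proceed
//   }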
4816 4816
4817 4817 // NOTE: preclean_mod_union_table() and preclean_card_table()
4818 4818 // further below are largely identical; if you need to modify
4819 4819 // one of these methods, please check the other method too.
4820 4820
4821 4821 size_t CMSCollector::preclean_mod_union_table(
4822 4822 ConcurrentMarkSweepGeneration* gen,
4823 4823 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4824 4824 verify_work_stacks_empty();
4825 4825 verify_overflow_empty();
4826 4826
4827 4827 // strategy: starting with the first card, accumulate contiguous
4828 4828 // ranges of dirty cards; clear these cards, then scan the region
4829 4829 // covered by these cards.
4830 4830
4831 4831 // Since all of the MUT is committed ahead, we can just use
4832 4832 // that, in case the generations expand while we are precleaning.
4833 4833 // It might also be fine to just use the committed part of the
4834 4834 // generation, but we might potentially miss cards when the
4835 4835 // generation is rapidly expanding while we are in the midst
4836 4836 // of precleaning.
4837 4837 HeapWord* startAddr = gen->reserved().start();
4838 4838 HeapWord* endAddr = gen->reserved().end();
4839 4839
4840 4840 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4841 4841
4842 4842 size_t numDirtyCards, cumNumDirtyCards;
4843 4843 HeapWord *nextAddr, *lastAddr;
4844 4844 for (cumNumDirtyCards = numDirtyCards = 0,
4845 4845 nextAddr = lastAddr = startAddr;
4846 4846 nextAddr < endAddr;
4847 4847 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4848 4848
4849 4849 ResourceMark rm;
4850 4850 HandleMark hm;
4851 4851
4852 4852 MemRegion dirtyRegion;
4853 4853 {
4854 4854 stopTimer();
4855 4855 // Potential yield point
4856 4856 CMSTokenSync ts(true);
4857 4857 startTimer();
4858 4858 sample_eden();
4859 4859 // Get dirty region starting at nextAddr (inclusive),
4860 4860 // simultaneously clearing it.
4861 4861 dirtyRegion =
4862 4862 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4863 4863 assert(dirtyRegion.start() >= nextAddr,
4864 4864 "returned region inconsistent?");
4865 4865 }
4866 4866 // Remember where the next search should begin.
4867 4867 // The returned region (if non-empty) is a right open interval,
4868 4868 // so lastAddr is obtained from the right end of that
4869 4869 // interval.
4870 4870 lastAddr = dirtyRegion.end();
4871 4871 // Should do something more transparent and less hacky XXX
4872 4872 numDirtyCards =
4873 4873 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4874 4874
4875 4875 // We'll scan the cards in the dirty region (with periodic
4876 4876 // yields for foreground GC as needed).
4877 4877 if (!dirtyRegion.is_empty()) {
4878 4878 assert(numDirtyCards > 0, "consistency check");
4879 4879 HeapWord* stop_point = NULL;
4880 4880 stopTimer();
4881 4881 // Potential yield point
4882 4882 CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4883 4883 bitMapLock());
4884 4884 startTimer();
4885 4885 {
4886 4886 verify_work_stacks_empty();
4887 4887 verify_overflow_empty();
4888 4888 sample_eden();
4889 4889 stop_point =
4890 4890 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4891 4891 }
4892 4892 if (stop_point != NULL) {
4893 4893 // The careful iteration stopped early either because it found an
4894 4894 // uninitialized object, or because we were in the midst of an
4895 4895 // "abortable preclean", which should now be aborted. Redirty
4896 4896 // the bits corresponding to the partially-scanned or unscanned
4897 4897 // cards. We'll either restart at the next block boundary or
4898 4898 // abort the preclean.
4899 4899 assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4900 4900 "Should only be AbortablePreclean.");
4901 4901 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4902 4902 if (should_abort_preclean()) {
4903 4903 break; // out of preclean loop
4904 4904 } else {
4905 4905 // Compute the next address at which preclean should pick up;
4906 4906 // might need bitMapLock in order to read P-bits.
4907 4907 lastAddr = next_card_start_after_block(stop_point);
4908 4908 }
4909 4909 }
4910 4910 } else {
4911 4911 assert(lastAddr == endAddr, "consistency check");
4912 4912 assert(numDirtyCards == 0, "consistency check");
4913 4913 break;
4914 4914 }
4915 4915 }
4916 4916 verify_work_stacks_empty();
4917 4917 verify_overflow_empty();
4918 4918 return cumNumDirtyCards;
4919 4919 }
4920 4920
4921 4921 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4922 4922 // below are largely identical; if you need to modify
4923 4923 // one of these methods, please check the other method too.
4924 4924
4925 4925 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4926 4926 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4927 4927 // strategy: it's similar to preclean_mod_union_table above, in that
4928 4928 // we accumulate contiguous ranges of dirty cards, mark these cards
4929 4929 // precleaned, then scan the region covered by these cards.
4930 4930 HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high());
4931 4931 HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4932 4932
4933 4933 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4934 4934
4935 4935 size_t numDirtyCards, cumNumDirtyCards;
4936 4936 HeapWord *lastAddr, *nextAddr;
4937 4937
4938 4938 for (cumNumDirtyCards = numDirtyCards = 0,
4939 4939 nextAddr = lastAddr = startAddr;
4940 4940 nextAddr < endAddr;
4941 4941 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4942 4942
4943 4943 ResourceMark rm;
4944 4944 HandleMark hm;
4945 4945
4946 4946 MemRegion dirtyRegion;
4947 4947 {
4948 4948 // See comments in "Precleaning notes" above on why we
4949 4949 // do this locking. XXX Could the locking overheads be
4950 4950 // too high when dirty cards are sparse? [I don't think so.]
4951 4951 stopTimer();
4952 4952 CMSTokenSync x(true); // is cms thread
4953 4953 startTimer();
4954 4954 sample_eden();
4955 4955 // Get and clear dirty region from card table
4956 4956 dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4957 4957 MemRegion(nextAddr, endAddr),
4958 4958 true,
4959 4959 CardTableModRefBS::precleaned_card_val());
4960 4960
4961 4961 assert(dirtyRegion.start() >= nextAddr,
4962 4962 "returned region inconsistent?");
4963 4963 }
4964 4964 lastAddr = dirtyRegion.end();
4965 4965 numDirtyCards =
4966 4966 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4967 4967
4968 4968 if (!dirtyRegion.is_empty()) {
4969 4969 stopTimer();
4970 4970 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4971 4971 startTimer();
4972 4972 sample_eden();
4973 4973 verify_work_stacks_empty();
4974 4974 verify_overflow_empty();
4975 4975 HeapWord* stop_point =
4976 4976 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4977 4977 if (stop_point != NULL) {
4978 4978 assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4979 4979 "Should only be AbortablePreclean.");
4980 4980 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4981 4981 if (should_abort_preclean()) {
4982 4982 break; // out of preclean loop
4983 4983 } else {
4984 4984 // Compute the next address at which preclean should pick up.
4985 4985 lastAddr = next_card_start_after_block(stop_point);
4986 4986 }
4987 4987 }
4988 4988 } else {
4989 4989 break;
4990 4990 }
4991 4991 }
4992 4992 verify_work_stacks_empty();
4993 4993 verify_overflow_empty();
4994 4994 return cumNumDirtyCards;
4995 4995 }
4996 4996
4997 4997 class PrecleanKlassClosure : public KlassClosure {
4998 4998 KlassToOopClosure _cm_klass_closure;
4999 4999 public:
5000 5000 PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
5001 5001 void do_klass(Klass* k) {
5002 5002 if (k->has_accumulated_modified_oops()) {
5003 5003 k->clear_accumulated_modified_oops();
5004 5004
5005 5005 _cm_klass_closure.do_klass(k);
5006 5006 }
5007 5007 }
5008 5008 };
5009 5009
5010 5010 // The freelist lock is needed to prevent asserts; is it really needed?
5011 5011 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
5012 5012
5013 5013 cl->set_freelistLock(freelistLock);
5014 5014
5015 5015 CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
5016 5016
5017 5017 // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
5018 5018 // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
5019 5019 PrecleanKlassClosure preclean_klass_closure(cl);
5020 5020 ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
5021 5021
5022 5022 verify_work_stacks_empty();
5023 5023 verify_overflow_empty();
5024 5024 }
5025 5025
5026 5026 void CMSCollector::checkpointRootsFinal(bool asynch,
5027 5027 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
5028 5028 assert(_collectorState == FinalMarking, "incorrect state transition?");
5029 5029 check_correct_thread_executing();
5030 5030 // world is stopped at this checkpoint
5031 5031 assert(SafepointSynchronize::is_at_safepoint(),
5032 5032 "world should be stopped");
5033 5033 TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
5034 5034
5035 5035 verify_work_stacks_empty();
5036 5036 verify_overflow_empty();
5037 5037
5038 5038 SpecializationStats::clear();
5039 5039 if (PrintGCDetails) {
5040 5040 gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
5041 5041 _young_gen->used() / K,
5042 5042 _young_gen->capacity() / K);
5043 5043 }
5044 5044 if (asynch) {
5045 5045 if (CMSScavengeBeforeRemark) {
5046 5046 GenCollectedHeap* gch = GenCollectedHeap::heap();
5047 5047 // Temporarily set flag to false, GCH->do_collection will
5048 5048 // expect it to be false and set to true
5049 5049 FlagSetting fl(gch->_is_gc_active, false);
5050 5050 NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
5051 5051 PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
5052 5052 int level = _cmsGen->level() - 1;
5053 5053 if (level >= 0) {
5054 5054 gch->do_collection(true, // full (i.e. force, see below)
5055 5055 false, // !clear_all_soft_refs
5056 5056 0, // size
5057 5057 false, // is_tlab
5058 5058 level // max_level
5059 5059 );
5060 5060 }
5061 5061 }
5062 5062 FreelistLocker x(this);
5063 5063 MutexLockerEx y(bitMapLock(),
5064 5064 Mutex::_no_safepoint_check_flag);
5065 5065 assert(!init_mark_was_synchronous, "but that's impossible!");
5066 5066 checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
5067 5067 } else {
5068 5068 // already have all the locks
5069 5069 checkpointRootsFinalWork(asynch, clear_all_soft_refs,
5070 5070 init_mark_was_synchronous);
5071 5071 }
5072 5072 verify_work_stacks_empty();
5073 5073 verify_overflow_empty();
5074 5074 SpecializationStats::print();
5075 5075 }
5076 5076
5077 5077 void CMSCollector::checkpointRootsFinalWork(bool asynch,
5078 5078 bool clear_all_soft_refs, bool init_mark_was_synchronous) {
5079 5079
5080 5080 NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
5081 5081
5082 5082 assert(haveFreelistLocks(), "must have free list locks");
5083 5083 assert_lock_strong(bitMapLock());
5084 5084
5085 5085 if (UseAdaptiveSizePolicy) {
5086 5086 size_policy()->checkpoint_roots_final_begin();
5087 5087 }
5088 5088
5089 5089 ResourceMark rm;
5090 5090 HandleMark hm;
5091 5091
5092 5092 GenCollectedHeap* gch = GenCollectedHeap::heap();
5093 5093
5094 5094 if (should_unload_classes()) {
5095 5095 CodeCache::gc_prologue();
5096 5096 }
5097 5097 assert(haveFreelistLocks(), "must have free list locks");
5098 5098 assert_lock_strong(bitMapLock());
5099 5099
5100 5100 if (!init_mark_was_synchronous) {
5101 5101 // We might assume that we need not fill TLAB's when
5102 5102 // CMSScavengeBeforeRemark is set, because we may have just done
5103 5103 // a scavenge which would have filled all TLAB's -- and besides
5104 5104 // Eden would be empty. This however may not always be the case --
5105 5105 // for instance although we asked for a scavenge, it may not have
5106 5106 // happened because of a JNI critical section. We probably need
5107 5107 // a policy for deciding whether we can in that case wait until
5108 5108 // the critical section releases and then do the remark following
5109 5109 // the scavenge, and skip it here. In the absence of that policy,
5110 5110 // or of an indication of whether the scavenge did indeed occur,
5111 5111 // we cannot rely on TLAB's having been filled and must do
5112 5112 // so here just in case a scavenge did not happen.
5113 5113 gch->ensure_parsability(false); // fill TLAB's, but no need to retire them
5114 5114 // Update the saved marks which may affect the root scans.
5115 5115 gch->save_marks();
5116 5116
5117 5117 if (CMSPrintEdenSurvivorChunks) {
5118 5118 print_eden_and_survivor_chunk_arrays();
5119 5119 }
5120 5120
5121 5121 {
5122 5122 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
5123 5123
5124 5124 // Note on the role of the mod union table:
5125 5125 // Since the marker in "markFromRoots" marks concurrently with
5126 5126 // mutators, it is possible for some reachable objects not to have been
5127 5127       // scanned. For instance, the only reference to an object A may have
5128 5128       // been placed in object B after the marker scanned B. Unless B is rescanned,
5129 5129 // A would be collected. Such updates to references in marked objects
5130 5130 // are detected via the mod union table which is the set of all cards
5131 5131 // dirtied since the first checkpoint in this GC cycle and prior to
5132 5132 // the most recent young generation GC, minus those cleaned up by the
5133 5133 // concurrent precleaning.
5134 5134 if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
5135 5135 GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5136 5136 do_remark_parallel();
5137 5137 } else {
5138 5138 GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
5139 5139 _gc_timer_cm, _gc_tracer_cm->gc_id());
5140 5140 do_remark_non_parallel();
5141 5141 }
5142 5142 }
5143 5143 } else {
5144 5144 assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
5145 5145 // The initial mark was stop-world, so there's no rescanning to
5146 5146 // do; go straight on to the next step below.
5147 5147 }
5148 5148 verify_work_stacks_empty();
5149 5149 verify_overflow_empty();
5150 5150
5151 5151 {
5152 5152 NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
5153 5153 refProcessingWork(asynch, clear_all_soft_refs);
5154 5154 }
5155 5155 verify_work_stacks_empty();
5156 5156 verify_overflow_empty();
5157 5157
5158 5158 if (should_unload_classes()) {
5159 5159 CodeCache::gc_epilogue();
5160 5160 }
5161 5161 JvmtiExport::gc_epilogue();
5162 5162
5163 5163 // If we encountered any (marking stack / work queue) overflow
5164 5164 // events during the current CMS cycle, take appropriate
5165 5165 // remedial measures, where possible, so as to try and avoid
5166 5166 // recurrence of that condition.
5167 5167 assert(_markStack.isEmpty(), "No grey objects");
5168 5168 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
5169 5169 _ser_kac_ovflw + _ser_kac_preclean_ovflw;
5170 5170 if (ser_ovflw > 0) {
5171 5171 if (PrintCMSStatistics != 0) {
5172 5172 gclog_or_tty->print_cr("Marking stack overflow (benign) "
5173 5173 "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
5174 5174 ", kac_preclean="SIZE_FORMAT")",
5175 5175 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
5176 5176 _ser_kac_ovflw, _ser_kac_preclean_ovflw);
5177 5177 }
5178 5178 _markStack.expand();
5179 5179 _ser_pmc_remark_ovflw = 0;
5180 5180 _ser_pmc_preclean_ovflw = 0;
5181 5181 _ser_kac_preclean_ovflw = 0;
5182 5182 _ser_kac_ovflw = 0;
5183 5183 }
5184 5184 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
5185 5185 if (PrintCMSStatistics != 0) {
5186 5186 gclog_or_tty->print_cr("Work queue overflow (benign) "
5187 5187 "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
5188 5188 _par_pmc_remark_ovflw, _par_kac_ovflw);
5189 5189 }
5190 5190 _par_pmc_remark_ovflw = 0;
5191 5191 _par_kac_ovflw = 0;
5192 5192 }
5193 5193 if (PrintCMSStatistics != 0) {
5194 5194 if (_markStack._hit_limit > 0) {
5195 5195 gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
5196 5196 _markStack._hit_limit);
5197 5197 }
5198 5198 if (_markStack._failed_double > 0) {
5199 5199 gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
5200 5200 " current capacity "SIZE_FORMAT,
5201 5201 _markStack._failed_double,
5202 5202 _markStack.capacity());
5203 5203 }
5204 5204 }
5205 5205 _markStack._hit_limit = 0;
5206 5206 _markStack._failed_double = 0;
5207 5207
5208 5208 if ((VerifyAfterGC || VerifyDuringGC) &&
5209 5209 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5210 5210 verify_after_remark();
5211 5211 }
5212 5212
5213 5213 _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
5214 5214
5215 5215 // Change under the freelistLocks.
5216 5216 _collectorState = Sweeping;
5217 5217 // Call isAllClear() under bitMapLock
5218 5218 assert(_modUnionTable.isAllClear(),
5219 5219 "Should be clear by end of the final marking");
5220 5220 assert(_ct->klass_rem_set()->mod_union_is_clear(),
5221 5221 "Should be clear by end of the final marking");
5222 5222 if (UseAdaptiveSizePolicy) {
5223 5223 size_policy()->checkpoint_roots_final_end(gch->gc_cause());
5224 5224 }
5225 5225 }
5226 5226
5227 5227 void CMSParInitialMarkTask::work(uint worker_id) {
5228 5228 elapsedTimer _timer;
5229 5229 ResourceMark rm;
5230 5230 HandleMark hm;
5231 5231
5232 5232 // ---------- scan from roots --------------
5233 5233 _timer.start();
5234 5234 GenCollectedHeap* gch = GenCollectedHeap::heap();
5235 5235 Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
5236 5236
5237 5237 // ---------- young gen roots --------------
5238 5238 {
5239 5239 work_on_young_gen_roots(worker_id, &par_mri_cl);
5240 5240 _timer.stop();
5241 5241 if (PrintCMSStatistics != 0) {
5242 5242 gclog_or_tty->print_cr(
5243 5243 "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
5244 5244 worker_id, _timer.seconds());
5245 5245 }
5246 5246 }
5247 5247
5248 5248 // ---------- remaining roots --------------
5249 5249 _timer.reset();
5250 5250 _timer.start();
5251 5251
5252 5252 CLDToOopClosure cld_closure(&par_mri_cl, true);
5253 5253
5254 5254 gch->gen_process_roots(_collector->_cmsGen->level(),
5255 5255 false, // yg was scanned above
5256 5256 false, // this is parallel code
5257 5257 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5258 5258 _collector->should_unload_classes(),
5259 5259 &par_mri_cl,
5260 5260 NULL,
5261 5261 &cld_closure);
5262 5262 assert(_collector->should_unload_classes()
5263 5263 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5264 5264 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5265 5265 _timer.stop();
5266 5266 if (PrintCMSStatistics != 0) {
5267 5267 gclog_or_tty->print_cr(
5268 5268 "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
5269 5269 worker_id, _timer.seconds());
5270 5270 }
5271 5271 }
5272 5272
5273 5273 // Parallel remark task
5274 5274 class CMSParRemarkTask: public CMSParMarkTask {
5275 5275 CompactibleFreeListSpace* _cms_space;
5276 5276
5277 5277 // The per-thread work queues, available here for stealing.
5278 5278 OopTaskQueueSet* _task_queues;
5279 5279 ParallelTaskTerminator _term;
5280 5280
5281 5281 public:
5282 5282 // A value of 0 passed to n_workers will cause the number of
5283 5283 // workers to be taken from the active workers in the work gang.
5284 5284 CMSParRemarkTask(CMSCollector* collector,
5285 5285 CompactibleFreeListSpace* cms_space,
5286 5286 int n_workers, FlexibleWorkGang* workers,
5287 5287 OopTaskQueueSet* task_queues):
5288 5288 CMSParMarkTask("Rescan roots and grey objects in parallel",
5289 5289 collector, n_workers),
5290 5290 _cms_space(cms_space),
5291 5291 _task_queues(task_queues),
5292 5292 _term(n_workers, task_queues) { }
5293 5293
5294 5294 OopTaskQueueSet* task_queues() { return _task_queues; }
5295 5295
5296 5296 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5297 5297
5298 5298 ParallelTaskTerminator* terminator() { return &_term; }
5299 5299 int n_workers() { return _n_workers; }
5300 5300
5301 5301 void work(uint worker_id);
5302 5302
5303 5303 private:
5304 5304 // ... of dirty cards in old space
5305 5305 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5306 5306 Par_MarkRefsIntoAndScanClosure* cl);
5307 5307
5308 5308 // ... work stealing for the above
5309 5309 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5310 5310 };
5311 5311
5312 5312 class RemarkKlassClosure : public KlassClosure {
5313 5313 KlassToOopClosure _cm_klass_closure;
5314 5314 public:
5315 5315 RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
5316 5316 void do_klass(Klass* k) {
5317 5317 // Check if we have modified any oops in the Klass during the concurrent marking.
5318 5318 if (k->has_accumulated_modified_oops()) {
5319 5319 k->clear_accumulated_modified_oops();
5320 5320
5321 5321       // We could have transferred the current modified marks to the accumulated marks,
5322 5322 // like we do with the Card Table to Mod Union Table. But it's not really necessary.
5323 5323 } else if (k->has_modified_oops()) {
5324 5324 // Don't clear anything, this info is needed by the next young collection.
5325 5325 } else {
5326 5326 // No modified oops in the Klass.
5327 5327 return;
5328 5328 }
5329 5329
5330 5330 // The klass has modified fields, need to scan the klass.
5331 5331 _cm_klass_closure.do_klass(k);
5332 5332 }
5333 5333 };
5334 5334
5335 5335 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
5336 5336 DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5337 5337 EdenSpace* eden_space = dng->eden();
5338 5338 ContiguousSpace* from_space = dng->from();
5339 5339 ContiguousSpace* to_space = dng->to();
5340 5340
5341 5341 HeapWord** eca = _collector->_eden_chunk_array;
5342 5342 size_t ect = _collector->_eden_chunk_index;
5343 5343 HeapWord** sca = _collector->_survivor_chunk_array;
5344 5344 size_t sct = _collector->_survivor_chunk_index;
5345 5345
5346 5346 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5347 5347 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5348 5348
5349 5349 do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
5350 5350 do_young_space_rescan(worker_id, cl, from_space, sca, sct);
5351 5351 do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
5352 5352 }
5353 5353
5354 5354 // work_queue(i) is passed to the closure
5355 5355 // Par_MarkRefsIntoAndScanClosure. The "i" parameter
5356 5356 // also is passed to do_dirty_card_rescan_tasks() and to
5357 5357 // do_work_steal() to select the i-th task_queue.
5358 5358
5359 5359 void CMSParRemarkTask::work(uint worker_id) {
5360 5360 elapsedTimer _timer;
5361 5361 ResourceMark rm;
5362 5362 HandleMark hm;
5363 5363
5364 5364 // ---------- rescan from roots --------------
5365 5365 _timer.start();
5366 5366 GenCollectedHeap* gch = GenCollectedHeap::heap();
5367 5367 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5368 5368 _collector->_span, _collector->ref_processor(),
5369 5369 &(_collector->_markBitMap),
5370 5370 work_queue(worker_id));
5371 5371
5372 5372 // Rescan young gen roots first since these are likely
5373 5373 // coarsely partitioned and may, on that account, constitute
5374 5374 // the critical path; thus, it's best to start off that
5375 5375 // work first.
5376 5376 // ---------- young gen roots --------------
5377 5377 {
5378 5378 work_on_young_gen_roots(worker_id, &par_mrias_cl);
5379 5379 _timer.stop();
5380 5380 if (PrintCMSStatistics != 0) {
5381 5381 gclog_or_tty->print_cr(
5382 5382 "Finished young gen rescan work in %dth thread: %3.3f sec",
5383 5383 worker_id, _timer.seconds());
5384 5384 }
5385 5385 }
5386 5386
5387 5387 // ---------- remaining roots --------------
5388 5388 _timer.reset();
5389 5389 _timer.start();
5390 5390 gch->gen_process_roots(_collector->_cmsGen->level(),
5391 5391 false, // yg was scanned above
5392 5392 false, // this is parallel code
5393 5393 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5394 5394 _collector->should_unload_classes(),
5395 5395 &par_mrias_cl,
5396 5396 NULL,
5397 5397 NULL); // The dirty klasses will be handled below
5398 5398
5399 5399 assert(_collector->should_unload_classes()
5400 5400 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5401 5401 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5402 5402 _timer.stop();
5403 5403 if (PrintCMSStatistics != 0) {
5404 5404 gclog_or_tty->print_cr(
5405 5405 "Finished remaining root rescan work in %dth thread: %3.3f sec",
5406 5406 worker_id, _timer.seconds());
5407 5407 }
5408 5408
5409 5409 // ---------- unhandled CLD scanning ----------
5410 5410 if (worker_id == 0) { // Single threaded at the moment.
5411 5411 _timer.reset();
5412 5412 _timer.start();
5413 5413
5414 5414 // Scan all new class loader data objects and new dependencies that were
5415 5415 // introduced during concurrent marking.
5416 5416 ResourceMark rm;
5417 5417 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5418 5418 for (int i = 0; i < array->length(); i++) {
5419 5419 par_mrias_cl.do_class_loader_data(array->at(i));
5420 5420 }
5421 5421
5422 5422 // We don't need to keep track of new CLDs anymore.
5423 5423 ClassLoaderDataGraph::remember_new_clds(false);
5424 5424
5425 5425 _timer.stop();
5426 5426 if (PrintCMSStatistics != 0) {
5427 5427 gclog_or_tty->print_cr(
5428 5428 "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
5429 5429 worker_id, _timer.seconds());
5430 5430 }
5431 5431 }
5432 5432
5433 5433 // ---------- dirty klass scanning ----------
5434 5434 if (worker_id == 0) { // Single threaded at the moment.
5435 5435 _timer.reset();
5436 5436 _timer.start();
5437 5437
5438 5438     // Scan all classes that were dirtied during the concurrent marking phase.
5439 5439 RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
5440 5440 ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5441 5441
5442 5442 _timer.stop();
5443 5443 if (PrintCMSStatistics != 0) {
5444 5444 gclog_or_tty->print_cr(
5445 5445 "Finished dirty klass scanning work in %dth thread: %3.3f sec",
5446 5446 worker_id, _timer.seconds());
5447 5447 }
5448 5448 }
5449 5449
5450 5450 // We might have added oops to ClassLoaderData::_handles during the
5451 5451 // concurrent marking phase. These oops point to newly allocated objects
5452 5452   // that are guaranteed to be kept alive, either by the direct allocation
5453 5453   // code or when the young collector processes the roots. Hence,
5454 5454 // we don't have to revisit the _handles block during the remark phase.
5455 5455
5456 5456 // ---------- rescan dirty cards ------------
5457 5457 _timer.reset();
5458 5458 _timer.start();
5459 5459
5460 5460 // Do the rescan tasks for each of the two spaces
5461 5461 // (cms_space) in turn.
5462 5462 // "worker_id" is passed to select the task_queue for "worker_id"
5463 5463 do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
5464 5464 _timer.stop();
5465 5465 if (PrintCMSStatistics != 0) {
5466 5466 gclog_or_tty->print_cr(
5467 5467 "Finished dirty card rescan work in %dth thread: %3.3f sec",
5468 5468 worker_id, _timer.seconds());
5469 5469 }
5470 5470
5471 5471 // ---------- steal work from other threads ...
5472 5472 // ---------- ... and drain overflow list.
5473 5473 _timer.reset();
5474 5474 _timer.start();
5475 5475 do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
5476 5476 _timer.stop();
5477 5477 if (PrintCMSStatistics != 0) {
5478 5478 gclog_or_tty->print_cr(
5479 5479 "Finished work stealing in %dth thread: %3.3f sec",
5480 5480 worker_id, _timer.seconds());
5481 5481 }
5482 5482 }
5483 5483
5484 5484 // Note that parameter "i" is not used.
5485 5485 void
5486 5486 CMSParMarkTask::do_young_space_rescan(uint worker_id,
5487 5487 OopsInGenClosure* cl, ContiguousSpace* space,
5488 5488 HeapWord** chunk_array, size_t chunk_top) {
5489 5489 // Until all tasks completed:
5490 5490 // . claim an unclaimed task
5491 5491 // . compute region boundaries corresponding to task claimed
5492 5492 // using chunk_array
5493 5493 // . par_oop_iterate(cl) over that region
5494 5494
5495 5495 ResourceMark rm;
5496 5496 HandleMark hm;
5497 5497
5498 5498 SequentialSubTasksDone* pst = space->par_seq_tasks();
5499 5499
5500 5500 uint nth_task = 0;
5501 5501 uint n_tasks = pst->n_tasks();
5502 5502
5503 5503 if (n_tasks > 0) {
5504 5504 assert(pst->valid(), "Uninitialized use?");
5505 5505 HeapWord *start, *end;
5506 5506 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5507 5507 // We claimed task # nth_task; compute its boundaries.
5508 5508 if (chunk_top == 0) { // no samples were taken
5509 5509 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5510 5510 start = space->bottom();
5511 5511 end = space->top();
5512 5512 } else if (nth_task == 0) {
5513 5513 start = space->bottom();
5514 5514 end = chunk_array[nth_task];
5515 5515 } else if (nth_task < (uint)chunk_top) {
5516 5516 assert(nth_task >= 1, "Control point invariant");
5517 5517 start = chunk_array[nth_task - 1];
5518 5518 end = chunk_array[nth_task];
5519 5519 } else {
5520 5520 assert(nth_task == (uint)chunk_top, "Control point invariant");
5521 5521 start = chunk_array[chunk_top - 1];
5522 5522 end = space->top();
5523 5523 }
5524 5524 MemRegion mr(start, end);
5525 5525 // Verify that mr is in space
5526 5526 assert(mr.is_empty() || space->used_region().contains(mr),
5527 5527 "Should be in space");
5528 5528 // Verify that "start" is an object boundary
5529 5529 assert(mr.is_empty() || oop(mr.start())->is_oop(),
5530 5530 "Should be an oop");
5531 5531 space->par_oop_iterate(mr, cl);
5532 5532 }
5533 5533 pst->all_tasks_completed();
5534 5534 }
5535 5535 }
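A minimal, self-contained sketch of the boundary computation above, using plain
pointers in place of HotSpot's space and closure types (the names below are
illustrative only and are not part of this patch):

  #include <cassert>
  #include <cstddef>

  struct Region { const char* start; const char* end; };

  // Map claimed task index 'nth' to a half-open sub-region of [bottom, top),
  // using chunk_array[0 .. chunk_top) as the sampled internal boundaries.
  Region task_region(std::size_t nth, const char* bottom, const char* top,
                     const char* const* chunk_array, std::size_t chunk_top) {
    if (chunk_top == 0) {                  // no samples: a single task covers the space
      assert(nth == 0);
      return { bottom, top };
    } else if (nth == 0) {
      return { bottom, chunk_array[0] };   // first task starts at the space bottom
    } else if (nth < chunk_top) {
      return { chunk_array[nth - 1], chunk_array[nth] };
    } else {
      assert(nth == chunk_top);            // last task runs to the space top
      return { chunk_array[chunk_top - 1], top };
    }
  }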
5536 5536
5537 5537 void
5538 5538 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5539 5539 CompactibleFreeListSpace* sp, int i,
5540 5540 Par_MarkRefsIntoAndScanClosure* cl) {
5541 5541 // Until all tasks completed:
5542 5542 // . claim an unclaimed task
5543 5543 // . compute region boundaries corresponding to task claimed
5544 5544 // . transfer dirty bits ct->mut for that region
5545 5545 // . apply rescanclosure to dirty mut bits for that region
5546 5546
5547 5547 ResourceMark rm;
5548 5548 HandleMark hm;
5549 5549
5550 5550 OopTaskQueue* work_q = work_queue(i);
5551 5551 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5552 5552 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5553 5553 // CAUTION: This closure has state that persists across calls to
5554 5554 // the work method dirty_range_iterate_clear() in that it has
5555 5555   // embedded in it a (subtype of) UpwardsObjectClosure. The
5556 5556   // use of that state in the embedded UpwardsObjectClosure instance
5557 5557 // assumes that the cards are always iterated (even if in parallel
5558 5558 // by several threads) in monotonically increasing order per each
5559 5559 // thread. This is true of the implementation below which picks
5560 5560 // card ranges (chunks) in monotonically increasing order globally
5561 5561 // and, a-fortiori, in monotonically increasing order per thread
5562 5562 // (the latter order being a subsequence of the former).
5563 5563 // If the work code below is ever reorganized into a more chaotic
5564 5564 // work-partitioning form than the current "sequential tasks"
5565 5565 // paradigm, the use of that persistent state will have to be
5566 5566 // revisited and modified appropriately. See also related
5567 5567 // bug 4756801 work on which should examine this code to make
5568 5568 // sure that the changes there do not run counter to the
5569 5569 // assumptions made here and necessary for correctness and
5570 5570 // efficiency. Note also that this code might yield inefficient
5571 5571 // behaviour in the case of very large objects that span one or
5572 5572 // more work chunks. Such objects would potentially be scanned
5573 5573 // several times redundantly. Work on 4756801 should try and
5574 5574 // address that performance anomaly if at all possible. XXX
5575 5575 MemRegion full_span = _collector->_span;
5576 5576 CMSBitMap* bm = &(_collector->_markBitMap); // shared
5577 5577 MarkFromDirtyCardsClosure
5578 5578 greyRescanClosure(_collector, full_span, // entire span of interest
5579 5579 sp, bm, work_q, cl);
5580 5580
5581 5581 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5582 5582 assert(pst->valid(), "Uninitialized use?");
5583 5583 uint nth_task = 0;
5584 5584 const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5585 5585 MemRegion span = sp->used_region();
5586 5586 HeapWord* start_addr = span.start();
5587 5587 HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5588 5588 alignment);
5589 5589 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5590 5590 assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5591 5591 start_addr, "Check alignment");
5592 5592 assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5593 5593 chunk_size, "Check alignment");
5594 5594
5595 5595 while (!pst->is_task_claimed(/* reference */ nth_task)) {
5596 5596 // Having claimed the nth_task, compute corresponding mem-region,
5597 5597     // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
5598 5598 // The alignment restriction ensures that we do not need any
5599 5599 // synchronization with other gang-workers while setting or
5600 5600     // clearing bits in this chunk of the MUT.
5601 5601 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5602 5602 start_addr + (nth_task+1)*chunk_size);
5603 5603     // The last chunk's end might be way beyond the end of the
5604 5604     // used region. In that case pull it back appropriately.
5605 5605 if (this_span.end() > end_addr) {
5606 5606 this_span.set_end(end_addr);
5607 5607 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5608 5608 }
5609 5609 // Iterate over the dirty cards covering this chunk, marking them
5610 5610 // precleaned, and setting the corresponding bits in the mod union
5611 5611 // table. Since we have been careful to partition at Card and MUT-word
5612 5612 // boundaries no synchronization is needed between parallel threads.
5613 5613 _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5614 5614 &modUnionClosure);
5615 5615
5616 5616 // Having transferred these marks into the modUnionTable,
5617 5617 // rescan the marked objects on the dirty cards in the modUnionTable.
5618 5618 // Even if this is at a synchronous collection, the initial marking
5619 5619 // may have been done during an asynchronous collection so there
5620 5620 // may be dirty bits in the mod-union table.
5621 5621 _collector->_modUnionTable.dirty_range_iterate_clear(
5622 5622 this_span, &greyRescanClosure);
5623 5623 _collector->_modUnionTable.verifyNoOneBitsInRange(
5624 5624 this_span.start(),
5625 5625 this_span.end());
5626 5626 }
5627 5627   pst->all_tasks_completed(); // declare that I am done
5628 5628 }
5629 5629
5630 5630 // . see if we can share work_queues with ParNew? XXX
5631 5631 void
5632 5632 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5633 5633 int* seed) {
5634 5634 OopTaskQueue* work_q = work_queue(i);
5635 5635 NOT_PRODUCT(int num_steals = 0;)
5636 5636 oop obj_to_scan;
5637 5637 CMSBitMap* bm = &(_collector->_markBitMap);
5638 5638
5639 5639 while (true) {
5640 5640 // Completely finish any left over work from (an) earlier round(s)
5641 5641 cl->trim_queue(0);
5642 5642 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5643 5643 (size_t)ParGCDesiredObjsFromOverflowList);
5644 5644 // Now check if there's any work in the overflow list
5645 5645 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5646 5646 // only affects the number of attempts made to get work from the
5647 5647 // overflow list and does not affect the number of workers. Just
5648 5648 // pass ParallelGCThreads so this behavior is unchanged.
5649 5649 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5650 5650 work_q,
5651 5651 ParallelGCThreads)) {
5652 5652 // found something in global overflow list;
5653 5653 // not yet ready to go stealing work from others.
5654 5654 // We'd like to assert(work_q->size() != 0, ...)
5655 5655 // because we just took work from the overflow list,
5656 5656 // but of course we can't since all of that could have
5657 5657 // been already stolen from us.
5658 5658 // "He giveth and He taketh away."
5659 5659 continue;
5660 5660 }
5661 5661 // Verify that we have no work before we resort to stealing
5662 5662 assert(work_q->size() == 0, "Have work, shouldn't steal");
5663 5663 // Try to steal from other queues that have work
5664 5664 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5665 5665 NOT_PRODUCT(num_steals++;)
5666 5666 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5667 5667 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5668 5668 // Do scanning work
5669 5669 obj_to_scan->oop_iterate(cl);
5670 5670 // Loop around, finish this work, and try to steal some more
5671 5671 } else if (terminator()->offer_termination()) {
5672 5672 break; // nirvana from the infinite cycle
5673 5673 }
5674 5674 }
5675 5675 NOT_PRODUCT(
5676 5676 if (PrintCMSStatistics != 0) {
5677 5677 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5678 5678 }
5679 5679 )
5680 5680 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5681 5681 "Else our work is not yet done");
5682 5682 }
5683 5683
5684 5684 // If CMSEdenChunksRecordAlways is true, record object boundaries
5685 5685 // in _eden_chunk_array by sampling the eden top in the slow-path
5686 5686 // eden object allocation code path. If CMSEdenChunksRecordAlways
5687 5687 // is false, we instead rely on the asynchronous sampling done by
5688 5688 // sample_eden(), which is active during part of the
5689 5689 // preclean phase.
5690 5690 void CMSCollector::sample_eden_chunk() {
5691 5691 if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
5692 5692 if (_eden_chunk_lock->try_lock()) {
5693 5693 // Record a sample. This is the critical section. The contents
5694 5694 // of the _eden_chunk_array have to be non-decreasing in the
5695 5695 // address order.
5696 5696 _eden_chunk_array[_eden_chunk_index] = *_top_addr;
5697 5697 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
5698 5698 "Unexpected state of Eden");
5699 5699 if (_eden_chunk_index == 0 ||
5700 5700 ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
5701 5701 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
5702 5702 _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
5703 5703 _eden_chunk_index++; // commit sample
5704 5704 }
5705 5705 _eden_chunk_lock->unlock();
5706 5706 }
5707 5707 }
5708 5708 }
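The commit rule above, distilled into a standalone predicate (a sketch with
illustrative names; CMSSamplingGrain is modelled as an ordinary parameter and
addresses as word indices):

  #include <cstddef>

  // A new eden-top sample is committed only if it is the first sample, or if it
  // strictly advances the previous one by at least 'grain' words, so the recorded
  // boundaries stay sorted and reasonably spaced.
  bool commit_sample(const std::size_t* samples, std::size_t index,
                     std::size_t new_sample, std::size_t grain) {
    return index == 0 ||
           (new_sample > samples[index - 1] &&
            new_sample - samples[index - 1] >= grain);
  }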
5709 5709
5710 5710 // Return a thread-local PLAB recording array, as appropriate.
5711 5711 void* CMSCollector::get_data_recorder(int thr_num) {
5712 5712 if (_survivor_plab_array != NULL &&
5713 5713 (CMSPLABRecordAlways ||
5714 5714 (_collectorState > Marking && _collectorState < FinalMarking))) {
5715 5715 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5716 5716 ChunkArray* ca = &_survivor_plab_array[thr_num];
5717 5717 ca->reset(); // clear it so that fresh data is recorded
5718 5718 return (void*) ca;
5719 5719 } else {
5720 5720 return NULL;
5721 5721 }
5722 5722 }
5723 5723
5724 5724 // Reset all the thread-local PLAB recording arrays
5725 5725 void CMSCollector::reset_survivor_plab_arrays() {
5726 5726 for (uint i = 0; i < ParallelGCThreads; i++) {
5727 5727 _survivor_plab_array[i].reset();
5728 5728 }
5729 5729 }
5730 5730
5731 5731 // Merge the per-thread plab arrays into the global survivor chunk
5732 5732 // array which will provide the partitioning of the survivor space
5733 5733 // for CMS initial scan and rescan.
5734 5734 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
5735 5735 int no_of_gc_threads) {
5736 5736 assert(_survivor_plab_array != NULL, "Error");
5737 5737 assert(_survivor_chunk_array != NULL, "Error");
5738 5738 assert(_collectorState == FinalMarking ||
5739 5739 (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
5740 5740 for (int j = 0; j < no_of_gc_threads; j++) {
5741 5741 _cursor[j] = 0;
5742 5742 }
5743 5743 HeapWord* top = surv->top();
5744 5744 size_t i;
5745 5745 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries
5746 5746 HeapWord* min_val = top; // Higher than any PLAB address
5747 5747 uint min_tid = 0; // position of min_val this round
5748 5748 for (int j = 0; j < no_of_gc_threads; j++) {
5749 5749 ChunkArray* cur_sca = &_survivor_plab_array[j];
5750 5750 if (_cursor[j] == cur_sca->end()) {
5751 5751 continue;
5752 5752 }
5753 5753 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5754 5754 HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5755 5755 assert(surv->used_region().contains(cur_val), "Out of bounds value");
5756 5756 if (cur_val < min_val) {
5757 5757 min_tid = j;
5758 5758 min_val = cur_val;
5759 5759 } else {
5760 5760 assert(cur_val < top, "All recorded addresses should be less");
5761 5761 }
5762 5762 }
5763 5763 // At this point min_val and min_tid are respectively
5764 5764 // the least address in _survivor_plab_array[j]->nth(_cursor[j])
5765 5765 // and the thread (j) that witnesses that address.
5766 5766 // We record this address in the _survivor_chunk_array[i]
5767 5767 // and increment _cursor[min_tid] prior to the next round i.
5768 5768 if (min_val == top) {
5769 5769 break;
5770 5770 }
5771 5771 _survivor_chunk_array[i] = min_val;
5772 5772 _cursor[min_tid]++;
5773 5773 }
5774 5774 // We are all done; record the size of the _survivor_chunk_array
5775 5775 _survivor_chunk_index = i; // exclusive: [0, i)
5776 5776 if (PrintCMSStatistics > 0) {
5777 5777     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
5778 5778 }
5779 5779 // Verify that we used up all the recorded entries
5780 5780 #ifdef ASSERT
5781 5781 size_t total = 0;
5782 5782 for (int j = 0; j < no_of_gc_threads; j++) {
5783 5783 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5784 5784 total += _cursor[j];
5785 5785 }
5786 5786 assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5787 5787 // Check that the merged array is in sorted order
5788 5788 if (total > 0) {
5789 5789 for (size_t i = 0; i < total - 1; i++) {
5790 5790 if (PrintCMSStatistics > 0) {
5791 5791 gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5792 5792 i, _survivor_chunk_array[i]);
5793 5793 }
5794 5794 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5795 5795 "Not sorted");
5796 5796 }
5797 5797 }
5798 5798 #endif // ASSERT
5799 5799 }
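The merge above is a k-way merge of per-thread, individually sorted boundary
arrays into one sorted array. A minimal standalone sketch of that idea
(std::vector stands in for the ChunkArray/_cursor machinery; the function name
is illustrative only):

  #include <cstddef>
  #include <vector>

  std::vector<std::size_t>
  k_way_merge(const std::vector<std::vector<std::size_t> >& per_thread) {
    std::vector<std::size_t> cursor(per_thread.size(), 0);  // next unconsumed index per thread
    std::vector<std::size_t> merged;
    for (;;) {
      std::size_t min_tid = per_thread.size();              // "no candidate yet"
      for (std::size_t j = 0; j < per_thread.size(); j++) {
        if (cursor[j] == per_thread[j].size()) continue;    // this thread's array is exhausted
        if (min_tid == per_thread.size() ||
            per_thread[j][cursor[j]] < per_thread[min_tid][cursor[min_tid]]) {
          min_tid = j;                                      // smallest unconsumed value so far
        }
      }
      if (min_tid == per_thread.size()) break;              // all arrays exhausted
      merged.push_back(per_thread[min_tid][cursor[min_tid]++]);
    }
    return merged;
  }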
5800 5800
5801 5801 // Set up the space's par_seq_tasks structure for work claiming
5802 5802 // for parallel initial scan and rescan of young gen.
5803 5803 // See ParRescanTask where this is currently used.
5804 5804 void
5805 5805 CMSCollector::
5806 5806 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5807 5807 assert(n_threads > 0, "Unexpected n_threads argument");
5808 5808 DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5809 5809
5810 5810 // Eden space
5811 5811 if (!dng->eden()->is_empty()) {
5812 5812 SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5813 5813 assert(!pst->valid(), "Clobbering existing data?");
5814 5814 // Each valid entry in [0, _eden_chunk_index) represents a task.
5815 5815 size_t n_tasks = _eden_chunk_index + 1;
5816 5816 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5817 5817 // Sets the condition for completion of the subtask (how many threads
5818 5818 // need to finish in order to be done).
5819 5819 pst->set_n_threads(n_threads);
5820 5820 pst->set_n_tasks((int)n_tasks);
5821 5821 }
5822 5822
5823 5823 // Merge the survivor plab arrays into _survivor_chunk_array
5824 5824 if (_survivor_plab_array != NULL) {
5825 5825 merge_survivor_plab_arrays(dng->from(), n_threads);
5826 5826 } else {
5827 5827 assert(_survivor_chunk_index == 0, "Error");
5828 5828 }
5829 5829
5830 5830 // To space
5831 5831 {
5832 5832 SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5833 5833 assert(!pst->valid(), "Clobbering existing data?");
5834 5834 // Sets the condition for completion of the subtask (how many threads
5835 5835 // need to finish in order to be done).
5836 5836 pst->set_n_threads(n_threads);
5837 5837 pst->set_n_tasks(1);
5838 5838 assert(pst->valid(), "Error");
5839 5839 }
5840 5840
5841 5841 // From space
5842 5842 {
5843 5843 SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5844 5844 assert(!pst->valid(), "Clobbering existing data?");
5845 5845 size_t n_tasks = _survivor_chunk_index + 1;
5846 5846 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5847 5847 // Sets the condition for completion of the subtask (how many threads
5848 5848 // need to finish in order to be done).
5849 5849 pst->set_n_threads(n_threads);
5850 5850 pst->set_n_tasks((int)n_tasks);
5851 5851 assert(pst->valid(), "Error");
5852 5852 }
5853 5853 }
5854 5854
5855 5855 // Parallel version of remark
5856 5856 void CMSCollector::do_remark_parallel() {
5857 5857 GenCollectedHeap* gch = GenCollectedHeap::heap();
5858 5858 FlexibleWorkGang* workers = gch->workers();
5859 5859 assert(workers != NULL, "Need parallel worker threads.");
5860 5860 // Choose to use the number of GC workers most recently set
5861 5861 // into "active_workers". If active_workers is not set, set it
5862 5862 // to ParallelGCThreads.
5863 5863 int n_workers = workers->active_workers();
5864 5864 if (n_workers == 0) {
5865 5865 assert(n_workers > 0, "Should have been set during scavenge");
5866 5866 n_workers = ParallelGCThreads;
5867 5867 workers->set_active_workers(n_workers);
5868 5868 }
5869 5869 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
5870 5870
5871 5871 CMSParRemarkTask tsk(this,
5872 5872 cms_space,
5873 5873 n_workers, workers, task_queues());
5874 5874
5875 5875 // Set up for parallel process_roots work.
5876 5876 gch->set_par_threads(n_workers);
5877 5877 // We won't be iterating over the cards in the card table updating
5878 5878 // the younger_gen cards, so we shouldn't call the following else
5879 5879 // the verification code as well as subsequent younger_refs_iterate
5880 5880 // code would get confused. XXX
5881 5881 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5882 5882
5883 5883 // The young gen rescan work will not be done as part of
5884 5884 // process_roots (which currently doesn't know how to
5885 5885 // parallelize such a scan), but rather will be broken up into
5886 5886 // a set of parallel tasks (via the sampling that the [abortable]
5887 5887 // preclean phase did of EdenSpace, plus the [two] tasks of
5888 5888   // scanning the [two] survivor spaces). Further fine-grain
5889 5889 // parallelization of the scanning of the survivor spaces
5890 5890 // themselves, and of precleaning of the younger gen itself
5891 5891 // is deferred to the future.
5892 5892 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5893 5893
5894 5894 // The dirty card rescan work is broken up into a "sequence"
5895 5895 // of parallel tasks (per constituent space) that are dynamically
5896 5896 // claimed by the parallel threads.
5897 5897 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5898 5898
5899 5899 // It turns out that even when we're using 1 thread, doing the work in a
5900 5900 // separate thread causes wide variance in run times. We can't help this
5901 5901 // in the multi-threaded case, but we special-case n=1 here to get
5902 5902 // repeatable measurements of the 1-thread overhead of the parallel code.
5903 5903 if (n_workers > 1) {
5904 5904 // Make refs discovery MT-safe, if it isn't already: it may not
5905 5905 // necessarily be so, since it's possible that we are doing
5906 5906 // ST marking.
5907 5907 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5908 5908 GenCollectedHeap::StrongRootsScope srs(gch);
5909 5909 workers->run_task(&tsk);
5910 5910 } else {
5911 5911 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5912 5912 GenCollectedHeap::StrongRootsScope srs(gch);
5913 5913 tsk.work(0);
5914 5914 }
5915 5915
5916 5916 gch->set_par_threads(0); // 0 ==> non-parallel.
5917 5917 // restore, single-threaded for now, any preserved marks
5918 5918 // as a result of work_q overflow
5919 5919 restore_preserved_marks_if_any();
5920 5920 }
5921 5921
5922 5922 // Non-parallel version of remark
5923 5923 void CMSCollector::do_remark_non_parallel() {
5924 5924 ResourceMark rm;
5925 5925 HandleMark hm;
5926 5926 GenCollectedHeap* gch = GenCollectedHeap::heap();
5927 5927 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5928 5928
5929 5929 MarkRefsIntoAndScanClosure
5930 5930 mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5931 5931 &_markStack, this,
5932 5932 false /* should_yield */, false /* not precleaning */);
5933 5933 MarkFromDirtyCardsClosure
5934 5934 markFromDirtyCardsClosure(this, _span,
5935 5935 NULL, // space is set further below
5936 5936 &_markBitMap, &_markStack, &mrias_cl);
5937 5937 {
5938 5938 GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5939 5939 // Iterate over the dirty cards, setting the corresponding bits in the
5940 5940 // mod union table.
5941 5941 {
5942 5942 ModUnionClosure modUnionClosure(&_modUnionTable);
5943 5943 _ct->ct_bs()->dirty_card_iterate(
5944 5944 _cmsGen->used_region(),
5945 5945 &modUnionClosure);
5946 5946 }
5947 5947 // Having transferred these marks into the modUnionTable, we just need
5948 5948 // to rescan the marked objects on the dirty cards in the modUnionTable.
5949 5949 // The initial marking may have been done during an asynchronous
5950 5950 // collection so there may be dirty bits in the mod-union table.
5951 5951 const int alignment =
5952 5952 CardTableModRefBS::card_size * BitsPerWord;
5953 5953 {
5954 5954 // ... First handle dirty cards in CMS gen
5955 5955 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5956 5956 MemRegion ur = _cmsGen->used_region();
5957 5957 HeapWord* lb = ur.start();
5958 5958 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5959 5959 MemRegion cms_span(lb, ub);
5960 5960 _modUnionTable.dirty_range_iterate_clear(cms_span,
5961 5961 &markFromDirtyCardsClosure);
5962 5962 verify_work_stacks_empty();
5963 5963 if (PrintCMSStatistics != 0) {
5964 5964 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5965 5965 markFromDirtyCardsClosure.num_dirty_cards());
5966 5966 }
5967 5967 }
5968 5968 }
5969 5969 if (VerifyDuringGC &&
5970 5970 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5971 5971 HandleMark hm; // Discard invalid handles created during verification
5972 5972 Universe::verify();
5973 5973 }
5974 5974 {
5975 5975 GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5976 5976
5977 5977 verify_work_stacks_empty();
5978 5978
5979 5979 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5980 5980 GenCollectedHeap::StrongRootsScope srs(gch);
5981 5981
5982 5982 gch->gen_process_roots(_cmsGen->level(),
5983 5983 true, // younger gens as roots
5984 5984 false, // use the local StrongRootsScope
5985 5985 SharedHeap::ScanningOption(roots_scanning_options()),
5986 5986 should_unload_classes(),
5987 5987 &mrias_cl,
5988 5988 NULL,
5989 5989 NULL); // The dirty klasses will be handled below
5990 5990
5991 5991 assert(should_unload_classes()
5992 5992 || (roots_scanning_options() & SharedHeap::SO_AllCodeCache),
5993 5993 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5994 5994 }
5995 5995
5996 5996 {
5997 5997 GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5998 5998
5999 5999 verify_work_stacks_empty();
6000 6000
6001 6001 // Scan all class loader data objects that might have been introduced
6002 6002 // during concurrent marking.
6003 6003 ResourceMark rm;
6004 6004 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
6005 6005 for (int i = 0; i < array->length(); i++) {
6006 6006 mrias_cl.do_class_loader_data(array->at(i));
6007 6007 }
6008 6008
6009 6009 // We don't need to keep track of new CLDs anymore.
6010 6010 ClassLoaderDataGraph::remember_new_clds(false);
6011 6011
6012 6012 verify_work_stacks_empty();
6013 6013 }
6014 6014
6015 6015 {
6016 6016 GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6017 6017
6018 6018 verify_work_stacks_empty();
6019 6019
6020 6020 RemarkKlassClosure remark_klass_closure(&mrias_cl);
6021 6021 ClassLoaderDataGraph::classes_do(&remark_klass_closure);
6022 6022
6023 6023 verify_work_stacks_empty();
6024 6024 }
6025 6025
6026 6026 // We might have added oops to ClassLoaderData::_handles during the
6027 6027 // concurrent marking phase. These oops point to newly allocated objects
6028 6028   // that are guaranteed to be kept alive, either by the direct allocation
6029 6029   // code or when the young collector processes the roots. Hence,
6030 6030 // we don't have to revisit the _handles block during the remark phase.
6031 6031
6032 6032 verify_work_stacks_empty();
6033 6033 // Restore evacuated mark words, if any, used for overflow list links
6034 6034 if (!CMSOverflowEarlyRestoration) {
6035 6035 restore_preserved_marks_if_any();
6036 6036 }
6037 6037 verify_overflow_empty();
6038 6038 }
6039 6039
6040 6040 ////////////////////////////////////////////////////////
6041 6041 // Parallel Reference Processing Task Proxy Class
6042 6042 ////////////////////////////////////////////////////////
6043 6043 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
6044 6044 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
6045 6045 CMSCollector* _collector;
6046 6046 CMSBitMap* _mark_bit_map;
6047 6047 const MemRegion _span;
6048 6048 ProcessTask& _task;
6049 6049
6050 6050 public:
6051 6051 CMSRefProcTaskProxy(ProcessTask& task,
6052 6052 CMSCollector* collector,
6053 6053 const MemRegion& span,
6054 6054 CMSBitMap* mark_bit_map,
6055 6055 AbstractWorkGang* workers,
6056 6056 OopTaskQueueSet* task_queues):
6057 6057 // XXX Should superclass AGTWOQ also know about AWG since it knows
6058 6058 // about the task_queues used by the AWG? Then it could initialize
6059 6059 // the terminator() object. See 6984287. The set_for_termination()
6060 6060 // below is a temporary band-aid for the regression in 6984287.
6061 6061 AbstractGangTaskWOopQueues("Process referents by policy in parallel",
6062 6062 task_queues),
6063 6063 _task(task),
6064 6064 _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
6065 6065 {
6066 6066 assert(_collector->_span.equals(_span) && !_span.is_empty(),
6067 6067 "Inconsistency in _span");
6068 6068 set_for_termination(workers->active_workers());
6069 6069 }
6070 6070
6071 6071 OopTaskQueueSet* task_queues() { return queues(); }
6072 6072
6073 6073 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
6074 6074
6075 6075 void do_work_steal(int i,
6076 6076 CMSParDrainMarkingStackClosure* drain,
6077 6077 CMSParKeepAliveClosure* keep_alive,
6078 6078 int* seed);
6079 6079
6080 6080 virtual void work(uint worker_id);
6081 6081 };
6082 6082
6083 6083 void CMSRefProcTaskProxy::work(uint worker_id) {
6084 6084 ResourceMark rm;
6085 6085 HandleMark hm;
6086 6086 assert(_collector->_span.equals(_span), "Inconsistency in _span");
6087 6087 CMSParKeepAliveClosure par_keep_alive(_collector, _span,
6088 6088 _mark_bit_map,
6089 6089 work_queue(worker_id));
6090 6090 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
6091 6091 _mark_bit_map,
6092 6092 work_queue(worker_id));
6093 6093 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
6094 6094 _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
6095 6095 if (_task.marks_oops_alive()) {
6096 6096 do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
6097 6097 _collector->hash_seed(worker_id));
6098 6098 }
6099 6099 assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
6100 6100 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
6101 6101 }
6102 6102
6103 6103 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
6104 6104 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
6105 6105 EnqueueTask& _task;
6106 6106
6107 6107 public:
6108 6108 CMSRefEnqueueTaskProxy(EnqueueTask& task)
6109 6109 : AbstractGangTask("Enqueue reference objects in parallel"),
6110 6110 _task(task)
6111 6111 { }
6112 6112
6113 6113 virtual void work(uint worker_id)
6114 6114 {
6115 6115 _task.work(worker_id);
6116 6116 }
6117 6117 };
6118 6118
6119 6119 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
6120 6120 MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
6121 6121 _span(span),
6122 6122 _bit_map(bit_map),
6123 6123 _work_queue(work_queue),
6124 6124 _mark_and_push(collector, span, bit_map, work_queue),
6125 6125 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6126 6126 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
6127 6127 { }
6128 6128
6129 6129 // . see if we can share work_queues with ParNew? XXX
6130 6130 void CMSRefProcTaskProxy::do_work_steal(int i,
6131 6131 CMSParDrainMarkingStackClosure* drain,
6132 6132 CMSParKeepAliveClosure* keep_alive,
6133 6133 int* seed) {
6134 6134 OopTaskQueue* work_q = work_queue(i);
6135 6135 NOT_PRODUCT(int num_steals = 0;)
6136 6136 oop obj_to_scan;
6137 6137
6138 6138 while (true) {
6139 6139 // Completely finish any left over work from (an) earlier round(s)
6140 6140 drain->trim_queue(0);
6141 6141 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
6142 6142 (size_t)ParGCDesiredObjsFromOverflowList);
6143 6143 // Now check if there's any work in the overflow list
6144 6144 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
6145 6145 // only affects the number of attempts made to get work from the
6146 6146 // overflow list and does not affect the number of workers. Just
6147 6147 // pass ParallelGCThreads so this behavior is unchanged.
6148 6148 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
6149 6149 work_q,
6150 6150 ParallelGCThreads)) {
6151 6151 // Found something in global overflow list;
6152 6152 // not yet ready to go stealing work from others.
6153 6153 // We'd like to assert(work_q->size() != 0, ...)
6154 6154 // because we just took work from the overflow list,
6155 6155 // but of course we can't, since all of that might have
6156 6156 // been already stolen from us.
6157 6157 continue;
6158 6158 }
6159 6159 // Verify that we have no work before we resort to stealing
6160 6160 assert(work_q->size() == 0, "Have work, shouldn't steal");
6161 6161 // Try to steal from other queues that have work
6162 6162 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
6163 6163 NOT_PRODUCT(num_steals++;)
6164 6164 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
6165 6165 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
6166 6166 // Do scanning work
6167 6167 obj_to_scan->oop_iterate(keep_alive);
6168 6168 // Loop around, finish this work, and try to steal some more
6169 6169 } else if (terminator()->offer_termination()) {
6170 6170 break; // nirvana from the infinite cycle
6171 6171 }
6172 6172 }
6173 6173 NOT_PRODUCT(
6174 6174 if (PrintCMSStatistics != 0) {
6175 6175 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
6176 6176 }
6177 6177 )
6178 6178 }
6179 6179
6180 6180 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
6181 6181 {
6182 6182 GenCollectedHeap* gch = GenCollectedHeap::heap();
6183 6183 FlexibleWorkGang* workers = gch->workers();
6184 6184 assert(workers != NULL, "Need parallel worker threads.");
6185 6185 CMSRefProcTaskProxy rp_task(task, &_collector,
6186 6186 _collector.ref_processor()->span(),
6187 6187 _collector.markBitMap(),
6188 6188 workers, _collector.task_queues());
6189 6189 workers->run_task(&rp_task);
6190 6190 }
6191 6191
6192 6192 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
6193 6193 {
6194 6194
6195 6195 GenCollectedHeap* gch = GenCollectedHeap::heap();
6196 6196 FlexibleWorkGang* workers = gch->workers();
6197 6197 assert(workers != NULL, "Need parallel worker threads.");
6198 6198 CMSRefEnqueueTaskProxy enq_task(task);
6199 6199 workers->run_task(&enq_task);
6200 6200 }
6201 6201
6202 6202 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
6203 6203
6204 6204 ResourceMark rm;
6205 6205 HandleMark hm;
6206 6206
6207 6207 ReferenceProcessor* rp = ref_processor();
6208 6208 assert(rp->span().equals(_span), "Spans should be equal");
6209 6209 assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
6210 6210 // Process weak references.
6211 6211 rp->setup_policy(clear_all_soft_refs);
6212 6212 verify_work_stacks_empty();
6213 6213
6214 6214 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
6215 6215 &_markStack, false /* !preclean */);
6216 6216 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
6217 6217 _span, &_markBitMap, &_markStack,
6218 6218 &cmsKeepAliveClosure, false /* !preclean */);
6219 6219 {
6220 6220 GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6221 6221
6222 6222 ReferenceProcessorStats stats;
6223 6223 if (rp->processing_is_mt()) {
6224 6224 // Set the degree of MT here. If the discovery is done MT, there
6225 6225 // may have been a different number of threads doing the discovery
6226 6226 // and a different number of discovered lists may have Ref objects.
6227 6227 // That is OK as long as the Reference lists are balanced (see
6228 6228 // balance_all_queues() and balance_queues()).
6229 6229 GenCollectedHeap* gch = GenCollectedHeap::heap();
6230 6230 int active_workers = ParallelGCThreads;
6231 6231 FlexibleWorkGang* workers = gch->workers();
6232 6232 if (workers != NULL) {
6233 6233 active_workers = workers->active_workers();
6234 6234 // The expectation is that active_workers will have already
6235 6235 // been set to a reasonable value. If it has not been set,
6236 6236 // investigate.
6237 6237 assert(active_workers > 0, "Should have been set during scavenge");
6238 6238 }
6239 6239 rp->set_active_mt_degree(active_workers);
6240 6240 CMSRefProcTaskExecutor task_executor(*this);
6241 6241 stats = rp->process_discovered_references(&_is_alive_closure,
6242 6242 &cmsKeepAliveClosure,
6243 6243 &cmsDrainMarkingStackClosure,
6244 6244 &task_executor,
6245 6245 _gc_timer_cm,
6246 6246 _gc_tracer_cm->gc_id());
6247 6247 } else {
6248 6248 stats = rp->process_discovered_references(&_is_alive_closure,
6249 6249 &cmsKeepAliveClosure,
6250 6250 &cmsDrainMarkingStackClosure,
6251 6251 NULL,
6252 6252 _gc_timer_cm,
6253 6253 _gc_tracer_cm->gc_id());
6254 6254 }
6255 6255 _gc_tracer_cm->report_gc_reference_stats(stats);
6256 6256
6257 6257 }
6258 6258
6259 6259 // This is the point where the entire marking should have completed.
6260 6260 verify_work_stacks_empty();
6261 6261
6262 6262 if (should_unload_classes()) {
6263 6263 {
6264 6264 GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6265 6265
6266 6266 // Unload classes and purge the SystemDictionary.
6267 6267 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
6268 6268
6269 6269 // Unload nmethods.
6270 6270 CodeCache::do_unloading(&_is_alive_closure, purged_class);
6271 6271
6272 6272 // Prune dead klasses from subklass/sibling/implementor lists.
6273 6273 Klass::clean_weak_klass_links(&_is_alive_closure);
6274 6274 }
6275 6275
6276 6276 {
6277 6277 GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6278 6278 // Clean up unreferenced symbols in symbol table.
6279 6279 SymbolTable::unlink();
6280 6280 }
6281 6281
6282 6282 {
6283 6283 GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
6284 6284 // Delete entries for dead interned strings.
6285 6285 StringTable::unlink(&_is_alive_closure);
6286 6286 }
6287 6287 }
6288 6288
6289 6289
6290 6290 // Restore any preserved marks as a result of mark stack or
6291 6291 // work queue overflow
6292 6292 restore_preserved_marks_if_any(); // done single-threaded for now
6293 6293
6294 6294 rp->set_enqueuing_is_done(true);
6295 6295 if (rp->processing_is_mt()) {
6296 6296 rp->balance_all_queues();
6297 6297 CMSRefProcTaskExecutor task_executor(*this);
6298 6298 rp->enqueue_discovered_references(&task_executor);
6299 6299 } else {
6300 6300 rp->enqueue_discovered_references(NULL);
6301 6301 }
6302 6302 rp->verify_no_references_recorded();
6303 6303 assert(!rp->discovery_enabled(), "should have been disabled");
6304 6304 }
6305 6305
6306 6306 #ifndef PRODUCT
6307 6307 void CMSCollector::check_correct_thread_executing() {
6308 6308 Thread* t = Thread::current();
6309 6309 // Only the VM thread or the CMS thread should be here.
6310 6310 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
6311 6311 "Unexpected thread type");
6312 6312 // If this is the vm thread, the foreground process
6313 6313 // should not be waiting. Note that _foregroundGCIsActive is
6314 6314 // true while the foreground collector is waiting.
6315 6315 if (_foregroundGCShouldWait) {
6316 6316 // We cannot be the VM thread
6317 6317 assert(t->is_ConcurrentGC_thread(),
6318 6318 "Should be CMS thread");
6319 6319 } else {
6320 6320 // We can be the CMS thread only if we are in a stop-world
6321 6321 // phase of CMS collection.
6322 6322 if (t->is_ConcurrentGC_thread()) {
6323 6323 assert(_collectorState == InitialMarking ||
6324 6324 _collectorState == FinalMarking,
6325 6325 "Should be a stop-world phase");
6326 6326 // The CMS thread should be holding the CMS_token.
6327 6327 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6328 6328 "Potential interference with concurrently "
6329 6329 "executing VM thread");
6330 6330 }
6331 6331 }
6332 6332 }
6333 6333 #endif
6334 6334
6335 6335 void CMSCollector::sweep(bool asynch) {
6336 6336 assert(_collectorState == Sweeping, "just checking");
6337 6337 check_correct_thread_executing();
6338 6338 verify_work_stacks_empty();
6339 6339 verify_overflow_empty();
6340 6340 increment_sweep_count();
6341 6341 TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
6342 6342
6343 6343 _inter_sweep_timer.stop();
6344 6344 _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
6345 6345 size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
6346 6346
6347 6347 assert(!_intra_sweep_timer.is_active(), "Should not be active");
6348 6348 _intra_sweep_timer.reset();
6349 6349 _intra_sweep_timer.start();
6350 6350 if (asynch) {
6351 6351 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6352 6352 CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
6353 6353 // First sweep the old gen
6354 6354 {
6355 6355 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
6356 6356 bitMapLock());
6357 6357 sweepWork(_cmsGen, asynch);
6358 6358 }
6359 6359
6360 6360 // Update Universe::_heap_*_at_gc figures.
6361 6361 // We need all the free list locks to make the abstract state
6362 6362 // transition from Sweeping to Resetting. See detailed note
6363 6363 // further below.
6364 6364 {
6365 6365 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
6366 6366 // Update heap occupancy information which is used as
6367 6367 // input to soft ref clearing policy at the next gc.
6368 6368 Universe::update_heap_info_at_gc();
6369 6369 _collectorState = Resizing;
6370 6370 }
6371 6371 } else {
6372 6372 // already have needed locks
6373 6373 sweepWork(_cmsGen, asynch);
6374 6374 // Update heap occupancy information which is used as
6375 6375 // input to soft ref clearing policy at the next gc.
6376 6376 Universe::update_heap_info_at_gc();
6377 6377 _collectorState = Resizing;
6378 6378 }
6379 6379 verify_work_stacks_empty();
6380 6380 verify_overflow_empty();
6381 6381
6382 6382 if (should_unload_classes()) {
6383 6383 // Delay purge to the beginning of the next safepoint. Metaspace::contains
6384 6384 // requires that the virtual spaces are stable and not deleted.
6385 6385 ClassLoaderDataGraph::set_should_purge(true);
6386 6386 }
6387 6387
6388 6388 _intra_sweep_timer.stop();
6389 6389 _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
6390 6390
6391 6391 _inter_sweep_timer.reset();
6392 6392 _inter_sweep_timer.start();
6393 6393
6394 6394 // We need to use a monotonically non-decreasing time in ms,
6395 6395 // because os::javaTimeMillis() does not guarantee monotonicity
6396 6396 // and we would otherwise see time-warp warnings.
6397 6397 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
6398 6398 update_time_of_last_gc(now);
6399 6399
6400 6400 // NOTE on abstract state transitions:
6401 6401 // Mutators allocate-live and/or mark the mod-union table dirty
6402 6402 // based on the state of the collection. The former is done in
6403 6403 // the interval [Marking, Sweeping] and the latter in the interval
6404 6404 // [Marking, Sweeping). Thus the transitions into the Marking state
6405 6405 // and out of the Sweeping state must be synchronously visible
6406 6406 // globally to the mutators.
6407 6407 // The transition into the Marking state happens with the world
6408 6408 // stopped so the mutators will globally see it. Sweeping is
6409 6409 // done asynchronously by the background collector so the transition
6410 6410 // from the Sweeping state to the Resizing state must be done
6411 6411 // under the freelistLock (as is the check for whether to
6412 6412 // allocate-live and whether to dirty the mod-union table).
6413 6413 assert(_collectorState == Resizing, "Change of collector state to"
6414 6414 " Resizing must be done under the freelistLocks (plural)");
6415 6415
6416 6416 // Now that sweeping has been completed, we clear
6417 6417 // the incremental_collection_failed flag,
6418 6418 // thus inviting a younger gen collection to promote into
6419 6419 // this generation. If such a promotion may still fail,
6420 6420 // the flag will be set again when a young collection is
6421 6421 // attempted.
6422 6422 GenCollectedHeap* gch = GenCollectedHeap::heap();
6423 6423 gch->clear_incremental_collection_failed(); // Worth retrying as fresh space may have been freed up
6424 6424 gch->update_full_collections_completed(_collection_count_start);
6425 6425 }
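// A compact sketch of the concurrent-path state sequence this code assumes
// (names taken from _collectorState; only the tail end is shown):
//
//   FinalMarking -> Sweeping -> Resizing -> Resetting -> Idling
//
// sweep() above performs the Sweeping -> Resizing transition under the
// freelist locks; reset() further below performs Resetting -> Idling.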
6426 6426
6427 6427 // FIX ME!!! Looks like this belongs in CFLSpace, with
6428 6428 // CMSGen merely delegating to it.
6429 6429 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
6430 6430 double nearLargestPercent = FLSLargestBlockCoalesceProximity;
6431 6431 HeapWord* minAddr = _cmsSpace->bottom();
6432 6432 HeapWord* largestAddr =
6433 6433 (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
6434 6434 if (largestAddr == NULL) {
6435 6435 // The dictionary appears to be empty. In this case
6436 6436 // try to coalesce at the end of the heap.
6437 6437 largestAddr = _cmsSpace->end();
6438 6438 }
6439 6439 size_t largestOffset = pointer_delta(largestAddr, minAddr);
6440 6440 size_t nearLargestOffset =
6441 6441 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
6442 6442 if (PrintFLSStatistics != 0) {
6443 6443 gclog_or_tty->print_cr(
6444 6444 "CMS: Large Block: " PTR_FORMAT ";"
6445 6445 " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6446 6446 largestAddr,
6447 6447 _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6448 6448 }
6449 6449 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
6450 6450 }
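// Illustrative arithmetic (hypothetical values): with nearLargestPercent at,
// say, 0.99, a largestOffset of 1,000,000 HeapWords and a MinChunkSize of 7
// words, nearLargestOffset = (size_t)(1000000 * 0.99) - 7 = 989993, i.e. the
// "near largest chunk" boundary is placed just below the largest free block
// so that coalescing during the sweep is concentrated there.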
6451 6451
6452 6452 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6453 6453 return addr >= _cmsSpace->nearLargestChunk();
6454 6454 }
6455 6455
6456 6456 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6457 6457 return _cmsSpace->find_chunk_at_end();
6458 6458 }
6459 6459
6460 6460 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
6461 6461 bool full) {
6462 6462 // The next lower level has been collected. Gather any statistics
6463 6463 // that are of interest at this point.
6464 6464 if (!full && (current_level + 1) == level()) {
6465 6465 // Gather statistics on the young generation collection.
6466 6466 collector()->stats().record_gc0_end(used());
6467 6467 }
6468 6468 }
6469 6469
6470 6470 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
6471 6471 GenCollectedHeap* gch = GenCollectedHeap::heap();
6472 6472 assert(gch->kind() == CollectedHeap::GenCollectedHeap,
6473 6473 "Wrong type of heap");
6474 6474 CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
6475 6475 gch->gen_policy()->size_policy();
6476 6476 assert(sp->is_gc_cms_adaptive_size_policy(),
6477 6477 "Wrong type of size policy");
6478 6478 return sp;
6479 6479 }
6480 6480
6481 6481 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6482 6482 if (PrintGCDetails && Verbose) {
6483 6483 gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6484 6484 }
6485 6485 _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6486 6486 _debug_collection_type =
6487 6487 (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6488 6488 if (PrintGCDetails && Verbose) {
6489 6489 gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6490 6490 }
6491 6491 }
6492 6492
6493 6493 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6494 6494 bool asynch) {
6495 6495 // We iterate over the space(s) underlying this generation,
6496 6496 // checking the mark bit map to see if the bits corresponding
6497 6497 // to specific blocks are marked or not. Blocks that are
6498 6498 // marked are live and are not swept up. All remaining blocks
6499 6499 // are swept up, with coalescing on-the-fly as we sweep up
6500 6500 // contiguous free and/or garbage blocks:
6501 6501 // We need to ensure that the sweeper synchronizes with allocators
6502 6502 // and stop-the-world collectors. In particular, the following
6503 6503 // locks are used:
6504 6504 // . CMS token: if this is held, a stop the world collection cannot occur
6505 6505 // . freelistLock: if this is held no allocation can occur from this
6506 6506 // generation by another thread
6507 6507 // . bitMapLock: if this is held, no other thread can access or update
6508 6508 // the mark bit map
6509 6509
6510 6510 // Note that we need to hold the freelistLock if we use
6511 6511 // block iterate below; else the iterator might go awry if
6512 6512 // a mutator (or promotion) causes block contents to change
6513 6513 // (for instance if the allocator divvies up a block).
6514 6514 // If we hold the free list lock, for all practical purposes
6515 6515 // young generation GC's can't occur (they'll usually need to
6516 6516 // promote), so we might as well prevent all young generation
6517 6517 // GC's while we do a sweeping step. For the same reason, we might
6518 6518 // as well take the bit map lock for the entire duration
6519 6519
6520 6520 // check that we hold the requisite locks
6521 6521 assert(have_cms_token(), "Should hold cms token");
6522 6522 assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
6523 6523 || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6524 6524 "Should possess CMS token to sweep");
6525 6525 assert_lock_strong(gen->freelistLock());
6526 6526 assert_lock_strong(bitMapLock());
6527 6527
6528 6528 assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
6529 6529 assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
6530 6530 gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
6531 6531 _inter_sweep_estimate.padded_average(),
6532 6532 _intra_sweep_estimate.padded_average());
6533 6533 gen->setNearLargestChunk();
6534 6534
6535 6535 {
6536 6536 SweepClosure sweepClosure(this, gen, &_markBitMap,
6537 6537 CMSYield && asynch);
6538 6538 gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6539 6539 // We need to free-up/coalesce garbage/blocks from a
6540 6540 // co-terminal free run. This is done in the SweepClosure
6541 6541 // destructor; so, do not remove this scope, else the
6542 6542 // end-of-sweep-census below will be off by a little bit.
6543 6543 }
6544 6544 gen->cmsSpace()->sweep_completed();
6545 6545 gen->cmsSpace()->endSweepFLCensus(sweep_count());
6546 6546 if (should_unload_classes()) { // unloaded classes this cycle,
6547 6547 _concurrent_cycles_since_last_unload = 0; // ... reset count
6548 6548 } else { // did not unload classes,
6549 6549 _concurrent_cycles_since_last_unload++; // ... increment count
6550 6550 }
6551 6551 }
6552 6552
6553 6553 // Reset CMS data structures (for now just the marking bit map)
6554 6554 // preparatory for the next cycle.
6555 6555 void CMSCollector::reset(bool asynch) {
6556 6556 GenCollectedHeap* gch = GenCollectedHeap::heap();
6557 6557 CMSAdaptiveSizePolicy* sp = size_policy();
6558 6558 AdaptiveSizePolicyOutput(sp, gch->total_collections());
6559 6559 if (asynch) {
6560 6560 CMSTokenSyncWithLocks ts(true, bitMapLock());
6561 6561
6562 6562 // If the state is not "Resetting", the foreground thread
6563 6563 // has done a collection and the resetting.
6564 6564 if (_collectorState != Resetting) {
6565 6565 assert(_collectorState == Idling, "The state should only change"
6566 6566 " because the foreground collector has finished the collection");
6567 6567 return;
6568 6568 }
6569 6569
6570 6570 // Clear the mark bitmap (no grey objects to start with)
6571 6571 // for the next cycle.
6572 6572 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6573 6573 CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
6574 6574
6575 6575 HeapWord* curAddr = _markBitMap.startWord();
6576 6576 while (curAddr < _markBitMap.endWord()) {
6577 6577 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
6578 6578 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6579 6579 _markBitMap.clear_large_range(chunk);
6580 6580 if (ConcurrentMarkSweepThread::should_yield() &&
6581 6581 !foregroundGCIsActive() &&
6582 6582 CMSYield) {
6583 6583 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6584 6584 "CMS thread should hold CMS token");
6585 6585 assert_lock_strong(bitMapLock());
6586 6586 bitMapLock()->unlock();
6587 6587 ConcurrentMarkSweepThread::desynchronize(true);
6588 6588 ConcurrentMarkSweepThread::acknowledge_yield_request();
6589 6589 stopTimer();
6590 6590 if (PrintCMSStatistics != 0) {
6591 6591 incrementYields();
6592 6592 }
6593 6593 icms_wait();
6594 6594
6595 6595 // See the comment in coordinator_yield()
6596 6596 for (unsigned i = 0; i < CMSYieldSleepCount &&
6597 6597 ConcurrentMarkSweepThread::should_yield() &&
6598 6598 !CMSCollector::foregroundGCIsActive(); ++i) {
6599 6599 os::sleep(Thread::current(), 1, false);
6600 6600 ConcurrentMarkSweepThread::acknowledge_yield_request();
6601 6601 }
6602 6602
6603 6603 ConcurrentMarkSweepThread::synchronize(true);
6604 6604 bitMapLock()->lock_without_safepoint_check();
6605 6605 startTimer();
6606 6606 }
6607 6607 curAddr = chunk.end();
6608 6608 }
6609 6609 // A successful mostly concurrent collection has been done.
6610 6610 // Because only the full (i.e., concurrent mode failure) collections
6611 6611 // are being measured for gc overhead limits, clean the "near" flag
6612 6612 // and count.
6613 6613 sp->reset_gc_overhead_limit_count();
6614 6614 _collectorState = Idling;
6615 6615 } else {
6616 6616 // already have the lock
6617 6617 assert(_collectorState == Resetting, "just checking");
6618 6618 assert_lock_strong(bitMapLock());
6619 6619 _markBitMap.clear_all();
6620 6620 _collectorState = Idling;
6621 6621 }
6622 6622
6623 6623 // Stop incremental mode after a cycle completes, so that any future cycles
6624 6624 // are triggered by allocation.
6625 6625 stop_icms();
6626 6626
6627 6627 NOT_PRODUCT(
6628 6628 if (RotateCMSCollectionTypes) {
6629 6629 _cmsGen->rotate_debug_collection_type();
6630 6630 }
6631 6631 )
6632 6632
6633 6633 register_gc_end();
6634 6634 }
6635 6635
6636 6636 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
6637 - gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6638 6637 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6639 6638 GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
6640 6639 TraceCollectorStats tcs(counters());
6641 6640
6642 6641 switch (op) {
6643 6642 case CMS_op_checkpointRootsInitial: {
6644 6643 SvcGCMarker sgcm(SvcGCMarker::OTHER);
6645 6644 checkpointRootsInitial(true); // asynch
6646 6645 if (PrintGC) {
6647 6646 _cmsGen->printOccupancy("initial-mark");
6648 6647 }
6649 6648 break;
6650 6649 }
6651 6650 case CMS_op_checkpointRootsFinal: {
6652 6651 SvcGCMarker sgcm(SvcGCMarker::OTHER);
6653 6652 checkpointRootsFinal(true, // asynch
6654 6653 false, // !clear_all_soft_refs
6655 6654 false); // !init_mark_was_synchronous
6656 6655 if (PrintGC) {
6657 6656 _cmsGen->printOccupancy("remark");
6658 6657 }
6659 6658 break;
6660 6659 }
6661 6660 default:
6662 6661 fatal("No such CMS_op");
6663 6662 }
6664 6663 }
6665 6664
6666 6665 #ifndef PRODUCT
6667 6666 size_t const CMSCollector::skip_header_HeapWords() {
6668 6667 return FreeChunk::header_size();
6669 6668 }
6670 6669
6671 6670 // Try and collect here conditions that should hold when
6672 6671 // CMS thread is exiting. The idea is that the foreground GC
6673 6672 // thread should not be blocked if it wants to terminate
6674 6673 // the CMS thread and yet continue to run the VM for a while
6675 6674 // after that.
6676 6675 void CMSCollector::verify_ok_to_terminate() const {
6677 6676 assert(Thread::current()->is_ConcurrentGC_thread(),
6678 6677 "should be called by CMS thread");
6679 6678 assert(!_foregroundGCShouldWait, "should be false");
6680 6679 // We could check here that all the various low-level locks
6681 6680 // are not held by the CMS thread, but that is overkill; see
6682 6681 // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6683 6682 // is checked.
6684 6683 }
6685 6684 #endif
6686 6685
6687 6686 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
6688 6687 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6689 6688 "missing Printezis mark?");
6690 6689 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6691 6690 size_t size = pointer_delta(nextOneAddr + 1, addr);
6692 6691 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6693 6692 "alignment problem");
6694 6693 assert(size >= 3, "Necessary for Printezis marks to work");
6695 6694 return size;
6696 6695 }
6697 6696
6698 6697 // A variant of the above (block_size_using_printezis_bits()) except
6699 6698 // that we return 0 if the P-bits are not yet set.
6700 6699 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6701 6700 if (_markBitMap.isMarked(addr + 1)) {
6702 6701 assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
6703 6702 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6704 6703 size_t size = pointer_delta(nextOneAddr + 1, addr);
6705 6704 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6706 6705 "alignment problem");
6707 6706 assert(size >= 3, "Necessary for Printezis marks to work");
6708 6707 return size;
6709 6708 }
6710 6709 return 0;
6711 6710 }
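// Sketch of the Printezis-mark ("P-bit") convention the two helpers above rely
// on, with one mark bit per HeapWord in the CMS marking bit map:
//
//   addr  addr+1              addr+size-1
//   [1]   [1]    0 .... 0     [1]
//
// Bits at addr and addr+1 flag an allocated-but-uninitialized block; the next
// set bit at or after addr+2 marks its last word, so
// size = pointer_delta(nextOneAddr + 1, addr), and size >= 3 by construction.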
6712 6711
6713 6712 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
6714 6713 size_t sz = 0;
6715 6714 oop p = (oop)addr;
6716 6715 if (p->klass_or_null() != NULL) {
6717 6716 sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6718 6717 } else {
6719 6718 sz = block_size_using_printezis_bits(addr);
6720 6719 }
6721 6720 assert(sz > 0, "size must be nonzero");
6722 6721 HeapWord* next_block = addr + sz;
6723 6722 HeapWord* next_card = (HeapWord*)round_to((uintptr_t)next_block,
6724 6723 CardTableModRefBS::card_size);
6725 6724 assert(round_down((uintptr_t)addr, CardTableModRefBS::card_size) <
6726 6725 round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6727 6726 "must be different cards");
6728 6727 return next_card;
6729 6728 }
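// Worked example (hypothetical 64-bit layout, 512-byte cards, 8-byte HeapWords):
// for addr == 0x1000 and a block of sz == 10 words, next_block is 0x1050
// (0x1000 + 10*8 bytes), which round_to() bumps to the next card boundary
// 0x1200; the assert holds because addr's card (0x1000) and next_card's card
// (0x1200) differ.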
6730 6729
6731 6730
6732 6731 // CMS Bit Map Wrapper /////////////////////////////////////////
6733 6732
6734 6733 // Construct a CMS bit map infrastructure, but don't create the
6735 6734 // bit vector itself. That is done by a separate call CMSBitMap::allocate()
6736 6735 // further below.
6737 6736 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6738 6737 _bm(),
6739 6738 _shifter(shifter),
6740 6739 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6741 6740 {
6742 6741 _bmStartWord = 0;
6743 6742 _bmWordSize = 0;
6744 6743 }
6745 6744
6746 6745 bool CMSBitMap::allocate(MemRegion mr) {
6747 6746 _bmStartWord = mr.start();
6748 6747 _bmWordSize = mr.word_size();
6749 6748 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6750 6749 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6751 6750 if (!brs.is_reserved()) {
6752 6751 warning("CMS bit map allocation failure");
6753 6752 return false;
6754 6753 }
6755 6754 // For now we'll just commit all of the bit map up front.
6756 6755 // Later on we'll try to be more parsimonious with swap.
6757 6756 if (!_virtual_space.initialize(brs, brs.size())) {
6758 6757 warning("CMS bit map backing store failure");
6759 6758 return false;
6760 6759 }
6761 6760 assert(_virtual_space.committed_size() == brs.size(),
6762 6761 "didn't reserve backing store for all of CMS bit map?");
6763 6762 _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
6764 6763 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6765 6764 _bmWordSize, "inconsistency in bit map sizing");
6766 6765 _bm.set_size(_bmWordSize >> _shifter);
6767 6766
6768 6767 // bm.clear(); // can we rely on getting zero'd memory? verify below
6769 6768 assert(isAllClear(),
6770 6769 "Expected zero'd memory from ReservedSpace constructor");
6771 6770 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6772 6771 "consistency check");
6773 6772 return true;
6774 6773 }
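// Rough sizing sketch (hypothetical numbers): for a 1 GiB CMS generation on a
// 64-bit VM (2^27 HeapWords) with _shifter == 0, the reservation request is
// (2^27 >> (0 + LogBitsPerByte)) + 1 = 16 MiB + 1 byte, rounded up to the
// allocation alignment -- i.e. one bit of backing store per heap word.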
6775 6774
6776 6775 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
6777 6776 HeapWord *next_addr, *end_addr, *last_addr;
6778 6777 assert_locked();
6779 6778 assert(covers(mr), "out-of-range error");
6780 6779 // XXX assert that start and end are appropriately aligned
6781 6780 for (next_addr = mr.start(), end_addr = mr.end();
6782 6781 next_addr < end_addr; next_addr = last_addr) {
6783 6782 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6784 6783 last_addr = dirty_region.end();
6785 6784 if (!dirty_region.is_empty()) {
6786 6785 cl->do_MemRegion(dirty_region);
6787 6786 } else {
6788 6787 assert(last_addr == end_addr, "program logic");
6789 6788 return;
6790 6789 }
6791 6790 }
6792 6791 }
6793 6792
6794 6793 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
6795 6794 _bm.print_on_error(st, prefix);
6796 6795 }
6797 6796
6798 6797 #ifndef PRODUCT
6799 6798 void CMSBitMap::assert_locked() const {
6800 6799 CMSLockVerifier::assert_locked(lock());
6801 6800 }
6802 6801
6803 6802 bool CMSBitMap::covers(MemRegion mr) const {
6804 6803 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6805 6804 assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6806 6805 "size inconsistency");
6807 6806 return (mr.start() >= _bmStartWord) &&
6808 6807 (mr.end() <= endWord());
6809 6808 }
6810 6809
6811 6810 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6812 6811 return (start >= _bmStartWord && (start + size) <= endWord());
6813 6812 }
6814 6813
6815 6814 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6816 6815 // verify that there are no 1 bits in the interval [left, right)
6817 6816 FalseBitMapClosure falseBitMapClosure;
6818 6817 iterate(&falseBitMapClosure, left, right);
6819 6818 }
6820 6819
6821 6820 void CMSBitMap::region_invariant(MemRegion mr)
6822 6821 {
6823 6822 assert_locked();
6824 6823 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6825 6824 assert(!mr.is_empty(), "unexpected empty region");
6826 6825 assert(covers(mr), "mr should be covered by bit map");
6827 6826 // convert address range into offset range
6828 6827 size_t start_ofs = heapWordToOffset(mr.start());
6829 6828 // Make sure that end() is appropriately aligned
6830 6829 assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6831 6830 (1 << (_shifter+LogHeapWordSize))),
6832 6831 "Misaligned mr.end()");
6833 6832 size_t end_ofs = heapWordToOffset(mr.end());
6834 6833 assert(end_ofs > start_ofs, "Should mark at least one bit");
6835 6834 }
6836 6835
6837 6836 #endif
6838 6837
6839 6838 bool CMSMarkStack::allocate(size_t size) {
6840 6839 // allocate a stack of the requisite depth
6841 6840 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6842 6841 size * sizeof(oop)));
6843 6842 if (!rs.is_reserved()) {
6844 6843 warning("CMSMarkStack allocation failure");
6845 6844 return false;
6846 6845 }
6847 6846 if (!_virtual_space.initialize(rs, rs.size())) {
6848 6847 warning("CMSMarkStack backing store failure");
6849 6848 return false;
6850 6849 }
6851 6850 assert(_virtual_space.committed_size() == rs.size(),
6852 6851 "didn't reserve backing store for all of CMS stack?");
6853 6852 _base = (oop*)(_virtual_space.low());
6854 6853 _index = 0;
6855 6854 _capacity = size;
6856 6855 NOT_PRODUCT(_max_depth = 0);
6857 6856 return true;
6858 6857 }
6859 6858
6860 6859 // XXX FIX ME !!! In the MT case we come in here holding a
6861 6860 // leaf lock. For printing we need to take a further lock
6862 6861 // which has lower rank. We need to recalibrate the two
6863 6862 // lock-ranks involved in order to be able to print the
6864 6863 // messages below. (Or defer the printing to the caller.
6865 6864 // For now we take the expedient path of just disabling the
6866 6865 // messages for the problematic case.)
6867 6866 void CMSMarkStack::expand() {
6868 6867 assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6869 6868 if (_capacity == MarkStackSizeMax) {
6870 6869 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6871 6870 // We print a warning message only once per CMS cycle.
6872 6871 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6873 6872 }
6874 6873 return;
6875 6874 }
6876 6875 // Double capacity if possible
6877 6876 size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6878 6877 // Do not give up existing stack until we have managed to
6879 6878 // get the double capacity that we desired.
6880 6879 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6881 6880 new_capacity * sizeof(oop)));
6882 6881 if (rs.is_reserved()) {
6883 6882 // Release the backing store associated with old stack
6884 6883 _virtual_space.release();
6885 6884 // Reinitialize virtual space for new stack
6886 6885 if (!_virtual_space.initialize(rs, rs.size())) {
6887 6886 fatal("Not enough swap for expanded marking stack");
6888 6887 }
6889 6888 _base = (oop*)(_virtual_space.low());
6890 6889 _index = 0;
6891 6890 _capacity = new_capacity;
6892 6891 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6893 6892 // Failed to double capacity, continue;
6894 6893 // we print a detail message only once per CMS cycle.
6895 6894 gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6896 6895 SIZE_FORMAT"K",
6897 6896 _capacity / K, new_capacity / K);
6898 6897 }
6899 6898 }
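// Behaviour in brief (hypothetical sizes): a stack of 32K oops asks for
// MIN2(64K, MarkStackSizeMax); once _capacity reaches MarkStackSizeMax further
// expand() calls only bump _hit_limit and return, and overflow is then handled
// via the collector's overflow list and preserved-marks machinery instead.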
6900 6899
6901 6900
6902 6901 // Closures
6903 6902 // XXX: there seems to be a lot of code duplication here;
6904 6903 // should refactor and consolidate common code.
6905 6904
6906 6905 // This closure is used to mark refs into the CMS generation in
6907 6906 // the CMS bit map. Called at the first checkpoint. This closure
6908 6907 // assumes that we do not need to re-mark dirty cards; if the CMS
6909 6908 // generation on which this is used is not an oldest
6910 6909 // generation then this will lose younger_gen cards!
6911 6910
6912 6911 MarkRefsIntoClosure::MarkRefsIntoClosure(
6913 6912 MemRegion span, CMSBitMap* bitMap):
6914 6913 _span(span),
6915 6914 _bitMap(bitMap)
6916 6915 {
6917 6916 assert(_ref_processor == NULL, "deliberately left NULL");
6918 6917 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6919 6918 }
6920 6919
6921 6920 void MarkRefsIntoClosure::do_oop(oop obj) {
6922 6921 // if p points into _span, then mark corresponding bit in _markBitMap
6923 6922 assert(obj->is_oop(), "expected an oop");
6924 6923 HeapWord* addr = (HeapWord*)obj;
6925 6924 if (_span.contains(addr)) {
6926 6925 // this should be made more efficient
6927 6926 _bitMap->mark(addr);
6928 6927 }
6929 6928 }
6930 6929
6931 6930 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6932 6931 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6933 6932
6934 6933 Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
6935 6934 MemRegion span, CMSBitMap* bitMap):
6936 6935 _span(span),
6937 6936 _bitMap(bitMap)
6938 6937 {
6939 6938 assert(_ref_processor == NULL, "deliberately left NULL");
6940 6939 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6941 6940 }
6942 6941
6943 6942 void Par_MarkRefsIntoClosure::do_oop(oop obj) {
6944 6943 // if p points into _span, then mark corresponding bit in _markBitMap
6945 6944 assert(obj->is_oop(), "expected an oop");
6946 6945 HeapWord* addr = (HeapWord*)obj;
6947 6946 if (_span.contains(addr)) {
6948 6947 // this should be made more efficient
6949 6948 _bitMap->par_mark(addr);
6950 6949 }
6951 6950 }
6952 6951
6953 6952 void Par_MarkRefsIntoClosure::do_oop(oop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6954 6953 void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6955 6954
6956 6955 // A variant of the above, used for CMS marking verification.
6957 6956 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6958 6957 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6959 6958 _span(span),
6960 6959 _verification_bm(verification_bm),
6961 6960 _cms_bm(cms_bm)
6962 6961 {
6963 6962 assert(_ref_processor == NULL, "deliberately left NULL");
6964 6963 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6965 6964 }
6966 6965
6967 6966 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6968 6967 // if p points into _span, then mark corresponding bit in _markBitMap
6969 6968 assert(obj->is_oop(), "expected an oop");
6970 6969 HeapWord* addr = (HeapWord*)obj;
6971 6970 if (_span.contains(addr)) {
6972 6971 _verification_bm->mark(addr);
6973 6972 if (!_cms_bm->isMarked(addr)) {
6974 6973 oop(addr)->print();
6975 6974 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6976 6975 fatal("... aborting");
6977 6976 }
6978 6977 }
6979 6978 }
6980 6979
6981 6980 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6982 6981 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6983 6982
6984 6983 //////////////////////////////////////////////////
6985 6984 // MarkRefsIntoAndScanClosure
6986 6985 //////////////////////////////////////////////////
6987 6986
6988 6987 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6989 6988 ReferenceProcessor* rp,
6990 6989 CMSBitMap* bit_map,
6991 6990 CMSBitMap* mod_union_table,
6992 6991 CMSMarkStack* mark_stack,
6993 6992 CMSCollector* collector,
6994 6993 bool should_yield,
6995 6994 bool concurrent_precleaning):
6996 6995 _collector(collector),
6997 6996 _span(span),
6998 6997 _bit_map(bit_map),
6999 6998 _mark_stack(mark_stack),
7000 6999 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
7001 7000 mark_stack, concurrent_precleaning),
7002 7001 _yield(should_yield),
7003 7002 _concurrent_precleaning(concurrent_precleaning),
7004 7003 _freelistLock(NULL)
7005 7004 {
7006 7005 _ref_processor = rp;
7007 7006 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7008 7007 }
7009 7008
7010 7009 // This closure is used to mark refs into the CMS generation at the
7011 7010 // second (final) checkpoint, and to scan and transitively follow
7012 7011 // the unmarked oops. It is also used during the concurrent precleaning
7013 7012 // phase while scanning objects on dirty cards in the CMS generation.
7014 7013 // The marks are made in the marking bit map and the marking stack is
7015 7014 // used for keeping the (newly) grey objects during the scan.
7016 7015 // The parallel version (Par_...) appears further below.
7017 7016 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
7018 7017 if (obj != NULL) {
7019 7018 assert(obj->is_oop(), "expected an oop");
7020 7019 HeapWord* addr = (HeapWord*)obj;
7021 7020 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7022 7021 assert(_collector->overflow_list_is_empty(),
7023 7022 "overflow list should be empty");
7024 7023 if (_span.contains(addr) &&
7025 7024 !_bit_map->isMarked(addr)) {
7026 7025 // mark bit map (object is now grey)
7027 7026 _bit_map->mark(addr);
7028 7027 // push on marking stack (stack should be empty), and drain the
7029 7028 // stack by applying this closure to the oops in the oops popped
7030 7029 // from the stack (i.e. blacken the grey objects)
7031 7030 bool res = _mark_stack->push(obj);
7032 7031 assert(res, "Should have space to push on empty stack");
7033 7032 do {
7034 7033 oop new_oop = _mark_stack->pop();
7035 7034 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7036 7035 assert(_bit_map->isMarked((HeapWord*)new_oop),
7037 7036 "only grey objects on this stack");
7038 7037 // iterate over the oops in this oop, marking and pushing
7039 7038 // the ones in CMS heap (i.e. in _span).
7040 7039 new_oop->oop_iterate(&_pushAndMarkClosure);
7041 7040 // check if it's time to yield
7042 7041 do_yield_check();
7043 7042 } while (!_mark_stack->isEmpty() ||
7044 7043 (!_concurrent_precleaning && take_from_overflow_list()));
7045 7044 // if marking stack is empty, and we are not doing this
7046 7045 // during precleaning, then check the overflow list
7047 7046 }
7048 7047 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7049 7048 assert(_collector->overflow_list_is_empty(),
7050 7049 "overflow list was drained above");
7051 7050 // We could restore evacuated mark words, if any, used for
7052 7051 // overflow list links here because the overflow list is
7053 7052 // provably empty here. That would reduce the maximum
7054 7053 // size requirements for preserved_{oop,mark}_stack.
7055 7054 // But we'll just postpone it until we are all done
7056 7055 // so we can just stream through.
7057 7056 if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
7058 7057 _collector->restore_preserved_marks_if_any();
7059 7058 assert(_collector->no_preserved_marks(), "No preserved marks");
7060 7059 }
7061 7060 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
7062 7061 "All preserved marks should have been restored above");
7063 7062 }
7064 7063 }
7065 7064
7066 7065 void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
7067 7066 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
7068 7067
7069 7068 void MarkRefsIntoAndScanClosure::do_yield_work() {
7070 7069 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7071 7070 "CMS thread should hold CMS token");
7072 7071 assert_lock_strong(_freelistLock);
7073 7072 assert_lock_strong(_bit_map->lock());
7074 7073 // relinquish the free_list_lock and bitMaplock()
7075 7074 _bit_map->lock()->unlock();
7076 7075 _freelistLock->unlock();
7077 7076 ConcurrentMarkSweepThread::desynchronize(true);
7078 7077 ConcurrentMarkSweepThread::acknowledge_yield_request();
7079 7078 _collector->stopTimer();
7080 7079 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7081 7080 if (PrintCMSStatistics != 0) {
7082 7081 _collector->incrementYields();
7083 7082 }
7084 7083 _collector->icms_wait();
7085 7084
7086 7085 // See the comment in coordinator_yield()
7087 7086 for (unsigned i = 0;
7088 7087 i < CMSYieldSleepCount &&
7089 7088 ConcurrentMarkSweepThread::should_yield() &&
7090 7089 !CMSCollector::foregroundGCIsActive();
7091 7090 ++i) {
7092 7091 os::sleep(Thread::current(), 1, false);
7093 7092 ConcurrentMarkSweepThread::acknowledge_yield_request();
7094 7093 }
7095 7094
7096 7095 ConcurrentMarkSweepThread::synchronize(true);
7097 7096 _freelistLock->lock_without_safepoint_check();
7098 7097 _bit_map->lock()->lock_without_safepoint_check();
7099 7098 _collector->startTimer();
7100 7099 }
7101 7100
7102 7101 ///////////////////////////////////////////////////////////
7103 7102 // Par_MarkRefsIntoAndScanClosure: a parallel version of
7104 7103 // MarkRefsIntoAndScanClosure
7105 7104 ///////////////////////////////////////////////////////////
7106 7105 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
7107 7106 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
7108 7107 CMSBitMap* bit_map, OopTaskQueue* work_queue):
7109 7108 _span(span),
7110 7109 _bit_map(bit_map),
7111 7110 _work_queue(work_queue),
7112 7111 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
7113 7112 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
7114 7113 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
7115 7114 {
7116 7115 _ref_processor = rp;
7117 7116 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7118 7117 }
7119 7118
7120 7119 // This closure is used to mark refs into the CMS generation at the
7121 7120 // second (final) checkpoint, and to scan and transitively follow
7122 7121 // the unmarked oops. The marks are made in the marking bit map and
7123 7122 // the work_queue is used for keeping the (newly) grey objects during
7124 7123 // the scan phase whence they are also available for stealing by parallel
7125 7124 // threads. Since the marking bit map is shared, updates are
7126 7125 // synchronized (via CAS).
7127 7126 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
7128 7127 if (obj != NULL) {
7129 7128 // Ignore mark word because this could be an already marked oop
7130 7129 // that may be chained at the end of the overflow list.
7131 7130 assert(obj->is_oop(true), "expected an oop");
7132 7131 HeapWord* addr = (HeapWord*)obj;
7133 7132 if (_span.contains(addr) &&
7134 7133 !_bit_map->isMarked(addr)) {
7135 7134 // mark bit map (object will become grey):
7136 7135 // It is possible for several threads to be
7137 7136 // trying to "claim" this object concurrently;
7138 7137 // the unique thread that succeeds in marking the
7139 7138 // object first will do the subsequent push on
7140 7139 // to the work queue (or overflow list).
7141 7140 if (_bit_map->par_mark(addr)) {
7142 7141 // push on work_queue (which may not be empty), and trim the
7143 7142 // queue to an appropriate length by applying this closure to
7144 7143 // the oops in the oops popped from the stack (i.e. blacken the
7145 7144 // grey objects)
7146 7145 bool res = _work_queue->push(obj);
7147 7146 assert(res, "Low water mark should be less than capacity?");
7148 7147 trim_queue(_low_water_mark);
7149 7148 } // Else, another thread claimed the object
7150 7149 }
7151 7150 }
7152 7151 }
7153 7152
7154 7153 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
7155 7154 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
7156 7155
7157 7156 // This closure is used to rescan the marked objects on the dirty cards
7158 7157 // in the mod union table and the card table proper.
7159 7158 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
7160 7159 oop p, MemRegion mr) {
7161 7160
7162 7161 size_t size = 0;
7163 7162 HeapWord* addr = (HeapWord*)p;
7164 7163 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7165 7164 assert(_span.contains(addr), "we are scanning the CMS generation");
7166 7165 // check if it's time to yield
7167 7166 if (do_yield_check()) {
7168 7167 // We yielded for some foreground stop-world work,
7169 7168 // and we have been asked to abort this ongoing preclean cycle.
7170 7169 return 0;
7171 7170 }
7172 7171 if (_bitMap->isMarked(addr)) {
7173 7172 // it's marked; is it potentially uninitialized?
7174 7173 if (p->klass_or_null() != NULL) {
7175 7174 // an initialized object; ignore mark word in verification below
7176 7175 // since we are running concurrent with mutators
7177 7176 assert(p->is_oop(true), "should be an oop");
7178 7177 if (p->is_objArray()) {
7179 7178 // objArrays are precisely marked; restrict scanning
7180 7179 // to dirty cards only.
7181 7180 size = CompactibleFreeListSpace::adjustObjectSize(
7182 7181 p->oop_iterate(_scanningClosure, mr));
7183 7182 } else {
7184 7183 // A non-array may have been imprecisely marked; we need
7185 7184 // to scan object in its entirety.
7186 7185 size = CompactibleFreeListSpace::adjustObjectSize(
7187 7186 p->oop_iterate(_scanningClosure));
7188 7187 }
7189 7188 #ifdef ASSERT
7190 7189 size_t direct_size =
7191 7190 CompactibleFreeListSpace::adjustObjectSize(p->size());
7192 7191 assert(size == direct_size, "Inconsistency in size");
7193 7192 assert(size >= 3, "Necessary for Printezis marks to work");
7194 7193 if (!_bitMap->isMarked(addr+1)) {
7195 7194 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
7196 7195 } else {
7197 7196 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
7198 7197 assert(_bitMap->isMarked(addr+size-1),
7199 7198 "inconsistent Printezis mark");
7200 7199 }
7201 7200 #endif // ASSERT
7202 7201 } else {
7203 7202 // an uninitialized object
7204 7203 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
7205 7204 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7206 7205 size = pointer_delta(nextOneAddr + 1, addr);
7207 7206 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7208 7207 "alignment problem");
7209 7208 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
7210 7209 // will dirty the card when the klass pointer is installed in the
7211 7210 // object (signalling the completion of initialization).
7212 7211 }
7213 7212 } else {
7214 7213 // Either a not yet marked object or an uninitialized object
7215 7214 if (p->klass_or_null() == NULL) {
7216 7215 // An uninitialized object, skip to the next card, since
7217 7216 // we may not be able to read its P-bits yet.
7218 7217 assert(size == 0, "Initial value");
7219 7218 } else {
7220 7219 // An object not (yet) reached by marking: we merely need to
7221 7220 // compute its size so as to go look at the next block.
7222 7221 assert(p->is_oop(true), "should be an oop");
7223 7222 size = CompactibleFreeListSpace::adjustObjectSize(p->size());
7224 7223 }
7225 7224 }
7226 7225 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7227 7226 return size;
7228 7227 }
7229 7228
7230 7229 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
7231 7230 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7232 7231 "CMS thread should hold CMS token");
7233 7232 assert_lock_strong(_freelistLock);
7234 7233 assert_lock_strong(_bitMap->lock());
7235 7234 // relinquish the free_list_lock and bitMaplock()
7236 7235 _bitMap->lock()->unlock();
7237 7236 _freelistLock->unlock();
7238 7237 ConcurrentMarkSweepThread::desynchronize(true);
7239 7238 ConcurrentMarkSweepThread::acknowledge_yield_request();
7240 7239 _collector->stopTimer();
7241 7240 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7242 7241 if (PrintCMSStatistics != 0) {
7243 7242 _collector->incrementYields();
7244 7243 }
7245 7244 _collector->icms_wait();
7246 7245
7247 7246 // See the comment in coordinator_yield()
7248 7247 for (unsigned i = 0; i < CMSYieldSleepCount &&
7249 7248 ConcurrentMarkSweepThread::should_yield() &&
7250 7249 !CMSCollector::foregroundGCIsActive(); ++i) {
7251 7250 os::sleep(Thread::current(), 1, false);
7252 7251 ConcurrentMarkSweepThread::acknowledge_yield_request();
7253 7252 }
7254 7253
7255 7254 ConcurrentMarkSweepThread::synchronize(true);
7256 7255 _freelistLock->lock_without_safepoint_check();
7257 7256 _bitMap->lock()->lock_without_safepoint_check();
7258 7257 _collector->startTimer();
7259 7258 }
7260 7259
7261 7260
7262 7261 //////////////////////////////////////////////////////////////////
7263 7262 // SurvivorSpacePrecleanClosure
7264 7263 //////////////////////////////////////////////////////////////////
7265 7264 // This (single-threaded) closure is used to preclean the oops in
7266 7265 // the survivor spaces.
7267 7266 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
7268 7267
7269 7268 HeapWord* addr = (HeapWord*)p;
7270 7269 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
7271 7270 assert(!_span.contains(addr), "we are scanning the survivor spaces");
7272 7271 assert(p->klass_or_null() != NULL, "object should be initialized");
7273 7272 // an initialized object; ignore mark word in verification below
7274 7273 // since we are running concurrent with mutators
7275 7274 assert(p->is_oop(true), "should be an oop");
7276 7275 // Note that we do not yield while we iterate over
7277 7276 // the interior oops of p, pushing the relevant ones
7278 7277 // on our marking stack.
7279 7278 size_t size = p->oop_iterate(_scanning_closure);
7280 7279 do_yield_check();
7281 7280 // Observe that below, we do not abandon the preclean
7282 7281 // phase as soon as we should; rather we empty the
7283 7282 // marking stack before returning. This is to satisfy
7284 7283 // some existing assertions. In general, it may be a
7285 7284 // good idea to abort immediately and complete the marking
7286 7285 // from the grey objects at a later time.
7287 7286 while (!_mark_stack->isEmpty()) {
7288 7287 oop new_oop = _mark_stack->pop();
7289 7288 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7290 7289 assert(_bit_map->isMarked((HeapWord*)new_oop),
7291 7290 "only grey objects on this stack");
7292 7291 // iterate over the oops in this oop, marking and pushing
7293 7292 // the ones in CMS heap (i.e. in _span).
7294 7293 new_oop->oop_iterate(_scanning_closure);
7295 7294 // check if it's time to yield
7296 7295 do_yield_check();
7297 7296 }
7298 7297 unsigned int after_count =
7299 7298 GenCollectedHeap::heap()->total_collections();
7300 7299 bool abort = (_before_count != after_count) ||
7301 7300 _collector->should_abort_preclean();
7302 7301 return abort ? 0 : size;
7303 7302 }
7304 7303
7305 7304 void SurvivorSpacePrecleanClosure::do_yield_work() {
7306 7305 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7307 7306 "CMS thread should hold CMS token");
7308 7307 assert_lock_strong(_bit_map->lock());
7309 7308 // Relinquish the bit map lock
7310 7309 _bit_map->lock()->unlock();
7311 7310 ConcurrentMarkSweepThread::desynchronize(true);
7312 7311 ConcurrentMarkSweepThread::acknowledge_yield_request();
7313 7312 _collector->stopTimer();
7314 7313 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7315 7314 if (PrintCMSStatistics != 0) {
7316 7315 _collector->incrementYields();
7317 7316 }
7318 7317 _collector->icms_wait();
7319 7318
7320 7319 // See the comment in coordinator_yield()
7321 7320 for (unsigned i = 0; i < CMSYieldSleepCount &&
7322 7321 ConcurrentMarkSweepThread::should_yield() &&
7323 7322 !CMSCollector::foregroundGCIsActive(); ++i) {
7324 7323 os::sleep(Thread::current(), 1, false);
7325 7324 ConcurrentMarkSweepThread::acknowledge_yield_request();
7326 7325 }
7327 7326
7328 7327 ConcurrentMarkSweepThread::synchronize(true);
7329 7328 _bit_map->lock()->lock_without_safepoint_check();
7330 7329 _collector->startTimer();
7331 7330 }
7332 7331
7333 7332 // This closure is used to rescan the marked objects on the dirty cards
7334 7333 // in the mod union table and the card table proper. In the parallel
7335 7334 // case, although the bitMap is shared, we do a single read so the
7336 7335 // isMarked() query is "safe".
7337 7336 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
7338 7337 // Ignore mark word because we are running concurrent with mutators
7339 7338 assert(p->is_oop_or_null(true), "expected an oop or null");
7340 7339 HeapWord* addr = (HeapWord*)p;
7341 7340 assert(_span.contains(addr), "we are scanning the CMS generation");
7342 7341 bool is_obj_array = false;
7343 7342 #ifdef ASSERT
7344 7343 if (!_parallel) {
7345 7344 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7346 7345 assert(_collector->overflow_list_is_empty(),
7347 7346 "overflow list should be empty");
7348 7347
7349 7348 }
7350 7349 #endif // ASSERT
7351 7350 if (_bit_map->isMarked(addr)) {
7352 7351 // Obj arrays are precisely marked, non-arrays are not;
7353 7352 // so we scan objArrays precisely and non-arrays in their
7354 7353 // entirety.
7355 7354 if (p->is_objArray()) {
7356 7355 is_obj_array = true;
7357 7356 if (_parallel) {
7358 7357 p->oop_iterate(_par_scan_closure, mr);
7359 7358 } else {
7360 7359 p->oop_iterate(_scan_closure, mr);
7361 7360 }
7362 7361 } else {
7363 7362 if (_parallel) {
7364 7363 p->oop_iterate(_par_scan_closure);
7365 7364 } else {
7366 7365 p->oop_iterate(_scan_closure);
7367 7366 }
7368 7367 }
7369 7368 }
7370 7369 #ifdef ASSERT
7371 7370 if (!_parallel) {
7372 7371 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7373 7372 assert(_collector->overflow_list_is_empty(),
7374 7373 "overflow list should be empty");
7375 7374
7376 7375 }
7377 7376 #endif // ASSERT
7378 7377 return is_obj_array;
7379 7378 }
7380 7379
7381 7380 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
7382 7381 MemRegion span,
7383 7382 CMSBitMap* bitMap, CMSMarkStack* markStack,
7384 7383 bool should_yield, bool verifying):
7385 7384 _collector(collector),
7386 7385 _span(span),
7387 7386 _bitMap(bitMap),
7388 7387 _mut(&collector->_modUnionTable),
7389 7388 _markStack(markStack),
7390 7389 _yield(should_yield),
7391 7390 _skipBits(0)
7392 7391 {
7393 7392 assert(_markStack->isEmpty(), "stack should be empty");
7394 7393 _finger = _bitMap->startWord();
7395 7394 _threshold = _finger;
7396 7395 assert(_collector->_restart_addr == NULL, "Sanity check");
7397 7396 assert(_span.contains(_finger), "Out of bounds _finger?");
7398 7397 DEBUG_ONLY(_verifying = verifying;)
7399 7398 }
7400 7399
7401 7400 void MarkFromRootsClosure::reset(HeapWord* addr) {
7402 7401 assert(_markStack->isEmpty(), "would cause duplicates on stack");
7403 7402 assert(_span.contains(addr), "Out of bounds _finger?");
7404 7403 _finger = addr;
7405 7404 _threshold = (HeapWord*)round_to(
7406 7405 (intptr_t)_finger, CardTableModRefBS::card_size);
7407 7406 }
7408 7407
7409 7408 // Should revisit to see if this should be restructured for
7410 7409 // greater efficiency.
7411 7410 bool MarkFromRootsClosure::do_bit(size_t offset) {
7412 7411 if (_skipBits > 0) {
7413 7412 _skipBits--;
7414 7413 return true;
7415 7414 }
7416 7415 // convert offset into a HeapWord*
7417 7416 HeapWord* addr = _bitMap->startWord() + offset;
7418 7417 assert(_bitMap->endWord() && addr < _bitMap->endWord(),
7419 7418 "address out of range");
7420 7419 assert(_bitMap->isMarked(addr), "tautology");
7421 7420 if (_bitMap->isMarked(addr+1)) {
7422 7421 // this is an allocated but not yet initialized object
7423 7422 assert(_skipBits == 0, "tautology");
7424 7423 _skipBits = 2; // skip next two marked bits ("Printezis-marks")
7425 7424 oop p = oop(addr);
7426 7425 if (p->klass_or_null() == NULL) {
7427 7426 DEBUG_ONLY(if (!_verifying) {)
7428 7427 // We re-dirty the cards on which this object lies and increase
7429 7428 // the _threshold so that we'll come back to scan this object
7430 7429 // during the preclean or remark phase. (CMSCleanOnEnter)
7431 7430 if (CMSCleanOnEnter) {
7432 7431 size_t sz = _collector->block_size_using_printezis_bits(addr);
7433 7432 HeapWord* end_card_addr = (HeapWord*)round_to(
7434 7433 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7435 7434 MemRegion redirty_range = MemRegion(addr, end_card_addr);
7436 7435 assert(!redirty_range.is_empty(), "Arithmetical tautology");
7437 7436 // Bump _threshold to end_card_addr; note that
7438 7437 // _threshold cannot possibly exceed end_card_addr, anyhow.
7439 7438 // This prevents future clearing of the card as the scan proceeds
7440 7439 // to the right.
7441 7440 assert(_threshold <= end_card_addr,
7442 7441 "Because we are just scanning into this object");
7443 7442 if (_threshold < end_card_addr) {
7444 7443 _threshold = end_card_addr;
7445 7444 }
7446 7445 if (p->klass_or_null() != NULL) {
7447 7446 // Redirty the range of cards...
7448 7447 _mut->mark_range(redirty_range);
7449 7448 } // ...else the setting of klass will dirty the card anyway.
7450 7449 }
7451 7450 DEBUG_ONLY(})
7452 7451 return true;
7453 7452 }
7454 7453 }
7455 7454 scanOopsInOop(addr);
7456 7455 return true;
7457 7456 }
7458 7457
7459 7458 // We take a break if we've been at this for a while,
7460 7459 // so as to avoid monopolizing the locks involved.
7461 7460 void MarkFromRootsClosure::do_yield_work() {
7462 7461 // First give up the locks, then yield, then re-lock
7463 7462 // We should probably use a constructor/destructor idiom to
7464 7463 // do this unlock/lock or modify the MutexUnlocker class to
7465 7464 // serve our purpose. XXX
7466 7465 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7467 7466 "CMS thread should hold CMS token");
7468 7467 assert_lock_strong(_bitMap->lock());
7469 7468 _bitMap->lock()->unlock();
7470 7469 ConcurrentMarkSweepThread::desynchronize(true);
7471 7470 ConcurrentMarkSweepThread::acknowledge_yield_request();
7472 7471 _collector->stopTimer();
7473 7472 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7474 7473 if (PrintCMSStatistics != 0) {
7475 7474 _collector->incrementYields();
7476 7475 }
7477 7476 _collector->icms_wait();
7478 7477
7479 7478 // See the comment in coordinator_yield()
7480 7479 for (unsigned i = 0; i < CMSYieldSleepCount &&
7481 7480 ConcurrentMarkSweepThread::should_yield() &&
7482 7481 !CMSCollector::foregroundGCIsActive(); ++i) {
7483 7482 os::sleep(Thread::current(), 1, false);
7484 7483 ConcurrentMarkSweepThread::acknowledge_yield_request();
7485 7484 }
7486 7485
7487 7486 ConcurrentMarkSweepThread::synchronize(true);
7488 7487 _bitMap->lock()->lock_without_safepoint_check();
7489 7488 _collector->startTimer();
7490 7489 }
7491 7490
7492 7491 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7493 7492 assert(_bitMap->isMarked(ptr), "expected bit to be set");
7494 7493 assert(_markStack->isEmpty(),
7495 7494 "should drain stack to limit stack usage");
7496 7495 // convert ptr to an oop preparatory to scanning
7497 7496 oop obj = oop(ptr);
7498 7497 // Ignore mark word in verification below, since we
7499 7498 // may be running concurrent with mutators.
7500 7499 assert(obj->is_oop(true), "should be an oop");
7501 7500 assert(_finger <= ptr, "_finger runneth ahead");
7502 7501 // advance the finger to right end of this object
7503 7502 _finger = ptr + obj->size();
7504 7503 assert(_finger > ptr, "we just incremented it above");
7505 7504 // On large heaps, it may take us some time to get through
7506 7505 // the marking phase (especially if running iCMS). During
7507 7506 // this time it's possible that a lot of mutations have
7508 7507 // accumulated in the card table and the mod union table --
7509 7508 // these mutation records are redundant until we have
7510 7509 // actually traced into the corresponding card.
7511 7510 // Here, we check whether advancing the finger would make
7512 7511 // us cross into a new card, and if so clear corresponding
7513 7512 // cards in the MUT (preclean them in the card-table in the
7514 7513 // future).
7515 7514
7516 7515 DEBUG_ONLY(if (!_verifying) {)
7517 7516 // The clean-on-enter optimization is disabled by default,
7518 7517 // until we fix 6178663.
7519 7518 if (CMSCleanOnEnter && (_finger > _threshold)) {
7520 7519 // [_threshold, _finger) represents the interval
7521 7520 // of cards to be cleared in MUT (or precleaned in card table).
7522 7521 // The set of cards to be cleared is all those that overlap
7523 7522 // with the interval [_threshold, _finger); note that
7524 7523 // _threshold is always kept card-aligned but _finger isn't
7525 7524 // always card-aligned.
7526 7525 HeapWord* old_threshold = _threshold;
7527 7526 assert(old_threshold == (HeapWord*)round_to(
7528 7527 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7529 7528 "_threshold should always be card-aligned");
7530 7529 _threshold = (HeapWord*)round_to(
7531 7530 (intptr_t)_finger, CardTableModRefBS::card_size);
7532 7531 MemRegion mr(old_threshold, _threshold);
7533 7532 assert(!mr.is_empty(), "Control point invariant");
7534 7533 assert(_span.contains(mr), "Should clear within span");
7535 7534 _mut->clear_range(mr);
7536 7535 }
7537 7536 DEBUG_ONLY(})
7538 7537 // Note: the finger doesn't advance while we drain
7539 7538 // the stack below.
7540 7539 PushOrMarkClosure pushOrMarkClosure(_collector,
7541 7540 _span, _bitMap, _markStack,
7542 7541 _finger, this);
7543 7542 bool res = _markStack->push(obj);
7544 7543 assert(res, "Empty non-zero size stack should have space for single push");
7545 7544 while (!_markStack->isEmpty()) {
7546 7545 oop new_oop = _markStack->pop();
7547 7546 // Skip verifying header mark word below because we are
7548 7547 // running concurrent with mutators.
7549 7548 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7550 7549 // now scan this oop's oops
7551 7550 new_oop->oop_iterate(&pushOrMarkClosure);
7552 7551 do_yield_check();
7553 7552 }
7554 7553 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7555 7554 }
7556 7555
7557 7556 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7558 7557 CMSCollector* collector, MemRegion span,
7559 7558 CMSBitMap* bit_map,
7560 7559 OopTaskQueue* work_queue,
7561 7560 CMSMarkStack* overflow_stack,
7562 7561 bool should_yield):
7563 7562 _collector(collector),
7564 7563 _whole_span(collector->_span),
7565 7564 _span(span),
7566 7565 _bit_map(bit_map),
7567 7566 _mut(&collector->_modUnionTable),
7568 7567 _work_queue(work_queue),
7569 7568 _overflow_stack(overflow_stack),
7570 7569 _yield(should_yield),
7571 7570 _skip_bits(0),
7572 7571 _task(task)
7573 7572 {
7574 7573 assert(_work_queue->size() == 0, "work_queue should be empty");
7575 7574 _finger = span.start();
7576 7575 _threshold = _finger; // XXX Defer clear-on-enter optimization for now
7577 7576 assert(_span.contains(_finger), "Out of bounds _finger?");
7578 7577 }
7579 7578
7580 7579 // Should revisit to see if this should be restructured for
7581 7580 // greater efficiency.
7582 7581 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7583 7582 if (_skip_bits > 0) {
7584 7583 _skip_bits--;
7585 7584 return true;
7586 7585 }
7587 7586 // convert offset into a HeapWord*
7588 7587 HeapWord* addr = _bit_map->startWord() + offset;
7589 7588 assert(_bit_map->endWord() && addr < _bit_map->endWord(),
7590 7589 "address out of range");
7591 7590 assert(_bit_map->isMarked(addr), "tautology");
7592 7591 if (_bit_map->isMarked(addr+1)) {
7593 7592 // this is an allocated object that might not yet be initialized
7594 7593 assert(_skip_bits == 0, "tautology");
7595 7594 _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
7596 7595 oop p = oop(addr);
7597 7596 if (p->klass_or_null() == NULL) {
7598 7597 // in the case of Clean-on-Enter optimization, redirty card
7599 7598 // and avoid clearing card by increasing the threshold.
7600 7599 return true;
7601 7600 }
7602 7601 }
7603 7602 scan_oops_in_oop(addr);
7604 7603 return true;
7605 7604 }
7606 7605
7607 7606 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7608 7607 assert(_bit_map->isMarked(ptr), "expected bit to be set");
7609 7608 // Should we assert that our work queue is empty or
7610 7609 // below some drain limit?
7611 7610 assert(_work_queue->size() == 0,
7612 7611 "should drain stack to limit stack usage");
7613 7612 // convert ptr to an oop preparatory to scanning
7614 7613 oop obj = oop(ptr);
7615 7614 // Ignore mark word in verification below, since we
7616 7615 // may be running concurrent with mutators.
7617 7616 assert(obj->is_oop(true), "should be an oop");
7618 7617 assert(_finger <= ptr, "_finger runneth ahead");
7619 7618 // advance the finger to right end of this object
7620 7619 _finger = ptr + obj->size();
7621 7620 assert(_finger > ptr, "we just incremented it above");
7622 7621 // On large heaps, it may take us some time to get through
7623 7622 // the marking phase (especially if running iCMS). During
7624 7623 // this time it's possible that a lot of mutations have
7625 7624 // accumulated in the card table and the mod union table --
7626 7625 // these mutation records are redundant until we have
7627 7626 // actually traced into the corresponding card.
7628 7627 // Here, we check whether advancing the finger would make
7629 7628 // us cross into a new card, and if so clear corresponding
7630 7629 // cards in the MUT (preclean them in the card-table in the
7631 7630 // future).
7632 7631
7633 7632 // The clean-on-enter optimization is disabled by default,
7634 7633 // until we fix 6178663.
7635 7634 if (CMSCleanOnEnter && (_finger > _threshold)) {
7636 7635 // [_threshold, _finger) represents the interval
7637 7636 // of cards to be cleared in MUT (or precleaned in card table).
7638 7637 // The set of cards to be cleared is all those that overlap
7639 7638 // with the interval [_threshold, _finger); note that
7640 7639 // _threshold is always kept card-aligned but _finger isn't
7641 7640 // always card-aligned.
7642 7641 HeapWord* old_threshold = _threshold;
7643 7642 assert(old_threshold == (HeapWord*)round_to(
7644 7643 (intptr_t)old_threshold, CardTableModRefBS::card_size),
7645 7644 "_threshold should always be card-aligned");
7646 7645 _threshold = (HeapWord*)round_to(
7647 7646 (intptr_t)_finger, CardTableModRefBS::card_size);
7648 7647 MemRegion mr(old_threshold, _threshold);
7649 7648 assert(!mr.is_empty(), "Control point invariant");
7650 7649 assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7651 7650 _mut->clear_range(mr);
7652 7651 }
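  // As a concrete illustration of the rounding above (example values only,
  // assuming the usual 512-byte card size, i.e. 64 HeapWords on a 64-bit VM):
  // with old_threshold == 0x1000 and _finger == 0x1250, _threshold is rounded
  // up to 0x1400 and the MUT bits for the two cards covering [0x1000, 0x1400)
  // are cleared.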
7653 7652
7654 7653 // Note: the local finger doesn't advance while we drain
7655 7654 // the stack below, but the global finger sure can and will.
7656 7655 HeapWord** gfa = _task->global_finger_addr();
7657 7656 Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7658 7657 _span, _bit_map,
7659 7658 _work_queue,
7660 7659 _overflow_stack,
7661 7660 _finger,
7662 7661 gfa, this);
7663 7662 bool res = _work_queue->push(obj); // overflow could occur here
7664 7663 assert(res, "Will hold once we use workqueues");
7665 7664 while (true) {
7666 7665 oop new_oop;
7667 7666 if (!_work_queue->pop_local(new_oop)) {
7668 7667 // We emptied our work_queue; check if there's stuff that can
7669 7668 // be gotten from the overflow stack.
7670 7669 if (CMSConcMarkingTask::get_work_from_overflow_stack(
7671 7670 _overflow_stack, _work_queue)) {
7672 7671 do_yield_check();
7673 7672 continue;
7674 7673 } else { // done
7675 7674 break;
7676 7675 }
7677 7676 }
7678 7677 // Skip verifying header mark word below because we are
7679 7678 // running concurrent with mutators.
7680 7679 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7681 7680 // now scan this oop's oops
7682 7681 new_oop->oop_iterate(&pushOrMarkClosure);
7683 7682 do_yield_check();
7684 7683 }
7685 7684 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7686 7685 }
7687 7686
7688 7687 // Yield in response to a request from VM Thread or
7689 7688 // from mutators.
7690 7689 void Par_MarkFromRootsClosure::do_yield_work() {
7691 7690 assert(_task != NULL, "sanity");
7692 7691 _task->yield();
7693 7692 }
7694 7693
7695 7694 // A variant of the above used for verifying CMS marking work.
7696 7695 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7697 7696 MemRegion span,
7698 7697 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7699 7698 CMSMarkStack* mark_stack):
7700 7699 _collector(collector),
7701 7700 _span(span),
7702 7701 _verification_bm(verification_bm),
7703 7702 _cms_bm(cms_bm),
7704 7703 _mark_stack(mark_stack),
7705 7704 _pam_verify_closure(collector, span, verification_bm, cms_bm,
7706 7705 mark_stack)
7707 7706 {
7708 7707 assert(_mark_stack->isEmpty(), "stack should be empty");
7709 7708 _finger = _verification_bm->startWord();
7710 7709 assert(_collector->_restart_addr == NULL, "Sanity check");
7711 7710 assert(_span.contains(_finger), "Out of bounds _finger?");
7712 7711 }
7713 7712
7714 7713 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7715 7714 assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7716 7715 assert(_span.contains(addr), "Out of bounds _finger?");
7717 7716 _finger = addr;
7718 7717 }
7719 7718
7720 7719 // Should revisit to see if this should be restructured for
7721 7720 // greater efficiency.
7722 7721 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7723 7722 // convert offset into a HeapWord*
7724 7723 HeapWord* addr = _verification_bm->startWord() + offset;
7725 7724 assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7726 7725 "address out of range");
7727 7726 assert(_verification_bm->isMarked(addr), "tautology");
7728 7727 assert(_cms_bm->isMarked(addr), "tautology");
7729 7728
7730 7729 assert(_mark_stack->isEmpty(),
7731 7730 "should drain stack to limit stack usage");
7732 7731 // convert addr to an oop preparatory to scanning
7733 7732 oop obj = oop(addr);
7734 7733 assert(obj->is_oop(), "should be an oop");
7735 7734 assert(_finger <= addr, "_finger runneth ahead");
7736 7735 // advance the finger to right end of this object
7737 7736 _finger = addr + obj->size();
7738 7737 assert(_finger > addr, "we just incremented it above");
7739 7738 // Note: the finger doesn't advance while we drain
7740 7739 // the stack below.
7741 7740 bool res = _mark_stack->push(obj);
7742 7741 assert(res, "Empty non-zero size stack should have space for single push");
7743 7742 while (!_mark_stack->isEmpty()) {
7744 7743 oop new_oop = _mark_stack->pop();
7745 7744 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7746 7745 // now scan this oop's oops
7747 7746 new_oop->oop_iterate(&_pam_verify_closure);
7748 7747 }
7749 7748 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7750 7749 return true;
7751 7750 }
7752 7751
7753 7752 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7754 7753 CMSCollector* collector, MemRegion span,
7755 7754 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7756 7755 CMSMarkStack* mark_stack):
7757 7756 MetadataAwareOopClosure(collector->ref_processor()),
7758 7757 _collector(collector),
7759 7758 _span(span),
7760 7759 _verification_bm(verification_bm),
7761 7760 _cms_bm(cms_bm),
7762 7761 _mark_stack(mark_stack)
7763 7762 { }
7764 7763
7765 7764 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7766 7765 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7767 7766
7768 7767 // Upon stack overflow, we discard (part of) the stack,
7769 7768 // remembering the least address amongst those discarded
7770 7769 // in CMSCollector's _restart_addr.
7771 7770 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7772 7771 // Remember the least grey address discarded
7773 7772 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7774 7773 _collector->lower_restart_addr(ra);
7775 7774 _mark_stack->reset(); // discard stack contents
7776 7775 _mark_stack->expand(); // expand the stack if possible
7777 7776 }
7778 7777
7779 7778 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7780 7779 assert(obj->is_oop_or_null(), "expected an oop or NULL");
7781 7780 HeapWord* addr = (HeapWord*)obj;
7782 7781 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7783 7782 // Oop lies in _span and isn't yet grey or black
7784 7783 _verification_bm->mark(addr); // now grey
7785 7784 if (!_cms_bm->isMarked(addr)) {
7786 7785 oop(addr)->print();
7787 7786 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7788 7787 addr);
7789 7788 fatal("... aborting");
7790 7789 }
7791 7790
7792 7791 if (!_mark_stack->push(obj)) { // stack overflow
7793 7792 if (PrintCMSStatistics != 0) {
7794 7793 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7795 7794 SIZE_FORMAT, _mark_stack->capacity());
7796 7795 }
7797 7796 assert(_mark_stack->isFull(), "Else push should have succeeded");
7798 7797 handle_stack_overflow(addr);
7799 7798 }
7800 7799 // anything including and to the right of _finger
7801 7800 // will be scanned as we iterate over the remainder of the
7802 7801 // bit map
7803 7802 }
7804 7803 }
7805 7804
7806 7805 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7807 7806 MemRegion span,
7808 7807 CMSBitMap* bitMap, CMSMarkStack* markStack,
7809 7808 HeapWord* finger, MarkFromRootsClosure* parent) :
7810 7809 MetadataAwareOopClosure(collector->ref_processor()),
7811 7810 _collector(collector),
7812 7811 _span(span),
7813 7812 _bitMap(bitMap),
7814 7813 _markStack(markStack),
7815 7814 _finger(finger),
7816 7815 _parent(parent)
7817 7816 { }
7818 7817
7819 7818 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7820 7819 MemRegion span,
7821 7820 CMSBitMap* bit_map,
7822 7821 OopTaskQueue* work_queue,
7823 7822 CMSMarkStack* overflow_stack,
7824 7823 HeapWord* finger,
7825 7824 HeapWord** global_finger_addr,
7826 7825 Par_MarkFromRootsClosure* parent) :
7827 7826 MetadataAwareOopClosure(collector->ref_processor()),
7828 7827 _collector(collector),
7829 7828 _whole_span(collector->_span),
7830 7829 _span(span),
7831 7830 _bit_map(bit_map),
7832 7831 _work_queue(work_queue),
7833 7832 _overflow_stack(overflow_stack),
7834 7833 _finger(finger),
7835 7834 _global_finger_addr(global_finger_addr),
7836 7835 _parent(parent)
7837 7836 { }
7838 7837
7839 7838 // Assumes thread-safe access by callers, who are
7840 7839 // responsible for mutual exclusion.
7841 7840 void CMSCollector::lower_restart_addr(HeapWord* low) {
7842 7841 assert(_span.contains(low), "Out of bounds addr");
7843 7842 if (_restart_addr == NULL) {
7844 7843 _restart_addr = low;
7845 7844 } else {
7846 7845 _restart_addr = MIN2(_restart_addr, low);
7847 7846 }
7848 7847 }
7849 7848
7850 7849 // Upon stack overflow, we discard (part of) the stack,
7851 7850 // remembering the least address amongst those discarded
7852 7851 // in CMSCollector's _restart_addr.
7853 7852 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7854 7853 // Remember the least grey address discarded
7855 7854 HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7856 7855 _collector->lower_restart_addr(ra);
7857 7856 _markStack->reset(); // discard stack contents
7858 7857 _markStack->expand(); // expand the stack if possible
7859 7858 }
7860 7859
7861 7860 // Upon stack overflow, we discard (part of) the stack,
7862 7861 // remembering the least address amongst those discarded
7863 7862 // in CMSCollector's _restart_addr.
7864 7863 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7865 7864 // We need to do this under a mutex to prevent other
7866 7865 // workers from interfering with the work done below.
7867 7866 MutexLockerEx ml(_overflow_stack->par_lock(),
7868 7867 Mutex::_no_safepoint_check_flag);
7869 7868 // Remember the least grey address discarded
7870 7869 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7871 7870 _collector->lower_restart_addr(ra);
7872 7871 _overflow_stack->reset(); // discard stack contents
7873 7872 _overflow_stack->expand(); // expand the stack if possible
7874 7873 }
7875 7874
7876 7875 void PushOrMarkClosure::do_oop(oop obj) {
7877 7876 // Ignore mark word because we are running concurrent with mutators.
7878 7877 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7879 7878 HeapWord* addr = (HeapWord*)obj;
7880 7879 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7881 7880 // Oop lies in _span and isn't yet grey or black
7882 7881 _bitMap->mark(addr); // now grey
7883 7882 if (addr < _finger) {
7884 7883 // the bit map iteration has already either passed, or
7885 7884 // sampled, this bit in the bit map; we'll need to
7886 7885 // use the marking stack to scan this oop's oops.
7887 7886 bool simulate_overflow = false;
7888 7887 NOT_PRODUCT(
7889 7888 if (CMSMarkStackOverflowALot &&
7890 7889 _collector->simulate_overflow()) {
7891 7890 // simulate a stack overflow
7892 7891 simulate_overflow = true;
7893 7892 }
7894 7893 )
7895 7894 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7896 7895 if (PrintCMSStatistics != 0) {
7897 7896 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7898 7897 SIZE_FORMAT, _markStack->capacity());
7899 7898 }
7900 7899 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7901 7900 handle_stack_overflow(addr);
7902 7901 }
7903 7902 }
7904 7903 // anything including and to the right of _finger
7905 7904 // will be scanned as we iterate over the remainder of the
7906 7905 // bit map
7907 7906 do_yield_check();
7908 7907 }
7909 7908 }
7910 7909
7911 7910 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
7912 7911 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7913 7912
7914 7913 void Par_PushOrMarkClosure::do_oop(oop obj) {
7915 7914 // Ignore mark word because we are running concurrent with mutators.
7916 7915 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7917 7916 HeapWord* addr = (HeapWord*)obj;
7918 7917 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7919 7918 // Oop lies in _span and isn't yet grey or black
7920 7919 // We read the global_finger (volatile read) strictly after marking oop
7921 7920 bool res = _bit_map->par_mark(addr); // now grey
7922 7921 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7923 7922 // Should we push this marked oop on our stack?
7924 7923 // -- if someone else marked it, nothing to do
7925 7924 // -- if target oop is above global finger nothing to do
7926 7925 // -- if target oop is in chunk and above local finger
7927 7926 // then nothing to do
7928 7927 // -- else push on work queue
7929 7928 if ( !res // someone else marked it, they will deal with it
7930 7929 || (addr >= *gfa) // will be scanned in a later task
7931 7930 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7932 7931 return;
7933 7932 }
7934 7933 // the bit map iteration has already either passed, or
7935 7934 // sampled, this bit in the bit map; we'll need to
7936 7935 // use the marking stack to scan this oop's oops.
7937 7936 bool simulate_overflow = false;
7938 7937 NOT_PRODUCT(
7939 7938 if (CMSMarkStackOverflowALot &&
7940 7939 _collector->simulate_overflow()) {
7941 7940 // simulate a stack overflow
7942 7941 simulate_overflow = true;
7943 7942 }
7944 7943 )
7945 7944 if (simulate_overflow ||
7946 7945 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7947 7946 // stack overflow
7948 7947 if (PrintCMSStatistics != 0) {
7949 7948 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7950 7949 SIZE_FORMAT, _overflow_stack->capacity());
7951 7950 }
7952 7951 // We cannot assert that the overflow stack is full because
7953 7952 // it may have been emptied since.
7954 7953 assert(simulate_overflow ||
7955 7954 _work_queue->size() == _work_queue->max_elems(),
7956 7955 "Else push should have succeeded");
7957 7956 handle_stack_overflow(addr);
7958 7957 }
7959 7958 do_yield_check();
7960 7959 }
7961 7960 }
7962 7961
7963 7962 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7964 7963 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7965 7964
7966 7965 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7967 7966 MemRegion span,
7968 7967 ReferenceProcessor* rp,
7969 7968 CMSBitMap* bit_map,
7970 7969 CMSBitMap* mod_union_table,
7971 7970 CMSMarkStack* mark_stack,
7972 7971 bool concurrent_precleaning):
7973 7972 MetadataAwareOopClosure(rp),
7974 7973 _collector(collector),
7975 7974 _span(span),
7976 7975 _bit_map(bit_map),
7977 7976 _mod_union_table(mod_union_table),
7978 7977 _mark_stack(mark_stack),
7979 7978 _concurrent_precleaning(concurrent_precleaning)
7980 7979 {
7981 7980 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7982 7981 }
7983 7982
7984 7983 // Grey object rescan during pre-cleaning and second checkpoint phases --
7985 7984 // the non-parallel version (the parallel version appears further below.)
7986 7985 void PushAndMarkClosure::do_oop(oop obj) {
7987 7986 // Ignore mark word verification. If during concurrent precleaning,
7988 7987 // the object monitor may be locked. If during the checkpoint
7989 7988 // phases, the object may already have been reached by a different
7990 7989 // path and may be at the end of the global overflow list (so
7991 7990 // the mark word may be NULL).
7992 7991 assert(obj->is_oop_or_null(true /* ignore mark word */),
7993 7992 "expected an oop or NULL");
7994 7993 HeapWord* addr = (HeapWord*)obj;
7995 7994 // Check if oop points into the CMS generation
7996 7995 // and is not marked
7997 7996 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7998 7997 // a white object ...
7999 7998 _bit_map->mark(addr); // ... now grey
8000 7999 // push on the marking stack (grey set)
8001 8000 bool simulate_overflow = false;
8002 8001 NOT_PRODUCT(
8003 8002 if (CMSMarkStackOverflowALot &&
8004 8003 _collector->simulate_overflow()) {
8005 8004 // simulate a stack overflow
8006 8005 simulate_overflow = true;
8007 8006 }
8008 8007 )
8009 8008 if (simulate_overflow || !_mark_stack->push(obj)) {
8010 8009 if (_concurrent_precleaning) {
8011 8010 // During precleaning we can just dirty the appropriate card(s)
8012 8011 // in the mod union table, thus ensuring that the object remains
8013 8012 // in the grey set and continue. In the case of object arrays
8014 8013 // we need to dirty all of the cards that the object spans,
8015 8014 // since the rescan of object arrays will be limited to the
8016 8015 // dirty cards.
8017 8016             // Note that no one can be interfering with us in this action
8018 8017 // of dirtying the mod union table, so no locking or atomics
8019 8018 // are required.
8020 8019 if (obj->is_objArray()) {
8021 8020 size_t sz = obj->size();
8022 8021 HeapWord* end_card_addr = (HeapWord*)round_to(
8023 8022 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
8024 8023 MemRegion redirty_range = MemRegion(addr, end_card_addr);
8025 8024 assert(!redirty_range.is_empty(), "Arithmetical tautology");
8026 8025 _mod_union_table->mark_range(redirty_range);
8027 8026 } else {
8028 8027 _mod_union_table->mark(addr);
8029 8028 }
8030 8029 _collector->_ser_pmc_preclean_ovflw++;
8031 8030 } else {
8032 8031 // During the remark phase, we need to remember this oop
8033 8032 // in the overflow list.
8034 8033 _collector->push_on_overflow_list(obj);
8035 8034 _collector->_ser_pmc_remark_ovflw++;
8036 8035 }
8037 8036 }
8038 8037 }
8039 8038 }
8040 8039
8041 8040 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
8042 8041 MemRegion span,
8043 8042 ReferenceProcessor* rp,
8044 8043 CMSBitMap* bit_map,
8045 8044 OopTaskQueue* work_queue):
8046 8045 MetadataAwareOopClosure(rp),
8047 8046 _collector(collector),
8048 8047 _span(span),
8049 8048 _bit_map(bit_map),
8050 8049 _work_queue(work_queue)
8051 8050 {
8052 8051 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
8053 8052 }
8054 8053
8055 8054 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
8056 8055 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
8057 8056
8058 8057 // Grey object rescan during second checkpoint phase --
8059 8058 // the parallel version.
8060 8059 void Par_PushAndMarkClosure::do_oop(oop obj) {
8061 8060 // In the assert below, we ignore the mark word because
8062 8061 // this oop may point to an already visited object that is
8063 8062 // on the overflow stack (in which case the mark word has
8064 8063 // been hijacked for chaining into the overflow stack --
8065 8064 // if this is the last object in the overflow stack then
8066 8065 // its mark word will be NULL). Because this object may
8067 8066 // have been subsequently popped off the global overflow
8068 8067 // stack, and the mark word possibly restored to the prototypical
8069 8068 // value, by the time we get to examine this failing assert in
8070 8069 // the debugger, is_oop_or_null(false) may subsequently start
8071 8070 // to hold.
8072 8071 assert(obj->is_oop_or_null(true),
8073 8072 "expected an oop or NULL");
8074 8073 HeapWord* addr = (HeapWord*)obj;
8075 8074 // Check if oop points into the CMS generation
8076 8075 // and is not marked
8077 8076 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
8078 8077 // a white object ...
8079 8078 // If we manage to "claim" the object, by being the
8080 8079 // first thread to mark it, then we push it on our
8081 8080 // marking stack
8082 8081 if (_bit_map->par_mark(addr)) { // ... now grey
8083 8082 // push on work queue (grey set)
8084 8083 bool simulate_overflow = false;
8085 8084 NOT_PRODUCT(
8086 8085 if (CMSMarkStackOverflowALot &&
8087 8086 _collector->par_simulate_overflow()) {
8088 8087 // simulate a stack overflow
8089 8088 simulate_overflow = true;
8090 8089 }
8091 8090 )
8092 8091 if (simulate_overflow || !_work_queue->push(obj)) {
8093 8092 _collector->par_push_on_overflow_list(obj);
8094 8093 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
8095 8094 }
8096 8095 } // Else, some other thread got there first
8097 8096 }
8098 8097 }
8099 8098
8100 8099 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
8101 8100 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
8102 8101
8103 8102 void CMSPrecleanRefsYieldClosure::do_yield_work() {
8104 8103 Mutex* bml = _collector->bitMapLock();
8105 8104 assert_lock_strong(bml);
8106 8105 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8107 8106 "CMS thread should hold CMS token");
8108 8107
8109 8108 bml->unlock();
8110 8109 ConcurrentMarkSweepThread::desynchronize(true);
8111 8110
8112 8111 ConcurrentMarkSweepThread::acknowledge_yield_request();
8113 8112
8114 8113 _collector->stopTimer();
8115 8114 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8116 8115 if (PrintCMSStatistics != 0) {
8117 8116 _collector->incrementYields();
8118 8117 }
8119 8118 _collector->icms_wait();
8120 8119
8121 8120 // See the comment in coordinator_yield()
8122 8121 for (unsigned i = 0; i < CMSYieldSleepCount &&
8123 8122 ConcurrentMarkSweepThread::should_yield() &&
8124 8123 !CMSCollector::foregroundGCIsActive(); ++i) {
8125 8124 os::sleep(Thread::current(), 1, false);
8126 8125 ConcurrentMarkSweepThread::acknowledge_yield_request();
8127 8126 }
8128 8127
8129 8128 ConcurrentMarkSweepThread::synchronize(true);
8130 8129 bml->lock();
8131 8130
8132 8131 _collector->startTimer();
8133 8132 }
8134 8133
8135 8134 bool CMSPrecleanRefsYieldClosure::should_return() {
8136 8135 if (ConcurrentMarkSweepThread::should_yield()) {
8137 8136 do_yield_work();
8138 8137 }
8139 8138 return _collector->foregroundGCIsActive();
8140 8139 }
8141 8140
8142 8141 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
8143 8142 assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
8144 8143 "mr should be aligned to start at a card boundary");
8145 8144 // We'd like to assert:
8146 8145 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
8147 8146 // "mr should be a range of cards");
8148 8147 // However, that would be too strong in one case -- the last
8149 8148 // partition ends at _unallocated_block which, in general, can be
8150 8149 // an arbitrary boundary, not necessarily card aligned.
8151 8150 if (PrintCMSStatistics != 0) {
8152 8151 _num_dirty_cards +=
8153 8152 mr.word_size()/CardTableModRefBS::card_size_in_words;
8154 8153 }
8155 8154 _space->object_iterate_mem(mr, &_scan_cl);
8156 8155 }
8157 8156
8158 8157 SweepClosure::SweepClosure(CMSCollector* collector,
8159 8158 ConcurrentMarkSweepGeneration* g,
8160 8159 CMSBitMap* bitMap, bool should_yield) :
8161 8160 _collector(collector),
8162 8161 _g(g),
8163 8162 _sp(g->cmsSpace()),
8164 8163 _limit(_sp->sweep_limit()),
8165 8164 _freelistLock(_sp->freelistLock()),
8166 8165 _bitMap(bitMap),
8167 8166 _yield(should_yield),
8168 8167 _inFreeRange(false), // No free range at beginning of sweep
8169 8168 _freeRangeInFreeLists(false), // No free range at beginning of sweep
8170 8169 _lastFreeRangeCoalesced(false),
8171 8170 _freeFinger(g->used_region().start())
8172 8171 {
8173 8172 NOT_PRODUCT(
8174 8173 _numObjectsFreed = 0;
8175 8174 _numWordsFreed = 0;
8176 8175 _numObjectsLive = 0;
8177 8176 _numWordsLive = 0;
8178 8177 _numObjectsAlreadyFree = 0;
8179 8178 _numWordsAlreadyFree = 0;
8180 8179 _last_fc = NULL;
8181 8180
8182 8181 _sp->initializeIndexedFreeListArrayReturnedBytes();
8183 8182 _sp->dictionary()->initialize_dict_returned_bytes();
8184 8183 )
8185 8184 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8186 8185 "sweep _limit out of bounds");
8187 8186 if (CMSTraceSweeper) {
8188 8187 gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
8189 8188 _limit);
8190 8189 }
8191 8190 }
8192 8191
8193 8192 void SweepClosure::print_on(outputStream* st) const {
8194 8193 tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
8195 8194 _sp->bottom(), _sp->end());
8196 8195 tty->print_cr("_limit = " PTR_FORMAT, _limit);
8197 8196 tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
8198 8197 NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
8199 8198 tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
8200 8199 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
8201 8200 }
8202 8201
8203 8202 #ifndef PRODUCT
8204 8203 // Assertion checking only: no useful work in product mode --
8205 8204 // however, if any of the flags below become product flags,
8206 8205 // you may need to review this code to see if it needs to be
8207 8206 // enabled in product mode.
8208 8207 SweepClosure::~SweepClosure() {
8209 8208 assert_lock_strong(_freelistLock);
8210 8209 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8211 8210 "sweep _limit out of bounds");
8212 8211 if (inFreeRange()) {
8213 8212 warning("inFreeRange() should have been reset; dumping state of SweepClosure");
8214 8213 print();
8215 8214 ShouldNotReachHere();
8216 8215 }
8217 8216 if (Verbose && PrintGC) {
8218 8217 gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
8219 8218 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
8220 8219 gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, "
8221 8220 SIZE_FORMAT" bytes "
8222 8221 "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
8223 8222 _numObjectsLive, _numWordsLive*sizeof(HeapWord),
8224 8223 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
8225 8224 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
8226 8225 * sizeof(HeapWord);
8227 8226 gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
8228 8227
8229 8228 if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
8230 8229 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
8231 8230 size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
8232 8231 size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
8233 8232 gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returned_bytes);
8234 8233 gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes",
8235 8234 indexListReturnedBytes);
8236 8235 gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes",
8237 8236 dict_returned_bytes);
8238 8237 }
8239 8238 }
8240 8239 if (CMSTraceSweeper) {
8241 8240 gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
8242 8241 _limit);
8243 8242 }
8244 8243 }
8245 8244 #endif // PRODUCT
8246 8245
8247 8246 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
8248 8247 bool freeRangeInFreeLists) {
8249 8248 if (CMSTraceSweeper) {
8250 8249 gclog_or_tty->print("---- Start free range at 0x%x with free block (%d)\n",
8251 8250 freeFinger, freeRangeInFreeLists);
8252 8251 }
8253 8252 assert(!inFreeRange(), "Trampling existing free range");
8254 8253 set_inFreeRange(true);
8255 8254 set_lastFreeRangeCoalesced(false);
8256 8255
8257 8256 set_freeFinger(freeFinger);
8258 8257 set_freeRangeInFreeLists(freeRangeInFreeLists);
8259 8258 if (CMSTestInFreeList) {
8260 8259 if (freeRangeInFreeLists) {
8261 8260 FreeChunk* fc = (FreeChunk*) freeFinger;
8262 8261 assert(fc->is_free(), "A chunk on the free list should be free.");
8263 8262 assert(fc->size() > 0, "Free range should have a size");
8264 8263 assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
8265 8264 }
8266 8265 }
8267 8266 }
8268 8267
8269 8268 // Note that the sweeper runs concurrently with mutators. Thus,
8270 8269 // it is possible for direct allocation in this generation to happen
8271 8270 // in the middle of the sweep. Note that the sweeper also coalesces
8272 8271 // contiguous free blocks. Thus, unless the sweeper and the allocator
8273 8272 // synchronize appropriately, freshly allocated blocks may get swept up.
8274 8273 // This is accomplished by the sweeper locking the free lists while
8275 8274 // it is sweeping. Thus blocks that are determined to be free are
8276 8275 // indeed free. There is however one additional complication:
8277 8276 // blocks that have been allocated since the final checkpoint and
8278 8277 // mark, will not have been marked and so would be treated as
8279 8278 // unreachable and swept up. To prevent this, the allocator marks
8280 8279 // the bit map when allocating during the sweep phase. This leads,
8281 8280 // however, to a further complication -- objects may have been allocated
8282 8281 // but not yet initialized -- in the sense that the header isn't yet
8283 8282 // installed. The sweeper cannot then determine the size of the block
8284 8283 // in order to skip over it. To deal with this case, we use a technique
8285 8284 // (due to Printezis) to encode such uninitialized block sizes in the
8286 8285 // bit map. Since the bit map uses one bit per HeapWord and the
8287 8286 // CMS generation has a minimum object size of 3 HeapWords, it follows
8288 8287 // that "normal marks" won't be adjacent in the bit map (there will
8289 8288 // always be at least two 0 bits between successive 1 bits). We make use
8290 8289 // of these "unused" bits to represent uninitialized blocks -- the bit
8291 8290 // corresponding to the start of the uninitialized object and the next
8292 8291 // bit are both set. Finally, a 1 bit marks the end of the object that
8293 8292 // started with the two consecutive 1 bits to indicate its potentially
8294 8293 // uninitialized state.
8295 8294
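// As a minimal sketch of the encoding just described (using only CMSBitMap
// operations that already appear in this file), a hypothetical helper that
// recovers a block size from such a mark pattern could look like the
// following; do_live_chunk() below applies the same logic in place.
static size_t printezis_block_size_sketch(CMSBitMap* bm, HeapWord* addr) {
  assert(bm->isMarked(addr), "block start should be marked");
  if (bm->isMarked(addr + 1)) {
    // Two consecutive 1 bits: a (possibly) uninitialized block whose last
    // word is marked by the next 1 bit at or beyond addr + 2.
    HeapWord* last = bm->getNextMarkedWordAddress(addr + 2);
    return pointer_delta(last + 1, addr);
  }
  // A single 1 bit: an initialized object; take the size from its header.
  return oop(addr)->size();
}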
8296 8295 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
8297 8296 FreeChunk* fc = (FreeChunk*)addr;
8298 8297 size_t res;
8299 8298
8300 8299 // Check if we are done sweeping. Below we check "addr >= _limit" rather
8301 8300 // than "addr == _limit" because although _limit was a block boundary when
8302 8301 // we started the sweep, it may no longer be one because heap expansion
8303 8302 // may have caused us to coalesce the block ending at the address _limit
8304 8303 // with a newly expanded chunk (this happens when _limit was set to the
8305 8304 // previous _end of the space), so we may have stepped past _limit:
8306 8305 // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
8307 8306 if (addr >= _limit) { // we have swept up to or past the limit: finish up
8308 8307 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
8309 8308 "sweep _limit out of bounds");
8310 8309 assert(addr < _sp->end(), "addr out of bounds");
8311 8310 // Flush any free range we might be holding as a single
8312 8311 // coalesced chunk to the appropriate free list.
8313 8312 if (inFreeRange()) {
8314 8313 assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
8315 8314 err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
8316 8315 flush_cur_free_chunk(freeFinger(),
8317 8316 pointer_delta(addr, freeFinger()));
8318 8317 if (CMSTraceSweeper) {
8319 8318 gclog_or_tty->print("Sweep: last chunk: ");
8320 8319 gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") "
8321 8320 "[coalesced:"SIZE_FORMAT"]\n",
8322 8321 freeFinger(), pointer_delta(addr, freeFinger()),
8323 8322 lastFreeRangeCoalesced());
8324 8323 }
8325 8324 }
8326 8325
8327 8326 // help the iterator loop finish
8328 8327 return pointer_delta(_sp->end(), addr);
8329 8328 }
8330 8329
8331 8330 assert(addr < _limit, "sweep invariant");
8332 8331 // check if we should yield
8333 8332 do_yield_check(addr);
8334 8333 if (fc->is_free()) {
8335 8334 // Chunk that is already free
8336 8335 res = fc->size();
8337 8336 do_already_free_chunk(fc);
8338 8337 debug_only(_sp->verifyFreeLists());
8339 8338 // If we flush the chunk at hand in lookahead_and_flush()
8340 8339 // and it's coalesced with a preceding chunk, then the
8341 8340 // process of "mangling" the payload of the coalesced block
8342 8341 // will cause erasure of the size information from the
8343 8342 // (erstwhile) header of all the coalesced blocks but the
8344 8343 // first, so the first disjunct in the assert will not hold
8345 8344 // in that specific case (in which case the second disjunct
8346 8345 // will hold).
8347 8346 assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
8348 8347 "Otherwise the size info doesn't change at this step");
8349 8348 NOT_PRODUCT(
8350 8349 _numObjectsAlreadyFree++;
8351 8350 _numWordsAlreadyFree += res;
8352 8351 )
8353 8352 NOT_PRODUCT(_last_fc = fc;)
8354 8353 } else if (!_bitMap->isMarked(addr)) {
8355 8354 // Chunk is fresh garbage
8356 8355 res = do_garbage_chunk(fc);
8357 8356 debug_only(_sp->verifyFreeLists());
8358 8357 NOT_PRODUCT(
8359 8358 _numObjectsFreed++;
8360 8359 _numWordsFreed += res;
8361 8360 )
8362 8361 } else {
8363 8362 // Chunk that is alive.
8364 8363 res = do_live_chunk(fc);
8365 8364 debug_only(_sp->verifyFreeLists());
8366 8365 NOT_PRODUCT(
8367 8366 _numObjectsLive++;
8368 8367 _numWordsLive += res;
8369 8368 )
8370 8369 }
8371 8370 return res;
8372 8371 }
8373 8372
8374 8373 // For the smart allocation, record following
8375 8374 // split deaths - a free chunk is removed from its free list because
8376 8375 // it is being split into two or more chunks.
8377 8376 // split birth - a free chunk is being added to its free list because
8378 8377 // a larger free chunk has been split and resulted in this free chunk.
8379 8378 // coal death - a free chunk is being removed from its free list because
8380 8379 // it is being coalesced into a large free chunk.
8381 8380 // coal birth - a free chunk is being added to its free list because
8382 8381 //     it was created when two or more free chunks were coalesced into
8383 8382 // this free chunk.
8384 8383 //
8385 8384 // These statistics are used to determine the desired number of free
8386 8385 // chunks of a given size. The desired number is chosen to be relative
8387 8386 // to the end of a CMS sweep. The desired number at the end of a sweep
8388 8387 // is the
8389 8388 // count-at-end-of-previous-sweep (an amount that was enough)
8390 8389 // - count-at-beginning-of-current-sweep (the excess)
8391 8390 // + split-births (gains in this size during interval)
8392 8391 // - split-deaths (demands on this size during interval)
8393 8392 // where the interval is from the end of one sweep to the end of the
8394 8393 // next.
8395 8394 //
8396 8395 // When sweeping the sweeper maintains an accumulated chunk which is
8397 8396 // the chunk that is made up of chunks that have been coalesced. That
8398 8397 // will be termed the left-hand chunk. A new chunk of garbage that
8399 8398 // is being considered for coalescing will be referred to as the
8400 8399 // right-hand chunk.
8401 8400 //
8402 8401 // When making a decision on whether to coalesce a right-hand chunk with
8403 8402 // the current left-hand chunk, the current count vs. the desired count
8404 8403 // of the left-hand chunk is considered. Also if the right-hand chunk
8405 8404 // is near the large chunk at the end of the heap (see
8406 8405 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
8407 8406 // left-hand chunk is coalesced.
8408 8407 //
8409 8408 // When making a decision about whether to split a chunk, the desired count
8410 8409 // vs. the current count of the candidate to be split is also considered.
8411 8410 // If the candidate is underpopulated (currently fewer chunks than desired)
8412 8411 // a chunk of an overpopulated (currently more chunks than desired) size may
8413 8412 // be chosen. The "hint" associated with a free list, if non-null, points
8414 8413 // to a free list which may be overpopulated.
8415 8414 //
8416 8415
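// To make the desired-count arithmetic above concrete (the numbers below are
// invented, purely for illustration): a size class that ended the previous
// sweep with 100 chunks, begins this sweep with 40, and has seen 25 split
// births and 10 split deaths since then would have a desired count of
//   100 - 40 + 25 - 10 = 75
// at the end of this sweep. A minimal sketch of that computation:
static ssize_t desired_chunk_count_sketch(ssize_t end_of_prev_sweep_count,
                                          ssize_t start_of_this_sweep_count,
                                          ssize_t split_births,
                                          ssize_t split_deaths) {
  return end_of_prev_sweep_count - start_of_this_sweep_count
       + split_births - split_deaths;
}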
8417 8416 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
8418 8417 const size_t size = fc->size();
8419 8418 // Chunks that cannot be coalesced are not in the
8420 8419 // free lists.
8421 8420 if (CMSTestInFreeList && !fc->cantCoalesce()) {
8422 8421 assert(_sp->verify_chunk_in_free_list(fc),
8423 8422 "free chunk should be in free lists");
8424 8423 }
8425 8424 // a chunk that is already free, should not have been
8426 8425 // marked in the bit map
8427 8426 HeapWord* const addr = (HeapWord*) fc;
8428 8427 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
8429 8428 // Verify that the bit map has no bits marked between
8430 8429 // addr and purported end of this block.
8431 8430 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8432 8431
8433 8432 // Some chunks cannot be coalesced under any circumstances.
8434 8433 // See the definition of cantCoalesce().
8435 8434 if (!fc->cantCoalesce()) {
8436 8435 // This chunk can potentially be coalesced.
8437 8436 if (_sp->adaptive_freelists()) {
8438 8437 // All the work is done in
8439 8438 do_post_free_or_garbage_chunk(fc, size);
8440 8439 } else { // Not adaptive free lists
8441 8440 // this is a free chunk that can potentially be coalesced by the sweeper;
8442 8441 if (!inFreeRange()) {
8443 8442 // if the next chunk is a free block that can't be coalesced
8444 8443 // it doesn't make sense to remove this chunk from the free lists
8445 8444 FreeChunk* nextChunk = (FreeChunk*)(addr + size);
8446 8445 assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
8447 8446 if ((HeapWord*)nextChunk < _sp->end() && // There is another free chunk to the right ...
8448 8447 nextChunk->is_free() && // ... which is free...
8449 8448 nextChunk->cantCoalesce()) { // ... but can't be coalesced
8450 8449 // nothing to do
8451 8450 } else {
8452 8451 // Potentially the start of a new free range:
8453 8452 // Don't eagerly remove it from the free lists.
8454 8453 // No need to remove it if it will just be put
8455 8454 // back again. (Also from a pragmatic point of view
8456 8455 // if it is a free block in a region that is beyond
8457 8456 // any allocated blocks, an assertion will fail)
8458 8457 // Remember the start of a free run.
8459 8458 initialize_free_range(addr, true);
8460 8459 // end - can coalesce with next chunk
8461 8460 }
8462 8461 } else {
8463 8462 // the midst of a free range, we are coalescing
8464 8463 print_free_block_coalesced(fc);
8465 8464 if (CMSTraceSweeper) {
8466 8465 gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
8467 8466 }
8468 8467 // remove it from the free lists
8469 8468 _sp->removeFreeChunkFromFreeLists(fc);
8470 8469 set_lastFreeRangeCoalesced(true);
8471 8470 // If the chunk is being coalesced and the current free range is
8472 8471 // in the free lists, remove the current free range so that it
8473 8472 // will be returned to the free lists in its entirety - all
8474 8473 // the coalesced pieces included.
8475 8474 if (freeRangeInFreeLists()) {
8476 8475 FreeChunk* ffc = (FreeChunk*) freeFinger();
8477 8476 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8478 8477 "Size of free range is inconsistent with chunk size.");
8479 8478 if (CMSTestInFreeList) {
8480 8479 assert(_sp->verify_chunk_in_free_list(ffc),
8481 8480 "free range is not in free lists");
8482 8481 }
8483 8482 _sp->removeFreeChunkFromFreeLists(ffc);
8484 8483 set_freeRangeInFreeLists(false);
8485 8484 }
8486 8485 }
8487 8486 }
8488 8487 // Note that if the chunk is not coalescable (the else arm
8489 8488 // below), we unconditionally flush, without needing to do
8490 8489 // a "lookahead," as we do below.
8491 8490 if (inFreeRange()) lookahead_and_flush(fc, size);
8492 8491 } else {
8493 8492 // Code path common to both original and adaptive free lists.
8494 8493
8495 8494     // can't coalesce with previous block; this should be treated
8496 8495 // as the end of a free run if any
8497 8496 if (inFreeRange()) {
8498 8497 // we kicked some butt; time to pick up the garbage
8499 8498 assert(freeFinger() < addr, "freeFinger points too high");
8500 8499 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8501 8500 }
8502 8501 // else, nothing to do, just continue
8503 8502 }
8504 8503 }
8505 8504
8506 8505 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
8507 8506 // This is a chunk of garbage. It is not in any free list.
8508 8507 // Add it to a free list or let it possibly be coalesced into
8509 8508 // a larger chunk.
8510 8509 HeapWord* const addr = (HeapWord*) fc;
8511 8510 const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8512 8511
8513 8512 if (_sp->adaptive_freelists()) {
8514 8513 // Verify that the bit map has no bits marked between
8515 8514 // addr and purported end of just dead object.
8516 8515 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8517 8516
8518 8517 do_post_free_or_garbage_chunk(fc, size);
8519 8518 } else {
8520 8519 if (!inFreeRange()) {
8521 8520 // start of a new free range
8522 8521 assert(size > 0, "A free range should have a size");
8523 8522 initialize_free_range(addr, false);
8524 8523 } else {
8525 8524 // this will be swept up when we hit the end of the
8526 8525 // free range
8527 8526 if (CMSTraceSweeper) {
8528 8527 gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size);
8529 8528 }
8530 8529 // If the chunk is being coalesced and the current free range is
8531 8530 // in the free lists, remove the current free range so that it
8532 8531 // will be returned to the free lists in its entirety - all
8533 8532 // the coalesced pieces included.
8534 8533 if (freeRangeInFreeLists()) {
8535 8534 FreeChunk* ffc = (FreeChunk*)freeFinger();
8536 8535 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8537 8536 "Size of free range is inconsistent with chunk size.");
8538 8537 if (CMSTestInFreeList) {
8539 8538 assert(_sp->verify_chunk_in_free_list(ffc),
8540 8539 "free range is not in free lists");
8541 8540 }
8542 8541 _sp->removeFreeChunkFromFreeLists(ffc);
8543 8542 set_freeRangeInFreeLists(false);
8544 8543 }
8545 8544 set_lastFreeRangeCoalesced(true);
8546 8545 }
8547 8546 // this will be swept up when we hit the end of the free range
8548 8547
8549 8548 // Verify that the bit map has no bits marked between
8550 8549 // addr and purported end of just dead object.
8551 8550 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8552 8551 }
8553 8552 assert(_limit >= addr + size,
8554 8553 "A freshly garbage chunk can't possibly straddle over _limit");
8555 8554 if (inFreeRange()) lookahead_and_flush(fc, size);
8556 8555 return size;
8557 8556 }
8558 8557
8559 8558 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
8560 8559 HeapWord* addr = (HeapWord*) fc;
8561 8560 // The sweeper has just found a live object. Return any accumulated
8562 8561 // left hand chunk to the free lists.
8563 8562 if (inFreeRange()) {
8564 8563 assert(freeFinger() < addr, "freeFinger points too high");
8565 8564 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8566 8565 }
8567 8566
8568 8567 // This object is live: we'd normally expect this to be
8569 8568 // an oop, and like to assert the following:
8570 8569 // assert(oop(addr)->is_oop(), "live block should be an oop");
8571 8570 // However, as we commented above, this may be an object whose
8572 8571 // header hasn't yet been initialized.
8573 8572 size_t size;
8574 8573 assert(_bitMap->isMarked(addr), "Tautology for this control point");
8575 8574 if (_bitMap->isMarked(addr + 1)) {
8576 8575 // Determine the size from the bit map, rather than trying to
8577 8576 // compute it from the object header.
8578 8577 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8579 8578 size = pointer_delta(nextOneAddr + 1, addr);
8580 8579 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8581 8580 "alignment problem");
8582 8581
8583 8582 #ifdef ASSERT
8584 8583 if (oop(addr)->klass_or_null() != NULL) {
8585 8584 // Ignore mark word because we are running concurrent with mutators
8586 8585 assert(oop(addr)->is_oop(true), "live block should be an oop");
8587 8586 assert(size ==
8588 8587 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8589 8588 "P-mark and computed size do not agree");
8590 8589 }
8591 8590 #endif
8592 8591
8593 8592 } else {
8594 8593 // This should be an initialized object that's alive.
8595 8594 assert(oop(addr)->klass_or_null() != NULL,
8596 8595 "Should be an initialized object");
8597 8596 // Ignore mark word because we are running concurrent with mutators
8598 8597 assert(oop(addr)->is_oop(true), "live block should be an oop");
8599 8598 // Verify that the bit map has no bits marked between
8600 8599 // addr and purported end of this block.
8601 8600 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8602 8601 assert(size >= 3, "Necessary for Printezis marks to work");
8603 8602 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8604 8603 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8605 8604 }
8606 8605 return size;
8607 8606 }
8608 8607
8609 8608 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
8610 8609 size_t chunkSize) {
8611 8610 // do_post_free_or_garbage_chunk() should only be called in the case
8612 8611 // of the adaptive free list allocator.
8613 8612 const bool fcInFreeLists = fc->is_free();
8614 8613 assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8615 8614 assert((HeapWord*)fc <= _limit, "sweep invariant");
8616 8615 if (CMSTestInFreeList && fcInFreeLists) {
8617 8616 assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
8618 8617 }
8619 8618
8620 8619 if (CMSTraceSweeper) {
8621 8620 gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
8622 8621 }
8623 8622
8624 8623 HeapWord* const fc_addr = (HeapWord*) fc;
8625 8624
8626 8625 bool coalesce;
8627 8626 const size_t left = pointer_delta(fc_addr, freeFinger());
8628 8627 const size_t right = chunkSize;
8629 8628 switch (FLSCoalescePolicy) {
8630 8629 // numeric value forms a coalition aggressiveness metric
8631 8630 case 0: { // never coalesce
8632 8631 coalesce = false;
8633 8632 break;
8634 8633 }
8635 8634 case 1: { // coalesce if left & right chunks on overpopulated lists
8636 8635 coalesce = _sp->coalOverPopulated(left) &&
8637 8636 _sp->coalOverPopulated(right);
8638 8637 break;
8639 8638 }
8640 8639 case 2: { // coalesce if left chunk on overpopulated list (default)
8641 8640 coalesce = _sp->coalOverPopulated(left);
8642 8641 break;
8643 8642 }
8644 8643 case 3: { // coalesce if left OR right chunk on overpopulated list
8645 8644 coalesce = _sp->coalOverPopulated(left) ||
8646 8645 _sp->coalOverPopulated(right);
8647 8646 break;
8648 8647 }
8649 8648 case 4: { // always coalesce
8650 8649 coalesce = true;
8651 8650 break;
8652 8651 }
8653 8652 default:
8654 8653 ShouldNotReachHere();
8655 8654 }
8656 8655
8657 8656 // Should the current free range be coalesced?
8658 8657 // If the chunk is in a free range and either we decided to coalesce above
8659 8658 // or the chunk is near the large block at the end of the heap
8660 8659 // (isNearLargestChunk() returns true), then coalesce this chunk.
8661 8660 const bool doCoalesce = inFreeRange()
8662 8661 && (coalesce || _g->isNearLargestChunk(fc_addr));
8663 8662 if (doCoalesce) {
8664 8663 // Coalesce the current free range on the left with the new
8665 8664 // chunk on the right. If either is on a free list,
8666 8665 // it must be removed from the list and stashed in the closure.
8667 8666 if (freeRangeInFreeLists()) {
8668 8667 FreeChunk* const ffc = (FreeChunk*)freeFinger();
8669 8668 assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
8670 8669 "Size of free range is inconsistent with chunk size.");
8671 8670 if (CMSTestInFreeList) {
8672 8671 assert(_sp->verify_chunk_in_free_list(ffc),
8673 8672 "Chunk is not in free lists");
8674 8673 }
8675 8674 _sp->coalDeath(ffc->size());
8676 8675 _sp->removeFreeChunkFromFreeLists(ffc);
8677 8676 set_freeRangeInFreeLists(false);
8678 8677 }
8679 8678 if (fcInFreeLists) {
8680 8679 _sp->coalDeath(chunkSize);
8681 8680 assert(fc->size() == chunkSize,
8682 8681 "The chunk has the wrong size or is not in the free lists");
8683 8682 _sp->removeFreeChunkFromFreeLists(fc);
8684 8683 }
8685 8684 set_lastFreeRangeCoalesced(true);
8686 8685 print_free_block_coalesced(fc);
8687 8686 } else { // not in a free range and/or should not coalesce
8688 8687 // Return the current free range and start a new one.
8689 8688 if (inFreeRange()) {
8690 8689 // In a free range but cannot coalesce with the right hand chunk.
8691 8690 // Put the current free range into the free lists.
8692 8691 flush_cur_free_chunk(freeFinger(),
8693 8692 pointer_delta(fc_addr, freeFinger()));
8694 8693 }
8695 8694 // Set up for new free range. Pass along whether the right hand
8696 8695 // chunk is in the free lists.
8697 8696 initialize_free_range((HeapWord*)fc, fcInFreeLists);
8698 8697 }
8699 8698 }
8700 8699
8701 8700 // Lookahead flush:
8702 8701 // If we are tracking a free range, and this is the last chunk that
8703 8702 // we'll look at because its end crosses past _limit, we'll preemptively
8704 8703 // flush it along with any free range we may be holding on to. Note that
8705 8704 // this can be the case only for an already free or freshly garbage
8706 8705 // chunk. If this block is an object, it can never straddle
8707 8706 // over _limit. The "straddling" occurs when _limit is set at
8708 8707 // the previous end of the space when this cycle started, and
8709 8708 // a subsequent heap expansion caused the previously co-terminal
8710 8709 // free block to be coalesced with the newly expanded portion,
8711 8710 // thus rendering _limit a non-block-boundary making it dangerous
8712 8711 // for the sweeper to step over and examine.
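// A concrete (example-only) picture of that straddling case: suppose _limit
// was set to the old end of the space, 0x9000, when this sweep cycle began,
// and the block [0x8C00, 0x9000) was free at that time. If the heap then
// expands and that free block is coalesced with part of the newly added
// storage into, say, [0x8C00, 0x9800), the sweeper arrives at 0x8C00 holding
// a chunk whose end, 0x9800, lies past _limit; that is exactly the situation
// the lookahead flush below deals with.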
8713 8712 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
8714 8713 assert(inFreeRange(), "Should only be called if currently in a free range.");
8715 8714 HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
8716 8715 assert(_sp->used_region().contains(eob - 1),
8717 8716 err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
8718 8717 " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
8719 8718 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
8720 8719 eob, eob-1, _limit, _sp->bottom(), _sp->end(), fc, chunk_size));
8721 8720 if (eob >= _limit) {
8722 8721 assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
8723 8722 if (CMSTraceSweeper) {
8724 8723 gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
8725 8724 "[" PTR_FORMAT "," PTR_FORMAT ") in space "
8726 8725 "[" PTR_FORMAT "," PTR_FORMAT ")",
8727 8726 _limit, fc, eob, _sp->bottom(), _sp->end());
8728 8727 }
8729 8728 // Return the storage we are tracking back into the free lists.
8730 8729 if (CMSTraceSweeper) {
8731 8730 gclog_or_tty->print_cr("Flushing ... ");
8732 8731 }
8733 8732 assert(freeFinger() < eob, "Error");
8734 8733 flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
8735 8734 }
8736 8735 }
8737 8736
8738 8737 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
8739 8738 assert(inFreeRange(), "Should only be called if currently in a free range.");
8740 8739 assert(size > 0,
8741 8740 "A zero sized chunk cannot be added to the free lists.");
8742 8741 if (!freeRangeInFreeLists()) {
8743 8742 if (CMSTestInFreeList) {
8744 8743 FreeChunk* fc = (FreeChunk*) chunk;
8745 8744 fc->set_size(size);
8746 8745 assert(!_sp->verify_chunk_in_free_list(fc),
8747 8746 "chunk should not be in free lists yet");
8748 8747 }
8749 8748 if (CMSTraceSweeper) {
8750 8749 gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists",
8751 8750 chunk, size);
8752 8751 }
8753 8752 // A new free range is going to be starting. The current
8754 8753 // free range has not been added to the free lists yet or
8755 8754 // was removed so add it back.
8756 8755 // If the current free range was coalesced, then the death
8757 8756 // of the free range was recorded. Record a birth now.
8758 8757 if (lastFreeRangeCoalesced()) {
8759 8758 _sp->coalBirth(size);
8760 8759 }
8761 8760 _sp->addChunkAndRepairOffsetTable(chunk, size,
8762 8761 lastFreeRangeCoalesced());
8763 8762 } else if (CMSTraceSweeper) {
8764 8763 gclog_or_tty->print_cr("Already in free list: nothing to flush");
8765 8764 }
8766 8765 set_inFreeRange(false);
8767 8766 set_freeRangeInFreeLists(false);
8768 8767 }
8769 8768
8770 8769 // We take a break if we've been at this for a while,
8771 8770 // so as to avoid monopolizing the locks involved.
8772 8771 void SweepClosure::do_yield_work(HeapWord* addr) {
8773 8772 // Return current free chunk being used for coalescing (if any)
8774 8773 // to the appropriate freelist. After yielding, the next
8775 8774 // free block encountered will start a coalescing range of
8776 8775 // free blocks. If the next free block is adjacent to the
8777 8776 // chunk just flushed, they will need to wait for the next
8778 8777 // sweep to be coalesced.
8779 8778 if (inFreeRange()) {
8780 8779 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8781 8780 }
8782 8781
8783 8782 // First give up the locks, then yield, then re-lock.
8784 8783 // We should probably use a constructor/destructor idiom to
8785 8784 // do this unlock/lock or modify the MutexUnlocker class to
8786 8785 // serve our purpose. XXX
8787 8786 assert_lock_strong(_bitMap->lock());
8788 8787 assert_lock_strong(_freelistLock);
8789 8788 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8790 8789 "CMS thread should hold CMS token");
8791 8790 _bitMap->lock()->unlock();
8792 8791 _freelistLock->unlock();
8793 8792 ConcurrentMarkSweepThread::desynchronize(true);
8794 8793 ConcurrentMarkSweepThread::acknowledge_yield_request();
8795 8794 _collector->stopTimer();
8796 8795 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8797 8796 if (PrintCMSStatistics != 0) {
8798 8797 _collector->incrementYields();
8799 8798 }
8800 8799 _collector->icms_wait();
8801 8800
8802 8801 // See the comment in coordinator_yield()
8803 8802 for (unsigned i = 0; i < CMSYieldSleepCount &&
8804 8803 ConcurrentMarkSweepThread::should_yield() &&
8805 8804 !CMSCollector::foregroundGCIsActive(); ++i) {
8806 8805 os::sleep(Thread::current(), 1, false);
8807 8806 ConcurrentMarkSweepThread::acknowledge_yield_request();
8808 8807 }
8809 8808
8810 8809 ConcurrentMarkSweepThread::synchronize(true);
8811 8810 _freelistLock->lock();
8812 8811 _bitMap->lock()->lock_without_safepoint_check();
8813 8812 _collector->startTimer();
8814 8813 }
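// The "XXX" note in do_yield_work() above asks for a constructor/destructor
// (RAII) idiom for the unlock/yield/relock dance. A minimal sketch of what
// such a helper might look like follows, purely for illustration: the name
// ScopedYieldUnlocker is hypothetical and is not part of this change, and a
// real version would also have to fold in the CMS-token, timer and
// icms_wait() handling that is currently done inline above.
//
//   class ScopedYieldUnlocker : public StackObj {
//     Mutex* _m;
//     bool   _relock_without_safepoint_check;
//    public:
//     ScopedYieldUnlocker(Mutex* m, bool relock_without_safepoint_check)
//       : _m(m),
//         _relock_without_safepoint_check(relock_without_safepoint_check) {
//       _m->unlock();     // give up the lock for the duration of the yield
//     }
//     ~ScopedYieldUnlocker() {
//       if (_relock_without_safepoint_check) {
//         _m->lock_without_safepoint_check();  // e.g. the bitmap lock
//       } else {
//         _m->lock();                          // e.g. the freelist lock
//       }
//     }
//   };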
8815 8814
8816 8815 #ifndef PRODUCT
8817 8816 // This is actually very useful in a product build if it can
8818 8817 // be called from the debugger. Compile it into the product
8819 8818 // as needed.
8820 8819 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8821 8820 return debug_cms_space->verify_chunk_in_free_list(fc);
8822 8821 }
8823 8822 #endif
8824 8823
8825 8824 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8826 8825 if (CMSTraceSweeper) {
8827 8826 gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
8828 8827 fc, fc->size());
8829 8828 }
8830 8829 }
8831 8830
8832 8831 // CMSIsAliveClosure
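// An object is treated as live by this closure if it lies outside the span
// being collected (only the CMS generation is a candidate for reclamation
// here) or if it has already been marked in the CMS bit map.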
8833 8832 bool CMSIsAliveClosure::do_object_b(oop obj) {
8834 8833 HeapWord* addr = (HeapWord*)obj;
8835 8834 return addr != NULL &&
8836 8835 (!_span.contains(addr) || _bit_map->isMarked(addr));
8837 8836 }
8838 8837
8839 8838
8840 8839 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8841 8840 MemRegion span,
8842 8841 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8843 8842 bool cpc):
8844 8843 _collector(collector),
8845 8844 _span(span),
8846 8845 _bit_map(bit_map),
8847 8846 _mark_stack(mark_stack),
8848 8847 _concurrent_precleaning(cpc) {
8849 8848 assert(!_span.is_empty(), "Empty span could spell trouble");
8850 8849 }
8851 8850
8852 8851
8853 8852 // CMSKeepAliveClosure: the serial version
8854 8853 void CMSKeepAliveClosure::do_oop(oop obj) {
8855 8854 HeapWord* addr = (HeapWord*)obj;
8856 8855 if (_span.contains(addr) &&
8857 8856 !_bit_map->isMarked(addr)) {
8858 8857 _bit_map->mark(addr);
8859 8858 bool simulate_overflow = false;
8860 8859 NOT_PRODUCT(
8861 8860 if (CMSMarkStackOverflowALot &&
8862 8861 _collector->simulate_overflow()) {
8863 8862 // simulate a stack overflow
8864 8863 simulate_overflow = true;
8865 8864 }
8866 8865 )
8867 8866 if (simulate_overflow || !_mark_stack->push(obj)) {
8868 8867 if (_concurrent_precleaning) {
8869 8868 // We dirty the overflown object and let the remark
8870 8869 // phase deal with it.
8871 8870 assert(_collector->overflow_list_is_empty(), "Error");
8872 8871 // In the case of object arrays, we need to dirty all of
8873 8872 // the cards that the object spans. No locking or atomics
8874 8873 // are needed since no one else can be mutating the mod union
8875 8874 // table.
8876 8875 if (obj->is_objArray()) {
8877 8876 size_t sz = obj->size();
8878 8877 HeapWord* end_card_addr =
8879 8878 (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8880 8879 MemRegion redirty_range = MemRegion(addr, end_card_addr);
8881 8880 assert(!redirty_range.is_empty(), "Arithmetical tautology");
8882 8881 _collector->_modUnionTable.mark_range(redirty_range);
8883 8882 } else {
8884 8883 _collector->_modUnionTable.mark(addr);
8885 8884 }
8886 8885 _collector->_ser_kac_preclean_ovflw++;
8887 8886 } else {
8888 8887 _collector->push_on_overflow_list(obj);
8889 8888 _collector->_ser_kac_ovflw++;
8890 8889 }
8891 8890 }
8892 8891 }
8893 8892 }
8894 8893
8895 8894 void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8896 8895 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8897 8896
8898 8897 // CMSParKeepAliveClosure: a parallel version of the above.
8899 8898 // The work queues are private to each closure (thread),
8900 8899 // but (may be) available for stealing by other threads.
8901 8900 void CMSParKeepAliveClosure::do_oop(oop obj) {
8902 8901 HeapWord* addr = (HeapWord*)obj;
8903 8902 if (_span.contains(addr) &&
8904 8903 !_bit_map->isMarked(addr)) {
8905 8904 // In general, during recursive tracing, several threads
8906 8905 // may be concurrently getting here; the first one to
8907 8906 // "tag" it, claims it.
8908 8907 if (_bit_map->par_mark(addr)) {
8909 8908 bool res = _work_queue->push(obj);
8910 8909 assert(res, "Low water mark should be much less than capacity");
8911 8910 // Do a recursive trim in the hope that this will keep
8912 8911 // stack usage lower, but leave some oops for potential stealers
8913 8912 trim_queue(_low_water_mark);
8914 8913 } // Else, another thread got there first
8915 8914 }
8916 8915 }
8917 8916
8918 8917 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8919 8918 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8920 8919
8921 8920 void CMSParKeepAliveClosure::trim_queue(uint max) {
8922 8921 while (_work_queue->size() > max) {
8923 8922 oop new_oop;
8924 8923 if (_work_queue->pop_local(new_oop)) {
8925 8924 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8926 8925 assert(_bit_map->isMarked((HeapWord*)new_oop),
8927 8926 "no white objects on this stack!");
8928 8927 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8929 8928 // iterate over the oops in this oop, marking and pushing
8930 8929 // the ones in CMS heap (i.e. in _span).
8931 8930 new_oop->oop_iterate(&_mark_and_push);
8932 8931 }
8933 8932 }
8934 8933 }
8935 8934
8936 8935 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8937 8936 CMSCollector* collector,
8938 8937 MemRegion span, CMSBitMap* bit_map,
8939 8938 OopTaskQueue* work_queue):
8940 8939 _collector(collector),
8941 8940 _span(span),
8942 8941 _bit_map(bit_map),
8943 8942 _work_queue(work_queue) { }
8944 8943
8945 8944 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8946 8945 HeapWord* addr = (HeapWord*)obj;
8947 8946 if (_span.contains(addr) &&
8948 8947 !_bit_map->isMarked(addr)) {
8949 8948 if (_bit_map->par_mark(addr)) {
8950 8949 bool simulate_overflow = false;
8951 8950 NOT_PRODUCT(
8952 8951 if (CMSMarkStackOverflowALot &&
8953 8952 _collector->par_simulate_overflow()) {
8954 8953 // simulate a stack overflow
8955 8954 simulate_overflow = true;
8956 8955 }
8957 8956 )
8958 8957 if (simulate_overflow || !_work_queue->push(obj)) {
8959 8958 _collector->par_push_on_overflow_list(obj);
8960 8959 _collector->_par_kac_ovflw++;
8961 8960 }
8962 8961 } // Else another thread got there already
8963 8962 }
8964 8963 }
8965 8964
8966 8965 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8967 8966 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8968 8967
8969 8968 //////////////////////////////////////////////////////////////////
8970 8969 // CMSExpansionCause /////////////////////////////
8971 8970 //////////////////////////////////////////////////////////////////
8972 8971 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8973 8972 switch (cause) {
8974 8973 case _no_expansion:
8975 8974 return "No expansion";
8976 8975 case _satisfy_free_ratio:
8977 8976 return "Free ratio";
8978 8977 case _satisfy_promotion:
8979 8978 return "Satisfy promotion";
8980 8979 case _satisfy_allocation:
8981 8980 return "allocation";
8982 8981 case _allocate_par_lab:
8983 8982 return "Par LAB";
8984 8983 case _allocate_par_spooling_space:
8985 8984 return "Par Spooling Space";
8986 8985 case _adaptive_size_policy:
8987 8986 return "Ergonomics";
8988 8987 default:
8989 8988 return "unknown";
8990 8989 }
8991 8990 }
8992 8991
8993 8992 void CMSDrainMarkingStackClosure::do_void() {
8994 8993 // the max number to take from overflow list at a time
8995 8994 const size_t num = _mark_stack->capacity()/4;
8996 8995 assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8997 8996 "Overflow list should be NULL during concurrent phases");
8998 8997 while (!_mark_stack->isEmpty() ||
8999 8998 // if stack is empty, check the overflow list
9000 8999 _collector->take_from_overflow_list(num, _mark_stack)) {
9001 9000 oop obj = _mark_stack->pop();
9002 9001 HeapWord* addr = (HeapWord*)obj;
9003 9002 assert(_span.contains(addr), "Should be within span");
9004 9003 assert(_bit_map->isMarked(addr), "Should be marked");
9005 9004 assert(obj->is_oop(), "Should be an oop");
9006 9005 obj->oop_iterate(_keep_alive);
9007 9006 }
9008 9007 }
9009 9008
9010 9009 void CMSParDrainMarkingStackClosure::do_void() {
9011 9010 // drain queue
9012 9011 trim_queue(0);
9013 9012 }
9014 9013
9015 9014 // Trim our work_queue so its length is below max at return
9016 9015 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
9017 9016 while (_work_queue->size() > max) {
9018 9017 oop new_oop;
9019 9018 if (_work_queue->pop_local(new_oop)) {
9020 9019 assert(new_oop->is_oop(), "Expected an oop");
9021 9020 assert(_bit_map->isMarked((HeapWord*)new_oop),
9022 9021 "no white objects on this stack!");
9023 9022 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
9024 9023 // iterate over the oops in this oop, marking and pushing
9025 9024 // the ones in CMS heap (i.e. in _span).
9026 9025 new_oop->oop_iterate(&_mark_and_push);
9027 9026 }
9028 9027 }
9029 9028 }
9030 9029
9031 9030 ////////////////////////////////////////////////////////////////////
9032 9031 // Support for Marking Stack Overflow list handling and related code
9033 9032 ////////////////////////////////////////////////////////////////////
9034 9033 // Much of the following code is similar in shape and spirit to the
9035 9034 // code used in ParNewGC. We should try to share that code
9036 9035 // as much as possible in the future.
9037 9036
9038 9037 #ifndef PRODUCT
9039 9038 // Debugging support for CMSStackOverflowALot
9040 9039
9041 9040 // It's OK to call this multi-threaded; the worst thing
9042 9041 // that can happen is that we'll get a bunch of closely
9043 9042 // spaced simulated overflows, but that's OK; in fact it is
9044 9043 // probably good, as it would exercise the overflow code
9045 9044 // under contention.
9046 9045 bool CMSCollector::simulate_overflow() {
9047 9046 if (_overflow_counter-- <= 0) { // just being defensive
9048 9047 _overflow_counter = CMSMarkStackOverflowInterval;
9049 9048 return true;
9050 9049 } else {
9051 9050 return false;
9052 9051 }
9053 9052 }
9054 9053
9055 9054 bool CMSCollector::par_simulate_overflow() {
9056 9055 return simulate_overflow();
9057 9056 }
9058 9057 #endif
9059 9058
9060 9059 // Single-threaded
9061 9060 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
9062 9061 assert(stack->isEmpty(), "Expected precondition");
9063 9062 assert(stack->capacity() > num, "Shouldn't bite more than can chew");
9064 9063 size_t i = num;
9065 9064 oop cur = _overflow_list;
9066 9065 const markOop proto = markOopDesc::prototype();
9067 9066 NOT_PRODUCT(ssize_t n = 0;)
9068 9067 for (oop next; i > 0 && cur != NULL; cur = next, i--) {
9069 9068 next = oop(cur->mark());
9070 9069 cur->set_mark(proto); // until proven otherwise
9071 9070 assert(cur->is_oop(), "Should be an oop");
9072 9071 bool res = stack->push(cur);
9073 9072 assert(res, "Bit off more than can chew?");
9074 9073 NOT_PRODUCT(n++;)
9075 9074 }
9076 9075 _overflow_list = cur;
9077 9076 #ifndef PRODUCT
9078 9077 assert(_num_par_pushes >= n, "Too many pops?");
9079 9078 _num_par_pushes -= n;
9080 9079 #endif
9081 9080 return !stack->isEmpty();
9082 9081 }
9083 9082
9084 9083 #define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
9085 9084 // (MT-safe) Get a prefix of at most "num" from the list.
9086 9085 // The overflow list is chained through the mark word of
9087 9086 // each object in the list. We fetch the entire list,
9088 9087 // break off a prefix of the right size and return the
9089 9088 // remainder. If other threads try to take objects from
9090 9089 // the overflow list at that time, they will wait for
9091 9090 // some time to see if data becomes available. If (and
9092 9091 // only if) another thread places one or more object(s)
9093 9092 // on the global list before we have returned the suffix
9094 9093 // to the global list, we will walk down our local list
9095 9094 // to find its end and append the global list to
9096 9095 // our suffix before returning it. This suffix walk can
9097 9096 // prove to be expensive (quadratic in the amount of traffic)
9098 9097 // when there are many objects in the overflow list and
9099 9098 // there is much producer-consumer contention on the list.
9100 9099 // *NOTE*: The overflow list manipulation code here and
9101 9100 // in ParNewGeneration:: are very similar in shape,
9102 9101 // except that in the ParNew case we use the old (from/eden)
9103 9102 // copy of the object to thread the list via its klass word.
9104 9103 // Because of the common code, if you make any changes in
9105 9104 // the code below, please check the ParNew version to see if
9106 9105 // similar changes might be needed.
9107 9106 // CR 6797058 has been filed to consolidate the common code.
9108 9107 bool CMSCollector::par_take_from_overflow_list(size_t num,
9109 9108 OopTaskQueue* work_q,
9110 9109 int no_of_gc_threads) {
9111 9110 assert(work_q->size() == 0, "First empty local work queue");
9112 9111 assert(num < work_q->max_elems(), "Can't bite more than we can chew");
9113 9112 if (_overflow_list == NULL) {
9114 9113 return false;
9115 9114 }
9116 9115 // Grab the entire list; we'll put back a suffix
9117 9116 oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
9118 9117 Thread* tid = Thread::current();
9119 9118 // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
9120 9119 // set to ParallelGCThreads.
9121 9120 size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
9122 9121 size_t sleep_time_millis = MAX2((size_t)1, num/100);
9123 9122 // If the list is busy, we spin for a short while,
9124 9123 // sleeping between attempts to get the list.
9125 9124 for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
9126 9125 os::sleep(tid, sleep_time_millis, false);
9127 9126 if (_overflow_list == NULL) {
9128 9127 // Nothing left to take
9129 9128 return false;
9130 9129 } else if (_overflow_list != BUSY) {
9131 9130 // Try and grab the prefix
9132 9131 prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
9133 9132 }
9134 9133 }
9135 9134 // If the list was found to be empty, or we spun long
9136 9135 // enough, we give up and return empty-handed. If we leave
9137 9136 // the list in the BUSY state below, it must be the case that
9138 9137 // some other thread holds the overflow list and will set it
9139 9138 // to a non-BUSY state in the future.
9140 9139 if (prefix == NULL || prefix == BUSY) {
9141 9140 // Nothing to take or waited long enough
9142 9141 if (prefix == NULL) {
9143 9142 // Write back the NULL in case we overwrote it with BUSY above
9144 9143 // and it is still the same value.
9145 9144 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9146 9145 }
9147 9146 return false;
9148 9147 }
9149 9148 assert(prefix != NULL && prefix != BUSY, "Error");
9150 9149 size_t i = num;
9151 9150 oop cur = prefix;
9152 9151 // Walk down the first "num" objects, unless we reach the end.
9153 9152 for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
9154 9153 if (cur->mark() == NULL) {
9155 9154 // We have "num" or fewer elements in the list, so there
9156 9155 // is nothing to return to the global list.
9157 9156 // Write back the NULL in lieu of the BUSY we wrote
9158 9157 // above, if it is still the same value.
9159 9158 if (_overflow_list == BUSY) {
9160 9159 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
9161 9160 }
9162 9161 } else {
9163 9162 // Chop off the suffix and return it to the global list.
9164 9163 assert(cur->mark() != BUSY, "Error");
9165 9164 oop suffix_head = cur->mark(); // suffix will be put back on global list
9166 9165 cur->set_mark(NULL); // break off suffix
9167 9166 // It's possible that the list is still in the empty(busy) state
9168 9167 // we left it in a short while ago; in that case we may be
9169 9168 // able to place back the suffix without incurring the cost
9170 9169 // of a walk down the list.
9171 9170 oop observed_overflow_list = _overflow_list;
9172 9171 oop cur_overflow_list = observed_overflow_list;
9173 9172 bool attached = false;
9174 9173 while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
9175 9174 observed_overflow_list =
9176 9175 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9177 9176 if (cur_overflow_list == observed_overflow_list) {
9178 9177 attached = true;
9179 9178 break;
9180 9179 } else cur_overflow_list = observed_overflow_list;
9181 9180 }
9182 9181 if (!attached) {
9183 9182 // Too bad, someone else sneaked in (at least) an element; we'll need
9184 9183 // to do a splice. Find tail of suffix so we can prepend suffix to global
9185 9184 // list.
9186 9185 for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
9187 9186 oop suffix_tail = cur;
9188 9187 assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
9189 9188 "Tautology");
9190 9189 observed_overflow_list = _overflow_list;
9191 9190 do {
9192 9191 cur_overflow_list = observed_overflow_list;
9193 9192 if (cur_overflow_list != BUSY) {
9194 9193 // Do the splice ...
9195 9194 suffix_tail->set_mark(markOop(cur_overflow_list));
9196 9195 } else { // cur_overflow_list == BUSY
9197 9196 suffix_tail->set_mark(NULL);
9198 9197 }
9199 9198 // ... and try to place spliced list back on overflow_list ...
9200 9199 observed_overflow_list =
9201 9200 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
9202 9201 } while (cur_overflow_list != observed_overflow_list);
9203 9202 // ... until we have succeeded in doing so.
9204 9203 }
9205 9204 }
9206 9205
9207 9206 // Push the prefix elements on work_q
9208 9207 assert(prefix != NULL, "control point invariant");
9209 9208 const markOop proto = markOopDesc::prototype();
9210 9209 oop next;
9211 9210 NOT_PRODUCT(ssize_t n = 0;)
9212 9211 for (cur = prefix; cur != NULL; cur = next) {
9213 9212 next = oop(cur->mark());
9214 9213 cur->set_mark(proto); // until proven otherwise
9215 9214 assert(cur->is_oop(), "Should be an oop");
9216 9215 bool res = work_q->push(cur);
9217 9216 assert(res, "Bit off more than we can chew?");
9218 9217 NOT_PRODUCT(n++;)
9219 9218 }
9220 9219 #ifndef PRODUCT
9221 9220 assert(_num_par_pushes >= n, "Too many pops?");
9222 9221 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
9223 9222 #endif
9224 9223 return true;
9225 9224 }
9226 9225
9227 9226 // Single-threaded
9228 9227 void CMSCollector::push_on_overflow_list(oop p) {
9229 9228 NOT_PRODUCT(_num_par_pushes++;)
9230 9229 assert(p->is_oop(), "Not an oop");
9231 9230 preserve_mark_if_necessary(p);
9232 9231 p->set_mark((markOop)_overflow_list);
9233 9232 _overflow_list = p;
9234 9233 }
9235 9234
9236 9235 // Multi-threaded; use CAS to prepend to overflow list
9237 9236 void CMSCollector::par_push_on_overflow_list(oop p) {
9238 9237 NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
9239 9238 assert(p->is_oop(), "Not an oop");
9240 9239 par_preserve_mark_if_necessary(p);
9241 9240 oop observed_overflow_list = _overflow_list;
9242 9241 oop cur_overflow_list;
9243 9242 do {
9244 9243 cur_overflow_list = observed_overflow_list;
9245 9244 if (cur_overflow_list != BUSY) {
9246 9245 p->set_mark(markOop(cur_overflow_list));
9247 9246 } else {
9248 9247 p->set_mark(NULL);
9249 9248 }
9250 9249 observed_overflow_list =
9251 9250 (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
9252 9251 } while (cur_overflow_list != observed_overflow_list);
9253 9252 }
9254 9253 #undef BUSY
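// A small worked example of the overflow-list layout described above
// par_take_from_overflow_list() may help; the object names A..D are
// hypothetical and "mark" denotes each object's mark word, which doubles
// as the list link:
//
//   _overflow_list -> A,  A.mark -> B,  B.mark -> C,  C.mark -> D,  D.mark == NULL
//
// par_take_from_overflow_list(num = 2, ...) then proceeds roughly as follows:
//   1. Atomic::xchg_ptr(BUSY, &_overflow_list) returns A; concurrent takers
//      now see BUSY and spin/sleep.
//   2. Walk num-1 links from A, so cur ends at B; suffix_head = B.mark = C.
//   3. B's mark is cleared, detaching the prefix A -> B from the suffix C -> D.
//   4. The suffix head C is CAS'ed back onto _overflow_list (splicing behind
//      any elements pushed concurrently), and A and B are pushed onto the
//      work queue with their mark words restored to markOopDesc::prototype().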
9255 9254
9256 9255 // Single threaded
9257 9256 // General Note on GrowableArray: pushes may silently fail
9258 9257 // because we are (temporarily) out of C-heap for expanding
9259 9258 // the stack. The problem is quite ubiquitous and affects
9260 9259 // a lot of code in the JVM. The prudent thing for GrowableArray
9261 9260 // to do (for now) is to exit with an error. However, that may
9262 9261 // be too draconian in some cases because the caller may be
9263 9262 // able to recover without much harm. For such cases, we
9264 9263 // should probably introduce a "soft_push" method which returns
9265 9264 // an indication of success or failure with the assumption that
9266 9265 // the caller may be able to recover from a failure; code in
9267 9266 // the VM can then be changed, incrementally, to deal with such
9268 9267 // failures where possible, thus incrementally hardening the VM
9269 9268 // in such low resource situations.
9270 9269 void CMSCollector::preserve_mark_work(oop p, markOop m) {
9271 9270 _preserved_oop_stack.push(p);
9272 9271 _preserved_mark_stack.push(m);
9273 9272 assert(m == p->mark(), "Mark word changed");
9274 9273 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9275 9274 "bijection");
9276 9275 }
9277 9276
9278 9277 // Single threaded
9279 9278 void CMSCollector::preserve_mark_if_necessary(oop p) {
9280 9279 markOop m = p->mark();
9281 9280 if (m->must_be_preserved(p)) {
9282 9281 preserve_mark_work(p, m);
9283 9282 }
9284 9283 }
9285 9284
9286 9285 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
9287 9286 markOop m = p->mark();
9288 9287 if (m->must_be_preserved(p)) {
9289 9288 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
9290 9289 // Even though we read the mark word without holding
9291 9290 // the lock, we are assured that it will not change
9292 9291 // because we "own" this oop, so no other thread can
9293 9292 // be trying to push it on the overflow list; see
9294 9293 // the assertion in preserve_mark_work() that checks
9295 9294 // that m == p->mark().
9296 9295 preserve_mark_work(p, m);
9297 9296 }
9298 9297 }
9299 9298
9300 9299 // We should be able to do this multi-threaded,
9301 9300 // a chunk of stack being a task (this is
9302 9301 // correct because each oop only ever appears
9303 9302 // once in the overflow list). However, it's
9304 9303 // not very easy to completely overlap this with
9305 9304 // other operations, so it will generally not be done
9306 9305 // until all work's been completed. Because we
9307 9306 // expect the preserved oop stack (set) to be small,
9308 9307 // it's probably fine to do this single-threaded.
9309 9308 // We can explore cleverer concurrent/overlapped/parallel
9310 9309 // processing of preserved marks if we feel the
9311 9310 // need for this in the future. Stack overflow should
9312 9311 // be so rare in practice and, when it happens, its
9313 9312 // effect on performance so great that this will
9314 9313 // likely just be in the noise anyway.
9315 9314 void CMSCollector::restore_preserved_marks_if_any() {
9316 9315 assert(SafepointSynchronize::is_at_safepoint(),
9317 9316 "world should be stopped");
9318 9317 assert(Thread::current()->is_ConcurrentGC_thread() ||
9319 9318 Thread::current()->is_VM_thread(),
9320 9319 "should be single-threaded");
9321 9320 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
9322 9321 "bijection");
9323 9322
9324 9323 while (!_preserved_oop_stack.is_empty()) {
9325 9324 oop p = _preserved_oop_stack.pop();
9326 9325 assert(p->is_oop(), "Should be an oop");
9327 9326 assert(_span.contains(p), "oop should be in _span");
9328 9327 assert(p->mark() == markOopDesc::prototype(),
9329 9328 "Set when taken from overflow list");
9330 9329 markOop m = _preserved_mark_stack.pop();
9331 9330 p->set_mark(m);
9332 9331 }
9333 9332 assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
9334 9333 "stacks were cleared above");
9335 9334 }
9336 9335
9337 9336 #ifndef PRODUCT
9338 9337 bool CMSCollector::no_preserved_marks() const {
9339 9338 return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
9340 9339 }
9341 9340 #endif
9342 9341
9343 9342 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
9344 9343 {
9345 9344 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
9346 9345 CMSAdaptiveSizePolicy* size_policy =
9347 9346 (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
9348 9347 assert(size_policy->is_gc_cms_adaptive_size_policy(),
9349 9348 "Wrong type for size policy");
9350 9349 return size_policy;
9351 9350 }
9352 9351
9353 9352 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
9354 9353 size_t desired_promo_size) {
9355 9354 if (cur_promo_size < desired_promo_size) {
9356 9355 size_t expand_bytes = desired_promo_size - cur_promo_size;
9357 9356 if (PrintAdaptiveSizePolicy && Verbose) {
9358 9357 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9359 9358 "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
9360 9359 expand_bytes);
9361 9360 }
9362 9361 expand(expand_bytes,
9363 9362 MinHeapDeltaBytes,
9364 9363 CMSExpansionCause::_adaptive_size_policy);
9365 9364 } else if (desired_promo_size < cur_promo_size) {
9366 9365 size_t shrink_bytes = cur_promo_size - desired_promo_size;
9367 9366 if (PrintAdaptiveSizePolicy && Verbose) {
9368 9367 gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9369 9368 "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
9370 9369 shrink_bytes);
9371 9370 }
9372 9371 shrink(shrink_bytes);
9373 9372 }
9374 9373 }
9375 9374
9376 9375 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
9377 9376 GenCollectedHeap* gch = GenCollectedHeap::heap();
9378 9377 CMSGCAdaptivePolicyCounters* counters =
9379 9378 (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
9380 9379 assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
9381 9380 "Wrong kind of counters");
9382 9381 return counters;
9383 9382 }
9384 9383
9385 9384
9386 9385 void ASConcurrentMarkSweepGeneration::update_counters() {
9387 9386 if (UsePerfData) {
9388 9387 _space_counters->update_all();
9389 9388 _gen_counters->update_all();
9390 9389 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9391 9390 GenCollectedHeap* gch = GenCollectedHeap::heap();
9392 9391 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9393 9392 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9394 9393 "Wrong gc statistics type");
9395 9394 counters->update_counters(gc_stats_l);
9396 9395 }
9397 9396 }
9398 9397
9399 9398 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
9400 9399 if (UsePerfData) {
9401 9400 _space_counters->update_used(used);
9402 9401 _space_counters->update_capacity();
9403 9402 _gen_counters->update_all();
9404 9403
9405 9404 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9406 9405 GenCollectedHeap* gch = GenCollectedHeap::heap();
9407 9406 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9408 9407 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9409 9408 "Wrong gc statistics type");
9410 9409 counters->update_counters(gc_stats_l);
9411 9410 }
9412 9411 }
9413 9412
9414 9413 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
9415 9414 assert_locked_or_safepoint(Heap_lock);
9416 9415 assert_lock_strong(freelistLock());
9417 9416 HeapWord* old_end = _cmsSpace->end();
9418 9417 HeapWord* unallocated_start = _cmsSpace->unallocated_block();
9419 9418 assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
9420 9419 FreeChunk* chunk_at_end = find_chunk_at_end();
9421 9420 if (chunk_at_end == NULL) {
9422 9421 // No room to shrink
9423 9422 if (PrintGCDetails && Verbose) {
9424 9423 gclog_or_tty->print_cr("No room to shrink: old_end "
9425 9424 PTR_FORMAT " unallocated_start " PTR_FORMAT
9426 9425 " chunk_at_end " PTR_FORMAT,
9427 9426 old_end, unallocated_start, chunk_at_end);
9428 9427 }
9429 9428 return;
9430 9429 } else {
9431 9430
9432 9431 // Find the chunk at the end of the space and determine
9433 9432 // how much it can be shrunk.
9434 9433 size_t shrinkable_size_in_bytes = chunk_at_end->size();
9435 9434 size_t aligned_shrinkable_size_in_bytes =
9436 9435 align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
9437 9436 assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
9438 9437 "Inconsistent chunk at end of space");
9439 9438 size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
9440 9439 size_t word_size_before = heap_word_size(_virtual_space.committed_size());
9441 9440
9442 9441 // Shrink the underlying space
9443 9442 _virtual_space.shrink_by(bytes);
9444 9443 if (PrintGCDetails && Verbose) {
9445 9444 gclog_or_tty->print_cr("ASConcurrentMarkSweepGeneration::shrink_by:"
9446 9445 " desired_bytes " SIZE_FORMAT
9447 9446 " shrinkable_size_in_bytes " SIZE_FORMAT
9448 9447 " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
9449 9448 " bytes " SIZE_FORMAT,
9450 9449 desired_bytes, shrinkable_size_in_bytes,
9451 9450 aligned_shrinkable_size_in_bytes, bytes);
9452 9451 gclog_or_tty->print_cr(" old_end " PTR_FORMAT
9453 9452 " unallocated_start " PTR_FORMAT,
9454 9453 old_end, unallocated_start);
9455 9454 }
9456 9455
9457 9456 // If the space did shrink (shrinking is not guaranteed),
9458 9457 // shrink the chunk at the end by the appropriate amount.
9459 9458 if (((HeapWord*)_virtual_space.high()) < old_end) {
9460 9459 size_t new_word_size =
9461 9460 heap_word_size(_virtual_space.committed_size());
9462 9461
9463 9462 // Have to remove the chunk from the dictionary because it is changing
9464 9463 // size and might be someplace elsewhere in the dictionary.
9465 9464
9466 9465 // Get the chunk at end, shrink it, and put it
9467 9466 // back.
9468 9467 _cmsSpace->removeChunkFromDictionary(chunk_at_end);
9469 9468 size_t word_size_change = word_size_before - new_word_size;
9470 9469 size_t chunk_at_end_old_size = chunk_at_end->size();
9471 9470 assert(chunk_at_end_old_size >= word_size_change,
9472 9471 "Shrink is too large");
9473 9472 chunk_at_end->set_size(chunk_at_end_old_size -
9474 9473 word_size_change);
9475 9474 _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
9476 9475 word_size_change);
9477 9476
9478 9477 _cmsSpace->returnChunkToDictionary(chunk_at_end);
9479 9478
9480 9479 MemRegion mr(_cmsSpace->bottom(), new_word_size);
9481 9480 _bts->resize(new_word_size); // resize the block offset shared array
9482 9481 Universe::heap()->barrier_set()->resize_covered_region(mr);
9483 9482 _cmsSpace->assert_locked();
9484 9483 _cmsSpace->set_end((HeapWord*)_virtual_space.high());
9485 9484
9486 9485 NOT_PRODUCT(_cmsSpace->dictionary()->verify());
9487 9486
9488 9487 // update the space and generation capacity counters
9489 9488 if (UsePerfData) {
9490 9489 _space_counters->update_capacity();
9491 9490 _gen_counters->update_all();
9492 9491 }
9493 9492
9494 9493 if (Verbose && PrintGCDetails) {
9495 9494 size_t new_mem_size = _virtual_space.committed_size();
9496 9495 size_t old_mem_size = new_mem_size + bytes;
9497 9496 gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
9498 9497 name(), old_mem_size/K, bytes/K, new_mem_size/K);
9499 9498 }
9500 9499 }
9501 9500
9502 9501 assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9503 9502 "Inconsistency at end of space");
9504 9503 assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
9505 9504 "Shrinking is inconsistent");
9506 9505 return;
9507 9506 }
9508 9507 }
9509 9508 // Transfer some number of overflown objects to usual marking
9510 9509 // stack. Return true if some objects were transferred.
9511 9510 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9512 9511 size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9513 9512 (size_t)ParGCDesiredObjsFromOverflowList);
9514 9513
9515 9514 bool res = _collector->take_from_overflow_list(num, _mark_stack);
9516 9515 assert(_collector->overflow_list_is_empty() || res,
9517 9516 "If list is not empty, we should have taken something");
9518 9517 assert(!res || !_mark_stack->isEmpty(),
9519 9518 "If we took something, it should now be on our stack");
9520 9519 return res;
9521 9520 }
9522 9521
9523 9522 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9524 9523 size_t res = _sp->block_size_no_stall(addr, _collector);
9525 9524 if (_sp->block_is_obj(addr)) {
9526 9525 if (_live_bit_map->isMarked(addr)) {
9527 9526 // It can't have been dead in a previous cycle
9528 9527 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9529 9528 } else {
9530 9529 _dead_bit_map->mark(addr); // mark the dead object
9531 9530 }
9532 9531 }
9533 9532 // Could be 0, if the block size could not be computed without stalling.
9534 9533 return res;
9535 9534 }
9536 9535
9537 9536 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
9538 9537
9539 9538 switch (phase) {
9540 9539 case CMSCollector::InitialMarking:
9541 9540 initialize(true /* fullGC */ ,
9542 9541 cause /* cause of the GC */,
9543 9542 true /* recordGCBeginTime */,
9544 9543 true /* recordPreGCUsage */,
9545 9544 false /* recordPeakUsage */,
9546 9545 false /* recordPostGCusage */,
9547 9546 true /* recordAccumulatedGCTime */,
9548 9547 false /* recordGCEndTime */,
9549 9548 false /* countCollection */ );
9550 9549 break;
9551 9550
9552 9551 case CMSCollector::FinalMarking:
9553 9552 initialize(true /* fullGC */ ,
9554 9553 cause /* cause of the GC */,
9555 9554 false /* recordGCBeginTime */,
9556 9555 false /* recordPreGCUsage */,
9557 9556 false /* recordPeakUsage */,
9558 9557 false /* recordPostGCusage */,
9559 9558 true /* recordAccumulatedGCTime */,
9560 9559 false /* recordGCEndTime */,
9561 9560 false /* countCollection */ );
9562 9561 break;
9563 9562
9564 9563 case CMSCollector::Sweeping:
9565 9564 initialize(true /* fullGC */ ,
9566 9565 cause /* cause of the GC */,
9567 9566 false /* recordGCBeginTime */,
9568 9567 false /* recordPreGCUsage */,
9569 9568 true /* recordPeakUsage */,
9570 9569 true /* recordPostGCusage */,
9571 9570 false /* recordAccumulatedGCTime */,
9572 9571 true /* recordGCEndTime */,
9573 9572 true /* countCollection */ );
9574 9573 break;
9575 9574
9576 9575 default:
9577 9576 ShouldNotReachHere();
9578 9577 }
9579 9578 }
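// At-a-glance summary of the initialize() flags chosen above for each CMS
// phase (T = true, F = false):
//
//                   begin  preUsage  peak  postUsage  accumTime  end  count
//   InitialMarking    T       T       F       F          T        F     F
//   FinalMarking      F       F       F       F          T        F     F
//   Sweeping          F       F       T       T          F        T     T
//
// i.e. the memory-manager bookkeeping for one CMS "full GC" is split across
// the initial mark, final mark (remark) and sweep phases, and the collection
// is counted only once, at sweep time.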
(2932 lines elided)