--- old/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp
+++ new/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp
/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/cms/cmsCollectorPolicy.hpp"
#include "gc/cms/cmsOopClosures.inline.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"
#include "utilities/stack.inline.hpp"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
           "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};
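
// Illustrative usage sketch, not invoked anywhere: a CMS phase that must
// exclude the VM thread takes the token (and, optionally, some leaf locks)
// for the duration of a scope. The lock expression here is hypothetical;
// any Mutex* accepted by the constructor works the same way.
//
//   {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, some_leaf_lock);
//     ... work the VM thread must not interleave with ...
//   }  // locks released in reverse order, then the token is relinquished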


//////////////////////////////////////////////////////////////////
// Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, int level,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, level, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _did_compact(false)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
                                           use_adaptive_freelists,
                                           dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  _cmsSpace->_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedClassPointers ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           " that of OopDesc::_klass within OopDesc");
  )

  _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
  }

  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}
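
// Worked example for _dilatation_factor (hypothetical numbers, purely for
// illustration): if MinChunkSize were 4 heap words and the promoting
// generation's minimum object size (approximated above by
// CollectedHeap::min_fill_size()) were 2 words, the factor would be
// 4.0 / 2.0 = 2.0, i.e. a promoted object could at most double in footprint.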


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle. Unless explicitly specified
// via CMSInitiatingOccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                           f * (CMSTriggerRatio/100)
//   where CMSTriggerRatio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTriggerRatio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
  assert(io <= 100 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                             / 100.0;
  }
}
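
// Worked example (illustrative; assumes MinHeapFreeRatio = 40 and
// CMSTriggerRatio = 80, with io < 0 so the computed path is taken):
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100.0) / 100.0
//                         = (60 + 32) / 100.0
//                         = 0.92
// i.e. a new cycle is initiated once this generation is 92% occupied.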

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_span,                               // span
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             ParallelGCThreads,                   // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
                             &_is_alive_closure);                 // closure for liveness info
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

  }
}

AdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  return gch->gen_policy()->size_policy();
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";
  GenCollectorPolicy* gcp = (GenCollectorPolicy*) GenCollectedHeap::heap()->collector_policy();

  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection. Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrence of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
        cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
        cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
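
// Worked example (illustrative numbers only): with 100 MB free in the CMS
// generation, an expected (padded average) promotion of 20 MB, and a
// CMSIncrementalSafetyFactor of 10, the adjusted free space is
// (100 - 20) MB * 0.9 = 72 MB; at a consumption rate of 8 MB/s this
// predicts roughly 72 / (8 + 1) = 8 seconds until the generation is full.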

// Compare the duration of the cms collection to the
// time remaining before the cms generation is empty.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used. This
// has been tried and some applications experienced
// promotion failures early in execution. This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If the estimated work no longer fits in the time remaining
  // before the generation fills up, we are already late:
  // start a collection immediately.
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  return deadline - work;
}
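
// Worked example (illustrative numbers only): if a CMS cycle takes
// cms_duration() = 2.0 s, scavenges arrive every gc0_period() = 1.0 s, and
// time_until_cms_gen_full() = 10.0 s, then work = 3.0 s and the collector
// can wait about 10.0 - 3.0 = 7.0 s before it must start the next cycle.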

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
            gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           CardTableRS* ct,
                           ConcurrentMarkSweepPolicy* cp):
  _cmsGen(cmsGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen
  _span(cmsGen->reserved()),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
                             // verify that this lock is acquired with the
                             // expected safepoint-check semantics.
                             Monitor::_safepoint_check_sometimes)),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(CMSClassUnloadingEnabled),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(GenCollectedHeap::SO_None),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _cms_start_registered(false)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
                                                   ConcGCThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
                "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
  _young_gen = (ParNewGeneration*)gch->young_gen();
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
  }

  // Support for parallelizing survivor space rescan
  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
    const size_t max_plab_samples =
      ((DefNewGeneration*)_young_gen)->max_survivor_size() / plab_sample_minimum_size();

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    _survivor_chunk_capacity = 2*max_plab_samples;
    for (uint i = 0; i < ParallelGCThreads; i++) {
      HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
      ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
      assert(cur->end() == 0, "Should be 0");
      assert(cur->array() == vec, "Should be vec");
      assert(cur->capacity() == max_plab_samples, "Error");
    }
  }
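
  // Worked example for the sampling arrays above (illustrative numbers):
  // with a 16 MB maximum survivor size and a 2 KB minimum PLAB sample size,
  // max_plab_samples = 16 MB / 2 KB = 8192, so each per-thread ChunkArray
  // holds up to 8192 samples and the merged _survivor_chunk_array holds up
  // to 2 * 8192 = 16384 entries.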

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
}

size_t CMSCollector::plab_sample_minimum_size() {
  // The default value of MinTLABSize is 2k, but there is
  // no way to get the default value if the flag has been overridden.
  return MAX2(ThreadLocalAllocBuffer::min_size() * HeapWordSize, 2 * K);
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// This is an optimized version of update_counters(). It takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    if (Verbose) {
      gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
        level(), short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print("[%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
        level(), short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
      gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
      gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr(
      "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
      "max_promo("SIZE_FORMAT")",
      res? "":" not", available, res? ">=":"<",
      av_promo, max_promotion_in_bytes);
  }
  return res;
}

// At a promotion failure, dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  if (CMSDumpAtPromotionFailure) {
    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information. These pointers can be adjusted
  // along with all the other pointers into the heap, but compaction
  // is expected to be a rare event for a heap using CMS, so don't
  // do it without seeing the need.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i]->promo.reset();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  // The heap has been compacted but not reset yet.
  // Any metric such as free() or used() will be incorrect.

  CardGeneration::compute_new_size();

  // Reset again after a possible resizing
  if (did_compact()) {
    cmsSpace()->reset_after_compaction();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
    if (PrintGCDetails && Verbose) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
      gclog_or_tty->print_cr("  Desired free fraction %f",
              desired_free_percentage);
      gclog_or_tty->print_cr("  Maximum free fraction %f",
              maximum_free_percentage);
      gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
      gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
              desired_capacity/1000);
      int prev_level = level() - 1;
      if (prev_level >= 0) {
        size_t prev_size = 0;
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        Generation* prev_gen = gch->young_gen();
        prev_size = prev_gen->capacity();
        gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
                prev_size/1000);
      }
      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
              unsafe_max_alloc_nogc()/1000);
      gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
              contiguous_available()/1000);
      gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
              expand_bytes);
    }
    // safe if expansion fails
    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  Expanded free fraction %f",
              ((double) free()) / capacity());
    }
  } else {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity <= capacity(), "invalid expansion size");
    size_t shrink_bytes = capacity() - desired_capacity;
    // Don't shrink unless the delta is greater than the minimum shrink we want
    if (shrink_bytes >= MinHeapDeltaBytes) {
      shrink_free_list_by(shrink_bytes);
    }
  }
}
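
// Worked example for the expansion path above (illustrative numbers): with
// capacity() = 100 MB and used() = 80 MB, free_percentage = 0.20. If
// MinHeapFreeRatio = 40, desired_free_percentage = 0.40, so
// desired_capacity = 80 MB / (1 - 0.40) ~= 133 MB and the generation asks
// to expand by about 33 MB (or by MinHeapDeltaBytes, if that is larger).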

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
                                                  bool   tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(),
                  Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                  bool   tlab /* ignored */) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    // We may block here with an uninitialized object with
    // its mark-bit or P-bits not yet set. Such objects need
    // to be safely navigable by block_start().
    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    //
    // Can the P-bits be deleted now? JJJ
    //
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so marking, precleaning or sweeping
    //    can skip over uninitialized or unparsable objects. An allocated
    //    object is considered uninitialized for our purposes as long as
    //    its klass word is NULL. All old gen objects are parsable
    //    as soon as they are initialized.
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}
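
// Concrete illustration of the marking just above (derived from the code,
// not a separate mechanism): for a direct allocation of 5 heap words at
// address "start", bits get set at start (live), start + 1 (the P-bit:
// possibly uninitialized) and start + 4 (end of object), which lets
// marking, precleaning and sweeping skip the block without parsing it.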

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                        CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}
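
// Example of the array dirtying above (assuming the usual 512-byte cards):
// promoting a 1000-word object array on a 64-bit VM spans 8000 bytes, so
// mr covers [start, round_to(start + 1000 words, 512 bytes)), i.e. every
// card that any element of the array overlaps, not just the header's card.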

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
    // Since this is the old generation, we don't try to promote
    // into a more senior generation.
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states. A free block (FREE), an allocated
// object (OBJECT) whose size() method reports the correct size,
// and an intermediate state (TRANSIENT) in which its size cannot
// be accurately determined.
// STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
// -----------------------------------------------------
// FREE:      klass_word & 1 == 1; mark_word holds block size
//
// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
// STATE IDENTIFICATION: (64 bit+COOPS)
// ------------------------------------
// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
//
// OBJECT:    klass_word installed; klass_word != 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
//
// STATE TRANSITION DIAGRAM
//
//        mut / parnew                     mut  /  parnew
// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
//  ^                                                                   |
//  |------------------------ DEAD <------------------------------------|
//         sweep                            mut
//
// While a block is in TRANSIENT state its size cannot be determined
// so readers will either need to come back later or stall until
// the size can be determined. Note that for the case of direct
// allocation, P-bits, when available, may be used to determine the
// size of an object that may not yet have been initialized.
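//
// Illustrative classification sketch for the 64 bit+COOPS encoding above
// (hypothetical pseudocode, not a function in this file):
//
//   if (mark_word & CMS_FREE_BIT) {
//     size = mark_word & ~CMS_FREE_BIT;    // FREE: size is in the mark word
//   } else if (klass_word != 0) {
//     size = oop(addr)->size();            // OBJECT: ask the oop
//   } else {
//     // TRANSIENT: retry later, or stall until the klass word is installed
//   }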

// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(promoInfo->has_spooling_space(), "Control point invariant");
  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
  if (obj_ptr == NULL) {
    obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
    if (obj_ptr == NULL) {
      return NULL;
    }
  }
  oop obj = oop(obj_ptr);
  OrderAccess::storestore();
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  // IMPORTANT: See note on object initialization for CMS above.
  // Otherwise, copy the object. Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  // Restore the mark word copied above.
  obj->set_mark(m);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  OrderAccess::storestore();

  if (UseCompressedClassPointers) {
    // Copy gap missed by (aligned) header size calculation below
    obj->set_klass_gap(old->klass_gap());
  }
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  // Now we can track the promoted object, if necessary. We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  assert(old->is_oop(), "Will use and dereference old klass ptr below");

  // Finally, install the klass pointer (this should be volatile).
  OrderAccess::storestore();
  obj->set_klass(old->klass());
  // We should now be able to calculate the right size for this object
  assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");

  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc_ptr(&_numObjectsPromoted);
    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
  )

  return obj;
}

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ps->lab.retire(thread_num);
}

void
ConcurrentMarkSweepGeneration::
par_oop_since_save_marks_iterate_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ParScanWithoutBarrierClosure* dummy_cl = NULL;
  ps->promo.promoted_oops_iterate_nv(dummy_cl);
}

bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
                                                   size_t size,
                                                   bool   tlab)
{
  // We allow a STW collection only if a full
  // collection was requested.
  return full || should_allocate(size, tlab); // FIX ME !!!
  // This and promotion failure handling are connected at the
  // hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  if (_full_gc_requested) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
                             "gc request (or gc_locker)");
    }
    return true;
  }

  FreelistLocker x(this);
  // ------------------------------------------------------------------
  // Print out lots of information which affects the initiation of
  // a collection.
  if (PrintCMSInitiationStatistics && stats().valid()) {
    gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
    gclog_or_tty->stamp();
    gclog_or_tty->cr();
    stats().print_on(gclog_or_tty);
    gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
      stats().time_until_cms_gen_full());
    gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
    gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
      _cmsGen->contiguous_available());
    gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
    gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
    gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
    gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
    gclog_or_tty->print_cr("metadata initialized %d",
      MetaspaceGC::should_concurrent_collect());
  }
  // ------------------------------------------------------------------

  // If the estimated time to complete a cms collection (cms_duration())
  // is less than the estimated time remaining until the cms generation
  // is full, start a collection.
  if (!UseCMSInitiatingOccupancyOnly) {
    if (stats().valid()) {
      if (stats().time_until_cms_start() == 0.0) {
        return true;
      }
    } else {
      // We want to conservatively collect somewhat early in order
      // to try and "bootstrap" our CMS/promotion statistics;
      // this branch will not fire after the first successful CMS
      // collection because the stats should then be valid.
      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
        if (Verbose && PrintGCDetails) {
          gclog_or_tty->print_cr(
            " CMSCollector: collect for bootstrapping statistics:"
            " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
            _bootstrap_occupancy);
        }
        return true;
      }
    }
  }

  // Otherwise, we start a collection cycle if the
  // old gen wants a collection cycle started. Each may use
  // an appropriate criterion for making this decision.
  // XXX We need to make sure that the gen expansion
  // criterion dovetails well with this. XXX NEED TO FIX THIS
  if (_cmsGen->should_concurrent_collect()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMS old gen initiated");
    }
    return true;
  }

  // We start a collection if we believe an incremental collection may fail;
  // this is not likely to be productive in practice because it's probably too
  // late anyway.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->collector_policy()->is_generation_policy(),
         "You may want to check the correctness of the following");
  if (gch->incremental_collection_will_fail(true /* consult_young */)) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
    }
    return true;
  }

  if (MetaspaceGC::should_concurrent_collect()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
    }
    return true;
  }

  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
  if (CMSTriggerInterval >= 0) {
    if (CMSTriggerInterval == 0) {
      // Trigger always
      return true;
    }

    // Check the CMS time since begin (we do not check the stats validity
    // as we want to be able to trigger the first CMS cycle as well)
    if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
      if (Verbose && PrintGCDetails) {
        if (stats().valid()) {
          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
                                 stats().cms_time_since_begin());
        } else {
          gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
        }
      }
      return true;
    }
  }

  return false;
}

void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }

// Clear _expansion_cause fields of constituent generations
void CMSCollector::clear_expansion_cause() {
  _cmsGen->clear_expansion_cause();
}
1287 1287
1288 1288 // We should be conservative in starting a collection cycle. To
1289 1289 // start too eagerly runs the risk of collecting too often in the
1290 1290 // extreme. To collect too rarely falls back on full collections,
1291 1291 // which works, even if not optimal in terms of concurrent work.
1292 1292 // As a workaround for collecting too eagerly, use the flag
1293 1293 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
1294 1294 // giving the user an easily understandable way of controlling the
1295 1295 // collections.
1296 1296 // We want to start a new collection cycle if any of the following
1297 1297 // conditions hold:
1298 1298 // . our current occupancy exceeds the configured initiating occupancy
1299 1299 // for this generation, or
1300 1300 // . we recently needed to expand this space and have not, since that
1301 1301 // expansion, done a collection of this generation, or
1302 1302 // . the underlying space believes that it may be a good idea to initiate
1303 1303 // a concurrent collection (this may be based on criteria such as the
1304 1304 // following: the space uses linear allocation and linear allocation is
1305 1305 // going to fail, or there is believed to be excessive fragmentation in
1306 1306 // the generation, etc... or ...
1307 1307 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1308 1308 // the case of the old generation; see CR 6543076):
1309 1309 // we may be approaching a point at which allocation requests may fail because
1310 1310 // we will be out of sufficient free space given allocation rate estimates.]
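// For illustration only (the value here is an example, not asserted by this
// code): occupancy() and initiating_occupancy() are fractions in [0, 1], so
// with -XX:CMSInitiatingOccupancyFraction=75 the first criterion above fires
// once the generation is more than 75% full.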
1311 1311 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1312 1312
1313 1313 assert_lock_strong(freelistLock());
1314 1314 if (occupancy() > initiating_occupancy()) {
1315 1315 if (PrintGCDetails && Verbose) {
1316 1316 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ",
1317 1317 short_name(), occupancy(), initiating_occupancy());
1318 1318 }
1319 1319 return true;
1320 1320 }
1321 1321 if (UseCMSInitiatingOccupancyOnly) {
1322 1322 return false;
1323 1323 }
1324 1324 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1325 1325 if (PrintGCDetails && Verbose) {
1326 1326 gclog_or_tty->print(" %s: collect because expanded for allocation ",
1327 1327 short_name());
1328 1328 }
1329 1329 return true;
1330 1330 }
1331 1331 if (_cmsSpace->should_concurrent_collect()) {
1332 1332 if (PrintGCDetails && Verbose) {
1333 1333 gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1334 1334 short_name());
1335 1335 }
1336 1336 return true;
1337 1337 }
1338 1338 return false;
1339 1339 }
1340 1340
1341 1341 void ConcurrentMarkSweepGeneration::collect(bool full,
1342 1342 bool clear_all_soft_refs,
1343 1343 size_t size,
1344 1344 bool tlab)
1345 1345 {
1346 1346 collector()->collect(full, clear_all_soft_refs, size, tlab);
1347 1347 }
1348 1348
1349 1349 void CMSCollector::collect(bool full,
1350 1350 bool clear_all_soft_refs,
1351 1351 size_t size,
1352 1352 bool tlab)
1353 1353 {
1354 1354 // The following "if" branch is present for defensive reasons.
1355 1355 // In the current uses of this interface, it can be replaced with:
1356 1356   // assert(!GC_locker::is_active(), "Can't be called otherwise");
1357 1357 // But I am not placing that assert here to allow future
1358 1358 // generality in invoking this interface.
1359 1359 if (GC_locker::is_active()) {
1360 1360 // A consistency test for GC_locker
1361 1361 assert(GC_locker::needs_gc(), "Should have been set already");
1362 1362 // Skip this foreground collection, instead
1363 1363 // expanding the heap if necessary.
1364 1364 // Need the free list locks for the call to free() in compute_new_size()
1365 1365 compute_new_size();
1366 1366 return;
1367 1367 }
1368 1368 acquire_control_and_collect(full, clear_all_soft_refs);
1369 1369 }
1370 1370
1371 1371 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1372 1372 GenCollectedHeap* gch = GenCollectedHeap::heap();
1373 1373 unsigned int gc_count = gch->total_full_collections();
1374 1374 if (gc_count == full_gc_count) {
1375 1375 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1376 1376 _full_gc_requested = true;
1377 1377 _full_gc_cause = cause;
1378 1378 CGC_lock->notify(); // nudge CMS thread
1379 1379 } else {
1380 1380 assert(gc_count > full_gc_count, "Error: causal loop");
1381 1381 }
1382 1382 }
1383 1383
1384 1384 bool CMSCollector::is_external_interruption() {
1385 1385 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1386 1386 return GCCause::is_user_requested_gc(cause) ||
1387 1387 GCCause::is_serviceability_requested_gc(cause);
1388 1388 }
1389 1389
1390 1390 void CMSCollector::report_concurrent_mode_interruption() {
1391 1391 if (is_external_interruption()) {
1392 1392 if (PrintGCDetails) {
1393 1393 gclog_or_tty->print(" (concurrent mode interrupted)");
1394 1394 }
1395 1395 } else {
1396 1396 if (PrintGCDetails) {
1397 1397 gclog_or_tty->print(" (concurrent mode failure)");
1398 1398 }
1399 1399 _gc_tracer_cm->report_concurrent_mode_failure();
1400 1400 }
1401 1401 }
1402 1402
1403 1403
1404 1404 // The foreground and background collectors need to coordinate in order
1405 1405 // to make sure that they do not mutually interfere with CMS collections.
1406 1406 // When a background collection is active,
1407 1407 // the foreground collector may need to take over (preempt) and
1408 1408 // synchronously complete an ongoing collection. Depending on the
1409 1409 // frequency of the background collections and the heap usage
1410 1410 // of the application, this preemption can be seldom or frequent.
1411 1411 // There are only certain
1412 1412 // points in the background collection at which the "collection-baton"
1413 1413 // can be passed to the foreground collector.
1414 1414 //
1415 1415 // The foreground collector will wait for the baton before
1416 1416 // starting any part of the collection. The foreground collector
1417 1417 // will only wait at one location.
1418 1418 //
1419 1419 // The background collector will yield the baton before starting a new
1420 1420 // phase of the collection (e.g., before initial marking, marking from roots,
1421 1421 // precleaning, final re-mark, sweep etc.) This is normally done at the head
1422 1422 // of the loop which switches the phases. The background collector does some
1423 1423 // of the phases (initial mark, final re-mark) with the world stopped.
1424 1424 // Because of locking involved in stopping the world,
1425 1425 // the foreground collector should not block waiting for the background
1426 1426 // collector when it is doing a stop-the-world phase. The background
1427 1427 // collector will yield the baton at an additional point just before
1428 1428 // it enters a stop-the-world phase. Once the world is stopped, the
1429 1429 // background collector checks the phase of the collection. If the
1430 1430 // phase has not changed, it proceeds with the collection. If the
1431 1431 // phase has changed, it skips that phase of the collection. See
1432 1432 // the comments on the use of the Heap_lock in collect_in_background().
1433 1433 //
1434 1434 // Variables used in baton passing.
1435 1435 // _foregroundGCIsActive - Set to true by the foreground collector when
1436 1436 // it wants the baton. The foreground clears it when it has finished
1437 1437 // the collection.
1438 1438 // _foregroundGCShouldWait - Set to true by the background collector
1439 1439 // when it is running. The foreground collector waits while
1440 1440 // _foregroundGCShouldWait is true.
1441 1441 // CGC_lock - monitor used to protect access to the above variables
1442 1442 // and to notify the foreground and background collectors.
1443 1443 // _collectorState - current state of the CMS collection.
1444 1444 //
1445 1445 // The foreground collector
1446 1446 // acquires the CGC_lock
1447 1447 // sets _foregroundGCIsActive
1448 1448 // waits on the CGC_lock for _foregroundGCShouldWait to be false
1449 1449 // various locks acquired in preparation for the collection
1450 1450 // are released so as not to block the background collector
1451 1451 // that is in the midst of a collection
1452 1452 // proceeds with the collection
1453 1453 // clears _foregroundGCIsActive
1454 1454 // returns
1455 1455 //
1456 1456 // The background collector in a loop iterating on the phases of the
1457 1457 // collection
1458 1458 // acquires the CGC_lock
1459 1459 // sets _foregroundGCShouldWait
1460 1460 // if _foregroundGCIsActive is set
1461 1461 //     clears _foregroundGCShouldWait, notifies CGC_lock
1462 1462 //     waits on CGC_lock for _foregroundGCIsActive to become false
1463 1463 // and exits the loop.
1464 1464 // otherwise
1465 1465 // proceed with that phase of the collection
1466 1466 // if the phase is a stop-the-world phase,
1467 1467 // yield the baton once more just before enqueueing
1468 1468 // the stop-world CMS operation (executed by the VM thread).
1469 1469 // returns after all phases of the collection are done
1470 1470 //
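// An illustrative interleaving of the protocol above (a sketch only; the
// authoritative statement is the pseudocode above and the code below):
//
//   Background (CMS thread)              Foreground (VM thread)
//   -----------------------              ----------------------
//   sets _foregroundGCShouldWait         sets _foregroundGCIsActive,
//   works on phase N                     waits on CGC_lock
//   reaches phase boundary, sees
//   _foregroundGCIsActive: clears
//   _foregroundGCShouldWait, notifies
//   CGC_lock, waits for it to clear      wakes, completes the collection,
//                                        clears _foregroundGCIsActive,
//   wakes, observes Idling state,        notifies CGC_lock
//   exits the phase loop
//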
1471 1471
1472 1472 void CMSCollector::acquire_control_and_collect(bool full,
1473 1473 bool clear_all_soft_refs) {
1474 1474 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1475 1475 assert(!Thread::current()->is_ConcurrentGC_thread(),
1476 1476 "shouldn't try to acquire control from self!");
1477 1477
1478 1478 // Start the protocol for acquiring control of the
1479 1479 // collection from the background collector (aka CMS thread).
1480 1480 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1481 1481 "VM thread should have CMS token");
1482 1482 // Remember the possibly interrupted state of an ongoing
1483 1483 // concurrent collection
1484 1484 CollectorState first_state = _collectorState;
1485 1485
1486 1486 // Signal to a possibly ongoing concurrent collection that
1487 1487 // we want to do a foreground collection.
1488 1488 _foregroundGCIsActive = true;
1489 1489
1490 1490   // Release locks and wait for a notify from the background collector;
1491 1491   // releasing the locks is only necessary for phases which
1492 1492   // yield to improve the granularity of the collection.
1493 1493 assert_lock_strong(bitMapLock());
1494 1494 // We need to lock the Free list lock for the space that we are
1495 1495 // currently collecting.
1496 1496 assert(haveFreelistLocks(), "Must be holding free list locks");
1497 1497 bitMapLock()->unlock();
1498 1498 releaseFreelistLocks();
1499 1499 {
1500 1500 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1501 1501 if (_foregroundGCShouldWait) {
1502 1502       // We are going to be waiting for action from the CMS thread;
1503 1503 // it had better not be gone (for instance at shutdown)!
1504 1504 assert(ConcurrentMarkSweepThread::cmst() != NULL,
1505 1505 "CMS thread must be running");
1506 1506 // Wait here until the background collector gives us the go-ahead
1507 1507 ConcurrentMarkSweepThread::clear_CMS_flag(
1508 1508 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1509 1509 // Get a possibly blocked CMS thread going:
1510 1510 // Note that we set _foregroundGCIsActive true above,
1511 1511 // without protection of the CGC_lock.
1512 1512 CGC_lock->notify();
1513 1513 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1514 1514 "Possible deadlock");
1515 1515 while (_foregroundGCShouldWait) {
1516 1516 // wait for notification
1517 1517 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1518 1518       // Possibility of delay/starvation here, since the CMS token does
1519 1519       // not know to give priority to the VM thread? Actually, I think
1520 1520 // there wouldn't be any delay/starvation, but the proof of
1521 1521 // that "fact" (?) appears non-trivial. XXX 20011219YSR
1522 1522 }
1523 1523 ConcurrentMarkSweepThread::set_CMS_flag(
1524 1524 ConcurrentMarkSweepThread::CMS_vm_has_token);
1525 1525 }
1526 1526 }
1527 1527 // The CMS_token is already held. Get back the other locks.
1528 1528 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1529 1529 "VM thread should have CMS token");
1530 1530 getFreelistLocks();
1531 1531 bitMapLock()->lock_without_safepoint_check();
1532 1532 if (TraceCMSState) {
1533 1533 gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1534 1534 INTPTR_FORMAT " with first state %d", p2i(Thread::current()), first_state);
1535 1535 gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
1536 1536 }
1537 1537
1538 1538 // Inform cms gen if this was due to partial collection failing.
1539 1539 // The CMS gen may use this fact to determine its expansion policy.
1540 1540 GenCollectedHeap* gch = GenCollectedHeap::heap();
1541 1541 if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1542 1542 assert(!_cmsGen->incremental_collection_failed(),
1543 1543 "Should have been noticed, reacted to and cleared");
1544 1544 _cmsGen->set_incremental_collection_failed();
1545 1545 }
1546 1546
1547 1547 if (first_state > Idling) {
1548 1548 report_concurrent_mode_interruption();
1549 1549 }
1550 1550
1551 1551 set_did_compact(true);
1552 1552
1553 1553 // If the collection is being acquired from the background
1554 1554 // collector, there may be references on the discovered
1555 1555 // references lists. Abandon those references, since some
1556 1556 // of them may have become unreachable after concurrent
1557 1557 // discovery; the STW compacting collector will redo discovery
1558 1558 // more precisely, without being subject to floating garbage.
1559 1559 // Leaving otherwise unreachable references in the discovered
1560 1560 // lists would require special handling.
1561 1561 ref_processor()->disable_discovery();
1562 1562 ref_processor()->abandon_partial_discovery();
1563 1563 ref_processor()->verify_no_references_recorded();
1564 1564
1565 1565 if (first_state > Idling) {
1566 1566 save_heap_summary();
1567 1567 }
1568 1568
1569 1569 do_compaction_work(clear_all_soft_refs);
1570 1570
1571 1571 // Has the GC time limit been exceeded?
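  // A reading aid (no new behavior): max_eden_size approximates eden's
  // maximum size as the young generation's max capacity less the
  // capacities of both survivor spaces.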
1572 1572 size_t max_eden_size = _young_gen->max_capacity() -
1573 1573 _young_gen->to()->capacity() -
1574 1574 _young_gen->from()->capacity();
1575 1575 GCCause::Cause gc_cause = gch->gc_cause();
1576 1576 size_policy()->check_gc_overhead_limit(_young_gen->used(),
1577 1577 _young_gen->eden()->used(),
1578 1578 _cmsGen->max_capacity(),
1579 1579 max_eden_size,
1580 1580 full,
1581 1581 gc_cause,
1582 1582 gch->collector_policy());
1583 1583
1584 1584 // Reset the expansion cause, now that we just completed
1585 1585 // a collection cycle.
1586 1586 clear_expansion_cause();
1587 1587 _foregroundGCIsActive = false;
1588 1588 return;
1589 1589 }
1590 1590
1591 1591 // Resize the tenured generation
1592 1592 // after obtaining the free list locks for the
1593 1593 // two generations.
1594 1594 void CMSCollector::compute_new_size() {
1595 1595 assert_locked_or_safepoint(Heap_lock);
1596 1596 FreelistLocker z(this);
1597 1597 MetaspaceGC::compute_new_size();
1598 1598 _cmsGen->compute_new_size_free_list();
1599 1599 }
1600 1600
1601 1601 // A work method used by the foreground collector to do
1602 1602 // a mark-sweep-compact.
1603 1603 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1604 1604 GenCollectedHeap* gch = GenCollectedHeap::heap();
1605 1605
1606 1606 STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1607 1607 gc_timer->register_gc_start();
1608 1608
1609 1609 SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1610 1610 gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1611 1611
1612 1612 GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
1613 1613
1614 1614 // Temporarily widen the span of the weak reference processing to
1615 1615 // the entire heap.
1616 1616 MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1617 1617 ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1618 1618 // Temporarily, clear the "is_alive_non_header" field of the
1619 1619 // reference processor.
1620 1620 ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1621 1621 // Temporarily make reference _processing_ single threaded (non-MT).
1622 1622 ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1623 1623 // Temporarily make refs discovery atomic
1624 1624 ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1625 1625 // Temporarily make reference _discovery_ single threaded (non-MT)
1626 1626 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
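  // Note (descriptive only): the rp_mut_* objects above are stack-allocated
  // RAII mutators; each restores the reference processor's previous setting
  // in its destructor when this method returns.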
1627 1627
1628 1628 ref_processor()->set_enqueuing_is_done(false);
1629 1629 ref_processor()->enable_discovery();
1630 1630 ref_processor()->setup_policy(clear_all_soft_refs);
1631 1631 // If an asynchronous collection finishes, the _modUnionTable is
1632 1632   // all clear.  If we are taking over the collection from an asynchronous
1633 1633   // collection, clear the _modUnionTable.
1634 1634 assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1635 1635 "_modUnionTable should be clear if the baton was not passed");
1636 1636 _modUnionTable.clear_all();
1637 1637 assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
1638 1638          "mod union for klasses should be clear if the baton was not passed");
1639 1639 _ct->klass_rem_set()->clear_mod_union();
1640 1640
1641 1641 // We must adjust the allocation statistics being maintained
1642 1642 // in the free list space. We do so by reading and clearing
1643 1643 // the sweep timer and updating the block flux rate estimates below.
1644 1644 assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1645 1645 if (_inter_sweep_timer.is_active()) {
1646 1646 _inter_sweep_timer.stop();
1647 1647 // Note that we do not use this sample to update the _inter_sweep_estimate.
1648 1648 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1649 1649 _inter_sweep_estimate.padded_average(),
1650 1650 _intra_sweep_estimate.padded_average());
1651 1651 }
1652 1652
1653 1653 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
1654 1654 ref_processor(), clear_all_soft_refs);
1655 1655 #ifdef ASSERT
1656 1656 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1657 1657 size_t free_size = cms_space->free();
1658 1658 assert(free_size ==
1659 1659 pointer_delta(cms_space->end(), cms_space->compaction_top())
1660 1660 * HeapWordSize,
1661 1661 "All the free space should be compacted into one chunk at top");
1662 1662 assert(cms_space->dictionary()->total_chunk_size(
1663 1663 debug_only(cms_space->freelistLock())) == 0 ||
1664 1664 cms_space->totalSizeInIndexedFreeLists() == 0,
1665 1665 "All the free space should be in a single chunk");
1666 1666 size_t num = cms_space->totalCount();
1667 1667 assert((free_size == 0 && num == 0) ||
1668 1668 (free_size > 0 && (num == 1 || num == 2)),
1669 1669 "There should be at most 2 free chunks after compaction");
1670 1670 #endif // ASSERT
1671 1671 _collectorState = Resetting;
1672 1672 assert(_restart_addr == NULL,
1673 1673 "Should have been NULL'd before baton was passed");
1674 1674 reset(false /* == !concurrent */);
1675 1675 _cmsGen->reset_after_compaction();
1676 1676 _concurrent_cycles_since_last_unload = 0;
1677 1677
1678 1678 // Clear any data recorded in the PLAB chunk arrays.
1679 1679 if (_survivor_plab_array != NULL) {
1680 1680 reset_survivor_plab_arrays();
1681 1681 }
1682 1682
1683 1683 // Adjust the per-size allocation stats for the next epoch.
1684 1684 _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1685 1685 // Restart the "inter sweep timer" for the next epoch.
1686 1686 _inter_sweep_timer.reset();
1687 1687 _inter_sweep_timer.start();
1688 1688
1689 1689 gc_timer->register_gc_end();
1690 1690
1691 1691 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1692 1692
1693 1693 // For a mark-sweep-compact, compute_new_size() will be called
1694 1694 // in the heap's do_collection() method.
1695 1695 }
1696 1696
1697 1697 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1698 1698 ContiguousSpace* eden_space = _young_gen->eden();
1699 1699 ContiguousSpace* from_space = _young_gen->from();
1700 1700 ContiguousSpace* to_space = _young_gen->to();
1701 1701 // Eden
1702 1702 if (_eden_chunk_array != NULL) {
1703 1703 gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1704 1704 p2i(eden_space->bottom()), p2i(eden_space->top()),
1705 1705 p2i(eden_space->end()), eden_space->capacity());
1706 1706 gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
1707 1707 "_eden_chunk_capacity=" SIZE_FORMAT,
1708 1708 _eden_chunk_index, _eden_chunk_capacity);
1709 1709 for (size_t i = 0; i < _eden_chunk_index; i++) {
1710 1710 gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1711 1711 i, p2i(_eden_chunk_array[i]));
1712 1712 }
1713 1713 }
1714 1714 // Survivor
1715 1715 if (_survivor_chunk_array != NULL) {
1716 1716 gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1717 1717 p2i(from_space->bottom()), p2i(from_space->top()),
1718 1718 p2i(from_space->end()), from_space->capacity());
1719 1719 gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
1720 1720 "_survivor_chunk_capacity=" SIZE_FORMAT,
1721 1721 _survivor_chunk_index, _survivor_chunk_capacity);
1722 1722 for (size_t i = 0; i < _survivor_chunk_index; i++) {
1723 1723 gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1724 1724 i, p2i(_survivor_chunk_array[i]));
1725 1725 }
1726 1726 }
1727 1727 }
1728 1728
1729 1729 void CMSCollector::getFreelistLocks() const {
1730 1730 // Get locks for all free lists in all generations that this
1731 1731 // collector is responsible for
1732 1732 _cmsGen->freelistLock()->lock_without_safepoint_check();
1733 1733 }
1734 1734
1735 1735 void CMSCollector::releaseFreelistLocks() const {
1736 1736 // Release locks for all free lists in all generations that this
1737 1737 // collector is responsible for
1738 1738 _cmsGen->freelistLock()->unlock();
1739 1739 }
1740 1740
1741 1741 bool CMSCollector::haveFreelistLocks() const {
1742 1742 // Check locks for all free lists in all generations that this
1743 1743 // collector is responsible for
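  // (Used only from within asserts; the PRODUCT_ONLY below enforces this.)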
1744 1744 assert_lock_strong(_cmsGen->freelistLock());
1745 1745 PRODUCT_ONLY(ShouldNotReachHere());
1746 1746 return true;
1747 1747 }
1748 1748
1749 1749 // A utility class that is used by the CMS collector to
1750 1750 // temporarily "release" the foreground collector from its
1751 1751 // usual obligation to wait for the background collector to
1752 1752 // complete an ongoing phase before proceeding.
1753 1753 class ReleaseForegroundGC: public StackObj {
1754 1754 private:
1755 1755 CMSCollector* _c;
1756 1756 public:
1757 1757 ReleaseForegroundGC(CMSCollector* c) : _c(c) {
1758 1758 assert(_c->_foregroundGCShouldWait, "Else should not need to call");
1759 1759 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1760 1760 // allow a potentially blocked foreground collector to proceed
1761 1761 _c->_foregroundGCShouldWait = false;
1762 1762 if (_c->_foregroundGCIsActive) {
1763 1763 CGC_lock->notify();
1764 1764 }
1765 1765 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1766 1766 "Possible deadlock");
1767 1767 }
1768 1768
1769 1769 ~ReleaseForegroundGC() {
1770 1770 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
1771 1771 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1772 1772 _c->_foregroundGCShouldWait = true;
1773 1773 }
1774 1774 };
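// Typical use (see the InitialMarking, FinalMarking and Resizing cases in
// collect_in_background() below): scope the stop-world work inside a
// ReleaseForegroundGC so that a blocked foreground collection may proceed
// in the meantime, e.g.
//
//   {
//     ReleaseForegroundGC x(this);   // FG may take the baton while in scope
//     VM_CMS_Initial_Mark initial_mark_op(this);
//     VMThread::execute(&initial_mark_op);
//   }                                // destructor re-asserts _foregroundGCShouldWait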
1775 1775
1776 1776 void CMSCollector::collect_in_background(GCCause::Cause cause) {
1777 1777 assert(Thread::current()->is_ConcurrentGC_thread(),
1778 1778 "A CMS asynchronous collection is only allowed on a CMS thread.");
1779 1779
1780 1780 GenCollectedHeap* gch = GenCollectedHeap::heap();
1781 1781 {
1782 1782 bool safepoint_check = Mutex::_no_safepoint_check_flag;
1783 1783 MutexLockerEx hl(Heap_lock, safepoint_check);
1784 1784 FreelistLocker fll(this);
1785 1785 MutexLockerEx x(CGC_lock, safepoint_check);
1786 1786 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
1787 1787 // The foreground collector is active or we're
1788 1788 // not using asynchronous collections. Skip this
1789 1789 // background collection.
1790 1790 assert(!_foregroundGCShouldWait, "Should be clear");
1791 1791 return;
1792 1792 } else {
1793 1793 assert(_collectorState == Idling, "Should be idling before start.");
1794 1794 _collectorState = InitialMarking;
1795 1795 register_gc_start(cause);
1796 1796 // Reset the expansion cause, now that we are about to begin
1797 1797 // a new cycle.
1798 1798 clear_expansion_cause();
1799 1799
1800 1800       // A concurrent collection is starting, so clear the MetaspaceGC
1801 1801       // flag now; it is cleared again after the collection completes.
1802 1802 MetaspaceGC::set_should_concurrent_collect(false);
1803 1803 }
1804 1804 // Decide if we want to enable class unloading as part of the
1805 1805 // ensuing concurrent GC cycle.
1806 1806 update_should_unload_classes();
1807 1807 _full_gc_requested = false; // acks all outstanding full gc requests
1808 1808 _full_gc_cause = GCCause::_no_gc;
1809 1809 // Signal that we are about to start a collection
1810 1810 gch->increment_total_full_collections(); // ... starting a collection cycle
1811 1811 _collection_count_start = gch->total_full_collections();
1812 1812 }
1813 1813
1814 1814 // Used for PrintGC
1815 1815 size_t prev_used;
1816 1816 if (PrintGC && Verbose) {
1817 1817 prev_used = _cmsGen->used();
1818 1818 }
1819 1819
1820 1820 // The change of the collection state is normally done at this level;
1821 1821 // the exceptions are phases that are executed while the world is
1822 1822 // stopped. For those phases the change of state is done while the
1823 1823 // world is stopped. For baton passing purposes this allows the
1824 1824 // background collector to finish the phase and change state atomically.
1825 1825 // The foreground collector cannot wait on a phase that is done
1826 1826 // while the world is stopped because the foreground collector already
1827 1827 // has the world stopped and would deadlock.
1828 1828 while (_collectorState != Idling) {
1829 1829 if (TraceCMSState) {
1830 1830 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
1831 1831 p2i(Thread::current()), _collectorState);
1832 1832 }
1833 1833 // The foreground collector
1834 1834 // holds the Heap_lock throughout its collection.
1835 1835 // holds the CMS token (but not the lock)
1836 1836 // except while it is waiting for the background collector to yield.
1837 1837 //
1838 1838 // The foreground collector should be blocked (not for long)
1839 1839 // if the background collector is about to start a phase
1840 1840 // executed with world stopped. If the background
1841 1841 // collector has already started such a phase, the
1842 1842 // foreground collector is blocked waiting for the
1843 1843 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking)
1844 1844 // are executed in the VM thread.
1845 1845 //
1846 1846 // The locking order is
1847 1847 // PendingListLock (PLL) -- if applicable (FinalMarking)
1848 1848 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue())
1849 1849 // CMS token (claimed in
1850 1850 // stop_world_and_do() -->
1851 1851 // safepoint_synchronize() -->
1852 1852 // CMSThread::synchronize())
1853 1853
1854 1854 {
1855 1855 // Check if the FG collector wants us to yield.
1856 1856 CMSTokenSync x(true); // is cms thread
1857 1857 if (waitForForegroundGC()) {
1858 1858 // We yielded to a foreground GC, nothing more to be
1859 1859 // done this round.
1860 1860 assert(_foregroundGCShouldWait == false, "We set it to false in "
1861 1861 "waitForForegroundGC()");
1862 1862 if (TraceCMSState) {
1863 1863 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1864 1864 " exiting collection CMS state %d",
1865 1865 p2i(Thread::current()), _collectorState);
1866 1866 }
1867 1867 return;
1868 1868 } else {
1869 1869 // The background collector can run but check to see if the
1870 1870 // foreground collector has done a collection while the
1871 1871 // background collector was waiting to get the CGC_lock
1872 1872 // above. If yes, break so that _foregroundGCShouldWait
1873 1873 // is cleared before returning.
1874 1874 if (_collectorState == Idling) {
1875 1875 break;
1876 1876 }
1877 1877 }
1878 1878 }
1879 1879
1880 1880 assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1881 1881 "should be waiting");
1882 1882
1883 1883 switch (_collectorState) {
1884 1884 case InitialMarking:
1885 1885 {
1886 1886 ReleaseForegroundGC x(this);
1887 1887 stats().record_cms_begin();
1888 1888 VM_CMS_Initial_Mark initial_mark_op(this);
1889 1889 VMThread::execute(&initial_mark_op);
1890 1890 }
1891 1891 // The collector state may be any legal state at this point
1892 1892 // since the background collector may have yielded to the
1893 1893 // foreground collector.
1894 1894 break;
1895 1895 case Marking:
1896 1896 // initial marking in checkpointRootsInitialWork has been completed
1897 1897 if (markFromRoots()) { // we were successful
1898 1898 assert(_collectorState == Precleaning, "Collector state should "
1899 1899 "have changed");
1900 1900 } else {
1901 1901 assert(_foregroundGCIsActive, "Internal state inconsistency");
1902 1902 }
1903 1903 break;
1904 1904 case Precleaning:
1905 1905 // marking from roots in markFromRoots has been completed
1906 1906 preclean();
1907 1907 assert(_collectorState == AbortablePreclean ||
1908 1908 _collectorState == FinalMarking,
1909 1909 "Collector state should have changed");
1910 1910 break;
1911 1911 case AbortablePreclean:
1912 1912 abortable_preclean();
1913 1913 assert(_collectorState == FinalMarking, "Collector state should "
1914 1914 "have changed");
1915 1915 break;
1916 1916 case FinalMarking:
1917 1917 {
1918 1918 ReleaseForegroundGC x(this);
1919 1919
1920 1920 VM_CMS_Final_Remark final_remark_op(this);
1921 1921 VMThread::execute(&final_remark_op);
1922 1922 }
1923 1923 assert(_foregroundGCShouldWait, "block post-condition");
1924 1924 break;
1925 1925 case Sweeping:
1926 1926 // final marking in checkpointRootsFinal has been completed
1927 1927 sweep();
1928 1928 assert(_collectorState == Resizing, "Collector state change "
1929 1929 "to Resizing must be done under the free_list_lock");
1930 1930
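        // Fall through: sweep() has already set _collectorState to Resizing.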
1931 1931 case Resizing: {
1932 1932 // Sweeping has been completed...
1933 1933 // At this point the background collection has completed.
1934 1934 // Don't move the call to compute_new_size() down
1935 1935 // into code that might be executed if the background
1936 1936 // collection was preempted.
1937 1937 {
1938 1938 ReleaseForegroundGC x(this); // unblock FG collection
1939 1939 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
1940 1940 CMSTokenSync z(true); // not strictly needed.
1941 1941 if (_collectorState == Resizing) {
1942 1942 compute_new_size();
1943 1943 save_heap_summary();
1944 1944 _collectorState = Resetting;
1945 1945 } else {
1946 1946 assert(_collectorState == Idling, "The state should only change"
1947 1947 " because the foreground collector has finished the collection");
1948 1948 }
1949 1949 }
1950 1950 break;
1951 1951 }
1952 1952 case Resetting:
1953 1953 // CMS heap resizing has been completed
1954 1954       reset(true /* concurrent */);
1955 1955 assert(_collectorState == Idling, "Collector state should "
1956 1956 "have changed");
1957 1957
1958 1958 MetaspaceGC::set_should_concurrent_collect(false);
1959 1959
1960 1960 stats().record_cms_end();
1961 1961 // Don't move the concurrent_phases_end() and compute_new_size()
1962 1962 // calls to here because a preempted background collection
1963 1963         // has its state set to "Resetting".
1964 1964 break;
1965 1965 case Idling:
1966 1966 default:
1967 1967 ShouldNotReachHere();
1968 1968 break;
1969 1969 }
1970 1970 if (TraceCMSState) {
1971 1971 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
1972 1972 p2i(Thread::current()), _collectorState);
1973 1973 }
1974 1974 assert(_foregroundGCShouldWait, "block post-condition");
1975 1975 }
1976 1976
1977 1977 // Should this be in gc_epilogue?
1978 1978 collector_policy()->counters()->update_counters();
1979 1979
1980 1980 {
1981 1981 // Clear _foregroundGCShouldWait and, in the event that the
1982 1982 // foreground collector is waiting, notify it, before
1983 1983 // returning.
1984 1984 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1985 1985 _foregroundGCShouldWait = false;
1986 1986 if (_foregroundGCIsActive) {
1987 1987 CGC_lock->notify();
1988 1988 }
1989 1989 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1990 1990 "Possible deadlock");
1991 1991 }
1992 1992 if (TraceCMSState) {
1993 1993 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1994 1994 " exiting collection CMS state %d",
1995 1995 p2i(Thread::current()), _collectorState);
1996 1996 }
1997 1997 if (PrintGC && Verbose) {
1998 1998 _cmsGen->print_heap_change(prev_used);
1999 1999 }
2000 2000 }
2001 2001
2002 2002 void CMSCollector::register_gc_start(GCCause::Cause cause) {
2003 2003 _cms_start_registered = true;
2004 2004 _gc_timer_cm->register_gc_start();
2005 2005 _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
2006 2006 }
2007 2007
2008 2008 void CMSCollector::register_gc_end() {
2009 2009 if (_cms_start_registered) {
2010 2010 report_heap_summary(GCWhen::AfterGC);
2011 2011
2012 2012 _gc_timer_cm->register_gc_end();
2013 2013 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
2014 2014 _cms_start_registered = false;
2015 2015 }
2016 2016 }
2017 2017
2018 2018 void CMSCollector::save_heap_summary() {
2019 2019 GenCollectedHeap* gch = GenCollectedHeap::heap();
2020 2020 _last_heap_summary = gch->create_heap_summary();
2021 2021 _last_metaspace_summary = gch->create_metaspace_summary();
2022 2022 }
2023 2023
2024 2024 void CMSCollector::report_heap_summary(GCWhen::Type when) {
2025 2025 _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
2026 2026 _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
2027 2027 }
2028 2028
2029 2029 bool CMSCollector::waitForForegroundGC() {
2030 2030 bool res = false;
2031 2031 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2032 2032 "CMS thread should have CMS token");
2033 2033 // Block the foreground collector until the
2034 2034   // background collector decides whether to
2035 2035 // yield.
2036 2036 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2037 2037 _foregroundGCShouldWait = true;
2038 2038 if (_foregroundGCIsActive) {
2039 2039 // The background collector yields to the
2040 2040 // foreground collector and returns a value
2041 2041 // indicating that it has yielded. The foreground
2042 2042 // collector can proceed.
2043 2043 res = true;
2044 2044 _foregroundGCShouldWait = false;
2045 2045 ConcurrentMarkSweepThread::clear_CMS_flag(
2046 2046 ConcurrentMarkSweepThread::CMS_cms_has_token);
2047 2047 ConcurrentMarkSweepThread::set_CMS_flag(
2048 2048 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2049 2049 // Get a possibly blocked foreground thread going
2050 2050 CGC_lock->notify();
2051 2051 if (TraceCMSState) {
2052 2052 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2053 2053 p2i(Thread::current()), _collectorState);
2054 2054 }
2055 2055 while (_foregroundGCIsActive) {
2056 2056 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2057 2057 }
2058 2058 ConcurrentMarkSweepThread::set_CMS_flag(
2059 2059 ConcurrentMarkSweepThread::CMS_cms_has_token);
2060 2060 ConcurrentMarkSweepThread::clear_CMS_flag(
2061 2061 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2062 2062 }
2063 2063 if (TraceCMSState) {
2064 2064 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2065 2065 p2i(Thread::current()), _collectorState);
2066 2066 }
2067 2067 return res;
2068 2068 }
2069 2069
2070 2070 // Because of the need to lock the free lists and other structures in
2071 2071 // the collector, common to all the generations that the collector is
2072 2072 // collecting, we need the gc_prologues of individual CMS generations
2073 2073 // to delegate to their collector. It may have been simpler had the
2074 2074 // current infrastructure allowed one to call a prologue on a
2075 2075 // collector. In the absence of that we have the generation's
2076 2076 // prologue delegate to the collector, which delegates back
2077 2077 // some "local" work to a worker method in the individual generations
2078 2078 // that it's responsible for collecting, while itself doing any
2079 2079 // work common to all generations it's responsible for. A similar
2080 2080 // comment applies to the gc_epilogue()'s.
2081 2081 // The role of the variable _between_prologue_and_epilogue is to
2082 2082 // enforce the invocation protocol.
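// The resulting call chain, summarizing the code below (no new protocol):
//
//   GenCollectedHeap (world stopped)
//     -> ConcurrentMarkSweepGeneration::gc_prologue(full)
//          -> CMSCollector::gc_prologue(full)      // locks, work common to gens
//               -> ConcurrentMarkSweepGeneration::gc_prologue_work(...)
//
// and symmetrically for gc_epilogue()/gc_epilogue_work().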
2083 2083 void CMSCollector::gc_prologue(bool full) {
2084 2084 // Call gc_prologue_work() for the CMSGen
2085 2085 // we are responsible for.
2086 2086
2087 2087 // The following locking discipline assumes that we are only called
2088 2088 // when the world is stopped.
2089 2089 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2090 2090
2091 2091 // The CMSCollector prologue must call the gc_prologues for the
2092 2092 // "generations" that it's responsible
2093 2093 // for.
2094 2094
2095 2095 assert( Thread::current()->is_VM_thread()
2096 2096 || ( CMSScavengeBeforeRemark
2097 2097 && Thread::current()->is_ConcurrentGC_thread()),
2098 2098 "Incorrect thread type for prologue execution");
2099 2099
2100 2100 if (_between_prologue_and_epilogue) {
2101 2101 // We have already been invoked; this is a gc_prologue delegation
2102 2102 // from yet another CMS generation that we are responsible for, just
2103 2103 // ignore it since all relevant work has already been done.
2104 2104 return;
2105 2105 }
2106 2106
2107 2107 // set a bit saying prologue has been called; cleared in epilogue
2108 2108 _between_prologue_and_epilogue = true;
2109 2109 // Claim locks for common data structures, then call gc_prologue_work()
2110 2110 // for each CMSGen.
2111 2111
2112 2112 getFreelistLocks(); // gets free list locks on constituent spaces
2113 2113 bitMapLock()->lock_without_safepoint_check();
2114 2114
2115 2115 // Should call gc_prologue_work() for all cms gens we are responsible for
2116 2116 bool duringMarking = _collectorState >= Marking
2117 2117 && _collectorState < Sweeping;
2118 2118
2119 2119 // The young collections clear the modified oops state, which tells if
2120 2120 // there are any modified oops in the class. The remark phase also needs
2121 2121 // that information. Tell the young collection to save the union of all
2122 2122 // modified klasses.
2123 2123 if (duringMarking) {
2124 2124 _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2125 2125 }
2126 2126
2127 2127 bool registerClosure = duringMarking;
2128 2128
2129 2129 _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2130 2130
2131 2131 if (!full) {
2132 2132 stats().record_gc0_begin();
2133 2133 }
2134 2134 }
2135 2135
2136 2136 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2137 2137
2138 2138 _capacity_at_prologue = capacity();
2139 2139 _used_at_prologue = used();
2140 2140
2141 2141   // Delegate to the CMSCollector, which knows how to coordinate between
2142 2142 // this and any other CMS generations that it is responsible for
2143 2143 // collecting.
2144 2144 collector()->gc_prologue(full);
2145 2145 }
2146 2146
2147 2147 // This is a "private" interface for use by this generation's CMSCollector.
2148 2148 // Not to be called directly by any other entity (for instance,
2149 2149 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2150 2150 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2151 2151 bool registerClosure, ModUnionClosure* modUnionClosure) {
2152 2152 assert(!incremental_collection_failed(), "Shouldn't be set yet");
2153 2153 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2154 2154 "Should be NULL");
2155 2155 if (registerClosure) {
2156 2156 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2157 2157 }
2158 2158 cmsSpace()->gc_prologue();
2159 2159 // Clear stat counters
2160 2160 NOT_PRODUCT(
2161 2161 assert(_numObjectsPromoted == 0, "check");
2162 2162 assert(_numWordsPromoted == 0, "check");
2163 2163 if (Verbose && PrintGC) {
2164 2164       gclog_or_tty->print("Allocated " SIZE_FORMAT " objects, "
2165 2165                           SIZE_FORMAT " bytes concurrently",
2166 2166 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2167 2167 }
2168 2168 _numObjectsAllocated = 0;
2169 2169 _numWordsAllocated = 0;
2170 2170 )
2171 2171 }
2172 2172
2173 2173 void CMSCollector::gc_epilogue(bool full) {
2174 2174 // The following locking discipline assumes that we are only called
2175 2175 // when the world is stopped.
2176 2176 assert(SafepointSynchronize::is_at_safepoint(),
2177 2177 "world is stopped assumption");
2178 2178
2179 2179 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2180 2180   // if linear allocation blocks need to be appropriately marked to allow
2181 2181   // the blocks to be parsable. We also check here whether we need to nudge the
2182 2182 // CMS collector thread to start a new cycle (if it's not already active).
2183 2183 assert( Thread::current()->is_VM_thread()
2184 2184 || ( CMSScavengeBeforeRemark
2185 2185 && Thread::current()->is_ConcurrentGC_thread()),
2186 2186 "Incorrect thread type for epilogue execution");
2187 2187
2188 2188 if (!_between_prologue_and_epilogue) {
2189 2189 // We have already been invoked; this is a gc_epilogue delegation
2190 2190 // from yet another CMS generation that we are responsible for, just
2191 2191 // ignore it since all relevant work has already been done.
2192 2192 return;
2193 2193 }
2194 2194 assert(haveFreelistLocks(), "must have freelist locks");
2195 2195 assert_lock_strong(bitMapLock());
2196 2196
2197 2197 _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2198 2198
2199 2199 _cmsGen->gc_epilogue_work(full);
2200 2200
2201 2201 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2202 2202 // in case sampling was not already enabled, enable it
2203 2203 _start_sampling = true;
2204 2204 }
2205 2205 // reset _eden_chunk_array so sampling starts afresh
2206 2206 _eden_chunk_index = 0;
2207 2207
2208 2208 size_t cms_used = _cmsGen->cmsSpace()->used();
2209 2209
2210 2210 // update performance counters - this uses a special version of
2211 2211 // update_counters() that allows the utilization to be passed as a
2212 2212 // parameter, avoiding multiple calls to used().
2213 2213 //
2214 2214 _cmsGen->update_counters(cms_used);
2215 2215
2216 2216 bitMapLock()->unlock();
2217 2217 releaseFreelistLocks();
2218 2218
2219 2219 if (!CleanChunkPoolAsync) {
2220 2220 Chunk::clean_chunk_pool();
2221 2221 }
2222 2222
2223 2223 set_did_compact(false);
2224 2224 _between_prologue_and_epilogue = false; // ready for next cycle
2225 2225 }
2226 2226
2227 2227 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2228 2228 collector()->gc_epilogue(full);
2229 2229
2230 2230 // Also reset promotion tracking in par gc thread states.
2231 2231 for (uint i = 0; i < ParallelGCThreads; i++) {
2232 2232 _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2233 2233 }
2234 2234 }
2235 2235
2236 2236 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2237 2237 assert(!incremental_collection_failed(), "Should have been cleared");
2238 2238 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2239 2239 cmsSpace()->gc_epilogue();
2240 2240 // Print stat counters
2241 2241 NOT_PRODUCT(
2242 2242 assert(_numObjectsAllocated == 0, "check");
2243 2243 assert(_numWordsAllocated == 0, "check");
2244 2244 if (Verbose && PrintGC) {
2245 2245       gclog_or_tty->print("Promoted " SIZE_FORMAT " objects, "
2246 2246                           SIZE_FORMAT " bytes",
2247 2247 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2248 2248 }
2249 2249 _numObjectsPromoted = 0;
2250 2250 _numWordsPromoted = 0;
2251 2251 )
2252 2252
2253 2253 if (PrintGC && Verbose) {
2254 2254     // The call down the chain in contiguous_available() needs the
2255 2255     // freelistLock, so print this out before releasing the freelistLock.
2256 2256     gclog_or_tty->print(" Contiguous available " SIZE_FORMAT " bytes ",
2257 2257 contiguous_available());
2258 2258 }
2259 2259 }
2260 2260
2261 2261 #ifndef PRODUCT
2262 2262 bool CMSCollector::have_cms_token() {
2263 2263 Thread* thr = Thread::current();
2264 2264 if (thr->is_VM_thread()) {
2265 2265 return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2266 2266 } else if (thr->is_ConcurrentGC_thread()) {
2267 2267 return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2268 2268 } else if (thr->is_GC_task_thread()) {
2269 2269 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2270 2270 ParGCRareEvent_lock->owned_by_self();
2271 2271 }
2272 2272 return false;
2273 2273 }
2274 2274 #endif
2275 2275
2276 2276 // Check reachability of the given heap address in CMS generation,
2277 2277 // treating all other generations as roots.
2278 2278 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2279 2279 // We could "guarantee" below, rather than assert, but I'll
2280 2280 // leave these as "asserts" so that an adventurous debugger
2281 2281   // could try this in the product build, provided some subset of
2282 2282   // the conditions were met and they were interested in the
2283 2283 // results and knew that the computation below wouldn't interfere
2284 2284 // with other concurrent computations mutating the structures
2285 2285 // being read or written.
2286 2286 assert(SafepointSynchronize::is_at_safepoint(),
2287 2287 "Else mutations in object graph will make answer suspect");
2288 2288 assert(have_cms_token(), "Should hold cms token");
2289 2289 assert(haveFreelistLocks(), "must hold free list locks");
2290 2290 assert_lock_strong(bitMapLock());
2291 2291
2292 2292 // Clear the marking bit map array before starting, but, just
2293 2293 // for kicks, first report if the given address is already marked
2294 2294 gclog_or_tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
2295 2295 _markBitMap.isMarked(addr) ? "" : " not");
2296 2296
2297 2297 if (verify_after_remark()) {
2298 2298 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2299 2299 bool result = verification_mark_bm()->isMarked(addr);
2300 2300 gclog_or_tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
2301 2301 result ? "IS" : "is NOT");
2302 2302 return result;
2303 2303 } else {
2304 2304 gclog_or_tty->print_cr("Could not compute result");
2305 2305 return false;
2306 2306 }
2307 2307 }
2308 2308
2309 2309
2310 2310 void
2311 2311 CMSCollector::print_on_error(outputStream* st) {
2312 2312 CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2313 2313 if (collector != NULL) {
2314 2314 CMSBitMap* bitmap = &collector->_markBitMap;
2315 2315 st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2316 2316 bitmap->print_on_error(st, " Bits: ");
2317 2317
2318 2318 st->cr();
2319 2319
2320 2320 CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2321 2321 st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2322 2322 mut_bitmap->print_on_error(st, " Bits: ");
2323 2323 }
2324 2324 }
2325 2325
2326 2326 ////////////////////////////////////////////////////////
2327 2327 // CMS Verification Support
2328 2328 ////////////////////////////////////////////////////////
2329 2329 // Following the remark phase, the following invariant
2330 2330 // should hold -- each object in the CMS heap which is
2331 2331 // marked in the verification_mark_bm() should be marked in the markBitMap().
2332 2332
2333 2333 class VerifyMarkedClosure: public BitMapClosure {
2334 2334 CMSBitMap* _marks;
2335 2335 bool _failed;
2336 2336
2337 2337 public:
2338 2338 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2339 2339
2340 2340 bool do_bit(size_t offset) {
2341 2341 HeapWord* addr = _marks->offsetToHeapWord(offset);
2342 2342 if (!_marks->isMarked(addr)) {
2343 2343 oop(addr)->print_on(gclog_or_tty);
2344 2344       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2345 2345 _failed = true;
2346 2346 }
2347 2347 return true;
2348 2348 }
2349 2349
2350 2350 bool failed() { return _failed; }
2351 2351 };
2352 2352
2353 2353 bool CMSCollector::verify_after_remark(bool silent) {
2354 2354 if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2355 2355 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2356 2356 static bool init = false;
2357 2357
2358 2358 assert(SafepointSynchronize::is_at_safepoint(),
2359 2359 "Else mutations in object graph will make answer suspect");
2360 2360 assert(have_cms_token(),
2361 2361 "Else there may be mutual interference in use of "
2362 2362          "verification data structures");
2363 2363 assert(_collectorState > Marking && _collectorState <= Sweeping,
2364 2364 "Else marking info checked here may be obsolete");
2365 2365 assert(haveFreelistLocks(), "must hold free list locks");
2366 2366 assert_lock_strong(bitMapLock());
2367 2367
2368 2368
2369 2369 // Allocate marking bit map if not already allocated
2370 2370 if (!init) { // first time
2371 2371 if (!verification_mark_bm()->allocate(_span)) {
2372 2372 return false;
2373 2373 }
2374 2374 init = true;
2375 2375 }
2376 2376
2377 2377 assert(verification_mark_stack()->isEmpty(), "Should be empty");
2378 2378
2379 2379 // Turn off refs discovery -- so we will be tracing through refs.
2380 2380 // This is as intended, because by this time
2381 2381 // GC must already have cleared any refs that need to be cleared,
2382 2382 // and traced those that need to be marked; moreover,
2383 2383 // the marking done here is not going to interfere in any
2384 2384 // way with the marking information used by GC.
2385 2385 NoRefDiscovery no_discovery(ref_processor());
2386 2386
2387 2387 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2388 2388
2389 2389 // Clear any marks from a previous round
2390 2390 verification_mark_bm()->clear_all();
2391 2391 assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2392 2392 verify_work_stacks_empty();
2393 2393
2394 2394 GenCollectedHeap* gch = GenCollectedHeap::heap();
2395 2395 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
2396 2396 // Update the saved marks which may affect the root scans.
2397 2397 gch->save_marks();
2398 2398
2399 2399 if (CMSRemarkVerifyVariant == 1) {
2400 2400 // In this first variant of verification, we complete
2401 2401 // all marking, then check if the new marks-vector is
2402 2402 // a subset of the CMS marks-vector.
2403 2403 verify_after_remark_work_1();
2404 2404 } else if (CMSRemarkVerifyVariant == 2) {
2405 2405 // In this second variant of verification, we flag an error
2406 2406 // (i.e. an object reachable in the new marks-vector not reachable
2407 2407 // in the CMS marks-vector) immediately, also indicating the
2408 2408     // identity of an object (A) that references the unmarked object (B) --
2409 2409 // presumably, a mutation to A failed to be picked up by preclean/remark?
2410 2410 verify_after_remark_work_2();
2411 2411 } else {
2412 2412 warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant",
2413 2413 CMSRemarkVerifyVariant);
2414 2414 }
2415 2415 if (!silent) gclog_or_tty->print(" done] ");
2416 2416 return true;
2417 2417 }
2418 2418
2419 2419 void CMSCollector::verify_after_remark_work_1() {
2420 2420 ResourceMark rm;
2421 2421 HandleMark hm;
2422 2422 GenCollectedHeap* gch = GenCollectedHeap::heap();
2423 2423
2424 2424 // Get a clear set of claim bits for the roots processing to work with.
2425 2425 ClassLoaderDataGraph::clear_claimed_marks();
2426 2426
2427 2427 // Mark from roots one level into CMS
2428 2428 MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2429 2429 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2430 2430
2431 2431 {
2432 2432 StrongRootsScope srs(1);
2433 2433
2434 2434 gch->gen_process_roots(&srs,
2435 2435 _cmsGen->level(),
2436 2436 true, // younger gens are roots
2437 2437 GenCollectedHeap::ScanningOption(roots_scanning_options()),
2438 2438 should_unload_classes(),
2439 2439 ¬Older,
2440 2440 NULL,
2441 2441 NULL);
2442 2442 }
2443 2443
2444 2444 // Now mark from the roots
2445 2445 MarkFromRootsClosure markFromRootsClosure(this, _span,
2446 2446 verification_mark_bm(), verification_mark_stack(),
2447 2447 false /* don't yield */, true /* verifying */);
2448 2448 assert(_restart_addr == NULL, "Expected pre-condition");
2449 2449 verification_mark_bm()->iterate(&markFromRootsClosure);
2450 2450 while (_restart_addr != NULL) {
2451 2451 // Deal with stack overflow: by restarting at the indicated
2452 2452 // address.
2453 2453 HeapWord* ra = _restart_addr;
2454 2454 markFromRootsClosure.reset(ra);
2455 2455 _restart_addr = NULL;
2456 2456 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2457 2457 }
2458 2458 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2459 2459 verify_work_stacks_empty();
2460 2460
2461 2461 // Marking completed -- now verify that each bit marked in
2462 2462 // verification_mark_bm() is also marked in markBitMap(); flag all
2463 2463 // errors by printing corresponding objects.
2464 2464 VerifyMarkedClosure vcl(markBitMap());
2465 2465 verification_mark_bm()->iterate(&vcl);
2466 2466 if (vcl.failed()) {
2467 2467 gclog_or_tty->print("Verification failed");
2468 2468 gch->print_on(gclog_or_tty);
2469 2469 fatal("CMS: failed marking verification after remark");
2470 2470 }
2471 2471 }
2472 2472
2473 2473 class VerifyKlassOopsKlassClosure : public KlassClosure {
2474 2474 class VerifyKlassOopsClosure : public OopClosure {
2475 2475 CMSBitMap* _bitmap;
2476 2476 public:
2477 2477 VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2478 2478 void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2479 2479 void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2480 2480 } _oop_closure;
2481 2481 public:
2482 2482 VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2483 2483 void do_klass(Klass* k) {
2484 2484 k->oops_do(&_oop_closure);
2485 2485 }
2486 2486 };
2487 2487
2488 2488 void CMSCollector::verify_after_remark_work_2() {
2489 2489 ResourceMark rm;
2490 2490 HandleMark hm;
2491 2491 GenCollectedHeap* gch = GenCollectedHeap::heap();
2492 2492
2493 2493 // Get a clear set of claim bits for the roots processing to work with.
2494 2494 ClassLoaderDataGraph::clear_claimed_marks();
2495 2495
2496 2496 // Mark from roots one level into CMS
2497 2497 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2498 2498 markBitMap());
2499 2499 CLDToOopClosure cld_closure(¬Older, true);
2500 2500
2501 2501 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2502 2502
2503 2503 {
2504 2504 StrongRootsScope srs(1);
2505 2505
2506 2506 gch->gen_process_roots(&srs,
2507 2507 _cmsGen->level(),
2508 2508 true, // younger gens are roots
2509 2509 GenCollectedHeap::ScanningOption(roots_scanning_options()),
2510 2510 should_unload_classes(),
2511 2511 ¬Older,
2512 2512 NULL,
2513 2513 &cld_closure);
2514 2514 }
2515 2515
2516 2516 // Now mark from the roots
2517 2517 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2518 2518 verification_mark_bm(), markBitMap(), verification_mark_stack());
2519 2519 assert(_restart_addr == NULL, "Expected pre-condition");
2520 2520 verification_mark_bm()->iterate(&markFromRootsClosure);
2521 2521 while (_restart_addr != NULL) {
2522 2522 // Deal with stack overflow: by restarting at the indicated
2523 2523 // address.
2524 2524 HeapWord* ra = _restart_addr;
2525 2525 markFromRootsClosure.reset(ra);
2526 2526 _restart_addr = NULL;
2527 2527 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2528 2528 }
2529 2529 assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2530 2530 verify_work_stacks_empty();
2531 2531
2532 2532 VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
2533 2533 ClassLoaderDataGraph::classes_do(&verify_klass_oops);
2534 2534
2535 2535 // Marking completed -- now verify that each bit marked in
2536 2536 // verification_mark_bm() is also marked in markBitMap(); flag all
2537 2537 // errors by printing corresponding objects.
2538 2538 VerifyMarkedClosure vcl(markBitMap());
2539 2539 verification_mark_bm()->iterate(&vcl);
2540 2540 assert(!vcl.failed(), "Else verification above should not have succeeded");
2541 2541 }
2542 2542
2543 2543 void ConcurrentMarkSweepGeneration::save_marks() {
2544 2544 // delegate to CMS space
2545 2545 cmsSpace()->save_marks();
2546 2546 for (uint i = 0; i < ParallelGCThreads; i++) {
2547 2547 _par_gc_thread_states[i]->promo.startTrackingPromotions();
2548 2548 }
2549 2549 }
2550 2550
2551 2551 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2552 2552 return cmsSpace()->no_allocs_since_save_marks();
2553 2553 }
2554 2554
2555 2555 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
2556 2556 \
2557 2557 void ConcurrentMarkSweepGeneration:: \
2558 2558 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
2559 2559 cl->set_generation(this); \
2560 2560 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \
2561 2561 cl->reset_generation(); \
2562 2562 save_marks(); \
2563 2563 }
2564 2564
2565 2565 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
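// Editor's note: a minimal sketch of what the macro above expands to,
// assuming a hypothetical (OopClosureType, nv_suffix) pair of
// (FooClosure, _nv); the actual instantiations are supplied by
// ALL_SINCE_SAVE_MARKS_CLOSURES:
//
//   void ConcurrentMarkSweepGeneration::oop_since_save_marks_iterate_nv(FooClosure* cl) {
//     cl->set_generation(this);
//     cmsSpace()->oop_since_save_marks_iterate_nv(cl);
//     cl->reset_generation();
//     save_marks();
//   }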
2566 2566
2567 2567 void
2568 2568 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
2569 2569 if (freelistLock()->owned_by_self()) {
2570 2570 Generation::oop_iterate(cl);
2571 2571 } else {
2572 2572 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2573 2573 Generation::oop_iterate(cl);
2574 2574 }
2575 2575 }
2576 2576
2577 2577 void
2578 2578 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2579 2579 if (freelistLock()->owned_by_self()) {
2580 2580 Generation::object_iterate(cl);
2581 2581 } else {
2582 2582 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2583 2583 Generation::object_iterate(cl);
2584 2584 }
2585 2585 }
2586 2586
2587 2587 void
2588 2588 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
2589 2589 if (freelistLock()->owned_by_self()) {
2590 2590 Generation::safe_object_iterate(cl);
2591 2591 } else {
2592 2592 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2593 2593 Generation::safe_object_iterate(cl);
2594 2594 }
2595 2595 }
2596 2596
2597 2597 void
2598 2598 ConcurrentMarkSweepGeneration::post_compact() {
2599 2599 }
2600 2600
2601 2601 void
2602 2602 ConcurrentMarkSweepGeneration::prepare_for_verify() {
2603 2603 // Fix the linear allocation blocks to look like free blocks.
2604 2604
2605 2605 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2606 2606 // are not called when the heap is verified during universe initialization and
2607 2607 // at vm shutdown.
2608 2608 if (freelistLock()->owned_by_self()) {
2609 2609 cmsSpace()->prepare_for_verify();
2610 2610 } else {
2611 2611 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2612 2612 cmsSpace()->prepare_for_verify();
2613 2613 }
2614 2614 }
2615 2615
2616 2616 void
2617 2617 ConcurrentMarkSweepGeneration::verify() {
2618 2618 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2619 2619 // are not called when the heap is verified during universe initialization and
2620 2620 // at vm shutdown.
2621 2621 if (freelistLock()->owned_by_self()) {
2622 2622 cmsSpace()->verify();
2623 2623 } else {
2624 2624 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2625 2625 cmsSpace()->verify();
2626 2626 }
2627 2627 }
2628 2628
2629 2629 void CMSCollector::verify() {
2630 2630 _cmsGen->verify();
2631 2631 }
2632 2632
2633 2633 #ifndef PRODUCT
2634 2634 bool CMSCollector::overflow_list_is_empty() const {
2635 2635 assert(_num_par_pushes >= 0, "Inconsistency");
2636 2636 if (_overflow_list == NULL) {
2637 2637 assert(_num_par_pushes == 0, "Inconsistency");
2638 2638 }
2639 2639 return _overflow_list == NULL;
2640 2640 }
2641 2641
2642 2642 // The methods verify_work_stacks_empty() and verify_overflow_empty()
2643 2643 // merely consolidate assertion checks that appear to occur together frequently.
2644 2644 void CMSCollector::verify_work_stacks_empty() const {
2645 2645 assert(_markStack.isEmpty(), "Marking stack should be empty");
2646 2646 assert(overflow_list_is_empty(), "Overflow list should be empty");
2647 2647 }
2648 2648
2649 2649 void CMSCollector::verify_overflow_empty() const {
2650 2650 assert(overflow_list_is_empty(), "Overflow list should be empty");
2651 2651 assert(no_preserved_marks(), "No preserved marks");
2652 2652 }
2653 2653 #endif // PRODUCT
2654 2654
2655 2655 // Decide if we want to enable class unloading as part of the
2656 2656 // ensuing concurrent GC cycle. We will collect and
2657 2657 // unload classes if it's the case that:
2658 2658 // (1) an explicit gc request has been made and the flag
2659 2659 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
2660 2660 // (2) (a) class unloading is enabled at the command line, and
2661 2661 //     (b) (i) enough concurrent cycles have passed since classes were last unloaded, or (ii) the old gen is getting really full
2662 2662 // NOTE: Provided there is no change in the state of the heap between
2663 2663 // calls to this method, it should have idempotent results. Moreover,
2664 2664 // its results should be monotonically increasing (i.e. going from 0 to 1,
2665 2665 // but not 1 to 0) between successive calls between which the heap was
2666 2666 // not collected. For the implementation below, it must thus rely on
2667 2667 // the properties that concurrent_cycles_since_last_unload()
2668 2668 // will not decrease unless a collection cycle happened and that
2669 2669 // _cmsGen->is_too_full() is
2670 2670 // itself also monotonic in that sense. See check_monotonicity()
2671 2671 // below.
2672 2672 void CMSCollector::update_should_unload_classes() {
2673 2673 _should_unload_classes = false;
2674 + if (!ClassUnloading) {
2675 + return;
2676 + }
2677 +
2674 2678 // Condition 1 above
2675 2679 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2676 2680 _should_unload_classes = true;
2677 2681 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
2678 2682     // Disjuncts 2.b.(i,ii) above
2679 2683 _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2680 2684 CMSClassUnloadingMaxInterval)
2681 2685 || _cmsGen->is_too_full();
2682 2686 }
2683 2687 }
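// Editor's note: a concrete reading of the decision above -- assuming
// ClassUnloading and CMSClassUnloadingEnabled are both on, and
// concurrent_cycles_since_last_unload() has reached
// CMSClassUnloadingMaxInterval, _should_unload_classes becomes true for this
// cycle even when the old gen is not too full; the interval test and the
// occupancy test are independent disjuncts.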
2684 2688
2685 2689 bool ConcurrentMarkSweepGeneration::is_too_full() const {
2686 2690 bool res = should_concurrent_collect();
2687 2691 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2688 2692 return res;
2689 2693 }
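// Editor's note: as a worked example (assuming CMSIsTooFullPercentage keeps
// its usual default of 98), a generation at 99% occupancy that also satisfies
// should_concurrent_collect() reports is_too_full() == true, while one at 95%
// occupancy reports false regardless of the first test.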
2690 2694
2691 2695 void CMSCollector::setup_cms_unloading_and_verification_state() {
2692 2696 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2693 2697 || VerifyBeforeExit;
2694 2698 const int rso = GenCollectedHeap::SO_AllCodeCache;
2695 2699
2696 2700 // We set the proper root for this CMS cycle here.
2697 2701 if (should_unload_classes()) { // Should unload classes this cycle
2698 2702 remove_root_scanning_option(rso); // Shrink the root set appropriately
2699 2703 set_verifying(should_verify); // Set verification state for this cycle
2700 2704 return; // Nothing else needs to be done at this time
2701 2705 }
2702 2706
2703 2707 // Not unloading classes this cycle
2704 2708 assert(!should_unload_classes(), "Inconsistency!");
2705 2709
2706 2710 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2707 2711 // Include symbols, strings and code cache elements to prevent their resurrection.
2708 2712 add_root_scanning_option(rso);
2709 2713 set_verifying(true);
2710 2714 } else if (verifying() && !should_verify) {
2711 2715 // We were verifying, but some verification flags got disabled.
2712 2716 set_verifying(false);
2713 2717 // Exclude symbols, strings and code cache elements from root scanning to
2714 2718 // reduce IM and RM pauses.
2715 2719 remove_root_scanning_option(rso);
2716 2720 }
2717 2721 }
2718 2722
2719 2723
2720 2724 #ifndef PRODUCT
2721 2725 HeapWord* CMSCollector::block_start(const void* p) const {
2722 2726 const HeapWord* addr = (HeapWord*)p;
2723 2727 if (_span.contains(p)) {
2724 2728 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
2725 2729 return _cmsGen->cmsSpace()->block_start(p);
2726 2730 }
2727 2731 }
2728 2732 return NULL;
2729 2733 }
2730 2734 #endif
2731 2735
2732 2736 HeapWord*
2733 2737 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
2734 2738 bool tlab,
2735 2739 bool parallel) {
2736 2740 CMSSynchronousYieldRequest yr;
2737 2741 assert(!tlab, "Can't deal with TLAB allocation");
2738 2742 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2739 2743 expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2740 2744 if (GCExpandToAllocateDelayMillis > 0) {
2741 2745 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2742 2746 }
2743 2747 return have_lock_and_allocate(word_size, tlab);
2744 2748 }
2745 2749
2746 2750 void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2747 2751 size_t bytes,
2748 2752 size_t expand_bytes,
2749 2753 CMSExpansionCause::Cause cause)
2750 2754 {
2751 2755
2752 2756 bool success = expand(bytes, expand_bytes);
2753 2757
2754 2758 // remember why we expanded; this information is used
2755 2759 // by shouldConcurrentCollect() when making decisions on whether to start
2756 2760 // a new CMS cycle.
2757 2761 if (success) {
2758 2762 set_expansion_cause(cause);
2759 2763 if (PrintGCDetails && Verbose) {
2760 2764 gclog_or_tty->print_cr("Expanded CMS gen for %s",
2761 2765 CMSExpansionCause::to_string(cause));
2762 2766 }
2763 2767 }
2764 2768 }
2765 2769
2766 2770 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2767 2771 HeapWord* res = NULL;
2768 2772 MutexLocker x(ParGCRareEvent_lock);
2769 2773 while (true) {
2770 2774 // Expansion by some other thread might make alloc OK now:
2771 2775 res = ps->lab.alloc(word_sz);
2772 2776 if (res != NULL) return res;
2773 2777 // If there's not enough expansion space available, give up.
2774 2778 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2775 2779 return NULL;
2776 2780 }
2777 2781 // Otherwise, we try expansion.
2778 2782 expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2779 2783 // Now go around the loop and try alloc again;
2780 2784 // A competing par_promote might beat us to the expansion space,
2781 2785 // so we may go around the loop again if promotion fails again.
2782 2786 if (GCExpandToAllocateDelayMillis > 0) {
2783 2787 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2784 2788 }
2785 2789 }
2786 2790 }
2787 2791
2788 2792
2789 2793 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
2790 2794 PromotionInfo* promo) {
2791 2795 MutexLocker x(ParGCRareEvent_lock);
2792 2796 size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
2793 2797 while (true) {
2794 2798 // Expansion by some other thread might make alloc OK now:
2795 2799 if (promo->ensure_spooling_space()) {
2796 2800 assert(promo->has_spooling_space(),
2797 2801 "Post-condition of successful ensure_spooling_space()");
2798 2802 return true;
2799 2803 }
2800 2804 // If there's not enough expansion space available, give up.
2801 2805 if (_virtual_space.uncommitted_size() < refill_size_bytes) {
2802 2806 return false;
2803 2807 }
2804 2808 // Otherwise, we try expansion.
2805 2809 expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
2806 2810 // Now go around the loop and try alloc again;
2807 2811 // A competing allocation might beat us to the expansion space,
2808 2812 // so we may go around the loop again if allocation fails again.
2809 2813 if (GCExpandToAllocateDelayMillis > 0) {
2810 2814 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2811 2815 }
2812 2816 }
2813 2817 }
2814 2818
2815 2819 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2816 2820 // Only shrink if a compaction was done so that all the free space
2817 2821 // in the generation is in a contiguous block at the end.
2818 2822 if (did_compact()) {
2819 2823 CardGeneration::shrink(bytes);
2820 2824 }
2821 2825 }
2822 2826
2823 2827 void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2824 2828 assert_locked_or_safepoint(Heap_lock);
2825 2829 }
2826 2830
2827 2831 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2828 2832 assert_locked_or_safepoint(Heap_lock);
2829 2833 assert_lock_strong(freelistLock());
2830 2834 if (PrintGCDetails && Verbose) {
2831 2835 warning("Shrinking of CMS not yet implemented");
2832 2836 }
2833 2837 return;
2834 2838 }
2835 2839
2836 2840
2837 2841 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2838 2842 // phases.
2839 2843 class CMSPhaseAccounting: public StackObj {
2840 2844 public:
2841 2845 CMSPhaseAccounting(CMSCollector *collector,
2842 2846 const char *phase,
2843 2847 const GCId gc_id,
2844 2848 bool print_cr = true);
2845 2849 ~CMSPhaseAccounting();
2846 2850
2847 2851 private:
2848 2852 CMSCollector *_collector;
2849 2853 const char *_phase;
2850 2854 elapsedTimer _wallclock;
2851 2855 bool _print_cr;
2852 2856 const GCId _gc_id;
2853 2857
2854 2858 public:
2855 2859 // Not MT-safe; so do not pass around these StackObj's
2856 2860 // where they may be accessed by other threads.
2857 2861 jlong wallclock_millis() {
2858 2862 assert(_wallclock.is_active(), "Wall clock should not stop");
2859 2863 _wallclock.stop(); // to record time
2860 2864 jlong ret = _wallclock.milliseconds();
2861 2865 _wallclock.start(); // restart
2862 2866 return ret;
2863 2867 }
2864 2868 };
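// Editor's note: CMSPhaseAccounting is used as a stack-allocated RAII guard
// around each concurrent phase; see, e.g., markFromRoots() below:
//
//   CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
//
// The constructor emits the "-start" banner and starts the timers; the
// destructor stops them and prints the elapsed CPU and wallclock times.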
2865 2869
2866 2870 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2867 2871 const char *phase,
2868 2872 const GCId gc_id,
2869 2873 bool print_cr) :
2870 2874 _collector(collector), _phase(phase), _print_cr(print_cr), _gc_id(gc_id) {
2871 2875
2872 2876 if (PrintCMSStatistics != 0) {
2873 2877 _collector->resetYields();
2874 2878 }
2875 2879 if (PrintGCDetails) {
2876 2880 gclog_or_tty->gclog_stamp(_gc_id);
2877 2881 gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
2878 2882 _collector->cmsGen()->short_name(), _phase);
2879 2883 }
2880 2884 _collector->resetTimer();
2881 2885 _wallclock.start();
2882 2886 _collector->startTimer();
2883 2887 }
2884 2888
2885 2889 CMSPhaseAccounting::~CMSPhaseAccounting() {
2886 2890 assert(_wallclock.is_active(), "Wall clock should not have stopped");
2887 2891 _collector->stopTimer();
2888 2892 _wallclock.stop();
2889 2893 if (PrintGCDetails) {
2890 2894 gclog_or_tty->gclog_stamp(_gc_id);
2891 2895 gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
2892 2896 _collector->cmsGen()->short_name(),
2893 2897 _phase, _collector->timerValue(), _wallclock.seconds());
2894 2898 if (_print_cr) {
2895 2899 gclog_or_tty->cr();
2896 2900 }
2897 2901 if (PrintCMSStatistics != 0) {
2898 2902 gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
2899 2903 _collector->yields());
2900 2904 }
2901 2905 }
2902 2906 }
2903 2907
2904 2908 // CMS work
2905 2909
2906 2910 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2907 2911 class CMSParMarkTask : public AbstractGangTask {
2908 2912 protected:
2909 2913 CMSCollector* _collector;
2910 2914 uint _n_workers;
2911 2915 CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2912 2916 AbstractGangTask(name),
2913 2917 _collector(collector),
2914 2918 _n_workers(n_workers) {}
2915 2919 // Work method in support of parallel rescan ... of young gen spaces
2916 2920 void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
2917 2921 ContiguousSpace* space,
2918 2922 HeapWord** chunk_array, size_t chunk_top);
2919 2923 void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
2920 2924 };
2921 2925
2922 2926 // Parallel initial mark task
2923 2927 class CMSParInitialMarkTask: public CMSParMarkTask {
2924 2928 StrongRootsScope* _strong_roots_scope;
2925 2929 public:
2926 2930 CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
2927 2931 CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
2928 2932 _strong_roots_scope(strong_roots_scope) {}
2929 2933 void work(uint worker_id);
2930 2934 };
2931 2935
2932 2936 // Checkpoint the roots into this generation from outside
2933 2937 // this generation. [Note this initial checkpoint need only
2934 2938 // be approximate -- we'll do a catch up phase subsequently.]
2935 2939 void CMSCollector::checkpointRootsInitial() {
2936 2940 assert(_collectorState == InitialMarking, "Wrong collector state");
2937 2941 check_correct_thread_executing();
2938 2942   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
2939 2943
2940 2944 save_heap_summary();
2941 2945 report_heap_summary(GCWhen::BeforeGC);
2942 2946
2943 2947 ReferenceProcessor* rp = ref_processor();
2944 2948 assert(_restart_addr == NULL, "Control point invariant");
2945 2949 {
2946 2950 // acquire locks for subsequent manipulations
2947 2951 MutexLockerEx x(bitMapLock(),
2948 2952 Mutex::_no_safepoint_check_flag);
2949 2953 checkpointRootsInitialWork();
2950 2954 // enable ("weak") refs discovery
2951 2955 rp->enable_discovery();
2952 2956 _collectorState = Marking;
2953 2957 }
2954 2958 }
2955 2959
2956 2960 void CMSCollector::checkpointRootsInitialWork() {
2957 2961 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2958 2962 assert(_collectorState == InitialMarking, "just checking");
2959 2963
2960 2964 // If there has not been a GC[n-1] since last GC[n] cycle completed,
2961 2965 // precede our marking with a collection of all
2962 2966 // younger generations to keep floating garbage to a minimum.
2963 2967 // XXX: we won't do this for now -- it's an optimization to be done later.
2964 2968
2965 2969 // already have locks
2966 2970 assert_lock_strong(bitMapLock());
2967 2971 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2968 2972
2969 2973 // Setup the verification and class unloading state for this
2970 2974 // CMS collection cycle.
2971 2975 setup_cms_unloading_and_verification_state();
2972 2976
2973 2977 NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
2974 2978 PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
2975 2979
2976 2980 // Reset all the PLAB chunk arrays if necessary.
2977 2981 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2978 2982 reset_survivor_plab_arrays();
2979 2983 }
2980 2984
2981 2985 ResourceMark rm;
2982 2986 HandleMark hm;
2983 2987
2984 2988 MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2985 2989 GenCollectedHeap* gch = GenCollectedHeap::heap();
2986 2990
2987 2991 verify_work_stacks_empty();
2988 2992 verify_overflow_empty();
2989 2993
2990 2994 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
2991 2995 // Update the saved marks which may affect the root scans.
2992 2996 gch->save_marks();
2993 2997
2994 2998 // weak reference processing has not started yet.
2995 2999 ref_processor()->set_enqueuing_is_done(false);
2996 3000
2997 3001 // Need to remember all newly created CLDs,
2998 3002 // so that we can guarantee that the remark finds them.
2999 3003 ClassLoaderDataGraph::remember_new_clds(true);
3000 3004
3001 3005 // Whenever a CLD is found, it will be claimed before proceeding to mark
3002 3006 // the klasses. The claimed marks need to be cleared before marking starts.
3003 3007 ClassLoaderDataGraph::clear_claimed_marks();
3004 3008
3005 3009 if (CMSPrintEdenSurvivorChunks) {
3006 3010 print_eden_and_survivor_chunk_arrays();
3007 3011 }
3008 3012
3009 3013 {
3010 3014 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3011 3015 if (CMSParallelInitialMarkEnabled) {
3012 3016 // The parallel version.
3013 3017 FlexibleWorkGang* workers = gch->workers();
3014 3018 assert(workers != NULL, "Need parallel worker threads.");
3015 3019 uint n_workers = workers->active_workers();
3016 3020
3017 3021 StrongRootsScope srs(n_workers);
3018 3022
3019 3023 CMSParInitialMarkTask tsk(this, &srs, n_workers);
3020 3024 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
3021 3025 if (n_workers > 1) {
3022 3026 workers->run_task(&tsk);
3023 3027 } else {
3024 3028 tsk.work(0);
3025 3029 }
3026 3030 } else {
3027 3031 // The serial version.
3028 3032       CLDToOopClosure cld_closure(&notOlder, true);
3029 3033 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3030 3034
3031 3035 StrongRootsScope srs(1);
3032 3036
3033 3037 gch->gen_process_roots(&srs,
3034 3038 _cmsGen->level(),
3035 3039 true, // younger gens are roots
3036 3040 GenCollectedHeap::ScanningOption(roots_scanning_options()),
3037 3041 should_unload_classes(),
3038 3042                              &notOlder,
3039 3043 NULL,
3040 3044 &cld_closure);
3041 3045 }
3042 3046 }
3043 3047
3044 3048 // Clear mod-union table; it will be dirtied in the prologue of
3045 3049 // CMS generation per each younger generation collection.
3046 3050
3047 3051 assert(_modUnionTable.isAllClear(),
3048 3052 "Was cleared in most recent final checkpoint phase"
3049 3053 " or no bits are set in the gc_prologue before the start of the next "
3050 3054 "subsequent marking phase.");
3051 3055
3052 3056 assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3053 3057
3054 3058 // Save the end of the used_region of the constituent generations
3055 3059 // to be used to limit the extent of sweep in each generation.
3056 3060 save_sweep_limits();
3057 3061 verify_overflow_empty();
3058 3062 }
3059 3063
3060 3064 bool CMSCollector::markFromRoots() {
3061 3065 // we might be tempted to assert that:
3062 3066 // assert(!SafepointSynchronize::is_at_safepoint(),
3063 3067 // "inconsistent argument?");
3064 3068 // However that wouldn't be right, because it's possible that
3065 3069 // a safepoint is indeed in progress as a younger generation
3066 3070 // stop-the-world GC happens even as we mark in this generation.
3067 3071 assert(_collectorState == Marking, "inconsistent state?");
3068 3072 check_correct_thread_executing();
3069 3073 verify_overflow_empty();
3070 3074
3071 3075 // Weak ref discovery note: We may be discovering weak
3072 3076 // refs in this generation concurrent (but interleaved) with
3073 3077 // weak ref discovery by a younger generation collector.
3074 3078
3075 3079 CMSTokenSyncWithLocks ts(true, bitMapLock());
3076 3080 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3077 3081 CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3078 3082 bool res = markFromRootsWork();
3079 3083 if (res) {
3080 3084 _collectorState = Precleaning;
3081 3085 } else { // We failed and a foreground collection wants to take over
3082 3086 assert(_foregroundGCIsActive, "internal state inconsistency");
3083 3087 assert(_restart_addr == NULL, "foreground will restart from scratch");
3084 3088 if (PrintGCDetails) {
3085 3089 gclog_or_tty->print_cr("bailing out to foreground collection");
3086 3090 }
3087 3091 }
3088 3092 verify_overflow_empty();
3089 3093 return res;
3090 3094 }
3091 3095
3092 3096 bool CMSCollector::markFromRootsWork() {
3093 3097 // iterate over marked bits in bit map, doing a full scan and mark
3094 3098 // from these roots using the following algorithm:
3095 3099 // . if oop is to the right of the current scan pointer,
3096 3100 // mark corresponding bit (we'll process it later)
3097 3101 // . else (oop is to left of current scan pointer)
3098 3102 // push oop on marking stack
3099 3103 // . drain the marking stack
3100 3104
3101 3105 // Note that when we do a marking step we need to hold the
3102 3106 // bit map lock -- recall that direct allocation (by mutators)
3103 3107 // and promotion (by younger generation collectors) is also
3104 3108 // marking the bit map. [the so-called allocate live policy.]
3105 3109 // Because the implementation of bit map marking is not
3106 3110 // robust wrt simultaneous marking of bits in the same word,
3107 3111 // we need to make sure that there is no such interference
3108 3112   // between such concurrent updates.
3109 3113
3110 3114 // already have locks
3111 3115 assert_lock_strong(bitMapLock());
3112 3116
3113 3117 verify_work_stacks_empty();
3114 3118 verify_overflow_empty();
3115 3119 bool result = false;
3116 3120 if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3117 3121 result = do_marking_mt();
3118 3122 } else {
3119 3123 result = do_marking_st();
3120 3124 }
3121 3125 return result;
3122 3126 }
3123 3127
3124 3128 // Forward decl
3125 3129 class CMSConcMarkingTask;
3126 3130
3127 3131 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3128 3132 CMSCollector* _collector;
3129 3133 CMSConcMarkingTask* _task;
3130 3134 public:
3131 3135 virtual void yield();
3132 3136
3133 3137 // "n_threads" is the number of threads to be terminated.
3134 3138 // "queue_set" is a set of work queues of other threads.
3135 3139 // "collector" is the CMS collector associated with this task terminator.
3136 3140   // The overridden yield() above yields the gang as a whole when required.
3137 3141 CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3138 3142 ParallelTaskTerminator(n_threads, queue_set),
3139 3143 _collector(collector) { }
3140 3144
3141 3145 void set_task(CMSConcMarkingTask* task) {
3142 3146 _task = task;
3143 3147 }
3144 3148 };
3145 3149
3146 3150 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3147 3151 CMSConcMarkingTask* _task;
3148 3152 public:
3149 3153 bool should_exit_termination();
3150 3154 void set_task(CMSConcMarkingTask* task) {
3151 3155 _task = task;
3152 3156 }
3153 3157 };
3154 3158
3155 3159 // MT Concurrent Marking Task
3156 3160 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3157 3161 CMSCollector* _collector;
3158 3162 uint _n_workers; // requested/desired # workers
3159 3163 bool _result;
3160 3164 CompactibleFreeListSpace* _cms_space;
3161 3165 char _pad_front[64]; // padding to ...
3162 3166 HeapWord* _global_finger; // ... avoid sharing cache line
3163 3167 char _pad_back[64];
3164 3168 HeapWord* _restart_addr;
3165 3169
3166 3170 // Exposed here for yielding support
3167 3171 Mutex* const _bit_map_lock;
3168 3172
3169 3173 // The per thread work queues, available here for stealing
3170 3174 OopTaskQueueSet* _task_queues;
3171 3175
3172 3176 // Termination (and yielding) support
3173 3177 CMSConcMarkingTerminator _term;
3174 3178 CMSConcMarkingTerminatorTerminator _term_term;
3175 3179
3176 3180 public:
3177 3181 CMSConcMarkingTask(CMSCollector* collector,
3178 3182 CompactibleFreeListSpace* cms_space,
3179 3183 YieldingFlexibleWorkGang* workers,
3180 3184 OopTaskQueueSet* task_queues):
3181 3185 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3182 3186 _collector(collector),
3183 3187 _cms_space(cms_space),
3184 3188 _n_workers(0), _result(true),
3185 3189 _task_queues(task_queues),
3186 3190 _term(_n_workers, task_queues, _collector),
3187 3191 _bit_map_lock(collector->bitMapLock())
3188 3192 {
3189 3193 _requested_size = _n_workers;
3190 3194 _term.set_task(this);
3191 3195 _term_term.set_task(this);
3192 3196 _restart_addr = _global_finger = _cms_space->bottom();
3193 3197 }
3194 3198
3195 3199
3196 3200 OopTaskQueueSet* task_queues() { return _task_queues; }
3197 3201
3198 3202 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3199 3203
3200 3204 HeapWord** global_finger_addr() { return &_global_finger; }
3201 3205
3202 3206 CMSConcMarkingTerminator* terminator() { return &_term; }
3203 3207
3204 3208 virtual void set_for_termination(uint active_workers) {
3205 3209 terminator()->reset_for_reuse(active_workers);
3206 3210 }
3207 3211
3208 3212 void work(uint worker_id);
3209 3213 bool should_yield() {
3210 3214 return ConcurrentMarkSweepThread::should_yield()
3211 3215 && !_collector->foregroundGCIsActive();
3212 3216 }
3213 3217
3214 3218 virtual void coordinator_yield(); // stuff done by coordinator
3215 3219 bool result() { return _result; }
3216 3220
3217 3221 void reset(HeapWord* ra) {
3218 3222 assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)");
3219 3223 _restart_addr = _global_finger = ra;
3220 3224 _term.reset_for_reuse();
3221 3225 }
3222 3226
3223 3227 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3224 3228 OopTaskQueue* work_q);
3225 3229
3226 3230 private:
3227 3231 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3228 3232 void do_work_steal(int i);
3229 3233 void bump_global_finger(HeapWord* f);
3230 3234 };
3231 3235
3232 3236 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3233 3237 assert(_task != NULL, "Error");
3234 3238 return _task->yielding();
3235 3239 // Note that we do not need the disjunct || _task->should_yield() above
3236 3240 // because we want terminating threads to yield only if the task
3237 3241 // is already in the midst of yielding, which happens only after at least one
3238 3242 // thread has yielded.
3239 3243 }
3240 3244
3241 3245 void CMSConcMarkingTerminator::yield() {
3242 3246 if (_task->should_yield()) {
3243 3247 _task->yield();
3244 3248 } else {
3245 3249 ParallelTaskTerminator::yield();
3246 3250 }
3247 3251 }
3248 3252
3249 3253 ////////////////////////////////////////////////////////////////
3250 3254 // Concurrent Marking Algorithm Sketch
3251 3255 ////////////////////////////////////////////////////////////////
3252 3256 // Until all tasks exhausted (in the CMS space):
3253 3257 // -- claim next available chunk
3254 3258 // -- bump global finger via CAS
3255 3259 // -- find first object that starts in this chunk
3256 3260 // and start scanning bitmap from that position
3257 3261 // -- scan marked objects for oops
3258 3262 // -- CAS-mark target, and if successful:
3259 3263 // . if target oop is above global finger (volatile read)
3260 3264 // nothing to do
3261 3265 // . if target oop is in chunk and above local finger
3262 3266 // then nothing to do
3263 3267 // . else push on work-queue
3264 3268 // -- Deal with possible overflow issues:
3265 3269 // . local work-queue overflow causes stuff to be pushed on
3266 3270 // global (common) overflow queue
3267 3271 // . always first empty local work queue
3268 3272 // . then get a batch of oops from global work queue if any
3269 3273 // . then do work stealing
3270 3274 // -- When all tasks claimed (in the CMS space)
3271 3275 // and local work queue empty,
3272 3276 // then in a loop do:
3273 3277 // . check global overflow stack; steal a batch of oops and trace
3274 3278 //    . try to steal from other threads if the global overflow stack is empty
3275 3279 // . if neither is available, offer termination
3276 3280 // -- Terminate and return result
3277 3281 //
3278 3282 void CMSConcMarkingTask::work(uint worker_id) {
3279 3283 elapsedTimer _timer;
3280 3284 ResourceMark rm;
3281 3285 HandleMark hm;
3282 3286
3283 3287 DEBUG_ONLY(_collector->verify_overflow_empty();)
3284 3288
3285 3289 // Before we begin work, our work queue should be empty
3286 3290 assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3287 3291 // Scan the bitmap covering _cms_space, tracing through grey objects.
3288 3292 _timer.start();
3289 3293 do_scan_and_mark(worker_id, _cms_space);
3290 3294 _timer.stop();
3291 3295 if (PrintCMSStatistics != 0) {
3292 3296 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3293 3297 worker_id, _timer.seconds());
3294 3298 // XXX: need xxx/xxx type of notation, two timers
3295 3299 }
3296 3300
3297 3301 // ... do work stealing
3298 3302 _timer.reset();
3299 3303 _timer.start();
3300 3304 do_work_steal(worker_id);
3301 3305 _timer.stop();
3302 3306 if (PrintCMSStatistics != 0) {
3303 3307 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3304 3308 worker_id, _timer.seconds());
3305 3309 // XXX: need xxx/xxx type of notation, two timers
3306 3310 }
3307 3311 assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3308 3312 assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3309 3313 // Note that under the current task protocol, the
3310 3314   // following assertion is true even if the space has
3311 3315   // expanded since the completion of the concurrent
3312 3316   // marking. XXX This will likely change under a strict
3313 3317 // ABORT semantics.
3314 3318 // After perm removal the comparison was changed to
3315 3319 // greater than or equal to from strictly greater than.
3316 3320 // Before perm removal the highest address sweep would
3317 3321 // have been at the end of perm gen but now is at the
3318 3322 // end of the tenured gen.
3319 3323 assert(_global_finger >= _cms_space->end(),
3320 3324 "All tasks have been completed");
3321 3325 DEBUG_ONLY(_collector->verify_overflow_empty();)
3322 3326 }
3323 3327
3324 3328 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3325 3329 HeapWord* read = _global_finger;
3326 3330 HeapWord* cur = read;
3327 3331 while (f > read) {
3328 3332 cur = read;
3329 3333 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3330 3334 if (cur == read) {
3331 3335 // our cas succeeded
3332 3336 assert(_global_finger >= f, "protocol consistency");
3333 3337 break;
3334 3338 }
3335 3339 }
3336 3340 }
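// Editor's note: the loop above is the usual lock-free "monotonic maximum"
// idiom: each iteration re-reads the published finger, and the CAS installs f
// only if no other thread advanced the finger in the meantime; on failure the
// freshly observed value is retested against f before retrying.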
3337 3341
3338 3342 // This is really inefficient, and should be redone by
3339 3343 // using (not yet available) block-read and -write interfaces to the
3340 3344 // stack and the work_queue. XXX FIX ME !!!
3341 3345 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3342 3346 OopTaskQueue* work_q) {
3343 3347 // Fast lock-free check
3344 3348 if (ovflw_stk->length() == 0) {
3345 3349 return false;
3346 3350 }
3347 3351 assert(work_q->size() == 0, "Shouldn't steal");
3348 3352 MutexLockerEx ml(ovflw_stk->par_lock(),
3349 3353 Mutex::_no_safepoint_check_flag);
3350 3354 // Grab up to 1/4 the size of the work queue
3351 3355 size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3352 3356 (size_t)ParGCDesiredObjsFromOverflowList);
3353 3357 num = MIN2(num, ovflw_stk->length());
3354 3358 for (int i = (int) num; i > 0; i--) {
3355 3359 oop cur = ovflw_stk->pop();
3356 3360 assert(cur != NULL, "Counted wrong?");
3357 3361 work_q->push(cur);
3358 3362 }
3359 3363 return num > 0;
3360 3364 }
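// Editor's note: as a worked example with hypothetical numbers -- for an
// empty work queue whose max_elems is 16384, the code above transfers
// min(4096, ParGCDesiredObjsFromOverflowList, ovflw_stk->length()) oops from
// the overflow stack to the work queue.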
3361 3365
3362 3366 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3363 3367 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3364 3368 int n_tasks = pst->n_tasks();
3365 3369 // We allow that there may be no tasks to do here because
3366 3370 // we are restarting after a stack overflow.
3367 3371 assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3368 3372 uint nth_task = 0;
3369 3373
3370 3374 HeapWord* aligned_start = sp->bottom();
3371 3375 if (sp->used_region().contains(_restart_addr)) {
3372 3376 // Align down to a card boundary for the start of 0th task
3373 3377 // for this space.
3374 3378 aligned_start =
3375 3379 (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3376 3380 CardTableModRefBS::card_size);
3377 3381 }
3378 3382
3379 3383 size_t chunk_size = sp->marking_task_size();
3380 3384 while (!pst->is_task_claimed(/* reference */ nth_task)) {
3381 3385 // Having claimed the nth task in this space,
3382 3386 // compute the chunk that it corresponds to:
3383 3387 MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3384 3388 aligned_start + (nth_task+1)*chunk_size);
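// (Editor's note: e.g., with a hypothetical chunk_size of 1024 words,
// nth_task == 3 claims the chunk [aligned_start + 3072, aligned_start + 4096),
// measured in words.)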
3385 3389 // Try and bump the global finger via a CAS;
3386 3390 // note that we need to do the global finger bump
3387 3391 // _before_ taking the intersection below, because
3388 3392 // the task corresponding to that region will be
3389 3393 // deemed done even if the used_region() expands
3390 3394 // because of allocation -- as it almost certainly will
3391 3395 // during start-up while the threads yield in the
3392 3396 // closure below.
3393 3397 HeapWord* finger = span.end();
3394 3398 bump_global_finger(finger); // atomically
3395 3399 // There are null tasks here corresponding to chunks
3396 3400 // beyond the "top" address of the space.
3397 3401 span = span.intersection(sp->used_region());
3398 3402 if (!span.is_empty()) { // Non-null task
3399 3403 HeapWord* prev_obj;
3400 3404 assert(!span.contains(_restart_addr) || nth_task == 0,
3401 3405 "Inconsistency");
3402 3406 if (nth_task == 0) {
3403 3407 // For the 0th task, we'll not need to compute a block_start.
3404 3408 if (span.contains(_restart_addr)) {
3405 3409 // In the case of a restart because of stack overflow,
3406 3410 // we might additionally skip a chunk prefix.
3407 3411 prev_obj = _restart_addr;
3408 3412 } else {
3409 3413 prev_obj = span.start();
3410 3414 }
3411 3415 } else {
3412 3416 // We want to skip the first object because
3413 3417 // the protocol is to scan any object in its entirety
3414 3418 // that _starts_ in this span; a fortiori, any
3415 3419 // object starting in an earlier span is scanned
3416 3420 // as part of an earlier claimed task.
3417 3421 // Below we use the "careful" version of block_start
3418 3422 // so we do not try to navigate uninitialized objects.
3419 3423 prev_obj = sp->block_start_careful(span.start());
3420 3424 // Below we use a variant of block_size that uses the
3421 3425 // Printezis bits to avoid waiting for allocated
3422 3426 // objects to become initialized/parsable.
3423 3427 while (prev_obj < span.start()) {
3424 3428 size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3425 3429 if (sz > 0) {
3426 3430 prev_obj += sz;
3427 3431 } else {
3428 3432 // In this case we may end up doing a bit of redundant
3429 3433 // scanning, but that appears unavoidable, short of
3430 3434 // locking the free list locks; see bug 6324141.
3431 3435 break;
3432 3436 }
3433 3437 }
3434 3438 }
3435 3439 if (prev_obj < span.end()) {
3436 3440 MemRegion my_span = MemRegion(prev_obj, span.end());
3437 3441 // Do the marking work within a non-empty span --
3438 3442 // the last argument to the constructor indicates whether the
3439 3443 // iteration should be incremental with periodic yields.
3440 3444 Par_MarkFromRootsClosure cl(this, _collector, my_span,
3441 3445 &_collector->_markBitMap,
3442 3446 work_queue(i),
3443 3447 &_collector->_markStack);
3444 3448 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3445 3449 } // else nothing to do for this task
3446 3450 } // else nothing to do for this task
3447 3451 }
3448 3452 // We'd be tempted to assert here that since there are no
3449 3453 // more tasks left to claim in this space, the global_finger
3450 3454 // must exceed space->top() and a fortiori space->end(). However,
3451 3455 // that would not quite be correct because the bumping of
3452 3456 // global_finger occurs strictly after the claiming of a task,
3453 3457 // so by the time we reach here the global finger may not yet
3454 3458 // have been bumped up by the thread that claimed the last
3455 3459 // task.
3456 3460 pst->all_tasks_completed();
3457 3461 }
3458 3462
3459 3463 class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
3460 3464 private:
3461 3465 CMSCollector* _collector;
3462 3466 CMSConcMarkingTask* _task;
3463 3467 MemRegion _span;
3464 3468 CMSBitMap* _bit_map;
3465 3469 CMSMarkStack* _overflow_stack;
3466 3470 OopTaskQueue* _work_queue;
3467 3471 protected:
3468 3472 DO_OOP_WORK_DEFN
3469 3473 public:
3470 3474 Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
3471 3475 CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3472 3476 MetadataAwareOopClosure(collector->ref_processor()),
3473 3477 _collector(collector),
3474 3478 _task(task),
3475 3479 _span(collector->_span),
3476 3480 _work_queue(work_queue),
3477 3481 _bit_map(bit_map),
3478 3482 _overflow_stack(overflow_stack)
3479 3483 { }
3480 3484 virtual void do_oop(oop* p);
3481 3485 virtual void do_oop(narrowOop* p);
3482 3486
3483 3487 void trim_queue(size_t max);
3484 3488 void handle_stack_overflow(HeapWord* lost);
3485 3489 void do_yield_check() {
3486 3490 if (_task->should_yield()) {
3487 3491 _task->yield();
3488 3492 }
3489 3493 }
3490 3494 };
3491 3495
3492 3496 // Grey object scanning during work stealing phase --
3493 3497 // the salient assumption here is that any references
3494 3498 // that are in these stolen objects being scanned must
3495 3499 // already have been initialized (else they would not have
3496 3500 // been published), so we do not need to check for
3497 3501 // uninitialized objects before pushing here.
3498 3502 void Par_ConcMarkingClosure::do_oop(oop obj) {
3499 3503 assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
3500 3504 HeapWord* addr = (HeapWord*)obj;
3501 3505 // Check if oop points into the CMS generation
3502 3506 // and is not marked
3503 3507 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3504 3508 // a white object ...
3505 3509 // If we manage to "claim" the object, by being the
3506 3510 // first thread to mark it, then we push it on our
3507 3511 // marking stack
3508 3512 if (_bit_map->par_mark(addr)) { // ... now grey
3509 3513 // push on work queue (grey set)
3510 3514 bool simulate_overflow = false;
3511 3515 NOT_PRODUCT(
3512 3516 if (CMSMarkStackOverflowALot &&
3513 3517 _collector->simulate_overflow()) {
3514 3518 // simulate a stack overflow
3515 3519 simulate_overflow = true;
3516 3520 }
3517 3521 )
3518 3522 if (simulate_overflow ||
3519 3523 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3520 3524 // stack overflow
3521 3525 if (PrintCMSStatistics != 0) {
3522 3526 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
3523 3527 SIZE_FORMAT, _overflow_stack->capacity());
3524 3528 }
3525 3529 // We cannot assert that the overflow stack is full because
3526 3530 // it may have been emptied since.
3527 3531 assert(simulate_overflow ||
3528 3532 _work_queue->size() == _work_queue->max_elems(),
3529 3533 "Else push should have succeeded");
3530 3534 handle_stack_overflow(addr);
3531 3535 }
3532 3536 } // Else, some other thread got there first
3533 3537 do_yield_check();
3534 3538 }
3535 3539 }
3536 3540
3537 3541 void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
3538 3542 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
3539 3543
3540 3544 void Par_ConcMarkingClosure::trim_queue(size_t max) {
3541 3545 while (_work_queue->size() > max) {
3542 3546 oop new_oop;
3543 3547 if (_work_queue->pop_local(new_oop)) {
3544 3548 assert(new_oop->is_oop(), "Should be an oop");
3545 3549 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3546 3550 assert(_span.contains((HeapWord*)new_oop), "Not in span");
3547 3551 new_oop->oop_iterate(this); // do_oop() above
3548 3552 do_yield_check();
3549 3553 }
3550 3554 }
3551 3555 }
3552 3556
3553 3557 // Upon stack overflow, we discard (part of) the stack,
3554 3558 // remembering the least address amongst those discarded
3555 3559 // in CMSCollector's _restart_address.
3556 3560 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3557 3561 // We need to do this under a mutex to prevent other
3558 3562 // workers from interfering with the work done below.
3559 3563 MutexLockerEx ml(_overflow_stack->par_lock(),
3560 3564 Mutex::_no_safepoint_check_flag);
3561 3565 // Remember the least grey address discarded
3562 3566 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3563 3567 _collector->lower_restart_addr(ra);
3564 3568 _overflow_stack->reset(); // discard stack contents
3565 3569 _overflow_stack->expand(); // expand the stack if possible
3566 3570 }
3567 3571
3568 3572
3569 3573 void CMSConcMarkingTask::do_work_steal(int i) {
3570 3574 OopTaskQueue* work_q = work_queue(i);
3571 3575 oop obj_to_scan;
3572 3576 CMSBitMap* bm = &(_collector->_markBitMap);
3573 3577 CMSMarkStack* ovflw = &(_collector->_markStack);
3574 3578 int* seed = _collector->hash_seed(i);
3575 3579 Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3576 3580 while (true) {
3577 3581 cl.trim_queue(0);
3578 3582 assert(work_q->size() == 0, "Should have been emptied above");
3579 3583 if (get_work_from_overflow_stack(ovflw, work_q)) {
3580 3584 // Can't assert below because the work obtained from the
3581 3585 // overflow stack may already have been stolen from us.
3582 3586 // assert(work_q->size() > 0, "Work from overflow stack");
3583 3587 continue;
3584 3588 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
3585 3589 assert(obj_to_scan->is_oop(), "Should be an oop");
3586 3590 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3587 3591 obj_to_scan->oop_iterate(&cl);
3588 3592 } else if (terminator()->offer_termination(&_term_term)) {
3589 3593 assert(work_q->size() == 0, "Impossible!");
3590 3594 break;
3591 3595 } else if (yielding() || should_yield()) {
3592 3596 yield();
3593 3597 }
3594 3598 }
3595 3599 }
3596 3600
3597 3601 // This is run by the CMS (coordinator) thread.
3598 3602 void CMSConcMarkingTask::coordinator_yield() {
3599 3603 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3600 3604 "CMS thread should hold CMS token");
3601 3605 // First give up the locks, then yield, then re-lock
3602 3606 // We should probably use a constructor/destructor idiom to
3603 3607 // do this unlock/lock or modify the MutexUnlocker class to
3604 3608 // serve our purpose. XXX
3605 3609 assert_lock_strong(_bit_map_lock);
3606 3610 _bit_map_lock->unlock();
3607 3611 ConcurrentMarkSweepThread::desynchronize(true);
3608 3612 _collector->stopTimer();
3609 3613 if (PrintCMSStatistics != 0) {
3610 3614 _collector->incrementYields();
3611 3615 }
3612 3616
3613 3617 // It is possible for whichever thread initiated the yield request
3614 3618 // not to get a chance to wake up and take the bitmap lock between
3615 3619 // this thread releasing it and reacquiring it. So, while the
3616 3620 // should_yield() flag is on, let's sleep for a bit to give the
3617 3621 // other thread a chance to wake up. The limit imposed on the number
3618 3622   // of iterations is defensive, to avoid any unforeseen circumstances
3619 3623 // putting us into an infinite loop. Since it's always been this
3620 3624 // (coordinator_yield()) method that was observed to cause the
3621 3625 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3622 3626 // which is by default non-zero. For the other seven methods that
3623 3627   // also perform the yield operation, we are using a different
3624 3628   // parameter (CMSYieldSleepCount), which is by default zero. This way we
3625 3629 // can enable the sleeping for those methods too, if necessary.
3626 3630 // See 6442774.
3627 3631 //
3628 3632 // We really need to reconsider the synchronization between the GC
3629 3633 // thread and the yield-requesting threads in the future and we
3630 3634 // should really use wait/notify, which is the recommended
3631 3635 // way of doing this type of interaction. Additionally, we should
3632 3636 // consolidate the eight methods that do the yield operation and they
3633 3637 // are almost identical into one for better maintainability and
3634 3638 // readability. See 6445193.
3635 3639 //
3636 3640 // Tony 2006.06.29
3637 3641 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3638 3642 ConcurrentMarkSweepThread::should_yield() &&
3639 3643 !CMSCollector::foregroundGCIsActive(); ++i) {
3640 3644 os::sleep(Thread::current(), 1, false);
3641 3645 }
3642 3646
3643 3647 ConcurrentMarkSweepThread::synchronize(true);
3644 3648 _bit_map_lock->lock_without_safepoint_check();
3645 3649 _collector->startTimer();
3646 3650 }
3647 3651
3648 3652 bool CMSCollector::do_marking_mt() {
3649 3653 assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3650 3654 uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3651 3655 conc_workers()->active_workers(),
3652 3656 Threads::number_of_non_daemon_threads());
3653 3657 conc_workers()->set_active_workers(num_workers);
3654 3658
3655 3659 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
3656 3660
3657 3661 CMSConcMarkingTask tsk(this,
3658 3662 cms_space,
3659 3663 conc_workers(),
3660 3664 task_queues());
3661 3665
3662 3666 // Since the actual number of workers we get may be different
3663 3667 // from the number we requested above, do we need to do anything different
3664 3668   // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3665 3669 // class?? XXX
3666 3670   cms_space->initialize_sequential_subtasks_for_marking(num_workers);
3667 3671
3668 3672 // Refs discovery is already non-atomic.
3669 3673 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3670 3674 assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
3671 3675 conc_workers()->start_task(&tsk);
3672 3676 while (tsk.yielded()) {
3673 3677 tsk.coordinator_yield();
3674 3678 conc_workers()->continue_task(&tsk);
3675 3679 }
3676 3680 // If the task was aborted, _restart_addr will be non-NULL
3677 3681 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
3678 3682 while (_restart_addr != NULL) {
3679 3683 // XXX For now we do not make use of ABORTED state and have not
3680 3684 // yet implemented the right abort semantics (even in the original
3681 3685 // single-threaded CMS case). That needs some more investigation
3682 3686 // and is deferred for now; see CR# TBF. 07252005YSR. XXX
3683 3687 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
3684 3688 // If _restart_addr is non-NULL, a marking stack overflow
3685 3689 // occurred; we need to do a fresh marking iteration from the
3686 3690 // indicated restart address.
3687 3691 if (_foregroundGCIsActive) {
3688 3692 // We may be running into repeated stack overflows, having
3689 3693 // reached the limit of the stack size, while making very
3690 3694 // slow forward progress. It may be best to bail out and
3691 3695 // let the foreground collector do its job.
3692 3696 // Clear _restart_addr, so that foreground GC
3693 3697 // works from scratch. This avoids the headache of
3694 3698 // a "rescan" which would otherwise be needed because
3695 3699 // of the dirty mod union table & card table.
3696 3700 _restart_addr = NULL;
3697 3701 return false;
3698 3702 }
3699 3703 // Adjust the task to restart from _restart_addr
3700 3704 tsk.reset(_restart_addr);
3701 3705     cms_space->initialize_sequential_subtasks_for_marking(num_workers,
3702 3706 _restart_addr);
3703 3707 _restart_addr = NULL;
3704 3708 // Get the workers going again
3705 3709 conc_workers()->start_task(&tsk);
3706 3710 while (tsk.yielded()) {
3707 3711 tsk.coordinator_yield();
3708 3712 conc_workers()->continue_task(&tsk);
3709 3713 }
3710 3714 }
3711 3715 assert(tsk.completed(), "Inconsistency");
3712 3716 assert(tsk.result() == true, "Inconsistency");
3713 3717 return true;
3714 3718 }
3715 3719
3716 3720 bool CMSCollector::do_marking_st() {
3717 3721 ResourceMark rm;
3718 3722 HandleMark hm;
3719 3723
3720 3724 // Temporarily make refs discovery single threaded (non-MT)
3721 3725 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
3722 3726 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
3723 3727 &_markStack, CMSYield);
3724 3728 // the last argument to iterate indicates whether the iteration
3725 3729 // should be incremental with periodic yields.
3726 3730 _markBitMap.iterate(&markFromRootsClosure);
3727 3731 // If _restart_addr is non-NULL, a marking stack overflow
3728 3732 // occurred; we need to do a fresh iteration from the
3729 3733 // indicated restart address.
3730 3734 while (_restart_addr != NULL) {
3731 3735 if (_foregroundGCIsActive) {
3732 3736 // We may be running into repeated stack overflows, having
3733 3737 // reached the limit of the stack size, while making very
3734 3738 // slow forward progress. It may be best to bail out and
3735 3739 // let the foreground collector do its job.
3736 3740 // Clear _restart_addr, so that foreground GC
3737 3741 // works from scratch. This avoids the headache of
3738 3742 // a "rescan" which would otherwise be needed because
3739 3743 // of the dirty mod union table & card table.
3740 3744 _restart_addr = NULL;
3741 3745 return false; // indicating failure to complete marking
3742 3746 }
3743 3747 // Deal with stack overflow:
3744 3748 // we restart marking from _restart_addr
3745 3749 HeapWord* ra = _restart_addr;
3746 3750 markFromRootsClosure.reset(ra);
3747 3751 _restart_addr = NULL;
3748 3752 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
3749 3753 }
3750 3754 return true;
3751 3755 }
3752 3756
3753 3757 void CMSCollector::preclean() {
3754 3758 check_correct_thread_executing();
3755 3759 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3756 3760 verify_work_stacks_empty();
3757 3761 verify_overflow_empty();
3758 3762 _abort_preclean = false;
3759 3763 if (CMSPrecleaningEnabled) {
3760 3764 if (!CMSEdenChunksRecordAlways) {
3761 3765 _eden_chunk_index = 0;
3762 3766 }
3763 3767 size_t used = get_eden_used();
3764 3768 size_t capacity = get_eden_capacity();
3765 3769 // Don't start sampling unless we will get sufficiently
3766 3770 // many samples.
3767 3771 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
3768 3772 * CMSScheduleRemarkEdenPenetration)) {
3769 3773 _start_sampling = true;
3770 3774 } else {
3771 3775 _start_sampling = false;
3772 3776 }
3773 3777 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3774 3778 CMSPhaseAccounting pa(this, "preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3775 3779 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3776 3780 }
3777 3781 CMSTokenSync x(true); // is cms thread
3778 3782 if (CMSPrecleaningEnabled) {
3779 3783 sample_eden();
3780 3784 _collectorState = AbortablePreclean;
3781 3785 } else {
3782 3786 _collectorState = FinalMarking;
3783 3787 }
3784 3788 verify_work_stacks_empty();
3785 3789 verify_overflow_empty();
3786 3790 }
3787 3791
3788 3792 // Try and schedule the remark such that young gen
3789 3793 // occupancy is CMSScheduleRemarkEdenPenetration %.
3790 3794 void CMSCollector::abortable_preclean() {
3791 3795 check_correct_thread_executing();
3792 3796 assert(CMSPrecleaningEnabled, "Inconsistent control state");
3793 3797 assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3794 3798
3795 3799 // If Eden's current occupancy is below this threshold,
3796 3800 // immediately schedule the remark; else preclean
3797 3801 // past the next scavenge in an effort to
3798 3802 // schedule the pause as described above. By choosing
3799 3803 // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3800 3804 // we will never do an actual abortable preclean cycle.
3801 3805 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3802 3806 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3803 3807 CMSPhaseAccounting pa(this, "abortable-preclean", _gc_tracer_cm->gc_id(), !PrintGCDetails);
3804 3808 // We need more smarts in the abortable preclean
3805 3809 // loop below to deal with cases where allocation
3806 3810 // in young gen is very very slow, and our precleaning
3807 3811 // is running a losing race against a horde of
3808 3812 // mutators intent on flooding us with CMS updates
3809 3813 // (dirty cards).
3810 3814 // One, admittedly dumb, strategy is to give up
3811 3815 // after a certain number of abortable precleaning loops
3812 3816 // or after a certain maximum time. We want to make
3813 3817 // this smarter in the next iteration.
3814 3818 // XXX FIX ME!!! YSR
3815 3819 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3816 3820 while (!(should_abort_preclean() ||
3817 3821 ConcurrentMarkSweepThread::should_terminate())) {
3818 3822 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3819 3823 cumworkdone += workdone;
3820 3824 loops++;
3821 3825 // Voluntarily terminate abortable preclean phase if we have
3822 3826 // been at it for too long.
3823 3827 if ((CMSMaxAbortablePrecleanLoops != 0) &&
3824 3828 loops >= CMSMaxAbortablePrecleanLoops) {
3825 3829 if (PrintGCDetails) {
3826 3830 gclog_or_tty->print(" CMS: abort preclean due to loops ");
3827 3831 }
3828 3832 break;
3829 3833 }
3830 3834 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3831 3835 if (PrintGCDetails) {
3832 3836 gclog_or_tty->print(" CMS: abort preclean due to time ");
3833 3837 }
3834 3838 break;
3835 3839 }
3836 3840 // If we are doing little work each iteration, we should
3837 3841 // take a short break.
3838 3842 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3839 3843 // Sleep for some time, waiting for work to accumulate
3840 3844 stopTimer();
3841 3845 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3842 3846 startTimer();
3843 3847 waited++;
3844 3848 }
3845 3849 }
3846 3850 if (PrintCMSStatistics > 0) {
3847 3851       gclog_or_tty->print(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards] ",
3848 3852 loops, waited, cumworkdone);
3849 3853 }
3850 3854 }
3851 3855 CMSTokenSync x(true); // is cms thread
3852 3856 if (_collectorState != Idling) {
3853 3857 assert(_collectorState == AbortablePreclean,
3854 3858 "Spontaneous state transition?");
3855 3859 _collectorState = FinalMarking;
3856 3860 } // Else, a foreground collection completed this CMS cycle.
3857 3861 return;
3858 3862 }
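// Editor's note on the abort knobs used above (defaults stated as an
// assumption, not taken from this change): CMSMaxAbortablePrecleanLoops
// defaults to 0, which disables the loop bound, and
// CMSMaxAbortablePrecleanTime defaults to 5000 ms, so out of the box
// the phase is bounded by wall-clock time alone; e.g.
//   -XX:CMSMaxAbortablePrecleanTime=2000
// would cap abortable precleaning at roughly two seconds.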
3859 3863
3860 3864 // Respond to an Eden sampling opportunity
3861 3865 void CMSCollector::sample_eden() {
3862 3866 // Make sure a young gc cannot sneak in between our
3863 3867 // reading and recording of a sample.
3864 3868 assert(Thread::current()->is_ConcurrentGC_thread(),
3865 3869 "Only the cms thread may collect Eden samples");
3866 3870 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3867 3871 "Should collect samples while holding CMS token");
3868 3872 if (!_start_sampling) {
3869 3873 return;
3870 3874 }
3871 3875 // When CMSEdenChunksRecordAlways is true, the eden chunk array
3872 3876 // is populated by the young generation.
3873 3877 if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
3874 3878 if (_eden_chunk_index < _eden_chunk_capacity) {
3875 3879 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample
3876 3880 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
3877 3881 "Unexpected state of Eden");
3878 3882 // We'd like to check that what we just sampled is an oop-start address;
3879 3883 // however, we cannot do that here since the object may not yet have been
3880 3884 // initialized. So we'll instead do the check when we _use_ this sample
3881 3885 // later.
3882 3886 if (_eden_chunk_index == 0 ||
3883 3887 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
3884 3888 _eden_chunk_array[_eden_chunk_index-1])
3885 3889 >= CMSSamplingGrain)) {
3886 3890 _eden_chunk_index++; // commit sample
3887 3891 }
3888 3892 }
3889 3893 }
3890 3894 if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
3891 3895 size_t used = get_eden_used();
3892 3896 size_t capacity = get_eden_capacity();
3893 3897 assert(used <= capacity, "Unexpected state of Eden");
3894 3898 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
3895 3899 _abort_preclean = true;
3896 3900 }
3897 3901 }
3898 3902 }
3899 3903
3900 3904
3901 3905 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3902 3906 assert(_collectorState == Precleaning ||
3903 3907 _collectorState == AbortablePreclean, "incorrect state");
3904 3908 ResourceMark rm;
3905 3909 HandleMark hm;
3906 3910
3907 3911 // Precleaning is currently not MT but the reference processor
3908 3912 // may be set for MT. Disable it temporarily here.
3909 3913 ReferenceProcessor* rp = ref_processor();
3910 3914 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3911 3915
3912 3916 // Do one pass of scrubbing the discovered reference lists
3913 3917 // to remove any reference objects with strongly-reachable
3914 3918 // referents.
3915 3919 if (clean_refs) {
3916 3920 CMSPrecleanRefsYieldClosure yield_cl(this);
3917 3921 assert(rp->span().equals(_span), "Spans should be equal");
3918 3922 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3919 3923 &_markStack, true /* preclean */);
3920 3924 CMSDrainMarkingStackClosure complete_trace(this,
3921 3925 _span, &_markBitMap, &_markStack,
3922 3926 &keep_alive, true /* preclean */);
3923 3927
3924 3928 // We don't want this step to interfere with a young
3925 3929 // collection because we don't want to take CPU
3926 3930 // or memory bandwidth away from the young GC threads
3927 3931 // (which may be as many as there are CPUs).
3928 3932 // Note that we don't need to protect ourselves from
3929 3933 // interference with mutators because they can't
3930 3934 // manipulate the discovered reference lists nor affect
3931 3935 // the computed reachability of the referents, the
3932 3936 // only properties manipulated by the precleaning
3933 3937 // of these reference lists.
3934 3938 stopTimer();
3935 3939 CMSTokenSyncWithLocks x(true /* is cms thread */,
3936 3940 bitMapLock());
3937 3941 startTimer();
3938 3942 sample_eden();
3939 3943
3940 3944 // The following will yield to allow foreground
3941 3945 // collection to proceed promptly. XXX YSR:
3942 3946 // The code in this method may need further
3943 3947 // tweaking for better performance and some restructuring
3944 3948 // for cleaner interfaces.
3945 3949 GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
3946 3950 rp->preclean_discovered_references(
3947 3951 rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
3948 3952 gc_timer, _gc_tracer_cm->gc_id());
3949 3953 }
3950 3954
3951 3955 if (clean_survivor) { // preclean the active survivor space(s)
3952 3956 PushAndMarkClosure pam_cl(this, _span, ref_processor(),
3953 3957 &_markBitMap, &_modUnionTable,
3954 3958 &_markStack, true /* precleaning phase */);
3955 3959 stopTimer();
3956 3960 CMSTokenSyncWithLocks ts(true /* is cms thread */,
3957 3961 bitMapLock());
3958 3962 startTimer();
3959 3963 unsigned int before_count =
3960 3964 GenCollectedHeap::heap()->total_collections();
3961 3965 SurvivorSpacePrecleanClosure
3962 3966 sss_cl(this, _span, &_markBitMap, &_markStack,
3963 3967 &pam_cl, before_count, CMSYield);
3964 3968 _young_gen->from()->object_iterate_careful(&sss_cl);
3965 3969 _young_gen->to()->object_iterate_careful(&sss_cl);
3966 3970 }
3967 3971 MarkRefsIntoAndScanClosure
3968 3972 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
3969 3973 &_markStack, this, CMSYield,
3970 3974 true /* precleaning phase */);
3971 3975 // CAUTION: The following closure has persistent state that may need to
3972 3976 // be reset upon a decrease in the sequence of addresses it
3973 3977 // processes.
3974 3978 ScanMarkedObjectsAgainCarefullyClosure
3975 3979 smoac_cl(this, _span,
3976 3980 &_markBitMap, &_markStack, &mrias_cl, CMSYield);
3977 3981
3978 3982 // Preclean dirty cards in ModUnionTable and CardTable using
3979 3983 // appropriate convergence criterion;
3980 3984 // repeat CMSPrecleanIter times unless we find that
3981 3985 // we are losing.
3982 3986 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
3983 3987 assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
3984 3988 "Bad convergence multiplier");
3985 3989 assert(CMSPrecleanThreshold >= 100,
3986 3990 "Unreasonably low CMSPrecleanThreshold");
3987 3991
3988 3992 size_t numIter, cumNumCards, lastNumCards, curNumCards;
3989 3993 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3990 3994 numIter < CMSPrecleanIter;
3991 3995 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3992 3996 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl);
3993 3997 if (Verbose && PrintGCDetails) {
3994 3998 gclog_or_tty->print(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3995 3999 }
3996 4000 // Either there are very few dirty cards, so re-mark
3997 4001 // pause will be small anyway, or our pre-cleaning isn't
3998 4002 // that much faster than the rate at which cards are being
3999 4003 // dirtied, so we might as well stop and re-mark since
4000 4004 // precleaning won't improve our re-mark time by much.
4001 4005 if (curNumCards <= CMSPrecleanThreshold ||
4002 4006 (numIter > 0 &&
4003 4007 (curNumCards * CMSPrecleanDenominator >
4004 4008 lastNumCards * CMSPrecleanNumerator))) {
4005 4009 numIter++;
4006 4010 cumNumCards += curNumCards;
4007 4011 break;
4008 4012 }
4009 4013 }
4010 4014
4011 4015 preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
4012 4016
4013 4017 curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4014 4018 cumNumCards += curNumCards;
4015 4019 if (PrintGCDetails && PrintCMSStatistics != 0) {
4016 4020 gclog_or_tty->print_cr(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
4017 4021 curNumCards, cumNumCards, numIter);
4018 4022 }
4019 4023 return cumNumCards; // as a measure of useful work done
4020 4024 }
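// Illustrative sketch (not collector code): the "losing" test from the
// convergence loop in preclean_work() above. Assuming the stock
// defaults CMSPrecleanNumerator = 2 and CMSPrecleanDenominator = 3,
// iteration continues only while each pass shrinks the dirty-card
// count by more than a third: lastNumCards = 3000 and curNumCards =
// 2500 give 2500 * 3 = 7500 > 3000 * 2 = 6000, so we stop; a drop to
// 1800 gives 1800 * 3 = 5400 <= 6000, so we keep precleaning.
static bool preclean_is_losing_sketch(size_t cur_cards, size_t last_cards,
                                      size_t numerator, size_t denominator) {
  // Written multiplicatively to avoid integer-division truncation.
  return cur_cards * denominator > last_cards * numerator;
}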
4021 4025
4022 4026 // PRECLEANING NOTES:
4023 4027 // Precleaning involves:
4024 4028 // . reading the bits of the modUnionTable and clearing the set bits.
4025 4029 // . For the cards corresponding to the set bits, we scan the
4026 4030 // objects on those cards. This means we need the free_list_lock
4027 4031 // so that we can safely iterate over the CMS space when scanning
4028 4032 // for oops.
4029 4033 // . When we scan the objects, we'll be both reading and setting
4030 4034 //   marks in the marking bit map, so we'll need the marking bit map lock.
4031 4035 // . For protecting _collector_state transitions, we take the CGC_lock.
4032 4036 // Note that any races in the reading of card table entries by the
4033 4037 // CMS thread on the one hand and the clearing of those entries by the
4034 4038 // VM thread or the setting of those entries by the mutator threads on the
4035 4039 // other are quite benign. However, for efficiency it makes sense to keep
4036 4040 // the VM thread from racing with the CMS thread while the latter is
4037 4041 // transferring dirty card info to the modUnionTable. We therefore also use
4038 4042 // the CGC_lock to protect the reading of the card table and the mod union
4039 4043 // table by the CMS thread.
4040 4044 // . We run concurrently with mutator updates, so scanning
4041 4045 // needs to be done carefully -- we should not try to scan
4042 4046 // potentially uninitialized objects.
4043 4047 //
4044 4048 // Locking strategy: While holding the CGC_lock, we scan over and
4045 4049 // reset a maximal dirty range of the mod union / card tables, then lock
4046 4050 // the free_list_lock and bitmap lock to do a full marking, then
4047 4051 // release these locks; and repeat the cycle. This allows for a
4048 4052 // certain amount of fairness in the sharing of these locks between
4049 4053 // the CMS collector on the one hand, and the VM thread and the
4050 4054 // mutators on the other.
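// A condensed, illustrative sketch of the cycle just described, using
// the shape of the two preclean methods below (the dirty-range step is
// getAndClearMarkedRegion() for the MUT, dirty_card_range_after_reset()
// for the card table):
//
//   while (nextAddr < endAddr) {
//     { CMSTokenSync ts(true);              // CGC_lock only
//       dirtyRegion = <get and reset a maximal dirty range>; }
//     { CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock);
//       object_iterate_careful_m(dirtyRegion, cl);  // full marking
//     }                                     // locks released; repeat
//   }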
4051 4055
4052 4056 // NOTE: preclean_mod_union_table() and preclean_card_table()
4053 4057 // further below are largely identical; if you need to modify
4054 4058 // one of these methods, please check the other method too.
4055 4059
4056 4060 size_t CMSCollector::preclean_mod_union_table(
4057 4061 ConcurrentMarkSweepGeneration* gen,
4058 4062 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4059 4063 verify_work_stacks_empty();
4060 4064 verify_overflow_empty();
4061 4065
4062 4066 // strategy: starting with the first card, accumulate contiguous
4063 4067 // ranges of dirty cards; clear these cards, then scan the region
4064 4068 // covered by these cards.
4065 4069
4066 4070 // Since all of the MUT is committed ahead, we can just use
4067 4071 // that, in case the generations expand while we are precleaning.
4068 4072 // It might also be fine to just use the committed part of the
4069 4073 // generation, but we might potentially miss cards when the
4070 4074 // generation is rapidly expanding while we are in the midst
4071 4075 // of precleaning.
4072 4076 HeapWord* startAddr = gen->reserved().start();
4073 4077 HeapWord* endAddr = gen->reserved().end();
4074 4078
4075 4079 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4076 4080
4077 4081 size_t numDirtyCards, cumNumDirtyCards;
4078 4082 HeapWord *nextAddr, *lastAddr;
4079 4083 for (cumNumDirtyCards = numDirtyCards = 0,
4080 4084 nextAddr = lastAddr = startAddr;
4081 4085 nextAddr < endAddr;
4082 4086 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4083 4087
4084 4088 ResourceMark rm;
4085 4089 HandleMark hm;
4086 4090
4087 4091 MemRegion dirtyRegion;
4088 4092 {
4089 4093 stopTimer();
4090 4094 // Potential yield point
4091 4095 CMSTokenSync ts(true);
4092 4096 startTimer();
4093 4097 sample_eden();
4094 4098       // Get dirty region starting at nextAddr (inclusive),
4095 4099 // simultaneously clearing it.
4096 4100 dirtyRegion =
4097 4101 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4098 4102 assert(dirtyRegion.start() >= nextAddr,
4099 4103 "returned region inconsistent?");
4100 4104 }
4101 4105 // Remember where the next search should begin.
4102 4106 // The returned region (if non-empty) is a right open interval,
4103 4107     // so lastAddr is obtained from the right end of that
4104 4108 // interval.
4105 4109 lastAddr = dirtyRegion.end();
4106 4110 // Should do something more transparent and less hacky XXX
4107 4111 numDirtyCards =
4108 4112 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4109 4113
4110 4114 // We'll scan the cards in the dirty region (with periodic
4111 4115 // yields for foreground GC as needed).
4112 4116 if (!dirtyRegion.is_empty()) {
4113 4117 assert(numDirtyCards > 0, "consistency check");
4114 4118 HeapWord* stop_point = NULL;
4115 4119 stopTimer();
4116 4120 // Potential yield point
4117 4121 CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4118 4122 bitMapLock());
4119 4123 startTimer();
4120 4124 {
4121 4125 verify_work_stacks_empty();
4122 4126 verify_overflow_empty();
4123 4127 sample_eden();
4124 4128 stop_point =
4125 4129 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4126 4130 }
4127 4131 if (stop_point != NULL) {
4128 4132 // The careful iteration stopped early either because it found an
4129 4133 // uninitialized object, or because we were in the midst of an
4130 4134 // "abortable preclean", which should now be aborted. Redirty
4131 4135 // the bits corresponding to the partially-scanned or unscanned
4132 4136 // cards. We'll either restart at the next block boundary or
4133 4137 // abort the preclean.
4134 4138 assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4135 4139 "Should only be AbortablePreclean.");
4136 4140 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4137 4141 if (should_abort_preclean()) {
4138 4142 break; // out of preclean loop
4139 4143 } else {
4140 4144 // Compute the next address at which preclean should pick up;
4141 4145 // might need bitMapLock in order to read P-bits.
4142 4146 lastAddr = next_card_start_after_block(stop_point);
4143 4147 }
4144 4148 }
4145 4149 } else {
4146 4150 assert(lastAddr == endAddr, "consistency check");
4147 4151 assert(numDirtyCards == 0, "consistency check");
4148 4152 break;
4149 4153 }
4150 4154 }
4151 4155 verify_work_stacks_empty();
4152 4156 verify_overflow_empty();
4153 4157 return cumNumDirtyCards;
4154 4158 }
4155 4159
4156 4160 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4157 4161 // below are largely identical; if you need to modify
4158 4162 // one of these methods, please check the other method too.
4159 4163
4160 4164 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4161 4165 ScanMarkedObjectsAgainCarefullyClosure* cl) {
4162 4166   // strategy: it's similar to preclean_mod_union_table above, in that
4163 4167 // we accumulate contiguous ranges of dirty cards, mark these cards
4164 4168 // precleaned, then scan the region covered by these cards.
4165 4169 HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high());
4166 4170 HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4167 4171
4168 4172 cl->setFreelistLock(gen->freelistLock()); // needed for yielding
4169 4173
4170 4174 size_t numDirtyCards, cumNumDirtyCards;
4171 4175 HeapWord *lastAddr, *nextAddr;
4172 4176
4173 4177 for (cumNumDirtyCards = numDirtyCards = 0,
4174 4178 nextAddr = lastAddr = startAddr;
4175 4179 nextAddr < endAddr;
4176 4180 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4177 4181
4178 4182 ResourceMark rm;
4179 4183 HandleMark hm;
4180 4184
4181 4185 MemRegion dirtyRegion;
4182 4186 {
4183 4187 // See comments in "Precleaning notes" above on why we
4184 4188 // do this locking. XXX Could the locking overheads be
4185 4189 // too high when dirty cards are sparse? [I don't think so.]
4186 4190 stopTimer();
4187 4191 CMSTokenSync x(true); // is cms thread
4188 4192 startTimer();
4189 4193 sample_eden();
4190 4194 // Get and clear dirty region from card table
4191 4195 dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4192 4196 MemRegion(nextAddr, endAddr),
4193 4197 true,
4194 4198 CardTableModRefBS::precleaned_card_val());
4195 4199
4196 4200 assert(dirtyRegion.start() >= nextAddr,
4197 4201 "returned region inconsistent?");
4198 4202 }
4199 4203 lastAddr = dirtyRegion.end();
4200 4204 numDirtyCards =
4201 4205 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4202 4206
4203 4207 if (!dirtyRegion.is_empty()) {
4204 4208 stopTimer();
4205 4209 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4206 4210 startTimer();
4207 4211 sample_eden();
4208 4212 verify_work_stacks_empty();
4209 4213 verify_overflow_empty();
4210 4214 HeapWord* stop_point =
4211 4215 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4212 4216 if (stop_point != NULL) {
4213 4217 assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4214 4218 "Should only be AbortablePreclean.");
4215 4219 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4216 4220 if (should_abort_preclean()) {
4217 4221 break; // out of preclean loop
4218 4222 } else {
4219 4223 // Compute the next address at which preclean should pick up.
4220 4224 lastAddr = next_card_start_after_block(stop_point);
4221 4225 }
4222 4226 }
4223 4227 } else {
4224 4228 break;
4225 4229 }
4226 4230 }
4227 4231 verify_work_stacks_empty();
4228 4232 verify_overflow_empty();
4229 4233 return cumNumDirtyCards;
4230 4234 }
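// Editor's note: unlike preclean_mod_union_table(), which clears MUT
// bits outright, this pass resets dirty cards to the distinct
// "precleaned" value (the precleaned_card_val() argument above). That
// distinguishes cards already visited by precleaning from cards newly
// dirtied by mutators afterwards; an aborted scan re-dirties the
// unscanned suffix via invalidate() so the remark phase still sees it.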
4231 4235
4232 4236 class PrecleanKlassClosure : public KlassClosure {
4233 4237 KlassToOopClosure _cm_klass_closure;
4234 4238 public:
4235 4239 PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4236 4240 void do_klass(Klass* k) {
4237 4241 if (k->has_accumulated_modified_oops()) {
4238 4242 k->clear_accumulated_modified_oops();
4239 4243
4240 4244 _cm_klass_closure.do_klass(k);
4241 4245 }
4242 4246 }
4243 4247 };
4244 4248
4245 4249 // The freelist lock is needed to prevent asserts; is it really needed?
4246 4250 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4247 4251
4248 4252 cl->set_freelistLock(freelistLock);
4249 4253
4250 4254 CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4251 4255
4252 4256 // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4253 4257 // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4254 4258 PrecleanKlassClosure preclean_klass_closure(cl);
4255 4259 ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4256 4260
4257 4261 verify_work_stacks_empty();
4258 4262 verify_overflow_empty();
4259 4263 }
4260 4264
4261 4265 void CMSCollector::checkpointRootsFinal() {
4262 4266 assert(_collectorState == FinalMarking, "incorrect state transition?");
4263 4267 check_correct_thread_executing();
4264 4268 // world is stopped at this checkpoint
4265 4269 assert(SafepointSynchronize::is_at_safepoint(),
4266 4270 "world should be stopped");
4267 4271 TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
4268 4272
4269 4273 verify_work_stacks_empty();
4270 4274 verify_overflow_empty();
4271 4275
4272 4276 if (PrintGCDetails) {
4273 4277 gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4274 4278 _young_gen->used() / K,
4275 4279 _young_gen->capacity() / K);
4276 4280 }
4277 4281 {
4278 4282 if (CMSScavengeBeforeRemark) {
4279 4283 GenCollectedHeap* gch = GenCollectedHeap::heap();
4280 4284       // Temporarily set the flag to false; GCH->do_collection expects
4281 4285       // it to be false and will set it to true.
4282 4286 FlagSetting fl(gch->_is_gc_active, false);
4283 4287 NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4284 4288 PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4285 4289 int level = _cmsGen->level() - 1;
4286 4290 if (level >= 0) {
4287 4291 gch->do_collection(true, // full (i.e. force, see below)
4288 4292 false, // !clear_all_soft_refs
4289 4293 0, // size
4290 4294 false, // is_tlab
4291 4295 level // max_level
4292 4296 );
4293 4297 }
4294 4298 }
4295 4299 FreelistLocker x(this);
4296 4300 MutexLockerEx y(bitMapLock(),
4297 4301 Mutex::_no_safepoint_check_flag);
4298 4302 checkpointRootsFinalWork();
4299 4303 }
4300 4304 verify_work_stacks_empty();
4301 4305 verify_overflow_empty();
4302 4306 }
4303 4307
4304 4308 void CMSCollector::checkpointRootsFinalWork() {
4305 4309 NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4306 4310
4307 4311 assert(haveFreelistLocks(), "must have free list locks");
4308 4312 assert_lock_strong(bitMapLock());
4309 4313
4310 4314 ResourceMark rm;
4311 4315 HandleMark hm;
4312 4316
4313 4317 GenCollectedHeap* gch = GenCollectedHeap::heap();
4314 4318
4315 4319 if (should_unload_classes()) {
4316 4320 CodeCache::gc_prologue();
4317 4321 }
4318 4322 assert(haveFreelistLocks(), "must have free list locks");
4319 4323 assert_lock_strong(bitMapLock());
4320 4324
4321 4325 // We might assume that we need not fill TLAB's when
4322 4326 // CMSScavengeBeforeRemark is set, because we may have just done
4323 4327 // a scavenge which would have filled all TLAB's -- and besides
4324 4328 // Eden would be empty. This however may not always be the case --
4325 4329 // for instance although we asked for a scavenge, it may not have
4326 4330 // happened because of a JNI critical section. We probably need
4327 4331 // a policy for deciding whether we can in that case wait until
4328 4332 // the critical section releases and then do the remark following
4329 4333 // the scavenge, and skip it here. In the absence of that policy,
4330 4334 // or of an indication of whether the scavenge did indeed occur,
4331 4335 // we cannot rely on TLAB's having been filled and must do
4332 4336 // so here just in case a scavenge did not happen.
4333 4337 gch->ensure_parsability(false); // fill TLAB's, but no need to retire them
4334 4338 // Update the saved marks which may affect the root scans.
4335 4339 gch->save_marks();
4336 4340
4337 4341 if (CMSPrintEdenSurvivorChunks) {
4338 4342 print_eden_and_survivor_chunk_arrays();
4339 4343 }
4340 4344
4341 4345 {
4342 4346 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4343 4347
4344 4348 // Note on the role of the mod union table:
4345 4349 // Since the marker in "markFromRoots" marks concurrently with
4346 4350 // mutators, it is possible for some reachable objects not to have been
4347 4351     // scanned. For instance, the only reference to an object A may have been
4348 4352 // placed in object B after the marker scanned B. Unless B is rescanned,
4349 4353 // A would be collected. Such updates to references in marked objects
4350 4354 // are detected via the mod union table which is the set of all cards
4351 4355 // dirtied since the first checkpoint in this GC cycle and prior to
4352 4356 // the most recent young generation GC, minus those cleaned up by the
4353 4357 // concurrent precleaning.
4354 4358 if (CMSParallelRemarkEnabled) {
4355 4359 GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
4356 4360 do_remark_parallel();
4357 4361 } else {
4358 4362 GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4359 4363 _gc_timer_cm, _gc_tracer_cm->gc_id());
4360 4364 do_remark_non_parallel();
4361 4365 }
4362 4366 }
4363 4367 verify_work_stacks_empty();
4364 4368 verify_overflow_empty();
4365 4369
4366 4370 {
4367 4371 NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
4368 4372 refProcessingWork();
4369 4373 }
4370 4374 verify_work_stacks_empty();
4371 4375 verify_overflow_empty();
4372 4376
4373 4377 if (should_unload_classes()) {
4374 4378 CodeCache::gc_epilogue();
4375 4379 }
4376 4380 JvmtiExport::gc_epilogue();
4377 4381
4378 4382 // If we encountered any (marking stack / work queue) overflow
4379 4383 // events during the current CMS cycle, take appropriate
4380 4384 // remedial measures, where possible, so as to try and avoid
4381 4385 // recurrence of that condition.
4382 4386 assert(_markStack.isEmpty(), "No grey objects");
4383 4387 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4384 4388 _ser_kac_ovflw + _ser_kac_preclean_ovflw;
4385 4389 if (ser_ovflw > 0) {
4386 4390 if (PrintCMSStatistics != 0) {
4387 4391 gclog_or_tty->print_cr("Marking stack overflow (benign) "
4388 4392 "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
4389 4393 ", kac_preclean="SIZE_FORMAT")",
4390 4394 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4391 4395 _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4392 4396 }
4393 4397 _markStack.expand();
4394 4398 _ser_pmc_remark_ovflw = 0;
4395 4399 _ser_pmc_preclean_ovflw = 0;
4396 4400 _ser_kac_preclean_ovflw = 0;
4397 4401 _ser_kac_ovflw = 0;
4398 4402 }
4399 4403 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4400 4404 if (PrintCMSStatistics != 0) {
4401 4405 gclog_or_tty->print_cr("Work queue overflow (benign) "
4402 4406 "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4403 4407 _par_pmc_remark_ovflw, _par_kac_ovflw);
4404 4408 }
4405 4409 _par_pmc_remark_ovflw = 0;
4406 4410 _par_kac_ovflw = 0;
4407 4411 }
4408 4412 if (PrintCMSStatistics != 0) {
4409 4413 if (_markStack._hit_limit > 0) {
4410 4414 gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
4411 4415 _markStack._hit_limit);
4412 4416 }
4413 4417 if (_markStack._failed_double > 0) {
4414 4418 gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
4415 4419 " current capacity "SIZE_FORMAT,
4416 4420 _markStack._failed_double,
4417 4421 _markStack.capacity());
4418 4422 }
4419 4423 }
4420 4424 _markStack._hit_limit = 0;
4421 4425 _markStack._failed_double = 0;
4422 4426
4423 4427 if ((VerifyAfterGC || VerifyDuringGC) &&
4424 4428 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4425 4429 verify_after_remark();
4426 4430 }
4427 4431
4428 4432 _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4429 4433
4430 4434 // Change under the freelistLocks.
4431 4435 _collectorState = Sweeping;
4432 4436 // Call isAllClear() under bitMapLock
4433 4437 assert(_modUnionTable.isAllClear(),
4434 4438 "Should be clear by end of the final marking");
4435 4439 assert(_ct->klass_rem_set()->mod_union_is_clear(),
4436 4440 "Should be clear by end of the final marking");
4437 4441 }
4438 4442
4439 4443 void CMSParInitialMarkTask::work(uint worker_id) {
4440 4444 elapsedTimer _timer;
4441 4445 ResourceMark rm;
4442 4446 HandleMark hm;
4443 4447
4444 4448 // ---------- scan from roots --------------
4445 4449 _timer.start();
4446 4450 GenCollectedHeap* gch = GenCollectedHeap::heap();
4447 4451 Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4448 4452
4449 4453 // ---------- young gen roots --------------
4450 4454 {
4451 4455 work_on_young_gen_roots(worker_id, &par_mri_cl);
4452 4456 _timer.stop();
4453 4457 if (PrintCMSStatistics != 0) {
4454 4458 gclog_or_tty->print_cr(
4455 4459 "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
4456 4460 worker_id, _timer.seconds());
4457 4461 }
4458 4462 }
4459 4463
4460 4464 // ---------- remaining roots --------------
4461 4465 _timer.reset();
4462 4466 _timer.start();
4463 4467
4464 4468 CLDToOopClosure cld_closure(&par_mri_cl, true);
4465 4469
4466 4470 gch->gen_process_roots(_strong_roots_scope,
4467 4471 _collector->_cmsGen->level(),
4468 4472 false, // yg was scanned above
4469 4473 GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4470 4474 _collector->should_unload_classes(),
4471 4475 &par_mri_cl,
4472 4476 NULL,
4473 4477 &cld_closure);
4474 4478 assert(_collector->should_unload_classes()
4475 4479 || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4476 4480 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4477 4481 _timer.stop();
4478 4482 if (PrintCMSStatistics != 0) {
4479 4483 gclog_or_tty->print_cr(
4480 4484 "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
4481 4485 worker_id, _timer.seconds());
4482 4486 }
4483 4487 }
4484 4488
4485 4489 // Parallel remark task
4486 4490 class CMSParRemarkTask: public CMSParMarkTask {
4487 4491 CompactibleFreeListSpace* _cms_space;
4488 4492
4489 4493 // The per-thread work queues, available here for stealing.
4490 4494 OopTaskQueueSet* _task_queues;
4491 4495 ParallelTaskTerminator _term;
4492 4496 StrongRootsScope* _strong_roots_scope;
4493 4497
4494 4498 public:
4495 4499 // A value of 0 passed to n_workers will cause the number of
4496 4500 // workers to be taken from the active workers in the work gang.
4497 4501 CMSParRemarkTask(CMSCollector* collector,
4498 4502 CompactibleFreeListSpace* cms_space,
4499 4503 uint n_workers, FlexibleWorkGang* workers,
4500 4504 OopTaskQueueSet* task_queues,
4501 4505 StrongRootsScope* strong_roots_scope):
4502 4506 CMSParMarkTask("Rescan roots and grey objects in parallel",
4503 4507 collector, n_workers),
4504 4508 _cms_space(cms_space),
4505 4509 _task_queues(task_queues),
4506 4510 _term(n_workers, task_queues),
4507 4511 _strong_roots_scope(strong_roots_scope) { }
4508 4512
4509 4513 OopTaskQueueSet* task_queues() { return _task_queues; }
4510 4514
4511 4515 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4512 4516
4513 4517 ParallelTaskTerminator* terminator() { return &_term; }
4514 4518 uint n_workers() { return _n_workers; }
4515 4519
4516 4520 void work(uint worker_id);
4517 4521
4518 4522 private:
4519 4523 // ... of dirty cards in old space
4520 4524 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4521 4525 Par_MarkRefsIntoAndScanClosure* cl);
4522 4526
4523 4527 // ... work stealing for the above
4524 4528 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
4525 4529 };
4526 4530
4527 4531 class RemarkKlassClosure : public KlassClosure {
4528 4532 KlassToOopClosure _cm_klass_closure;
4529 4533 public:
4530 4534 RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4531 4535 void do_klass(Klass* k) {
4532 4536 // Check if we have modified any oops in the Klass during the concurrent marking.
4533 4537 if (k->has_accumulated_modified_oops()) {
4534 4538 k->clear_accumulated_modified_oops();
4535 4539
4536 4540       // We could have transferred the current modified marks to the accumulated marks,
4537 4541 // like we do with the Card Table to Mod Union Table. But it's not really necessary.
4538 4542 } else if (k->has_modified_oops()) {
4539 4543 // Don't clear anything, this info is needed by the next young collection.
4540 4544 } else {
4541 4545 // No modified oops in the Klass.
4542 4546 return;
4543 4547 }
4544 4548
4545 4549 // The klass has modified fields, need to scan the klass.
4546 4550 _cm_klass_closure.do_klass(k);
4547 4551 }
4548 4552 };
4549 4553
4550 4554 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
4551 4555 ParNewGeneration* young_gen = _collector->_young_gen;
4552 4556 ContiguousSpace* eden_space = young_gen->eden();
4553 4557 ContiguousSpace* from_space = young_gen->from();
4554 4558 ContiguousSpace* to_space = young_gen->to();
4555 4559
4556 4560 HeapWord** eca = _collector->_eden_chunk_array;
4557 4561 size_t ect = _collector->_eden_chunk_index;
4558 4562 HeapWord** sca = _collector->_survivor_chunk_array;
4559 4563 size_t sct = _collector->_survivor_chunk_index;
4560 4564
4561 4565 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4562 4566 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4563 4567
4564 4568 do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
4565 4569 do_young_space_rescan(worker_id, cl, from_space, sca, sct);
4566 4570 do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
4567 4571 }
4568 4572
4569 4573 // work_queue(i) is passed to the closure
4570 4574 // Par_MarkRefsIntoAndScanClosure. The "i" parameter
4571 4575 // is also passed to do_dirty_card_rescan_tasks() and to
4572 4576 // do_work_steal() to select the i-th task_queue.
4573 4577
4574 4578 void CMSParRemarkTask::work(uint worker_id) {
4575 4579 elapsedTimer _timer;
4576 4580 ResourceMark rm;
4577 4581 HandleMark hm;
4578 4582
4579 4583 // ---------- rescan from roots --------------
4580 4584 _timer.start();
4581 4585 GenCollectedHeap* gch = GenCollectedHeap::heap();
4582 4586 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4583 4587 _collector->_span, _collector->ref_processor(),
4584 4588 &(_collector->_markBitMap),
4585 4589 work_queue(worker_id));
4586 4590
4587 4591 // Rescan young gen roots first since these are likely
4588 4592 // coarsely partitioned and may, on that account, constitute
4589 4593 // the critical path; thus, it's best to start off that
4590 4594 // work first.
4591 4595 // ---------- young gen roots --------------
4592 4596 {
4593 4597 work_on_young_gen_roots(worker_id, &par_mrias_cl);
4594 4598 _timer.stop();
4595 4599 if (PrintCMSStatistics != 0) {
4596 4600 gclog_or_tty->print_cr(
4597 4601 "Finished young gen rescan work in %dth thread: %3.3f sec",
4598 4602 worker_id, _timer.seconds());
4599 4603 }
4600 4604 }
4601 4605
4602 4606 // ---------- remaining roots --------------
4603 4607 _timer.reset();
4604 4608 _timer.start();
4605 4609 gch->gen_process_roots(_strong_roots_scope,
4606 4610 _collector->_cmsGen->level(),
4607 4611 false, // yg was scanned above
4608 4612 GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4609 4613 _collector->should_unload_classes(),
4610 4614 &par_mrias_cl,
4611 4615 NULL,
4612 4616 NULL); // The dirty klasses will be handled below
4613 4617
4614 4618 assert(_collector->should_unload_classes()
4615 4619 || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4616 4620 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4617 4621 _timer.stop();
4618 4622 if (PrintCMSStatistics != 0) {
4619 4623 gclog_or_tty->print_cr(
4620 4624 "Finished remaining root rescan work in %dth thread: %3.3f sec",
4621 4625 worker_id, _timer.seconds());
4622 4626 }
4623 4627
4624 4628 // ---------- unhandled CLD scanning ----------
4625 4629 if (worker_id == 0) { // Single threaded at the moment.
4626 4630 _timer.reset();
4627 4631 _timer.start();
4628 4632
4629 4633 // Scan all new class loader data objects and new dependencies that were
4630 4634 // introduced during concurrent marking.
4631 4635 ResourceMark rm;
4632 4636 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4633 4637 for (int i = 0; i < array->length(); i++) {
4634 4638 par_mrias_cl.do_class_loader_data(array->at(i));
4635 4639 }
4636 4640
4637 4641 // We don't need to keep track of new CLDs anymore.
4638 4642 ClassLoaderDataGraph::remember_new_clds(false);
4639 4643
4640 4644 _timer.stop();
4641 4645 if (PrintCMSStatistics != 0) {
4642 4646 gclog_or_tty->print_cr(
4643 4647 "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
4644 4648 worker_id, _timer.seconds());
4645 4649 }
4646 4650 }
4647 4651
4648 4652 // ---------- dirty klass scanning ----------
4649 4653 if (worker_id == 0) { // Single threaded at the moment.
4650 4654 _timer.reset();
4651 4655 _timer.start();
4652 4656
4653 4657     // Scan all classes that were dirtied during the concurrent marking phase.
4654 4658 RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
4655 4659 ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4656 4660
4657 4661 _timer.stop();
4658 4662 if (PrintCMSStatistics != 0) {
4659 4663 gclog_or_tty->print_cr(
4660 4664 "Finished dirty klass scanning work in %dth thread: %3.3f sec",
4661 4665 worker_id, _timer.seconds());
4662 4666 }
4663 4667 }
4664 4668
4665 4669 // We might have added oops to ClassLoaderData::_handles during the
4666 4670 // concurrent marking phase. These oops point to newly allocated objects
4667 4671   // that are guaranteed to be kept alive, either by the direct allocation
4668 4672   // code or by the young collector when it processes the roots. Hence,
4669 4673 // we don't have to revisit the _handles block during the remark phase.
4670 4674
4671 4675 // ---------- rescan dirty cards ------------
4672 4676 _timer.reset();
4673 4677 _timer.start();
4674 4678
4675 4679 // Do the rescan tasks for each of the two spaces
4676 4680 // (cms_space) in turn.
4677 4681 // "worker_id" is passed to select the task_queue for "worker_id"
4678 4682 do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4679 4683 _timer.stop();
4680 4684 if (PrintCMSStatistics != 0) {
4681 4685 gclog_or_tty->print_cr(
4682 4686 "Finished dirty card rescan work in %dth thread: %3.3f sec",
4683 4687 worker_id, _timer.seconds());
4684 4688 }
4685 4689
4686 4690 // ---------- steal work from other threads ...
4687 4691 // ---------- ... and drain overflow list.
4688 4692 _timer.reset();
4689 4693 _timer.start();
4690 4694 do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4691 4695 _timer.stop();
4692 4696 if (PrintCMSStatistics != 0) {
4693 4697 gclog_or_tty->print_cr(
4694 4698 "Finished work stealing in %dth thread: %3.3f sec",
4695 4699 worker_id, _timer.seconds());
4696 4700 }
4697 4701 }
4698 4702
4699 4703 // Note that the "worker_id" parameter is not used.
4700 4704 void
4701 4705 CMSParMarkTask::do_young_space_rescan(uint worker_id,
4702 4706 OopsInGenClosure* cl, ContiguousSpace* space,
4703 4707 HeapWord** chunk_array, size_t chunk_top) {
4704 4708 // Until all tasks completed:
4705 4709 // . claim an unclaimed task
4706 4710 // . compute region boundaries corresponding to task claimed
4707 4711 // using chunk_array
4708 4712 // . par_oop_iterate(cl) over that region
4709 4713
4710 4714 ResourceMark rm;
4711 4715 HandleMark hm;
4712 4716
4713 4717 SequentialSubTasksDone* pst = space->par_seq_tasks();
4714 4718
4715 4719 uint nth_task = 0;
4716 4720 uint n_tasks = pst->n_tasks();
4717 4721
4718 4722 if (n_tasks > 0) {
4719 4723 assert(pst->valid(), "Uninitialized use?");
4720 4724 HeapWord *start, *end;
4721 4725 while (!pst->is_task_claimed(/* reference */ nth_task)) {
4722 4726 // We claimed task # nth_task; compute its boundaries.
4723 4727 if (chunk_top == 0) { // no samples were taken
4724 4728 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4725 4729 start = space->bottom();
4726 4730 end = space->top();
4727 4731 } else if (nth_task == 0) {
4728 4732 start = space->bottom();
4729 4733 end = chunk_array[nth_task];
4730 4734 } else if (nth_task < (uint)chunk_top) {
4731 4735 assert(nth_task >= 1, "Control point invariant");
4732 4736 start = chunk_array[nth_task - 1];
4733 4737 end = chunk_array[nth_task];
4734 4738 } else {
4735 4739 assert(nth_task == (uint)chunk_top, "Control point invariant");
4736 4740 start = chunk_array[chunk_top - 1];
4737 4741 end = space->top();
4738 4742 }
4739 4743 MemRegion mr(start, end);
4740 4744 // Verify that mr is in space
4741 4745 assert(mr.is_empty() || space->used_region().contains(mr),
4742 4746 "Should be in space");
4743 4747 // Verify that "start" is an object boundary
4744 4748 assert(mr.is_empty() || oop(mr.start())->is_oop(),
4745 4749 "Should be an oop");
4746 4750 space->par_oop_iterate(mr, cl);
4747 4751 }
4748 4752 pst->all_tasks_completed();
4749 4753 }
4750 4754 }
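// Editor's worked example of the boundaries computed above (addresses
// hypothetical): with chunk_array = {c0, c1} and chunk_top = 2, three
// tasks result:
//   task 0: [bottom, c0)    task 1: [c0, c1)    task 2: [c1, top)
// while chunk_top == 0 (no samples taken) leaves the whole of
// [bottom, top) as a single task.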
4751 4755
4752 4756 void
4753 4757 CMSParRemarkTask::do_dirty_card_rescan_tasks(
4754 4758 CompactibleFreeListSpace* sp, int i,
4755 4759 Par_MarkRefsIntoAndScanClosure* cl) {
4756 4760 // Until all tasks completed:
4757 4761 // . claim an unclaimed task
4758 4762 // . compute region boundaries corresponding to task claimed
4759 4763 // . transfer dirty bits ct->mut for that region
4760 4764 // . apply rescanclosure to dirty mut bits for that region
4761 4765
4762 4766 ResourceMark rm;
4763 4767 HandleMark hm;
4764 4768
4765 4769 OopTaskQueue* work_q = work_queue(i);
4766 4770 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
4767 4771 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
4768 4772 // CAUTION: This closure has state that persists across calls to
4769 4773 // the work method dirty_range_iterate_clear() in that it has
4770 4774 // embedded in it a (subtype of) UpwardsObjectClosure. The
4771 4775 // use of that state in the embedded UpwardsObjectClosure instance
4772 4776 // assumes that the cards are always iterated (even if in parallel
4773 4777 // by several threads) in monotonically increasing order per each
4774 4778 // thread. This is true of the implementation below which picks
4775 4779 // card ranges (chunks) in monotonically increasing order globally
4776 4780 // and, a-fortiori, in monotonically increasing order per thread
4777 4781 // (the latter order being a subsequence of the former).
4778 4782 // If the work code below is ever reorganized into a more chaotic
4779 4783 // work-partitioning form than the current "sequential tasks"
4780 4784 // paradigm, the use of that persistent state will have to be
4781 4785 // revisited and modified appropriately. See also related
4782 4786 // bug 4756801 work on which should examine this code to make
4783 4787 // sure that the changes there do not run counter to the
4784 4788 // assumptions made here and necessary for correctness and
4785 4789 // efficiency. Note also that this code might yield inefficient
4786 4790 // behavior in the case of very large objects that span one or
4787 4791 // more work chunks. Such objects would potentially be scanned
4788 4792 // several times redundantly. Work on 4756801 should try and
4789 4793 // address that performance anomaly if at all possible. XXX
4790 4794 MemRegion full_span = _collector->_span;
4791 4795 CMSBitMap* bm = &(_collector->_markBitMap); // shared
4792 4796 MarkFromDirtyCardsClosure
4793 4797 greyRescanClosure(_collector, full_span, // entire span of interest
4794 4798 sp, bm, work_q, cl);
4795 4799
4796 4800 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4797 4801 assert(pst->valid(), "Uninitialized use?");
4798 4802 uint nth_task = 0;
4799 4803 const int alignment = CardTableModRefBS::card_size * BitsPerWord;
4800 4804 MemRegion span = sp->used_region();
4801 4805 HeapWord* start_addr = span.start();
4802 4806 HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
4803 4807 alignment);
4804 4808 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
4805 4809 assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
4806 4810 start_addr, "Check alignment");
4807 4811 assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
4808 4812 chunk_size, "Check alignment");
4809 4813
4810 4814 while (!pst->is_task_claimed(/* reference */ nth_task)) {
4811 4815 // Having claimed the nth_task, compute corresponding mem-region,
4812 4816 // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
4813 4817 // The alignment restriction ensures that we do not need any
4814 4818 // synchronization with other gang-workers while setting or
4815 4819     // clearing bits in this chunk of the MUT.
4816 4820 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
4817 4821 start_addr + (nth_task+1)*chunk_size);
4818 4822 // The last chunk's end might be way beyond end of the
4819 4823 // used region. In that case pull back appropriately.
4820 4824 if (this_span.end() > end_addr) {
4821 4825 this_span.set_end(end_addr);
4822 4826 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
4823 4827 }
4824 4828 // Iterate over the dirty cards covering this chunk, marking them
4825 4829 // precleaned, and setting the corresponding bits in the mod union
4826 4830 // table. Since we have been careful to partition at Card and MUT-word
4827 4831 // boundaries no synchronization is needed between parallel threads.
4828 4832 _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
4829 4833 &modUnionClosure);
4830 4834
4831 4835 // Having transferred these marks into the modUnionTable,
4832 4836 // rescan the marked objects on the dirty cards in the modUnionTable.
4833 4837 // Even if this is at a synchronous collection, the initial marking
4834 4838 // may have been done during an asynchronous collection so there
4835 4839 // may be dirty bits in the mod-union table.
4836 4840 _collector->_modUnionTable.dirty_range_iterate_clear(
4837 4841 this_span, &greyRescanClosure);
4838 4842 _collector->_modUnionTable.verifyNoOneBitsInRange(
4839 4843 this_span.start(),
4840 4844 this_span.end());
4841 4845 }
4842 4846 pst->all_tasks_completed(); // declare that i am done
4843 4847 }
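// Editor's note on the alignment chosen above: the mod union table
// keeps one bit per card, so a word of the MUT covers BitsPerWord
// cards. Aligning chunk boundaries to card_size * BitsPerWord heap
// bytes therefore guarantees that no two workers ever set or clear
// bits in the same MUT word. As a hypothetical example, with 512-byte
// cards on a 64-bit VM each MUT word covers 512 * 64 = 32 KB of heap.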
4844 4848
4845 4849 // . see if we can share work_queues with ParNew? XXX
4846 4850 void
4847 4851 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
4848 4852 int* seed) {
4849 4853 OopTaskQueue* work_q = work_queue(i);
4850 4854 NOT_PRODUCT(int num_steals = 0;)
4851 4855 oop obj_to_scan;
4852 4856 CMSBitMap* bm = &(_collector->_markBitMap);
4853 4857
4854 4858 while (true) {
4855 4859 // Completely finish any left over work from (an) earlier round(s)
4856 4860 cl->trim_queue(0);
4857 4861 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4858 4862 (size_t)ParGCDesiredObjsFromOverflowList);
4859 4863 // Now check if there's any work in the overflow list
4860 4864 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
4861 4865 // only affects the number of attempts made to get work from the
4862 4866 // overflow list and does not affect the number of workers. Just
4863 4867 // pass ParallelGCThreads so this behavior is unchanged.
4864 4868 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4865 4869 work_q,
4866 4870 ParallelGCThreads)) {
4867 4871 // found something in global overflow list;
4868 4872 // not yet ready to go stealing work from others.
4869 4873 // We'd like to assert(work_q->size() != 0, ...)
4870 4874 // because we just took work from the overflow list,
4871 4875 // but of course we can't since all of that could have
4872 4876 // been already stolen from us.
4873 4877 // "He giveth and He taketh away."
4874 4878 continue;
4875 4879 }
4876 4880 // Verify that we have no work before we resort to stealing
4877 4881 assert(work_q->size() == 0, "Have work, shouldn't steal");
4878 4882 // Try to steal from other queues that have work
4879 4883 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4880 4884 NOT_PRODUCT(num_steals++;)
4881 4885 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
4882 4886 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4883 4887 // Do scanning work
4884 4888 obj_to_scan->oop_iterate(cl);
4885 4889 // Loop around, finish this work, and try to steal some more
4886 4890 } else if (terminator()->offer_termination()) {
4887 4891 break; // nirvana from the infinite cycle
4888 4892 }
4889 4893 }
4890 4894 NOT_PRODUCT(
4891 4895 if (PrintCMSStatistics != 0) {
4892 4896 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
4893 4897 }
4894 4898 )
4895 4899 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4896 4900 "Else our work is not yet done");
4897 4901 }
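// Editor's worked example of the overflow-list batch size above
// (numbers hypothetical, and ParGCDesiredObjsFromOverflowList's
// default of 20 is an assumption): with max_elems = 16384 and
// size() = 384, the batch is
//   MIN2((16384 - 384) / 4, 20) = MIN2(4000, 20) = 20,
// so the quarter-of-free-space term only bites when the work queue
// is nearly full.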
4898 4902
4899 4903 // Record object boundaries in _eden_chunk_array by sampling the eden
4900 4904 // top in the slow-path eden object allocation code path, if
4901 4905 // CMSEdenChunksRecordAlways is true. If
4902 4906 // CMSEdenChunksRecordAlways is false, we instead use the asynchronous
4903 4907 // sampling in sample_eden(), which activates during part of the
4904 4908 // preclean phase.
4905 4909 void CMSCollector::sample_eden_chunk() {
4906 4910 if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4907 4911 if (_eden_chunk_lock->try_lock()) {
4908 4912 // Record a sample. This is the critical section. The contents
4909 4913       // of the _eden_chunk_array have to be non-decreasing in address
4910 4914       // order.
4911 4915 _eden_chunk_array[_eden_chunk_index] = *_top_addr;
4912 4916 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4913 4917 "Unexpected state of Eden");
4914 4918 if (_eden_chunk_index == 0 ||
4915 4919 ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
4916 4920 (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4917 4921 _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
4918 4922 _eden_chunk_index++; // commit sample
4919 4923 }
4920 4924 _eden_chunk_lock->unlock();
4921 4925 }
4922 4926 }
4923 4927 }
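// Editor's note: the commit test above keeps the recorded boundaries
// strictly increasing and at least CMSSamplingGrain words apart, so
// repeated samples taken while eden's top is parked at (or creeping
// near) the same address simply overwrite slot _eden_chunk_index
// rather than consuming array capacity.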
4924 4928
4925 4929 // Return a thread-local PLAB recording array, as appropriate.
4926 4930 void* CMSCollector::get_data_recorder(int thr_num) {
4927 4931 if (_survivor_plab_array != NULL &&
4928 4932 (CMSPLABRecordAlways ||
4929 4933 (_collectorState > Marking && _collectorState < FinalMarking))) {
4930 4934 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
4931 4935 ChunkArray* ca = &_survivor_plab_array[thr_num];
4932 4936 ca->reset(); // clear it so that fresh data is recorded
4933 4937 return (void*) ca;
4934 4938 } else {
4935 4939 return NULL;
4936 4940 }
4937 4941 }
4938 4942
4939 4943 // Reset all the thread-local PLAB recording arrays
4940 4944 void CMSCollector::reset_survivor_plab_arrays() {
4941 4945 for (uint i = 0; i < ParallelGCThreads; i++) {
4942 4946 _survivor_plab_array[i].reset();
4943 4947 }
4944 4948 }
4945 4949
4946 4950 // Merge the per-thread plab arrays into the global survivor chunk
4947 4951 // array which will provide the partitioning of the survivor space
4948 4952 // for CMS initial scan and rescan.
4949 4953 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
4950 4954 int no_of_gc_threads) {
4951 4955 assert(_survivor_plab_array != NULL, "Error");
4952 4956 assert(_survivor_chunk_array != NULL, "Error");
4953 4957 assert(_collectorState == FinalMarking ||
4954 4958 (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
4955 4959 for (int j = 0; j < no_of_gc_threads; j++) {
4956 4960 _cursor[j] = 0;
4957 4961 }
4958 4962 HeapWord* top = surv->top();
4959 4963 size_t i;
4960 4964 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries
4961 4965 HeapWord* min_val = top; // Higher than any PLAB address
4962 4966 uint min_tid = 0; // position of min_val this round
4963 4967 for (int j = 0; j < no_of_gc_threads; j++) {
4964 4968 ChunkArray* cur_sca = &_survivor_plab_array[j];
4965 4969 if (_cursor[j] == cur_sca->end()) {
4966 4970 continue;
4967 4971 }
4968 4972 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
4969 4973 HeapWord* cur_val = cur_sca->nth(_cursor[j]);
4970 4974 assert(surv->used_region().contains(cur_val), "Out of bounds value");
4971 4975 if (cur_val < min_val) {
4972 4976 min_tid = j;
4973 4977 min_val = cur_val;
4974 4978 } else {
4975 4979 assert(cur_val < top, "All recorded addresses should be less");
4976 4980 }
4977 4981 }
4978 4982 // At this point min_val and min_tid are respectively
4979 4983 // the least address in _survivor_plab_array[j]->nth(_cursor[j])
4980 4984 // and the thread (j) that witnesses that address.
4981 4985 // We record this address in the _survivor_chunk_array[i]
4982 4986 // and increment _cursor[min_tid] prior to the next round i.
4983 4987 if (min_val == top) {
4984 4988 break;
4985 4989 }
4986 4990 _survivor_chunk_array[i] = min_val;
4987 4991 _cursor[min_tid]++;
4988 4992 }
4989 4993 // We are all done; record the size of the _survivor_chunk_array
4990 4994 _survivor_chunk_index = i; // exclusive: [0, i)
4991 4995 if (PrintCMSStatistics > 0) {
4992 4996     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
4993 4997 }
4994 4998 // Verify that we used up all the recorded entries
4995 4999 #ifdef ASSERT
4996 5000 size_t total = 0;
4997 5001 for (int j = 0; j < no_of_gc_threads; j++) {
4998 5002 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
4999 5003 total += _cursor[j];
5000 5004 }
5001 5005 assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5002 5006 // Check that the merged array is in sorted order
5003 5007 if (total > 0) {
5004 5008 for (size_t i = 0; i < total - 1; i++) {
5005 5009 if (PrintCMSStatistics > 0) {
5006 5010 gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5007 5011 i, p2i(_survivor_chunk_array[i]));
5008 5012 }
5009 5013 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5010 5014 "Not sorted");
5011 5015 }
5012 5016 }
5013 5017 #endif // ASSERT
5014 5018 }
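// Editor's worked example of the merge above (addresses hypothetical):
// with two threads recording
//   plab[0] = {0x1000, 0x3000}    plab[1] = {0x2000, 0x4000}
// each round selects the global minimum across the cursors, yielding
//   _survivor_chunk_array = {0x1000, 0x2000, 0x3000, 0x4000}
// i.e. an ordinary k-way merge of per-thread sorted arrays into one
// sorted boundary array, which stops once every cursor is spent.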
5015 5019
5016 5020 // Set up the space's par_seq_tasks structure for work claiming
5017 5021 // for parallel initial scan and rescan of young gen.
5018 5022 // See ParRescanTask where this is currently used.
5019 5023 void
5020 5024 CMSCollector::
5021 5025 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5022 5026 assert(n_threads > 0, "Unexpected n_threads argument");
5023 5027
5024 5028 // Eden space
5025 5029 if (!_young_gen->eden()->is_empty()) {
5026 5030 SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
5027 5031 assert(!pst->valid(), "Clobbering existing data?");
5028 5032 // Each valid entry in [0, _eden_chunk_index) represents a task.
5029 5033 size_t n_tasks = _eden_chunk_index + 1;
5030 5034 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5031 5035 // Sets the condition for completion of the subtask (how many threads
5032 5036 // need to finish in order to be done).
5033 5037 pst->set_n_threads(n_threads);
5034 5038 pst->set_n_tasks((int)n_tasks);
5035 5039 }
5036 5040
5037 5041 // Merge the survivor plab arrays into _survivor_chunk_array
5038 5042 if (_survivor_plab_array != NULL) {
5039 5043 merge_survivor_plab_arrays(_young_gen->from(), n_threads);
5040 5044 } else {
5041 5045 assert(_survivor_chunk_index == 0, "Error");
5042 5046 }
5043 5047
5044 5048 // To space
5045 5049 {
5046 5050 SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
5047 5051 assert(!pst->valid(), "Clobbering existing data?");
5048 5052 // Sets the condition for completion of the subtask (how many threads
5049 5053 // need to finish in order to be done).
5050 5054 pst->set_n_threads(n_threads);
5051 5055 pst->set_n_tasks(1);
5052 5056 assert(pst->valid(), "Error");
5053 5057 }
5054 5058
5055 5059 // From space
5056 5060 {
5057 5061 SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
5058 5062 assert(!pst->valid(), "Clobbering existing data?");
5059 5063 size_t n_tasks = _survivor_chunk_index + 1;
5060 5064 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5061 5065 // Sets the condition for completion of the subtask (how many threads
5062 5066 // need to finish in order to be done).
5063 5067 pst->set_n_threads(n_threads);
5064 5068 pst->set_n_tasks((int)n_tasks);
5065 5069 assert(pst->valid(), "Error");
5066 5070 }
5067 5071 }
5068 5072
5069 5073 // Parallel version of remark
5070 5074 void CMSCollector::do_remark_parallel() {
5071 5075 GenCollectedHeap* gch = GenCollectedHeap::heap();
5072 5076 FlexibleWorkGang* workers = gch->workers();
5073 5077 assert(workers != NULL, "Need parallel worker threads.");
5074 5078 // Choose to use the number of GC workers most recently set
5075 5079 // into "active_workers".
5076 5080 uint n_workers = workers->active_workers();
5077 5081
5078 5082 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
5079 5083
5080 5084 StrongRootsScope srs(n_workers);
5081 5085
5082 5086 CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
5083 5087
5084 5088 // We won't be iterating over the cards in the card table updating
5085 5089 // the younger_gen cards, so we shouldn't call the following else
5086 5090 // the verification code as well as subsequent younger_refs_iterate
5087 5091 // code would get confused. XXX
5088 5092 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5089 5093
5090 5094 // The young gen rescan work will not be done as part of
5091 5095 // process_roots (which currently doesn't know how to
5092 5096 // parallelize such a scan), but rather will be broken up into
5093 5097 // a set of parallel tasks (via the sampling that the [abortable]
5094 5098 // preclean phase did of eden, plus the [two] tasks of
5095 5099 // scanning the [two] survivor spaces). Further fine-grain
5096 5100 // parallelization of the scanning of the survivor spaces
5097 5101 // themselves, and of precleaning of the younger gen itself
5098 5102 // is deferred to the future.
5099 5103 initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5100 5104
5101 5105 // The dirty card rescan work is broken up into a "sequence"
5102 5106 // of parallel tasks (per constituent space) that are dynamically
5103 5107 // claimed by the parallel threads.
5104 5108 cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5105 5109
5106 5110 // It turns out that even when we're using 1 thread, doing the work in a
5107 5111 // separate thread causes wide variance in run times. We can't help this
5108 5112 // in the multi-threaded case, but we special-case n=1 here to get
5109 5113 // repeatable measurements of the 1-thread overhead of the parallel code.
5110 5114 if (n_workers > 1) {
5111 5115 // Make refs discovery MT-safe, if it isn't already: it may not
5112 5116 // necessarily be so, since it's possible that we are doing
5113 5117 // ST marking.
5114 5118 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5115 5119 workers->run_task(&tsk);
5116 5120 } else {
5117 5121 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5118 5122 tsk.work(0);
5119 5123 }
5120 5124
5121 5125 // restore, single-threaded for now, any preserved marks
5122 5126 // as a result of work_q overflow
5123 5127 restore_preserved_marks_if_any();
5124 5128 }
5125 5129
5126 5130 // Non-parallel version of remark
5127 5131 void CMSCollector::do_remark_non_parallel() {
5128 5132 ResourceMark rm;
5129 5133 HandleMark hm;
5130 5134 GenCollectedHeap* gch = GenCollectedHeap::heap();
5131 5135 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5132 5136
5133 5137 MarkRefsIntoAndScanClosure
5134 5138 mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5135 5139 &_markStack, this,
5136 5140 false /* should_yield */, false /* not precleaning */);
5137 5141 MarkFromDirtyCardsClosure
5138 5142 markFromDirtyCardsClosure(this, _span,
5139 5143 NULL, // space is set further below
5140 5144 &_markBitMap, &_markStack, &mrias_cl);
5141 5145 {
5142 5146 GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5143 5147 // Iterate over the dirty cards, setting the corresponding bits in the
5144 5148 // mod union table.
5145 5149 {
5146 5150 ModUnionClosure modUnionClosure(&_modUnionTable);
5147 5151 _ct->ct_bs()->dirty_card_iterate(
5148 5152 _cmsGen->used_region(),
5149 5153 &modUnionClosure);
5150 5154 }
5151 5155 // Having transferred these marks into the modUnionTable, we just need
5152 5156 // to rescan the marked objects on the dirty cards in the modUnionTable.
5153 5157 // The initial marking may have been done during an asynchronous
5154 5158 // collection so there may be dirty bits in the mod-union table.
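// (Annotation: the mod union table keeps one bit per card and is walked
// a BitMap word at a time, so the upper bound of the range below is
// rounded up to card_size * BitsPerWord bytes, i.e. to a whole bit map
// word.)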
5155 5159 const int alignment =
5156 5160 CardTableModRefBS::card_size * BitsPerWord;
5157 5161 {
5158 5162 // ... First handle dirty cards in CMS gen
5159 5163 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5160 5164 MemRegion ur = _cmsGen->used_region();
5161 5165 HeapWord* lb = ur.start();
5162 5166 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5163 5167 MemRegion cms_span(lb, ub);
5164 5168 _modUnionTable.dirty_range_iterate_clear(cms_span,
5165 5169 &markFromDirtyCardsClosure);
5166 5170 verify_work_stacks_empty();
5167 5171 if (PrintCMSStatistics != 0) {
5168 5172 gclog_or_tty->print(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ",
5169 5173 markFromDirtyCardsClosure.num_dirty_cards());
5170 5174 }
5171 5175 }
5172 5176 }
5173 5177 if (VerifyDuringGC &&
5174 5178 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5175 5179 HandleMark hm; // Discard invalid handles created during verification
5176 5180 Universe::verify();
5177 5181 }
5178 5182 {
5179 5183 GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5180 5184
5181 5185 verify_work_stacks_empty();
5182 5186
5183 5187 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5184 5188 StrongRootsScope srs(1);
5185 5189
5186 5190 gch->gen_process_roots(&srs,
5187 5191 _cmsGen->level(),
5188 5192 true, // younger gens as roots
5189 5193 GenCollectedHeap::ScanningOption(roots_scanning_options()),
5190 5194 should_unload_classes(),
5191 5195 &mrias_cl,
5192 5196 NULL,
5193 5197 NULL); // The dirty klasses will be handled below
5194 5198
5195 5199 assert(should_unload_classes()
5196 5200 || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5197 5201 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5198 5202 }
5199 5203
5200 5204 {
5201 5205 GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5202 5206
5203 5207 verify_work_stacks_empty();
5204 5208
5205 5209 // Scan all class loader data objects that might have been introduced
5206 5210 // during concurrent marking.
5207 5211 ResourceMark rm;
5208 5212 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5209 5213 for (int i = 0; i < array->length(); i++) {
5210 5214 mrias_cl.do_class_loader_data(array->at(i));
5211 5215 }
5212 5216
5213 5217 // We don't need to keep track of new CLDs anymore.
5214 5218 ClassLoaderDataGraph::remember_new_clds(false);
5215 5219
5216 5220 verify_work_stacks_empty();
5217 5221 }
5218 5222
5219 5223 {
5220 5224 GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5221 5225
5222 5226 verify_work_stacks_empty();
5223 5227
5224 5228 RemarkKlassClosure remark_klass_closure(&mrias_cl);
5225 5229 ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5226 5230
5227 5231 verify_work_stacks_empty();
5228 5232 }
5229 5233
5230 5234 // We might have added oops to ClassLoaderData::_handles during the
5231 5235 // concurrent marking phase. These oops point to newly allocated objects
5232 5236 // that are guaranteed to be kept alive. Either by the direct allocation
5233 5237 // code, or when the young collector processes the roots. Hence,
5234 5238 // we don't have to revisit the _handles block during the remark phase.
5235 5239
5236 5240 verify_work_stacks_empty();
5237 5241 // Restore evacuated mark words, if any, used for overflow list links
5238 5242 if (!CMSOverflowEarlyRestoration) {
5239 5243 restore_preserved_marks_if_any();
5240 5244 }
5241 5245 verify_overflow_empty();
5242 5246 }
5243 5247
5244 5248 ////////////////////////////////////////////////////////
5245 5249 // Parallel Reference Processing Task Proxy Class
5246 5250 ////////////////////////////////////////////////////////
5247 5251 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5248 5252 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5249 5253 CMSCollector* _collector;
5250 5254 CMSBitMap* _mark_bit_map;
5251 5255 const MemRegion _span;
5252 5256 ProcessTask& _task;
5253 5257
5254 5258 public:
5255 5259 CMSRefProcTaskProxy(ProcessTask& task,
5256 5260 CMSCollector* collector,
5257 5261 const MemRegion& span,
5258 5262 CMSBitMap* mark_bit_map,
5259 5263 AbstractWorkGang* workers,
5260 5264 OopTaskQueueSet* task_queues):
5261 5265 AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5262 5266 task_queues,
5263 5267 workers->active_workers()),
5264 5268 _task(task),
5265 5269 _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5266 5270 {
5267 5271 assert(_collector->_span.equals(_span) && !_span.is_empty(),
5268 5272 "Inconsistency in _span");
5269 5273 }
5270 5274
5271 5275 OopTaskQueueSet* task_queues() { return queues(); }
5272 5276
5273 5277 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5274 5278
5275 5279 void do_work_steal(int i,
5276 5280 CMSParDrainMarkingStackClosure* drain,
5277 5281 CMSParKeepAliveClosure* keep_alive,
5278 5282 int* seed);
5279 5283
5280 5284 virtual void work(uint worker_id);
5281 5285 };
5282 5286
5283 5287 void CMSRefProcTaskProxy::work(uint worker_id) {
5284 5288 ResourceMark rm;
5285 5289 HandleMark hm;
5286 5290 assert(_collector->_span.equals(_span), "Inconsistency in _span");
5287 5291 CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5288 5292 _mark_bit_map,
5289 5293 work_queue(worker_id));
5290 5294 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5291 5295 _mark_bit_map,
5292 5296 work_queue(worker_id));
5293 5297 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5294 5298 _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5295 5299 if (_task.marks_oops_alive()) {
5296 5300 do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5297 5301 _collector->hash_seed(worker_id));
5298 5302 }
5299 5303 assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5300 5304 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5301 5305 }
5302 5306
5303 5307 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5304 5308 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5305 5309 EnqueueTask& _task;
5306 5310
5307 5311 public:
5308 5312 CMSRefEnqueueTaskProxy(EnqueueTask& task)
5309 5313 : AbstractGangTask("Enqueue reference objects in parallel"),
5310 5314 _task(task)
5311 5315 { }
5312 5316
5313 5317 virtual void work(uint worker_id)
5314 5318 {
5315 5319 _task.work(worker_id);
5316 5320 }
5317 5321 };
5318 5322
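// (Annotation, a reading of the _low_water_mark expression below: the
// work queue is trimmed down to the smaller of a quarter of its capacity
// and CMSWorkQueueDrainThreshold scaled by ParallelGCThreads.)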
5319 5323 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5320 5324 MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5321 5325 _span(span),
5322 5326 _bit_map(bit_map),
5323 5327 _work_queue(work_queue),
5324 5328 _mark_and_push(collector, span, bit_map, work_queue),
5325 5329 _low_water_mark(MIN2((work_queue->max_elems()/4),
5326 5330 ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5327 5331 { }
5328 5332
5329 5333 // . see if we can share work_queues with ParNew? XXX
5330 5334 void CMSRefProcTaskProxy::do_work_steal(int i,
5331 5335 CMSParDrainMarkingStackClosure* drain,
5332 5336 CMSParKeepAliveClosure* keep_alive,
5333 5337 int* seed) {
5334 5338 OopTaskQueue* work_q = work_queue(i);
5335 5339 NOT_PRODUCT(int num_steals = 0;)
5336 5340 oop obj_to_scan;
5337 5341
5338 5342 while (true) {
5339 5343 // Completely finish any left over work from (an) earlier round(s)
5340 5344 drain->trim_queue(0);
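// Take at most a quarter of the queue's remaining capacity per attempt,
// capped by ParGCDesiredObjsFromOverflowList.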
5341 5345 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5342 5346 (size_t)ParGCDesiredObjsFromOverflowList);
5343 5347 // Now check if there's any work in the overflow list
5344 5348 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5345 5349 // only affects the number of attempts made to get work from the
5346 5350 // overflow list and does not affect the number of workers. Just
5347 5351 // pass ParallelGCThreads so this behavior is unchanged.
5348 5352 if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5349 5353 work_q,
5350 5354 ParallelGCThreads)) {
5351 5355 // Found something in global overflow list;
5352 5356 // not yet ready to go stealing work from others.
5353 5357 // We'd like to assert(work_q->size() != 0, ...)
5354 5358 // because we just took work from the overflow list,
5355 5359 // but of course we can't, since all of that might have
5356 5360 // been already stolen from us.
5357 5361 continue;
5358 5362 }
5359 5363 // Verify that we have no work before we resort to stealing
5360 5364 assert(work_q->size() == 0, "Have work, shouldn't steal");
5361 5365 // Try to steal from other queues that have work
5362 5366 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5363 5367 NOT_PRODUCT(num_steals++;)
5364 5368 assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5365 5369 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5366 5370 // Do scanning work
5367 5371 obj_to_scan->oop_iterate(keep_alive);
5368 5372 // Loop around, finish this work, and try to steal some more
5369 5373 } else if (terminator()->offer_termination()) {
5370 5374 break; // nirvana from the infinite cycle
5371 5375 }
5372 5376 }
5373 5377 NOT_PRODUCT(
5374 5378 if (PrintCMSStatistics != 0) {
5375 5379 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5376 5380 }
5377 5381 )
5378 5382 }
5379 5383
5380 5384 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5381 5385 {
5382 5386 GenCollectedHeap* gch = GenCollectedHeap::heap();
5383 5387 FlexibleWorkGang* workers = gch->workers();
5384 5388 assert(workers != NULL, "Need parallel worker threads.");
5385 5389 CMSRefProcTaskProxy rp_task(task, &_collector,
5386 5390 _collector.ref_processor()->span(),
5387 5391 _collector.markBitMap(),
5388 5392 workers, _collector.task_queues());
5389 5393 workers->run_task(&rp_task);
5390 5394 }
5391 5395
5392 5396 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5393 5397 {
5395 5399 GenCollectedHeap* gch = GenCollectedHeap::heap();
5396 5400 FlexibleWorkGang* workers = gch->workers();
5397 5401 assert(workers != NULL, "Need parallel worker threads.");
5398 5402 CMSRefEnqueueTaskProxy enq_task(task);
5399 5403 workers->run_task(&enq_task);
5400 5404 }
5401 5405
5402 5406 void CMSCollector::refProcessingWork() {
5403 5407 ResourceMark rm;
5404 5408 HandleMark hm;
5405 5409
5406 5410 ReferenceProcessor* rp = ref_processor();
5407 5411 assert(rp->span().equals(_span), "Spans should be equal");
5408 5412 assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5409 5413 // Process weak references.
5410 5414 rp->setup_policy(false);
5411 5415 verify_work_stacks_empty();
5412 5416
5413 5417 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5414 5418 &_markStack, false /* !preclean */);
5415 5419 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5416 5420 _span, &_markBitMap, &_markStack,
5417 5421 &cmsKeepAliveClosure, false /* !preclean */);
5418 5422 {
5419 5423 GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5420 5424
5421 5425 ReferenceProcessorStats stats;
5422 5426 if (rp->processing_is_mt()) {
5423 5427 // Set the degree of MT here. If the discovery is done MT, there
5424 5428 // may have been a different number of threads doing the discovery
5425 5429 // and a different number of discovered lists may have Ref objects.
5426 5430 // That is OK as long as the Reference lists are balanced (see
5427 5431 // balance_all_queues() and balance_queues()).
5428 5432 GenCollectedHeap* gch = GenCollectedHeap::heap();
5429 5433 uint active_workers = ParallelGCThreads;
5430 5434 FlexibleWorkGang* workers = gch->workers();
5431 5435 if (workers != NULL) {
5432 5436 active_workers = workers->active_workers();
5433 5437 // The expectation is that active_workers will have already
5434 5438 // been set to a reasonable value. If it has not been set,
5435 5439 // investigate.
5436 5440 assert(active_workers > 0, "Should have been set during scavenge");
5437 5441 }
5438 5442 rp->set_active_mt_degree(active_workers);
5439 5443 CMSRefProcTaskExecutor task_executor(*this);
5440 5444 stats = rp->process_discovered_references(&_is_alive_closure,
5441 5445 &cmsKeepAliveClosure,
5442 5446 &cmsDrainMarkingStackClosure,
5443 5447 &task_executor,
5444 5448 _gc_timer_cm,
5445 5449 _gc_tracer_cm->gc_id());
5446 5450 } else {
5447 5451 stats = rp->process_discovered_references(&_is_alive_closure,
5448 5452 &cmsKeepAliveClosure,
5449 5453 &cmsDrainMarkingStackClosure,
5450 5454 NULL,
5451 5455 _gc_timer_cm,
5452 5456 _gc_tracer_cm->gc_id());
5453 5457 }
5454 5458 _gc_tracer_cm->report_gc_reference_stats(stats);
5455 5459
5456 5460 }
5457 5461
5458 5462 // This is the point where the entire marking should have completed.
5459 5463 verify_work_stacks_empty();
5460 5464
5461 5465 if (should_unload_classes()) {
5462 5466 {
5463 5467 GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5464 5468
5465 5469 // Unload classes and purge the SystemDictionary.
5466 5470 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5467 5471
5468 5472 // Unload nmethods.
5469 5473 CodeCache::do_unloading(&_is_alive_closure, purged_class);
5470 5474
5471 5475 // Prune dead klasses from subklass/sibling/implementor lists.
5472 5476 Klass::clean_weak_klass_links(&_is_alive_closure);
5473 5477 }
5474 5478
5475 5479 {
5476 5480 GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5477 5481 // Clean up unreferenced symbols in symbol table.
5478 5482 SymbolTable::unlink();
5479 5483 }
5480 5484
5481 5485 {
5482 5486 GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
5483 5487 // Delete entries for dead interned strings.
5484 5488 StringTable::unlink(&_is_alive_closure);
5485 5489 }
5486 5490 }
5487 5491
5489 5493 // Restore any preserved marks as a result of mark stack or
5490 5494 // work queue overflow
5491 5495 restore_preserved_marks_if_any(); // done single-threaded for now
5492 5496
5493 5497 rp->set_enqueuing_is_done(true);
5494 5498 if (rp->processing_is_mt()) {
5495 5499 rp->balance_all_queues();
5496 5500 CMSRefProcTaskExecutor task_executor(*this);
5497 5501 rp->enqueue_discovered_references(&task_executor);
5498 5502 } else {
5499 5503 rp->enqueue_discovered_references(NULL);
5500 5504 }
5501 5505 rp->verify_no_references_recorded();
5502 5506 assert(!rp->discovery_enabled(), "should have been disabled");
5503 5507 }
5504 5508
5505 5509 #ifndef PRODUCT
5506 5510 void CMSCollector::check_correct_thread_executing() {
5507 5511 Thread* t = Thread::current();
5508 5512 // Only the VM thread or the CMS thread should be here.
5509 5513 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5510 5514 "Unexpected thread type");
5511 5515 // If this is the VM thread, the foreground collector
5512 5516 // should not be waiting. Note that _foregroundGCIsActive is
5513 5517 // true while the foreground collector is waiting.
5514 5518 if (_foregroundGCShouldWait) {
5515 5519 // We cannot be the VM thread
5516 5520 assert(t->is_ConcurrentGC_thread(),
5517 5521 "Should be CMS thread");
5518 5522 } else {
5519 5523 // We can be the CMS thread only if we are in a stop-world
5520 5524 // phase of CMS collection.
5521 5525 if (t->is_ConcurrentGC_thread()) {
5522 5526 assert(_collectorState == InitialMarking ||
5523 5527 _collectorState == FinalMarking,
5524 5528 "Should be a stop-world phase");
5525 5529 // The CMS thread should be holding the CMS_token.
5526 5530 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5527 5531 "Potential interference with concurrently "
5528 5532 "executing VM thread");
5529 5533 }
5530 5534 }
5531 5535 }
5532 5536 #endif
5533 5537
5534 5538 void CMSCollector::sweep() {
5535 5539 assert(_collectorState == Sweeping, "just checking");
5536 5540 check_correct_thread_executing();
5537 5541 verify_work_stacks_empty();
5538 5542 verify_overflow_empty();
5539 5543 increment_sweep_count();
5540 5544 TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
5541 5545
5542 5546 _inter_sweep_timer.stop();
5543 5547 _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5544 5548
5545 5549 assert(!_intra_sweep_timer.is_active(), "Should not be active");
5546 5550 _intra_sweep_timer.reset();
5547 5551 _intra_sweep_timer.start();
5548 5552 {
5549 5553 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5550 5554 CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
5551 5555 // First sweep the old gen
5552 5556 {
5553 5557 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5554 5558 bitMapLock());
5555 5559 sweepWork(_cmsGen);
5556 5560 }
5557 5561
5558 5562 // Update Universe::_heap_*_at_gc figures.
5559 5563 // We need all the free list locks to make the abstract state
5560 5564 // transition from Sweeping to Resetting. See detailed note
5561 5565 // further below.
5562 5566 {
5563 5567 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5564 5568 // Update heap occupancy information which is used as
5565 5569 // input to soft ref clearing policy at the next gc.
5566 5570 Universe::update_heap_info_at_gc();
5567 5571 _collectorState = Resizing;
5568 5572 }
5569 5573 }
5570 5574 verify_work_stacks_empty();
5571 5575 verify_overflow_empty();
5572 5576
5573 5577 if (should_unload_classes()) {
5574 5578 // Delay purge to the beginning of the next safepoint. Metaspace::contains
5575 5579 // requires that the virtual spaces are stable and not deleted.
5576 5580 ClassLoaderDataGraph::set_should_purge(true);
5577 5581 }
5578 5582
5579 5583 _intra_sweep_timer.stop();
5580 5584 _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5581 5585
5582 5586 _inter_sweep_timer.reset();
5583 5587 _inter_sweep_timer.start();
5584 5588
5585 5589 // We need a monotonically non-decreasing time in ms or we will
5586 5590 // see time-warp warnings; os::javaTimeMillis() does not guarantee
5587 5591 // monotonicity, so derive the time from os::javaTimeNanos() instead.
5588 5592 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
5589 5593 update_time_of_last_gc(now);
5590 5594
5591 5595 // NOTE on abstract state transitions:
5592 5596 // Mutators allocate-live and/or mark the mod-union table dirty
5593 5597 // based on the state of the collection. The former is done in
5594 5598 // the interval [Marking, Sweeping] and the latter in the interval
5595 5599 // [Marking, Sweeping). Thus the transitions into the Marking state
5596 5600 // and out of the Sweeping state must be synchronously visible
5597 5601 // globally to the mutators.
5598 5602 // The transition into the Marking state happens with the world
5599 5603 // stopped so the mutators will globally see it. Sweeping is
5600 5604 // done asynchronously by the background collector so the transition
5601 5605 // from the Sweeping state to the Resizing state must be done
5602 5606 // under the freelistLock (as is the check for whether to
5603 5607 // allocate-live and whether to dirty the mod-union table).
5604 5608 assert(_collectorState == Resizing, "Change of collector state to"
5605 5609 " Resizing must be done under the freelistLocks (plural)");
5606 5610
5607 5611 // Now that sweeping has been completed, we clear
5608 5612 // the incremental_collection_failed flag,
5609 5613 // thus inviting a younger gen collection to promote into
5610 5614 // this generation. If such a promotion may still fail,
5611 5615 // the flag will be set again when a young collection is
5612 5616 // attempted.
5613 5617 GenCollectedHeap* gch = GenCollectedHeap::heap();
5614 5618 gch->clear_incremental_collection_failed(); // Worth retrying as fresh space may have been freed up
5615 5619 gch->update_full_collections_completed(_collection_count_start);
5616 5620 }
5617 5621
5618 5622 // FIX ME!!! Looks like this belongs in CFLSpace, with
5619 5623 // CMSGen merely delegating to it.
5620 5624 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5621 5625 double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5622 5626 HeapWord* minAddr = _cmsSpace->bottom();
5623 5627 HeapWord* largestAddr =
5624 5628 (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5625 5629 if (largestAddr == NULL) {
5626 5630 // The dictionary appears to be empty. In this case
5627 5631 // try to coalesce at the end of the heap.
5628 5632 largestAddr = _cmsSpace->end();
5629 5633 }
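// (Annotation: place the coalescing boundary nearLargestPercent of the
// way from the bottom of the space to the largest block, backed off by
// one MinChunkSize; with the default proximity of 0.99 the boundary
// lands just short of the largest block itself.)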
5630 5634 size_t largestOffset = pointer_delta(largestAddr, minAddr);
5631 5635 size_t nearLargestOffset =
5632 5636 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5633 5637 if (PrintFLSStatistics != 0) {
5634 5638 gclog_or_tty->print_cr(
5635 5639 "CMS: Large Block: " PTR_FORMAT ";"
5636 5640 " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5637 5641 p2i(largestAddr),
5638 5642 p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5639 5643 }
5640 5644 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5641 5645 }
5642 5646
5643 5647 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5644 5648 return addr >= _cmsSpace->nearLargestChunk();
5645 5649 }
5646 5650
5647 5651 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5648 5652 return _cmsSpace->find_chunk_at_end();
5649 5653 }
5650 5654
5651 5655 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
5652 5656 bool full) {
5653 5657 // The next lower level has been collected. Gather any statistics
5654 5658 // that are of interest at this point.
5655 5659 if (!full && (current_level + 1) == level()) {
5656 5660 // Gather statistics on the young generation collection.
5657 5661 collector()->stats().record_gc0_end(used());
5658 5662 }
5659 5663 }
5660 5664
5661 5665 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
5662 5666 // We iterate over the space(s) underlying this generation,
5663 5667 // checking the mark bit map to see if the bits corresponding
5664 5668 // to specific blocks are marked or not. Blocks that are
5665 5669 // marked are live and are not swept up. All remaining blocks
5666 5670 // are swept up, with coalescing on-the-fly as we sweep up
5667 5671 // contiguous free and/or garbage blocks:
5668 5672 // We need to ensure that the sweeper synchronizes with allocators
5669 5673 // and stop-the-world collectors. In particular, the following
5670 5674 // locks are used:
5671 5675 // . CMS token: if this is held, a stop the world collection cannot occur
5672 5676 // . freelistLock: if this is held no allocation can occur from this
5673 5677 // generation by another thread
5674 5678 // . bitMapLock: if this is held, no other thread can access or update
5675 5679 //   the marking bit map
5676 5680
5677 5681 // Note that we need to hold the freelistLock if we use
5678 5682 // block iterate below; else the iterator might go awry if
5679 5683 // a mutator (or promotion) causes block contents to change
5680 5684 // (for instance if the allocator divvies up a block).
5681 5685 // If we hold the free list lock, for all practical purposes
5682 5686 // young generation GC's can't occur (they'll usually need to
5683 5687 // promote), so we might as well prevent all young generation
5684 5688 // GC's while we do a sweeping step. For the same reason, we might
5685 5689 // as well take the bit map lock for the entire duration.
5686 5690
5687 5691 // check that we hold the requisite locks
5688 5692 assert(have_cms_token(), "Should hold cms token");
5689 5693 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
5690 5694 assert_lock_strong(gen->freelistLock());
5691 5695 assert_lock_strong(bitMapLock());
5692 5696
5693 5697 assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
5694 5698 assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
5695 5699 gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
5696 5700 _inter_sweep_estimate.padded_average(),
5697 5701 _intra_sweep_estimate.padded_average());
5698 5702 gen->setNearLargestChunk();
5699 5703
5700 5704 {
5701 5705 SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
5702 5706 gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5703 5707 // We need to free-up/coalesce garbage/blocks from a
5704 5708 // co-terminal free run. This is done in the SweepClosure
5705 5709 // destructor; so, do not remove this scope, else the
5706 5710 // end-of-sweep-census below will be off by a little bit.
5707 5711 }
5708 5712 gen->cmsSpace()->sweep_completed();
5709 5713 gen->cmsSpace()->endSweepFLCensus(sweep_count());
5710 5714 if (should_unload_classes()) { // unloaded classes this cycle,
5711 5715 _concurrent_cycles_since_last_unload = 0; // ... reset count
5712 5716 } else { // did not unload classes,
5713 5717 _concurrent_cycles_since_last_unload++; // ... increment count
5714 5718 }
5715 5719 }
5716 5720
5717 5721 // Reset CMS data structures (for now just the marking bit map)
5718 5722 // preparatory for the next cycle.
5719 5723 void CMSCollector::reset(bool concurrent) {
5720 5724 if (concurrent) {
5721 5725 CMSTokenSyncWithLocks ts(true, bitMapLock());
5722 5726
5723 5727 // If the state is not "Resetting", the foreground thread
5724 5728 // has done a collection and the resetting.
5725 5729 if (_collectorState != Resetting) {
5726 5730 assert(_collectorState == Idling, "The state should only change"
5727 5731 " because the foreground collector has finished the collection");
5728 5732 return;
5729 5733 }
5730 5734
5731 5735 // Clear the mark bitmap (no grey objects to start with)
5732 5736 // for the next cycle.
5733 5737 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5734 5738 CMSPhaseAccounting cmspa(this, "reset", _gc_tracer_cm->gc_id(), !PrintGCDetails);
5735 5739
5736 5740 HeapWord* curAddr = _markBitMap.startWord();
5737 5741 while (curAddr < _markBitMap.endWord()) {
5738 5742 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
5739 5743 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5740 5744 _markBitMap.clear_large_range(chunk);
5741 5745 if (ConcurrentMarkSweepThread::should_yield() &&
5742 5746 !foregroundGCIsActive() &&
5743 5747 CMSYield) {
5744 5748 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5745 5749 "CMS thread should hold CMS token");
5746 5750 assert_lock_strong(bitMapLock());
5747 5751 bitMapLock()->unlock();
5748 5752 ConcurrentMarkSweepThread::desynchronize(true);
5749 5753 stopTimer();
5750 5754 if (PrintCMSStatistics != 0) {
5751 5755 incrementYields();
5752 5756 }
5753 5757
5754 5758 // See the comment in coordinator_yield()
5755 5759 for (unsigned i = 0; i < CMSYieldSleepCount &&
5756 5760 ConcurrentMarkSweepThread::should_yield() &&
5757 5761 !CMSCollector::foregroundGCIsActive(); ++i) {
5758 5762 os::sleep(Thread::current(), 1, false);
5759 5763 }
5760 5764
5761 5765 ConcurrentMarkSweepThread::synchronize(true);
5762 5766 bitMapLock()->lock_without_safepoint_check();
5763 5767 startTimer();
5764 5768 }
5765 5769 curAddr = chunk.end();
5766 5770 }
5767 5771 // A successful mostly concurrent collection has been done.
5768 5772 // Because only the full (i.e., concurrent mode failure) collections
5769 5773 // are being measured for gc overhead limits, clean the "near" flag
5770 5774 // and count.
5771 5775 size_policy()->reset_gc_overhead_limit_count();
5772 5776 _collectorState = Idling;
5773 5777 } else {
5774 5778 // already have the lock
5775 5779 assert(_collectorState == Resetting, "just checking");
5776 5780 assert_lock_strong(bitMapLock());
5777 5781 _markBitMap.clear_all();
5778 5782 _collectorState = Idling;
5779 5783 }
5780 5784
5781 5785 register_gc_end();
5782 5786 }
5783 5787
5784 5788 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5785 5789 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5786 5790 GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
5787 5791 TraceCollectorStats tcs(counters());
5788 5792
5789 5793 switch (op) {
5790 5794 case CMS_op_checkpointRootsInitial: {
5791 5795 SvcGCMarker sgcm(SvcGCMarker::OTHER);
5792 5796 checkpointRootsInitial();
5793 5797 if (PrintGC) {
5794 5798 _cmsGen->printOccupancy("initial-mark");
5795 5799 }
5796 5800 break;
5797 5801 }
5798 5802 case CMS_op_checkpointRootsFinal: {
5799 5803 SvcGCMarker sgcm(SvcGCMarker::OTHER);
5800 5804 checkpointRootsFinal();
5801 5805 if (PrintGC) {
5802 5806 _cmsGen->printOccupancy("remark");
5803 5807 }
5804 5808 break;
5805 5809 }
5806 5810 default:
5807 5811 fatal("No such CMS_op");
5808 5812 }
5809 5813 }
5810 5814
5811 5815 #ifndef PRODUCT
5812 5816 size_t const CMSCollector::skip_header_HeapWords() {
5813 5817 return FreeChunk::header_size();
5814 5818 }
5815 5819
5816 5820 // Try and collect here conditions that should hold when
5817 5821 // CMS thread is exiting. The idea is that the foreground GC
5818 5822 // thread should not be blocked if it wants to terminate
5819 5823 // the CMS thread and yet continue to run the VM for a while
5820 5824 // after that.
5821 5825 void CMSCollector::verify_ok_to_terminate() const {
5822 5826 assert(Thread::current()->is_ConcurrentGC_thread(),
5823 5827 "should be called by CMS thread");
5824 5828 assert(!_foregroundGCShouldWait, "should be false");
5825 5829 // We could check here that all the various low-level locks
5826 5830 // are not held by the CMS thread, but that is overkill; see
5827 5831 // also CMSThread::verify_ok_to_terminate() where the CGC_lock
5828 5832 // is checked.
5829 5833 }
5830 5834 #endif
5831 5835
5832 5836 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
5833 5837 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
5834 5838 "missing Printezis mark?");
5835 5839 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5836 5840 size_t size = pointer_delta(nextOneAddr + 1, addr);
5837 5841 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5838 5842 "alignment problem");
5839 5843 assert(size >= 3, "Necessary for Printezis marks to work");
5840 5844 return size;
5841 5845 }
5842 5846
5843 5847 // A variant of the above (block_size_using_printezis_bits()) except
5844 5848 // that we return 0 if the P-bits are not yet set.
5845 5849 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
5846 5850 if (_markBitMap.isMarked(addr + 1)) {
5847 5851 assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
5848 5852 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5849 5853 size_t size = pointer_delta(nextOneAddr + 1, addr);
5850 5854 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5851 5855 "alignment problem");
5852 5856 assert(size >= 3, "Necessary for Printezis marks to work");
5853 5857 return size;
5854 5858 }
5855 5859 return 0;
5856 5860 }
5857 5861
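// (Annotation: returns the start of the first card at or beyond the end
// of the block at addr; the block size is read from the object header
// when the klass is installed, and from the Printezis bits otherwise.)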
5858 5862 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
5859 5863 size_t sz = 0;
5860 5864 oop p = (oop)addr;
5861 5865 if (p->klass_or_null() != NULL) {
5862 5866 sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
5863 5867 } else {
5864 5868 sz = block_size_using_printezis_bits(addr);
5865 5869 }
5866 5870 assert(sz > 0, "size must be nonzero");
5867 5871 HeapWord* next_block = addr + sz;
5868 5872 HeapWord* next_card = (HeapWord*)round_to((uintptr_t)next_block,
5869 5873 CardTableModRefBS::card_size);
5870 5874 assert(round_down((uintptr_t)addr, CardTableModRefBS::card_size) <
5871 5875 round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
5872 5876 "must be different cards");
5873 5877 return next_card;
5874 5878 }
5875 5879
5876 5880
5877 5881 // CMS Bit Map Wrapper /////////////////////////////////////////
5878 5882
5879 5883 // Construct a CMS bit map infrastructure, but don't create the
5880 5884 // bit vector itself. That is done by a separate call to
5881 5885 // CMSBitMap::allocate() further below.
5882 5886 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5883 5887 _bm(),
5884 5888 _shifter(shifter),
5885 5889 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5886 5890 Monitor::_safepoint_check_sometimes) : NULL)
5887 5891 {
5888 5892 _bmStartWord = 0;
5889 5893 _bmWordSize = 0;
5890 5894 }
5891 5895
5892 5896 bool CMSBitMap::allocate(MemRegion mr) {
5893 5897 _bmStartWord = mr.start();
5894 5898 _bmWordSize = mr.word_size();
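// One bit covers (1 << _shifter) heap words, so (_bmWordSize >> _shifter)
// bits are needed; shifting by LogBitsPerByte more converts bits to
// bytes, and the +1 rounds the division up.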
5895 5899 ReservedSpace brs(ReservedSpace::allocation_align_size_up(
5896 5900 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
5897 5901 if (!brs.is_reserved()) {
5898 5902 warning("CMS bit map allocation failure");
5899 5903 return false;
5900 5904 }
5901 5905 // For now we'll just commit all of the bit map up front.
5902 5906 // Later on we'll try to be more parsimonious with swap.
5903 5907 if (!_virtual_space.initialize(brs, brs.size())) {
5904 5908 warning("CMS bit map backing store failure");
5905 5909 return false;
5906 5910 }
5907 5911 assert(_virtual_space.committed_size() == brs.size(),
5908 5912 "didn't reserve backing store for all of CMS bit map?");
5909 5913 _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
5910 5914 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5911 5915 _bmWordSize, "inconsistency in bit map sizing");
5912 5916 _bm.set_size(_bmWordSize >> _shifter);
5913 5917
5914 5918 // bm.clear(); // can we rely on getting zero'd memory? verify below
5915 5919 assert(isAllClear(),
5916 5920 "Expected zero'd memory from ReservedSpace constructor");
5917 5921 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
5918 5922 "consistency check");
5919 5923 return true;
5920 5924 }
5921 5925
5922 5926 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
5923 5927 HeapWord *next_addr, *end_addr, *last_addr;
5924 5928 assert_locked();
5925 5929 assert(covers(mr), "out-of-range error");
5926 5930 // XXX assert that start and end are appropriately aligned
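// Repeatedly extract (and clear) the next maximal run of dirty bits in
// [mr.start(), mr.end()); an empty region means no dirty bits remain,
// so we can return early.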
5927 5931 for (next_addr = mr.start(), end_addr = mr.end();
5928 5932 next_addr < end_addr; next_addr = last_addr) {
5929 5933 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
5930 5934 last_addr = dirty_region.end();
5931 5935 if (!dirty_region.is_empty()) {
5932 5936 cl->do_MemRegion(dirty_region);
5933 5937 } else {
5934 5938 assert(last_addr == end_addr, "program logic");
5935 5939 return;
5936 5940 }
5937 5941 }
5938 5942 }
5939 5943
5940 5944 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
5941 5945 _bm.print_on_error(st, prefix);
5942 5946 }
5943 5947
5944 5948 #ifndef PRODUCT
5945 5949 void CMSBitMap::assert_locked() const {
5946 5950 CMSLockVerifier::assert_locked(lock());
5947 5951 }
5948 5952
5949 5953 bool CMSBitMap::covers(MemRegion mr) const {
5950 5954 // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
5951 5955 assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
5952 5956 "size inconsistency");
5953 5957 return (mr.start() >= _bmStartWord) &&
5954 5958 (mr.end() <= endWord());
5955 5959 }
5956 5960
5957 5961 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
5958 5962 return (start >= _bmStartWord && (start + size) <= endWord());
5959 5963 }
5960 5964
5961 5965 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
5962 5966 // verify that there are no 1 bits in the interval [left, right)
5963 5967 FalseBitMapClosure falseBitMapClosure;
5964 5968 iterate(&falseBitMapClosure, left, right);
5965 5969 }
5966 5970
5967 5971 void CMSBitMap::region_invariant(MemRegion mr)
5968 5972 {
5969 5973 assert_locked();
5970 5974 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
5971 5975 assert(!mr.is_empty(), "unexpected empty region");
5972 5976 assert(covers(mr), "mr should be covered by bit map");
5973 5977 // convert address range into offset range
5974 5978 size_t start_ofs = heapWordToOffset(mr.start());
5975 5979 // Make sure that end() is appropriately aligned
5976 5980 assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
5977 5981 (1 << (_shifter+LogHeapWordSize))),
5978 5982 "Misaligned mr.end()");
5979 5983 size_t end_ofs = heapWordToOffset(mr.end());
5980 5984 assert(end_ofs > start_ofs, "Should mark at least one bit");
5981 5985 }
5982 5986
5983 5987 #endif
5984 5988
5985 5989 bool CMSMarkStack::allocate(size_t size) {
5986 5990 // allocate a stack of the requisite depth
5987 5991 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5988 5992 size * sizeof(oop)));
5989 5993 if (!rs.is_reserved()) {
5990 5994 warning("CMSMarkStack allocation failure");
5991 5995 return false;
5992 5996 }
5993 5997 if (!_virtual_space.initialize(rs, rs.size())) {
5994 5998 warning("CMSMarkStack backing store failure");
5995 5999 return false;
5996 6000 }
5997 6001 assert(_virtual_space.committed_size() == rs.size(),
5998 6002 "didn't reserve backing store for all of CMS stack?");
5999 6003 _base = (oop*)(_virtual_space.low());
6000 6004 _index = 0;
6001 6005 _capacity = size;
6002 6006 NOT_PRODUCT(_max_depth = 0);
6003 6007 return true;
6004 6008 }
6005 6009
6006 6010 // XXX FIX ME !!! In the MT case we come in here holding a
6007 6011 // leaf lock. For printing we need to take a further lock
6008 6012 // which has lower rank. We need to recalibrate the two
6009 6013 // lock-ranks involved in order to be able to print the
6010 6014 // messages below. (Or defer the printing to the caller.
6011 6015 // For now we take the expedient path of just disabling the
6012 6016 // messages for the problematic case.)
6013 6017 void CMSMarkStack::expand() {
6014 6018 assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6015 6019 if (_capacity == MarkStackSizeMax) {
6016 6020 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6017 6021 // We print a warning message only once per CMS cycle.
6018 6022 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6019 6023 }
6020 6024 return;
6021 6025 }
6022 6026 // Double capacity if possible
6023 6027 size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6024 6028 // Do not give up existing stack until we have managed to
6025 6029 // get the double capacity that we desired.
6026 6030 ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6027 6031 new_capacity * sizeof(oop)));
6028 6032 if (rs.is_reserved()) {
6029 6033 // Release the backing store associated with old stack
6030 6034 _virtual_space.release();
6031 6035 // Reinitialize virtual space for new stack
6032 6036 if (!_virtual_space.initialize(rs, rs.size())) {
6033 6037 fatal("Not enough swap for expanded marking stack");
6034 6038 }
6035 6039 _base = (oop*)(_virtual_space.low());
6036 6040 _index = 0;
6037 6041 _capacity = new_capacity;
6038 6042 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6039 6043 // Failed to double capacity, continue;
6040 6044 // we print a detail message only once per CMS cycle.
6041 6045 gclog_or_tty->print(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to "
6042 6046 SIZE_FORMAT "K",
6043 6047 _capacity / K, new_capacity / K);
6044 6048 }
6045 6049 }
6046 6050
6047 6051
6048 6052 // Closures
6049 6053 // XXX: there seems to be a lot of code duplication here;
6050 6054 // should refactor and consolidate common code.
6051 6055
6052 6056 // This closure is used to mark refs into the CMS generation in
6053 6057 // the CMS bit map. Called at the first checkpoint. This closure
6054 6058 // assumes that we do not need to re-mark dirty cards; if the CMS
6055 6059 // generation on which this is used is not the oldest
6056 6060 // generation then this will lose younger_gen cards!
6057 6061
6058 6062 MarkRefsIntoClosure::MarkRefsIntoClosure(
6059 6063 MemRegion span, CMSBitMap* bitMap):
6060 6064 _span(span),
6061 6065 _bitMap(bitMap)
6062 6066 {
6063 6067 assert(_ref_processor == NULL, "deliberately left NULL");
6064 6068 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6065 6069 }
6066 6070
6067 6071 void MarkRefsIntoClosure::do_oop(oop obj) {
6068 6072 // if obj points into _span, then mark the corresponding bit in _bitMap
6069 6073 assert(obj->is_oop(), "expected an oop");
6070 6074 HeapWord* addr = (HeapWord*)obj;
6071 6075 if (_span.contains(addr)) {
6072 6076 // this should be made more efficient
6073 6077 _bitMap->mark(addr);
6074 6078 }
6075 6079 }
6076 6080
6077 6081 void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6078 6082 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6079 6083
6080 6084 Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
6081 6085 MemRegion span, CMSBitMap* bitMap):
6082 6086 _span(span),
6083 6087 _bitMap(bitMap)
6084 6088 {
6085 6089 assert(_ref_processor == NULL, "deliberately left NULL");
6086 6090 assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6087 6091 }
6088 6092
6089 6093 void Par_MarkRefsIntoClosure::do_oop(oop obj) {
6090 6094 // if obj points into _span, then mark the corresponding bit in _bitMap
6091 6095 assert(obj->is_oop(), "expected an oop");
6092 6096 HeapWord* addr = (HeapWord*)obj;
6093 6097 if (_span.contains(addr)) {
6094 6098 // this should be made more efficient
6095 6099 _bitMap->par_mark(addr);
6096 6100 }
6097 6101 }
6098 6102
6099 6103 void Par_MarkRefsIntoClosure::do_oop(oop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6100 6104 void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6101 6105
6102 6106 // A variant of the above, used for CMS marking verification.
6103 6107 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6104 6108 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6105 6109 _span(span),
6106 6110 _verification_bm(verification_bm),
6107 6111 _cms_bm(cms_bm)
6108 6112 {
6109 6113 assert(_ref_processor == NULL, "deliberately left NULL");
6110 6114 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6111 6115 }
6112 6116
6113 6117 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6114 6118 // if obj points into _span, then mark the corresponding bit in _verification_bm
6115 6119 assert(obj->is_oop(), "expected an oop");
6116 6120 HeapWord* addr = (HeapWord*)obj;
6117 6121 if (_span.contains(addr)) {
6118 6122 _verification_bm->mark(addr);
6119 6123 if (!_cms_bm->isMarked(addr)) {
6120 6124 oop(addr)->print();
6121 6125 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6122 6126 fatal("... aborting");
6123 6127 }
6124 6128 }
6125 6129 }
6126 6130
6127 6131 void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6128 6132 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6129 6133
6130 6134 //////////////////////////////////////////////////
6131 6135 // MarkRefsIntoAndScanClosure
6132 6136 //////////////////////////////////////////////////
6133 6137
6134 6138 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6135 6139 ReferenceProcessor* rp,
6136 6140 CMSBitMap* bit_map,
6137 6141 CMSBitMap* mod_union_table,
6138 6142 CMSMarkStack* mark_stack,
6139 6143 CMSCollector* collector,
6140 6144 bool should_yield,
6141 6145 bool concurrent_precleaning):
6142 6146 _collector(collector),
6143 6147 _span(span),
6144 6148 _bit_map(bit_map),
6145 6149 _mark_stack(mark_stack),
6146 6150 _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6147 6151 mark_stack, concurrent_precleaning),
6148 6152 _yield(should_yield),
6149 6153 _concurrent_precleaning(concurrent_precleaning),
6150 6154 _freelistLock(NULL)
6151 6155 {
6152 6156 _ref_processor = rp;
6153 6157 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6154 6158 }
6155 6159
6156 6160 // This closure is used to mark refs into the CMS generation at the
6157 6161 // second (final) checkpoint, and to scan and transitively follow
6158 6162 // the unmarked oops. It is also used during the concurrent precleaning
6159 6163 // phase while scanning objects on dirty cards in the CMS generation.
6160 6164 // The marks are made in the marking bit map and the marking stack is
6161 6165 // used for keeping the (newly) grey objects during the scan.
6162 6166 // The parallel version (Par_...) appears further below.
6163 6167 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6164 6168 if (obj != NULL) {
6165 6169 assert(obj->is_oop(), "expected an oop");
6166 6170 HeapWord* addr = (HeapWord*)obj;
6167 6171 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6168 6172 assert(_collector->overflow_list_is_empty(),
6169 6173 "overflow list should be empty");
6170 6174 if (_span.contains(addr) &&
6171 6175 !_bit_map->isMarked(addr)) {
6172 6176 // mark bit map (object is now grey)
6173 6177 _bit_map->mark(addr);
6174 6178 // push on marking stack (stack should be empty), and drain the
6175 6179 // stack by applying this closure to the oops in the oops popped
6176 6180 // from the stack (i.e. blacken the grey objects)
6177 6181 bool res = _mark_stack->push(obj);
6178 6182 assert(res, "Should have space to push on empty stack");
6179 6183 do {
6180 6184 oop new_oop = _mark_stack->pop();
6181 6185 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6182 6186 assert(_bit_map->isMarked((HeapWord*)new_oop),
6183 6187 "only grey objects on this stack");
6184 6188 // iterate over the oops in this oop, marking and pushing
6185 6189 // the ones in CMS heap (i.e. in _span).
6186 6190 new_oop->oop_iterate(&_pushAndMarkClosure);
6187 6191 // check if it's time to yield
6188 6192 do_yield_check();
6189 6193 } while (!_mark_stack->isEmpty() ||
6190 6194 (!_concurrent_precleaning && take_from_overflow_list()));
6191 6195 // if marking stack is empty, and we are not doing this
6192 6196 // during precleaning, then check the overflow list
6193 6197 }
6194 6198 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6195 6199 assert(_collector->overflow_list_is_empty(),
6196 6200 "overflow list was drained above");
6197 6201 // We could restore evacuated mark words, if any, used for
6198 6202 // overflow list links here because the overflow list is
6199 6203 // provably empty here. That would reduce the maximum
6200 6204 // size requirements for preserved_{oop,mark}_stack.
6201 6205 // But we'll just postpone it until we are all done
6202 6206 // so we can just stream through.
6203 6207 if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6204 6208 _collector->restore_preserved_marks_if_any();
6205 6209 assert(_collector->no_preserved_marks(), "No preserved marks");
6206 6210 }
6207 6211 assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6208 6212 "All preserved marks should have been restored above");
6209 6213 }
6210 6214 }
6211 6215
6212 6216 void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6213 6217 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6214 6218
6215 6219 void MarkRefsIntoAndScanClosure::do_yield_work() {
6216 6220 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6217 6221 "CMS thread should hold CMS token");
6218 6222 assert_lock_strong(_freelistLock);
6219 6223 assert_lock_strong(_bit_map->lock());
6220 6224 // relinquish the free_list_lock and bitMaplock()
6221 6225 _bit_map->lock()->unlock();
6222 6226 _freelistLock->unlock();
6223 6227 ConcurrentMarkSweepThread::desynchronize(true);
6224 6228 _collector->stopTimer();
6225 6229 if (PrintCMSStatistics != 0) {
6226 6230 _collector->incrementYields();
6227 6231 }
6228 6232
6229 6233 // See the comment in coordinator_yield()
6230 6234 for (unsigned i = 0;
6231 6235 i < CMSYieldSleepCount &&
6232 6236 ConcurrentMarkSweepThread::should_yield() &&
6233 6237 !CMSCollector::foregroundGCIsActive();
6234 6238 ++i) {
6235 6239 os::sleep(Thread::current(), 1, false);
6236 6240 }
6237 6241
6238 6242 ConcurrentMarkSweepThread::synchronize(true);
6239 6243 _freelistLock->lock_without_safepoint_check();
6240 6244 _bit_map->lock()->lock_without_safepoint_check();
6241 6245 _collector->startTimer();
6242 6246 }
6243 6247
6244 6248 ///////////////////////////////////////////////////////////
6245 6249 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6246 6250 // MarkRefsIntoAndScanClosure
6247 6251 ///////////////////////////////////////////////////////////
6248 6252 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6249 6253 CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6250 6254 CMSBitMap* bit_map, OopTaskQueue* work_queue):
6251 6255 _span(span),
6252 6256 _bit_map(bit_map),
6253 6257 _work_queue(work_queue),
6254 6258 _low_water_mark(MIN2((work_queue->max_elems()/4),
6255 6259 ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6256 6260 _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6257 6261 {
6258 6262 _ref_processor = rp;
6259 6263 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6260 6264 }
6261 6265
6262 6266 // This closure is used to mark refs into the CMS generation at the
6263 6267 // second (final) checkpoint, and to scan and transitively follow
6264 6268 // the unmarked oops. The marks are made in the marking bit map and
6265 6269 // the work_queue is used for keeping the (newly) grey objects during
6266 6270 // the scan phase whence they are also available for stealing by parallel
6267 6271 // threads. Since the marking bit map is shared, updates are
6268 6272 // synchronized (via CAS).
6269 6273 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6270 6274 if (obj != NULL) {
6271 6275 // Ignore mark word because this could be an already marked oop
6272 6276 // that may be chained at the end of the overflow list.
6273 6277 assert(obj->is_oop(true), "expected an oop");
6274 6278 HeapWord* addr = (HeapWord*)obj;
6275 6279 if (_span.contains(addr) &&
6276 6280 !_bit_map->isMarked(addr)) {
6277 6281 // mark bit map (object will become grey):
6278 6282 // It is possible for several threads to be
6279 6283 // trying to "claim" this object concurrently;
6280 6284 // the unique thread that succeeds in marking the
6281 6285 // object first will do the subsequent push on
6282 6286 // to the work queue (or overflow list).
6283 6287 if (_bit_map->par_mark(addr)) {
6284 6288 // push on work_queue (which may not be empty), and trim the
6285 6289 // queue to an appropriate length by applying this closure to
6286 6290 // the oops in the oops popped from the stack (i.e. blacken the
6287 6291 // grey objects)
6288 6292 bool res = _work_queue->push(obj);
6289 6293 assert(res, "Low water mark should be less than capacity?");
6290 6294 trim_queue(_low_water_mark);
6291 6295 } // Else, another thread claimed the object
6292 6296 }
6293 6297 }
6294 6298 }
6295 6299
6296 6300 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6297 6301 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6298 6302
6299 6303 // This closure is used to rescan the marked objects on the dirty cards
6300 6304 // in the mod union table and the card table proper.
6301 6305 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6302 6306 oop p, MemRegion mr) {
6303 6307
6304 6308 size_t size = 0;
6305 6309 HeapWord* addr = (HeapWord*)p;
6306 6310 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6307 6311 assert(_span.contains(addr), "we are scanning the CMS generation");
6308 6312 // check if it's time to yield
6309 6313 if (do_yield_check()) {
6310 6314 // We yielded for some foreground stop-world work,
6311 6315 // and we have been asked to abort this ongoing preclean cycle.
6312 6316 return 0;
6313 6317 }
6314 6318 if (_bitMap->isMarked(addr)) {
6315 6319 // it's marked; is it potentially uninitialized?
6316 6320 if (p->klass_or_null() != NULL) {
6317 6321 // an initialized object; ignore mark word in verification below
6318 6322 // since we are running concurrent with mutators
6319 6323 assert(p->is_oop(true), "should be an oop");
6320 6324 if (p->is_objArray()) {
6321 6325 // objArrays are precisely marked; restrict scanning
6322 6326 // to dirty cards only.
6323 6327 size = CompactibleFreeListSpace::adjustObjectSize(
6324 6328 p->oop_iterate(_scanningClosure, mr));
6325 6329 } else {
6326 6330 // A non-array may have been imprecisely marked; we need
6327 6331 // to scan the object in its entirety.
6328 6332 size = CompactibleFreeListSpace::adjustObjectSize(
6329 6333 p->oop_iterate(_scanningClosure));
6330 6334 }
6331 6335 #ifdef ASSERT
6332 6336 size_t direct_size =
6333 6337 CompactibleFreeListSpace::adjustObjectSize(p->size());
6334 6338 assert(size == direct_size, "Inconsistency in size");
6335 6339 assert(size >= 3, "Necessary for Printezis marks to work");
6336 6340 if (!_bitMap->isMarked(addr+1)) {
6337 6341 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6338 6342 } else {
6339 6343 _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6340 6344 assert(_bitMap->isMarked(addr+size-1),
6341 6345 "inconsistent Printezis mark");
6342 6346 }
6343 6347 #endif // ASSERT
6344 6348 } else {
6345 6349 // An uninitialized object.
6346 6350 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6347 6351 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6348 6352 size = pointer_delta(nextOneAddr + 1, addr);
6349 6353 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6350 6354 "alignment problem");
6351 6355 // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6352 6356 // will dirty the card when the klass pointer is installed in the
6353 6357 // object (signaling the completion of initialization).
6354 6358 }
6355 6359 } else {
6356 6360 // Either a not yet marked object or an uninitialized object
6357 6361 if (p->klass_or_null() == NULL) {
6358 6362 // An uninitialized object; skip to the next card, since
6359 6363 // we may not be able to read its P-bits yet.
6360 6364 assert(size == 0, "Initial value");
6361 6365 } else {
6362 6366 // An object not (yet) reached by marking: we merely need to
6363 6367 // compute its size so as to go look at the next block.
6364 6368 assert(p->is_oop(true), "should be an oop");
6365 6369 size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6366 6370 }
6367 6371 }
6368 6372 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6369 6373 return size;
6370 6374 }
6371 6375
6372 6376 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6373 6377 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6374 6378 "CMS thread should hold CMS token");
6375 6379 assert_lock_strong(_freelistLock);
6376 6380 assert_lock_strong(_bitMap->lock());
6377 6381 // relinquish the freelistLock and the bitMap lock
6378 6382 _bitMap->lock()->unlock();
6379 6383 _freelistLock->unlock();
6380 6384 ConcurrentMarkSweepThread::desynchronize(true);
6381 6385 _collector->stopTimer();
6382 6386 if (PrintCMSStatistics != 0) {
6383 6387 _collector->incrementYields();
6384 6388 }
6385 6389
6386 6390 // See the comment in coordinator_yield()
6387 6391 for (unsigned i = 0; i < CMSYieldSleepCount &&
6388 6392 ConcurrentMarkSweepThread::should_yield() &&
6389 6393 !CMSCollector::foregroundGCIsActive(); ++i) {
6390 6394 os::sleep(Thread::current(), 1, false);
6391 6395 }
6392 6396
6393 6397 ConcurrentMarkSweepThread::synchronize(true);
6394 6398 _freelistLock->lock_without_safepoint_check();
6395 6399 _bitMap->lock()->lock_without_safepoint_check();
6396 6400 _collector->startTimer();
6397 6401 }
6398 6402
6399 6403
6400 6404 //////////////////////////////////////////////////////////////////
6401 6405 // SurvivorSpacePrecleanClosure
6402 6406 //////////////////////////////////////////////////////////////////
6403 6407 // This (single-threaded) closure is used to preclean the oops in
6404 6408 // the survivor spaces.
6405 6409 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6406 6410
6407 6411 HeapWord* addr = (HeapWord*)p;
6408 6412 DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6409 6413 assert(!_span.contains(addr), "we are scanning the survivor spaces");
6410 6414 assert(p->klass_or_null() != NULL, "object should be initialized");
6411 6415 // an initialized object; ignore mark word in verification below
6412 6416 // since we are running concurrent with mutators
6413 6417 assert(p->is_oop(true), "should be an oop");
6414 6418 // Note that we do not yield while we iterate over
6415 6419 // the interior oops of p, pushing the relevant ones
6416 6420 // on our marking stack.
6417 6421 size_t size = p->oop_iterate(_scanning_closure);
6418 6422 do_yield_check();
6419 6423 // Observe that below, we do not abandon the preclean
6420 6424 // phase as soon as we should; rather we empty the
6421 6425 // marking stack before returning. This is to satisfy
6422 6426 // some existing assertions. In general, it may be a
6423 6427 // good idea to abort immediately and complete the marking
6424 6428 // from the grey objects at a later time.
6425 6429 while (!_mark_stack->isEmpty()) {
6426 6430 oop new_oop = _mark_stack->pop();
6427 6431 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6428 6432 assert(_bit_map->isMarked((HeapWord*)new_oop),
6429 6433 "only grey objects on this stack");
6430 6434 // iterate over the oops in this oop, marking and pushing
6431 6435 // the ones in CMS heap (i.e. in _span).
6432 6436 new_oop->oop_iterate(_scanning_closure);
6433 6437 // check if it's time to yield
6434 6438 do_yield_check();
6435 6439 }
6436 6440 unsigned int after_count =
6437 6441 GenCollectedHeap::heap()->total_collections();
6438 6442 bool abort = (_before_count != after_count) ||
6439 6443 _collector->should_abort_preclean();
6440 6444 return abort ? 0 : size;
6441 6445 }
6442 6446
6443 6447 void SurvivorSpacePrecleanClosure::do_yield_work() {
6444 6448 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6445 6449 "CMS thread should hold CMS token");
6446 6450 assert_lock_strong(_bit_map->lock());
6447 6451 // Relinquish the bit map lock
6448 6452 _bit_map->lock()->unlock();
6449 6453 ConcurrentMarkSweepThread::desynchronize(true);
6450 6454 _collector->stopTimer();
6451 6455 if (PrintCMSStatistics != 0) {
6452 6456 _collector->incrementYields();
6453 6457 }
6454 6458
6455 6459 // See the comment in coordinator_yield()
6456 6460 for (unsigned i = 0; i < CMSYieldSleepCount &&
6457 6461 ConcurrentMarkSweepThread::should_yield() &&
6458 6462 !CMSCollector::foregroundGCIsActive(); ++i) {
6459 6463 os::sleep(Thread::current(), 1, false);
6460 6464 }
6461 6465
6462 6466 ConcurrentMarkSweepThread::synchronize(true);
6463 6467 _bit_map->lock()->lock_without_safepoint_check();
6464 6468 _collector->startTimer();
6465 6469 }
6466 6470
6467 6471 // This closure is used to rescan the marked objects on the dirty cards
6468 6472 // in the mod union table and the card table proper. In the parallel
6469 6473 // case, although the bitMap is shared, we do a single read so the
6470 6474 // isMarked() query is "safe".
6471 6475 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6472 6476 // Ignore mark word because we are running concurrent with mutators
6473 6477 assert(p->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(p)));
6474 6478 HeapWord* addr = (HeapWord*)p;
6475 6479 assert(_span.contains(addr), "we are scanning the CMS generation");
6476 6480 bool is_obj_array = false;
6477 6481 #ifdef ASSERT
6478 6482 if (!_parallel) {
6479 6483 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6480 6484 assert(_collector->overflow_list_is_empty(),
6481 6485 "overflow list should be empty");
6482 6486
6483 6487 }
6484 6488 #endif // ASSERT
6485 6489 if (_bit_map->isMarked(addr)) {
6486 6490 // Obj arrays are precisely marked, non-arrays are not;
6487 6491 // so we scan objArrays precisely and non-arrays in their
6488 6492 // entirety.
6489 6493 if (p->is_objArray()) {
6490 6494 is_obj_array = true;
6491 6495 if (_parallel) {
6492 6496 p->oop_iterate(_par_scan_closure, mr);
6493 6497 } else {
6494 6498 p->oop_iterate(_scan_closure, mr);
6495 6499 }
6496 6500 } else {
6497 6501 if (_parallel) {
6498 6502 p->oop_iterate(_par_scan_closure);
6499 6503 } else {
6500 6504 p->oop_iterate(_scan_closure);
6501 6505 }
6502 6506 }
6503 6507 }
6504 6508 #ifdef ASSERT
6505 6509 if (!_parallel) {
6506 6510 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6507 6511 assert(_collector->overflow_list_is_empty(),
6508 6512 "overflow list should be empty");
6509 6513
6510 6514 }
6511 6515 #endif // ASSERT
6512 6516 return is_obj_array;
6513 6517 }
6514 6518
6515 6519 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6516 6520 MemRegion span,
6517 6521 CMSBitMap* bitMap, CMSMarkStack* markStack,
6518 6522 bool should_yield, bool verifying):
6519 6523 _collector(collector),
6520 6524 _span(span),
6521 6525 _bitMap(bitMap),
6522 6526 _mut(&collector->_modUnionTable),
6523 6527 _markStack(markStack),
6524 6528 _yield(should_yield),
6525 6529 _skipBits(0)
6526 6530 {
6527 6531 assert(_markStack->isEmpty(), "stack should be empty");
6528 6532 _finger = _bitMap->startWord();
6529 6533 _threshold = _finger;
6530 6534 assert(_collector->_restart_addr == NULL, "Sanity check");
6531 6535 assert(_span.contains(_finger), "Out of bounds _finger?");
6532 6536 DEBUG_ONLY(_verifying = verifying;)
6533 6537 }
6534 6538
6535 6539 void MarkFromRootsClosure::reset(HeapWord* addr) {
6536 6540 assert(_markStack->isEmpty(), "would cause duplicates on stack");
6537 6541 assert(_span.contains(addr), "Out of bounds _finger?");
6538 6542 _finger = addr;
6539 6543 _threshold = (HeapWord*)round_to(
6540 6544 (intptr_t)_finger, CardTableModRefBS::card_size);
6541 6545 }
6542 6546
6543 6547 // Should revisit to see if this should be restructured for
6544 6548 // greater efficiency.
6545 6549 bool MarkFromRootsClosure::do_bit(size_t offset) {
6546 6550 if (_skipBits > 0) {
6547 6551 _skipBits--;
6548 6552 return true;
6549 6553 }
6550 6554 // convert offset into a HeapWord*
6551 6555 HeapWord* addr = _bitMap->startWord() + offset;
6552 6556 assert(addr >= _bitMap->startWord() && addr < _bitMap->endWord(),
6553 6557 "address out of range");
6554 6558 assert(_bitMap->isMarked(addr), "tautology");
6555 6559 if (_bitMap->isMarked(addr+1)) {
6556 6560 // this is an allocated but not yet initialized object
6557 6561 assert(_skipBits == 0, "tautology");
6558 6562 _skipBits = 2; // skip next two marked bits ("Printezis-marks")
6559 6563 oop p = oop(addr);
6560 6564 if (p->klass_or_null() == NULL) {
6561 6565 DEBUG_ONLY(if (!_verifying) {)
6562 6566 // We re-dirty the cards on which this object lies and increase
6563 6567 // the _threshold so that we'll come back to scan this object
6564 6568 // during the preclean or remark phase. (CMSCleanOnEnter)
6565 6569 if (CMSCleanOnEnter) {
6566 6570 size_t sz = _collector->block_size_using_printezis_bits(addr);
6567 6571 HeapWord* end_card_addr = (HeapWord*)round_to(
6568 6572 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6569 6573 MemRegion redirty_range = MemRegion(addr, end_card_addr);
6570 6574 assert(!redirty_range.is_empty(), "Arithmetical tautology");
6571 6575 // Bump _threshold to end_card_addr; note that
6572 6576 // _threshold cannot possibly exceed end_card_addr, anyhow.
6573 6577 // This prevents future clearing of the card as the scan proceeds
6574 6578 // to the right.
6575 6579 assert(_threshold <= end_card_addr,
6576 6580 "Because we are just scanning into this object");
6577 6581 if (_threshold < end_card_addr) {
6578 6582 _threshold = end_card_addr;
6579 6583 }
6580 6584 if (p->klass_or_null() != NULL) {
6581 6585 // Redirty the range of cards...
6582 6586 _mut->mark_range(redirty_range);
6583 6587 } // ...else the setting of klass will dirty the card anyway.
6584 6588 }
6585 6589 DEBUG_ONLY(})
6586 6590 return true;
6587 6591 }
6588 6592 }
6589 6593 scanOopsInOop(addr);
6590 6594 return true;
6591 6595 }
6592 6596
6593 6597 // We take a break if we've been at this for a while,
6594 6598 // so as to avoid monopolizing the locks involved.
6595 6599 void MarkFromRootsClosure::do_yield_work() {
6596 6600 // First give up the locks, then yield, then re-lock
6597 6601 // We should probably use a constructor/destructor idiom to
6598 6602 // do this unlock/lock or modify the MutexUnlocker class to
6599 6603 // serve our purpose. XXX
6600 6604 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6601 6605 "CMS thread should hold CMS token");
6602 6606 assert_lock_strong(_bitMap->lock());
6603 6607 _bitMap->lock()->unlock();
6604 6608 ConcurrentMarkSweepThread::desynchronize(true);
6605 6609 _collector->stopTimer();
6606 6610 if (PrintCMSStatistics != 0) {
6607 6611 _collector->incrementYields();
6608 6612 }
6609 6613
6610 6614 // See the comment in coordinator_yield()
6611 6615 for (unsigned i = 0; i < CMSYieldSleepCount &&
6612 6616 ConcurrentMarkSweepThread::should_yield() &&
6613 6617 !CMSCollector::foregroundGCIsActive(); ++i) {
6614 6618 os::sleep(Thread::current(), 1, false);
6615 6619 }
6616 6620
6617 6621 ConcurrentMarkSweepThread::synchronize(true);
6618 6622 _bitMap->lock()->lock_without_safepoint_check();
6619 6623 _collector->startTimer();
6620 6624 }
6621 6625
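          // Editor's sketch (not part of the original change): the XXX note
          // above suggests a constructor/destructor idiom for the
          // unlock/yield/relock pairing. A minimal, hypothetical helper,
          // assuming only the Mutex::unlock() and
          // Mutex::lock_without_safepoint_check() calls already used above:
          class ScopedLockYielder : public StackObj {
            Mutex* const _lock;
           public:
            ScopedLockYielder(Mutex* lock) : _lock(lock) {
              _lock->unlock();                        // relinquish for the yield
            }
            ~ScopedLockYielder() {
              _lock->lock_without_safepoint_check();  // re-acquire on scope exit
            }
          };
          // The sleep loop in do_yield_work() would then run inside the scope
          // of such an object, making the unlock/relock pairing automatic.
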
6622 6626 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6623 6627 assert(_bitMap->isMarked(ptr), "expected bit to be set");
6624 6628 assert(_markStack->isEmpty(),
6625 6629 "should drain stack to limit stack usage");
6626 6630 // convert ptr to an oop preparatory to scanning
6627 6631 oop obj = oop(ptr);
6628 6632 // Ignore mark word in verification below, since we
6629 6633 // may be running concurrent with mutators.
6630 6634 assert(obj->is_oop(true), "should be an oop");
6631 6635 assert(_finger <= ptr, "_finger runneth ahead");
6632 6636 // advance the finger to right end of this object
6633 6637 _finger = ptr + obj->size();
6634 6638 assert(_finger > ptr, "we just incremented it above");
6635 6639 // On large heaps, it may take us some time to get through
6636 6640 // the marking phase. During
6637 6641 // this time it's possible that a lot of mutations have
6638 6642 // accumulated in the card table and the mod union table --
6639 6643 // these mutation records are redundant until we have
6640 6644 // actually traced into the corresponding card.
6641 6645 // Here, we check whether advancing the finger would make
6642 6646 // us cross into a new card, and if so clear corresponding
6643 6647 // cards in the MUT (preclean them in the card-table in the
6644 6648 // future).
6645 6649
6646 6650 DEBUG_ONLY(if (!_verifying) {)
6647 6651 // The clean-on-enter optimization is disabled by default,
6648 6652 // until we fix 6178663.
6649 6653 if (CMSCleanOnEnter && (_finger > _threshold)) {
6650 6654 // [_threshold, _finger) represents the interval
6651 6655 // of cards to be cleared in MUT (or precleaned in card table).
6652 6656 // The set of cards to be cleared is all those that overlap
6653 6657 // with the interval [_threshold, _finger); note that
6654 6658 // _threshold is always kept card-aligned but _finger isn't
6655 6659 // always card-aligned.
6656 6660 HeapWord* old_threshold = _threshold;
6657 6661 assert(old_threshold == (HeapWord*)round_to(
6658 6662 (intptr_t)old_threshold, CardTableModRefBS::card_size),
6659 6663 "_threshold should always be card-aligned");
6660 6664 _threshold = (HeapWord*)round_to(
6661 6665 (intptr_t)_finger, CardTableModRefBS::card_size);
6662 6666 MemRegion mr(old_threshold, _threshold);
6663 6667 assert(!mr.is_empty(), "Control point invariant");
6664 6668 assert(_span.contains(mr), "Should clear within span");
6665 6669 _mut->clear_range(mr);
6666 6670 }
6667 6671 DEBUG_ONLY(})
6668 6672 // Note: the finger doesn't advance while we drain
6669 6673 // the stack below.
6670 6674 PushOrMarkClosure pushOrMarkClosure(_collector,
6671 6675 _span, _bitMap, _markStack,
6672 6676 _finger, this);
6673 6677 bool res = _markStack->push(obj);
6674 6678 assert(res, "Empty non-zero size stack should have space for single push");
6675 6679 while (!_markStack->isEmpty()) {
6676 6680 oop new_oop = _markStack->pop();
6677 6681 // Skip verifying header mark word below because we are
6678 6682 // running concurrent with mutators.
6679 6683 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6680 6684 // now scan this oop's oops
6681 6685 new_oop->oop_iterate(&pushOrMarkClosure);
6682 6686 do_yield_check();
6683 6687 }
6684 6688 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6685 6689 }
6686 6690
6687 6691 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
6688 6692 CMSCollector* collector, MemRegion span,
6689 6693 CMSBitMap* bit_map,
6690 6694 OopTaskQueue* work_queue,
6691 6695 CMSMarkStack* overflow_stack):
6692 6696 _collector(collector),
6693 6697 _whole_span(collector->_span),
6694 6698 _span(span),
6695 6699 _bit_map(bit_map),
6696 6700 _mut(&collector->_modUnionTable),
6697 6701 _work_queue(work_queue),
6698 6702 _overflow_stack(overflow_stack),
6699 6703 _skip_bits(0),
6700 6704 _task(task)
6701 6705 {
6702 6706 assert(_work_queue->size() == 0, "work_queue should be empty");
6703 6707 _finger = span.start();
6704 6708 _threshold = _finger; // XXX Defer clear-on-enter optimization for now
6705 6709 assert(_span.contains(_finger), "Out of bounds _finger?");
6706 6710 }
6707 6711
6708 6712 // Should revisit to see if this should be restructured for
6709 6713 // greater efficiency.
6710 6714 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
6711 6715 if (_skip_bits > 0) {
6712 6716 _skip_bits--;
6713 6717 return true;
6714 6718 }
6715 6719 // convert offset into a HeapWord*
6716 6720 HeapWord* addr = _bit_map->startWord() + offset;
6717 6721 assert(addr >= _bit_map->startWord() && addr < _bit_map->endWord(),
6718 6722 "address out of range");
6719 6723 assert(_bit_map->isMarked(addr), "tautology");
6720 6724 if (_bit_map->isMarked(addr+1)) {
6721 6725 // this is an allocated object that might not yet be initialized
6722 6726 assert(_skip_bits == 0, "tautology");
6723 6727 _skip_bits = 2; // skip next two marked bits ("Printezis-marks")
6724 6728 oop p = oop(addr);
6725 6729 if (p->klass_or_null() == NULL) {
6726 6730 // Under the Clean-on-Enter optimization we would redirty the card and
6727 6731 // raise the threshold to avoid clearing it (deferred here; see 6178663).
6728 6732 return true;
6729 6733 }
6730 6734 }
6731 6735 scan_oops_in_oop(addr);
6732 6736 return true;
6733 6737 }
6734 6738
6735 6739 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6736 6740 assert(_bit_map->isMarked(ptr), "expected bit to be set");
6737 6741 // Should we assert that our work queue is empty or
6738 6742 // below some drain limit?
6739 6743 assert(_work_queue->size() == 0,
6740 6744 "should drain stack to limit stack usage");
6741 6745 // convert ptr to an oop preparatory to scanning
6742 6746 oop obj = oop(ptr);
6743 6747 // Ignore mark word in verification below, since we
6744 6748 // may be running concurrent with mutators.
6745 6749 assert(obj->is_oop(true), "should be an oop");
6746 6750 assert(_finger <= ptr, "_finger runneth ahead");
6747 6751 // advance the finger to right end of this object
6748 6752 _finger = ptr + obj->size();
6749 6753 assert(_finger > ptr, "we just incremented it above");
6750 6754 // On large heaps, it may take us some time to get through
6751 6755 // the marking phase. During
6752 6756 // this time it's possible that a lot of mutations have
6753 6757 // accumulated in the card table and the mod union table --
6754 6758 // these mutation records are redundant until we have
6755 6759 // actually traced into the corresponding card.
6756 6760 // Here, we check whether advancing the finger would make
6757 6761 // us cross into a new card, and if so clear corresponding
6758 6762 // cards in the MUT (preclean them in the card-table in the
6759 6763 // future).
6760 6764
6761 6765 // The clean-on-enter optimization is disabled by default,
6762 6766 // until we fix 6178663.
6763 6767 if (CMSCleanOnEnter && (_finger > _threshold)) {
6764 6768 // [_threshold, _finger) represents the interval
6765 6769 // of cards to be cleared in MUT (or precleaned in card table).
6766 6770 // The set of cards to be cleared is all those that overlap
6767 6771 // with the interval [_threshold, _finger); note that
6768 6772 // _threshold is always kept card-aligned but _finger isn't
6769 6773 // always card-aligned.
6770 6774 HeapWord* old_threshold = _threshold;
6771 6775 assert(old_threshold == (HeapWord*)round_to(
6772 6776 (intptr_t)old_threshold, CardTableModRefBS::card_size),
6773 6777 "_threshold should always be card-aligned");
6774 6778 _threshold = (HeapWord*)round_to(
6775 6779 (intptr_t)_finger, CardTableModRefBS::card_size);
6776 6780 MemRegion mr(old_threshold, _threshold);
6777 6781 assert(!mr.is_empty(), "Control point invariant");
6778 6782 assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
6779 6783 _mut->clear_range(mr);
6780 6784 }
6781 6785
6782 6786 // Note: the local finger doesn't advance while we drain
6783 6787 // the stack below, but the global finger sure can and will.
6784 6788 HeapWord** gfa = _task->global_finger_addr();
6785 6789 Par_PushOrMarkClosure pushOrMarkClosure(_collector,
6786 6790 _span, _bit_map,
6787 6791 _work_queue,
6788 6792 _overflow_stack,
6789 6793 _finger,
6790 6794 gfa, this);
6791 6795 bool res = _work_queue->push(obj); // overflow could occur here
6792 6796 assert(res, "Will hold once we use workqueues");
6793 6797 while (true) {
6794 6798 oop new_oop;
6795 6799 if (!_work_queue->pop_local(new_oop)) {
6796 6800 // We emptied our work_queue; check if there's stuff that can
6797 6801 // be gotten from the overflow stack.
6798 6802 if (CMSConcMarkingTask::get_work_from_overflow_stack(
6799 6803 _overflow_stack, _work_queue)) {
6800 6804 do_yield_check();
6801 6805 continue;
6802 6806 } else { // done
6803 6807 break;
6804 6808 }
6805 6809 }
6806 6810 // Skip verifying header mark word below because we are
6807 6811 // running concurrent with mutators.
6808 6812 assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6809 6813 // now scan this oop's oops
6810 6814 new_oop->oop_iterate(&pushOrMarkClosure);
6811 6815 do_yield_check();
6812 6816 }
6813 6817 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
6814 6818 }
6815 6819
6816 6820 // Yield in response to a request from VM Thread or
6817 6821 // from mutators.
6818 6822 void Par_MarkFromRootsClosure::do_yield_work() {
6819 6823 assert(_task != NULL, "sanity");
6820 6824 _task->yield();
6821 6825 }
6822 6826
6823 6827 // A variant of the above used for verifying CMS marking work.
6824 6828 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
6825 6829 MemRegion span,
6826 6830 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6827 6831 CMSMarkStack* mark_stack):
6828 6832 _collector(collector),
6829 6833 _span(span),
6830 6834 _verification_bm(verification_bm),
6831 6835 _cms_bm(cms_bm),
6832 6836 _mark_stack(mark_stack),
6833 6837 _pam_verify_closure(collector, span, verification_bm, cms_bm,
6834 6838 mark_stack)
6835 6839 {
6836 6840 assert(_mark_stack->isEmpty(), "stack should be empty");
6837 6841 _finger = _verification_bm->startWord();
6838 6842 assert(_collector->_restart_addr == NULL, "Sanity check");
6839 6843 assert(_span.contains(_finger), "Out of bounds _finger?");
6840 6844 }
6841 6845
6842 6846 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
6843 6847 assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
6844 6848 assert(_span.contains(addr), "Out of bounds _finger?");
6845 6849 _finger = addr;
6846 6850 }
6847 6851
6848 6852 // Should revisit to see if this should be restructured for
6849 6853 // greater efficiency.
6850 6854 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
6851 6855 // convert offset into a HeapWord*
6852 6856 HeapWord* addr = _verification_bm->startWord() + offset;
6853 6857 assert(addr >= _verification_bm->startWord() && addr < _verification_bm->endWord(),
6854 6858 "address out of range");
6855 6859 assert(_verification_bm->isMarked(addr), "tautology");
6856 6860 assert(_cms_bm->isMarked(addr), "tautology");
6857 6861
6858 6862 assert(_mark_stack->isEmpty(),
6859 6863 "should drain stack to limit stack usage");
6860 6864 // convert addr to an oop preparatory to scanning
6861 6865 oop obj = oop(addr);
6862 6866 assert(obj->is_oop(), "should be an oop");
6863 6867 assert(_finger <= addr, "_finger runneth ahead");
6864 6868 // advance the finger to right end of this object
6865 6869 _finger = addr + obj->size();
6866 6870 assert(_finger > addr, "we just incremented it above");
6867 6871 // Note: the finger doesn't advance while we drain
6868 6872 // the stack below.
6869 6873 bool res = _mark_stack->push(obj);
6870 6874 assert(res, "Empty non-zero size stack should have space for single push");
6871 6875 while (!_mark_stack->isEmpty()) {
6872 6876 oop new_oop = _mark_stack->pop();
6873 6877 assert(new_oop->is_oop(), "Oops! expected to pop an oop");
6874 6878 // now scan this oop's oops
6875 6879 new_oop->oop_iterate(&_pam_verify_closure);
6876 6880 }
6877 6881 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
6878 6882 return true;
6879 6883 }
6880 6884
6881 6885 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
6882 6886 CMSCollector* collector, MemRegion span,
6883 6887 CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6884 6888 CMSMarkStack* mark_stack):
6885 6889 MetadataAwareOopClosure(collector->ref_processor()),
6886 6890 _collector(collector),
6887 6891 _span(span),
6888 6892 _verification_bm(verification_bm),
6889 6893 _cms_bm(cms_bm),
6890 6894 _mark_stack(mark_stack)
6891 6895 { }
6892 6896
6893 6897 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6894 6898 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6895 6899
6896 6900 // Upon stack overflow, we discard (part of) the stack,
6897 6901 // remembering the least address amongst those discarded
6898 6902 // in CMSCollector's _restart_address.
6899 6903 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6900 6904 // Remember the least grey address discarded
6901 6905 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6902 6906 _collector->lower_restart_addr(ra);
6903 6907 _mark_stack->reset(); // discard stack contents
6904 6908 _mark_stack->expand(); // expand the stack if possible
6905 6909 }
6906 6910
6907 6911 void PushAndMarkVerifyClosure::do_oop(oop obj) {
6908 6912 assert(obj->is_oop_or_null(), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
6909 6913 HeapWord* addr = (HeapWord*)obj;
6910 6914 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6911 6915 // Oop lies in _span and isn't yet grey or black
6912 6916 _verification_bm->mark(addr); // now grey
6913 6917 if (!_cms_bm->isMarked(addr)) {
6914 6918 oop(addr)->print();
6915 6919 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
6916 6920 p2i(addr));
6917 6921 fatal("... aborting");
6918 6922 }
6919 6923
6920 6924 if (!_mark_stack->push(obj)) { // stack overflow
6921 6925 if (PrintCMSStatistics != 0) {
6922 6926 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
6923 6927 SIZE_FORMAT, _mark_stack->capacity());
6924 6928 }
6925 6929 assert(_mark_stack->isFull(), "Else push should have succeeded");
6926 6930 handle_stack_overflow(addr);
6927 6931 }
6928 6932 // anything including and to the right of _finger
6929 6933 // will be scanned as we iterate over the remainder of the
6930 6934 // bit map
6931 6935 }
6932 6936 }
6933 6937
6934 6938 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6935 6939 MemRegion span,
6936 6940 CMSBitMap* bitMap, CMSMarkStack* markStack,
6937 6941 HeapWord* finger, MarkFromRootsClosure* parent) :
6938 6942 MetadataAwareOopClosure(collector->ref_processor()),
6939 6943 _collector(collector),
6940 6944 _span(span),
6941 6945 _bitMap(bitMap),
6942 6946 _markStack(markStack),
6943 6947 _finger(finger),
6944 6948 _parent(parent)
6945 6949 { }
6946 6950
6947 6951 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
6948 6952 MemRegion span,
6949 6953 CMSBitMap* bit_map,
6950 6954 OopTaskQueue* work_queue,
6951 6955 CMSMarkStack* overflow_stack,
6952 6956 HeapWord* finger,
6953 6957 HeapWord** global_finger_addr,
6954 6958 Par_MarkFromRootsClosure* parent) :
6955 6959 MetadataAwareOopClosure(collector->ref_processor()),
6956 6960 _collector(collector),
6957 6961 _whole_span(collector->_span),
6958 6962 _span(span),
6959 6963 _bit_map(bit_map),
6960 6964 _work_queue(work_queue),
6961 6965 _overflow_stack(overflow_stack),
6962 6966 _finger(finger),
6963 6967 _global_finger_addr(global_finger_addr),
6964 6968 _parent(parent)
6965 6969 { }
6966 6970
6967 6971 // Assumes thread-safe access by callers, who are
6968 6972 // responsible for mutual exclusion.
6969 6973 void CMSCollector::lower_restart_addr(HeapWord* low) {
6970 6974 assert(_span.contains(low), "Out of bounds addr");
6971 6975 if (_restart_addr == NULL) {
6972 6976 _restart_addr = low;
6973 6977 } else {
6974 6978 _restart_addr = MIN2(_restart_addr, low);
6975 6979 }
6976 6980 }
6977 6981
6978 6982 // Upon stack overflow, we discard (part of) the stack,
6979 6983 // remembering the least address amongst those discarded
6980 6984 // in CMSCollector's _restart_address.
6981 6985 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6982 6986 // Remember the least grey address discarded
6983 6987 HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
6984 6988 _collector->lower_restart_addr(ra);
6985 6989 _markStack->reset(); // discard stack contents
6986 6990 _markStack->expand(); // expand the stack if possible
6987 6991 }
6988 6992
6989 6993 // Upon stack overflow, we discard (part of) the stack,
6990 6994 // remembering the least address amongst those discarded
6991 6995 // in CMSCollector's _restart_address.
6992 6996 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6993 6997 // We need to do this under a mutex to prevent other
6994 6998 // workers from interfering with the work done below.
6995 6999 MutexLockerEx ml(_overflow_stack->par_lock(),
6996 7000 Mutex::_no_safepoint_check_flag);
6997 7001 // Remember the least grey address discarded
6998 7002 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
6999 7003 _collector->lower_restart_addr(ra);
7000 7004 _overflow_stack->reset(); // discard stack contents
7001 7005 _overflow_stack->expand(); // expand the stack if possible
7002 7006 }
7003 7007
7004 7008 void PushOrMarkClosure::do_oop(oop obj) {
7005 7009 // Ignore mark word because we are running concurrent with mutators.
7006 7010 assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7007 7011 HeapWord* addr = (HeapWord*)obj;
7008 7012 if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7009 7013 // Oop lies in _span and isn't yet grey or black
7010 7014 _bitMap->mark(addr); // now grey
7011 7015 if (addr < _finger) {
7012 7016 // the bit map iteration has already either passed, or
7013 7017 // sampled, this bit in the bit map; we'll need to
7014 7018 // use the marking stack to scan this oop's oops.
7015 7019 bool simulate_overflow = false;
7016 7020 NOT_PRODUCT(
7017 7021 if (CMSMarkStackOverflowALot &&
7018 7022 _collector->simulate_overflow()) {
7019 7023 // simulate a stack overflow
7020 7024 simulate_overflow = true;
7021 7025 }
7022 7026 )
7023 7027 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7024 7028 if (PrintCMSStatistics != 0) {
7025 7029 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7026 7030 SIZE_FORMAT, _markStack->capacity());
7027 7031 }
7028 7032 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7029 7033 handle_stack_overflow(addr);
7030 7034 }
7031 7035 }
7032 7036 // anything including and to the right of _finger
7033 7037 // will be scanned as we iterate over the remainder of the
7034 7038 // bit map
7035 7039 do_yield_check();
7036 7040 }
7037 7041 }
7038 7042
7039 7043 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); }
7040 7044 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7041 7045
7042 7046 void Par_PushOrMarkClosure::do_oop(oop obj) {
7043 7047 // Ignore mark word because we are running concurrent with mutators.
7044 7048 assert(obj->is_oop_or_null(true), err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7045 7049 HeapWord* addr = (HeapWord*)obj;
7046 7050 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7047 7051 // Oop lies in _whole_span and isn't yet grey or black
7048 7052 // We read the global_finger (volatile read) strictly after marking oop
7049 7053 bool res = _bit_map->par_mark(addr); // now grey
7050 7054 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7051 7055 // Should we push this marked oop on our stack?
7052 7056 // -- if someone else marked it, nothing to do
7053 7057 // -- if target oop is above global finger nothing to do
7054 7058 // -- if target oop is in chunk and above local finger
7055 7059 // then nothing to do
7056 7060 // -- else push on work queue
7057 7061 if ( !res // someone else marked it, they will deal with it
7058 7062 || (addr >= *gfa) // will be scanned in a later task
7059 7063 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7060 7064 return;
7061 7065 }
7062 7066 // the bit map iteration has already either passed, or
7063 7067 // sampled, this bit in the bit map; we'll need to
7064 7068 // use the marking stack to scan this oop's oops.
7065 7069 bool simulate_overflow = false;
7066 7070 NOT_PRODUCT(
7067 7071 if (CMSMarkStackOverflowALot &&
7068 7072 _collector->simulate_overflow()) {
7069 7073 // simulate a stack overflow
7070 7074 simulate_overflow = true;
7071 7075 }
7072 7076 )
7073 7077 if (simulate_overflow ||
7074 7078 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7075 7079 // stack overflow
7076 7080 if (PrintCMSStatistics != 0) {
7077 7081 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7078 7082 SIZE_FORMAT, _overflow_stack->capacity());
7079 7083 }
7080 7084 // We cannot assert that the overflow stack is full because
7081 7085 // it may have been emptied since.
7082 7086 assert(simulate_overflow ||
7083 7087 _work_queue->size() == _work_queue->max_elems(),
7084 7088 "Else push should have succeeded");
7085 7089 handle_stack_overflow(addr);
7086 7090 }
7087 7091 do_yield_check();
7088 7092 }
7089 7093 }
7090 7094
7091 7095 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7092 7096 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7093 7097
7094 7098 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7095 7099 MemRegion span,
7096 7100 ReferenceProcessor* rp,
7097 7101 CMSBitMap* bit_map,
7098 7102 CMSBitMap* mod_union_table,
7099 7103 CMSMarkStack* mark_stack,
7100 7104 bool concurrent_precleaning):
7101 7105 MetadataAwareOopClosure(rp),
7102 7106 _collector(collector),
7103 7107 _span(span),
7104 7108 _bit_map(bit_map),
7105 7109 _mod_union_table(mod_union_table),
7106 7110 _mark_stack(mark_stack),
7107 7111 _concurrent_precleaning(concurrent_precleaning)
7108 7112 {
7109 7113 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7110 7114 }
7111 7115
7112 7116 // Grey object rescan during pre-cleaning and second checkpoint phases --
7113 7117 // the non-parallel version (the parallel version appears further below.)
7114 7118 void PushAndMarkClosure::do_oop(oop obj) {
7115 7119 // Ignore the mark word in verification: during concurrent precleaning
7116 7120 // the object monitor may be locked, and during the checkpoint
7117 7121 // phases the object may already have been reached by a different
7118 7122 // path and may be at the end of the global overflow list (so
7119 7123 // the mark word may be NULL).
7120 7124 assert(obj->is_oop_or_null(true /* ignore mark word */),
7121 7125 err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7122 7126 HeapWord* addr = (HeapWord*)obj;
7123 7127 // Check if oop points into the CMS generation
7124 7128 // and is not marked
7125 7129 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7126 7130 // a white object ...
7127 7131 _bit_map->mark(addr); // ... now grey
7128 7132 // push on the marking stack (grey set)
7129 7133 bool simulate_overflow = false;
7130 7134 NOT_PRODUCT(
7131 7135 if (CMSMarkStackOverflowALot &&
7132 7136 _collector->simulate_overflow()) {
7133 7137 // simulate a stack overflow
7134 7138 simulate_overflow = true;
7135 7139 }
7136 7140 )
7137 7141 if (simulate_overflow || !_mark_stack->push(obj)) {
7138 7142 if (_concurrent_precleaning) {
7139 7143 // During precleaning we can just dirty the appropriate card(s)
7140 7144 // in the mod union table, thus ensuring that the object remains
7141 7145 // in the grey set, and then continue. In the case of object arrays
7142 7146 // we need to dirty all of the cards that the object spans,
7143 7147 // since the rescan of object arrays will be limited to the
7144 7148 // dirty cards.
7145 7149 // Note that no one can be interfering with us in this action
7146 7150 // of dirtying the mod union table, so no locking or atomics
7147 7151 // are required.
7148 7152 if (obj->is_objArray()) {
7149 7153 size_t sz = obj->size();
7150 7154 HeapWord* end_card_addr = (HeapWord*)round_to(
7151 7155 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7152 7156 MemRegion redirty_range = MemRegion(addr, end_card_addr);
7153 7157 assert(!redirty_range.is_empty(), "Arithmetical tautology");
7154 7158 _mod_union_table->mark_range(redirty_range);
7155 7159 } else {
7156 7160 _mod_union_table->mark(addr);
7157 7161 }
7158 7162 _collector->_ser_pmc_preclean_ovflw++;
7159 7163 } else {
7160 7164 // During the remark phase, we need to remember this oop
7161 7165 // in the overflow list.
7162 7166 _collector->push_on_overflow_list(obj);
7163 7167 _collector->_ser_pmc_remark_ovflw++;
7164 7168 }
7165 7169 }
7166 7170 }
7167 7171 }
7168 7172
7169 7173 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7170 7174 MemRegion span,
7171 7175 ReferenceProcessor* rp,
7172 7176 CMSBitMap* bit_map,
7173 7177 OopTaskQueue* work_queue):
7174 7178 MetadataAwareOopClosure(rp),
7175 7179 _collector(collector),
7176 7180 _span(span),
7177 7181 _bit_map(bit_map),
7178 7182 _work_queue(work_queue)
7179 7183 {
7180 7184 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7181 7185 }
7182 7186
7183 7187 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
7184 7188 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7185 7189
7186 7190 // Grey object rescan during second checkpoint phase --
7187 7191 // the parallel version.
7188 7192 void Par_PushAndMarkClosure::do_oop(oop obj) {
7189 7193 // In the assert below, we ignore the mark word because
7190 7194 // this oop may point to an already visited object that is
7191 7195 // on the overflow stack (in which case the mark word has
7192 7196 // been hijacked for chaining into the overflow stack --
7193 7197 // if this is the last object in the overflow stack then
7194 7198 // its mark word will be NULL). Because this object may
7195 7199 // have been subsequently popped off the global overflow
7196 7200 // stack, and the mark word possibly restored to the prototypical
7197 7201 // value, by the time we get to examine this failing assert in
7198 7202 // the debugger, is_oop_or_null(false) may subsequently start
7199 7203 // to hold.
7200 7204 assert(obj->is_oop_or_null(true),
7201 7205 err_msg("Expected an oop or NULL at " PTR_FORMAT, p2i(obj)));
7202 7206 HeapWord* addr = (HeapWord*)obj;
7203 7207 // Check if oop points into the CMS generation
7204 7208 // and is not marked
7205 7209 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7206 7210 // a white object ...
7207 7211 // If we manage to "claim" the object, by being the
7208 7212 // first thread to mark it, then we push it on our
7209 7213 // marking stack
7210 7214 if (_bit_map->par_mark(addr)) { // ... now grey
7211 7215 // push on work queue (grey set)
7212 7216 bool simulate_overflow = false;
7213 7217 NOT_PRODUCT(
7214 7218 if (CMSMarkStackOverflowALot &&
7215 7219 _collector->par_simulate_overflow()) {
7216 7220 // simulate a stack overflow
7217 7221 simulate_overflow = true;
7218 7222 }
7219 7223 )
7220 7224 if (simulate_overflow || !_work_queue->push(obj)) {
7221 7225 _collector->par_push_on_overflow_list(obj);
7222 7226 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
7223 7227 }
7224 7228 } // Else, some other thread got there first
7225 7229 }
7226 7230 }
7227 7231
7228 7232 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7229 7233 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7230 7234
7231 7235 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7232 7236 Mutex* bml = _collector->bitMapLock();
7233 7237 assert_lock_strong(bml);
7234 7238 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7235 7239 "CMS thread should hold CMS token");
7236 7240
7237 7241 bml->unlock();
7238 7242 ConcurrentMarkSweepThread::desynchronize(true);
7239 7243
7240 7244 _collector->stopTimer();
7241 7245 if (PrintCMSStatistics != 0) {
7242 7246 _collector->incrementYields();
7243 7247 }
7244 7248
7245 7249 // See the comment in coordinator_yield()
7246 7250 for (unsigned i = 0; i < CMSYieldSleepCount &&
7247 7251 ConcurrentMarkSweepThread::should_yield() &&
7248 7252 !CMSCollector::foregroundGCIsActive(); ++i) {
7249 7253 os::sleep(Thread::current(), 1, false);
7250 7254 }
7251 7255
7252 7256 ConcurrentMarkSweepThread::synchronize(true);
7253 7257 bml->lock();
7254 7258
7255 7259 _collector->startTimer();
7256 7260 }
7257 7261
7258 7262 bool CMSPrecleanRefsYieldClosure::should_return() {
7259 7263 if (ConcurrentMarkSweepThread::should_yield()) {
7260 7264 do_yield_work();
7261 7265 }
7262 7266 return _collector->foregroundGCIsActive();
7263 7267 }
7264 7268
7265 7269 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7266 7270 assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7267 7271 "mr should be aligned to start at a card boundary");
7268 7272 // We'd like to assert:
7269 7273 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7270 7274 // "mr should be a range of cards");
7271 7275 // However, that would be too strong in one case -- the last
7272 7276 // partition ends at _unallocated_block which, in general, can be
7273 7277 // an arbitrary boundary, not necessarily card aligned.
7274 7278 if (PrintCMSStatistics != 0) {
7275 7279 _num_dirty_cards +=
7276 7280 mr.word_size()/CardTableModRefBS::card_size_in_words;
7277 7281 }
7278 7282 _space->object_iterate_mem(mr, &_scan_cl);
7279 7283 }
7280 7284
7281 7285 SweepClosure::SweepClosure(CMSCollector* collector,
7282 7286 ConcurrentMarkSweepGeneration* g,
7283 7287 CMSBitMap* bitMap, bool should_yield) :
7284 7288 _collector(collector),
7285 7289 _g(g),
7286 7290 _sp(g->cmsSpace()),
7287 7291 _limit(_sp->sweep_limit()),
7288 7292 _freelistLock(_sp->freelistLock()),
7289 7293 _bitMap(bitMap),
7290 7294 _yield(should_yield),
7291 7295 _inFreeRange(false), // No free range at beginning of sweep
7292 7296 _freeRangeInFreeLists(false), // No free range at beginning of sweep
7293 7297 _lastFreeRangeCoalesced(false),
7294 7298 _freeFinger(g->used_region().start())
7295 7299 {
7296 7300 NOT_PRODUCT(
7297 7301 _numObjectsFreed = 0;
7298 7302 _numWordsFreed = 0;
7299 7303 _numObjectsLive = 0;
7300 7304 _numWordsLive = 0;
7301 7305 _numObjectsAlreadyFree = 0;
7302 7306 _numWordsAlreadyFree = 0;
7303 7307 _last_fc = NULL;
7304 7308
7305 7309 _sp->initializeIndexedFreeListArrayReturnedBytes();
7306 7310 _sp->dictionary()->initialize_dict_returned_bytes();
7307 7311 )
7308 7312 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7309 7313 "sweep _limit out of bounds");
7310 7314 if (CMSTraceSweeper) {
7311 7315 gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
7312 7316 p2i(_limit));
7313 7317 }
7314 7318 }
7315 7319
7316 7320 void SweepClosure::print_on(outputStream* st) const {
7317 7321 st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7318 7322 p2i(_sp->bottom()), p2i(_sp->end()));
7319 7323 st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7320 7324 st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7321 7325 NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7322 7326 st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7323 7327 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7324 7328 }
7325 7329
7326 7330 #ifndef PRODUCT
7327 7331 // Assertion checking only: no useful work in product mode --
7328 7332 // however, if any of the flags below become product flags,
7329 7333 // you may need to review this code to see if it needs to be
7330 7334 // enabled in product mode.
7331 7335 SweepClosure::~SweepClosure() {
7332 7336 assert_lock_strong(_freelistLock);
7333 7337 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7334 7338 "sweep _limit out of bounds");
7335 7339 if (inFreeRange()) {
7336 7340 warning("inFreeRange() should have been reset; dumping state of SweepClosure");
7337 7341 print();
7338 7342 ShouldNotReachHere();
7339 7343 }
7340 7344 if (Verbose && PrintGC) {
7341 7345 gclog_or_tty->print("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7342 7346 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7343 7347 gclog_or_tty->print_cr("\nLive " SIZE_FORMAT " objects, "
7344 7348 SIZE_FORMAT " bytes "
7345 7349 "Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7346 7350 _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7347 7351 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7348 7352 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
7349 7353 * sizeof(HeapWord);
7350 7354 gclog_or_tty->print_cr("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7351 7355
7352 7356 if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7353 7357 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7354 7358 size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7355 7359 size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7356 7360 gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returned_bytes);
7357 7361 gclog_or_tty->print(" Indexed List Returned " SIZE_FORMAT " bytes",
7358 7362 indexListReturnedBytes);
7359 7363 gclog_or_tty->print_cr(" Dictionary Returned " SIZE_FORMAT " bytes",
7360 7364 dict_returned_bytes);
7361 7365 }
7362 7366 }
7363 7367 if (CMSTraceSweeper) {
7364 7368 gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
7365 7369 p2i(_limit));
7366 7370 }
7367 7371 }
7368 7372 #endif // PRODUCT
7369 7373
7370 7374 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7371 7375 bool freeRangeInFreeLists) {
7372 7376 if (CMSTraceSweeper) {
7373 7377 gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n",
7374 7378 p2i(freeFinger), freeRangeInFreeLists);
7375 7379 }
7376 7380 assert(!inFreeRange(), "Trampling existing free range");
7377 7381 set_inFreeRange(true);
7378 7382 set_lastFreeRangeCoalesced(false);
7379 7383
7380 7384 set_freeFinger(freeFinger);
7381 7385 set_freeRangeInFreeLists(freeRangeInFreeLists);
7382 7386 if (CMSTestInFreeList) {
7383 7387 if (freeRangeInFreeLists) {
7384 7388 FreeChunk* fc = (FreeChunk*) freeFinger;
7385 7389 assert(fc->is_free(), "A chunk on the free list should be free.");
7386 7390 assert(fc->size() > 0, "Free range should have a size");
7387 7391 assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7388 7392 }
7389 7393 }
7390 7394 }
7391 7395
7392 7396 // Note that the sweeper runs concurrently with mutators. Thus,
7393 7397 // it is possible for direct allocation in this generation to happen
7394 7398 // in the middle of the sweep. Note that the sweeper also coalesces
7395 7399 // contiguous free blocks. Thus, unless the sweeper and the allocator
7396 7400 // synchronize appropriately, freshly allocated blocks may get swept up.
7397 7401 // This is accomplished by the sweeper locking the free lists while
7398 7402 // it is sweeping. Thus blocks that are determined to be free are
7399 7403 // indeed free. There is however one additional complication:
7400 7404 // blocks that have been allocated since the final checkpoint and
7401 7405 // mark will not have been marked and so would be treated as
7402 7406 // unreachable and swept up. To prevent this, the allocator marks
7403 7407 // the bit map when allocating during the sweep phase. This leads,
7404 7408 // however, to a further complication -- objects may have been allocated
7405 7409 // but not yet initialized -- in the sense that the header isn't yet
7406 7410 // installed. The sweeper cannot then determine the size of the block
7407 7411 // in order to skip over it. To deal with this case, we use a technique
7408 7412 // (due to Printezis) to encode such uninitialized block sizes in the
7409 7413 // bit map. Since the bit map uses a bit per every HeapWord, but the
7410 7414 // CMS generation has a minimum object size of 3 HeapWords, it follows
7411 7415 // that "normal marks" won't be adjacent in the bit map (there will
7412 7416 // always be at least two 0 bits between successive 1 bits). We make use
7413 7417 // of these "unused" bits to represent uninitialized blocks -- the bit
7414 7418 // corresponding to the start of the uninitialized object and the next
7415 7419 // bit are both set. Finally, a 1 bit marks the end of the object that
7416 7420 // started with the two consecutive 1 bits to indicate its potentially
7417 7421 // uninitialized state.
7418 7422
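          // Editor's illustration (not part of the original change): a worked
          // example of the Printezis encoding described above. For an
          // uninitialized block of 5 words starting at addr, the bit map holds
          //
          //   word:  addr  addr+1  addr+2  addr+3  addr+4
          //   bit:     1      1       0       0       1
          //
          // The bits at addr and addr+1 flag the block as uninitialized, and
          // the bit at addr+size-1 marks its last word, so the size can be
          // recovered from the bit map alone. A hypothetical helper mirroring
          // what CMSCollector::block_size_using_printezis_bits() computes,
          // using only bit-map queries that appear elsewhere in this file:
          static size_t printezis_block_size_example(CMSBitMap* bm,
                                                     HeapWord* addr) {
            assert(bm->isMarked(addr) && bm->isMarked(addr + 1),
                   "not a Printezis-marked block");
            // The first set bit at or after addr+2 marks the block's last word.
            HeapWord* last = bm->getNextMarkedWordAddress(addr + 2);
            return pointer_delta(last + 1, addr);
          }
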
7419 7423 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7420 7424 FreeChunk* fc = (FreeChunk*)addr;
7421 7425 size_t res;
7422 7426
7423 7427 // Check if we are done sweeping. Below we check "addr >= _limit" rather
7424 7428 // than "addr == _limit" because although _limit was a block boundary when
7425 7429 // we started the sweep, it may no longer be one because heap expansion
7426 7430 // may have caused us to coalesce the block ending at the address _limit
7427 7431 // with a newly expanded chunk (this happens when _limit was set to the
7428 7432 // previous _end of the space), so we may have stepped past _limit:
7429 7433 // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7430 7434 if (addr >= _limit) { // we have swept up to or past the limit: finish up
7431 7435 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7432 7436 "sweep _limit out of bounds");
7433 7437 assert(addr < _sp->end(), "addr out of bounds");
7434 7438 // Flush any free range we might be holding as a single
7435 7439 // coalesced chunk to the appropriate free list.
7436 7440 if (inFreeRange()) {
7437 7441 assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7438 7442 err_msg("freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger())));
7439 7443 flush_cur_free_chunk(freeFinger(),
7440 7444 pointer_delta(addr, freeFinger()));
7441 7445 if (CMSTraceSweeper) {
7442 7446 gclog_or_tty->print("Sweep: last chunk: ");
7443 7447 gclog_or_tty->print("put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") "
7444 7448 "[coalesced:%d]\n",
7445 7449 p2i(freeFinger()), pointer_delta(addr, freeFinger()),
7446 7450 lastFreeRangeCoalesced() ? 1 : 0);
7447 7451 }
7448 7452 }
7449 7453
7450 7454 // help the iterator loop finish
7451 7455 return pointer_delta(_sp->end(), addr);
7452 7456 }
7453 7457
7454 7458 assert(addr < _limit, "sweep invariant");
7455 7459 // check if we should yield
7456 7460 do_yield_check(addr);
7457 7461 if (fc->is_free()) {
7458 7462 // Chunk that is already free
7459 7463 res = fc->size();
7460 7464 do_already_free_chunk(fc);
7461 7465 debug_only(_sp->verifyFreeLists());
7462 7466 // If we flush the chunk at hand in lookahead_and_flush()
7463 7467 // and it's coalesced with a preceding chunk, then the
7464 7468 // process of "mangling" the payload of the coalesced block
7465 7469 // will cause erasure of the size information from the
7466 7470 // (erstwhile) header of all the coalesced blocks but the
7467 7471 // first, so the first disjunct in the assert will not hold
7468 7472 // in that specific case (in which case the second disjunct
7469 7473 // will hold).
7470 7474 assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
7471 7475 "Otherwise the size info doesn't change at this step");
7472 7476 NOT_PRODUCT(
7473 7477 _numObjectsAlreadyFree++;
7474 7478 _numWordsAlreadyFree += res;
7475 7479 )
7476 7480 NOT_PRODUCT(_last_fc = fc;)
7477 7481 } else if (!_bitMap->isMarked(addr)) {
7478 7482 // Chunk is fresh garbage
7479 7483 res = do_garbage_chunk(fc);
7480 7484 debug_only(_sp->verifyFreeLists());
7481 7485 NOT_PRODUCT(
7482 7486 _numObjectsFreed++;
7483 7487 _numWordsFreed += res;
7484 7488 )
7485 7489 } else {
7486 7490 // Chunk that is alive.
7487 7491 res = do_live_chunk(fc);
7488 7492 debug_only(_sp->verifyFreeLists());
7489 7493 NOT_PRODUCT(
7490 7494 _numObjectsLive++;
7491 7495 _numWordsLive += res;
7492 7496 )
7493 7497 }
7494 7498 return res;
7495 7499 }
7496 7500
7497 7501 // For smart allocation, record the following:
7498 7502 // split deaths - a free chunk is removed from its free list because
7499 7503 // it is being split into two or more chunks.
7500 7504 // split birth - a free chunk is being added to its free list because
7501 7505 // a larger free chunk has been split and resulted in this free chunk.
7502 7506 // coal death - a free chunk is being removed from its free list because
7503 7507 // it is being coalesced into a large free chunk.
7504 7508 // coal birth - a free chunk is being added to its free list because
7505 7509 // it was created when two or more free chunks were coalesced into
7506 7510 // this free chunk.
7507 7511 //
7508 7512 // These statistics are used to determine the desired number of free
7509 7513 // chunks of a given size. The desired number is chosen to be relative
7510 7514 // to the end of a CMS sweep. The desired number at the end of a sweep
7511 7515 // is the
7512 7516 // count-at-end-of-previous-sweep (an amount that was enough)
7513 7517 // - count-at-beginning-of-current-sweep (the excess)
7514 7518 // + split-births (gains in this size during interval)
7515 7519 // - split-deaths (demands on this size during interval)
7516 7520 // where the interval is from the end of one sweep to the end of the
7517 7521 // next.
7518 7522 //
7519 7523 // When sweeping the sweeper maintains an accumulated chunk which is
7520 7524 // the chunk that is made up of chunks that have been coalesced. That
7521 7525 // will be termed the left-hand chunk. A new chunk of garbage that
7522 7526 // is being considered for coalescing will be referred to as the
7523 7527 // right-hand chunk.
7524 7528 //
7525 7529 // When making a decision on whether to coalesce a right-hand chunk with
7526 7530 // the current left-hand chunk, the current count vs. the desired count
7527 7531 // of the left-hand chunk is considered. Also if the right-hand chunk
7528 7532 // is near the large chunk at the end of the heap (see
7529 7533 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7530 7534 // left-hand chunk is coalesced.
7531 7535 //
7532 7536 // When making a decision about whether to split a chunk, the desired count
7533 7537 // vs. the current count of the candidate to be split is also considered.
7534 7538 // If the candidate is underpopulated (currently fewer chunks than desired)
7535 7539 // a chunk of an overpopulated (currently more chunks than desired) size may
7536 7540 // be chosen. The "hint" associated with a free list, if non-null, points
7537 7541 // to a free list which may be overpopulated.
7538 7542 //
7539 7543
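          // Editor's illustration (not part of the original change): a worked
          // instance of the desired-count arithmetic above, with made-up
          // numbers. Suppose chunks of a given size ended the previous sweep
          // with a count of 100 (enough), begin the current sweep with 40 left
          // over (the excess), and the interval saw 25 split births and 10
          // split deaths. Then
          //
          //   desired = 100 - 40 + 25 - 10 = 75
          //
          // i.e. the sweeper aims to end the current sweep with 75 free chunks
          // of this size on the free list.
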
7540 7544 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
7541 7545 const size_t size = fc->size();
7542 7546 // Chunks that cannot be coalesced are not in the
7543 7547 // free lists.
7544 7548 if (CMSTestInFreeList && !fc->cantCoalesce()) {
7545 7549 assert(_sp->verify_chunk_in_free_list(fc),
7546 7550 "free chunk should be in free lists");
7547 7551 }
7548 7552 // A chunk that is already free should not have been
7549 7553 // marked in the bit map.
7550 7554 HeapWord* const addr = (HeapWord*) fc;
7551 7555 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7552 7556 // Verify that the bit map has no bits marked between
7553 7557 // addr and purported end of this block.
7554 7558 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7555 7559
7556 7560 // Some chunks cannot be coalesced under any circumstances.
7557 7561 // See the definition of cantCoalesce().
7558 7562 if (!fc->cantCoalesce()) {
7559 7563 // This chunk can potentially be coalesced.
7560 7564 if (_sp->adaptive_freelists()) {
7561 7565 // All the work is done below in do_post_free_or_garbage_chunk().
7562 7566 do_post_free_or_garbage_chunk(fc, size);
7563 7567 } else { // Not adaptive free lists
7564 7568 // this is a free chunk that can potentially be coalesced by the sweeper;
7565 7569 if (!inFreeRange()) {
7566 7570 // if the next chunk is a free block that can't be coalesced
7567 7571 // it doesn't make sense to remove this chunk from the free lists
7568 7572 FreeChunk* nextChunk = (FreeChunk*)(addr + size);
7569 7573 assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
7570 7574 if ((HeapWord*)nextChunk < _sp->end() && // There is another free chunk to the right ...
7571 7575 nextChunk->is_free() && // ... which is free...
7572 7576 nextChunk->cantCoalesce()) { // ... but can't be coalesced
7573 7577 // nothing to do
7574 7578 } else {
7575 7579 // Potentially the start of a new free range:
7576 7580 // Don't eagerly remove it from the free lists.
7577 7581 // No need to remove it if it will just be put
7578 7582 // back again. (Also from a pragmatic point of view
7579 7583 // if it is a free block in a region that is beyond
7580 7584 // any allocated blocks, an assertion will fail)
7581 7585 // Remember the start of a free run.
7582 7586 initialize_free_range(addr, true);
7583 7587 // end - can coalesce with next chunk
7584 7588 }
7585 7589 } else {
7586 7590 // In the midst of a free range; we are coalescing.
7587 7591 print_free_block_coalesced(fc);
7588 7592 if (CMSTraceSweeper) {
7589 7593 gclog_or_tty->print(" -- pick up free block " PTR_FORMAT " (" SIZE_FORMAT ")\n", p2i(fc), size);
7590 7594 }
7591 7595 // remove it from the free lists
7592 7596 _sp->removeFreeChunkFromFreeLists(fc);
7593 7597 set_lastFreeRangeCoalesced(true);
7594 7598 // If the chunk is being coalesced and the current free range is
7595 7599 // in the free lists, remove the current free range so that it
7596 7600 // will be returned to the free lists in its entirety - all
7597 7601 // the coalesced pieces included.
7598 7602 if (freeRangeInFreeLists()) {
7599 7603 FreeChunk* ffc = (FreeChunk*) freeFinger();
7600 7604 assert(ffc->size() == pointer_delta(addr, freeFinger()),
7601 7605 "Size of free range is inconsistent with chunk size.");
7602 7606 if (CMSTestInFreeList) {
7603 7607 assert(_sp->verify_chunk_in_free_list(ffc),
7604 7608 "free range is not in free lists");
7605 7609 }
7606 7610 _sp->removeFreeChunkFromFreeLists(ffc);
7607 7611 set_freeRangeInFreeLists(false);
7608 7612 }
7609 7613 }
7610 7614 }
7611 7615 // Note that if the chunk is not coalescable (the else arm
7612 7616 // below), we unconditionally flush, without needing to do
7613 7617 // a "lookahead," as we do below.
7614 7618 if (inFreeRange()) lookahead_and_flush(fc, size);
7615 7619 } else {
7616 7620 // Code path common to both original and adaptive free lists.
7617 7621
7618 7622 // can't coalesce with previous block; this should be treated
7619 7623 // as the end of a free run, if any
7620 7624 if (inFreeRange()) {
7621 7625 // we kicked some butt; time to pick up the garbage
7622 7626 assert(freeFinger() < addr, "freeFinger points too high");
7623 7627 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7624 7628 }
7625 7629 // else, nothing to do, just continue
7626 7630 }
7627 7631 }
7628 7632
7629 7633 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
7630 7634 // This is a chunk of garbage. It is not in any free list.
7631 7635 // Add it to a free list or let it possibly be coalesced into
7632 7636 // a larger chunk.
7633 7637 HeapWord* const addr = (HeapWord*) fc;
7634 7638 const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7635 7639
7636 7640 if (_sp->adaptive_freelists()) {
7637 7641 // Verify that the bit map has no bits marked between
7638 7642 // addr and purported end of just dead object.
7639 7643 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7640 7644
7641 7645 do_post_free_or_garbage_chunk(fc, size);
7642 7646 } else {
7643 7647 if (!inFreeRange()) {
7644 7648 // start of a new free range
7645 7649 assert(size > 0, "A free range should have a size");
7646 7650 initialize_free_range(addr, false);
7647 7651 } else {
7648 7652 // this will be swept up when we hit the end of the
7649 7653 // free range
7650 7654 if (CMSTraceSweeper) {
7651 7655 gclog_or_tty->print(" -- pick up garbage " PTR_FORMAT " (" SIZE_FORMAT ")\n", p2i(fc), size);
7652 7656 }
7653 7657 // If the chunk is being coalesced and the current free range is
7654 7658 // in the free lists, remove the current free range so that it
7655 7659 // will be returned to the free lists in its entirety - all
7656 7660 // the coalesced pieces included.
7657 7661 if (freeRangeInFreeLists()) {
7658 7662 FreeChunk* ffc = (FreeChunk*)freeFinger();
7659 7663 assert(ffc->size() == pointer_delta(addr, freeFinger()),
7660 7664 "Size of free range is inconsistent with chunk size.");
7661 7665 if (CMSTestInFreeList) {
7662 7666 assert(_sp->verify_chunk_in_free_list(ffc),
7663 7667 "free range is not in free lists");
7664 7668 }
7665 7669 _sp->removeFreeChunkFromFreeLists(ffc);
7666 7670 set_freeRangeInFreeLists(false);
7667 7671 }
7668 7672 set_lastFreeRangeCoalesced(true);
7669 7673 }
7670 7674 // this will be swept up when we hit the end of the free range
7671 7675
7672 7676 // Verify that the bit map has no bits marked between
7673 7677 // addr and purported end of just dead object.
7674 7678 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7675 7679 }
7676 7680 assert(_limit >= addr + size,
7677 7681 "A freshly garbage chunk can't possibly straddle over _limit");
7678 7682 if (inFreeRange()) lookahead_and_flush(fc, size);
7679 7683 return size;
7680 7684 }
7681 7685
7682 7686 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
7683 7687 HeapWord* addr = (HeapWord*) fc;
7684 7688 // The sweeper has just found a live object. Return any accumulated
7685 7689 // left hand chunk to the free lists.
7686 7690 if (inFreeRange()) {
7687 7691 assert(freeFinger() < addr, "freeFinger points too high");
7688 7692 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7689 7693 }
7690 7694
7691 7695 // This object is live: we'd normally expect this to be
7692 7696 // an oop, and would like to assert the following:
7693 7697 // assert(oop(addr)->is_oop(), "live block should be an oop");
7694 7698 // However, as we commented above, this may be an object whose
7695 7699 // header hasn't yet been initialized.
7696 7700 size_t size;
7697 7701 assert(_bitMap->isMarked(addr), "Tautology for this control point");
7698 7702 if (_bitMap->isMarked(addr + 1)) {
7699 7703 // Determine the size from the bit map, rather than trying to
7700 7704 // compute it from the object header.
7701 7705 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7702 7706 size = pointer_delta(nextOneAddr + 1, addr);
7703 7707 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7704 7708 "alignment problem");
7705 7709
7706 7710 #ifdef ASSERT
7707 7711 if (oop(addr)->klass_or_null() != NULL) {
7708 7712 // Ignore mark word because we are running concurrent with mutators
7709 7713 assert(oop(addr)->is_oop(true), "live block should be an oop");
7710 7714 assert(size ==
7711 7715 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7712 7716 "P-mark and computed size do not agree");
7713 7717 }
7714 7718 #endif
7715 7719
7716 7720 } else {
7717 7721 // This should be an initialized object that's alive.
7718 7722 assert(oop(addr)->klass_or_null() != NULL,
7719 7723 "Should be an initialized object");
7720 7724 // Ignore mark word because we are running concurrent with mutators
7721 7725 assert(oop(addr)->is_oop(true), "live block should be an oop");
7722 7726 // Verify that the bit map has no bits marked between
7723 7727 // addr and purported end of this block.
7724 7728 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7725 7729 assert(size >= 3, "Necessary for Printezis marks to work");
7726 7730 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7727 7731 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7728 7732 }
7729 7733 return size;
7730 7734 }
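// [Editorial sketch] How the Printezis-mark size computation in
// do_live_chunk() plays out on hypothetical word indices. Bits at addr and
// addr+1 both being set means "read the size from the bit map"; the next
// set bit at or after addr+2 marks the block's last word:
//
//   bit index:  k   k+1  k+2 ... k+8  k+9
//   bit value:  1    1    0  ...  0    1
//
//   nextOneAddr = addr + 9;                  // first set bit >= addr + 2
//   size        = (nextOneAddr + 1) - addr;  // == 10 words
//
// This is also why the scheme needs size >= 3: addr, addr+1 and the last
// word must be three distinct bit positions.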
7731 7735
7732 7736 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7733 7737 size_t chunkSize) {
7734 7738 // do_post_free_or_garbage_chunk() should only be called in the case
7735 7739 // of the adaptive free list allocator.
7736 7740 const bool fcInFreeLists = fc->is_free();
7737 7741 assert(_sp->adaptive_freelists(), "Should only be used in this case.");
7738 7742 assert((HeapWord*)fc <= _limit, "sweep invariant");
7739 7743 if (CMSTestInFreeList && fcInFreeLists) {
7740 7744 assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7741 7745 }
7742 7746
7743 7747 if (CMSTraceSweeper) {
7744 7748 gclog_or_tty->print_cr(" -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7745 7749 }
7746 7750
7747 7751 HeapWord* const fc_addr = (HeapWord*) fc;
7748 7752
7749 7753 bool coalesce;
7750 7754 const size_t left = pointer_delta(fc_addr, freeFinger());
7751 7755 const size_t right = chunkSize;
7752 7756 switch (FLSCoalescePolicy) {
7753 7757 // the numeric value forms a coalescing aggressiveness metric
7754 7758 case 0: { // never coalesce
7755 7759 coalesce = false;
7756 7760 break;
7757 7761 }
7758 7762 case 1: { // coalesce if left & right chunks on overpopulated lists
7759 7763 coalesce = _sp->coalOverPopulated(left) &&
7760 7764 _sp->coalOverPopulated(right);
7761 7765 break;
7762 7766 }
7763 7767 case 2: { // coalesce if left chunk on overpopulated list (default)
7764 7768 coalesce = _sp->coalOverPopulated(left);
7765 7769 break;
7766 7770 }
7767 7771 case 3: { // coalesce if left OR right chunk on overpopulated list
7768 7772 coalesce = _sp->coalOverPopulated(left) ||
7769 7773 _sp->coalOverPopulated(right);
7770 7774 break;
7771 7775 }
7772 7776 case 4: { // always coalesce
7773 7777 coalesce = true;
7774 7778 break;
7775 7779 }
7776 7780 default:
7777 7781 ShouldNotReachHere();
7778 7782 }
7779 7783
7780 7784 // Should the current free range be coalesced?
7781 7785 // If the chunk is in a free range and either we decided to coalesce above
7782 7786 // or the chunk is near the large block at the end of the heap
7783 7787 // (isNearLargestChunk() returns true), then coalesce this chunk.
7784 7788 const bool doCoalesce = inFreeRange()
7785 7789 && (coalesce || _g->isNearLargestChunk(fc_addr));
7786 7790 if (doCoalesce) {
7787 7791 // Coalesce the current free range on the left with the new
7788 7792 // chunk on the right. If either is on a free list,
7789 7793 // it must be removed from the list and stashed in the closure.
7790 7794 if (freeRangeInFreeLists()) {
7791 7795 FreeChunk* const ffc = (FreeChunk*)freeFinger();
7792 7796 assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
7793 7797 "Size of free range is inconsistent with chunk size.");
7794 7798 if (CMSTestInFreeList) {
7795 7799 assert(_sp->verify_chunk_in_free_list(ffc),
7796 7800 "Chunk is not in free lists");
7797 7801 }
7798 7802 _sp->coalDeath(ffc->size());
7799 7803 _sp->removeFreeChunkFromFreeLists(ffc);
7800 7804 set_freeRangeInFreeLists(false);
7801 7805 }
7802 7806 if (fcInFreeLists) {
7803 7807 _sp->coalDeath(chunkSize);
7804 7808 assert(fc->size() == chunkSize,
7805 7809 "The chunk has the wrong size or is not in the free lists");
7806 7810 _sp->removeFreeChunkFromFreeLists(fc);
7807 7811 }
7808 7812 set_lastFreeRangeCoalesced(true);
7809 7813 print_free_block_coalesced(fc);
7810 7814 } else { // not in a free range and/or should not coalesce
7811 7815 // Return the current free range and start a new one.
7812 7816 if (inFreeRange()) {
7813 7817 // In a free range but cannot coalesce with the right hand chunk.
7814 7818 // Put the current free range into the free lists.
7815 7819 flush_cur_free_chunk(freeFinger(),
7816 7820 pointer_delta(fc_addr, freeFinger()));
7817 7821 }
7818 7822 // Set up for new free range. Pass along whether the right hand
7819 7823 // chunk is in the free lists.
7820 7824 initialize_free_range((HeapWord*)fc, fcInFreeLists);
7821 7825 }
7822 7826 }
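// [Editorial note] The coalescing decision above, condensed into a table
// (derived directly from the FLSCoalescePolicy switch; "overpopulated"
// means the size class currently holds more chunks than its desired count):
//
//   left over?  right over?   policy 1   policy 2   policy 3
//      yes         yes         merge      merge      merge
//      yes         no          keep       merge      merge
//      no          yes         keep       keep       merge
//      no          no          keep       keep       keep
//
// Policy 0 never merges and policy 4 always merges, regardless of counts;
// in every case a chunk near the largest chunk at the end of the heap is
// merged anyway (see isNearLargestChunk() above).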
7823 7827
7824 7828 // Lookahead flush:
7825 7829 // If we are tracking a free range, and this is the last chunk that
7826 7830 // we'll look at because its end crosses past _limit, we'll preemptively
7827 7831 // flush it along with any free range we may be holding on to. Note that
7828 7832 // this can be the case only for an already free or freshly garbage
7829 7833 // chunk. If this block is an object, it can never straddle
7830 7834 // over _limit. The "straddling" occurs when _limit is set at
7831 7835 // the previous end of the space when this cycle started, and
7832 7836 // a subsequent heap expansion caused the previously co-terminal
7833 7837 // free block to be coalesced with the newly expanded portion,
7834 7838 // thus rendering _limit a non-block-boundary making it dangerous
7835 7839 // for the sweeper to step over and examine.
7836 7840 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7837 7841 assert(inFreeRange(), "Should only be called if currently in a free range.");
7838 7842 HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7839 7843 assert(_sp->used_region().contains(eob - 1),
7840 7844 err_msg("eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7841 7845 " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7842 7846 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7843 7847 p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size));
7844 7848 if (eob >= _limit) {
7845 7849 assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7846 7850 if (CMSTraceSweeper) {
7847 7851 gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
7848 7852 "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7849 7853 "[" PTR_FORMAT "," PTR_FORMAT ")",
7850 7854 p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7851 7855 }
7852 7856 // Return the storage we are tracking back into the free lists.
7853 7857 if (CMSTraceSweeper) {
7854 7858 gclog_or_tty->print_cr("Flushing ... ");
7855 7859 }
7856 7860 assert(freeFinger() < eob, "Error");
7857 7861 flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
7858 7862 }
7859 7863 }
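// [Editorial sketch] The straddling scenario described above, with made-up
// addresses. Suppose _limit was set to the space's end, 0x1000, when this
// sweep cycle started, and the heap then expanded to 0x1400. A free block
// that previously ended exactly at 0x1000 can be coalesced with the newly
// expanded portion, yielding a chunk [0x0F00, 0x1400) for which
//
//   eob = 0x0F00 + chunk_size = 0x1400  >  _limit = 0x1000
//
// so _limit is no longer a block boundary. The eob >= _limit check above
// then flushes the tracked free range instead of letting the sweeper step
// over 0x1000 into the middle of the chunk.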
7860 7864
7861 7865 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7862 7866 assert(inFreeRange(), "Should only be called if currently in a free range.");
7863 7867 assert(size > 0,
7864 7868 "A zero sized chunk cannot be added to the free lists.");
7865 7869 if (!freeRangeInFreeLists()) {
7866 7870 if (CMSTestInFreeList) {
7867 7871 FreeChunk* fc = (FreeChunk*) chunk;
7868 7872 fc->set_size(size);
7869 7873 assert(!_sp->verify_chunk_in_free_list(fc),
7870 7874 "chunk should not be in free lists yet");
7871 7875 }
7872 7876 if (CMSTraceSweeper) {
7873 7877 gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
7874 7878 p2i(chunk), size);
7875 7879 }
7876 7880 // A new free range is going to be starting. The current
7877 7881 // free range has not been added to the free lists yet or
7878 7882 // was removed so add it back.
7879 7883 // If the current free range was coalesced, then the death
7880 7884 // of the free range was recorded. Record a birth now.
7881 7885 if (lastFreeRangeCoalesced()) {
7882 7886 _sp->coalBirth(size);
7883 7887 }
7884 7888 _sp->addChunkAndRepairOffsetTable(chunk, size,
7885 7889 lastFreeRangeCoalesced());
7886 7890 } else if (CMSTraceSweeper) {
7887 7891 gclog_or_tty->print_cr("Already in free list: nothing to flush");
7888 7892 }
7889 7893 set_inFreeRange(false);
7890 7894 set_freeRangeInFreeLists(false);
7891 7895 }
7892 7896
7893 7897 // We take a break if we've been at this for a while,
7894 7898 // so as to avoid monopolizing the locks involved.
7895 7899 void SweepClosure::do_yield_work(HeapWord* addr) {
7896 7900 // Return current free chunk being used for coalescing (if any)
7897 7901 // to the appropriate freelist. After yielding, the next
7898 7902 // free block encountered will start a coalescing range of
7899 7903 // free blocks. If the next free block is adjacent to the
7900 7904 // chunk just flushed, they will need to wait for the next
7901 7905 // sweep to be coalesced.
7902 7906 if (inFreeRange()) {
7903 7907 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7904 7908 }
7905 7909
7906 7910 // First give up the locks, then yield, then re-lock.
7907 7911 // We should probably use a constructor/destructor idiom to
7908 7912 // do this unlock/lock or modify the MutexUnlocker class to
7909 7913 // serve our purpose. XXX
7910 7914 assert_lock_strong(_bitMap->lock());
7911 7915 assert_lock_strong(_freelistLock);
7912 7916 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7913 7917 "CMS thread should hold CMS token");
7914 7918 _bitMap->lock()->unlock();
7915 7919 _freelistLock->unlock();
7916 7920 ConcurrentMarkSweepThread::desynchronize(true);
7917 7921 _collector->stopTimer();
7918 7922 if (PrintCMSStatistics != 0) {
7919 7923 _collector->incrementYields();
7920 7924 }
7921 7925
7922 7926 // See the comment in coordinator_yield()
7923 7927 for (unsigned i = 0; i < CMSYieldSleepCount &&
7924 7928 ConcurrentMarkSweepThread::should_yield() &&
7925 7929 !CMSCollector::foregroundGCIsActive(); ++i) {
7926 7930 os::sleep(Thread::current(), 1, false);
7927 7931 }
7928 7932
7929 7933 ConcurrentMarkSweepThread::synchronize(true);
7930 7934 _freelistLock->lock();
7931 7935 _bitMap->lock()->lock_without_safepoint_check();
7932 7936 _collector->startTimer();
7933 7937 }
7934 7938
7935 7939 #ifndef PRODUCT
7936 7940 // This is actually very useful in a product build if it can
7937 7941 // be called from the debugger. Compile it into the product
7938 7942 // as needed.
7939 7943 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7940 7944 return debug_cms_space->verify_chunk_in_free_list(fc);
7941 7945 }
7942 7946 #endif
7943 7947
7944 7948 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7945 7949 if (CMSTraceSweeper) {
7946 7950 gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
7947 7951 p2i(fc), fc->size());
7948 7952 }
7949 7953 }
7950 7954
7951 7955 // CMSIsAliveClosure
7952 7956 bool CMSIsAliveClosure::do_object_b(oop obj) {
7953 7957 HeapWord* addr = (HeapWord*)obj;
7954 7958 return addr != NULL &&
7955 7959 (!_span.contains(addr) || _bit_map->isMarked(addr));
7956 7960 }
7957 7961
7958 7962
7959 7963 CMSKeepAliveClosure::CMSKeepAliveClosure(CMSCollector* collector,
7960 7964 MemRegion span,
7961 7965 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
7962 7966 bool cpc):
7963 7967 _collector(collector),
7964 7968 _span(span),
7965 7969 _bit_map(bit_map),
7966 7970 _mark_stack(mark_stack),
7967 7971 _concurrent_precleaning(cpc) {
7968 7972 assert(!_span.is_empty(), "Empty span could spell trouble");
7969 7973 }
7970 7974
7971 7975
7972 7976 // CMSKeepAliveClosure: the serial version
7973 7977 void CMSKeepAliveClosure::do_oop(oop obj) {
7974 7978 HeapWord* addr = (HeapWord*)obj;
7975 7979 if (_span.contains(addr) &&
7976 7980 !_bit_map->isMarked(addr)) {
7977 7981 _bit_map->mark(addr);
7978 7982 bool simulate_overflow = false;
7979 7983 NOT_PRODUCT(
7980 7984 if (CMSMarkStackOverflowALot &&
7981 7985 _collector->simulate_overflow()) {
7982 7986 // simulate a stack overflow
7983 7987 simulate_overflow = true;
7984 7988 }
7985 7989 )
7986 7990 if (simulate_overflow || !_mark_stack->push(obj)) {
7987 7991 if (_concurrent_precleaning) {
7988 7992 // We dirty the overflown object and let the remark
7989 7993 // phase deal with it.
7990 7994 assert(_collector->overflow_list_is_empty(), "Error");
7991 7995 // In the case of object arrays, we need to dirty all of
7992 7996 // the cards that the object spans. No locking or atomics
7993 7997 // are needed since no one else can be mutating the mod union
7994 7998 // table.
7995 7999 if (obj->is_objArray()) {
7996 8000 size_t sz = obj->size();
7997 8001 HeapWord* end_card_addr =
7998 8002 (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
7999 8003 MemRegion redirty_range = MemRegion(addr, end_card_addr);
8000 8004 assert(!redirty_range.is_empty(), "Arithmetical tautology");
8001 8005 _collector->_modUnionTable.mark_range(redirty_range);
8002 8006 } else {
8003 8007 _collector->_modUnionTable.mark(addr);
8004 8008 }
8005 8009 _collector->_ser_kac_preclean_ovflw++;
8006 8010 } else {
8007 8011 _collector->push_on_overflow_list(obj);
8008 8012 _collector->_ser_kac_ovflw++;
8009 8013 }
8010 8014 }
8011 8015 }
8012 8016 }
8013 8017
8014 8018 void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8015 8019 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
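// [Editorial sketch] The object-array redirtying arithmetic above, with
// made-up numbers (64-bit HeapWords, card_size == 512 bytes). For an array
// at addr == 0x10000 spanning sz == 100 words (800 bytes), addr + sz is
// byte address 0x10320, and rounding up to the card size gives
//
//   end_card_addr = round_to(0x10320, 512) == 0x10400
//
// so redirty_range = [0x10000, 0x10400) covers every card the array spans
// and the remark phase will rescan all of them.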
8016 8020
8017 8021 // CMSParKeepAliveClosure: a parallel version of the above.
8018 8022 // The work queues are private to each closure (thread),
8019 8023 // but (may be) available for stealing by other threads.
8020 8024 void CMSParKeepAliveClosure::do_oop(oop obj) {
8021 8025 HeapWord* addr = (HeapWord*)obj;
8022 8026 if (_span.contains(addr) &&
8023 8027 !_bit_map->isMarked(addr)) {
8024 8028 // In general, during recursive tracing, several threads
8025 8029 // may be concurrently getting here; the first one to
8026 8030 // "tag" it, claims it.
8027 8031 if (_bit_map->par_mark(addr)) {
8028 8032 bool res = _work_queue->push(obj);
8029 8033 assert(res, "Low water mark should be much less than capacity");
8030 8034 // Do a recursive trim in the hope that this will keep
8031 8035 // stack usage lower, but leave some oops for potential stealers
8032 8036 trim_queue(_low_water_mark);
8033 8037 } // Else, another thread got there first
8034 8038 }
8035 8039 }
8036 8040
8037 8041 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8038 8042 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8039 8043
8040 8044 void CMSParKeepAliveClosure::trim_queue(uint max) {
8041 8045 while (_work_queue->size() > max) {
8042 8046 oop new_oop;
8043 8047 if (_work_queue->pop_local(new_oop)) {
8044 8048 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8045 8049 assert(_bit_map->isMarked((HeapWord*)new_oop),
8046 8050 "no white objects on this stack!");
8047 8051 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8048 8052 // iterate over the oops in this oop, marking and pushing
8049 8053 // the ones in CMS heap (i.e. in _span).
8050 8054 new_oop->oop_iterate(&_mark_and_push);
8051 8055 }
8052 8056 }
8053 8057 }
8054 8058
8055 8059 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8056 8060 CMSCollector* collector,
8057 8061 MemRegion span, CMSBitMap* bit_map,
8058 8062 OopTaskQueue* work_queue):
8059 8063 _collector(collector),
8060 8064 _span(span),
8061 8065 _bit_map(bit_map),
8062 8066 _work_queue(work_queue) { }
8063 8067
8064 8068 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8065 8069 HeapWord* addr = (HeapWord*)obj;
8066 8070 if (_span.contains(addr) &&
8067 8071 !_bit_map->isMarked(addr)) {
8068 8072 if (_bit_map->par_mark(addr)) {
8069 8073 bool simulate_overflow = false;
8070 8074 NOT_PRODUCT(
8071 8075 if (CMSMarkStackOverflowALot &&
8072 8076 _collector->par_simulate_overflow()) {
8073 8077 // simulate a stack overflow
8074 8078 simulate_overflow = true;
8075 8079 }
8076 8080 )
8077 8081 if (simulate_overflow || !_work_queue->push(obj)) {
8078 8082 _collector->par_push_on_overflow_list(obj);
8079 8083 _collector->_par_kac_ovflw++;
8080 8084 }
8081 8085 } // Else another thread got there already
8082 8086 }
8083 8087 }
8084 8088
8085 8089 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8086 8090 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8087 8091
8088 8092 //////////////////////////////////////////////////////////////////
8089 8093 // CMSExpansionCause /////////////////////////////
8090 8094 //////////////////////////////////////////////////////////////////
8091 8095 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8092 8096 switch (cause) {
8093 8097 case _no_expansion:
8094 8098 return "No expansion";
8095 8099 case _satisfy_free_ratio:
8096 8100 return "Free ratio";
8097 8101 case _satisfy_promotion:
8098 8102 return "Satisfy promotion";
8099 8103 case _satisfy_allocation:
8100 8104 return "allocation";
8101 8105 case _allocate_par_lab:
8102 8106 return "Par LAB";
8103 8107 case _allocate_par_spooling_space:
8104 8108 return "Par Spooling Space";
8105 8109 case _adaptive_size_policy:
8106 8110 return "Ergonomics";
8107 8111 default:
8108 8112 return "unknown";
8109 8113 }
8110 8114 }
8111 8115
8112 8116 void CMSDrainMarkingStackClosure::do_void() {
8113 8117 // the max number to take from overflow list at a time
8114 8118 const size_t num = _mark_stack->capacity()/4;
8115 8119 assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8116 8120 "Overflow list should be NULL during concurrent phases");
8117 8121 while (!_mark_stack->isEmpty() ||
8118 8122 // if stack is empty, check the overflow list
8119 8123 _collector->take_from_overflow_list(num, _mark_stack)) {
8120 8124 oop obj = _mark_stack->pop();
8121 8125 HeapWord* addr = (HeapWord*)obj;
8122 8126 assert(_span.contains(addr), "Should be within span");
8123 8127 assert(_bit_map->isMarked(addr), "Should be marked");
8124 8128 assert(obj->is_oop(), "Should be an oop");
8125 8129 obj->oop_iterate(_keep_alive);
8126 8130 }
8127 8131 }
8128 8132
8129 8133 void CMSParDrainMarkingStackClosure::do_void() {
8130 8134 // drain queue
8131 8135 trim_queue(0);
8132 8136 }
8133 8137
8134 8138 // Trim our work_queue so its length is below max at return
8135 8139 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8136 8140 while (_work_queue->size() > max) {
8137 8141 oop new_oop;
8138 8142 if (_work_queue->pop_local(new_oop)) {
8139 8143 assert(new_oop->is_oop(), "Expected an oop");
8140 8144 assert(_bit_map->isMarked((HeapWord*)new_oop),
8141 8145 "no white objects on this stack!");
8142 8146 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8143 8147 // iterate over the oops in this oop, marking and pushing
8144 8148 // the ones in CMS heap (i.e. in _span).
8145 8149 new_oop->oop_iterate(&_mark_and_push);
8146 8150 }
8147 8151 }
8148 8152 }
8149 8153
8150 8154 ////////////////////////////////////////////////////////////////////
8151 8155 // Support for Marking Stack Overflow list handling and related code
8152 8156 ////////////////////////////////////////////////////////////////////
8153 8157 // Much of the following code is similar in shape and spirit to the
8154 8158 // code used in ParNewGC. We should try to share that code
8155 8159 // as much as possible in the future.
8156 8160
8157 8161 #ifndef PRODUCT
8158 8162 // Debugging support for CMSStackOverflowALot
8159 8163
8160 8164 // It's OK to call this multi-threaded; the worst thing
8161 8165 // that can happen is that we'll get a bunch of closely
8162 8166 // spaced simulated overflows, but that's OK, in fact
8163 8167 // probably good as it would exercise the overflow code
8164 8168 // under contention.
8165 8169 bool CMSCollector::simulate_overflow() {
8166 8170 if (_overflow_counter-- <= 0) { // just being defensive
8167 8171 _overflow_counter = CMSMarkStackOverflowInterval;
8168 8172 return true;
8169 8173 } else {
8170 8174 return false;
8171 8175 }
8172 8176 }
8173 8177
8174 8178 bool CMSCollector::par_simulate_overflow() {
8175 8179 return simulate_overflow();
8176 8180 }
8177 8181 #endif
8178 8182
8179 8183 // Single-threaded
8180 8184 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8181 8185 assert(stack->isEmpty(), "Expected precondition");
8182 8186 assert(stack->capacity() > num, "Shouldn't bite more than can chew");
8183 8187 size_t i = num;
8184 8188 oop cur = _overflow_list;
8185 8189 const markOop proto = markOopDesc::prototype();
8186 8190 NOT_PRODUCT(ssize_t n = 0;)
8187 8191 for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8188 8192 next = oop(cur->mark());
8189 8193 cur->set_mark(proto); // until proven otherwise
8190 8194 assert(cur->is_oop(), "Should be an oop");
8191 8195 bool res = stack->push(cur);
8192 8196 assert(res, "Bit off more than we can chew?");
8193 8197 NOT_PRODUCT(n++;)
8194 8198 }
8195 8199 _overflow_list = cur;
8196 8200 #ifndef PRODUCT
8197 8201 assert(_num_par_pushes >= n, "Too many pops?");
8198 8202 _num_par_pushes -= n;
8199 8203 #endif
8200 8204 return !stack->isEmpty();
8201 8205 }
8202 8206
8203 8207 #define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
8204 8208 // (MT-safe) Get a prefix of at most "num" from the list.
8205 8209 // The overflow list is chained through the mark word of
8206 8210 // each object in the list. We fetch the entire list,
8207 8211 // break off a prefix of the right size and return the
8208 8212 // remainder. If other threads try to take objects from
8209 8213 // the overflow list at that time, they will wait for
8210 8214 // some time to see if data becomes available. If (and
8211 8215 // only if) another thread places one or more object(s)
8212 8216 // on the global list before we have returned the suffix
8213 8217 // to the global list, we will walk down our local list
8214 8218 // to find its end and append the global list to
8215 8219 // our suffix before returning it. This suffix walk can
8216 8220 // prove to be expensive (quadratic in the amount of traffic)
8217 8221 // when there are many objects in the overflow list and
8218 8222 // there is much producer-consumer contention on the list.
8219 8223 // *NOTE*: The overflow list manipulation code here and
8220 8224 // in ParNewGeneration:: are very similar in shape,
8221 8225 // except that in the ParNew case we use the old (from/eden)
8222 8226 // copy of the object to thread the list via its klass word.
8223 8227 // Because of the common code, if you make any changes in
8224 8228 // the code below, please check the ParNew version to see if
8225 8229 // similar changes might be needed.
8226 8230 // CR 6797058 has been filed to consolidate the common code.
8227 8231 bool CMSCollector::par_take_from_overflow_list(size_t num,
8228 8232 OopTaskQueue* work_q,
8229 8233 int no_of_gc_threads) {
8230 8234 assert(work_q->size() == 0, "First empty local work queue");
8231 8235 assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8232 8236 if (_overflow_list == NULL) {
8233 8237 return false;
8234 8238 }
8235 8239 // Grab the entire list; we'll put back a suffix
8236 8240 oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
8237 8241 Thread* tid = Thread::current();
8238 8242 // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
8239 8243 // set to ParallelGCThreads.
8240 8244 size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
8241 8245 size_t sleep_time_millis = MAX2((size_t)1, num/100);
8242 8246 // If the list is busy, we spin for a short while,
8243 8247 // sleeping between attempts to get the list.
8244 8248 for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
8245 8249 os::sleep(tid, sleep_time_millis, false);
8246 8250 if (_overflow_list == NULL) {
8247 8251 // Nothing left to take
8248 8252 return false;
8249 8253 } else if (_overflow_list != BUSY) {
8250 8254 // Try and grab the prefix
8251 8255 prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
8252 8256 }
8253 8257 }
8254 8258 // If the list was found to be empty, or we spun long
8255 8259 // enough, we give up and return empty-handed. If we leave
8256 8260 // the list in the BUSY state below, it must be the case that
8257 8261 // some other thread holds the overflow list and will set it
8258 8262 // to a non-BUSY state in the future.
8259 8263 if (prefix == NULL || prefix == BUSY) {
8260 8264 // Nothing to take or waited long enough
8261 8265 if (prefix == NULL) {
8262 8266 // Write back the NULL in case we overwrote it with BUSY above
8263 8267 // and it is still the same value.
8264 8268 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8265 8269 }
8266 8270 return false;
8267 8271 }
8268 8272 assert(prefix != NULL && prefix != BUSY, "Error");
8269 8273 size_t i = num;
8270 8274 oop cur = prefix;
8271 8275 // Walk down the first "num" objects, unless we reach the end.
8272 8276 for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8273 8277 if (cur->mark() == NULL) {
8274 8278 // We have "num" or fewer elements in the list, so there
8275 8279 // is nothing to return to the global list.
8276 8280 // Write back the NULL in lieu of the BUSY we wrote
8277 8281 // above, if it is still the same value.
8278 8282 if (_overflow_list == BUSY) {
8279 8283 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8280 8284 }
8281 8285 } else {
8282 8286 // Chop off the suffix and return it to the global list.
8283 8287 assert(cur->mark() != BUSY, "Error");
8284 8288 oop suffix_head = cur->mark(); // suffix will be put back on global list
8285 8289 cur->set_mark(NULL); // break off suffix
8286 8290 // It's possible that the list is still in the empty(busy) state
8287 8291 // we left it in a short while ago; in that case we may be
8288 8292 // able to place back the suffix without incurring the cost
8289 8293 // of a walk down the list.
8290 8294 oop observed_overflow_list = _overflow_list;
8291 8295 oop cur_overflow_list = observed_overflow_list;
8292 8296 bool attached = false;
8293 8297 while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
8294 8298 observed_overflow_list =
8295 8299 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8296 8300 if (cur_overflow_list == observed_overflow_list) {
8297 8301 attached = true;
8298 8302 break;
8299 8303 } else cur_overflow_list = observed_overflow_list;
8300 8304 }
8301 8305 if (!attached) {
8302 8306 // Too bad, someone else sneaked in (at least) an element; we'll need
8303 8307 // to do a splice. Find tail of suffix so we can prepend suffix to global
8304 8308 // list.
8305 8309 for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8306 8310 oop suffix_tail = cur;
8307 8311 assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8308 8312 "Tautology");
8309 8313 observed_overflow_list = _overflow_list;
8310 8314 do {
8311 8315 cur_overflow_list = observed_overflow_list;
8312 8316 if (cur_overflow_list != BUSY) {
8313 8317 // Do the splice ...
8314 8318 suffix_tail->set_mark(markOop(cur_overflow_list));
8315 8319 } else { // cur_overflow_list == BUSY
8316 8320 suffix_tail->set_mark(NULL);
8317 8321 }
8318 8322 // ... and try to place spliced list back on overflow_list ...
8319 8323 observed_overflow_list =
8320 8324 (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8321 8325 } while (cur_overflow_list != observed_overflow_list);
8322 8326 // ... until we have succeeded in doing so.
8323 8327 }
8324 8328 }
8325 8329
8326 8330 // Push the prefix elements on work_q
8327 8331 assert(prefix != NULL, "control point invariant");
8328 8332 const markOop proto = markOopDesc::prototype();
8329 8333 oop next;
8330 8334 NOT_PRODUCT(ssize_t n = 0;)
8331 8335 for (cur = prefix; cur != NULL; cur = next) {
8332 8336 next = oop(cur->mark());
8333 8337 cur->set_mark(proto); // until proven otherwise
8334 8338 assert(cur->is_oop(), "Should be an oop");
8335 8339 bool res = work_q->push(cur);
8336 8340 assert(res, "Bit off more than we can chew?");
8337 8341 NOT_PRODUCT(n++;)
8338 8342 }
8339 8343 #ifndef PRODUCT
8340 8344 assert(_num_par_pushes >= n, "Too many pops?");
8341 8345 Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8342 8346 #endif
8343 8347 return true;
8344 8348 }
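// [Editorial sketch] The essence of the BUSY-marker protocol above, as a
// freestanding C++11 fragment (std::atomic standing in for HotSpot's
// Atomic::xchg_ptr/cmpxchg_ptr; every name below is hypothetical). A taker
// claims the whole list by swapping in a sentinel; anyone who then observes
// the sentinel knows the list is temporarily owned by another taker.
#include <atomic>

struct SketchNode { SketchNode* next; };
static SketchNode* const SKETCH_BUSY = reinterpret_cast<SketchNode*>(0x1);
static std::atomic<SketchNode*> sketch_overflow_list{nullptr};

static SketchNode* sketch_claim_all() {
  SketchNode* head = sketch_overflow_list.exchange(SKETCH_BUSY);
  if (head == nullptr) {
    // The list was empty; write the nullptr back, but only if the slot
    // still holds the sentinel we just planted.
    SketchNode* expected = SKETCH_BUSY;
    sketch_overflow_list.compare_exchange_strong(expected, nullptr);
    return nullptr;
  }
  if (head == SKETCH_BUSY) {
    // Another taker owns the list. The real code sleeps and retries a
    // bounded number of times before giving up like this.
    return nullptr;
  }
  return head;  // caller keeps a prefix and splices the suffix back
}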
8345 8349
8346 8350 // Single-threaded
8347 8351 void CMSCollector::push_on_overflow_list(oop p) {
8348 8352 NOT_PRODUCT(_num_par_pushes++;)
8349 8353 assert(p->is_oop(), "Not an oop");
8350 8354 preserve_mark_if_necessary(p);
8351 8355 p->set_mark((markOop)_overflow_list);
8352 8356 _overflow_list = p;
8353 8357 }
8354 8358
8355 8359 // Multi-threaded; use CAS to prepend to overflow list
8356 8360 void CMSCollector::par_push_on_overflow_list(oop p) {
8357 8361 NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8358 8362 assert(p->is_oop(), "Not an oop");
8359 8363 par_preserve_mark_if_necessary(p);
8360 8364 oop observed_overflow_list = _overflow_list;
8361 8365 oop cur_overflow_list;
8362 8366 do {
8363 8367 cur_overflow_list = observed_overflow_list;
8364 8368 if (cur_overflow_list != BUSY) {
8365 8369 p->set_mark(markOop(cur_overflow_list));
8366 8370 } else {
8367 8371 p->set_mark(NULL);
8368 8372 }
8369 8373 observed_overflow_list =
8370 8374 (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8371 8375 } while (cur_overflow_list != observed_overflow_list);
8372 8376 }
8373 8377 #undef BUSY
8374 8378
8375 8379 // Single threaded
8376 8380 // General Note on GrowableArray: pushes may silently fail
8377 8381 // because we are (temporarily) out of C-heap for expanding
8378 8382 // the stack. The problem is quite ubiquitous and affects
8379 8383 // a lot of code in the JVM. The prudent thing for GrowableArray
8380 8384 // to do (for now) is to exit with an error. However, that may
8381 8385 // be too draconian in some cases because the caller may be
8382 8386 // able to recover without much harm. For such cases, we
8383 8387 // should probably introduce a "soft_push" method which returns
8384 8388 // an indication of success or failure with the assumption that
8385 8389 // the caller may be able to recover from a failure; code in
8386 8390 // the VM can then be changed, incrementally, to deal with such
8387 8391 // failures where possible, thus, incrementally hardening the VM
8388 8392 // in such low resource situations.
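// [Editorial sketch] The "soft_push" idea floated above, made concrete.
// Entirely hypothetical: no such GrowableArray method exists today, and
// ensure_capacity_soft() below is an assumed fallible grow operation.
template <typename E>
bool soft_push(GrowableArray<E>* a, const E& elem) {
  if (!a->ensure_capacity_soft(a->length() + 1)) {  // hypothetical
    return false;  // out of C-heap: let the caller degrade gracefully
  }
  a->push(elem);   // cannot fail once capacity is ensured
  return true;
}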
8389 8393 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8390 8394 _preserved_oop_stack.push(p);
8391 8395 _preserved_mark_stack.push(m);
8392 8396 assert(m == p->mark(), "Mark word changed");
8393 8397 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8394 8398 "bijection");
8395 8399 }
8396 8400
8397 8401 // Single threaded
8398 8402 void CMSCollector::preserve_mark_if_necessary(oop p) {
8399 8403 markOop m = p->mark();
8400 8404 if (m->must_be_preserved(p)) {
8401 8405 preserve_mark_work(p, m);
8402 8406 }
8403 8407 }
8404 8408
8405 8409 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8406 8410 markOop m = p->mark();
8407 8411 if (m->must_be_preserved(p)) {
8408 8412 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8409 8413 // Even though we read the mark word without holding
8410 8414 // the lock, we are assured that it will not change
8411 8415 // because we "own" this oop, so no other thread can
8412 8416 // be trying to push it on the overflow list; see
8413 8417 // the assertion in preserve_mark_work() that checks
8414 8418 // that m == p->mark().
8415 8419 preserve_mark_work(p, m);
8416 8420 }
8417 8421 }
8418 8422
8419 8423 // We should be able to do this multi-threaded,
8420 8424 // a chunk of stack being a task (this is
8421 8425 // correct because each oop only ever appears
8422 8426 // once in the overflow list). However, it's
8423 8427 // not very easy to completely overlap this with
8424 8428 // other operations, so will generally not be done
8425 8429 // until all work's been completed. Because we
8426 8430 // expect the preserved oop stack (set) to be small,
8427 8431 // it's probably fine to do this single-threaded.
8428 8432 // We can explore cleverer concurrent/overlapped/parallel
8429 8433 // processing of preserved marks if we feel the
8430 8434 // need for this in the future. Stack overflow should
8431 8435 // be so rare in practice and, when it happens, its
8432 8436 // effect on performance so great that this will
8433 8437 // likely just be in the noise anyway.
8434 8438 void CMSCollector::restore_preserved_marks_if_any() {
8435 8439 assert(SafepointSynchronize::is_at_safepoint(),
8436 8440 "world should be stopped");
8437 8441 assert(Thread::current()->is_ConcurrentGC_thread() ||
8438 8442 Thread::current()->is_VM_thread(),
8439 8443 "should be single-threaded");
8440 8444 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8441 8445 "bijection");
8442 8446
8443 8447 while (!_preserved_oop_stack.is_empty()) {
8444 8448 oop p = _preserved_oop_stack.pop();
8445 8449 assert(p->is_oop(), "Should be an oop");
8446 8450 assert(_span.contains(p), "oop should be in _span");
8447 8451 assert(p->mark() == markOopDesc::prototype(),
8448 8452 "Set when taken from overflow list");
8449 8453 markOop m = _preserved_mark_stack.pop();
8450 8454 p->set_mark(m);
8451 8455 }
8452 8456 assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8453 8457 "stacks were cleared above");
8454 8458 }
8455 8459
8456 8460 #ifndef PRODUCT
8457 8461 bool CMSCollector::no_preserved_marks() const {
8458 8462 return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8459 8463 }
8460 8464 #endif
8461 8465
8462 8466 // Transfer some number of overflown objects to usual marking
8463 8467 // stack. Return true if some objects were transferred.
8464 8468 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8465 8469 size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8466 8470 (size_t)ParGCDesiredObjsFromOverflowList);
8467 8471
8468 8472 bool res = _collector->take_from_overflow_list(num, _mark_stack);
8469 8473 assert(_collector->overflow_list_is_empty() || res,
8470 8474 "If list is not empty, we should have taken something");
8471 8475 assert(!res || !_mark_stack->isEmpty(),
8472 8476 "If we took something, it should now be on our stack");
8473 8477 return res;
8474 8478 }
8475 8479
8476 8480 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8477 8481 size_t res = _sp->block_size_no_stall(addr, _collector);
8478 8482 if (_sp->block_is_obj(addr)) {
8479 8483 if (_live_bit_map->isMarked(addr)) {
8480 8484 // It can't have been dead in a previous cycle
8481 8485 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8482 8486 } else {
8483 8487 _dead_bit_map->mark(addr); // mark the dead object
8484 8488 }
8485 8489 }
8486 8490 // Could be 0, if the block size could not be computed without stalling.
8487 8491 return res;
8488 8492 }
8489 8493
8490 8494 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
8491 8495
8492 8496 switch (phase) {
8493 8497 case CMSCollector::InitialMarking:
8494 8498 initialize(true /* fullGC */ ,
8495 8499 cause /* cause of the GC */,
8496 8500 true /* recordGCBeginTime */,
8497 8501 true /* recordPreGCUsage */,
8498 8502 false /* recordPeakUsage */,
8499 8503 false /* recordPostGCusage */,
8500 8504 true /* recordAccumulatedGCTime */,
8501 8505 false /* recordGCEndTime */,
8502 8506 false /* countCollection */ );
8503 8507 break;
8504 8508
8505 8509 case CMSCollector::FinalMarking:
8506 8510 initialize(true /* fullGC */ ,
8507 8511 cause /* cause of the GC */,
8508 8512 false /* recordGCBeginTime */,
8509 8513 false /* recordPreGCUsage */,
8510 8514 false /* recordPeakUsage */,
8511 8515 false /* recordPostGCusage */,
8512 8516 true /* recordAccumulatedGCTime */,
8513 8517 false /* recordGCEndTime */,
8514 8518 false /* countCollection */ );
8515 8519 break;
8516 8520
8517 8521 case CMSCollector::Sweeping:
8518 8522 initialize(true /* fullGC */ ,
8519 8523 cause /* cause of the GC */,
8520 8524 false /* recordGCBeginTime */,
8521 8525 false /* recordPreGCUsage */,
8522 8526 true /* recordPeakUsage */,
8523 8527 true /* recordPostGCusage */,
8524 8528 false /* recordAccumulatedGCTime */,
8525 8529 true /* recordGCEndTime */,
8526 8530 true /* countCollection */ );
8527 8531 break;
8528 8532
8529 8533 default:
8530 8534 ShouldNotReachHere();
8531 8535 }
8532 8536 }
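// [Editorial note] Summary of what each phase records above (derived
// directly from the initialize() flags):
//
//   flag                     InitialMarking  FinalMarking  Sweeping
//   recordGCBeginTime             yes            no           no
//   recordPreGCUsage              yes            no           no
//   recordPeakUsage               no             no           yes
//   recordPostGCusage             no             no           yes
//   recordAccumulatedGCTime       yes            yes          no
//   recordGCEndTime               no             no           yes
//   countCollection               no             no           yes
//
// i.e. the CMS cycle is reported as one "full GC": begin time and pre-GC
// usage at initial mark, accumulated time through final mark, and end time,
// post-GC usage and the collection count when sweeping finishes.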
(5849 lines elided)