--- old/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
+++ new/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp
1 1 /*
2 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
26 26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
27 27
28 28 #include "gc_implementation/shared/gcHeapSummary.hpp"
29 29 #include "gc_implementation/shared/gSpaceCounters.hpp"
30 30 #include "gc_implementation/shared/gcStats.hpp"
31 31 #include "gc_implementation/shared/gcWhen.hpp"
32 32 #include "gc_implementation/shared/generationCounters.hpp"
33 33 #include "memory/freeBlockDictionary.hpp"
34 34 #include "memory/generation.hpp"
35 35 #include "runtime/mutexLocker.hpp"
36 36 #include "runtime/virtualspace.hpp"
37 37 #include "services/memoryService.hpp"
38 38 #include "utilities/bitMap.inline.hpp"
39 39 #include "utilities/stack.inline.hpp"
40 40 #include "utilities/taskqueue.hpp"
41 41 #include "utilities/yieldingWorkgroup.hpp"
42 42
43 43 // ConcurrentMarkSweepGeneration is in support of a concurrent
44 44 // mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Shenker
45 45 // style. We assume, for now, that this generation is always the
46 46 // seniormost generation (modulo the PermGeneration), and for simplicity
47 47 // in the first implementation, that this generation is a single compactible
48 48 // space. Neither of these restrictions appears essential; they will be
49 49 // relaxed in the future when more time is available to implement the
50 50 // greater generality (and there's a need for it).
51 51 //
52 52 // Concurrent mode failures are currently handled by
53 53 // means of a sliding mark-compact.
54 54
55 55 class CMSAdaptiveSizePolicy;
56 56 class CMSConcMarkingTask;
57 57 class CMSGCAdaptivePolicyCounters;
58 58 class CMSTracer;
59 59 class ConcurrentGCTimer;
60 60 class ConcurrentMarkSweepGeneration;
61 61 class ConcurrentMarkSweepPolicy;
62 62 class ConcurrentMarkSweepThread;
63 63 class CompactibleFreeListSpace;
64 64 class FreeChunk;
65 65 class PromotionInfo;
66 66 class ScanMarkedObjectsAgainCarefullyClosure;
67 67 class SerialOldTracer;
68 68
69 69 // A generic CMS bit map. It's the basis for both the CMS marking bit map
70 70 // as well as for the mod union table (in each case only a subset of the
71 71 // methods are used). This is essentially a wrapper around the BitMap class,
72 72 // with one bit per (1<<_shifter) HeapWords. (i.e. for the marking bit map,
73 73 // we have _shifter == 0, and for the mod union table we have
74 74 // _shifter == CardTableModRefBS::card_shift - LogHeapWordSize.)
75 75 // XXX 64-bit issues in BitMap?
76 76 class CMSBitMap VALUE_OBJ_CLASS_SPEC {
77 77 friend class VMStructs;
78 78
79 79 HeapWord* _bmStartWord; // base address of range covered by map
80 80 size_t _bmWordSize; // map size (in #HeapWords covered)
81 81 const int _shifter; // shifts to convert HeapWord to bit position
82 82 VirtualSpace _virtual_space; // underlying the bit map
83 83 BitMap _bm; // the bit map itself
84 84 public:
85 85 Mutex* const _lock; // mutex protecting _bm;
86 86
87 87 public:
88 88 // constructor
89 89 CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);
90 90
91 91 // allocates the actual storage for the map
92 92 bool allocate(MemRegion mr);
93 93 // field getter
94 94 Mutex* lock() const { return _lock; }
95 95 // locking verifier convenience function
96 96 void assert_locked() const PRODUCT_RETURN;
97 97
98 98 // inquiries
99 99 HeapWord* startWord() const { return _bmStartWord; }
100 100 size_t sizeInWords() const { return _bmWordSize; }
101 101 size_t sizeInBits() const { return _bm.size(); }
102 102 // the following is one past the last word in space
103 103 HeapWord* endWord() const { return _bmStartWord + _bmWordSize; }
104 104
105 105 // reading marks
106 106 bool isMarked(HeapWord* addr) const;
107 107 bool par_isMarked(HeapWord* addr) const; // does not do lock checks
108 108 bool isUnmarked(HeapWord* addr) const;
109 109 bool isAllClear() const;
110 110
111 111 // writing marks
112 112 void mark(HeapWord* addr);
113 113 // For marking by parallel GC threads;
114 114 // returns true if we did, false if another thread did
115 115 bool par_mark(HeapWord* addr);
116 116
117 117 void mark_range(MemRegion mr);
118 118 void par_mark_range(MemRegion mr);
119 119 void mark_large_range(MemRegion mr);
120 120 void par_mark_large_range(MemRegion mr);
121 121 void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
122 122 void clear_range(MemRegion mr);
123 123 void par_clear_range(MemRegion mr);
124 124 void clear_large_range(MemRegion mr);
125 125 void par_clear_large_range(MemRegion mr);
126 126 void clear_all();
127 127 void clear_all_incrementally(); // Not yet implemented!!
128 128
129 129 NOT_PRODUCT(
130 130 // checks the memory region for validity
131 131 void region_invariant(MemRegion mr);
132 132 )
133 133
134 134 // iteration
135 135 void iterate(BitMapClosure* cl) {
136 136 _bm.iterate(cl);
137 137 }
138 138 void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
139 139 void dirty_range_iterate_clear(MemRegionClosure* cl);
140 140 void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);
141 141
142 142 // auxiliary support for iteration
143 143 HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
144 144 HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
145 145 HeapWord* end_addr) const;
146 146 HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
147 147 HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
148 148 HeapWord* end_addr) const;
149 149 MemRegion getAndClearMarkedRegion(HeapWord* addr);
150 150 MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
151 151 HeapWord* end_addr);
152 152
153 153 // conversion utilities
154 154 HeapWord* offsetToHeapWord(size_t offset) const;
155 155 size_t heapWordToOffset(HeapWord* addr) const;
156 156 size_t heapWordDiffToOffsetDiff(size_t diff) const;
157 157
158 158 // debugging
159 159 // is this address range covered by the bit-map?
160 160 NOT_PRODUCT(
161 161 bool covers(MemRegion mr) const;
162 162 bool covers(HeapWord* start, size_t size = 0) const;
163 163 )
164 164 void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
165 165 };
166 166
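For illustration, the address/offset conversions implied by the comment above CMSBitMap (one bit per (1 << _shifter) HeapWords) reduce to shifts against the base address. A minimal sketch under that assumption; the function names below are hypothetical and the real definitions live outside this header:

    // bit index for a heap address: word distance from the base,
    // scaled down by the shifter
    inline size_t heap_word_to_offset(HeapWord* base, int shifter,
                                      HeapWord* addr) {
      return (size_t)(addr - base) >> shifter;
    }

    // inverse mapping: bit index back to the first HeapWord it covers
    inline HeapWord* offset_to_heap_word(HeapWord* base, int shifter,
                                         size_t offset) {
      return base + (offset << shifter);
    }
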
167 167 // Represents a marking stack used by the CMS collector.
168 168 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
169 169 class CMSMarkStack: public CHeapObj<mtGC> {
170 170 //
171 171 friend class CMSCollector; // to get at expansion stats further below
172 172 //
173 173
174 174 VirtualSpace _virtual_space; // space for the stack
175 175 oop* _base; // bottom of stack
176 176 size_t _index; // one more than last occupied index
177 177 size_t _capacity; // max #elements
178 178 Mutex _par_lock; // an advisory lock used in case of parallel access
179 179 NOT_PRODUCT(size_t _max_depth;) // max depth plumbed during run
180 180
181 181 protected:
182 182 size_t _hit_limit; // count of times we hit the max stack size limit
183 183 size_t _failed_double; // count of times expansion failed before hitting the limit
184 184
185 185 public:
186 186 CMSMarkStack():
187 187 _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
188 188 _hit_limit(0),
189 189 _failed_double(0) {}
190 190
191 191 bool allocate(size_t size);
192 192
193 193 size_t capacity() const { return _capacity; }
194 194
195 195 oop pop() {
196 196 if (!isEmpty()) {
197 197 return _base[--_index];
198 198 }
199 199 return NULL;
200 200 }
201 201
202 202 bool push(oop ptr) {
203 203 if (isFull()) {
204 204 return false;
205 205 } else {
206 206 _base[_index++] = ptr;
207 207 NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
208 208 return true;
209 209 }
210 210 }
211 211
212 212 bool isEmpty() const { return _index == 0; }
213 213 bool isFull() const {
214 214 assert(_index <= _capacity, "buffer overflow");
215 215 return _index == _capacity;
216 216 }
217 217
218 218 size_t length() { return _index; }
219 219
220 220 // "Parallel versions" of some of the above
221 221 oop par_pop() {
222 222 // lock and pop
223 223 MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
224 224 return pop();
225 225 }
226 226
227 227 bool par_push(oop ptr) {
228 228 // lock and push
229 229 MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
230 230 return push(ptr);
231 231 }
232 232
233 233 // Forcibly reset the stack, losing all of its contents.
234 234 void reset() {
235 235 _index = 0;
236 236 }
237 237
238 238 // Expand the stack, typically in response to an overflow condition
239 239 void expand();
240 240
241 241 // Compute the least valued stack element.
242 242 oop least_value(HeapWord* low) {
243 243 oop least = (oop)low;
244 244 for (size_t i = 0; i < _index; i++) {
245 245 least = MIN2(least, _base[i]);
246 246 }
247 247 return least;
248 248 }
249 249
250 250 // Exposed here to allow stack expansion in || case
251 251 Mutex* par_lock() { return &_par_lock; }
252 252 };
253 253
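A hedged usage sketch of the mark stack API above: push() reports overflow by returning false, and expand() grows the backing store but may fail (tracked by _failed_double), so a caller retries once and then falls back to some overflow mechanism. The helper below is hypothetical:

    // push a grey object, expanding once on overflow
    inline void push_grey(CMSMarkStack* stack, oop obj) {
      if (!stack->push(obj)) {  // stack full
        stack->expand();        // may or may not succeed
        if (!stack->push(obj)) {
          // still full: the caller must preserve obj elsewhere,
          // e.g. on the collector's overflow list (not shown)
        }
      }
    }
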
254 254 class CardTableRS;
255 255 class CMSParGCThreadState;
256 256
257 257 class ModUnionClosure: public MemRegionClosure {
258 258 protected:
259 259 CMSBitMap* _t;
260 260 public:
261 261 ModUnionClosure(CMSBitMap* t): _t(t) { }
262 262 void do_MemRegion(MemRegion mr);
263 263 };
264 264
265 265 class ModUnionClosurePar: public ModUnionClosure {
266 266 public:
267 267 ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
268 268 void do_MemRegion(MemRegion mr);
269 269 };
270 270
271 271 // Survivor Chunk Array in support of parallelization of
272 272 // Survivor Space rescan.
273 273 class ChunkArray: public CHeapObj<mtGC> {
274 274 size_t _index;
275 275 size_t _capacity;
276 276 size_t _overflows;
277 277 HeapWord** _array; // storage for array
278 278
279 279 public:
280 280 ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
281 281 ChunkArray(HeapWord** a, size_t c):
282 282 _index(0), _capacity(c), _overflows(0), _array(a) {}
283 283
284 284 HeapWord** array() { return _array; }
285 285 void set_array(HeapWord** a) { _array = a; }
286 286
287 287 size_t capacity() { return _capacity; }
288 288 void set_capacity(size_t c) { _capacity = c; }
289 289
290 290 size_t end() {
291 291 assert(_index <= capacity(),
292 292 err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
293 293 _index, _capacity));
294 294 return _index;
295 295 } // exclusive
296 296
297 297 HeapWord* nth(size_t n) {
298 298 assert(n < end(), "Out of bounds access");
299 299 return _array[n];
300 300 }
301 301
302 302 void reset() {
303 303 _index = 0;
304 304 if (_overflows > 0 && PrintCMSStatistics > 1) {
305 305 warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times",
306 306 _capacity, _overflows);
307 307 }
308 308 _overflows = 0;
309 309 }
310 310
311 311 void record_sample(HeapWord* p, size_t sz) {
312 312 // For now we do not do anything with the size
313 313 if (_index < _capacity) {
314 314 _array[_index++] = p;
315 315 } else {
316 316 ++_overflows;
317 317 assert(_index == _capacity,
318 318 err_msg("_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
319 319 "): out of bounds at overflow#" SIZE_FORMAT,
320 320 _index, _capacity, _overflows));
321 321 }
322 322 }
323 323 };
324 324
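A small usage sketch of ChunkArray, assuming a caller-provided backing array (the names below, including sample_addr and sample_size, are hypothetical): record_sample() silently counts samples past _capacity as overflows, end() is the exclusive top of the recorded samples, and reset() warns about overflows when PrintCMSStatistics > 1.

    HeapWord* storage[128];
    ChunkArray chunks(storage, 128);
    // during sampling; the size argument is currently ignored
    chunks.record_sample(sample_addr, sample_size);
    // during rescan: walk the recorded chunk boundaries
    for (size_t i = 0; i < chunks.end(); i++) {
      HeapWord* boundary = chunks.nth(i);
      // ... delimit one parallel rescan chunk at 'boundary' ...
    }
    chunks.reset();  // warns if any samples overflowed
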
325 325 //
326 326 // Timing, allocation and promotion statistics for gc scheduling and incremental
327 327 // mode pacing. Most statistics are exponential averages.
328 328 //
329 329 class CMSStats VALUE_OBJ_CLASS_SPEC {
330 330 private:
331 331 ConcurrentMarkSweepGeneration* const _cms_gen; // The cms (old) gen.
332 332
333 333 // The following are exponential averages with factor alpha:
334 334 // avg = (100 - alpha) * avg + alpha * cur_sample
335 335 //
336 336 // The durations measure: end_time[n] - start_time[n]
337 337 // The periods measure: start_time[n] - start_time[n-1]
338 338 //
339 339 // The cms period and duration include only concurrent collections; time spent
340 340 // in foreground cms collections due to System.gc() or because of a failure to
341 341 // keep up is not included.
342 342 //
343 343 // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the
344 344 // real value, but is used only after the first period. A value of 100 is
345 345 // used for the first sample so it gets the entire weight.
346 346 unsigned int _saved_alpha; // 0-100
347 347 unsigned int _gc0_alpha;
348 348 unsigned int _cms_alpha;
349 349
350 350 double _gc0_duration;
351 351 double _gc0_period;
352 352 size_t _gc0_promoted; // bytes promoted per gc0
353 353 double _cms_duration;
354 354 double _cms_duration_pre_sweep; // time from initiation to start of sweep
355 355 double _cms_duration_per_mb;
356 356 double _cms_period;
357 357 size_t _cms_allocated; // bytes of direct allocation per gc0 period
358 358
359 359 // Timers.
360 360 elapsedTimer _cms_timer;
361 361 TimeStamp _gc0_begin_time;
362 362 TimeStamp _cms_begin_time;
363 363 TimeStamp _cms_end_time;
364 364
365 365 // Snapshots of the amount used in the CMS generation.
366 366 size_t _cms_used_at_gc0_begin;
367 367 size_t _cms_used_at_gc0_end;
368 368 size_t _cms_used_at_cms_begin;
369 369
370 370 // Used to prevent the duty cycle from being reduced in the middle of a cms
371 371 // cycle.
372 372 bool _allow_duty_cycle_reduction;
373 373
374 374 enum {
375 375 _GC0_VALID = 0x1,
376 376 _CMS_VALID = 0x2,
377 377 _ALL_VALID = _GC0_VALID | _CMS_VALID
378 378 };
379 379
380 380 unsigned int _valid_bits;
381 381
382 382 unsigned int _icms_duty_cycle; // icms duty cycle (0-100).
383 383
384 384 protected:
385 385
386 386 // Return a duty cycle that avoids wild oscillations, by limiting the amount
387 387 // of change between old_duty_cycle and new_duty_cycle (the latter is treated
388 388 // as a recommended value).
389 389 static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
390 390 unsigned int new_duty_cycle);
391 391 unsigned int icms_update_duty_cycle_impl();
392 392
393 393 // In support of adjusting of cms trigger ratios based on history
394 394 // of concurrent mode failure.
395 395 double cms_free_adjustment_factor(size_t free) const;
396 396 void adjust_cms_free_adjustment_factor(bool fail, size_t free);
397 397
398 398 public:
399 399 CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
400 400 unsigned int alpha = CMSExpAvgFactor);
401 401
402 402 // Whether or not the statistics contain valid data; higher level statistics
403 403 // cannot be called until this returns true (they require at least one young
404 404 // gen and one cms cycle to have completed).
405 405 bool valid() const;
406 406
407 407 // Record statistics.
408 408 void record_gc0_begin();
409 409 void record_gc0_end(size_t cms_gen_bytes_used);
410 410 void record_cms_begin();
411 411 void record_cms_end();
412 412
413 413 // Allow management of the cms timer, which must be stopped/started around
414 414 // yield points.
415 415 elapsedTimer& cms_timer() { return _cms_timer; }
416 416 void start_cms_timer() { _cms_timer.start(); }
417 417 void stop_cms_timer() { _cms_timer.stop(); }
418 418
419 419 // Basic statistics; units are seconds or bytes.
420 420 double gc0_period() const { return _gc0_period; }
421 421 double gc0_duration() const { return _gc0_duration; }
422 422 size_t gc0_promoted() const { return _gc0_promoted; }
423 423 double cms_period() const { return _cms_period; }
424 424 double cms_duration() const { return _cms_duration; }
425 425 double cms_duration_per_mb() const { return _cms_duration_per_mb; }
426 426 size_t cms_allocated() const { return _cms_allocated; }
427 427
428 428 size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}
429 429
430 430 // Seconds since the last background cms cycle began or ended.
431 431 double cms_time_since_begin() const;
432 432 double cms_time_since_end() const;
433 433
434 434 // Higher level statistics--caller must check that valid() returns true before
435 435 // calling.
436 436
437 437 // Returns bytes promoted per second of wall clock time.
438 438 double promotion_rate() const;
439 439
440 440 // Returns bytes directly allocated per second of wall clock time.
441 441 double cms_allocation_rate() const;
442 442
443 443 // Rate at which space in the cms generation is being consumed (sum of the
444 444 // above two).
445 445 double cms_consumption_rate() const;
446 446
447 447 // Returns an estimate of the number of seconds until the cms generation will
448 448 // fill up, assuming no collection work is done.
449 449 double time_until_cms_gen_full() const;
450 450
451 451 // Returns an estimate of the number of seconds remaining until
452 452 // the cms generation collection should start.
453 453 double time_until_cms_start() const;
454 454
455 455 // End of higher level statistics.
456 456
457 457 // Returns the cms incremental mode duty cycle, as a percentage (0-100).
458 458 unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }
459 459
460 460 // Update the duty cycle and return the new value.
461 461 unsigned int icms_update_duty_cycle();
462 462
463 463 // Debugging.
464 464 void print_on(outputStream* st) const PRODUCT_RETURN;
465 465 void print() const { print_on(gclog_or_tty); }
466 466 };
467 467
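The exponential averages kept by CMSStats follow the formula in the comment above, avg = (100 - alpha) * avg + alpha * cur_sample, with alpha expressed as a percentage (0-100); normalizing by 100 makes it a weighted average, and the bootstrap alpha of 100 gives the first sample the entire weight. A minimal sketch under that reading (a hypothetical helper, not a member):

    inline double exp_avg(double avg, double cur_sample, unsigned int alpha) {
      // alpha in [0, 100]; alpha == 100 returns cur_sample exactly
      return ((100 - alpha) * avg + alpha * cur_sample) / 100.0;
    }
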
468 468 // A closure related to weak references processing which
469 469 // we embed in the CMSCollector, since we need to pass
470 470 // it to the reference processor for secondary filtering
471 471 // of references based on reachability of referent;
472 472 // see role of _is_alive_non_header closure in the
473 473 // ReferenceProcessor class.
474 474 // For objects in the CMS generation, this closure checks
475 475 // if the object is "live" (reachable). Used in weak
476 476 // reference processing.
477 477 class CMSIsAliveClosure: public BoolObjectClosure {
478 478 const MemRegion _span;
479 479 const CMSBitMap* _bit_map;
480 480
481 481 friend class CMSCollector;
482 482 public:
483 483 CMSIsAliveClosure(MemRegion span,
484 484 CMSBitMap* bit_map):
485 485 _span(span),
486 486 _bit_map(bit_map) {
487 487 assert(!span.is_empty(), "Empty span could spell trouble");
488 488 }
489 489
490 490 void do_object(oop obj) {
491 491 assert(false, "not to be invoked");
492 492 }
493 493
494 494 bool do_object_b(oop obj);
495 495 };
496 496
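The body of do_object_b() lives outside this header; a sketch consistent with the comment above (an object in the covered span is "live" iff it is marked in the bit map, and anything outside the span is treated as live) would be:

    bool CMSIsAliveClosure::do_object_b(oop obj) {
      HeapWord* addr = (HeapWord*)obj;
      return addr != NULL &&
             (!_span.contains(addr) || _bit_map->isMarked(addr));
    }
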
497 497
498 498 // Implements AbstractRefProcTaskExecutor for CMS.
499 499 class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
500 500 public:
501 501
502 502 CMSRefProcTaskExecutor(CMSCollector& collector)
503 503 : _collector(collector)
504 504 { }
505 505
506 506 // Executes a task using worker threads.
507 507 virtual void execute(ProcessTask& task);
508 508 virtual void execute(EnqueueTask& task);
[508 lines elided]
509 509 private:
510 510 CMSCollector& _collector;
511 511 };
512 512
513 513
514 514 class CMSCollector: public CHeapObj<mtGC> {
515 515 friend class VMStructs;
516 516 friend class ConcurrentMarkSweepThread;
517 517 friend class ConcurrentMarkSweepGeneration;
518 518 friend class CompactibleFreeListSpace;
519 + friend class CMSParMarkTask;
520 + friend class CMSParInitialMarkTask;
519 521 friend class CMSParRemarkTask;
520 522 friend class CMSConcMarkingTask;
521 523 friend class CMSRefProcTaskProxy;
522 524 friend class CMSRefProcTaskExecutor;
523 525 friend class ScanMarkedObjectsAgainCarefullyClosure; // for sampling eden
524 526 friend class SurvivorSpacePrecleanClosure; // --- ditto -------
525 527 friend class PushOrMarkClosure; // to access _restart_addr
526 528 friend class Par_PushOrMarkClosure; // to access _restart_addr
527 529 friend class MarkFromRootsClosure; // -- ditto --
528 530 // ... and for clearing cards
529 531 friend class Par_MarkFromRootsClosure; // to access _restart_addr
530 532 // ... and for clearing cards
531 533 friend class Par_ConcMarkingClosure; // to access _restart_addr etc.
532 534 friend class MarkFromRootsVerifyClosure; // to access _restart_addr
533 535 friend class PushAndMarkVerifyClosure; // -- ditto --
534 536 friend class MarkRefsIntoAndScanClosure; // to access _overflow_list
535 537 friend class PushAndMarkClosure; // -- ditto --
536 538 friend class Par_PushAndMarkClosure; // -- ditto --
537 539 friend class CMSKeepAliveClosure; // -- ditto --
538 540 friend class CMSDrainMarkingStackClosure; // -- ditto --
539 541 friend class CMSInnerParMarkAndPushClosure; // -- ditto --
540 542 NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
541 543 friend class ReleaseForegroundGC; // to access _foregroundGCShouldWait
542 544 friend class VM_CMS_Operation;
543 545 friend class VM_CMS_Initial_Mark;
544 546 friend class VM_CMS_Final_Remark;
545 547 friend class TraceCMSMemoryManagerStats;
546 548
547 549 private:
548 550 jlong _time_of_last_gc;
549 551 void update_time_of_last_gc(jlong now) {
550 552 _time_of_last_gc = now;
551 553 }
552 554
553 555 OopTaskQueueSet* _task_queues;
554 556
555 557 // Overflow list of grey objects, threaded through mark-word
556 558 // Manipulated with CAS in the parallel/multi-threaded case.
557 559 oop _overflow_list;
558 560 // The following array-pair keeps track of mark words
559 561 // displaced to accommodate the overflow list above.
560 562 // This code will likely be revisited under RFE#4922830.
561 563 Stack<oop, mtGC> _preserved_oop_stack;
562 564 Stack<markOop, mtGC> _preserved_mark_stack;
563 565
564 566 int* _hash_seed;
565 567
566 568 // In support of multi-threaded concurrent phases
567 569 YieldingFlexibleWorkGang* _conc_workers;
568 570
569 571 // Performance Counters
570 572 CollectorCounters* _gc_counters;
571 573
572 574 // Initialization Errors
573 575 bool _completed_initialization;
574 576
575 577 // In support of ExplicitGCInvokesConcurrent
576 578 static bool _full_gc_requested;
577 579 static GCCause::Cause _full_gc_cause;
578 580 unsigned int _collection_count_start;
579 581
580 582 // Should we unload classes this concurrent cycle?
581 583 bool _should_unload_classes;
582 584 unsigned int _concurrent_cycles_since_last_unload;
583 585 unsigned int concurrent_cycles_since_last_unload() const {
584 586 return _concurrent_cycles_since_last_unload;
585 587 }
586 588 // Did we (allow) unload classes in the previous concurrent cycle?
587 589 bool unloaded_classes_last_cycle() const {
588 590 return concurrent_cycles_since_last_unload() == 0;
589 591 }
590 592 // Root scanning options for perm gen
591 593 int _roots_scanning_options;
592 594 int roots_scanning_options() const { return _roots_scanning_options; }
593 595 void add_root_scanning_option(int o) { _roots_scanning_options |= o; }
594 596 void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; }
595 597
596 598 // Verification support
597 599 CMSBitMap _verification_mark_bm;
598 600 void verify_after_remark_work_1();
599 601 void verify_after_remark_work_2();
600 602
601 603 // true if any verification flag is on.
602 604 bool _verifying;
603 605 bool verifying() const { return _verifying; }
604 606 void set_verifying(bool v) { _verifying = v; }
605 607
606 608 // Collector policy
607 609 ConcurrentMarkSweepPolicy* _collector_policy;
608 610 ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
609 611
610 612 // XXX Move these to CMSStats ??? FIX ME !!!
611 613 elapsedTimer _inter_sweep_timer; // time between sweeps
612 614 elapsedTimer _intra_sweep_timer; // time _in_ sweeps
613 615 // padded decaying average estimates of the above
614 616 AdaptivePaddedAverage _inter_sweep_estimate;
615 617 AdaptivePaddedAverage _intra_sweep_estimate;
616 618
617 619 CMSTracer* _gc_tracer_cm;
618 620 ConcurrentGCTimer* _gc_timer_cm;
619 621
620 622 bool _cms_start_registered;
621 623
622 624 GCHeapSummary _last_heap_summary;
623 625 PermGenSummary _last_perm_gen_summary;
624 626
625 627 void register_foreground_gc_start(GCCause::Cause cause);
626 628 void register_gc_start(GCCause::Cause cause);
627 629 void register_gc_end();
628 630 void save_heap_summary();
629 631 void report_heap_summary(GCWhen::Type when);
630 632
631 633 protected:
632 634 ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS)
633 635 ConcurrentMarkSweepGeneration* _permGen; // perm gen
634 636 MemRegion _span; // span covering above two
635 637 CardTableRS* _ct; // card table
636 638
637 639 // CMS marking support structures
638 640 CMSBitMap _markBitMap;
639 641 CMSBitMap _modUnionTable;
640 642 CMSMarkStack _markStack;
641 643 CMSMarkStack _revisitStack; // used to keep track of klassKlass objects
642 644 // to revisit
643 645 CMSBitMap _perm_gen_verify_bit_map; // Mark bit map for perm gen verification support.
644 646
645 647 HeapWord* _restart_addr; // in support of marking stack overflow
646 648 void lower_restart_addr(HeapWord* low);
647 649
648 650 // Counters in support of marking stack / work queue overflow handling:
649 651 // a non-zero value indicates certain types of overflow events during
650 652 // the current CMS cycle and could lead to stack resizing efforts at
651 653 // an opportune future time.
652 654 size_t _ser_pmc_preclean_ovflw;
653 655 size_t _ser_pmc_remark_ovflw;
654 656 size_t _par_pmc_remark_ovflw;
655 657 size_t _ser_kac_preclean_ovflw;
656 658 size_t _ser_kac_ovflw;
657 659 size_t _par_kac_ovflw;
658 660 NOT_PRODUCT(ssize_t _num_par_pushes;)
659 661
660 662 // ("Weak") Reference processing support
661 663 ReferenceProcessor* _ref_processor;
662 664 CMSIsAliveClosure _is_alive_closure;
663 665 // keep this textually after _markBitMap and _span; c'tor dependency
664 666
665 667 ConcurrentMarkSweepThread* _cmsThread; // the thread doing the work
666 668 ModUnionClosure _modUnionClosure;
667 669 ModUnionClosurePar _modUnionClosurePar;
668 670
669 671 // CMS abstract state machine
670 672 // initial_state: Idling
671 673 // next_state(Idling) = {Marking}
672 674 // next_state(Marking) = {Precleaning, Sweeping}
673 675 // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
674 676 // next_state(AbortablePreclean) = {FinalMarking}
675 677 // next_state(FinalMarking) = {Sweeping}
676 678 // next_state(Sweeping) = {Resizing}
677 679 // next_state(Resizing) = {Resetting}
678 680 // next_state(Resetting) = {Idling}
679 681 // The numeric values below are chosen so that:
680 682 // . _collectorState <= Idling == post-sweep && pre-mark
681 683 // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
682 684 // precleaning || abortablePreclean
683 685 public:
684 686 enum CollectorState {
685 687 Resizing = 0,
686 688 Resetting = 1,
687 689 Idling = 2,
688 690 InitialMarking = 3,
689 691 Marking = 4,
690 692 Precleaning = 5,
691 693 AbortablePreclean = 6,
692 694 FinalMarking = 7,
693 695 Sweeping = 8
694 696 };
695 697 protected:
696 698 static CollectorState _collectorState;
697 699
698 700 // State related to prologue/epilogue invocation for my generations
699 701 bool _between_prologue_and_epilogue;
700 702
701 703 // Signalling/State related to coordination between fore- and background GC
702 704 // Note: When the baton has been passed from background GC to foreground GC,
703 705 // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
704 706 static bool _foregroundGCIsActive; // true iff foreground collector is active or
705 707 // wants to go active
706 708 static bool _foregroundGCShouldWait; // true iff background GC is active and has not
707 709 // yet passed the baton to the foreground GC
708 710
709 711 // Support for CMSScheduleRemark (abortable preclean)
710 712 bool _abort_preclean;
711 713 bool _start_sampling;
712 714
713 715 int _numYields;
714 716 size_t _numDirtyCards;
715 717 size_t _sweep_count;
716 718 // number of full gc's since the last concurrent gc.
717 719 uint _full_gcs_since_conc_gc;
718 720
719 721 // occupancy used for bootstrapping stats
720 722 double _bootstrap_occupancy;
721 723
722 724 // timer
723 725 elapsedTimer _timer;
724 726
725 727 // Timing, allocation and promotion statistics, used for scheduling.
726 728 CMSStats _stats;
727 729
728 730 // Allocation limits installed in the young gen, used only in
729 731 // CMSIncrementalMode. When an allocation in the young gen would cross one of
730 732 // these limits, the cms generation is notified and the cms thread is started
731 733 // or stopped, respectively.
732 734 HeapWord* _icms_start_limit;
733 735 HeapWord* _icms_stop_limit;
734 736
735 737 enum CMS_op_type {
736 738 CMS_op_checkpointRootsInitial,
737 739 CMS_op_checkpointRootsFinal
738 740 };
739 741
740 742 void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
741 743 bool stop_world_and_do(CMS_op_type op);
742 744
743 745 OopTaskQueueSet* task_queues() { return _task_queues; }
744 746 int* hash_seed(int i) { return &_hash_seed[i]; }
[216 lines elided]
745 747 YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }
746 748
747 749 // Support for parallelizing Eden rescan in CMS remark phase
748 750 void sample_eden(); // ... sample Eden space top
749 751
750 752 private:
751 753 // Support for parallelizing young gen rescan in CMS remark phase
752 754 Generation* _young_gen; // the younger gen
753 755 HeapWord** _top_addr; // ... Top of Eden
754 756 HeapWord** _end_addr; // ... End of Eden
757 + Mutex* _eden_chunk_lock;
755 758 HeapWord** _eden_chunk_array; // ... Eden partitioning array
756 759 size_t _eden_chunk_index; // ... top (exclusive) of array
757 760 size_t _eden_chunk_capacity; // ... max entries in array
758 761
759 762 // Support for parallelizing survivor space rescan
760 763 HeapWord** _survivor_chunk_array;
761 764 size_t _survivor_chunk_index;
762 765 size_t _survivor_chunk_capacity;
763 766 size_t* _cursor;
764 767 ChunkArray* _survivor_plab_array;
765 768
766 769 // Support for marking stack overflow handling
767 770 bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
768 771 bool par_take_from_overflow_list(size_t num,
769 772 OopTaskQueue* to_work_q,
770 773 int no_of_gc_threads);
771 774 void push_on_overflow_list(oop p);
772 775 void par_push_on_overflow_list(oop p);
773 776 // the following is, obviously, not, in general, "MT-stable"
774 777 bool overflow_list_is_empty() const;
775 778
776 779 void preserve_mark_if_necessary(oop p);
777 780 void par_preserve_mark_if_necessary(oop p);
778 781 void preserve_mark_work(oop p, markOop m);
779 782 void restore_preserved_marks_if_any();
780 783 NOT_PRODUCT(bool no_preserved_marks() const;)
781 784 // in support of testing overflow code
782 785 NOT_PRODUCT(int _overflow_counter;)
783 786 NOT_PRODUCT(bool simulate_overflow();) // sequential
784 787 NOT_PRODUCT(bool par_simulate_overflow();) // MT version
785 788
786 789 // CMS work methods
787 790 void checkpointRootsInitialWork(bool asynch); // initial checkpoint work
788 791
789 792 // a return value of false indicates failure due to stack overflow
790 793 bool markFromRootsWork(bool asynch); // concurrent marking work
791 794
792 795 public: // FIX ME!!! only for testing
793 796 bool do_marking_st(bool asynch); // single-threaded marking
794 797 bool do_marking_mt(bool asynch); // multi-threaded marking
795 798
796 799 private:
797 800
798 801 // concurrent precleaning work
799 802 size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
800 803 ScanMarkedObjectsAgainCarefullyClosure* cl);
801 804 size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
802 805 ScanMarkedObjectsAgainCarefullyClosure* cl);
803 806 // Does precleaning work, returning a quantity indicative of
804 807 // the amount of "useful work" done.
805 808 size_t preclean_work(bool clean_refs, bool clean_survivors);
806 809 void abortable_preclean(); // Preclean while looking for possible abort
807 810 void initialize_sequential_subtasks_for_young_gen_rescan(int i);
808 811 // Helper function for above; merge-sorts the per-thread plab samples
809 812 void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
810 813 // Resets (i.e. clears) the per-thread plab sample vectors
811 814 void reset_survivor_plab_arrays();
812 815
813 816 // final (second) checkpoint work
814 817 void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
815 818 bool init_mark_was_synchronous);
816 819 // work routine for parallel version of remark
817 820 void do_remark_parallel();
818 821 // work routine for non-parallel version of remark
819 822 void do_remark_non_parallel();
820 823 // reference processing work routine (during second checkpoint)
821 824 void refProcessingWork(bool asynch, bool clear_all_soft_refs);
822 825
823 826 // concurrent sweeping work
824 827 void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
825 828
826 829 // (concurrent) resetting of support data structures
827 830 void reset(bool asynch);
828 831
829 832 // Clear _expansion_cause fields of constituent generations
830 833 void clear_expansion_cause();
831 834
832 835 // An auxiliary method used to record the ends of
833 836 // used regions of each generation to limit the extent of sweep
834 837 void save_sweep_limits();
835 838
836 839 // Resize the generations included in the collector.
837 840 void compute_new_size();
838 841
839 842 // A work method used by foreground collection to determine
840 843 // what type of collection (compacting or not, continuing or fresh)
841 844 // it should do.
842 845 void decide_foreground_collection_type(bool clear_all_soft_refs,
843 846 bool* should_compact, bool* should_start_over);
844 847
845 848 // A work method used by the foreground collector to do
846 849 // a mark-sweep-compact.
847 850 void do_compaction_work(bool clear_all_soft_refs);
848 851
849 852 // A work method used by the foreground collector to do
850 853 // a mark-sweep, after taking over from a possibly on-going
851 854 // concurrent mark-sweep collection.
852 855 void do_mark_sweep_work(bool clear_all_soft_refs,
853 856 CollectorState first_state, bool should_start_over);
854 857
855 858 // Work methods for reporting concurrent mode interruption or failure
856 859 bool is_external_interruption();
857 860 void report_concurrent_mode_interruption();
858 861
859 862 // If the background GC is active, acquire control from the background
860 863 // GC and do the collection.
861 864 void acquire_control_and_collect(bool full, bool clear_all_soft_refs);
862 865
863 866 // For synchronizing passing of control from background to foreground
864 867 // GC. waitForForegroundGC() is called by the background
865 868 // collector. If it had to wait for a foreground collection,
866 869 // it returns true and the background collection should assume
867 870 // that the collection was finished by the foreground
868 871 // collector.
869 872 bool waitForForegroundGC();
870 873
871 874 // Incremental mode triggering: recompute the icms duty cycle and set the
872 875 // allocation limits in the young gen.
873 876 void icms_update_allocation_limits();
874 877
875 878 size_t block_size_using_printezis_bits(HeapWord* addr) const;
876 879 size_t block_size_if_printezis_bits(HeapWord* addr) const;
877 880 HeapWord* next_card_start_after_block(HeapWord* addr) const;
878 881
879 882 void setup_cms_unloading_and_verification_state();
880 883 public:
881 884 CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
882 885 ConcurrentMarkSweepGeneration* permGen,
883 886 CardTableRS* ct,
884 887 ConcurrentMarkSweepPolicy* cp);
885 888 ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
886 889
887 890 ReferenceProcessor* ref_processor() { return _ref_processor; }
888 891 void ref_processor_init();
889 892
890 893 Mutex* bitMapLock() const { return _markBitMap.lock(); }
891 894 static CollectorState abstract_state() { return _collectorState; }
892 895
893 896 bool should_abort_preclean() const; // Whether preclean should be aborted.
894 897 size_t get_eden_used() const;
895 898 size_t get_eden_capacity() const;
896 899
897 900 ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
898 901
899 902 // locking checks
900 903 NOT_PRODUCT(static bool have_cms_token();)
901 904
902 905 // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
903 906 bool shouldConcurrentCollect();
904 907
905 908 void collect(bool full,
906 909 bool clear_all_soft_refs,
907 910 size_t size,
908 911 bool tlab);
909 912 void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
910 913 void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);
911 914
912 915 // In support of ExplicitGCInvokesConcurrent
913 916 static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
914 917 // Should we unload classes in a particular concurrent cycle?
915 918 bool should_unload_classes() const {
916 919 return _should_unload_classes;
917 920 }
918 921 bool update_should_unload_classes();
919 922
920 923 void direct_allocated(HeapWord* start, size_t size);
921 924
922 925 // Object is dead if not marked and current phase is sweeping.
923 926 bool is_dead_obj(oop obj) const;
924 927
925 928 // After a promotion (of "start"), do any necessary marking.
926 929 // If "par", then it's being done by a parallel GC thread.
927 930 // The last two args indicate if we need precise marking
928 931 // and if so the size of the object so it can be dirtied
929 932 // in its entirety.
930 933 void promoted(bool par, HeapWord* start,
931 934 bool is_obj_array, size_t obj_size);
932 935
933 936 HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
934 937 size_t word_size);
935 938
936 939 void getFreelistLocks() const;
937 940 void releaseFreelistLocks() const;
938 941 bool haveFreelistLocks() const;
939 942
940 943 // GC prologue and epilogue
941 944 void gc_prologue(bool full);
942 945 void gc_epilogue(bool full);
943 946
944 947 jlong time_of_last_gc(jlong now) {
945 948 if (_collectorState <= Idling) {
[181 lines elided]
946 949 // gc not in progress
947 950 return _time_of_last_gc;
948 951 } else {
949 952 // collection in progress
950 953 return now;
951 954 }
952 955 }
953 956
954 957 // Support for parallel remark of survivor space
955 958 void* get_data_recorder(int thr_num);
959 + void sample_eden_chunk();
956 960
957 961 CMSBitMap* markBitMap() { return &_markBitMap; }
958 962 void directAllocated(HeapWord* start, size_t size);
959 963
960 964 // main CMS steps and related support
961 965 void checkpointRootsInitial(bool asynch);
962 966 bool markFromRoots(bool asynch); // a return value of false indicates failure
963 967 // due to stack overflow
964 968 void preclean();
965 969 void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
966 970 bool init_mark_was_synchronous);
967 971 void sweep(bool asynch);
968 972
969 973 // Check that the currently executing thread is the expected
970 974 // one (foreground collector or background collector).
971 975 static void check_correct_thread_executing() PRODUCT_RETURN;
972 976 // XXXPERM void print_statistics() PRODUCT_RETURN;
973 977
974 978 bool is_cms_reachable(HeapWord* addr);
975 979
976 980 // Performance Counter Support
977 981 CollectorCounters* counters() { return _gc_counters; }
978 982
979 983 // timer stuff
980 984 void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
981 985 void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); }
982 986 void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
983 987 double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }
984 988
985 989 int yields() { return _numYields; }
986 990 void resetYields() { _numYields = 0; }
987 991 void incrementYields() { _numYields++; }
988 992 void resetNumDirtyCards() { _numDirtyCards = 0; }
989 993 void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
990 994 size_t numDirtyCards() { return _numDirtyCards; }
991 995
992 996 static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
993 997 static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
994 998 static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
995 999 static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
996 1000 size_t sweep_count() const { return _sweep_count; }
997 1001 void increment_sweep_count() { _sweep_count++; }
998 1002
999 1003 // Timers/stats for gc scheduling and incremental mode pacing.
1000 1004 CMSStats& stats() { return _stats; }
1001 1005
1002 1006 // Convenience methods that check whether CMSIncrementalMode is enabled and
1003 1007 // forward to the corresponding methods in ConcurrentMarkSweepThread.
1004 1008 static void start_icms();
1005 1009 static void stop_icms(); // Called at the end of the cms cycle.
1006 1010 static void disable_icms(); // Called before a foreground collection.
1007 1011 static void enable_icms(); // Called after a foreground collection.
1008 1012 void icms_wait(); // Called at yield points.
1009 1013
1010 1014 // Adaptive size policy
1011 1015 CMSAdaptiveSizePolicy* size_policy();
1012 1016 CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
1013 1017
1014 1018 // debugging
1015 1019 void verify();
1016 1020 bool verify_after_remark();
1017 1021 void verify_ok_to_terminate() const PRODUCT_RETURN;
1018 1022 void verify_work_stacks_empty() const PRODUCT_RETURN;
1019 1023 void verify_overflow_empty() const PRODUCT_RETURN;
1020 1024
1021 1025 // convenience methods in support of debugging
1022 1026 static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
1023 1027 HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
[58 lines elided]
1024 1028
1025 1029 // accessors
1026 1030 CMSMarkStack* verification_mark_stack() { return &_markStack; }
1027 1031 CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }
1028 1032
1029 1033 // Get the bit map with a perm gen "deadness" information.
1030 1034 CMSBitMap* perm_gen_verify_bit_map() { return &_perm_gen_verify_bit_map; }
1031 1035
1032 1036 // Initialization errors
1033 1037 bool completed_initialization() { return _completed_initialization; }
1038 +
1039 + void print_eden_and_survivor_chunk_arrays();
1034 1040 };
1035 1041
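For reference, the transition sets documented above CollectorState inside CMSCollector read as the table below. This is an illustrative encoding only (a hypothetical helper, not part of the class); where the comment lists two successors, the second is noted in a trailing comment:

    CMSCollector::CollectorState next_state(CMSCollector::CollectorState s) {
      switch (s) {
        case CMSCollector::Idling:            return CMSCollector::Marking;
        case CMSCollector::Marking:           return CMSCollector::Precleaning;       // or Sweeping
        case CMSCollector::Precleaning:       return CMSCollector::AbortablePreclean; // or FinalMarking
        case CMSCollector::AbortablePreclean: return CMSCollector::FinalMarking;
        case CMSCollector::FinalMarking:      return CMSCollector::Sweeping;
        case CMSCollector::Sweeping:          return CMSCollector::Resizing;
        case CMSCollector::Resizing:          return CMSCollector::Resetting;
        case CMSCollector::Resetting:         return CMSCollector::Idling;
        default:                              return s;  // InitialMarking is driven by the initial checkpoint
      }
    }
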
1036 1042 class CMSExpansionCause : public AllStatic {
1037 1043 public:
1038 1044 enum Cause {
1039 1045 _no_expansion,
1040 1046 _satisfy_free_ratio,
1041 1047 _satisfy_promotion,
1042 1048 _satisfy_allocation,
1043 1049 _allocate_par_lab,
1044 1050 _allocate_par_spooling_space,
1045 1051 _adaptive_size_policy
1046 1052 };
1047 1053 // Return a string describing the cause of the expansion.
1048 1054 static const char* to_string(CMSExpansionCause::Cause cause);
1049 1055 };
1050 1056
1051 1057 class ConcurrentMarkSweepGeneration: public CardGeneration {
1052 1058 friend class VMStructs;
1053 1059 friend class ConcurrentMarkSweepThread;
1054 1060 friend class ConcurrentMarkSweep;
1055 1061 friend class CMSCollector;
1056 1062 protected:
1057 1063 static CMSCollector* _collector; // the collector that collects us
1058 1064 CompactibleFreeListSpace* _cmsSpace; // underlying space (only one for now)
1059 1065
1060 1066 // Performance Counters
1061 1067 GenerationCounters* _gen_counters;
1062 1068 GSpaceCounters* _space_counters;
1063 1069
1064 1070 // Words directly allocated, used by CMSStats.
1065 1071 size_t _direct_allocated_words;
1066 1072
1067 1073 // Non-product stat counters
1068 1074 NOT_PRODUCT(
1069 1075 size_t _numObjectsPromoted;
1070 1076 size_t _numWordsPromoted;
1071 1077 size_t _numObjectsAllocated;
1072 1078 size_t _numWordsAllocated;
1073 1079 )
1074 1080
1075 1081 // Used for sizing decisions
1076 1082 bool _incremental_collection_failed;
1077 1083 bool incremental_collection_failed() {
1078 1084 return _incremental_collection_failed;
1079 1085 }
1080 1086 void set_incremental_collection_failed() {
1081 1087 _incremental_collection_failed = true;
1082 1088 }
1083 1089 void clear_incremental_collection_failed() {
1084 1090 _incremental_collection_failed = false;
1085 1091 }
1086 1092
1087 1093 // accessors
1088 1094 void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
1089 1095 CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
1090 1096
1091 1097 private:
1092 1098 // For parallel young-gen GC support.
1093 1099 CMSParGCThreadState** _par_gc_thread_states;
1094 1100
1095 1101 // Reason generation was expanded
1096 1102 CMSExpansionCause::Cause _expansion_cause;
1097 1103
1098 1104 // In support of MinChunkSize being larger than min object size
1099 1105 const double _dilatation_factor;
1100 1106
1101 1107 enum CollectionTypes {
1102 1108 Concurrent_collection_type = 0,
1103 1109 MS_foreground_collection_type = 1,
1104 1110 MSC_foreground_collection_type = 2,
1105 1111 Unknown_collection_type = 3
1106 1112 };
1107 1113
1108 1114 CollectionTypes _debug_collection_type;
1109 1115
1110 1116 // Fraction of current occupancy at which to start a CMS collection which
1111 1117 // will collect this generation (at least).
1112 1118 double _initiating_occupancy;
1113 1119
1114 1120 protected:
1115 1121 // Shrink generation by specified size (returns false if unable to shrink)
1116 1122 virtual void shrink_by(size_t bytes);
1117 1123
1118 1124 // Update statistics for GC
1119 1125 virtual void update_gc_stats(int level, bool full);
1120 1126
1121 1127 // Maximum available space in the generation (including uncommitted)
1122 1128 // space.
1123 1129 size_t max_available() const;
1124 1130
1125 1131 // getter and initializer for _initiating_occupancy field.
1126 1132 double initiating_occupancy() const { return _initiating_occupancy; }
1127 1133 void init_initiating_occupancy(intx io, intx tr);
1128 1134
1129 1135 public:
1130 1136 ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
1131 1137 int level, CardTableRS* ct,
1132 1138 bool use_adaptive_freelists,
1133 1139 FreeBlockDictionary<FreeChunk>::DictionaryChoice);
1134 1140
1135 1141 // Accessors
1136 1142 CMSCollector* collector() const { return _collector; }
1137 1143 static void set_collector(CMSCollector* collector) {
1138 1144 assert(_collector == NULL, "already set");
1139 1145 _collector = collector;
1140 1146 }
1141 1147 CompactibleFreeListSpace* cmsSpace() const { return _cmsSpace; }
1142 1148
1143 1149 Mutex* freelistLock() const;
1144 1150
1145 1151 virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
1146 1152
1147 1153 // Adaptive size policy
1148 1154 CMSAdaptiveSizePolicy* size_policy();
1149 1155
1150 1156 bool refs_discovery_is_atomic() const { return false; }
1151 1157 bool refs_discovery_is_mt() const {
1152 1158 // Note: CMS does MT-discovery during the parallel-remark
1153 1159 // phases. Use ReferenceProcessorMTMutator to make refs
1154 1160 // discovery MT-safe during such phases or other parallel
1155 1161 // discovery phases in the future. This may all go away
1156 1162 // if/when we decide that refs discovery is sufficiently
1157 1163 // rare that the cost of the CAS's involved is in the
1158 1164 // noise. That's a measurement that should be done, and
1159 1165 // the code simplified if that turns out to be the case.
1160 1166 return ConcGCThreads > 1;
1161 1167 }
1162 1168
1163 1169 // Override
1164 1170 virtual void ref_processor_init();
1165 1171
1166 1172 // Grow generation by specified size (returns false if unable to grow)
1167 1173 bool grow_by(size_t bytes);
1168 1174 // Grow generation to reserved size.
1169 1175 bool grow_to_reserved();
1170 1176
1171 1177 void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
1172 1178
1173 1179 // Space enquiries
1174 1180 size_t capacity() const;
1175 1181 size_t used() const;
1176 1182 size_t free() const;
1177 1183 double occupancy() const { return ((double)used())/((double)capacity()); }
1178 1184 size_t contiguous_available() const;
1179 1185 size_t unsafe_max_alloc_nogc() const;
1180 1186
1181 1187 // overrides
1182 1188 MemRegion used_region() const;
1183 1189 MemRegion used_region_at_save_marks() const;
1184 1190
1185 1191 // Does a "full" (forced) collection invoked on this generation collect
1186 1192 // all younger generations as well? Note that the second conjunct is a
1187 1193 // hack to allow the collection of the younger gen first if the flag is
1188 1194 // set. This is better than using the policy's should_collect_gen0_first()
1189 1195 // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
1190 1196 virtual bool full_collects_younger_generations() const {
1191 1197 return UseCMSCompactAtFullCollection && !CollectGen0First;
1192 1198 }
1193 1199
1194 1200 void space_iterate(SpaceClosure* blk, bool usedOnly = false);
1195 1201
1196 1202 // Support for compaction
1197 1203 CompactibleSpace* first_compaction_space() const;
1198 1204 // Adjust quantities in the generation affected by
1199 1205 // the compaction.
1200 1206 void reset_after_compaction();
1201 1207
1202 1208 // Allocation support
1203 1209 HeapWord* allocate(size_t size, bool tlab);
1204 1210 HeapWord* have_lock_and_allocate(size_t size, bool tlab);
1205 1211 oop promote(oop obj, size_t obj_size);
1206 1212 HeapWord* par_allocate(size_t size, bool tlab) {
1207 1213 return allocate(size, tlab);
1208 1214 }
1209 1215
1210 1216 // Incremental mode triggering.
1211 1217 HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
1212 1218 size_t word_size);
1213 1219
1214 1220 // Used by CMSStats to track direct allocation. The value is sampled and
1215 1221 // reset after each young gen collection.
1216 1222 size_t direct_allocated_words() const { return _direct_allocated_words; }
1217 1223 void reset_direct_allocated_words() { _direct_allocated_words = 0; }
1218 1224
1219 1225 // Overrides for parallel promotion.
1220 1226 virtual oop par_promote(int thread_num,
1221 1227 oop obj, markOop m, size_t word_sz);
1222 1228 // This one should not be called for CMS.
1223 1229 virtual void par_promote_alloc_undo(int thread_num,
1224 1230 HeapWord* obj, size_t word_sz);
1225 1231 virtual void par_promote_alloc_done(int thread_num);
1226 1232 virtual void par_oop_since_save_marks_iterate_done(int thread_num);
1227 1233
1228 1234 virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
1229 1235
1230 1236 // Inform this (non-young) generation that a promotion failure was
1231 1237 // encountered during a collection of a younger generation that
1232 1238 // promotes into this generation.
1233 1239 virtual void promotion_failure_occurred();
1234 1240
1235 1241 bool should_collect(bool full, size_t size, bool tlab);
1236 1242 virtual bool should_concurrent_collect() const;
1237 1243 virtual bool is_too_full() const;
1238 1244 void collect(bool full,
1239 1245 bool clear_all_soft_refs,
1240 1246 size_t size,
1241 1247 bool tlab);
1242 1248
1243 1249 HeapWord* expand_and_allocate(size_t word_size,
1244 1250 bool tlab,
1245 1251 bool parallel = false);
1246 1252
1247 1253 // GC prologue and epilogue
1248 1254 void gc_prologue(bool full);
1249 1255 void gc_prologue_work(bool full, bool registerClosure,
1250 1256 ModUnionClosure* modUnionClosure);
1251 1257 void gc_epilogue(bool full);
1252 1258 void gc_epilogue_work(bool full);
1253 1259
1254 1260 // Time since last GC of this generation
1255 1261 jlong time_of_last_gc(jlong now) {
1256 1262 return collector()->time_of_last_gc(now);
1257 1263 }
1258 1264 void update_time_of_last_gc(jlong now) {
1259 1265 collector()-> update_time_of_last_gc(now);
1260 1266 }
1261 1267
1262 1268 // Allocation failure
1263 1269 void expand(size_t bytes, size_t expand_bytes,
1264 1270 CMSExpansionCause::Cause cause);
1265 1271 virtual bool expand(size_t bytes, size_t expand_bytes);
1266 1272 void shrink(size_t bytes);
1267 1273 HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
1268 1274 bool expand_and_ensure_spooling_space(PromotionInfo* promo);
1269 1275
1270 1276 // Iteration support and related enquiries
1271 1277 void save_marks();
1272 1278 bool no_allocs_since_save_marks();
1273 1279 void object_iterate_since_last_GC(ObjectClosure* cl);
1274 1280 void younger_refs_iterate(OopsInGenClosure* cl);
1275 1281
1276 1282 // Iteration support specific to CMS generations
1277 1283 void save_sweep_limit();
1278 1284
1279 1285 // More iteration support
1280 1286 virtual void oop_iterate(MemRegion mr, OopClosure* cl);
1281 1287 virtual void oop_iterate(OopClosure* cl);
1282 1288 virtual void safe_object_iterate(ObjectClosure* cl);
1283 1289 virtual void object_iterate(ObjectClosure* cl);
1284 1290
1285 1291 // Need to declare the full complement of closures, whether we'll
1286 1292 // override them or not, or get message from the compiler:
1287 1293 // oop_since_save_marks_iterate_nv hides virtual function...
1288 1294 #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
1289 1295 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
1290 1296 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)
1291 1297
1292 1298 // Smart allocation XXX -- move to CFLSpace?
1293 1299 void setNearLargestChunk();
1294 1300 bool isNearLargestChunk(HeapWord* addr);
1295 1301
1296 1302 // Get the chunk at the end of the space. Delegates to
1297 1303 // the space.
1298 1304 FreeChunk* find_chunk_at_end();
1299 1305
1300 1306 // Overriding of unused functionality (sharing not yet supported with CMS)
1301 1307 void pre_adjust_pointers();
1302 1308 void post_compact();
1303 1309
1304 1310 // Debugging
1305 1311 void prepare_for_verify();
1306 1312 void verify();
1307 1313 void print_statistics() PRODUCT_RETURN;
1308 1314
1309 1315 // Performance Counters support
[266 lines elided]
1310 1316 virtual void update_counters();
1311 1317 virtual void update_counters(size_t used);
1312 1318 void initialize_performance_counters();
1313 1319 CollectorCounters* counters() { return collector()->counters(); }
1314 1320
1315 1321 // Support for parallel remark of survivor space
1316 1322 void* get_data_recorder(int thr_num) {
1317 1323 // Delegate to the collector
1318 1324 return collector()->get_data_recorder(thr_num);
1319 1325 }
1326 + void sample_eden_chunk() {
1327 + // Delegate to the collector
1328 + return collector()->sample_eden_chunk();
1329 + }
1320 1330
1321 1331 // Printing
1322 1332 const char* name() const;
1323 1333 virtual const char* short_name() const { return "CMS"; }
1324 1334 void print() const;
1325 1335 void printOccupancy(const char* s);
1326 1336 bool must_be_youngest() const { return false; }
1327 1337 bool must_be_oldest() const { return true; }
1328 1338
1329 1339 void compute_new_size();
1330 1340
1331 1341 CollectionTypes debug_collection_type() { return _debug_collection_type; }
1332 1342 void rotate_debug_collection_type();
1333 1343 };
1334 1344
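The _initiating_occupancy field above feeds the decision to start a cycle: a CMS collection of this generation is warranted once occupancy() (used()/capacity()) reaches that fraction. A hedged sketch of just the occupancy test (a hypothetical member for illustration; the real should_concurrent_collect() weighs additional heuristics):

    bool ConcurrentMarkSweepGeneration::occupancy_says_collect() const {
      return occupancy() >= initiating_occupancy();
    }
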
1335 1345 class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
1336 1346
1337 1347 // Return the size policy from the heap's collector
1338 1348 // policy casted to CMSAdaptiveSizePolicy*.
1339 1349 CMSAdaptiveSizePolicy* cms_size_policy() const;
1340 1350
1341 1351 // Resize the generation based on the adaptive size
1342 1352 // policy.
1343 1353 void resize(size_t cur_promo, size_t desired_promo);
1344 1354
1345 1355 // Return the GC counters from the collector policy
1346 1356 CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
1347 1357
1348 1358 virtual void shrink_by(size_t bytes);
1349 1359
1350 1360 public:
1351 1361 virtual void compute_new_size();
1352 1362 ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
1353 1363 int level, CardTableRS* ct,
1354 1364 bool use_adaptive_freelists,
1355 1365 FreeBlockDictionary<FreeChunk>::DictionaryChoice
1356 1366 dictionaryChoice) :
1357 1367 ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
1358 1368 use_adaptive_freelists, dictionaryChoice) {}
1359 1369
1360 1370 virtual const char* short_name() const { return "ASCMS"; }
1361 1371 virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }
1362 1372
1363 1373 virtual void update_counters();
1364 1374 virtual void update_counters(size_t used);
1365 1375 };
1366 1376
1367 1377 //
1368 1378 // Closures of various sorts used by CMS to accomplish its work
1369 1379 //
1370 1380
1371 1381 // This closure is used to check that a certain set of oops is empty.
1372 1382 class FalseClosure: public OopClosure {
1373 1383 public:
1374 1384 void do_oop(oop* p) { guarantee(false, "Should be an empty set"); }
1375 1385 void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
1376 1386 };
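// Usage sketch (hypothetical call site, not taken from the collector):
// applying FalseClosure to a set that is required to be empty turns any
// surviving element into an immediate guarantee() failure:
//   FalseClosure never;
//   supposedly_empty_set->oop_iterate(&never);  // fires on any element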
1377 1387
1378 1388 // This closure is used to do concurrent marking from the roots
1379 1389 // following the first checkpoint.
1380 1390 class MarkFromRootsClosure: public BitMapClosure {
1381 1391 CMSCollector* _collector;
1382 1392 MemRegion _span;
1383 1393 CMSBitMap* _bitMap;
1384 1394 CMSBitMap* _mut;
1385 1395 CMSMarkStack* _markStack;
1386 1396 CMSMarkStack* _revisitStack;
1387 1397 bool _yield;
1388 1398 int _skipBits;
1389 1399 HeapWord* _finger;
1390 1400 HeapWord* _threshold;
1391 1401 DEBUG_ONLY(bool _verifying;)
1392 1402
1393 1403 public:
1394 1404 MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
1395 1405 CMSBitMap* bitMap,
1396 1406 CMSMarkStack* markStack,
1397 1407 CMSMarkStack* revisitStack,
1398 1408 bool should_yield, bool verifying = false);
1399 1409 bool do_bit(size_t offset);
1400 1410 void reset(HeapWord* addr);
1401 1411 inline void do_yield_check();
1402 1412
1403 1413 private:
1404 1414 void scanOopsInOop(HeapWord* ptr);
1405 1415 void do_yield_work();
1406 1416 };
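// Orientation note (assumed driver shape, not the collector's code): a
// BitMapClosure is driven by the bit map's iteration, roughly
//   for each set bit b in [start, end):
//     if (!closure->do_bit(b)) break;  // returning false aborts the walk
// _skipBits presumably lets do_bit() ignore a number of subsequent bits
// without re-examining them.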
1407 1417
1408 1418 // This closure is used to do concurrent multi-threaded
1409 1419 // marking from the roots following the first checkpoint.
1410 1420 // XXX This should really be a subclass of the serial version
1411 1421 // above, but I have not had the time to refactor things cleanly.
1412 1422 // That will be done for Dolphin.
1413 1423 class Par_MarkFromRootsClosure: public BitMapClosure {
1414 1424 CMSCollector* _collector;
1415 1425 MemRegion _whole_span;
1416 1426 MemRegion _span;
1417 1427 CMSBitMap* _bit_map;
1418 1428 CMSBitMap* _mut;
1419 1429 OopTaskQueue* _work_queue;
1420 1430 CMSMarkStack* _overflow_stack;
1421 1431 CMSMarkStack* _revisit_stack;
1422 1432 bool _yield;
1423 1433 int _skip_bits;
1424 1434 HeapWord* _finger;
1425 1435 HeapWord* _threshold;
1426 1436 CMSConcMarkingTask* _task;
1427 1437 public:
1428 1438 Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
1429 1439 MemRegion span,
1430 1440 CMSBitMap* bit_map,
1431 1441 OopTaskQueue* work_queue,
1432 1442 CMSMarkStack* overflow_stack,
1433 1443 CMSMarkStack* revisit_stack,
1434 1444 bool should_yield);
1435 1445 bool do_bit(size_t offset);
1436 1446 inline void do_yield_check();
1437 1447
1438 1448 private:
1439 1449 void scan_oops_in_oop(HeapWord* ptr);
1440 1450 void do_yield_work();
1441 1451 bool get_work_from_overflow_stack();
1442 1452 };
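// Note on the overflow stack (a sketch of the assumed protocol): each
// worker pushes onto its own _work_queue and spills to the shared
// _overflow_stack when the queue fills; get_work_from_overflow_stack()
// presumably replenishes the local queue from that shared stack when the
// worker runs dry, before it resorts to stealing from peers.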
1443 1453
1444 1454 // The following closures are used to do certain kinds of verification of
1445 1455 // CMS marking.
1446 1456 class PushAndMarkVerifyClosure: public OopClosure {
1447 1457 CMSCollector* _collector;
1448 1458 MemRegion _span;
1449 1459 CMSBitMap* _verification_bm;
1450 1460 CMSBitMap* _cms_bm;
1451 1461 CMSMarkStack* _mark_stack;
1452 1462 protected:
1453 1463 void do_oop(oop p);
1454 1464 template <class T> inline void do_oop_work(T *p) {
1455 1465 oop obj = oopDesc::load_decode_heap_oop_not_null(p);
1456 1466 do_oop(obj);
1457 1467 }
1458 1468 public:
1459 1469 PushAndMarkVerifyClosure(CMSCollector* cms_collector,
1460 1470 MemRegion span,
1461 1471 CMSBitMap* verification_bm,
1462 1472 CMSBitMap* cms_bm,
1463 1473 CMSMarkStack* mark_stack);
1464 1474 void do_oop(oop* p);
1465 1475 void do_oop(narrowOop* p);
1466 1476 // Deal with a stack overflow condition
1467 1477 void handle_stack_overflow(HeapWord* lost);
1468 1478 };
1469 1479
1470 1480 class MarkFromRootsVerifyClosure: public BitMapClosure {
1471 1481 CMSCollector* _collector;
1472 1482 MemRegion _span;
1473 1483 CMSBitMap* _verification_bm;
1474 1484 CMSBitMap* _cms_bm;
1475 1485 CMSMarkStack* _mark_stack;
1476 1486 HeapWord* _finger;
1477 1487 PushAndMarkVerifyClosure _pam_verify_closure;
1478 1488 public:
1479 1489 MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
1480 1490 CMSBitMap* verification_bm,
1481 1491 CMSBitMap* cms_bm,
1482 1492 CMSMarkStack* mark_stack);
1483 1493 bool do_bit(size_t offset);
1484 1494 void reset(HeapWord* addr);
1485 1495 };
1486 1496
1487 1497
1488 1498 // This closure is used to check that a certain set of bits is
1489 1499 // "empty" (i.e. the bit vector doesn't have any 1-bits).
1490 1500 class FalseBitMapClosure: public BitMapClosure {
1491 1501 public:
1492 1502 bool do_bit(size_t offset) {
1493 1503 guarantee(false, "Should not have a 1 bit");
1494 1504 return true;
1495 1505 }
1496 1506 };
1497 1507
1498 1508 // This closure is used during the second checkpointing phase
1499 1509 // to rescan the marked objects on the dirty cards in the mod
1500 1510 // union table and the card table proper. It's invoked via
1501 1511 // MarkFromDirtyCardsClosure below. It uses either
1502 1512 // [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
1503 1513 // declared in genOopClosures.hpp to accomplish some of its work.
1504 1514 // In the parallel case the bitMap is shared, so access to
1505 1515 // it needs to be suitably synchronized for updates by the embedded
1506 1516 // closures; however, this closure itself only reads the bit_map
1507 1517 // and, because it is idempotent, is immune to reading stale
1508 1518 // values.
1509 1519 class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
1510 1520 #ifdef ASSERT
1511 1521 CMSCollector* _collector;
1512 1522 MemRegion _span;
1513 1523 union {
1514 1524 CMSMarkStack* _mark_stack;
1515 1525 OopTaskQueue* _work_queue;
1516 1526 };
1517 1527 #endif // ASSERT
1518 1528 bool _parallel;
1519 1529 CMSBitMap* _bit_map;
1520 1530 union {
1521 1531 MarkRefsIntoAndScanClosure* _scan_closure;
1522 1532 Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
1523 1533 };
1524 1534
1525 1535 public:
1526 1536 ScanMarkedObjectsAgainClosure(CMSCollector* collector,
1527 1537 MemRegion span,
1528 1538 ReferenceProcessor* rp,
1529 1539 CMSBitMap* bit_map,
1530 1540 CMSMarkStack* mark_stack,
1531 1541 CMSMarkStack* revisit_stack,
1532 1542 MarkRefsIntoAndScanClosure* cl):
1533 1543 #ifdef ASSERT
1534 1544 _collector(collector),
1535 1545 _span(span),
1536 1546 _mark_stack(mark_stack),
1537 1547 #endif // ASSERT
1538 1548 _parallel(false),
1539 1549 _bit_map(bit_map),
1540 1550 _scan_closure(cl) { }
1541 1551
1542 1552 ScanMarkedObjectsAgainClosure(CMSCollector* collector,
1543 1553 MemRegion span,
1544 1554 ReferenceProcessor* rp,
1545 1555 CMSBitMap* bit_map,
1546 1556 OopTaskQueue* work_queue,
1547 1557 CMSMarkStack* revisit_stack,
1548 1558 Par_MarkRefsIntoAndScanClosure* cl):
1549 1559 #ifdef ASSERT
1550 1560 _collector(collector),
1551 1561 _span(span),
1552 1562 _work_queue(work_queue),
1553 1563 #endif // ASSERT
1554 1564 _parallel(true),
1555 1565 _bit_map(bit_map),
1556 1566 _par_scan_closure(cl) { }
1557 1567
1558 1568 void do_object(oop obj) {
1559 1569 guarantee(false, "Call do_object_bm(oop, MemRegion) instead");
1560 1570 }
1561 1571 bool do_object_b(oop obj) {
1562 1572 guarantee(false, "Call the do_object_bm(oop, MemRegion) form instead");
1563 1573 return false;
1564 1574 }
1565 1575 bool do_object_bm(oop p, MemRegion mr);
1566 1576 };
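// Sketch of the dispatch implied by _parallel (an assumed shape for the
// do_object_bm() definition in the .cpp file, shown only to motivate the
// anonymous unions above):
//   if (_parallel) p->oop_iterate(_par_scan_closure, mr);
//   else           p->oop_iterate(_scan_closure, mr);
// Only one member of each union is live at a time, selected by which
// constructor ran.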
1567 1577
1568 1578 // This closure is used during the second checkpointing phase
1569 1579 // to rescan the marked objects on the dirty cards in the mod
1570 1580 // union table and the card table proper. It invokes
1571 1581 // ScanMarkedObjectsAgainClosure above to accomplish much of its work.
1572 1582 // In the parallel case, the bit map is shared and requires
1573 1583 // synchronized access.
1574 1584 class MarkFromDirtyCardsClosure: public MemRegionClosure {
1575 1585 CompactibleFreeListSpace* _space;
1576 1586 ScanMarkedObjectsAgainClosure _scan_cl;
1577 1587 size_t _num_dirty_cards;
1578 1588
1579 1589 public:
1580 1590 MarkFromDirtyCardsClosure(CMSCollector* collector,
1581 1591 MemRegion span,
1582 1592 CompactibleFreeListSpace* space,
1583 1593 CMSBitMap* bit_map,
1584 1594 CMSMarkStack* mark_stack,
1585 1595 CMSMarkStack* revisit_stack,
1586 1596 MarkRefsIntoAndScanClosure* cl):
1587 1597 _space(space),
1588 1598 _num_dirty_cards(0),
1589 1599 _scan_cl(collector, span, collector->ref_processor(), bit_map,
1590 1600 mark_stack, revisit_stack, cl) { }
1591 1601
1592 1602 MarkFromDirtyCardsClosure(CMSCollector* collector,
1593 1603 MemRegion span,
1594 1604 CompactibleFreeListSpace* space,
1595 1605 CMSBitMap* bit_map,
1596 1606 OopTaskQueue* work_queue,
1597 1607 CMSMarkStack* revisit_stack,
1598 1608 Par_MarkRefsIntoAndScanClosure* cl):
1599 1609 _space(space),
1600 1610 _num_dirty_cards(0),
1601 1611 _scan_cl(collector, span, collector->ref_processor(), bit_map,
1602 1612 work_queue, revisit_stack, cl) { }
1603 1613
1604 1614 void do_MemRegion(MemRegion mr);
1605 1615 void set_space(CompactibleFreeListSpace* space) { _space = space; }
1606 1616 size_t num_dirty_cards() { return _num_dirty_cards; }
1607 1617 };
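// Usage sketch (hypothetical driver, for orientation only): the collector
// applies this closure to each dirty region it has gathered, then reads
// back the tally:
//   MarkFromDirtyCardsClosure mfdc(collector, span, space,
//                                  bit_map, mark_stack, revisit_stack, cl);
//   dirty_regions->iterate(&mfdc);          // calls do_MemRegion(mr)
//   size_t n = mfdc.num_dirty_cards();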
1608 1618
1609 1619 // This closure is used in the non-product build to check
1610 1620 // that there are no MemRegions with a certain property.
1611 1621 class FalseMemRegionClosure: public MemRegionClosure {
1612 1622 void do_MemRegion(MemRegion mr) {
1613 1623 guarantee(!mr.is_empty(), "Shouldn't be empty");
1614 1624 guarantee(false, "Should never be here");
1615 1625 }
1616 1626 };
1617 1627
1618 1628 // This closure is used during the precleaning phase
1619 1629 // to "carefully" rescan marked objects on dirty cards.
1620 1630 // It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
1621 1631 // to accomplish some of its work.
1622 1632 class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
1623 1633 CMSCollector* _collector;
1624 1634 MemRegion _span;
1625 1635 bool _yield;
1626 1636 Mutex* _freelistLock;
1627 1637 CMSBitMap* _bitMap;
1628 1638 CMSMarkStack* _markStack;
1629 1639 MarkRefsIntoAndScanClosure* _scanningClosure;
1630 1640
1631 1641 public:
1632 1642 ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
1633 1643 MemRegion span,
1634 1644 CMSBitMap* bitMap,
1635 1645 CMSMarkStack* markStack,
1636 1646 CMSMarkStack* revisitStack,
1637 1647 MarkRefsIntoAndScanClosure* cl,
1638 1648 bool should_yield):
1639 1649 _collector(collector),
1640 1650 _span(span),
1641 1651 _yield(should_yield),
1642 1652 _bitMap(bitMap),
1643 1653 _markStack(markStack),
1644 1654 _scanningClosure(cl) {
1645 1655 }
1646 1656
1647 1657 void do_object(oop p) {
1648 1658 guarantee(false, "call do_object_careful instead");
1649 1659 }
1650 1660
1651 1661 size_t do_object_careful(oop p) {
1652 1662 guarantee(false, "Unexpected caller");
1653 1663 return 0;
1654 1664 }
1655 1665
1656 1666 size_t do_object_careful_m(oop p, MemRegion mr);
1657 1667
1658 1668 void setFreelistLock(Mutex* m) {
1659 1669 _freelistLock = m;
1660 1670 _scanningClosure->set_freelistLock(m);
1661 1671 }
1662 1672
1663 1673 private:
1664 1674 inline bool do_yield_check();
1665 1675
1666 1676 void do_yield_work();
1667 1677 };
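// Contract note (inferred from the guarantees above, not stated in this
// header): callers are expected to use do_object_careful_m(p, mr), which
// presumably returns the size of the object scanned, or 0 when the object
// could not be scanned to completion and the caller must retry; the other
// two entry points exist only to satisfy the ObjectClosureCareful
// interface and to trap misuse.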
1668 1678
1669 1679 class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
1670 1680 CMSCollector* _collector;
1671 1681 MemRegion _span;
1672 1682 bool _yield;
1673 1683 CMSBitMap* _bit_map;
1674 1684 CMSMarkStack* _mark_stack;
1675 1685 PushAndMarkClosure* _scanning_closure;
1676 1686 unsigned int _before_count;
1677 1687
1678 1688 public:
1679 1689 SurvivorSpacePrecleanClosure(CMSCollector* collector,
1680 1690 MemRegion span,
1681 1691 CMSBitMap* bit_map,
1682 1692 CMSMarkStack* mark_stack,
1683 1693 PushAndMarkClosure* cl,
1684 1694 unsigned int before_count,
1685 1695 bool should_yield):
1686 1696 _collector(collector),
1687 1697 _span(span),
1688 1698 _yield(should_yield),
1689 1699 _bit_map(bit_map),
1690 1700 _mark_stack(mark_stack),
1691 1701 _scanning_closure(cl),
1692 1702 _before_count(before_count)
1693 1703 { }
1694 1704
1695 1705 void do_object(oop p) {
1696 1706 guarantee(false, "call do_object_careful instead");
1697 1707 }
1698 1708
1699 1709 size_t do_object_careful(oop p);
1700 1710
1701 1711 size_t do_object_careful_m(oop p, MemRegion mr) {
1702 1712 guarantee(false, "Unexpected caller");
1703 1713 return 0;
1704 1714 }
1705 1715
1706 1716 private:
1707 1717 inline void do_yield_check();
1708 1718 void do_yield_work();
1709 1719 };
1710 1720
1711 1721 // This closure is used to accomplish the sweeping work
1712 1722 // after the second checkpoint but before the concurrent reset
1713 1723 // phase.
1714 1724 //
1715 1725 // Terminology
1716 1726 // left hand chunk (LHC) - block of one or more chunks currently being
1717 1727 // coalesced. The LHC is available for coalescing with a new chunk.
1718 1728 // right hand chunk (RHC) - block that is currently being swept that is
1719 1729 // free or garbage that can be coalesced with the LHC.
1720 1730 // _inFreeRange is true if there is currently a LHC
1721 1731 // _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
1722 1732 // _freeRangeInFreeLists is true if the LHC is in the free lists.
1723 1733 // _freeFinger is the address of the current LHC
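// Worked example (illustrative only): suppose the sweep finds a free
// chunk F and, immediately to its right, a garbage block G.
//   1. F opens a free range: _inFreeRange = true and _freeFinger points
//      at F; since F is still in the free lists, _freeRangeInFreeLists
//      starts out true.
//   2. G is the RHC: it is coalesced into the LHC, F is removed from the
//      free lists to permit the merge, and _lastFreeRangeCoalesced is set.
//   3. When the range is closed (e.g. a live object or _limit is
//      reached), the combined chunk is returned to the free lists via
//      flush_cur_free_chunk().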
1724 1734 class SweepClosure: public BlkClosureCareful {
1725 1735 CMSCollector* _collector; // collector doing the work
1726 1736 ConcurrentMarkSweepGeneration* _g; // Generation being swept
1727 1737 CompactibleFreeListSpace* _sp; // Space being swept
1728 1738 HeapWord* _limit; // the address at or above which the sweep should stop
1729 1739 // because we do not expect blocks that are newly garbage
1730 1740 // to be eligible for sweeping past that address.
1731 1741 Mutex* _freelistLock; // Free list lock (in space)
1732 1742 CMSBitMap* _bitMap; // Marking bit map (in
1733 1743 // generation)
1734 1744 bool _inFreeRange; // Indicates if we are in the
1735 1745 // midst of a free run
1736 1746 bool _freeRangeInFreeLists;
1737 1747 // Often, we have just found
1738 1748 // a free chunk and started
1739 1749 // a new free range; we do not
1740 1750 // eagerly remove this chunk from
1741 1751 // the free lists unless there is
1742 1752 // a possibility of coalescing.
1743 1753 // When true, this flag indicates
1744 1754 // that the _freeFinger below
1745 1755 // points to a potentially free chunk
1746 1756 // that may still be in the free lists
1747 1757 bool _lastFreeRangeCoalesced;
1748 1758 // free range contains chunks
1749 1759 // coalesced
1750 1760 bool _yield;
1751 1761 // Whether sweeping should be
1752 1762 // done with yields. For instance
1753 1763 // when done by the foreground
1754 1764 // collector we shouldn't yield.
1755 1765 HeapWord* _freeFinger; // When _inFreeRange is set, the
1756 1766 // pointer to the "left hand
1757 1767 // chunk"
1758 1768 size_t _freeRangeSize;
1759 1769 // When _inFreeRange is set, this
1760 1770 // indicates the accumulated size
1761 1771 // of the "left hand chunk"
1762 1772 NOT_PRODUCT(
1763 1773 size_t _numObjectsFreed;
1764 1774 size_t _numWordsFreed;
1765 1775 size_t _numObjectsLive;
1766 1776 size_t _numWordsLive;
1767 1777 size_t _numObjectsAlreadyFree;
1768 1778 size_t _numWordsAlreadyFree;
1769 1779 FreeChunk* _last_fc;
1770 1780 )
1771 1781 private:
1772 1782 // Code that is common to a free chunk or garbage when
1773 1783 // encountered during sweeping.
1774 1784 void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
1775 1785 // Process a free chunk during sweeping.
1776 1786 void do_already_free_chunk(FreeChunk *fc);
1777 1787 // Work method called when processing an already free or a
1778 1788 // freshly garbage chunk to do a lookahead and possibly a
1779 1789 // preemptive flush if crossing over _limit.
1780 1790 void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
1781 1791 // Process a garbage chunk during sweeping.
1782 1792 size_t do_garbage_chunk(FreeChunk *fc);
1783 1793 // Process a live chunk during sweeping.
1784 1794 size_t do_live_chunk(FreeChunk* fc);
1785 1795
1786 1796 // Accessors.
1787 1797 HeapWord* freeFinger() const { return _freeFinger; }
1788 1798 void set_freeFinger(HeapWord* v) { _freeFinger = v; }
1789 1799 bool inFreeRange() const { return _inFreeRange; }
1790 1800 void set_inFreeRange(bool v) { _inFreeRange = v; }
1791 1801 bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; }
1792 1802 void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
1793 1803 bool freeRangeInFreeLists() const { return _freeRangeInFreeLists; }
1794 1804 void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }
1795 1805
1796 1806 // Initialize a free range.
1797 1807 void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
1798 1808 // Return this chunk to the free lists.
1799 1809 void flush_cur_free_chunk(HeapWord* chunk, size_t size);
1800 1810
1801 1811 // Check if we should yield and do so when necessary.
1802 1812 inline void do_yield_check(HeapWord* addr);
1803 1813
1804 1814 // Yield
1805 1815 void do_yield_work(HeapWord* addr);
1806 1816
1807 1817 // Debugging/Printing
1808 1818 void print_free_block_coalesced(FreeChunk* fc) const;
1809 1819
1810 1820 public:
1811 1821 SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
1812 1822 CMSBitMap* bitMap, bool should_yield);
1813 1823 ~SweepClosure() PRODUCT_RETURN;
1814 1824
1815 1825 size_t do_blk_careful(HeapWord* addr);
1816 1826 void print() const { print_on(tty); }
1817 1827 void print_on(outputStream *st) const;
1818 1828 };
1819 1829
1820 1830 // Closures related to weak references processing
1821 1831
1822 1832 // During CMS' weak reference processing, this is a
1823 1833 // work-routine/closure used to complete transitive
1824 1834 // marking of objects as live after a certain point
1825 1835 // at which an initial set has been completely accumulated.
1826 1836 // This closure is currently used both during the final
1827 1837 // remark stop-world phase, as well as during the concurrent
1828 1838 // precleaning of the discovered reference lists.
1829 1839 class CMSDrainMarkingStackClosure: public VoidClosure {
1830 1840 CMSCollector* _collector;
1831 1841 MemRegion _span;
1832 1842 CMSMarkStack* _mark_stack;
1833 1843 CMSBitMap* _bit_map;
1834 1844 CMSKeepAliveClosure* _keep_alive;
1835 1845 bool _concurrent_precleaning;
1836 1846 public:
1837 1847 CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
1838 1848 CMSBitMap* bit_map, CMSMarkStack* mark_stack,
1839 1849 CMSKeepAliveClosure* keep_alive,
1840 1850 bool cpc):
1841 1851 _collector(collector),
1842 1852 _span(span),
1843 1853 _bit_map(bit_map),
1844 1854 _mark_stack(mark_stack),
1845 1855 _keep_alive(keep_alive),
1846 1856 _concurrent_precleaning(cpc) {
1847 1857 assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
1848 1858 "Mismatch");
1849 1859 }
1850 1860
1851 1861 void do_void();
1852 1862 };
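// Assumed shape of the drain (a sketch, not the .cpp definition):
// do_void() pops the mark stack and traces each object, letting
// _keep_alive mark and push anything newly reachable:
//   while (!_mark_stack->isEmpty()) {
//     oop obj = _mark_stack->pop();
//     obj->oop_iterate(_keep_alive);  // may push further work
//   }
// iterating until the stack is empty, i.e. marking is transitively
// complete.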
1853 1863
1854 1864 // A parallel version of CMSDrainMarkingStackClosure above.
1855 1865 class CMSParDrainMarkingStackClosure: public VoidClosure {
1856 1866 CMSCollector* _collector;
1857 1867 MemRegion _span;
1858 1868 OopTaskQueue* _work_queue;
1859 1869 CMSBitMap* _bit_map;
1860 1870 CMSInnerParMarkAndPushClosure _mark_and_push;
1861 1871
1862 1872 public:
1863 1873 CMSParDrainMarkingStackClosure(CMSCollector* collector,
1864 1874 MemRegion span, CMSBitMap* bit_map,
1865 1875 CMSMarkStack* revisit_stack,
1866 1876 OopTaskQueue* work_queue):
1867 1877 _collector(collector),
1868 1878 _span(span),
1869 1879 _bit_map(bit_map),
1870 1880 _work_queue(work_queue),
1871 1881 _mark_and_push(collector, span, bit_map, revisit_stack, work_queue) { }
1872 1882
1873 1883 public:
1874 1884 void trim_queue(uint max);
1875 1885 void do_void();
1876 1886 };
1877 1887
1878 1888 // Allow yielding or short-circuiting of reference list
1879 1889 // precleaning work.
1880 1890 class CMSPrecleanRefsYieldClosure: public YieldClosure {
1881 1891 CMSCollector* _collector;
1882 1892 void do_yield_work();
1883 1893 public:
1884 1894 CMSPrecleanRefsYieldClosure(CMSCollector* collector):
1885 1895 _collector(collector) {}
1886 1896 virtual bool should_return();
1887 1897 };
1888 1898
1889 1899
1890 1900 // Convenience class that locks free list locks for given CMS collector
1891 1901 class FreelistLocker: public StackObj {
1892 1902 private:
1893 1903 CMSCollector* _collector;
1894 1904 public:
1895 1905 FreelistLocker(CMSCollector* collector):
1896 1906 _collector(collector) {
1897 1907 _collector->getFreelistLocks();
1898 1908 }
1899 1909
1900 1910 ~FreelistLocker() {
1901 1911 _collector->releaseFreelistLocks();
1902 1912 }
1903 1913 };
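// Usage sketch (hypothetical scope): being a StackObj, the locker ties
// acquisition and release to scope entry and exit, so every exit path,
// including early returns, releases the locks:
//   {
//     FreelistLocker fl(collector);
//     // ... manipulate the free lists safely ...
//   }  // ~FreelistLocker() calls releaseFreelistLocks() here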
1904 1914
1905 1915 // Mark all dead objects in a given space.
1906 1916 class MarkDeadObjectsClosure: public BlkClosure {
1907 1917 const CMSCollector* _collector;
1908 1918 const CompactibleFreeListSpace* _sp;
1909 1919 CMSBitMap* _live_bit_map;
1910 1920 CMSBitMap* _dead_bit_map;
1911 1921 public:
1912 1922 MarkDeadObjectsClosure(const CMSCollector* collector,
1913 1923 const CompactibleFreeListSpace* sp,
1914 1924 CMSBitMap *live_bit_map,
1915 1925 CMSBitMap *dead_bit_map) :
1916 1926 _collector(collector),
1917 1927 _sp(sp),
1918 1928 _live_bit_map(live_bit_map),
1919 1929 _dead_bit_map(dead_bit_map) {}
1920 1930 size_t do_blk(HeapWord* addr);
1921 1931 };
1922 1932
1923 1933 class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {
1924 1934
1925 1935 public:
1926 1936 TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
1927 1937 };
1928 1938
1929 1939
1930 1940 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP