rev 3900 : 8004816: G1: Kitchensink failures after marking stack changes
Summary: Reset the marking state, including the mark stack overflow flag, in the event of a marking stack overflow during serial reference processing.
Reviewed-by: jmasa
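
The summary describes a state-management fix: during the serial reference processing done at remark, draining references can overflow the global mark stack, and the overflow flag has to be cleared together with the rest of the marking state before marking is restarted. The following is a minimal, self-contained C++ sketch of that interplay; MarkStackSketch, MarkingSketch and serial_reference_processing() are illustrative stand-ins and not the HotSpot code under review.

#include <cstddef>

struct MarkStackSketch {
  size_t _index    = 0;
  size_t _capacity = 4;
  bool   _overflow = false;

  // Mirrors the CMMarkStack::push() contract: on overflow, drop the entry
  // and record that an overflow happened.
  void push() {
    if (_index == _capacity) { _overflow = true; return; }
    ++_index;
  }
  bool overflow() const { return _overflow; }
  // Mirrors CMMarkStack::setEmpty(): reset the index and the overflow flag.
  void set_empty()      { _index = 0; _overflow = false; }
};

struct MarkingSketch {
  MarkStackSketch _mark_stack;
  bool            _has_overflown = false;

  // Analogue of reset_marking_state(): put the global marking data back into
  // a sane state, including the mark stack overflow flag, before restarting.
  void reset_marking_state() {
    _mark_stack.set_empty();
    _has_overflown = false;
  }

  // Analogue of the serial reference-processing drain during remark: pushes
  // may overflow the stack, and that overflow must not be left behind.
  void serial_reference_processing() {
    for (int i = 0; i < 10; ++i) {
      _mark_stack.push();
      if (_mark_stack.overflow()) {
        _has_overflown = true;
      }
    }
    if (_has_overflown) {
      reset_marking_state();  // the point of the fix: no stale overflow state
    }
  }
};

int main() {
  MarkingSketch cm;
  cm.serial_reference_processing();
  return cm._has_overflown ? 1 : 0;  // 0 means the state was reset as expected
}

In the sketch, leaving _has_overflown or the stack's _overflow flag set after the drain is the stale state the fix guards against; reset_marking_state() clears both before any subsequent marking attempt.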
--- old/src/share/vm/gc_implementation/g1/concurrentMark.hpp
+++ new/src/share/vm/gc_implementation/g1/concurrentMark.hpp
1 1 /*
2 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
26 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
27 27
28 28 #include "gc_implementation/g1/heapRegionSets.hpp"
29 29 #include "utilities/taskqueue.hpp"
30 30
31 31 class G1CollectedHeap;
32 32 class CMTask;
33 33 typedef GenericTaskQueue<oop, mtGC> CMTaskQueue;
34 34 typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;
35 35
36 36 // Closure used by CM during concurrent reference discovery
37 37 // and reference processing (during remarking) to determine
38 38 // if a particular object is alive. It is primarily used
39 39 // to determine if referents of discovered reference objects
40 40 // are alive. An instance is also embedded into the
41 41 // reference processor as the _is_alive_non_header field
42 42 class G1CMIsAliveClosure: public BoolObjectClosure {
43 43 G1CollectedHeap* _g1;
44 44 public:
45 45 G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
46 46
47 47 void do_object(oop obj) {
48 48 ShouldNotCallThis();
49 49 }
50 50 bool do_object_b(oop obj);
51 51 };
52 52
53 53 // A generic CM bit map. This is essentially a wrapper around the BitMap
54 54 // class, with one bit per (1<<_shifter) HeapWords.
55 55
56 56 class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
57 57 protected:
58 58 HeapWord* _bmStartWord; // base address of range covered by map
59 59 size_t _bmWordSize; // map size (in #HeapWords covered)
60 60 const int _shifter; // map to char or bit
61 61 VirtualSpace _virtual_space; // underlying the bit map
62 62 BitMap _bm; // the bit map itself
63 63
64 64 public:
65 65 // constructor
66 66 CMBitMapRO(int shifter);
67 67
68 68 enum { do_yield = true };
69 69
70 70 // inquiries
71 71 HeapWord* startWord() const { return _bmStartWord; }
72 72 size_t sizeInWords() const { return _bmWordSize; }
73 73 // the following is one past the last word in space
74 74 HeapWord* endWord() const { return _bmStartWord + _bmWordSize; }
75 75
76 76 // read marks
77 77
78 78 bool isMarked(HeapWord* addr) const {
79 79 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
80 80 "outside underlying space?");
81 81 return _bm.at(heapWordToOffset(addr));
82 82 }
83 83
84 84 // iteration
85 85 inline bool iterate(BitMapClosure* cl, MemRegion mr);
86 86 inline bool iterate(BitMapClosure* cl);
87 87
88 88 // Return the address corresponding to the next marked bit at or after
89 89 // "addr", and before "limit", if "limit" is non-NULL. If there is no
90 90 // such bit, returns "limit" if that is non-NULL, or else "endWord()".
91 91 HeapWord* getNextMarkedWordAddress(HeapWord* addr,
92 92 HeapWord* limit = NULL) const;
93 93 // Return the address corresponding to the next unmarked bit at or after
94 94 // "addr", and before "limit", if "limit" is non-NULL. If there is no
95 95 // such bit, returns "limit" if that is non-NULL, or else "endWord()".
96 96 HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
97 97 HeapWord* limit = NULL) const;
98 98
99 99 // conversion utilities
100 100 // XXX Fix these so that offsets are size_t's...
101 101 HeapWord* offsetToHeapWord(size_t offset) const {
102 102 return _bmStartWord + (offset << _shifter);
103 103 }
104 104 size_t heapWordToOffset(HeapWord* addr) const {
105 105 return pointer_delta(addr, _bmStartWord) >> _shifter;
106 106 }
107 107 int heapWordDiffToOffsetDiff(size_t diff) const;
108 108 HeapWord* nextWord(HeapWord* addr) {
109 109 return offsetToHeapWord(heapWordToOffset(addr) + 1);
110 110 }
111 111
112 112 // debugging
113 113 NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
114 114 };
115 115
116 116 class CMBitMap : public CMBitMapRO {
117 117
118 118 public:
119 119 // constructor
120 120 CMBitMap(int shifter) :
121 121 CMBitMapRO(shifter) {}
122 122
123 123 // Allocates the back store for the marking bitmap
124 124 bool allocate(ReservedSpace heap_rs);
125 125
126 126 // write marks
127 127 void mark(HeapWord* addr) {
128 128 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
129 129 "outside underlying space?");
130 130 _bm.set_bit(heapWordToOffset(addr));
131 131 }
132 132 void clear(HeapWord* addr) {
133 133 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
134 134 "outside underlying space?");
135 135 _bm.clear_bit(heapWordToOffset(addr));
136 136 }
137 137 bool parMark(HeapWord* addr) {
138 138 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
139 139 "outside underlying space?");
140 140 return _bm.par_set_bit(heapWordToOffset(addr));
141 141 }
142 142 bool parClear(HeapWord* addr) {
143 143 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
144 144 "outside underlying space?");
145 145 return _bm.par_clear_bit(heapWordToOffset(addr));
146 146 }
147 147 void markRange(MemRegion mr);
148 148 void clearAll();
149 149 void clearRange(MemRegion mr);
150 150
151 151 // Starting at the bit corresponding to "addr" (inclusive), find the next
152 152 // "1" bit, if any. This bit starts some run of consecutive "1"'s; find
153 153 // the end of this run (stopping at "end_addr"). Return the MemRegion
154 154 // covering from the start of the region corresponding to the first bit
155 155 // of the run to the end of the region corresponding to the last bit of
156 156 // the run. If there is no "1" bit at or after "addr", return an empty
157 157 // MemRegion.
158 158 MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
159 159 };
160 160
161 161 // Represents a marking stack used by ConcurrentMarking in the G1 collector.
162 162 class CMMarkStack VALUE_OBJ_CLASS_SPEC {
163 163 VirtualSpace _virtual_space; // Underlying backing store for actual stack
164 164 ConcurrentMark* _cm;
165 165 oop* _base; // bottom of stack
166 166 jint _index; // one more than last occupied index
167 167 jint _capacity; // max #elements
168 168 jint _saved_index; // value of _index saved at start of GC
169 169 NOT_PRODUCT(jint _max_depth;) // max depth plumbed during run
170 170
171 171 bool _overflow;
172 172 bool _should_expand;
173 173 DEBUG_ONLY(bool _drain_in_progress;)
174 174 DEBUG_ONLY(bool _drain_in_progress_yields;)
175 175
176 176 public:
177 177 CMMarkStack(ConcurrentMark* cm);
178 178 ~CMMarkStack();
179 179
180 180 #ifndef PRODUCT
181 181 jint max_depth() const {
182 182 return _max_depth;
183 183 }
184 184 #endif
185 185
186 186 bool allocate(size_t capacity);
187 187
188 188 oop pop() {
189 189 if (!isEmpty()) {
190 190 return _base[--_index] ;
191 191 }
192 192 return NULL;
193 193 }
194 194
195 195 // If overflow happens, don't do the push, and record the overflow.
196 196 // *Requires* that "ptr" is already marked.
197 197 void push(oop ptr) {
198 198 if (isFull()) {
199 199 // Record overflow.
200 200 _overflow = true;
201 201 return;
202 202 } else {
203 203 _base[_index++] = ptr;
204 204 NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
205 205 }
206 206 }
207 207 // Non-block impl. Note: concurrency is allowed only with other
208 208 // "par_push" operations, not with "pop" or "drain". We would need
209 209 // parallel versions of them if such concurrency was desired.
210 210 void par_push(oop ptr);
211 211
212 212 // Pushes the first "n" elements of "ptr_arr" on the stack.
213 213 // Non-block impl. Note: concurrency is allowed only with other
214 214 // "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
215 215 void par_adjoin_arr(oop* ptr_arr, int n);
216 216
217 217 // Pushes the first "n" elements of "ptr_arr" on the stack.
218 218 // Locking impl: concurrency is allowed only with
219 219 // "par_push_arr" and/or "par_pop_arr" operations, which use the same
220 220 // locking strategy.
221 221 void par_push_arr(oop* ptr_arr, int n);
222 222
223 223 // If returns false, the array was empty. Otherwise, removes up to "max"
224 224 // elements from the stack, and transfers them to "ptr_arr" in an
225 225 // unspecified order. The actual number transferred is given in "n" ("n
226 226 // == 0" is deliberately redundant with the return value.) Locking impl:
227 227 // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
228 228 // operations, which use the same locking strategy.
229 229 bool par_pop_arr(oop* ptr_arr, int max, int* n);
230 230
231 231 // Drain the mark stack, applying the given closure to all fields of
232 232 // objects on the stack. (That is, continue until the stack is empty,
233 233 // even if closure applications add entries to the stack.) The "bm"
234 234 // argument, if non-null, may be used to verify that only marked objects
235 235 // are on the mark stack. If "yield_after" is "true", then the
236 236 // concurrent marker performing the drain offers to yield after
237 237 // processing each object. If a yield occurs, stops the drain operation
238 238 // and returns false. Otherwise, returns true.
239 239 template<class OopClosureClass>
240 240 bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);
241 241
242 242 bool isEmpty() { return _index == 0; }
243 243 bool isFull() { return _index == _capacity; }
244 244 int maxElems() { return _capacity; }
245 245
246 246 bool overflow() { return _overflow; }
247 247 void clear_overflow() { _overflow = false; }
248 248
249 249 bool should_expand() const { return _should_expand; }
250 250 void set_should_expand();
251 251
252 252 // Expand the stack, typically in response to an overflow condition
253 253 void expand();
254 254
255 255 int size() { return _index; }
256 256
257 257 void setEmpty() { _index = 0; clear_overflow(); }
258 258
259 259 // Record the current index.
260 260 void note_start_of_gc();
261 261
262 262 // Make sure that we have not added any entries to the stack during GC.
263 263 void note_end_of_gc();
264 264
265 265 // iterate over the oops in the mark stack, up to the bound recorded via
266 266 // the call above.
267 267 void oops_do(OopClosure* f);
268 268 };
269 269
270 270 class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
271 271 private:
272 272 #ifndef PRODUCT
273 273 uintx _num_remaining;
274 274 bool _force;
275 275 #endif // !defined(PRODUCT)
276 276
277 277 public:
278 278 void init() PRODUCT_RETURN;
279 279 void update() PRODUCT_RETURN;
280 280 bool should_force() PRODUCT_RETURN_( return false; );
281 281 };
282 282
283 283 // this will enable a variety of different statistics per GC task
284 284 #define _MARKING_STATS_ 0
285 285 // this will enable the higher verbose levels
286 286 #define _MARKING_VERBOSE_ 0
287 287
288 288 #if _MARKING_STATS_
289 289 #define statsOnly(statement) \
290 290 do { \
291 291 statement ; \
292 292 } while (0)
293 293 #else // _MARKING_STATS_
294 294 #define statsOnly(statement) \
295 295 do { \
296 296 } while (0)
297 297 #endif // _MARKING_STATS_
298 298
299 299 typedef enum {
300 300 no_verbose = 0, // verbose turned off
301 301 stats_verbose, // only prints stats at the end of marking
302 302 low_verbose, // low verbose, mostly per region and per major event
303 303 medium_verbose, // a bit more detailed than low
304 304 high_verbose // per object verbose
305 305 } CMVerboseLevel;
306 306
307 307 class YoungList;
308 308
309 309 // Root Regions are regions that are not empty at the beginning of a
310 310 // marking cycle and which we might collect during an evacuation pause
311 311 // while the cycle is active. Given that, during evacuation pauses, we
312 312 // do not copy objects that are explicitly marked, what we have to do
313 313 // for the root regions is to scan them and mark all objects reachable
314 314 // from them. According to the SATB assumptions, we only need to visit
315 315 // each object once during marking. So, as long as we finish this scan
316 316 // before the next evacuation pause, we can copy the objects from the
317 317 // root regions without having to mark them or do anything else to them.
318 318 //
319 319 // Currently, we only support root region scanning once (at the start
320 320 // of the marking cycle) and the root regions are all the survivor
321 321 // regions populated during the initial-mark pause.
322 322 class CMRootRegions VALUE_OBJ_CLASS_SPEC {
323 323 private:
324 324 YoungList* _young_list;
325 325 ConcurrentMark* _cm;
326 326
327 327 volatile bool _scan_in_progress;
328 328 volatile bool _should_abort;
329 329 HeapRegion* volatile _next_survivor;
330 330
331 331 public:
332 332 CMRootRegions();
333 333 // We actually do most of the initialization in this method.
334 334 void init(G1CollectedHeap* g1h, ConcurrentMark* cm);
335 335
336 336 // Reset the claiming / scanning of the root regions.
337 337 void prepare_for_scan();
338 338
339 339 // Forces get_next() to return NULL so that the iteration aborts early.
340 340 void abort() { _should_abort = true; }
341 341
 342  342   // Return true if the CM threads are actively scanning root regions,
343 343 // false otherwise.
344 344 bool scan_in_progress() { return _scan_in_progress; }
345 345
346 346 // Claim the next root region to scan atomically, or return NULL if
347 347 // all have been claimed.
348 348 HeapRegion* claim_next();
349 349
350 350 // Flag that we're done with root region scanning and notify anyone
351 351 // who's waiting on it. If aborted is false, assume that all regions
352 352 // have been claimed.
353 353 void scan_finished();
354 354
355 355 // If CM threads are still scanning root regions, wait until they
356 356 // are done. Return true if we had to wait, false otherwise.
357 357 bool wait_until_scan_finished();
358 358 };
359 359
360 360 class ConcurrentMarkThread;
361 361
362 362 class ConcurrentMark: public CHeapObj<mtGC> {
363 363 friend class CMMarkStack;
364 364 friend class ConcurrentMarkThread;
365 365 friend class CMTask;
366 366 friend class CMBitMapClosure;
367 367 friend class CMGlobalObjectClosure;
368 368 friend class CMRemarkTask;
369 369 friend class CMConcurrentMarkingTask;
370 370 friend class G1ParNoteEndTask;
371 371 friend class CalcLiveObjectsClosure;
372 372 friend class G1CMRefProcTaskProxy;
373 373 friend class G1CMRefProcTaskExecutor;
374 374 friend class G1CMParKeepAliveAndDrainClosure;
375 375 friend class G1CMParDrainMarkingStackClosure;
376 376
377 377 protected:
378 378 ConcurrentMarkThread* _cmThread; // the thread doing the work
379 379 G1CollectedHeap* _g1h; // the heap.
380 380 uint _parallel_marking_threads; // the number of marking
 381  381                                            // threads we're using
382 382 uint _max_parallel_marking_threads; // max number of marking
383 383 // threads we'll ever use
384 384 double _sleep_factor; // how much we have to sleep, with
385 385 // respect to the work we just did, to
386 386 // meet the marking overhead goal
387 387 double _marking_task_overhead; // marking target overhead for
388 388 // a single task
389 389
390 390 // same as the two above, but for the cleanup task
391 391 double _cleanup_sleep_factor;
392 392 double _cleanup_task_overhead;
393 393
394 394 FreeRegionList _cleanup_list;
395 395
396 396 // Concurrent marking support structures
397 397 CMBitMap _markBitMap1;
398 398 CMBitMap _markBitMap2;
399 399 CMBitMapRO* _prevMarkBitMap; // completed mark bitmap
400 400 CMBitMap* _nextMarkBitMap; // under-construction mark bitmap
401 401
402 402 BitMap _region_bm;
403 403 BitMap _card_bm;
404 404
405 405 // Heap bounds
406 406 HeapWord* _heap_start;
407 407 HeapWord* _heap_end;
408 408
409 409 // Root region tracking and claiming.
410 410 CMRootRegions _root_regions;
411 411
412 412 // For gray objects
413 413 CMMarkStack _markStack; // Grey objects behind global finger.
414 414 HeapWord* volatile _finger; // the global finger, region aligned,
415 415 // always points to the end of the
416 416 // last claimed region
417 417
418 418 // marking tasks
 419  419   uint                    _max_worker_id; // maximum worker id
420 420 uint _active_tasks; // task num currently active
421 421 CMTask** _tasks; // task queue array (max_worker_id len)
422 422 CMTaskQueueSet* _task_queues; // task queue set
423 423 ParallelTaskTerminator _terminator; // for termination
424 424
425 425 // Two sync barriers that are used to synchronise tasks when an
426 426 // overflow occurs. The algorithm is the following. All tasks enter
427 427 // the first one to ensure that they have all stopped manipulating
428 428 // the global data structures. After they exit it, they re-initialise
429 429 // their data structures and task 0 re-initialises the global data
430 430 // structures. Then, they enter the second sync barrier. This
 431  431   // ensures that no task starts doing work before all data
432 432 // structures (local and global) have been re-initialised. When they
433 433 // exit it, they are free to start working again.
434 434 WorkGangBarrierSync _first_overflow_barrier_sync;
435 435 WorkGangBarrierSync _second_overflow_barrier_sync;
436 436
437 437 // this is set by any task, when an overflow on the global data
438 438 // structures is detected.
439 439 volatile bool _has_overflown;
440 440 // true: marking is concurrent, false: we're in remark
441 441 volatile bool _concurrent;
442 442 // set at the end of a Full GC so that marking aborts
443 443 volatile bool _has_aborted;
444 444
445 445 // used when remark aborts due to an overflow to indicate that
446 446 // another concurrent marking phase should start
447 447 volatile bool _restart_for_overflow;
448 448
449 449 // This is true from the very start of concurrent marking until the
450 450 // point when all the tasks complete their work. It is really used
451 451 // to determine the points between the end of concurrent marking and
452 452 // time of remark.
453 453 volatile bool _concurrent_marking_in_progress;
454 454
455 455 // verbose level
456 456 CMVerboseLevel _verbose_level;
457 457
458 458 // All of these times are in ms.
459 459 NumberSeq _init_times;
460 460 NumberSeq _remark_times;
461 461 NumberSeq _remark_mark_times;
462 462 NumberSeq _remark_weak_ref_times;
463 463 NumberSeq _cleanup_times;
464 464 double _total_counting_time;
465 465 double _total_rs_scrub_time;
466 466
467 467 double* _accum_task_vtime; // accumulated task vtime
468 468
469 469 FlexibleWorkGang* _parallel_workers;
470 470
471 471 ForceOverflowSettings _force_overflow_conc;
472 472 ForceOverflowSettings _force_overflow_stw;
473 473
474 474 void weakRefsWork(bool clear_all_soft_refs);
475 475
476 476 void swapMarkBitMaps();
477 477
478 478 // It resets the global marking data structures, as well as the
479 479 // task local ones; should be called during initial mark.
480 480 void reset();
481 - // It resets all the marking data structures.
482 - void clear_marking_state(bool clear_overflow = true);
481 +
482 + // Resets all the marking data structures. Called when we have to restart
483 + // marking or when marking completes (via set_non_marking_state below).
484 + void reset_marking_state(bool clear_overflow = true);
483 485
484 - // It should be called to indicate which phase we're in (concurrent
485 - // mark or remark) and how many threads are currently active.
486 - void set_phase(uint active_tasks, bool concurrent);
487 486 // We do this after we're done with marking so that the marking data
488 487 // structures are initialised to a sensible and predictable state.
489 488 void set_non_marking_state();
490 489
490 + // It should be called to indicate which phase we're in (concurrent
491 + // mark or remark) and how many threads are currently active.
492 + void set_phase(uint active_tasks, bool concurrent);
493 +
491 494 // prints all gathered CM-related statistics
492 495 void print_stats();
493 496
494 497 bool cleanup_list_is_empty() {
495 498 return _cleanup_list.is_empty();
496 499 }
497 500
498 501 // accessor methods
499 502 uint parallel_marking_threads() { return _parallel_marking_threads; }
500 503 uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
501 504 double sleep_factor() { return _sleep_factor; }
502 505 double marking_task_overhead() { return _marking_task_overhead;}
503 506 double cleanup_sleep_factor() { return _cleanup_sleep_factor; }
504 507 double cleanup_task_overhead() { return _cleanup_task_overhead;}
505 508
506 509 HeapWord* finger() { return _finger; }
507 510 bool concurrent() { return _concurrent; }
508 511 uint active_tasks() { return _active_tasks; }
509 512 ParallelTaskTerminator* terminator() { return &_terminator; }
510 513
511 514 // It claims the next available region to be scanned by a marking
512 515 // task/thread. It might return NULL if the next region is empty or
513 516 // we have run out of regions. In the latter case, out_of_regions()
514 517 // determines whether we've really run out of regions or the task
515 518 // should call claim_region() again. This might seem a bit
516 519 // awkward. Originally, the code was written so that claim_region()
517 520 // either successfully returned with a non-empty region or there
518 521 // were no more regions to be claimed. The problem with this was
519 522 // that, in certain circumstances, it iterated over large chunks of
520 523 // the heap finding only empty regions and, while it was working, it
 521  524   // was preventing the calling task from calling its regular clock
522 525 // method. So, this way, each task will spend very little time in
523 526 // claim_region() and is allowed to call the regular clock method
524 527 // frequently.
525 528 HeapRegion* claim_region(uint worker_id);
526 529
527 530 // It determines whether we've run out of regions to scan.
528 531 bool out_of_regions() { return _finger == _heap_end; }
529 532
530 533 // Returns the task with the given id
531 534 CMTask* task(int id) {
532 535 assert(0 <= id && id < (int) _active_tasks,
533 536 "task id not within active bounds");
534 537 return _tasks[id];
535 538 }
536 539
537 540 // Returns the task queue with the given id
538 541 CMTaskQueue* task_queue(int id) {
539 542 assert(0 <= id && id < (int) _active_tasks,
540 543 "task queue id not within active bounds");
541 544 return (CMTaskQueue*) _task_queues->queue(id);
542 545 }
543 546
544 547 // Returns the task queue set
545 548 CMTaskQueueSet* task_queues() { return _task_queues; }
546 549
547 550 // Access / manipulation of the overflow flag which is set to
548 551 // indicate that the global stack has overflown
549 552 bool has_overflown() { return _has_overflown; }
550 553 void set_has_overflown() { _has_overflown = true; }
551 554 void clear_has_overflown() { _has_overflown = false; }
552 555 bool restart_for_overflow() { return _restart_for_overflow; }
553 556
554 557 bool has_aborted() { return _has_aborted; }
555 558
556 559 // Methods to enter the two overflow sync barriers
557 560 void enter_first_sync_barrier(uint worker_id);
558 561 void enter_second_sync_barrier(uint worker_id);
559 562
560 563 ForceOverflowSettings* force_overflow_conc() {
561 564 return &_force_overflow_conc;
562 565 }
563 566
564 567 ForceOverflowSettings* force_overflow_stw() {
565 568 return &_force_overflow_stw;
566 569 }
567 570
568 571 ForceOverflowSettings* force_overflow() {
569 572 if (concurrent()) {
570 573 return force_overflow_conc();
571 574 } else {
572 575 return force_overflow_stw();
573 576 }
574 577 }
575 578
576 579 // Live Data Counting data structures...
577 580 // These data structures are initialized at the start of
578 581 // marking. They are written to while marking is active.
579 582 // They are aggregated during remark; the aggregated values
580 583 // are then used to populate the _region_bm, _card_bm, and
581 584 // the total live bytes, which are then subsequently updated
582 585 // during cleanup.
583 586
584 587 // An array of bitmaps (one bit map per task). Each bitmap
585 588 // is used to record the cards spanned by the live objects
586 589 // marked by that task/worker.
587 590 BitMap* _count_card_bitmaps;
588 591
589 592 // Used to record the number of marked live bytes
590 593 // (for each region, by worker thread).
591 594 size_t** _count_marked_bytes;
592 595
593 596 // Card index of the bottom of the G1 heap. Used for biasing indices into
594 597 // the card bitmaps.
595 598 intptr_t _heap_bottom_card_num;
596 599
597 600 // Set to true when initialization is complete
598 601 bool _completed_initialization;
599 602
600 603 public:
601 604 // Manipulation of the global mark stack.
602 605 // Notice that the first mark_stack_push is CAS-based, whereas the
603 606 // two below are Mutex-based. This is OK since the first one is only
604 607 // called during evacuation pauses and doesn't compete with the
605 608 // other two (which are called by the marking tasks during
606 609 // concurrent marking or remark).
607 610 bool mark_stack_push(oop p) {
608 611 _markStack.par_push(p);
609 612 if (_markStack.overflow()) {
610 613 set_has_overflown();
611 614 return false;
612 615 }
613 616 return true;
614 617 }
615 618 bool mark_stack_push(oop* arr, int n) {
616 619 _markStack.par_push_arr(arr, n);
617 620 if (_markStack.overflow()) {
618 621 set_has_overflown();
619 622 return false;
620 623 }
621 624 return true;
622 625 }
623 626 void mark_stack_pop(oop* arr, int max, int* n) {
624 627 _markStack.par_pop_arr(arr, max, n);
625 628 }
626 629 size_t mark_stack_size() { return _markStack.size(); }
627 630 size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
628 631 bool mark_stack_overflow() { return _markStack.overflow(); }
629 632 bool mark_stack_empty() { return _markStack.isEmpty(); }
630 633
631 634 CMRootRegions* root_regions() { return &_root_regions; }
632 635
633 636 bool concurrent_marking_in_progress() {
634 637 return _concurrent_marking_in_progress;
635 638 }
636 639 void set_concurrent_marking_in_progress() {
637 640 _concurrent_marking_in_progress = true;
638 641 }
639 642 void clear_concurrent_marking_in_progress() {
640 643 _concurrent_marking_in_progress = false;
641 644 }
642 645
643 646 void update_accum_task_vtime(int i, double vtime) {
644 647 _accum_task_vtime[i] += vtime;
645 648 }
646 649
647 650 double all_task_accum_vtime() {
648 651 double ret = 0.0;
649 652 for (uint i = 0; i < _max_worker_id; ++i)
650 653 ret += _accum_task_vtime[i];
651 654 return ret;
652 655 }
653 656
654 657 // Attempts to steal an object from the task queues of other tasks
655 658 bool try_stealing(uint worker_id, int* hash_seed, oop& obj) {
656 659 return _task_queues->steal(worker_id, hash_seed, obj);
657 660 }
658 661
659 662 ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
660 663 ~ConcurrentMark();
661 664
662 665 ConcurrentMarkThread* cmThread() { return _cmThread; }
663 666
664 667 CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
665 668 CMBitMap* nextMarkBitMap() const { return _nextMarkBitMap; }
666 669
667 670 // Returns the number of GC threads to be used in a concurrent
668 671 // phase based on the number of GC threads being used in a STW
669 672 // phase.
670 673 uint scale_parallel_threads(uint n_par_threads);
671 674
672 675 // Calculates the number of GC threads to be used in a concurrent phase.
673 676 uint calc_parallel_marking_threads();
674 677
675 678 // The following three are interaction between CM and
676 679 // G1CollectedHeap
677 680
678 681 // This notifies CM that a root during initial-mark needs to be
679 682 // grayed. It is MT-safe. word_size is the size of the object in
680 683 // words. It is passed explicitly as sometimes we cannot calculate
681 684 // it from the given object because it might be in an inconsistent
682 685 // state (e.g., in to-space and being copied). So the caller is
683 686 // responsible for dealing with this issue (e.g., get the size from
684 687 // the from-space image when the to-space image might be
685 688 // inconsistent) and always passing the size. hr is the region that
686 689 // contains the object and it's passed optionally from callers who
687 690 // might already have it (no point in recalculating it).
688 691 inline void grayRoot(oop obj, size_t word_size,
689 692 uint worker_id, HeapRegion* hr = NULL);
690 693
691 694 // It iterates over the heap and for each object it comes across it
692 695 // will dump the contents of its reference fields, as well as
693 696 // liveness information for the object and its referents. The dump
694 697 // will be written to a file with the following name:
695 698 // G1PrintReachableBaseFile + "." + str.
696 699 // vo decides whether the prev (vo == UsePrevMarking), the next
697 700 // (vo == UseNextMarking) marking information, or the mark word
698 701 // (vo == UseMarkWord) will be used to determine the liveness of
699 702 // each object / referent.
700 703 // If all is true, all objects in the heap will be dumped, otherwise
 701  704   // only the live ones. In the dump the following symbols / abbreviations
702 705 // are used:
703 706 // M : an explicitly live object (its bitmap bit is set)
704 707 // > : an implicitly live object (over tams)
705 708 // O : an object outside the G1 heap (typically: in the perm gen)
706 709 // NOT : a reference field whose referent is not live
707 710 // AND MARKED : indicates that an object is both explicitly and
708 711 // implicitly live (it should be one or the other, not both)
709 712 void print_reachable(const char* str,
710 713 VerifyOption vo, bool all) PRODUCT_RETURN;
711 714
712 715 // Clear the next marking bitmap (will be called concurrently).
713 716 void clearNextBitmap();
714 717
715 718 // These two do the work that needs to be done before and after the
716 719 // initial root checkpoint. Since this checkpoint can be done at two
717 720 // different points (i.e. an explicit pause or piggy-backed on a
718 721 // young collection), then it's nice to be able to easily share the
719 722 // pre/post code. It might be the case that we can put everything in
720 723 // the post method. TP
721 724 void checkpointRootsInitialPre();
722 725 void checkpointRootsInitialPost();
723 726
724 727 // Scan all the root regions and mark everything reachable from
725 728 // them.
726 729 void scanRootRegions();
727 730
728 731 // Scan a single root region and mark everything reachable from it.
729 732 void scanRootRegion(HeapRegion* hr, uint worker_id);
730 733
731 734 // Do concurrent phase of marking, to a tentative transitive closure.
732 735 void markFromRoots();
733 736
734 737 void checkpointRootsFinal(bool clear_all_soft_refs);
735 738 void checkpointRootsFinalWork();
736 739 void cleanup();
737 740 void completeCleanup();
738 741
739 742 // Mark in the previous bitmap. NB: this is usually read-only, so use
740 743 // this carefully!
741 744 inline void markPrev(oop p);
742 745
743 746 // Clears marks for all objects in the given range, for the prev,
744 747 // next, or both bitmaps. NB: the previous bitmap is usually
745 748 // read-only, so use this carefully!
746 749 void clearRangePrevBitmap(MemRegion mr);
747 750 void clearRangeNextBitmap(MemRegion mr);
748 751 void clearRangeBothBitmaps(MemRegion mr);
749 752
750 753 // Notify data structures that a GC has started.
751 754 void note_start_of_gc() {
752 755 _markStack.note_start_of_gc();
753 756 }
754 757
755 758 // Notify data structures that a GC is finished.
756 759 void note_end_of_gc() {
757 760 _markStack.note_end_of_gc();
758 761 }
759 762
760 763 // Verify that there are no CSet oops on the stacks (taskqueues /
761 764 // global mark stack), enqueued SATB buffers, per-thread SATB
762 765 // buffers, and fingers (global / per-task). The boolean parameters
763 766 // decide which of the above data structures to verify. If marking
764 767 // is not in progress, it's a no-op.
765 768 void verify_no_cset_oops(bool verify_stacks,
766 769 bool verify_enqueued_buffers,
767 770 bool verify_thread_buffers,
768 771 bool verify_fingers) PRODUCT_RETURN;
769 772
770 773 // It is called at the end of an evacuation pause during marking so
771 774 // that CM is notified of where the new end of the heap is. It
772 775 // doesn't do anything if concurrent_marking_in_progress() is false,
773 776 // unless the force parameter is true.
774 777 void update_g1_committed(bool force = false);
775 778
776 779 bool isMarked(oop p) const {
777 780 assert(p != NULL && p->is_oop(), "expected an oop");
778 781 HeapWord* addr = (HeapWord*)p;
 779  782     assert(addr >= _nextMarkBitMap->startWord() &&
780 783 addr < _nextMarkBitMap->endWord(), "in a region");
781 784
782 785 return _nextMarkBitMap->isMarked(addr);
783 786 }
784 787
785 788 inline bool not_yet_marked(oop p) const;
786 789
787 790 // XXX Debug code
788 791 bool containing_card_is_marked(void* p);
789 792 bool containing_cards_are_marked(void* start, void* last);
790 793
791 794 bool isPrevMarked(oop p) const {
792 795 assert(p != NULL && p->is_oop(), "expected an oop");
793 796 HeapWord* addr = (HeapWord*)p;
 794  797     assert(addr >= _prevMarkBitMap->startWord() &&
795 798 addr < _prevMarkBitMap->endWord(), "in a region");
796 799
797 800 return _prevMarkBitMap->isMarked(addr);
798 801 }
799 802
800 803 inline bool do_yield_check(uint worker_i = 0);
801 804 inline bool should_yield();
802 805
 803  806   // Called to abort the marking cycle after a Full GC takes place.
804 807 void abort();
805 808
806 809 // This prints the global/local fingers. It is used for debugging.
807 810 NOT_PRODUCT(void print_finger();)
808 811
809 812 void print_summary_info();
810 813
811 814 void print_worker_threads_on(outputStream* st) const;
812 815
813 816 // The following indicate whether a given verbose level has been
814 817 // set. Notice that anything above stats is conditional to
815 818 // _MARKING_VERBOSE_ having been set to 1
816 819 bool verbose_stats() {
817 820 return _verbose_level >= stats_verbose;
818 821 }
819 822 bool verbose_low() {
820 823 return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
821 824 }
822 825 bool verbose_medium() {
823 826 return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
824 827 }
825 828 bool verbose_high() {
826 829 return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
827 830 }
828 831
829 832 // Liveness counting
830 833
831 834 // Utility routine to set an exclusive range of cards on the given
832 835 // card liveness bitmap
833 836 inline void set_card_bitmap_range(BitMap* card_bm,
834 837 BitMap::idx_t start_idx,
835 838 BitMap::idx_t end_idx,
836 839 bool is_par);
837 840
838 841 // Returns the card number of the bottom of the G1 heap.
839 842 // Used in biasing indices into accounting card bitmaps.
840 843 intptr_t heap_bottom_card_num() const {
841 844 return _heap_bottom_card_num;
842 845 }
843 846
844 847 // Returns the card bitmap for a given task or worker id.
845 848 BitMap* count_card_bitmap_for(uint worker_id) {
846 849 assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
847 850 assert(_count_card_bitmaps != NULL, "uninitialized");
848 851 BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
849 852 assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
850 853 return task_card_bm;
851 854 }
852 855
853 856 // Returns the array containing the marked bytes for each region,
854 857 // for the given worker or task id.
855 858 size_t* count_marked_bytes_array_for(uint worker_id) {
856 859 assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
857 860 assert(_count_marked_bytes != NULL, "uninitialized");
858 861 size_t* marked_bytes_array = _count_marked_bytes[worker_id];
859 862 assert(marked_bytes_array != NULL, "uninitialized");
860 863 return marked_bytes_array;
861 864 }
862 865
863 866 // Returns the index in the liveness accounting card table bitmap
864 867 // for the given address
865 868 inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);
866 869
 867  870   // Counts the size of the given memory region in the given
868 871 // marked_bytes array slot for the given HeapRegion.
869 872 // Sets the bits in the given card bitmap that are associated with the
870 873 // cards that are spanned by the memory region.
871 874 inline void count_region(MemRegion mr, HeapRegion* hr,
872 875 size_t* marked_bytes_array,
873 876 BitMap* task_card_bm);
874 877
875 878 // Counts the given memory region in the task/worker counting
876 879 // data structures for the given worker id.
877 880 inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);
878 881
879 882 // Counts the given memory region in the task/worker counting
880 883 // data structures for the given worker id.
881 884 inline void count_region(MemRegion mr, uint worker_id);
882 885
883 886 // Counts the given object in the given task/worker counting
884 887 // data structures.
885 888 inline void count_object(oop obj, HeapRegion* hr,
886 889 size_t* marked_bytes_array,
887 890 BitMap* task_card_bm);
888 891
889 892 // Counts the given object in the task/worker counting data
890 893 // structures for the given worker id.
891 894 inline void count_object(oop obj, HeapRegion* hr, uint worker_id);
892 895
893 896 // Attempts to mark the given object and, if successful, counts
894 897 // the object in the given task/worker counting structures.
895 898 inline bool par_mark_and_count(oop obj, HeapRegion* hr,
896 899 size_t* marked_bytes_array,
897 900 BitMap* task_card_bm);
898 901
899 902 // Attempts to mark the given object and, if successful, counts
900 903 // the object in the task/worker counting structures for the
901 904 // given worker id.
902 905 inline bool par_mark_and_count(oop obj, size_t word_size,
903 906 HeapRegion* hr, uint worker_id);
904 907
905 908 // Attempts to mark the given object and, if successful, counts
906 909 // the object in the task/worker counting structures for the
907 910 // given worker id.
908 911 inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);
909 912
910 913 // Similar to the above routine but we don't know the heap region that
911 914 // contains the object to be marked/counted, which this routine looks up.
912 915 inline bool par_mark_and_count(oop obj, uint worker_id);
913 916
914 917 // Similar to the above routine but there are times when we cannot
915 918 // safely calculate the size of obj due to races and we, therefore,
 916  919   // pass the size in as a parameter. It is the caller's responsibility
917 920 // to ensure that the size passed in for obj is valid.
918 921 inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
919 922
 920  923   // Unconditionally mark the given object, and unconditionally count
921 924 // the object in the counting structures for worker id 0.
922 925 // Should *not* be called from parallel code.
923 926 inline bool mark_and_count(oop obj, HeapRegion* hr);
924 927
925 928 // Similar to the above routine but we don't know the heap region that
926 929 // contains the object to be marked/counted, which this routine looks up.
927 930 // Should *not* be called from parallel code.
928 931 inline bool mark_and_count(oop obj);
929 932
930 933 // Returns true if initialization was successfully completed.
931 934 bool completed_initialization() const {
932 935 return _completed_initialization;
933 936 }
934 937
935 938 protected:
936 939 // Clear all the per-task bitmaps and arrays used to store the
937 940 // counting data.
938 941 void clear_all_count_data();
939 942
940 943 // Aggregates the counting data for each worker/task
941 944 // that was constructed while marking. Also sets
942 945 // the amount of marked bytes for each region and
943 946 // the top at concurrent mark count.
944 947 void aggregate_count_data();
945 948
946 949 // Verification routine
947 950 void verify_count_data();
948 951 };
949 952
950 953 // A class representing a marking task.
951 954 class CMTask : public TerminatorTerminator {
952 955 private:
953 956 enum PrivateConstants {
954 957 // the regular clock call is called once the scanned words reaches
955 958 // this limit
956 959 words_scanned_period = 12*1024,
957 960 // the regular clock call is called once the number of visited
958 961 // references reaches this limit
959 962 refs_reached_period = 384,
960 963 // initial value for the hash seed, used in the work stealing code
961 964 init_hash_seed = 17,
962 965 // how many entries will be transferred between global stack and
963 966 // local queues
964 967 global_stack_transfer_size = 16
965 968 };
966 969
967 970 uint _worker_id;
968 971 G1CollectedHeap* _g1h;
969 972 ConcurrentMark* _cm;
970 973 CMBitMap* _nextMarkBitMap;
971 974 // the task queue of this task
972 975 CMTaskQueue* _task_queue;
973 976 private:
974 977 // the task queue set---needed for stealing
975 978 CMTaskQueueSet* _task_queues;
976 979 // indicates whether the task has been claimed---this is only for
977 980 // debugging purposes
978 981 bool _claimed;
979 982
980 983 // number of calls to this task
981 984 int _calls;
982 985
983 986 // when the virtual timer reaches this time, the marking step should
984 987 // exit
985 988 double _time_target_ms;
986 989 // the start time of the current marking step
987 990 double _start_time_ms;
988 991
989 992 // the oop closure used for iterations over oops
990 993 G1CMOopClosure* _cm_oop_closure;
991 994
992 995 // the region this task is scanning, NULL if we're not scanning any
993 996 HeapRegion* _curr_region;
994 997 // the local finger of this task, NULL if we're not scanning a region
995 998 HeapWord* _finger;
996 999 // limit of the region this task is scanning, NULL if we're not scanning one
997 1000 HeapWord* _region_limit;
998 1001
999 1002 // the number of words this task has scanned
1000 1003 size_t _words_scanned;
1001 1004 // When _words_scanned reaches this limit, the regular clock is
1002 1005 // called. Notice that this might be decreased under certain
1003 1006 // circumstances (i.e. when we believe that we did an expensive
1004 1007 // operation).
1005 1008 size_t _words_scanned_limit;
1006 1009 // the initial value of _words_scanned_limit (i.e. what it was
1007 1010 // before it was decreased).
1008 1011 size_t _real_words_scanned_limit;
1009 1012
1010 1013 // the number of references this task has visited
1011 1014 size_t _refs_reached;
1012 1015 // When _refs_reached reaches this limit, the regular clock is
1014 1017   // called. Notice that this might be decreased under certain
1014 1017 // circumstances (i.e. when we believe that we did an expensive
1015 1018 // operation).
1016 1019 size_t _refs_reached_limit;
1017 1020 // the initial value of _refs_reached_limit (i.e. what it was before
1018 1021 // it was decreased).
1019 1022 size_t _real_refs_reached_limit;
1020 1023
1021 1024 // used by the work stealing stuff
1022 1025 int _hash_seed;
1023 1026 // if this is true, then the task has aborted for some reason
1024 1027 bool _has_aborted;
1025 1028 // set when the task aborts because it has met its time quota
1026 1029 bool _has_timed_out;
1027 1030 // true when we're draining SATB buffers; this avoids the task
1028 1031 // aborting due to SATB buffers being available (as we're already
1029 1032 // dealing with them)
1030 1033 bool _draining_satb_buffers;
1031 1034
1032 1035 // number sequence of past step times
1033 1036 NumberSeq _step_times_ms;
1034 1037 // elapsed time of this task
1035 1038 double _elapsed_time_ms;
1036 1039 // termination time of this task
1037 1040 double _termination_time_ms;
1038 1041 // when this task got into the termination protocol
1039 1042 double _termination_start_time_ms;
1040 1043
1041 1044 // true when the task is during a concurrent phase, false when it is
1042 1045 // in the remark phase (so, in the latter case, we do not have to
1043 1046 // check all the things that we have to check during the concurrent
1044 1047 // phase, i.e. SATB buffer availability...)
1045 1048 bool _concurrent;
1046 1049
1047 1050 TruncatedSeq _marking_step_diffs_ms;
1048 1051
1049 1052 // Counting data structures. Embedding the task's marked_bytes_array
1050 1053 // and card bitmap into the actual task saves having to go through
1051 1054 // the ConcurrentMark object.
1052 1055 size_t* _marked_bytes_array;
1053 1056 BitMap* _card_bm;
1054 1057
1055 1058 // LOTS of statistics related with this task
1056 1059 #if _MARKING_STATS_
1057 1060 NumberSeq _all_clock_intervals_ms;
1058 1061 double _interval_start_time_ms;
1059 1062
1060 1063 int _aborted;
1061 1064 int _aborted_overflow;
1062 1065 int _aborted_cm_aborted;
1063 1066 int _aborted_yield;
1064 1067 int _aborted_timed_out;
1065 1068 int _aborted_satb;
1066 1069 int _aborted_termination;
1067 1070
1068 1071 int _steal_attempts;
1069 1072 int _steals;
1070 1073
1071 1074 int _clock_due_to_marking;
1072 1075 int _clock_due_to_scanning;
1073 1076
1074 1077 int _local_pushes;
1075 1078 int _local_pops;
1076 1079 int _local_max_size;
1077 1080 int _objs_scanned;
1078 1081
1079 1082 int _global_pushes;
1080 1083 int _global_pops;
1081 1084 int _global_max_size;
1082 1085
1083 1086 int _global_transfers_to;
1084 1087 int _global_transfers_from;
1085 1088
1086 1089 int _regions_claimed;
1087 1090 int _objs_found_on_bitmap;
1088 1091
1089 1092 int _satb_buffers_processed;
1090 1093 #endif // _MARKING_STATS_
1091 1094
1092 1095 // it updates the local fields after this task has claimed
1093 1096 // a new region to scan
1094 1097 void setup_for_region(HeapRegion* hr);
1095 1098 // it brings up-to-date the limit of the region
1096 1099 void update_region_limit();
1097 1100
1098 1101 // called when either the words scanned or the refs visited limit
1099 1102 // has been reached
1100 1103 void reached_limit();
1101 1104 // recalculates the words scanned and refs visited limits
1102 1105 void recalculate_limits();
1103 1106 // decreases the words scanned and refs visited limits when we reach
1104 1107 // an expensive operation
1105 1108 void decrease_limits();
1106 1109 // it checks whether the words scanned or refs visited reached their
1107 1110 // respective limit and calls reached_limit() if they have
1108 1111 void check_limits() {
1109 1112 if (_words_scanned >= _words_scanned_limit ||
1110 1113 _refs_reached >= _refs_reached_limit) {
1111 1114 reached_limit();
1112 1115 }
1113 1116 }
1114 1117 // this is supposed to be called regularly during a marking step as
1115 1118 // it checks a bunch of conditions that might cause the marking step
1116 1119 // to abort
1117 1120 void regular_clock_call();
1118 1121 bool concurrent() { return _concurrent; }
1119 1122
1120 1123 public:
1121 1124 // It resets the task; it should be called right at the beginning of
1122 1125 // a marking phase.
1123 1126 void reset(CMBitMap* _nextMarkBitMap);
1124 1127 // it clears all the fields that correspond to a claimed region.
1125 1128 void clear_region_fields();
1126 1129
1127 1130 void set_concurrent(bool concurrent) { _concurrent = concurrent; }
1128 1131
1129 1132 // The main method of this class which performs a marking step
1130 1133 // trying not to exceed the given duration. However, it might exit
1131 1134 // prematurely, according to some conditions (i.e. SATB buffers are
1132 1135 // available for processing).
1133 1136 void do_marking_step(double target_ms, bool do_stealing, bool do_termination);
1134 1137
1135 1138 // These two calls start and stop the timer
1136 1139 void record_start_time() {
1137 1140 _elapsed_time_ms = os::elapsedTime() * 1000.0;
1138 1141 }
1139 1142 void record_end_time() {
1140 1143 _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
1141 1144 }
1142 1145
1143 1146 // returns the worker ID associated with this task.
1144 1147 uint worker_id() { return _worker_id; }
1145 1148
1146 1149 // From TerminatorTerminator. It determines whether this task should
1147 1150 // exit the termination protocol after it's entered it.
1148 1151 virtual bool should_exit_termination();
1149 1152
1150 1153 // Resets the local region fields after a task has finished scanning a
1151 1154 // region; or when they have become stale as a result of the region
1152 1155 // being evacuated.
1153 1156 void giveup_current_region();
1154 1157
1155 1158 HeapWord* finger() { return _finger; }
1156 1159
1157 1160 bool has_aborted() { return _has_aborted; }
1158 1161 void set_has_aborted() { _has_aborted = true; }
1159 1162 void clear_has_aborted() { _has_aborted = false; }
1160 1163 bool has_timed_out() { return _has_timed_out; }
1161 1164 bool claimed() { return _claimed; }
1162 1165
1163 1166 void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
1164 1167
1165 1168 // It grays the object by marking it and, if necessary, pushing it
1166 1169 // on the local queue
1167 1170 inline void deal_with_reference(oop obj);
1168 1171
1169 1172 // It scans an object and visits its children.
1170 1173 void scan_object(oop obj);
1171 1174
1172 1175 // It pushes an object on the local queue.
1173 1176 inline void push(oop obj);
1174 1177
1175 1178 // These two move entries to/from the global stack.
1176 1179 void move_entries_to_global_stack();
1177 1180 void get_entries_from_global_stack();
1178 1181
1179 1182 // It pops and scans objects from the local queue. If partially is
1180 1183 // true, then it stops when the queue size is of a given limit. If
1181 1184 // partially is false, then it stops when the queue is empty.
1182 1185 void drain_local_queue(bool partially);
1183 1186 // It moves entries from the global stack to the local queue and
1184 1187 // drains the local queue. If partially is true, then it stops when
1185 1188 // both the global stack and the local queue reach a given size. If
1186 1189 // partially if false, it tries to empty them totally.
1187 1190 void drain_global_stack(bool partially);
1188 1191 // It keeps picking SATB buffers and processing them until no SATB
1189 1192 // buffers are available.
1190 1193 void drain_satb_buffers();
1191 1194
1192 1195 // moves the local finger to a new location
1193 1196 inline void move_finger_to(HeapWord* new_finger) {
1194 1197 assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
1195 1198 _finger = new_finger;
1196 1199 }
1197 1200
1198 1201 CMTask(uint worker_id, ConcurrentMark *cm,
1199 1202 size_t* marked_bytes, BitMap* card_bm,
1200 1203 CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
1201 1204
1202 1205 // it prints statistics associated with this task
1203 1206 void print_stats();
1204 1207
1205 1208 #if _MARKING_STATS_
1206 1209 void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
1207 1210 #endif // _MARKING_STATS_
1208 1211 };
1209 1212
1210 1213 // Class that's used to print out per-region liveness
1211 1214 // information. It's currently used at the end of marking and also
1212 1215 // after we sort the old regions at the end of the cleanup operation.
1213 1216 class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
1214 1217 private:
1215 1218 outputStream* _out;
1216 1219
1217 1220 // Accumulators for these values.
1218 1221 size_t _total_used_bytes;
1219 1222 size_t _total_capacity_bytes;
1220 1223 size_t _total_prev_live_bytes;
1221 1224 size_t _total_next_live_bytes;
1222 1225
1223 1226   // These are set up when we come across a "starts humongous" region
1224 1227 // (as this is where most of this information is stored, not in the
1225 1228 // subsequent "continues humongous" regions). After that, for every
1226 1229 // region in a given humongous region series we deduce the right
1227 1230 // values for it by simply subtracting the appropriate amount from
1228 1231 // these fields. All these values should reach 0 after we've visited
1229 1232 // the last region in the series.
1230 1233 size_t _hum_used_bytes;
1231 1234 size_t _hum_capacity_bytes;
1232 1235 size_t _hum_prev_live_bytes;
1233 1236 size_t _hum_next_live_bytes;
1234 1237
1235 1238 static double perc(size_t val, size_t total) {
1236 1239 if (total == 0) {
1237 1240 return 0.0;
1238 1241 } else {
1239 1242 return 100.0 * ((double) val / (double) total);
1240 1243 }
1241 1244 }
1242 1245
1243 1246 static double bytes_to_mb(size_t val) {
1244 1247 return (double) val / (double) M;
1245 1248 }
1246 1249
1247 1250 // See the .cpp file.
1248 1251 size_t get_hum_bytes(size_t* hum_bytes);
1249 1252 void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
1250 1253 size_t* prev_live_bytes, size_t* next_live_bytes);
1251 1254
1252 1255 public:
1253 1256 // The header and footer are printed in the constructor and
1254 1257 // destructor respectively.
1255 1258 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name);
1256 1259 virtual bool doHeapRegion(HeapRegion* r);
1257 1260 ~G1PrintRegionLivenessInfoClosure();
1258 1261 };
1259 1262
1260 1263 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
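
A side note on the overflow machinery the header documents (the _first_overflow_barrier_sync / _second_overflow_barrier_sync pair): all tasks stop touching the global structures and meet at a first barrier, task 0 re-initialises the global data while every task re-initialises its own local data, and a second barrier prevents anyone from resuming work before that is done. Below is a hedged, self-contained sketch of that shape; std::barrier (C++20) stands in for WorkGangBarrierSync and the body is illustrative only, not the HotSpot implementation.

#include <barrier>
#include <thread>
#include <vector>

// Sketch of the two-barrier overflow protocol described in the header's
// comments; std::barrier is a stand-in for WorkGangBarrierSync.
void overflow_protocol_sketch(unsigned num_tasks) {
  std::barrier first_sync(num_tasks);   // everyone has stopped mutating globals
  std::barrier second_sync(num_tasks);  // re-initialisation is complete

  std::vector<std::thread> tasks;
  for (unsigned id = 0; id < num_tasks; ++id) {
    tasks.emplace_back([&, id] {
      // ... a task detects the overflow and aborts its current step ...
      first_sync.arrive_and_wait();
      if (id == 0) {
        // task 0 re-initialises the global marking data structures here
      }
      // every task re-initialises its own local data structures here
      second_sync.arrive_and_wait();
      // ... only now is it safe to start doing work again ...
    });
  }
  for (std::thread& t : tasks) {
    t.join();
  }
}

int main() {
  overflow_protocol_sketch(4);
  return 0;
}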