rev 4008 : 8001985: G1: Backport fix for 7200261 to hsx24
Summary: The automatic backport of the fix for 7200261 did not apply cleanly to hsx24 - there were two rejected hunks that had to be fixed up by hand.
Reviewed-by:
--- old/src/share/vm/gc_implementation/g1/concurrentMark.hpp
+++ new/src/share/vm/gc_implementation/g1/concurrentMark.hpp
1 1 /*
2 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
26 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
27 27
28 28 #include "gc_implementation/g1/heapRegionSets.hpp"
29 29 #include "utilities/taskqueue.hpp"
30 30
31 31 class G1CollectedHeap;
32 32 class CMTask;
33 33 typedef GenericTaskQueue<oop, mtGC> CMTaskQueue;
34 34 typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;
35 35
36 36 // Closure used by CM during concurrent reference discovery
37 37 // and reference processing (during remarking) to determine
38 38 // if a particular object is alive. It is primarily used
39 39 // to determine if referents of discovered reference objects
40 40 // are alive. An instance is also embedded into the
41 41 // reference processor as the _is_alive_non_header field
42 42 class G1CMIsAliveClosure: public BoolObjectClosure {
43 43 G1CollectedHeap* _g1;
44 44 public:
45 45 G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
46 46
47 47 void do_object(oop obj) {
48 48 ShouldNotCallThis();
49 49 }
50 50 bool do_object_b(oop obj);
51 51 };
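// Illustrative sketch (not part of this changeset): reference processing
// uses this closure as a plain liveness predicate over referents, roughly:
//
//   G1CMIsAliveClosure is_alive(g1h);
//   if (!is_alive.do_object_b(referent)) {
//     // referent is dead; the Reference may be cleared / enqueued
//   }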
52 52
53 53 // A generic CM bit map. This is essentially a wrapper around the BitMap
54 54 // class, with one bit per (1<<_shifter) HeapWords.
55 55
56 56 class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
57 57 protected:
58 58 HeapWord* _bmStartWord; // base address of range covered by map
59 59 size_t _bmWordSize; // map size (in #HeapWords covered)
60 60 const int _shifter; // map to char or bit
61 61 VirtualSpace _virtual_space; // underlying the bit map
62 62 BitMap _bm; // the bit map itself
63 63
64 64 public:
65 65 // constructor
66 66 CMBitMapRO(ReservedSpace rs, int shifter);
67 67
68 68 enum { do_yield = true };
69 69
70 70 // inquiries
71 71 HeapWord* startWord() const { return _bmStartWord; }
72 72 size_t sizeInWords() const { return _bmWordSize; }
73 73 // the following is one past the last word in space
74 74 HeapWord* endWord() const { return _bmStartWord + _bmWordSize; }
75 75
76 76 // read marks
77 77
78 78 bool isMarked(HeapWord* addr) const {
79 79 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
80 80 "outside underlying space?");
81 81 return _bm.at(heapWordToOffset(addr));
82 82 }
83 83
84 84 // iteration
85 85 inline bool iterate(BitMapClosure* cl, MemRegion mr);
86 86 inline bool iterate(BitMapClosure* cl);
87 87
88 88 // Return the address corresponding to the next marked bit at or after
89 89 // "addr", and before "limit", if "limit" is non-NULL. If there is no
90 90 // such bit, returns "limit" if that is non-NULL, or else "endWord()".
91 91 HeapWord* getNextMarkedWordAddress(HeapWord* addr,
92 92 HeapWord* limit = NULL) const;
93 93 // Return the address corresponding to the next unmarked bit at or after
94 94 // "addr", and before "limit", if "limit" is non-NULL. If there is no
95 95 // such bit, returns "limit" if that is non-NULL, or else "endWord()".
96 96 HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
97 97 HeapWord* limit = NULL) const;
98 98
99 99 // conversion utilities
100 100 // XXX Fix these so that offsets are size_t's...
101 101 HeapWord* offsetToHeapWord(size_t offset) const {
102 102 return _bmStartWord + (offset << _shifter);
103 103 }
104 104 size_t heapWordToOffset(HeapWord* addr) const {
105 105 return pointer_delta(addr, _bmStartWord) >> _shifter;
106 106 }
107 107 int heapWordDiffToOffsetDiff(size_t diff) const;
108 108 HeapWord* nextWord(HeapWord* addr) {
109 109 return offsetToHeapWord(heapWordToOffset(addr) + 1);
110 110 }
111 111
112 112 // debugging
113 113 NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
114 114 };
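// Illustrative sketch (not part of this changeset): the conversion utilities
// above are inverse shifts. With _shifter == 0, each bit covers exactly one
// HeapWord:
//
//   HeapWord* addr = bm->startWord() + 42;
//   size_t off = bm->heapWordToOffset(addr);   // (addr - start) >> 0 == 42
//   assert(bm->offsetToHeapWord(off) == addr, "round trip");
//
// A larger _shifter trades resolution for space: one bit then summarizes
// (1 << _shifter) consecutive HeapWords.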
115 115
116 116 class CMBitMap : public CMBitMapRO {
117 117
118 118 public:
119 119 // constructor
120 120 CMBitMap(ReservedSpace rs, int shifter) :
121 121 CMBitMapRO(rs, shifter) {}
122 122
123 123 // write marks
124 124 void mark(HeapWord* addr) {
125 125 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
126 126 "outside underlying space?");
127 127 _bm.set_bit(heapWordToOffset(addr));
128 128 }
129 129 void clear(HeapWord* addr) {
130 130 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
131 131 "outside underlying space?");
132 132 _bm.clear_bit(heapWordToOffset(addr));
133 133 }
134 134 bool parMark(HeapWord* addr) {
135 135 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
136 136 "outside underlying space?");
137 137 return _bm.par_set_bit(heapWordToOffset(addr));
138 138 }
139 139 bool parClear(HeapWord* addr) {
140 140 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
141 141 "outside underlying space?");
142 142 return _bm.par_clear_bit(heapWordToOffset(addr));
143 143 }
144 144 void markRange(MemRegion mr);
145 145 void clearAll();
146 146 void clearRange(MemRegion mr);
147 147
148 148 // Starting at the bit corresponding to "addr" (inclusive), find the next
149 149 // "1" bit, if any. This bit starts some run of consecutive "1"'s; find
150 150 // the end of this run (stopping at "end_addr"). Return the MemRegion
151 151 // covering from the start of the region corresponding to the first bit
152 152 // of the run to the end of the region corresponding to the last bit of
153 153 // the run. If there is no "1" bit at or after "addr", return an empty
154 154 // MemRegion.
155 155 MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
156 156 };
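// Illustrative sketch (not part of this changeset): a consumer of
// getAndClearMarkedRegion() typically walks the map run by run until an
// empty MemRegion comes back:
//
//   HeapWord* cur = bm->startWord();
//   MemRegion run = bm->getAndClearMarkedRegion(cur, bm->endWord());
//   while (!run.is_empty()) {
//     process(run);                            // hypothetical per-run work
//     cur = run.end();
//     run = bm->getAndClearMarkedRegion(cur, bm->endWord());
//   }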
157 157
158 158 // Represents a marking stack used by the CM collector.
159 159 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
160 160 class CMMarkStack VALUE_OBJ_CLASS_SPEC {
161 161 ConcurrentMark* _cm;
162 162 oop* _base; // bottom of stack
163 163 jint _index; // one more than last occupied index
164 164 jint _capacity; // max #elements
165 165 jint _saved_index; // value of _index saved at start of GC
166 166 NOT_PRODUCT(jint _max_depth;) // max depth plumbed during run
167 167
168 168 bool _overflow;
169 169 DEBUG_ONLY(bool _drain_in_progress;)
170 170 DEBUG_ONLY(bool _drain_in_progress_yields;)
171 171
172 172 public:
173 173 CMMarkStack(ConcurrentMark* cm);
174 174 ~CMMarkStack();
175 175
176 176 void allocate(size_t size);
177 177
178 178 oop pop() {
179 179 if (!isEmpty()) {
180 180 return _base[--_index] ;
181 181 }
182 182 return NULL;
183 183 }
184 184
185 185 // If overflow happens, don't do the push, and record the overflow.
186 186 // *Requires* that "ptr" is already marked.
187 187 void push(oop ptr) {
188 188 if (isFull()) {
189 189 // Record overflow.
190 190 _overflow = true;
191 191 return;
192 192 } else {
193 193 _base[_index++] = ptr;
194 194 NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
195 195 }
196 196 }
197 197 // Non-block impl. Note: concurrency is allowed only with other
198 198 // "par_push" operations, not with "pop" or "drain". We would need
199 199 // parallel versions of them if such concurrency was desired.
200 200 void par_push(oop ptr);
201 201
202 202 // Pushes the first "n" elements of "ptr_arr" on the stack.
203 203 // Non-block impl. Note: concurrency is allowed only with other
204 204 // "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
205 205 void par_adjoin_arr(oop* ptr_arr, int n);
206 206
207 207 // Pushes the first "n" elements of "ptr_arr" on the stack.
208 208 // Locking impl: concurrency is allowed only with
209 209 // "par_push_arr" and/or "par_pop_arr" operations, which use the same
210 210 // locking strategy.
211 211 void par_push_arr(oop* ptr_arr, int n);
212 212
213 213 // If returns false, the array was empty. Otherwise, removes up to "max"
214 214 // elements from the stack, and transfers them to "ptr_arr" in an
215 215 // unspecified order. The actual number transferred is given in "n" ("n
216 216 // == 0" is deliberately redundant with the return value.) Locking impl:
217 217 // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
218 218 // operations, which use the same locking strategy.
219 219 bool par_pop_arr(oop* ptr_arr, int max, int* n);
220 220
221 221 // Drain the mark stack, applying the given closure to all fields of
222 222 // objects on the stack. (That is, continue until the stack is empty,
223 223 // even if closure applications add entries to the stack.) The "bm"
224 224 // argument, if non-null, may be used to verify that only marked objects
225 225 // are on the mark stack. If "yield_after" is "true", then the
226 226 // concurrent marker performing the drain offers to yield after
227 227 // processing each object. If a yield occurs, stops the drain operation
228 228 // and returns false. Otherwise, returns true.
229 229 template<class OopClosureClass>
230 230 bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);
231 231
232 232 bool isEmpty() { return _index == 0; }
233 233 bool isFull() { return _index == _capacity; }
234 234 int maxElems() { return _capacity; }
235 235
236 236 bool overflow() { return _overflow; }
237 237 void clear_overflow() { _overflow = false; }
238 238
239 239 int size() { return _index; }
240 240
241 241 void setEmpty() { _index = 0; clear_overflow(); }
242 242
243 243 // Record the current index.
244 244 void note_start_of_gc();
245 245
246 246 // Make sure that we have not added any entries to the stack during GC.
247 247 void note_end_of_gc();
248 248
249 249 // iterate over the oops in the mark stack, up to the bound recorded via
250 250 // the call above.
251 251 void oops_do(OopClosure* f);
252 252 };
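// Illustrative sketch (not part of this changeset): since "n == 0" is
// deliberately redundant with par_pop_arr's return value, either test works
// for a consumer draining the stack in batches:
//
//   oop buffer[16];
//   int n;
//   while (stack->par_pop_arr(buffer, 16, &n)) {
//     for (int i = 0; i < n; i++) {
//       visit(buffer[i]);                      // hypothetical per-oop work
//     }
//   }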
253 253
254 254 class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
255 255 private:
256 256 #ifndef PRODUCT
257 257 uintx _num_remaining;
258 258 bool _force;
259 259 #endif // !defined(PRODUCT)
260 260
261 261 public:
262 262 void init() PRODUCT_RETURN;
263 263 void update() PRODUCT_RETURN;
264 264 bool should_force() PRODUCT_RETURN_( return false; );
265 265 };
266 266
267 267 // this will enable a variety of different statistics per GC task
268 268 #define _MARKING_STATS_ 0
269 269 // this will enable the higher verbose levels
270 270 #define _MARKING_VERBOSE_ 0
271 271
272 272 #if _MARKING_STATS_
273 273 #define statsOnly(statement) \
274 274 do { \
275 275 statement ; \
276 276 } while (0)
277 277 #else // _MARKING_STATS_
278 278 #define statsOnly(statement) \
279 279 do { \
280 280 } while (0)
281 281 #endif // _MARKING_STATS_
282 282
283 283 typedef enum {
284 284 no_verbose = 0, // verbose turned off
285 285 stats_verbose, // only prints stats at the end of marking
286 286 low_verbose, // low verbose, mostly per region and per major event
287 287 medium_verbose, // a bit more detailed than low
288 288 high_verbose // per object verbose
289 289 } CMVerboseLevel;
290 290
291 291 class YoungList;
292 292
293 293 // Root Regions are regions that are not empty at the beginning of a
294 294 // marking cycle and which we might collect during an evacuation pause
295 295 // while the cycle is active. Given that, during evacuation pauses, we
296 296 // do not copy objects that are explicitly marked, what we have to do
297 297 // for the root regions is to scan them and mark all objects reachable
298 298 // from them. According to the SATB assumptions, we only need to visit
299 299 // each object once during marking. So, as long as we finish this scan
300 300 // before the next evacuation pause, we can copy the objects from the
301 301 // root regions without having to mark them or do anything else to them.
302 302 //
303 303 // Currently, we only support root region scanning once (at the start
304 304 // of the marking cycle) and the root regions are all the survivor
305 305 // regions populated during the initial-mark pause.
306 306 class CMRootRegions VALUE_OBJ_CLASS_SPEC {
307 307 private:
308 308 YoungList* _young_list;
309 309 ConcurrentMark* _cm;
310 310
311 311 volatile bool _scan_in_progress;
312 312 volatile bool _should_abort;
313 313 HeapRegion* volatile _next_survivor;
314 314
315 315 public:
316 316 CMRootRegions();
317 317 // We actually do most of the initialization in this method.
318 318 void init(G1CollectedHeap* g1h, ConcurrentMark* cm);
319 319
320 320 // Reset the claiming / scanning of the root regions.
321 321 void prepare_for_scan();
322 322
323 323 // Forces get_next() to return NULL so that the iteration aborts early.
324 324 void abort() { _should_abort = true; }
325 325
326 326 // Return true if the CM threads are actively scanning root regions,
327 327 // false otherwise.
328 328 bool scan_in_progress() { return _scan_in_progress; }
329 329
330 330 // Claim the next root region to scan atomically, or return NULL if
331 331 // all have been claimed.
332 332 HeapRegion* claim_next();
333 333
334 334 // Flag that we're done with root region scanning and notify anyone
335 335 // who's waiting on it. If aborted is false, assume that all regions
336 336 // have been claimed.
337 337 void scan_finished();
338 338
339 339 // If CM threads are still scanning root regions, wait until they
340 340 // are done. Return true if we had to wait, false otherwise.
341 341 bool wait_until_scan_finished();
342 342 };
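// Illustrative sketch (not part of this changeset): workers are expected to
// drain the root regions with a claim loop along these lines (compare
// ConcurrentMark::scanRootRegions() below):
//
//   HeapRegion* hr = root_regions->claim_next();
//   while (hr != NULL) {
//     cm->scanRootRegion(hr, worker_id);
//     hr = root_regions->claim_next();
//   }
//   // the last worker out calls scan_finished() to wake any waiters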
343 343
344 344 class ConcurrentMarkThread;
345 345
346 346 class ConcurrentMark: public CHeapObj<mtGC> {
347 347 friend class ConcurrentMarkThread;
348 348 friend class CMTask;
349 349 friend class CMBitMapClosure;
350 350 friend class CMGlobalObjectClosure;
351 351 friend class CMRemarkTask;
352 352 friend class CMConcurrentMarkingTask;
353 353 friend class G1ParNoteEndTask;
354 354 friend class CalcLiveObjectsClosure;
355 355 friend class G1CMRefProcTaskProxy;
356 356 friend class G1CMRefProcTaskExecutor;
357 357 friend class G1CMParKeepAliveAndDrainClosure;
358 358 friend class G1CMParDrainMarkingStackClosure;
359 359
360 360 protected:
361 361 ConcurrentMarkThread* _cmThread; // the thread doing the work
362 362 G1CollectedHeap* _g1h; // the heap.
363 363 uint _parallel_marking_threads; // the number of marking
364 364 // threads we're using
365 365 uint _max_parallel_marking_threads; // max number of marking
366 366 // threads we'll ever use
367 367 double _sleep_factor; // how much we have to sleep, with
368 368 // respect to the work we just did, to
369 369 // meet the marking overhead goal
370 370 double _marking_task_overhead; // marking target overhead for
371 371 // a single task
372 372
373 373 // same as the two above, but for the cleanup task
374 374 double _cleanup_sleep_factor;
375 375 double _cleanup_task_overhead;
376 376
377 377 FreeRegionList _cleanup_list;
378 378
379 379 // Concurrent marking support structures
380 380 CMBitMap _markBitMap1;
381 381 CMBitMap _markBitMap2;
382 382 CMBitMapRO* _prevMarkBitMap; // completed mark bitmap
383 383 CMBitMap* _nextMarkBitMap; // under-construction mark bitmap
384 384
385 385 BitMap _region_bm;
386 386 BitMap _card_bm;
387 387
388 388 // Heap bounds
389 389 HeapWord* _heap_start;
390 390 HeapWord* _heap_end;
391 391
392 392 // Root region tracking and claiming.
393 393 CMRootRegions _root_regions;
394 394
395 395 // For gray objects
396 396 CMMarkStack _markStack; // Grey objects behind global finger.
397 397 HeapWord* volatile _finger; // the global finger, region aligned,
398 398 // always points to the end of the
399 399 // last claimed region
400 400
401 401 // marking tasks
402 402 uint _max_task_num; // maximum task number
403 403 uint _active_tasks; // task num currently active
404 404 CMTask** _tasks; // task queue array (max_task_num len)
405 405 CMTaskQueueSet* _task_queues; // task queue set
406 406 ParallelTaskTerminator _terminator; // for termination
407 407
408 408 // Two sync barriers that are used to synchronise tasks when an
409 409 // overflow occurs. The algorithm is the following. All tasks enter
410 410 // the first one to ensure that they have all stopped manipulating
411 411 // the global data structures. After they exit it, they re-initialise
412 412 // their data structures and task 0 re-initialises the global data
413 413 // structures. Then, they enter the second sync barrier. This
414 414 // ensures that no task starts doing work before all data
415 415 // structures (local and global) have been re-initialised. When they
416 416 // exit it, they are free to start working again.
417 417 WorkGangBarrierSync _first_overflow_barrier_sync;
418 418 WorkGangBarrierSync _second_overflow_barrier_sync;
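// Illustrative sketch (not part of this changeset): the protocol described
// above, from a single task's point of view:
//
//   _cm->enter_first_sync_barrier(task_num);   // all tasks stop touching
//                                              // the global structures
//   reset_local_data_structures();             // hypothetical local reset
//   // ... task 0 alone re-initialises the global structures ...
//   _cm->enter_second_sync_barrier(task_num);  // nobody restarts early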
419 419
420 420 // this is set by any task, when an overflow on the global data
421 421 // structures is detected.
422 422 volatile bool _has_overflown;
423 423 // true: marking is concurrent, false: we're in remark
424 424 volatile bool _concurrent;
425 425 // set at the end of a Full GC so that marking aborts
426 426 volatile bool _has_aborted;
427 427
428 428 // used when remark aborts due to an overflow to indicate that
429 429 // another concurrent marking phase should start
430 430 volatile bool _restart_for_overflow;
431 431
432 432 // This is true from the very start of concurrent marking until the
433 433 // point when all the tasks complete their work. It is really used
434 434 // to determine the points between the end of concurrent marking and
435 435 // time of remark.
436 436 volatile bool _concurrent_marking_in_progress;
437 437
438 438 // verbose level
439 439 CMVerboseLevel _verbose_level;
440 440
441 441 // All of these times are in ms.
442 442 NumberSeq _init_times;
443 443 NumberSeq _remark_times;
444 444 NumberSeq _remark_mark_times;
445 445 NumberSeq _remark_weak_ref_times;
446 446 NumberSeq _cleanup_times;
447 447 double _total_counting_time;
448 448 double _total_rs_scrub_time;
449 449
450 450 double* _accum_task_vtime; // accumulated task vtime
451 451
452 452 FlexibleWorkGang* _parallel_workers;
453 453
454 454 ForceOverflowSettings _force_overflow_conc;
455 455 ForceOverflowSettings _force_overflow_stw;
456 456
457 457 void weakRefsWork(bool clear_all_soft_refs);
458 458
459 459 void swapMarkBitMaps();
460 460
461 461 // It resets the global marking data structures, as well as the
462 462 // task local ones; should be called during initial mark.
463 463 void reset();
464 464 // It resets all the marking data structures.
465 465 void clear_marking_state(bool clear_overflow = true);
466 466
467 467 // It should be called to indicate which phase we're in (concurrent
468 468 // mark or remark) and how many threads are currently active.
469 469 void set_phase(uint active_tasks, bool concurrent);
470 470 // We do this after we're done with marking so that the marking data
471 471 // structures are initialised to a sensible and predictable state.
472 472 void set_non_marking_state();
473 473
474 474 // prints all gathered CM-related statistics
475 475 void print_stats();
476 476
477 477 bool cleanup_list_is_empty() {
478 478 return _cleanup_list.is_empty();
479 479 }
480 480
481 481 // accessor methods
482 482 uint parallel_marking_threads() { return _parallel_marking_threads; }
483 483 uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
484 484 double sleep_factor() { return _sleep_factor; }
485 485 double marking_task_overhead() { return _marking_task_overhead;}
486 486 double cleanup_sleep_factor() { return _cleanup_sleep_factor; }
487 487 double cleanup_task_overhead() { return _cleanup_task_overhead;}
488 488
489 489 HeapWord* finger() { return _finger; }
490 490 bool concurrent() { return _concurrent; }
491 491 uint active_tasks() { return _active_tasks; }
492 492 ParallelTaskTerminator* terminator() { return &_terminator; }
493 493
494 494 // It claims the next available region to be scanned by a marking
495 495 // task. It might return NULL if the next region is empty or we have
496 496 // run out of regions. In the latter case, out_of_regions()
497 497 // determines whether we've really run out of regions or the task
498 498 // should call claim_region() again. This might seem a bit
499 499 // awkward. Originally, the code was written so that claim_region()
500 500 // either successfully returned with a non-empty region or there
501 501 // were no more regions to be claimed. The problem with this was
502 502 // that, in certain circumstances, it iterated over large chunks of
503 503 // the heap finding only empty regions and, while it was working, it
504 504 // was preventing the calling task from calling its regular clock
505 505 // method. So, this way, each task will spend very little time in
506 506 // claim_region() and is allowed to call the regular clock method
507 507 // frequently.
508 508 HeapRegion* claim_region(int task);
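// Illustrative sketch (not part of this changeset): because claim_region()
// can return NULL while unclaimed regions remain, callers loop until
// out_of_regions() confirms the global finger has reached the end of the
// heap:
//
//   HeapRegion* hr = claim_region(task_num);
//   while (hr == NULL && !out_of_regions()) {
//     // hypothetical: let the task's regular clock method run in between
//     hr = claim_region(task_num);
//   }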
509 509
510 510 // It determines whether we've run out of regions to scan.
511 511 bool out_of_regions() { return _finger == _heap_end; }
512 512
513 513 // Returns the task with the given id
514 514 CMTask* task(int id) {
515 515 assert(0 <= id && id < (int) _active_tasks,
516 516 "task id not within active bounds");
517 517 return _tasks[id];
518 518 }
519 519
520 520 // Returns the task queue with the given id
521 521 CMTaskQueue* task_queue(int id) {
522 522 assert(0 <= id && id < (int) _active_tasks,
523 523 "task queue id not within active bounds");
524 524 return (CMTaskQueue*) _task_queues->queue(id);
525 525 }
526 526
527 527 // Returns the task queue set
528 528 CMTaskQueueSet* task_queues() { return _task_queues; }
529 529
530 530 // Access / manipulation of the overflow flag which is set to
531 531 // indicate that the global stack has overflown
532 532 bool has_overflown() { return _has_overflown; }
533 533 void set_has_overflown() { _has_overflown = true; }
534 534 void clear_has_overflown() { _has_overflown = false; }
535 535 bool restart_for_overflow() { return _restart_for_overflow; }
536 536
537 537 bool has_aborted() { return _has_aborted; }
538 538
539 539 // Methods to enter the two overflow sync barriers
540 540 void enter_first_sync_barrier(int task_num);
541 541 void enter_second_sync_barrier(int task_num);
542 542
543 543 ForceOverflowSettings* force_overflow_conc() {
544 544 return &_force_overflow_conc;
545 545 }
546 546
547 547 ForceOverflowSettings* force_overflow_stw() {
548 548 return &_force_overflow_stw;
549 549 }
550 550
551 551 ForceOverflowSettings* force_overflow() {
552 552 if (concurrent()) {
553 553 return force_overflow_conc();
554 554 } else {
555 555 return force_overflow_stw();
556 556 }
557 557 }
558 558
559 559 // Live Data Counting data structures...
560 560 // These data structures are initialized at the start of
561 561 // marking. They are written to while marking is active.
562 562 // They are aggregated during remark; the aggregated values
563 563 // are then used to populate the _region_bm, _card_bm, and
564 564 // the total live bytes, which are then subsequently updated
565 565 // during cleanup.
566 566
567 567 // An array of bitmaps (one bit map per task). Each bitmap
568 568 // is used to record the cards spanned by the live objects
569 569 // marked by that task/worker.
570 570 BitMap* _count_card_bitmaps;
571 571
572 572 // Used to record the number of marked live bytes
573 573 // (for each region, by worker thread).
574 574 size_t** _count_marked_bytes;
575 575
576 576 // Card index of the bottom of the G1 heap. Used for biasing indices into
577 577 // the card bitmaps.
578 578 intptr_t _heap_bottom_card_num;
579 579
580 580 public:
581 581 // Manipulation of the global mark stack.
582 582 // Notice that the first mark_stack_push is CAS-based, whereas the
583 583 // two below are Mutex-based. This is OK since the first one is only
584 584 // called during evacuation pauses and doesn't compete with the
585 585 // other two (which are called by the marking tasks during
586 586 // concurrent marking or remark).
587 587 bool mark_stack_push(oop p) {
588 588 _markStack.par_push(p);
589 589 if (_markStack.overflow()) {
590 590 set_has_overflown();
591 591 return false;
592 592 }
593 593 return true;
594 594 }
595 595 bool mark_stack_push(oop* arr, int n) {
596 596 _markStack.par_push_arr(arr, n);
597 597 if (_markStack.overflow()) {
598 598 set_has_overflown();
599 599 return false;
600 600 }
601 601 return true;
602 602 }
603 603 void mark_stack_pop(oop* arr, int max, int* n) {
604 604 _markStack.par_pop_arr(arr, max, n);
605 605 }
606 606 size_t mark_stack_size() { return _markStack.size(); }
607 607 size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
608 608 bool mark_stack_overflow() { return _markStack.overflow(); }
609 609 bool mark_stack_empty() { return _markStack.isEmpty(); }
610 610
611 611 CMRootRegions* root_regions() { return &_root_regions; }
612 612
613 613 bool concurrent_marking_in_progress() {
614 614 return _concurrent_marking_in_progress;
615 615 }
616 616 void set_concurrent_marking_in_progress() {
617 617 _concurrent_marking_in_progress = true;
618 618 }
619 619 void clear_concurrent_marking_in_progress() {
620 620 _concurrent_marking_in_progress = false;
621 621 }
622 622
623 623 void update_accum_task_vtime(int i, double vtime) {
624 624 _accum_task_vtime[i] += vtime;
625 625 }
626 626
627 627 double all_task_accum_vtime() {
628 628 double ret = 0.0;
629 629 for (int i = 0; i < (int)_max_task_num; ++i)
630 630 ret += _accum_task_vtime[i];
631 631 return ret;
632 632 }
633 633
634 634 // Attempts to steal an object from the task queues of other tasks
635 635 bool try_stealing(int task_num, int* hash_seed, oop& obj) {
636 636 return _task_queues->steal(task_num, hash_seed, obj);
637 637 }
638 638
639 639 ConcurrentMark(ReservedSpace rs, uint max_regions);
640 640 ~ConcurrentMark();
641 641
642 642 ConcurrentMarkThread* cmThread() { return _cmThread; }
643 643
644 644 CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
645 645 CMBitMap* nextMarkBitMap() const { return _nextMarkBitMap; }
646 646
647 647 // Returns the number of GC threads to be used in a concurrent
648 648 // phase based on the number of GC threads being used in a STW
649 649 // phase.
650 650 uint scale_parallel_threads(uint n_par_threads);
651 651
652 652 // Calculates the number of GC threads to be used in a concurrent phase.
653 653 uint calc_parallel_marking_threads();
654 654
655 655 // The following three are interaction between CM and
656 656 // G1CollectedHeap
657 657
658 658 // This notifies CM that a root during initial-mark needs to be
659 659 // grayed. It is MT-safe. word_size is the size of the object in
660 660 // words. It is passed explicitly as sometimes we cannot calculate
661 661 // it from the given object because it might be in an inconsistent
662 662 // state (e.g., in to-space and being copied). So the caller is
663 663 // responsible for dealing with this issue (e.g., get the size from
664 664 // the from-space image when the to-space image might be
665 665 // inconsistent) and always passing the size. hr is the region that
666 666 // contains the object and it's passed optionally from callers who
667 667 // might already have it (no point in recalculating it).
668 668 inline void grayRoot(oop obj, size_t word_size,
669 669 uint worker_id, HeapRegion* hr = NULL);
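// Illustrative sketch (not part of this changeset): a caller inside an
// evacuation pause would size the object from its stable from-space image,
// since the to-space copy may still be in flight:
//
//   size_t word_size = from_space_obj->size();  // from-space is consistent
//   _cm->grayRoot(to_space_obj, word_size, worker_id);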
670 670
671 671 // It iterates over the heap and for each object it comes across it
672 672 // will dump the contents of its reference fields, as well as
673 673 // liveness information for the object and its referents. The dump
674 674 // will be written to a file with the following name:
675 675 // G1PrintReachableBaseFile + "." + str.
676 676 // vo decides whether the prev (vo == UsePrevMarking), the next
677 677 // (vo == UseNextMarking) marking information, or the mark word
678 678 // (vo == UseMarkWord) will be used to determine the liveness of
679 679 // each object / referent.
680 680 // If all is true, all objects in the heap will be dumped, otherwise
681 681 // only the live ones. In the dump the following symbols / abbreviations
682 682 // are used:
683 683 // M : an explicitly live object (its bitmap bit is set)
684 684 // > : an implicitly live object (over tams)
685 685 // O : an object outside the G1 heap (typically: in the perm gen)
686 686 // NOT : a reference field whose referent is not live
687 687 // AND MARKED : indicates that an object is both explicitly and
688 688 // implicitly live (it should be one or the other, not both)
689 689 void print_reachable(const char* str,
690 690 VerifyOption vo, bool all) PRODUCT_RETURN;
691 691
692 692 // Clear the next marking bitmap (will be called concurrently).
693 693 void clearNextBitmap();
694 694
695 695 // These two do the work that needs to be done before and after the
696 696 // initial root checkpoint. Since this checkpoint can be done at two
697 697 // different points (i.e. an explicit pause or piggy-backed on a
698 698 // young collection), then it's nice to be able to easily share the
699 699 // pre/post code. It might be the case that we can put everything in
700 700 // the post method. TP
701 701 void checkpointRootsInitialPre();
702 702 void checkpointRootsInitialPost();
703 703
704 704 // Scan all the root regions and mark everything reachable from
705 705 // them.
706 706 void scanRootRegions();
707 707
708 708 // Scan a single root region and mark everything reachable from it.
709 709 void scanRootRegion(HeapRegion* hr, uint worker_id);
710 710
711 711 // Do concurrent phase of marking, to a tentative transitive closure.
712 712 void markFromRoots();
713 713
714 714 void checkpointRootsFinal(bool clear_all_soft_refs);
715 715 void checkpointRootsFinalWork();
716 716 void cleanup();
717 717 void completeCleanup();
718 718
719 719 // Mark in the previous bitmap. NB: this is usually read-only, so use
720 720 // this carefully!
721 721 inline void markPrev(oop p);
722 722
723 723 // Clears marks for all objects in the given range, for the prev,
724 724 // next, or both bitmaps. NB: the previous bitmap is usually
725 725 // read-only, so use this carefully!
726 726 void clearRangePrevBitmap(MemRegion mr);
727 727 void clearRangeNextBitmap(MemRegion mr);
728 728 void clearRangeBothBitmaps(MemRegion mr);
729 729
730 730 // Notify data structures that a GC has started.
731 731 void note_start_of_gc() {
732 732 _markStack.note_start_of_gc();
733 733 }
734 734
735 735 // Notify data structures that a GC is finished.
736 736 void note_end_of_gc() {
737 737 _markStack.note_end_of_gc();
738 738 }
739 739
740 740 // Verify that there are no CSet oops on the stacks (taskqueues /
741 741 // global mark stack), enqueued SATB buffers, per-thread SATB
742 742 // buffers, and fingers (global / per-task). The boolean parameters
743 743 // decide which of the above data structures to verify. If marking
744 744 // is not in progress, it's a no-op.
745 745 void verify_no_cset_oops(bool verify_stacks,
746 746 bool verify_enqueued_buffers,
747 747 bool verify_thread_buffers,
748 748 bool verify_fingers) PRODUCT_RETURN;
749 749
750 750 // It is called at the end of an evacuation pause during marking so
751 751 // that CM is notified of where the new end of the heap is. It
752 752 // doesn't do anything if concurrent_marking_in_progress() is false,
753 753 // unless the force parameter is true.
754 754 void update_g1_committed(bool force = false);
755 755
756 756 bool isMarked(oop p) const {
757 757 assert(p != NULL && p->is_oop(), "expected an oop");
758 758 HeapWord* addr = (HeapWord*)p;
759 759 assert(addr >= _nextMarkBitMap->startWord() &&
760 760 addr < _nextMarkBitMap->endWord(), "in a region");
761 761
762 762 return _nextMarkBitMap->isMarked(addr);
763 763 }
764 764
765 765 inline bool not_yet_marked(oop p) const;
766 766
767 767 // XXX Debug code
768 768 bool containing_card_is_marked(void* p);
769 769 bool containing_cards_are_marked(void* start, void* last);
770 770
771 771 bool isPrevMarked(oop p) const {
772 772 assert(p != NULL && p->is_oop(), "expected an oop");
773 773 HeapWord* addr = (HeapWord*)p;
774 774 assert(addr >= _prevMarkBitMap->startWord() &&
775 775 addr < _prevMarkBitMap->endWord(), "in a region");
776 776
777 777 return _prevMarkBitMap->isMarked(addr);
778 778 }
779 779
780 780 inline bool do_yield_check(uint worker_i = 0);
781 781 inline bool should_yield();
782 782
783 783 // Called to abort the marking cycle after a Full GC takes place.
784 784 void abort();
785 785
786 786 // This prints the global/local fingers. It is used for debugging.
787 787 NOT_PRODUCT(void print_finger();)
788 788
789 789 void print_summary_info();
790 790
791 791 void print_worker_threads_on(outputStream* st) const;
792 792
793 793 // The following indicate whether a given verbose level has been
794 794 // set. Notice that anything above stats is conditional on
795 795 // _MARKING_VERBOSE_ having been set to 1
796 796 bool verbose_stats() {
797 797 return _verbose_level >= stats_verbose;
798 798 }
799 799 bool verbose_low() {
800 800 return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
801 801 }
802 802 bool verbose_medium() {
803 803 return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
804 804 }
805 805 bool verbose_high() {
806 806 return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
807 807 }
808 808
809 - // Counting data structure accessors
809 + // Liveness counting
810 +
811 + // Utility routine to set an exclusive range of cards on the given
812 + // card liveness bitmap
813 + inline void set_card_bitmap_range(BitMap* card_bm,
814 + BitMap::idx_t start_idx,
815 + BitMap::idx_t end_idx,
816 + bool is_par);
810 817
811 818 // Returns the card number of the bottom of the G1 heap.
812 819 // Used in biasing indices into accounting card bitmaps.
813 820 intptr_t heap_bottom_card_num() const {
814 821 return _heap_bottom_card_num;
815 822 }
816 823
817 824 // Returns the card bitmap for a given task or worker id.
818 825 BitMap* count_card_bitmap_for(uint worker_id) {
819 826 assert(0 <= worker_id && worker_id < _max_task_num, "oob");
820 827 assert(_count_card_bitmaps != NULL, "uninitialized");
821 828 BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
822 829 assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
823 830 return task_card_bm;
824 831 }
825 832
826 833 // Returns the array containing the marked bytes for each region,
827 834 // for the given worker or task id.
828 835 size_t* count_marked_bytes_array_for(uint worker_id) {
829 836 assert(0 <= worker_id && worker_id < _max_task_num, "oob");
830 837 assert(_count_marked_bytes != NULL, "uninitialized");
831 838 size_t* marked_bytes_array = _count_marked_bytes[worker_id];
832 839 assert(marked_bytes_array != NULL, "uninitialized");
833 840 return marked_bytes_array;
834 841 }
835 842
836 843 // Returns the index in the liveness accounting card table bitmap
837 844 // for the given address
838 845 inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);
839 846
840 847 // Counts the size of the given memory region in the given
841 848 // marked_bytes array slot for the given HeapRegion.
842 849 // Sets the bits in the given card bitmap that are associated with the
843 850 // cards that are spanned by the memory region.
844 851 inline void count_region(MemRegion mr, HeapRegion* hr,
845 852 size_t* marked_bytes_array,
846 853 BitMap* task_card_bm);
847 854
848 855 // Counts the given memory region in the task/worker counting
849 856 // data structures for the given worker id.
850 857 inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);
851 858
852 859 // Counts the given memory region in the task/worker counting
853 860 // data structures for the given worker id.
854 861 inline void count_region(MemRegion mr, uint worker_id);
855 862
856 863 // Counts the given object in the given task/worker counting
857 864 // data structures.
858 865 inline void count_object(oop obj, HeapRegion* hr,
859 866 size_t* marked_bytes_array,
860 867 BitMap* task_card_bm);
861 868
862 869 // Counts the given object in the task/worker counting data
863 870 // structures for the given worker id.
864 871 inline void count_object(oop obj, HeapRegion* hr, uint worker_id);
865 872
866 873 // Attempts to mark the given object and, if successful, counts
867 874 // the object in the given task/worker counting structures.
868 875 inline bool par_mark_and_count(oop obj, HeapRegion* hr,
869 876 size_t* marked_bytes_array,
870 877 BitMap* task_card_bm);
871 878
872 879 // Attempts to mark the given object and, if successful, counts
873 880 // the object in the task/worker counting structures for the
874 881 // given worker id.
875 882 inline bool par_mark_and_count(oop obj, size_t word_size,
876 883 HeapRegion* hr, uint worker_id);
877 884
878 885 // Attempts to mark the given object and, if successful, counts
879 886 // the object in the task/worker counting structures for the
880 887 // given worker id.
881 888 inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);
882 889
883 890 // Similar to the above routine but we don't know the heap region that
884 891 // contains the object to be marked/counted, which this routine looks up.
885 892 inline bool par_mark_and_count(oop obj, uint worker_id);
886 893
887 894 // Similar to the above routine but there are times when we cannot
888 895 // safely calculate the size of obj due to races and we, therefore,
889 896 // pass the size in as a parameter. It is the caller's responsibility
890 897 // to ensure that the size passed in for obj is valid.
891 898 inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
892 899
893 900 // Unconditionally mark the given object, and unconditionally count
894 901 // the object in the counting structures for worker id 0.
895 902 // Should *not* be called from parallel code.
896 903 inline bool mark_and_count(oop obj, HeapRegion* hr);
897 904
898 905 // Similar to the above routine but we don't know the heap region that
899 906 // contains the object to be marked/counted, which this routine looks up.
900 907 // Should *not* be called from parallel code.
901 908 inline bool mark_and_count(oop obj);
902 909
903 910 protected:
904 911 // Clear all the per-task bitmaps and arrays used to store the
905 912 // counting data.
906 913 void clear_all_count_data();
907 914
908 915 // Aggregates the counting data for each worker/task
909 916 // that was constructed while marking. Also sets
910 917 // the amount of marked bytes for each region and
911 918 // the top at concurrent mark count.
912 919 void aggregate_count_data();
913 920
914 921 // Verification routine
915 922 void verify_count_data();
916 923 };
917 924
918 925 // A class representing a marking task.
919 926 class CMTask : public TerminatorTerminator {
920 927 private:
921 928 enum PrivateConstants {
922 929 // the regular clock call is called once the scanned words reaches
923 930 // this limit
924 931 words_scanned_period = 12*1024,
925 932 // the regular clock call is called once the number of visited
926 933 // references reaches this limit
927 934 refs_reached_period = 384,
928 935 // initial value for the hash seed, used in the work stealing code
929 936 init_hash_seed = 17,
930 937 // how many entries will be transferred between global stack and
931 938 // local queues
932 939 global_stack_transfer_size = 16
933 940 };
934 941
935 942 int _task_id;
936 943 G1CollectedHeap* _g1h;
937 944 ConcurrentMark* _cm;
938 945 CMBitMap* _nextMarkBitMap;
939 946 // the task queue of this task
940 947 CMTaskQueue* _task_queue;
941 948 private:
942 949 // the task queue set---needed for stealing
943 950 CMTaskQueueSet* _task_queues;
944 951 // indicates whether the task has been claimed---this is only for
945 952 // debugging purposes
946 953 bool _claimed;
947 954
948 955 // number of calls to this task
949 956 int _calls;
950 957
951 958 // when the virtual timer reaches this time, the marking step should
952 959 // exit
953 960 double _time_target_ms;
954 961 // the start time of the current marking step
955 962 double _start_time_ms;
956 963
957 964 // the oop closure used for iterations over oops
958 965 G1CMOopClosure* _cm_oop_closure;
959 966
960 967 // the region this task is scanning, NULL if we're not scanning any
961 968 HeapRegion* _curr_region;
962 969 // the local finger of this task, NULL if we're not scanning a region
963 970 HeapWord* _finger;
964 971 // limit of the region this task is scanning, NULL if we're not scanning one
965 972 HeapWord* _region_limit;
966 973
967 974 // the number of words this task has scanned
968 975 size_t _words_scanned;
969 976 // When _words_scanned reaches this limit, the regular clock is
970 977 // called. Notice that this might be decreased under certain
971 978 // circumstances (i.e. when we believe that we did an expensive
972 979 // operation).
973 980 size_t _words_scanned_limit;
974 981 // the initial value of _words_scanned_limit (i.e. what it was
975 982 // before it was decreased).
976 983 size_t _real_words_scanned_limit;
977 984
978 985 // the number of references this task has visited
979 986 size_t _refs_reached;
980 987 // When _refs_reached reaches this limit, the regular clock is
981 988 // called. Notice that this might be decreased under certain
982 989 // circumstances (i.e. when we believe that we did an expensive
983 990 // operation).
984 991 size_t _refs_reached_limit;
985 992 // the initial value of _refs_reached_limit (i.e. what it was before
986 993 // it was decreased).
987 994 size_t _real_refs_reached_limit;
988 995
989 996 // used by the work stealing stuff
990 997 int _hash_seed;
991 998 // if this is true, then the task has aborted for some reason
992 999 bool _has_aborted;
993 1000 // set when the task aborts because it has met its time quota
994 1001 bool _has_timed_out;
995 1002 // true when we're draining SATB buffers; this avoids the task
996 1003 // aborting due to SATB buffers being available (as we're already
997 1004 // dealing with them)
998 1005 bool _draining_satb_buffers;
999 1006
1000 1007 // number sequence of past step times
1001 1008 NumberSeq _step_times_ms;
1002 1009 // elapsed time of this task
1003 1010 double _elapsed_time_ms;
1004 1011 // termination time of this task
1005 1012 double _termination_time_ms;
1006 1013 // when this task got into the termination protocol
1007 1014 double _termination_start_time_ms;
1008 1015
1009 1016 // true when the task is during a concurrent phase, false when it is
1010 1017 // in the remark phase (so, in the latter case, we do not have to
1011 1018 // check all the things that we have to check during the concurrent
1012 1019 // phase, i.e. SATB buffer availability...)
1013 1020 bool _concurrent;
1014 1021
1015 1022 TruncatedSeq _marking_step_diffs_ms;
1016 1023
1017 1024 // Counting data structures. Embedding the task's marked_bytes_array
1018 1025 // and card bitmap into the actual task saves having to go through
1019 1026 // the ConcurrentMark object.
1020 1027 size_t* _marked_bytes_array;
1021 1028 BitMap* _card_bm;
1022 1029
1023 1030 // LOTS of statistics related to this task
1024 1031 #if _MARKING_STATS_
1025 1032 NumberSeq _all_clock_intervals_ms;
1026 1033 double _interval_start_time_ms;
1027 1034
1028 1035 int _aborted;
1029 1036 int _aborted_overflow;
1030 1037 int _aborted_cm_aborted;
1031 1038 int _aborted_yield;
1032 1039 int _aborted_timed_out;
1033 1040 int _aborted_satb;
1034 1041 int _aborted_termination;
1035 1042
1036 1043 int _steal_attempts;
1037 1044 int _steals;
1038 1045
1039 1046 int _clock_due_to_marking;
1040 1047 int _clock_due_to_scanning;
1041 1048
1042 1049 int _local_pushes;
1043 1050 int _local_pops;
1044 1051 int _local_max_size;
1045 1052 int _objs_scanned;
1046 1053
1047 1054 int _global_pushes;
1048 1055 int _global_pops;
1049 1056 int _global_max_size;
1050 1057
1051 1058 int _global_transfers_to;
1052 1059 int _global_transfers_from;
1053 1060
1054 1061 int _regions_claimed;
1055 1062 int _objs_found_on_bitmap;
1056 1063
1057 1064 int _satb_buffers_processed;
1058 1065 #endif // _MARKING_STATS_
1059 1066
1060 1067 // it updates the local fields after this task has claimed
1061 1068 // a new region to scan
1062 1069 void setup_for_region(HeapRegion* hr);
1063 1070 // it brings up-to-date the limit of the region
1064 1071 void update_region_limit();
1065 1072
1066 1073 // called when either the words scanned or the refs visited limit
1067 1074 // has been reached
1068 1075 void reached_limit();
1069 1076 // recalculates the words scanned and refs visited limits
1070 1077 void recalculate_limits();
1071 1078 // decreases the words scanned and refs visited limits when we reach
1072 1079 // an expensive operation
1073 1080 void decrease_limits();
1074 1081 // it checks whether the words scanned or refs visited reached their
1075 1082 // respective limit and calls reached_limit() if they have
1076 1083 void check_limits() {
1077 1084 if (_words_scanned >= _words_scanned_limit ||
1078 1085 _refs_reached >= _refs_reached_limit) {
1079 1086 reached_limit();
1080 1087 }
1081 1088 }
1082 1089 // this is supposed to be called regularly during a marking step as
1083 1090 // it checks a bunch of conditions that might cause the marking step
1084 1091 // to abort
1085 1092 void regular_clock_call();
1086 1093 bool concurrent() { return _concurrent; }
1087 1094
1088 1095 public:
1089 1096 // It resets the task; it should be called right at the beginning of
1090 1097 // a marking phase.
1091 1098 void reset(CMBitMap* _nextMarkBitMap);
1092 1099 // it clears all the fields that correspond to a claimed region.
1093 1100 void clear_region_fields();
1094 1101
1095 1102 void set_concurrent(bool concurrent) { _concurrent = concurrent; }
1096 1103
1097 1104 // The main method of this class which performs a marking step
1098 1105 // trying not to exceed the given duration. However, it might exit
1099 1106 // prematurely, according to some conditions (i.e. SATB buffers are
1100 1107 // available for processing).
1101 1108 void do_marking_step(double target_ms, bool do_stealing, bool do_termination);
1102 1109
1103 1110 // These two calls start and stop the timer
1104 1111 void record_start_time() {
1105 1112 _elapsed_time_ms = os::elapsedTime() * 1000.0;
1106 1113 }
1107 1114 void record_end_time() {
1108 1115 _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
1109 1116 }
1110 1117
1111 1118 // returns the task ID
1112 1119 int task_id() { return _task_id; }
1113 1120
1114 1121 // From TerminatorTerminator. It determines whether this task should
1115 1122 // exit the termination protocol after it's entered it.
1116 1123 virtual bool should_exit_termination();
1117 1124
1118 1125 // Resets the local region fields after a task has finished scanning a
1119 1126 // region; or when they have become stale as a result of the region
1120 1127 // being evacuated.
1121 1128 void giveup_current_region();
1122 1129
1123 1130 HeapWord* finger() { return _finger; }
1124 1131
1125 1132 bool has_aborted() { return _has_aborted; }
1126 1133 void set_has_aborted() { _has_aborted = true; }
1127 1134 void clear_has_aborted() { _has_aborted = false; }
1128 1135 bool has_timed_out() { return _has_timed_out; }
1129 1136 bool claimed() { return _claimed; }
1130 1137
1131 1138 void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
1132 1139
1133 1140 // It grays the object by marking it and, if necessary, pushing it
1134 1141 // on the local queue
1135 1142 inline void deal_with_reference(oop obj);
1136 1143
1137 1144 // It scans an object and visits its children.
1138 1145 void scan_object(oop obj);
1139 1146
1140 1147 // It pushes an object on the local queue.
1141 1148 inline void push(oop obj);
1142 1149
1143 1150 // These two move entries to/from the global stack.
1144 1151 void move_entries_to_global_stack();
1145 1152 void get_entries_from_global_stack();
1146 1153
1147 1154 // It pops and scans objects from the local queue. If partially is
1148 1155 // true, then it stops when the queue size reaches a given limit. If
1149 1156 // partially is false, then it stops when the queue is empty.
1150 1157 void drain_local_queue(bool partially);
1151 1158 // It moves entries from the global stack to the local queue and
1152 1159 // drains the local queue. If partially is true, then it stops when
1153 1160 // both the global stack and the local queue reach a given size. If
1154 1161 // partially is false, it tries to empty them totally.
1155 1162 void drain_global_stack(bool partially);
1156 1163 // It keeps picking SATB buffers and processing them until no SATB
1157 1164 // buffers are available.
1158 1165 void drain_satb_buffers();
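// Illustrative sketch (not part of this changeset): do_marking_step()
// interleaves these drains roughly as follows:
//
//   drain_local_queue(true);    // trim the local queue to its target size
//   drain_global_stack(true);   // ditto for the global stack
//   // ... steady-state marking work ...
//   drain_satb_buffers();       // process any pending SATB buffers
//   drain_local_queue(false);   // then empty both structures completely
//   drain_global_stack(false);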
1159 1166
1160 1167 // moves the local finger to a new location
1161 1168 inline void move_finger_to(HeapWord* new_finger) {
1162 1169 assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
1163 1170 _finger = new_finger;
1164 1171 }
1165 1172
1166 1173 CMTask(int task_num, ConcurrentMark *cm,
1167 1174 size_t* marked_bytes, BitMap* card_bm,
1168 1175 CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
1169 1176
1170 1177 // it prints statistics associated with this task
1171 1178 void print_stats();
1172 1179
1173 1180 #if _MARKING_STATS_
1174 1181 void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
1175 1182 #endif // _MARKING_STATS_
1176 1183 };
1177 1184
1178 1185 // Class that's used to print out per-region liveness
1179 1186 // information. It's currently used at the end of marking and also
1180 1187 // after we sort the old regions at the end of the cleanup operation.
1181 1188 class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
1182 1189 private:
1183 1190 outputStream* _out;
1184 1191
1185 1192 // Accumulators for these values.
1186 1193 size_t _total_used_bytes;
1187 1194 size_t _total_capacity_bytes;
1188 1195 size_t _total_prev_live_bytes;
1189 1196 size_t _total_next_live_bytes;
1190 1197
1191 1198 // These are set up when we come across a "starts humongous" region
1192 1199 // (as this is where most of this information is stored, not in the
1193 1200 // subsequent "continues humongous" regions). After that, for every
1194 1201 // region in a given humongous region series we deduce the right
1195 1202 // values for it by simply subtracting the appropriate amount from
1196 1203 // these fields. All these values should reach 0 after we've visited
1197 1204 // the last region in the series.
1198 1205 size_t _hum_used_bytes;
1199 1206 size_t _hum_capacity_bytes;
1200 1207 size_t _hum_prev_live_bytes;
1201 1208 size_t _hum_next_live_bytes;
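// Illustrative example (not part of this changeset): if a three-region
// humongous series reports 5 MB used in its "starts humongous" region,
// _hum_used_bytes starts at 5 MB and each visited region subtracts the share
// attributed to it, so the accumulator reaches exactly 0 at the last
// "continues humongous" region.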
1202 1209
1203 1210 static double perc(size_t val, size_t total) {
1204 1211 if (total == 0) {
1205 1212 return 0.0;
1206 1213 } else {
1207 1214 return 100.0 * ((double) val / (double) total);
1208 1215 }
1209 1216 }
1210 1217
1211 1218 static double bytes_to_mb(size_t val) {
1212 1219 return (double) val / (double) M;
1213 1220 }
1214 1221
1215 1222 // See the .cpp file.
1216 1223 size_t get_hum_bytes(size_t* hum_bytes);
1217 1224 void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
1218 1225 size_t* prev_live_bytes, size_t* next_live_bytes);
1219 1226
1220 1227 public:
1221 1228 // The header and footer are printed in the constructor and
1222 1229 // destructor respectively.
1223 1230 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name);
1224 1231 virtual bool doHeapRegion(HeapRegion* r);
1225 1232 ~G1PrintRegionLivenessInfoClosure();
1226 1233 };
1227 1234
1228 1235 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP