rev 3708 : 8000244: G1: Ergonomically set MarkStackSize and use virtual space for global marking stack
Summary: Set MarkStackSize based on the number of parallel marking threads, with a reasonable minimum. Expand the marking stack, up to a reasonable maximum, if we have to restart marking due to an overflow. Allocate the underlying space for the marking stack from virtual memory.
Reviewed-by: jmasa
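
As a point of reference for the summary above, here is a minimal, standalone sketch of ergonomic sizing of this kind: scale the stack with the number of parallel marking threads and clamp it to a [min, max] range. The constants and the per-thread scaling factor are illustrative assumptions; the patch itself derives its bounds from the MarkStackSize/MarkStackSizeMax flags.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Illustrative bounds and scaling factor -- assumptions for this sketch,
// not the values the patch derives from MarkStackSize/MarkStackSizeMax.
static const size_t kMarkStackSizeMin = 32 * 1024;        // entries
static const size_t kMarkStackSizeMax = 4 * 1024 * 1024;  // entries
static const size_t kEntriesPerThread = 16 * 1024;        // entries per marking thread

// Scale the global mark stack with the parallel marking thread count,
// clamped to a reasonable [min, max] range.
size_t ergonomic_mark_stack_size(unsigned parallel_marking_threads) {
  size_t size = (size_t)parallel_marking_threads * kEntriesPerThread;
  return std::min(kMarkStackSizeMax, std::max(kMarkStackSizeMin, size));
}

int main() {
  printf("8 threads -> %zu entries\n", ergonomic_mark_stack_size(8));
  return 0;
}
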
--- old/src/share/vm/gc_implementation/g1/concurrentMark.hpp
+++ new/src/share/vm/gc_implementation/g1/concurrentMark.hpp
1 1 /*
2 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
26 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
27 27
28 28 #include "gc_implementation/g1/heapRegionSets.hpp"
29 29 #include "utilities/taskqueue.hpp"
30 30
31 31 class G1CollectedHeap;
32 32 class CMTask;
33 33 typedef GenericTaskQueue<oop, mtGC> CMTaskQueue;
34 34 typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;
35 35
36 36 // Closure used by CM during concurrent reference discovery
37 37 // and reference processing (during remarking) to determine
38 38 // if a particular object is alive. It is primarily used
39 39 // to determine if referents of discovered reference objects
40 40 // are alive. An instance is also embedded into the
41 41 // reference processor as the _is_alive_non_header field
42 42 class G1CMIsAliveClosure: public BoolObjectClosure {
43 43 G1CollectedHeap* _g1;
44 44 public:
45 45 G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
46 46
47 47 void do_object(oop obj) {
48 48 ShouldNotCallThis();
49 49 }
50 50 bool do_object_b(oop obj);
51 51 };
52 52
53 53 // A generic CM bit map. This is essentially a wrapper around the BitMap
54 54 // class, with one bit per (1<<_shifter) HeapWords.
55 55
56 56 class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
57 57 protected:
58 58 HeapWord* _bmStartWord; // base address of range covered by map
59 59 size_t _bmWordSize; // map size (in #HeapWords covered)
60 60 const int _shifter; // map to char or bit
61 61 VirtualSpace _virtual_space; // underlying the bit map
62 62 BitMap _bm; // the bit map itself
63 63
64 64 public:
65 65 // constructor
66 - CMBitMapRO(ReservedSpace rs, int shifter);
66 + CMBitMapRO(int shifter);
67 67
68 68 enum { do_yield = true };
69 69
70 70 // inquiries
71 71 HeapWord* startWord() const { return _bmStartWord; }
72 72 size_t sizeInWords() const { return _bmWordSize; }
73 73 // the following is one past the last word in space
74 74 HeapWord* endWord() const { return _bmStartWord + _bmWordSize; }
75 75
76 76 // read marks
77 77
78 78 bool isMarked(HeapWord* addr) const {
79 79 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
80 80 "outside underlying space?");
81 81 return _bm.at(heapWordToOffset(addr));
82 82 }
83 83
84 84 // iteration
85 85 inline bool iterate(BitMapClosure* cl, MemRegion mr);
86 86 inline bool iterate(BitMapClosure* cl);
87 87
88 88 // Return the address corresponding to the next marked bit at or after
89 89 // "addr", and before "limit", if "limit" is non-NULL. If there is no
90 90 // such bit, returns "limit" if that is non-NULL, or else "endWord()".
91 91 HeapWord* getNextMarkedWordAddress(HeapWord* addr,
92 92 HeapWord* limit = NULL) const;
93 93 // Return the address corresponding to the next unmarked bit at or after
94 94 // "addr", and before "limit", if "limit" is non-NULL. If there is no
95 95 // such bit, returns "limit" if that is non-NULL, or else "endWord()".
96 96 HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
97 97 HeapWord* limit = NULL) const;
98 98
99 99 // conversion utilities
100 100 // XXX Fix these so that offsets are size_t's...
101 101 HeapWord* offsetToHeapWord(size_t offset) const {
102 102 return _bmStartWord + (offset << _shifter);
103 103 }
104 104 size_t heapWordToOffset(HeapWord* addr) const {
105 105 return pointer_delta(addr, _bmStartWord) >> _shifter;
106 106 }
107 107 int heapWordDiffToOffsetDiff(size_t diff) const;
108 108 HeapWord* nextWord(HeapWord* addr) {
109 109 return offsetToHeapWord(heapWordToOffset(addr) + 1);
110 110 }
111 111
112 112 // debugging
113 113 NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
114 114 };
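
An aside on the conversion utilities in CMBitMapRO above: since pointer_delta() yields a distance in words, heapWordToOffset() and offsetToHeapWord() are exact inverses for word-aligned addresses at any _shifter. A standalone sketch of the same arithmetic, with HeapWord modeled as a plain integer type and the test harness entirely hypothetical:

#include <cassert>
#include <cstddef>
#include <cstdint>

typedef uintptr_t HeapWord;  // stand-in for HotSpot's HeapWord

struct BitMapConv {
  HeapWord* _bmStartWord;  // base address of the covered range
  int       _shifter;      // one bit per (1 << _shifter) heap words

  HeapWord* offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
  size_t heapWordToOffset(HeapWord* addr) const {
    // pointer subtraction already yields a word count, like pointer_delta()
    return (size_t)(addr - _bmStartWord) >> _shifter;
  }
};

int main() {
  HeapWord heap[64] = {0};
  BitMapConv bm = { heap, 0 };  // _shifter == 0: one bit per heap word
  HeapWord* addr = heap + 17;
  size_t bit = bm.heapWordToOffset(addr);
  assert(bit == 17);
  assert(bm.offsetToHeapWord(bit) == addr);  // the pair round-trips exactly
  return 0;
}
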
115 115
116 116 class CMBitMap : public CMBitMapRO {
117 117
118 118 public:
119 119 // constructor
120 - CMBitMap(ReservedSpace rs, int shifter) :
121 - CMBitMapRO(rs, shifter) {}
120 + CMBitMap(int shifter) :
121 + CMBitMapRO(shifter) {}
122 +
123 +   // Allocates the backing store for the marking bitmap
124 + bool allocate(ReservedSpace heap_rs);
122 125
123 126 // write marks
124 127 void mark(HeapWord* addr) {
125 128 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
126 129 "outside underlying space?");
127 130 _bm.set_bit(heapWordToOffset(addr));
128 131 }
129 132 void clear(HeapWord* addr) {
130 133 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
131 134 "outside underlying space?");
132 135 _bm.clear_bit(heapWordToOffset(addr));
133 136 }
134 137 bool parMark(HeapWord* addr) {
135 138 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
136 139 "outside underlying space?");
137 140 return _bm.par_set_bit(heapWordToOffset(addr));
138 141 }
139 142 bool parClear(HeapWord* addr) {
140 143 assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
141 144 "outside underlying space?");
142 145 return _bm.par_clear_bit(heapWordToOffset(addr));
143 146 }
144 147 void markRange(MemRegion mr);
145 148 void clearAll();
146 149 void clearRange(MemRegion mr);
147 150
148 151 // Starting at the bit corresponding to "addr" (inclusive), find the next
149 152 // "1" bit, if any. This bit starts some run of consecutive "1"'s; find
150 153 // the end of this run (stopping at "end_addr"). Return the MemRegion
151 154 // covering from the start of the region corresponding to the first bit
152 155 // of the run to the end of the region corresponding to the last bit of
153 156 // the run. If there is no "1" bit at or after "addr", return an empty
154 157 // MemRegion.
155 158 MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
156 159 };
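
The new allocate(ReservedSpace) entry point above moves the bitmap's backing store out of the constructor and into virtual memory. A rough standalone sketch of the sizing logic it implies; calloc stands in for reserving and committing a VirtualSpace, and all names are illustrative rather than the patch's actual code:

#include <cstddef>
#include <cstdlib>

// Rough analogue of CMBitMap::allocate(ReservedSpace): size the bitmap from
// the covered heap range, then obtain zeroed backing store. calloc stands in
// for reserving/committing a VirtualSpace; all names here are illustrative.
struct MarkBitMapSketch {
  size_t _bmWordSize;      // covered size in heap words
  int    _shifter;         // one bit per (1 << _shifter) heap words
  unsigned char* _store;   // backing store (a VirtualSpace in HotSpot)

  bool allocate(size_t heap_bytes, size_t bytes_per_word) {
    _bmWordSize = heap_bytes / bytes_per_word;
    size_t bits  = (_bmWordSize >> _shifter) + 1;  // bits needed to cover the heap
    size_t bytes = (bits + 7) / 8;                 // round up to whole bytes
    _store = (unsigned char*)calloc(bytes, 1);     // zeroed, like a fresh commit
    return _store != NULL;  // false lets the caller abort initialization
  }
};

int main() {
  MarkBitMapSketch bm = { 0, 0, NULL };
  return bm.allocate(64 * 1024 * 1024, 8) ? 0 : 1;  // 64M heap, 8-byte words
}
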
157 160
158 -// Represents a marking stack used by the CM collector.
159 -// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
161 +// Represents a marking stack used by ConcurrentMarking in the G1 collector.
160 162 class CMMarkStack VALUE_OBJ_CLASS_SPEC {
163 + VirtualSpace _virtual_space; // Underlying backing store for actual stack
161 164 ConcurrentMark* _cm;
162 165 oop* _base; // bottom of stack
163 - jint _index; // one more than last occupied index
164 - jint _capacity; // max #elements
165 - jint _saved_index; // value of _index saved at start of GC
166 - NOT_PRODUCT(jint _max_depth;) // max depth plumbed during run
166 + jint _index; // one more than last occupied index
167 + jint _capacity; // max #elements
168 + jint _saved_index; // value of _index saved at start of GC
169 + NOT_PRODUCT(jint _max_depth;) // max depth plumbed during run
167 170
168 - bool _overflow;
171 + bool _overflow;
172 + bool _should_expand;
169 173 DEBUG_ONLY(bool _drain_in_progress;)
170 174 DEBUG_ONLY(bool _drain_in_progress_yields;)
171 175
172 176 public:
173 177 CMMarkStack(ConcurrentMark* cm);
174 178 ~CMMarkStack();
175 179
176 - void allocate(size_t size);
180 +#ifndef PRODUCT
181 + jint max_depth() const {
182 + return _max_depth;
183 + }
184 +#endif
185 +
186 + bool allocate(size_t capacity);
177 187
178 188 oop pop() {
179 189 if (!isEmpty()) {
180 190 return _base[--_index] ;
181 191 }
182 192 return NULL;
183 193 }
184 194
185 195 // If overflow happens, don't do the push, and record the overflow.
186 196 // *Requires* that "ptr" is already marked.
187 197 void push(oop ptr) {
188 198 if (isFull()) {
189 199 // Record overflow.
190 200 _overflow = true;
191 201 return;
192 202 } else {
193 203 _base[_index++] = ptr;
194 204 NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
195 205 }
196 206 }
197 207 // Non-block impl. Note: concurrency is allowed only with other
198 208 // "par_push" operations, not with "pop" or "drain". We would need
199 209 // parallel versions of them if such concurrency was desired.
200 210 void par_push(oop ptr);
201 211
202 212 // Pushes the first "n" elements of "ptr_arr" on the stack.
203 213 // Non-block impl. Note: concurrency is allowed only with other
204 214 // "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
205 215 void par_adjoin_arr(oop* ptr_arr, int n);
206 216
207 217 // Pushes the first "n" elements of "ptr_arr" on the stack.
208 218 // Locking impl: concurrency is allowed only with
209 219 // "par_push_arr" and/or "par_pop_arr" operations, which use the same
210 220 // locking strategy.
211 221 void par_push_arr(oop* ptr_arr, int n);
212 222
213 223 // If returns false, the array was empty. Otherwise, removes up to "max"
214 224 // elements from the stack, and transfers them to "ptr_arr" in an
215 225 // unspecified order. The actual number transferred is given in "n" ("n
216 226 // == 0" is deliberately redundant with the return value.) Locking impl:
217 227 // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
218 228 // operations, which use the same locking strategy.
219 229 bool par_pop_arr(oop* ptr_arr, int max, int* n);
220 230
221 231 // Drain the mark stack, applying the given closure to all fields of
222 232 // objects on the stack. (That is, continue until the stack is empty,
223 233 // even if closure applications add entries to the stack.) The "bm"
224 234 // argument, if non-null, may be used to verify that only marked objects
225 235 // are on the mark stack. If "yield_after" is "true", then the
226 236 // concurrent marker performing the drain offers to yield after
227 237 // processing each object. If a yield occurs, stops the drain operation
228 238 // and returns false. Otherwise, returns true.
229 239 template<class OopClosureClass>
230 240 bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);
231 241
232 242 bool isEmpty() { return _index == 0; }
233 243 bool isFull() { return _index == _capacity; }
234 - int maxElems() { return _capacity; }
244 + int maxElems() { return _capacity; }
235 245
236 246 bool overflow() { return _overflow; }
237 247 void clear_overflow() { _overflow = false; }
238 248
249 + bool should_expand() const { return _should_expand; }
250 + void set_should_expand();
251 +
252 + // Expand the stack, typically in response to an overflow condition
253 + void expand();
254 +
239 255 int size() { return _index; }
240 256
241 257 void setEmpty() { _index = 0; clear_overflow(); }
242 258
243 259 // Record the current index.
244 260 void note_start_of_gc();
245 261
246 262 // Make sure that we have not added any entries to the stack during GC.
247 263 void note_end_of_gc();
248 264
249 265 // iterate over the oops in the mark stack, up to the bound recorded via
250 266 // the call above.
251 267 void oops_do(OopClosure* f);
252 268 };
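
The new should_expand()/expand() pair implements the overflow handling described in the summary: if marking has to restart because the stack overflowed, the stack is grown first. A simplified standalone sketch of one plausible expand() policy, doubling the capacity up to a fixed ceiling; kMaxCapacity stands in for the MarkStackSizeMax bound and malloc/free for re-allocating the VirtualSpace backing store:

#include <algorithm>
#include <cstddef>
#include <cstdlib>

static const size_t kMaxCapacity = 4 * 1024 * 1024;  // entries (assumed ceiling)

struct MarkStackSketch {
  void** _base;       // bottom of stack
  size_t _capacity;   // max #elements
  bool   _should_expand;

  void expand() {
    _should_expand = false;
    if (_capacity == kMaxCapacity) {
      return;  // already at the ceiling; keep the current stack
    }
    size_t new_capacity = std::min(kMaxCapacity, _capacity * 2);
    void** new_base = (void**)malloc(new_capacity * sizeof(void*));
    if (new_base == NULL) {
      return;  // allocation failed; fall back to the old, smaller stack
    }
    // Expansion happens between marking attempts, when the stack is empty,
    // so there is nothing to copy across.
    free(_base);
    _base = new_base;
    _capacity = new_capacity;
  }
};

int main() {
  MarkStackSketch s = { (void**)malloc(1024 * sizeof(void*)), 1024, true };
  s.expand();  // grows 1024 -> 2048 entries
  free(s._base);
  return 0;
}
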
253 269
254 270 class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
255 271 private:
256 272 #ifndef PRODUCT
257 273 uintx _num_remaining;
258 274 bool _force;
259 275 #endif // !defined(PRODUCT)
260 276
261 277 public:
262 278 void init() PRODUCT_RETURN;
263 279 void update() PRODUCT_RETURN;
264 280 bool should_force() PRODUCT_RETURN_( return false; );
265 281 };
266 282
267 283 // this will enable a variety of different statistics per GC task
268 284 #define _MARKING_STATS_ 0
269 285 // this will enable the higher verbose levels
270 286 #define _MARKING_VERBOSE_ 0
271 287
272 288 #if _MARKING_STATS_
273 289 #define statsOnly(statement) \
274 290 do { \
275 291 statement ; \
276 292 } while (0)
277 293 #else // _MARKING_STATS_
278 294 #define statsOnly(statement) \
279 295 do { \
280 296 } while (0)
281 297 #endif // _MARKING_STATS_
282 298
283 299 typedef enum {
284 300 no_verbose = 0, // verbose turned off
285 301 stats_verbose, // only prints stats at the end of marking
286 302 low_verbose, // low verbose, mostly per region and per major event
287 303 medium_verbose, // a bit more detailed than low
288 304 high_verbose // per object verbose
289 305 } CMVerboseLevel;
290 306
291 307 class YoungList;
292 308
293 309 // Root Regions are regions that are not empty at the beginning of a
294 310 // marking cycle and which we might collect during an evacuation pause
295 311 // while the cycle is active. Given that, during evacuation pauses, we
296 312 // do not copy objects that are explicitly marked, what we have to do
297 313 // for the root regions is to scan them and mark all objects reachable
298 314 // from them. According to the SATB assumptions, we only need to visit
299 315 // each object once during marking. So, as long as we finish this scan
300 316 // before the next evacuation pause, we can copy the objects from the
301 317 // root regions without having to mark them or do anything else to them.
302 318 //
303 319 // Currently, we only support root region scanning once (at the start
304 320 // of the marking cycle) and the root regions are all the survivor
305 321 // regions populated during the initial-mark pause.
306 322 class CMRootRegions VALUE_OBJ_CLASS_SPEC {
307 323 private:
308 324 YoungList* _young_list;
309 325 ConcurrentMark* _cm;
310 326
311 327 volatile bool _scan_in_progress;
312 328 volatile bool _should_abort;
313 329 HeapRegion* volatile _next_survivor;
314 330
315 331 public:
316 332 CMRootRegions();
317 333 // We actually do most of the initialization in this method.
318 334 void init(G1CollectedHeap* g1h, ConcurrentMark* cm);
319 335
320 336 // Reset the claiming / scanning of the root regions.
321 337 void prepare_for_scan();
322 338
323 339 // Forces get_next() to return NULL so that the iteration aborts early.
324 340 void abort() { _should_abort = true; }
325 341
326 342   // Return true if the CM thread is actively scanning root regions,
327 343 // false otherwise.
328 344 bool scan_in_progress() { return _scan_in_progress; }
329 345
330 346 // Claim the next root region to scan atomically, or return NULL if
331 347 // all have been claimed.
332 348 HeapRegion* claim_next();
333 349
334 350 // Flag that we're done with root region scanning and notify anyone
335 351 // who's waiting on it. If aborted is false, assume that all regions
336 352 // have been claimed.
337 353 void scan_finished();
338 354
339 355 // If CM threads are still scanning root regions, wait until they
340 356 // are done. Return true if we had to wait, false otherwise.
341 357 bool wait_until_scan_finished();
342 358 };
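
claim_next() above is documented as an atomic claim over the root regions. A minimal standalone analogue using std::atomic, where compare_exchange plays the role an Atomic::cmpxchg would in HotSpot; the list layout and names are illustrative, not the actual implementation:

#include <atomic>
#include <cstddef>

struct Region {
  Region* next;
};

// Workers race to pop the head of the region list; compare_exchange_weak
// retries with a refreshed head until the claim succeeds or the list is empty.
std::atomic<Region*> next_root_region(NULL);

Region* claim_next() {
  Region* cur = next_root_region.load();
  while (cur != NULL &&
         !next_root_region.compare_exchange_weak(cur, cur->next)) {
    // compare_exchange_weak refreshed cur with the current head; retry.
  }
  return cur;  // NULL once every root region has been claimed
}

int main() {
  Region regions[3] = { { &regions[1] }, { &regions[2] }, { NULL } };
  next_root_region.store(&regions[0]);
  while (claim_next() != NULL) { /* scan the claimed region */ }
  return 0;
}
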
343 359
344 360 class ConcurrentMarkThread;
345 361
346 362 class ConcurrentMark: public CHeapObj<mtGC> {
363 + friend class CMMarkStack;
347 364 friend class ConcurrentMarkThread;
348 365 friend class CMTask;
349 366 friend class CMBitMapClosure;
350 367 friend class CMGlobalObjectClosure;
351 368 friend class CMRemarkTask;
352 369 friend class CMConcurrentMarkingTask;
353 370 friend class G1ParNoteEndTask;
354 371 friend class CalcLiveObjectsClosure;
355 372 friend class G1CMRefProcTaskProxy;
356 373 friend class G1CMRefProcTaskExecutor;
357 374 friend class G1CMParKeepAliveAndDrainClosure;
358 375 friend class G1CMParDrainMarkingStackClosure;
359 376
360 377 protected:
361 378 ConcurrentMarkThread* _cmThread; // the thread doing the work
362 379 G1CollectedHeap* _g1h; // the heap.
363 380 uint _parallel_marking_threads; // the number of marking
364 381                                          // threads we're using
365 382 uint _max_parallel_marking_threads; // max number of marking
366 383 // threads we'll ever use
367 384 double _sleep_factor; // how much we have to sleep, with
368 385 // respect to the work we just did, to
369 386 // meet the marking overhead goal
370 387 double _marking_task_overhead; // marking target overhead for
371 388 // a single task
372 389
373 390 // same as the two above, but for the cleanup task
374 391 double _cleanup_sleep_factor;
375 392 double _cleanup_task_overhead;
376 393
377 394 FreeRegionList _cleanup_list;
378 395
379 396 // Concurrent marking support structures
380 397 CMBitMap _markBitMap1;
381 398 CMBitMap _markBitMap2;
382 399 CMBitMapRO* _prevMarkBitMap; // completed mark bitmap
383 400 CMBitMap* _nextMarkBitMap; // under-construction mark bitmap
384 401
385 402 BitMap _region_bm;
386 403 BitMap _card_bm;
387 404
388 405 // Heap bounds
389 406 HeapWord* _heap_start;
390 407 HeapWord* _heap_end;
391 408
392 409 // Root region tracking and claiming.
393 410 CMRootRegions _root_regions;
394 411
395 412 // For gray objects
396 413 CMMarkStack _markStack; // Grey objects behind global finger.
397 414 HeapWord* volatile _finger; // the global finger, region aligned,
398 415 // always points to the end of the
399 416 // last claimed region
400 417
401 418 // marking tasks
402 419   uint _max_worker_id; // maximum worker id
403 420 uint _active_tasks; // task num currently active
404 421 CMTask** _tasks; // task queue array (max_worker_id len)
405 422 CMTaskQueueSet* _task_queues; // task queue set
406 423 ParallelTaskTerminator _terminator; // for termination
407 424
408 425 // Two sync barriers that are used to synchronise tasks when an
409 426 // overflow occurs. The algorithm is the following. All tasks enter
410 427 // the first one to ensure that they have all stopped manipulating
411 428 // the global data structures. After they exit it, they re-initialise
412 429 // their data structures and task 0 re-initialises the global data
413 430 // structures. Then, they enter the second sync barrier. This
414 431   // ensures that no task starts doing work before all data
415 432 // structures (local and global) have been re-initialised. When they
416 433 // exit it, they are free to start working again.
417 434 WorkGangBarrierSync _first_overflow_barrier_sync;
418 435 WorkGangBarrierSync _second_overflow_barrier_sync;
419 436
420 437 // this is set by any task, when an overflow on the global data
421 438 // structures is detected.
422 439 volatile bool _has_overflown;
423 440 // true: marking is concurrent, false: we're in remark
424 441 volatile bool _concurrent;
425 442 // set at the end of a Full GC so that marking aborts
426 443 volatile bool _has_aborted;
427 444
428 445 // used when remark aborts due to an overflow to indicate that
429 446 // another concurrent marking phase should start
430 447 volatile bool _restart_for_overflow;
431 448
432 449 // This is true from the very start of concurrent marking until the
433 450 // point when all the tasks complete their work. It is really used
434 451 // to determine the points between the end of concurrent marking and
435 452 // time of remark.
436 453 volatile bool _concurrent_marking_in_progress;
437 454
438 455 // verbose level
439 456 CMVerboseLevel _verbose_level;
440 457
441 458 // All of these times are in ms.
442 459 NumberSeq _init_times;
443 460 NumberSeq _remark_times;
444 461 NumberSeq _remark_mark_times;
445 462 NumberSeq _remark_weak_ref_times;
446 463 NumberSeq _cleanup_times;
447 464 double _total_counting_time;
448 465 double _total_rs_scrub_time;
449 466
450 467 double* _accum_task_vtime; // accumulated task vtime
451 468
452 469 FlexibleWorkGang* _parallel_workers;
453 470
454 471 ForceOverflowSettings _force_overflow_conc;
455 472 ForceOverflowSettings _force_overflow_stw;
456 473
457 474 void weakRefsWork(bool clear_all_soft_refs);
458 475
459 476 void swapMarkBitMaps();
460 477
461 478 // It resets the global marking data structures, as well as the
462 479 // task local ones; should be called during initial mark.
463 480 void reset();
464 481 // It resets all the marking data structures.
465 482 void clear_marking_state(bool clear_overflow = true);
466 483
467 484 // It should be called to indicate which phase we're in (concurrent
468 485 // mark or remark) and how many threads are currently active.
469 486 void set_phase(uint active_tasks, bool concurrent);
470 487 // We do this after we're done with marking so that the marking data
471 488 // structures are initialised to a sensible and predictable state.
472 489 void set_non_marking_state();
473 490
474 491 // prints all gathered CM-related statistics
475 492 void print_stats();
476 493
477 494 bool cleanup_list_is_empty() {
478 495 return _cleanup_list.is_empty();
479 496 }
480 497
481 498 // accessor methods
482 499 uint parallel_marking_threads() { return _parallel_marking_threads; }
483 500 uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
484 501 double sleep_factor() { return _sleep_factor; }
485 502 double marking_task_overhead() { return _marking_task_overhead;}
486 503 double cleanup_sleep_factor() { return _cleanup_sleep_factor; }
487 504 double cleanup_task_overhead() { return _cleanup_task_overhead;}
488 505
489 506 HeapWord* finger() { return _finger; }
490 507 bool concurrent() { return _concurrent; }
491 508 uint active_tasks() { return _active_tasks; }
492 509 ParallelTaskTerminator* terminator() { return &_terminator; }
493 510
494 511 // It claims the next available region to be scanned by a marking
495 512 // task/thread. It might return NULL if the next region is empty or
496 513 // we have run out of regions. In the latter case, out_of_regions()
497 514 // determines whether we've really run out of regions or the task
498 515 // should call claim_region() again. This might seem a bit
499 516 // awkward. Originally, the code was written so that claim_region()
500 517 // either successfully returned with a non-empty region or there
501 518 // were no more regions to be claimed. The problem with this was
502 519 // that, in certain circumstances, it iterated over large chunks of
503 520 // the heap finding only empty regions and, while it was working, it
504 521   // was preventing the calling task from calling its regular clock
505 522 // method. So, this way, each task will spend very little time in
506 523 // claim_region() and is allowed to call the regular clock method
507 524 // frequently.
508 525 HeapRegion* claim_region(uint worker_id);
509 526
510 527 // It determines whether we've run out of regions to scan.
511 528 bool out_of_regions() { return _finger == _heap_end; }
512 529
513 530 // Returns the task with the given id
514 531 CMTask* task(int id) {
515 532 assert(0 <= id && id < (int) _active_tasks,
516 533 "task id not within active bounds");
517 534 return _tasks[id];
518 535 }
519 536
520 537 // Returns the task queue with the given id
521 538 CMTaskQueue* task_queue(int id) {
522 539 assert(0 <= id && id < (int) _active_tasks,
523 540 "task queue id not within active bounds");
524 541 return (CMTaskQueue*) _task_queues->queue(id);
525 542 }
526 543
527 544 // Returns the task queue set
528 545 CMTaskQueueSet* task_queues() { return _task_queues; }
529 546
530 547 // Access / manipulation of the overflow flag which is set to
531 548 // indicate that the global stack has overflown
532 549 bool has_overflown() { return _has_overflown; }
533 550 void set_has_overflown() { _has_overflown = true; }
534 551 void clear_has_overflown() { _has_overflown = false; }
535 552 bool restart_for_overflow() { return _restart_for_overflow; }
536 553
537 554 bool has_aborted() { return _has_aborted; }
538 555
539 556 // Methods to enter the two overflow sync barriers
540 557 void enter_first_sync_barrier(uint worker_id);
541 558 void enter_second_sync_barrier(uint worker_id);
542 559
543 560 ForceOverflowSettings* force_overflow_conc() {
544 561 return &_force_overflow_conc;
545 562 }
546 563
547 564 ForceOverflowSettings* force_overflow_stw() {
548 565 return &_force_overflow_stw;
549 566 }
550 567
551 568 ForceOverflowSettings* force_overflow() {
552 569 if (concurrent()) {
553 570 return force_overflow_conc();
554 571 } else {
555 572 return force_overflow_stw();
556 573 }
557 574 }
558 575
559 576 // Live Data Counting data structures...
560 577 // These data structures are initialized at the start of
561 578 // marking. They are written to while marking is active.
562 579 // They are aggregated during remark; the aggregated values
563 580 // are then used to populate the _region_bm, _card_bm, and
564 581 // the total live bytes, which are then subsequently updated
565 582 // during cleanup.
566 583
567 584 // An array of bitmaps (one bit map per task). Each bitmap
568 585 // is used to record the cards spanned by the live objects
569 586 // marked by that task/worker.
570 587 BitMap* _count_card_bitmaps;
571 588
572 589 // Used to record the number of marked live bytes
573 590 // (for each region, by worker thread).
574 591 size_t** _count_marked_bytes;
575 592
576 593 // Card index of the bottom of the G1 heap. Used for biasing indices into
577 594 // the card bitmaps.
578 595 intptr_t _heap_bottom_card_num;
579 596
597 + // Set to true when initialization is complete
598 + bool _completed_initialization;
599 +
580 600 public:
581 601 // Manipulation of the global mark stack.
582 602 // Notice that the first mark_stack_push is CAS-based, whereas the
583 603 // two below are Mutex-based. This is OK since the first one is only
584 604 // called during evacuation pauses and doesn't compete with the
585 605 // other two (which are called by the marking tasks during
586 606 // concurrent marking or remark).
587 607 bool mark_stack_push(oop p) {
588 608 _markStack.par_push(p);
589 609 if (_markStack.overflow()) {
590 610 set_has_overflown();
591 611 return false;
592 612 }
593 613 return true;
594 614 }
595 615 bool mark_stack_push(oop* arr, int n) {
596 616 _markStack.par_push_arr(arr, n);
597 617 if (_markStack.overflow()) {
598 618 set_has_overflown();
599 619 return false;
600 620 }
601 621 return true;
602 622 }
603 623 void mark_stack_pop(oop* arr, int max, int* n) {
604 624 _markStack.par_pop_arr(arr, max, n);
605 625 }
606 626 size_t mark_stack_size() { return _markStack.size(); }
607 627 size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
608 628 bool mark_stack_overflow() { return _markStack.overflow(); }
609 629 bool mark_stack_empty() { return _markStack.isEmpty(); }
610 630
611 631 CMRootRegions* root_regions() { return &_root_regions; }
612 632
613 633 bool concurrent_marking_in_progress() {
614 634 return _concurrent_marking_in_progress;
615 635 }
616 636 void set_concurrent_marking_in_progress() {
617 637 _concurrent_marking_in_progress = true;
618 638 }
619 639 void clear_concurrent_marking_in_progress() {
620 640 _concurrent_marking_in_progress = false;
621 641 }
622 642
623 643 void update_accum_task_vtime(int i, double vtime) {
624 644 _accum_task_vtime[i] += vtime;
625 645 }
626 646
627 647 double all_task_accum_vtime() {
628 648 double ret = 0.0;
↓ open down ↓ |
39 lines elided |
↑ open up ↑ |
629 649 for (uint i = 0; i < _max_worker_id; ++i)
630 650 ret += _accum_task_vtime[i];
631 651 return ret;
632 652 }
633 653
634 654 // Attempts to steal an object from the task queues of other tasks
635 655 bool try_stealing(uint worker_id, int* hash_seed, oop& obj) {
636 656 return _task_queues->steal(worker_id, hash_seed, obj);
637 657 }
638 658
639 - ConcurrentMark(ReservedSpace rs, uint max_regions);
659 + ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
640 660 ~ConcurrentMark();
641 661
642 662 ConcurrentMarkThread* cmThread() { return _cmThread; }
643 663
644 664 CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
645 665 CMBitMap* nextMarkBitMap() const { return _nextMarkBitMap; }
646 666
647 667 // Returns the number of GC threads to be used in a concurrent
648 668 // phase based on the number of GC threads being used in a STW
649 669 // phase.
650 670 uint scale_parallel_threads(uint n_par_threads);
651 671
652 672 // Calculates the number of GC threads to be used in a concurrent phase.
653 673 uint calc_parallel_marking_threads();
654 674
655 675 // The following three are interaction between CM and
656 676 // G1CollectedHeap
657 677
658 678 // This notifies CM that a root during initial-mark needs to be
659 679 // grayed. It is MT-safe. word_size is the size of the object in
660 680 // words. It is passed explicitly as sometimes we cannot calculate
661 681 // it from the given object because it might be in an inconsistent
662 682 // state (e.g., in to-space and being copied). So the caller is
663 683 // responsible for dealing with this issue (e.g., get the size from
664 684 // the from-space image when the to-space image might be
665 685 // inconsistent) and always passing the size. hr is the region that
666 686 // contains the object and it's passed optionally from callers who
667 687 // might already have it (no point in recalculating it).
668 688 inline void grayRoot(oop obj, size_t word_size,
669 689 uint worker_id, HeapRegion* hr = NULL);
670 690
671 691 // It iterates over the heap and for each object it comes across it
672 692 // will dump the contents of its reference fields, as well as
673 693 // liveness information for the object and its referents. The dump
674 694 // will be written to a file with the following name:
675 695 // G1PrintReachableBaseFile + "." + str.
676 696 // vo decides whether the prev (vo == UsePrevMarking), the next
677 697 // (vo == UseNextMarking) marking information, or the mark word
678 698 // (vo == UseMarkWord) will be used to determine the liveness of
679 699 // each object / referent.
680 700 // If all is true, all objects in the heap will be dumped, otherwise
681 701   // only the live ones. In the dump the following symbols / abbreviations
682 702 // are used:
683 703 // M : an explicitly live object (its bitmap bit is set)
684 704 // > : an implicitly live object (over tams)
685 705 // O : an object outside the G1 heap (typically: in the perm gen)
686 706 // NOT : a reference field whose referent is not live
687 707 // AND MARKED : indicates that an object is both explicitly and
688 708 // implicitly live (it should be one or the other, not both)
689 709 void print_reachable(const char* str,
690 710 VerifyOption vo, bool all) PRODUCT_RETURN;
691 711
692 712 // Clear the next marking bitmap (will be called concurrently).
693 713 void clearNextBitmap();
694 714
695 715 // These two do the work that needs to be done before and after the
696 716 // initial root checkpoint. Since this checkpoint can be done at two
697 717 // different points (i.e. an explicit pause or piggy-backed on a
698 718 // young collection), then it's nice to be able to easily share the
699 719 // pre/post code. It might be the case that we can put everything in
700 720 // the post method. TP
701 721 void checkpointRootsInitialPre();
702 722 void checkpointRootsInitialPost();
703 723
704 724 // Scan all the root regions and mark everything reachable from
705 725 // them.
706 726 void scanRootRegions();
707 727
708 728 // Scan a single root region and mark everything reachable from it.
709 729 void scanRootRegion(HeapRegion* hr, uint worker_id);
710 730
711 731 // Do concurrent phase of marking, to a tentative transitive closure.
712 732 void markFromRoots();
713 733
714 734 void checkpointRootsFinal(bool clear_all_soft_refs);
715 735 void checkpointRootsFinalWork();
716 736 void cleanup();
717 737 void completeCleanup();
718 738
719 739 // Mark in the previous bitmap. NB: this is usually read-only, so use
720 740 // this carefully!
721 741 inline void markPrev(oop p);
722 742
723 743 // Clears marks for all objects in the given range, for the prev,
724 744 // next, or both bitmaps. NB: the previous bitmap is usually
725 745 // read-only, so use this carefully!
726 746 void clearRangePrevBitmap(MemRegion mr);
727 747 void clearRangeNextBitmap(MemRegion mr);
728 748 void clearRangeBothBitmaps(MemRegion mr);
729 749
730 750 // Notify data structures that a GC has started.
731 751 void note_start_of_gc() {
732 752 _markStack.note_start_of_gc();
733 753 }
734 754
735 755 // Notify data structures that a GC is finished.
736 756 void note_end_of_gc() {
737 757 _markStack.note_end_of_gc();
738 758 }
739 759
740 760 // Verify that there are no CSet oops on the stacks (taskqueues /
741 761 // global mark stack), enqueued SATB buffers, per-thread SATB
742 762 // buffers, and fingers (global / per-task). The boolean parameters
743 763 // decide which of the above data structures to verify. If marking
744 764 // is not in progress, it's a no-op.
745 765 void verify_no_cset_oops(bool verify_stacks,
746 766 bool verify_enqueued_buffers,
747 767 bool verify_thread_buffers,
748 768 bool verify_fingers) PRODUCT_RETURN;
749 769
750 770 // It is called at the end of an evacuation pause during marking so
751 771 // that CM is notified of where the new end of the heap is. It
752 772 // doesn't do anything if concurrent_marking_in_progress() is false,
753 773 // unless the force parameter is true.
754 774 void update_g1_committed(bool force = false);
755 775
756 776 bool isMarked(oop p) const {
757 777 assert(p != NULL && p->is_oop(), "expected an oop");
758 778 HeapWord* addr = (HeapWord*)p;
759 779 assert(addr >= _nextMarkBitMap->startWord() ||
760 780 addr < _nextMarkBitMap->endWord(), "in a region");
761 781
762 782 return _nextMarkBitMap->isMarked(addr);
763 783 }
764 784
765 785 inline bool not_yet_marked(oop p) const;
766 786
767 787 // XXX Debug code
768 788 bool containing_card_is_marked(void* p);
769 789 bool containing_cards_are_marked(void* start, void* last);
770 790
771 791 bool isPrevMarked(oop p) const {
772 792 assert(p != NULL && p->is_oop(), "expected an oop");
773 793 HeapWord* addr = (HeapWord*)p;
774 794 assert(addr >= _prevMarkBitMap->startWord() ||
775 795 addr < _prevMarkBitMap->endWord(), "in a region");
776 796
777 797 return _prevMarkBitMap->isMarked(addr);
778 798 }
779 799
780 800 inline bool do_yield_check(uint worker_i = 0);
781 801 inline bool should_yield();
782 802
783 803   // Called to abort the marking cycle after a Full GC takes place.
784 804 void abort();
785 805
786 806 // This prints the global/local fingers. It is used for debugging.
787 807 NOT_PRODUCT(void print_finger();)
788 808
789 809 void print_summary_info();
790 810
791 811 void print_worker_threads_on(outputStream* st) const;
792 812
793 813 // The following indicate whether a given verbose level has been
794 814 // set. Notice that anything above stats is conditional to
795 815 // _MARKING_VERBOSE_ having been set to 1
796 816 bool verbose_stats() {
797 817 return _verbose_level >= stats_verbose;
798 818 }
799 819 bool verbose_low() {
800 820 return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
801 821 }
802 822 bool verbose_medium() {
803 823 return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
804 824 }
805 825 bool verbose_high() {
806 826 return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
807 827 }
808 828
809 829 // Liveness counting
810 830
811 831 // Utility routine to set an exclusive range of cards on the given
812 832 // card liveness bitmap
813 833 inline void set_card_bitmap_range(BitMap* card_bm,
814 834 BitMap::idx_t start_idx,
815 835 BitMap::idx_t end_idx,
816 836 bool is_par);
817 837
818 838 // Returns the card number of the bottom of the G1 heap.
819 839 // Used in biasing indices into accounting card bitmaps.
820 840 intptr_t heap_bottom_card_num() const {
821 841 return _heap_bottom_card_num;
822 842 }
823 843
824 844 // Returns the card bitmap for a given task or worker id.
825 845 BitMap* count_card_bitmap_for(uint worker_id) {
826 846 assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
827 847 assert(_count_card_bitmaps != NULL, "uninitialized");
828 848 BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
829 849 assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
830 850 return task_card_bm;
831 851 }
832 852
833 853 // Returns the array containing the marked bytes for each region,
834 854 // for the given worker or task id.
835 855 size_t* count_marked_bytes_array_for(uint worker_id) {
836 856 assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
837 857 assert(_count_marked_bytes != NULL, "uninitialized");
838 858 size_t* marked_bytes_array = _count_marked_bytes[worker_id];
839 859 assert(marked_bytes_array != NULL, "uninitialized");
840 860 return marked_bytes_array;
841 861 }
842 862
843 863 // Returns the index in the liveness accounting card table bitmap
844 864 // for the given address
845 865 inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);
846 866
847 867   // Counts the size of the given memory region in the given
848 868 // marked_bytes array slot for the given HeapRegion.
849 869 // Sets the bits in the given card bitmap that are associated with the
850 870 // cards that are spanned by the memory region.
851 871 inline void count_region(MemRegion mr, HeapRegion* hr,
852 872 size_t* marked_bytes_array,
853 873 BitMap* task_card_bm);
854 874
855 875 // Counts the given memory region in the task/worker counting
856 876 // data structures for the given worker id.
857 877 inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);
858 878
859 879 // Counts the given memory region in the task/worker counting
860 880 // data structures for the given worker id.
861 881 inline void count_region(MemRegion mr, uint worker_id);
862 882
863 883 // Counts the given object in the given task/worker counting
864 884 // data structures.
865 885 inline void count_object(oop obj, HeapRegion* hr,
866 886 size_t* marked_bytes_array,
867 887 BitMap* task_card_bm);
868 888
869 889 // Counts the given object in the task/worker counting data
870 890 // structures for the given worker id.
871 891 inline void count_object(oop obj, HeapRegion* hr, uint worker_id);
872 892
873 893 // Attempts to mark the given object and, if successful, counts
874 894 // the object in the given task/worker counting structures.
875 895 inline bool par_mark_and_count(oop obj, HeapRegion* hr,
876 896 size_t* marked_bytes_array,
877 897 BitMap* task_card_bm);
878 898
879 899 // Attempts to mark the given object and, if successful, counts
880 900 // the object in the task/worker counting structures for the
881 901 // given worker id.
882 902 inline bool par_mark_and_count(oop obj, size_t word_size,
883 903 HeapRegion* hr, uint worker_id);
884 904
885 905 // Attempts to mark the given object and, if successful, counts
886 906 // the object in the task/worker counting structures for the
887 907 // given worker id.
888 908 inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);
889 909
890 910 // Similar to the above routine but we don't know the heap region that
891 911 // contains the object to be marked/counted, which this routine looks up.
892 912 inline bool par_mark_and_count(oop obj, uint worker_id);
893 913
894 914 // Similar to the above routine but there are times when we cannot
895 915 // safely calculate the size of obj due to races and we, therefore,
896 916   // pass the size in as a parameter. It is the caller's responsibility
897 917 // to ensure that the size passed in for obj is valid.
898 918 inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
899 919
900 920   // Unconditionally mark the given object, and unconditionally count
901 921 // the object in the counting structures for worker id 0.
902 922 // Should *not* be called from parallel code.
903 923 inline bool mark_and_count(oop obj, HeapRegion* hr);
904 924
905 925 // Similar to the above routine but we don't know the heap region that
906 926 // contains the object to be marked/counted, which this routine looks up.
907 927 // Should *not* be called from parallel code.
908 928 inline bool mark_and_count(oop obj);
909 929
930 + // Returns true if initialization was successfully completed.
931 + bool completed_initialization() const {
932 + return _completed_initialization;
933 + }
934 +
910 935 protected:
911 936 // Clear all the per-task bitmaps and arrays used to store the
912 937 // counting data.
913 938 void clear_all_count_data();
914 939
915 940 // Aggregates the counting data for each worker/task
916 941 // that was constructed while marking. Also sets
917 942 // the amount of marked bytes for each region and
918 943 // the top at concurrent mark count.
919 944 void aggregate_count_data();
920 945
921 946 // Verification routine
922 947 void verify_count_data();
923 948 };
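
The two overflow sync barriers documented in ConcurrentMark above follow a simple protocol: every task rendezvouses at the first barrier, task 0 resets the global state, and a second rendezvous keeps any task from restarting before the reset completes. A self-contained C++20 sketch of that protocol, with std::barrier standing in for WorkGangBarrierSync and everything else illustrative:

#include <atomic>
#include <barrier>   // C++20
#include <cstdio>
#include <thread>
#include <vector>

std::atomic<bool> has_overflown(true);

void handle_overflow(unsigned worker_id,
                     std::barrier<>& first, std::barrier<>& second) {
  first.arrive_and_wait();        // all tasks stop touching global structures
  if (worker_id == 0) {
    has_overflown.store(false);   // task 0 re-initialises the global state
  }
  // ... each task re-initialises its local structures here ...
  second.arrive_and_wait();       // nobody resumes work before the reset is done
}

int main() {
  const unsigned n_workers = 4;
  std::barrier<> first(n_workers), second(n_workers);
  std::vector<std::thread> tasks;
  for (unsigned i = 0; i < n_workers; i++) {
    tasks.emplace_back(handle_overflow, i, std::ref(first), std::ref(second));
  }
  for (std::thread& t : tasks) {
    t.join();
  }
  printf("overflow cleared: %s\n", has_overflown.load() ? "no" : "yes");
  return 0;
}
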
924 949
925 950 // A class representing a marking task.
926 951 class CMTask : public TerminatorTerminator {
927 952 private:
928 953 enum PrivateConstants {
929 954 // the regular clock call is called once the scanned words reaches
930 955 // this limit
931 956 words_scanned_period = 12*1024,
932 957 // the regular clock call is called once the number of visited
933 958 // references reaches this limit
934 959 refs_reached_period = 384,
935 960 // initial value for the hash seed, used in the work stealing code
936 961 init_hash_seed = 17,
937 962 // how many entries will be transferred between global stack and
938 963 // local queues
939 964 global_stack_transfer_size = 16
940 965 };
941 966
942 967 uint _worker_id;
943 968 G1CollectedHeap* _g1h;
944 969 ConcurrentMark* _cm;
945 970 CMBitMap* _nextMarkBitMap;
946 971 // the task queue of this task
947 972 CMTaskQueue* _task_queue;
948 973 private:
949 974 // the task queue set---needed for stealing
950 975 CMTaskQueueSet* _task_queues;
951 976 // indicates whether the task has been claimed---this is only for
952 977 // debugging purposes
953 978 bool _claimed;
954 979
955 980 // number of calls to this task
956 981 int _calls;
957 982
958 983 // when the virtual timer reaches this time, the marking step should
959 984 // exit
960 985 double _time_target_ms;
961 986 // the start time of the current marking step
962 987 double _start_time_ms;
963 988
964 989 // the oop closure used for iterations over oops
965 990 G1CMOopClosure* _cm_oop_closure;
966 991
967 992 // the region this task is scanning, NULL if we're not scanning any
968 993 HeapRegion* _curr_region;
969 994 // the local finger of this task, NULL if we're not scanning a region
970 995 HeapWord* _finger;
971 996 // limit of the region this task is scanning, NULL if we're not scanning one
972 997 HeapWord* _region_limit;
973 998
974 999 // the number of words this task has scanned
975 1000 size_t _words_scanned;
976 1001 // When _words_scanned reaches this limit, the regular clock is
977 1002 // called. Notice that this might be decreased under certain
978 1003 // circumstances (i.e. when we believe that we did an expensive
979 1004 // operation).
980 1005 size_t _words_scanned_limit;
981 1006 // the initial value of _words_scanned_limit (i.e. what it was
982 1007 // before it was decreased).
983 1008 size_t _real_words_scanned_limit;
984 1009
985 1010 // the number of references this task has visited
986 1011 size_t _refs_reached;
987 1012 // When _refs_reached reaches this limit, the regular clock is
988 1013   // called. Notice that this might be decreased under certain
989 1014 // circumstances (i.e. when we believe that we did an expensive
990 1015 // operation).
991 1016 size_t _refs_reached_limit;
992 1017 // the initial value of _refs_reached_limit (i.e. what it was before
993 1018 // it was decreased).
994 1019 size_t _real_refs_reached_limit;
995 1020
996 1021 // used by the work stealing stuff
997 1022 int _hash_seed;
998 1023 // if this is true, then the task has aborted for some reason
999 1024 bool _has_aborted;
1000 1025 // set when the task aborts because it has met its time quota
1001 1026 bool _has_timed_out;
1002 1027 // true when we're draining SATB buffers; this avoids the task
1003 1028 // aborting due to SATB buffers being available (as we're already
1004 1029 // dealing with them)
1005 1030 bool _draining_satb_buffers;
1006 1031
1007 1032 // number sequence of past step times
1008 1033 NumberSeq _step_times_ms;
1009 1034 // elapsed time of this task
1010 1035 double _elapsed_time_ms;
1011 1036 // termination time of this task
1012 1037 double _termination_time_ms;
1013 1038 // when this task got into the termination protocol
1014 1039 double _termination_start_time_ms;
1015 1040
1016 1041 // true when the task is during a concurrent phase, false when it is
1017 1042 // in the remark phase (so, in the latter case, we do not have to
1018 1043 // check all the things that we have to check during the concurrent
1019 1044 // phase, i.e. SATB buffer availability...)
1020 1045 bool _concurrent;
1021 1046
1022 1047 TruncatedSeq _marking_step_diffs_ms;
1023 1048
1024 1049 // Counting data structures. Embedding the task's marked_bytes_array
1025 1050 // and card bitmap into the actual task saves having to go through
1026 1051 // the ConcurrentMark object.
1027 1052 size_t* _marked_bytes_array;
1028 1053 BitMap* _card_bm;
1029 1054
1030 1055 // LOTS of statistics related with this task
1031 1056 #if _MARKING_STATS_
1032 1057 NumberSeq _all_clock_intervals_ms;
1033 1058 double _interval_start_time_ms;
1034 1059
1035 1060 int _aborted;
1036 1061 int _aborted_overflow;
1037 1062 int _aborted_cm_aborted;
1038 1063 int _aborted_yield;
1039 1064 int _aborted_timed_out;
1040 1065 int _aborted_satb;
1041 1066 int _aborted_termination;
1042 1067
1043 1068 int _steal_attempts;
1044 1069 int _steals;
1045 1070
1046 1071 int _clock_due_to_marking;
1047 1072 int _clock_due_to_scanning;
1048 1073
1049 1074 int _local_pushes;
1050 1075 int _local_pops;
1051 1076 int _local_max_size;
1052 1077 int _objs_scanned;
1053 1078
1054 1079 int _global_pushes;
1055 1080 int _global_pops;
1056 1081 int _global_max_size;
1057 1082
1058 1083 int _global_transfers_to;
1059 1084 int _global_transfers_from;
1060 1085
1061 1086 int _regions_claimed;
1062 1087 int _objs_found_on_bitmap;
1063 1088
1064 1089 int _satb_buffers_processed;
1065 1090 #endif // _MARKING_STATS_
1066 1091
1067 1092 // it updates the local fields after this task has claimed
1068 1093 // a new region to scan
1069 1094 void setup_for_region(HeapRegion* hr);
1070 1095 // it brings up-to-date the limit of the region
1071 1096 void update_region_limit();
1072 1097
1073 1098 // called when either the words scanned or the refs visited limit
1074 1099 // has been reached
1075 1100 void reached_limit();
1076 1101 // recalculates the words scanned and refs visited limits
1077 1102 void recalculate_limits();
1078 1103 // decreases the words scanned and refs visited limits when we reach
1079 1104 // an expensive operation
1080 1105 void decrease_limits();
1081 1106 // it checks whether the words scanned or refs visited reached their
1082 1107 // respective limit and calls reached_limit() if they have
1083 1108 void check_limits() {
1084 1109 if (_words_scanned >= _words_scanned_limit ||
1085 1110 _refs_reached >= _refs_reached_limit) {
1086 1111 reached_limit();
1087 1112 }
1088 1113 }
1089 1114 // this is supposed to be called regularly during a marking step as
1090 1115 // it checks a bunch of conditions that might cause the marking step
1091 1116 // to abort
1092 1117 void regular_clock_call();
1093 1118 bool concurrent() { return _concurrent; }
1094 1119
1095 1120 public:
1096 1121 // It resets the task; it should be called right at the beginning of
1097 1122 // a marking phase.
1098 1123 void reset(CMBitMap* _nextMarkBitMap);
1099 1124 // it clears all the fields that correspond to a claimed region.
1100 1125 void clear_region_fields();
1101 1126
1102 1127 void set_concurrent(bool concurrent) { _concurrent = concurrent; }
1103 1128
1104 1129 // The main method of this class which performs a marking step
1105 1130 // trying not to exceed the given duration. However, it might exit
1106 1131 // prematurely, according to some conditions (i.e. SATB buffers are
1107 1132 // available for processing).
1108 1133 void do_marking_step(double target_ms, bool do_stealing, bool do_termination);
1109 1134
1110 1135 // These two calls start and stop the timer
1111 1136 void record_start_time() {
1112 1137 _elapsed_time_ms = os::elapsedTime() * 1000.0;
1113 1138 }
1114 1139 void record_end_time() {
1115 1140 _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
1116 1141 }
1117 1142
1118 1143 // returns the worker ID associated with this task.
1119 1144 uint worker_id() { return _worker_id; }
1120 1145
1121 1146 // From TerminatorTerminator. It determines whether this task should
1122 1147 // exit the termination protocol after it's entered it.
1123 1148 virtual bool should_exit_termination();
1124 1149
1125 1150 // Resets the local region fields after a task has finished scanning a
1126 1151 // region; or when they have become stale as a result of the region
1127 1152 // being evacuated.
1128 1153 void giveup_current_region();
1129 1154
1130 1155 HeapWord* finger() { return _finger; }
1131 1156
1132 1157 bool has_aborted() { return _has_aborted; }
1133 1158 void set_has_aborted() { _has_aborted = true; }
1134 1159 void clear_has_aborted() { _has_aborted = false; }
1135 1160 bool has_timed_out() { return _has_timed_out; }
1136 1161 bool claimed() { return _claimed; }
1137 1162
1138 1163 void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);
1139 1164
1140 1165 // It grays the object by marking it and, if necessary, pushing it
1141 1166 // on the local queue
1142 1167 inline void deal_with_reference(oop obj);
1143 1168
1144 1169 // It scans an object and visits its children.
1145 1170 void scan_object(oop obj);
1146 1171
1147 1172 // It pushes an object on the local queue.
1148 1173 inline void push(oop obj);
1149 1174
1150 1175 // These two move entries to/from the global stack.
1151 1176 void move_entries_to_global_stack();
1152 1177 void get_entries_from_global_stack();
1153 1178
1154 1179 // It pops and scans objects from the local queue. If partially is
1155 1180 // true, then it stops when the queue size is of a given limit. If
1156 1181 // partially is false, then it stops when the queue is empty.
1157 1182 void drain_local_queue(bool partially);
1158 1183 // It moves entries from the global stack to the local queue and
1159 1184 // drains the local queue. If partially is true, then it stops when
1160 1185 // both the global stack and the local queue reach a given size. If
1161 1186   // partially is false, it tries to empty them totally.
1162 1187 void drain_global_stack(bool partially);
1163 1188 // It keeps picking SATB buffers and processing them until no SATB
1164 1189 // buffers are available.
1165 1190 void drain_satb_buffers();
1166 1191
1167 1192 // moves the local finger to a new location
1168 1193 inline void move_finger_to(HeapWord* new_finger) {
1169 1194 assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
1170 1195 _finger = new_finger;
1171 1196 }
1172 1197
1173 1198 CMTask(uint worker_id, ConcurrentMark *cm,
1174 1199 size_t* marked_bytes, BitMap* card_bm,
1175 1200 CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
1176 1201
1177 1202 // it prints statistics associated with this task
1178 1203 void print_stats();
1179 1204
1180 1205 #if _MARKING_STATS_
1181 1206 void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
1182 1207 #endif // _MARKING_STATS_
1183 1208 };
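
The words_scanned_period/refs_reached_period constants and check_limits() above implement CMTask's cooperative clock: cheap counter checks on the hot path, with the expensive checks deferred to regular_clock_call(). A standalone analogue of the pattern; the periods mirror the constants above, but the harness and the printf are illustrative:

#include <cstddef>
#include <cstdio>

struct ClockedTask {
  size_t _words_scanned;
  size_t _refs_reached;
  size_t _words_scanned_limit;
  size_t _refs_reached_limit;

  ClockedTask()
    : _words_scanned(0), _refs_reached(0),
      _words_scanned_limit(12 * 1024), _refs_reached_limit(384) { }

  void regular_clock_call() {
    // The real task checks its time target, SATB buffers, abort flags, etc.
    printf("clock: %zu words scanned, %zu refs reached\n",
           _words_scanned, _refs_reached);
    _words_scanned_limit = _words_scanned + 12 * 1024;  // words_scanned_period
    _refs_reached_limit  = _refs_reached + 384;         // refs_reached_period
  }

  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit) {
      regular_clock_call();
    }
  }

  void scan_words(size_t n) { _words_scanned += n; check_limits(); }
  void reach_ref()          { _refs_reached  += 1; check_limits(); }
};

int main() {
  ClockedTask task;
  for (int i = 0; i < 1000; i++) {
    task.reach_ref();  // the clock fires at 384 and 768 refs
  }
  task.scan_words(20 * 1024);  // and again once the word limit is crossed
  return 0;
}
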
1184 1209
1185 1210 // Class that's used to print out per-region liveness
1186 1211 // information. It's currently used at the end of marking and also
1187 1212 // after we sort the old regions at the end of the cleanup operation.
1188 1213 class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
1189 1214 private:
1190 1215 outputStream* _out;
1191 1216
1192 1217 // Accumulators for these values.
1193 1218 size_t _total_used_bytes;
1194 1219 size_t _total_capacity_bytes;
1195 1220 size_t _total_prev_live_bytes;
1196 1221 size_t _total_next_live_bytes;
1197 1222
1198 1223   // These are set up when we come across a "starts humongous" region
1199 1224 // (as this is where most of this information is stored, not in the
1200 1225 // subsequent "continues humongous" regions). After that, for every
1201 1226 // region in a given humongous region series we deduce the right
1202 1227 // values for it by simply subtracting the appropriate amount from
1203 1228 // these fields. All these values should reach 0 after we've visited
1204 1229 // the last region in the series.
1205 1230 size_t _hum_used_bytes;
1206 1231 size_t _hum_capacity_bytes;
1207 1232 size_t _hum_prev_live_bytes;
1208 1233 size_t _hum_next_live_bytes;
1209 1234
1210 1235 static double perc(size_t val, size_t total) {
1211 1236 if (total == 0) {
1212 1237 return 0.0;
1213 1238 } else {
1214 1239 return 100.0 * ((double) val / (double) total);
1215 1240 }
1216 1241 }
1217 1242
1218 1243 static double bytes_to_mb(size_t val) {
1219 1244 return (double) val / (double) M;
1220 1245 }
1221 1246
1222 1247 // See the .cpp file.
1223 1248 size_t get_hum_bytes(size_t* hum_bytes);
1224 1249 void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
1225 1250 size_t* prev_live_bytes, size_t* next_live_bytes);
1226 1251
1227 1252 public:
1228 1253 // The header and footer are printed in the constructor and
1229 1254 // destructor respectively.
1230 1255 G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name);
1231 1256 virtual bool doHeapRegion(HeapRegion* r);
1232 1257 ~G1PrintRegionLivenessInfoClosure();
1233 1258 };
1234 1259
1235 1260 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP