rev 2585 : [mq]: g1-reference-processing
--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
1 1 /*
2 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
26 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
27 27
28 28 #include "gc_implementation/g1/concurrentMark.hpp"
29 29 #include "gc_implementation/g1/g1AllocRegion.hpp"
30 30 #include "gc_implementation/g1/g1HRPrinter.hpp"
31 31 #include "gc_implementation/g1/g1RemSet.hpp"
32 32 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
33 33 #include "gc_implementation/g1/heapRegionSeq.hpp"
34 34 #include "gc_implementation/g1/heapRegionSets.hpp"
35 35 #include "gc_implementation/shared/hSpaceCounters.hpp"
36 36 #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
37 37 #include "memory/barrierSet.hpp"
38 38 #include "memory/memRegion.hpp"
39 39 #include "memory/sharedHeap.hpp"
40 40
41 41 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
42 42 // It uses the "Garbage First" heap organization and algorithm, which
43 43 // may combine concurrent marking with parallel, incremental compaction of
44 44 // heap subsets that will yield large amounts of garbage.
45 45
46 46 class HeapRegion;
47 47 class HRRSCleanupTask;
48 48 class PermanentGenerationSpec;
49 49 class GenerationSpec;
50 50 class OopsInHeapRegionClosure;
51 51 class G1ScanHeapEvacClosure;
52 52 class ObjectClosure;
53 53 class SpaceClosure;
54 54 class CompactibleSpaceClosure;
55 55 class Space;
56 56 class G1CollectorPolicy;
57 57 class GenRemSet;
58 58 class G1RemSet;
59 59 class HeapRegionRemSetIterator;
60 60 class ConcurrentMark;
61 61 class ConcurrentMarkThread;
62 62 class ConcurrentG1Refine;
63 63 class GenerationCounters;
64 64
65 65 typedef OverflowTaskQueue<StarTask> RefToScanQueue;
66 66 typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
67 67
68 68 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
69 69 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
70 70
71 71 enum GCAllocPurpose {
72 72 GCAllocForTenured,
73 73 GCAllocForSurvived,
74 74 GCAllocPurposeCount
75 75 };
76 76
77 77 class YoungList : public CHeapObj {
78 78 private:
79 79 G1CollectedHeap* _g1h;
80 80
81 81 HeapRegion* _head;
82 82
83 83 HeapRegion* _survivor_head;
84 84 HeapRegion* _survivor_tail;
85 85
86 86 HeapRegion* _curr;
87 87
88 88 size_t _length;
89 89 size_t _survivor_length;
90 90
91 91 size_t _last_sampled_rs_lengths;
92 92 size_t _sampled_rs_lengths;
93 93
94 94 void empty_list(HeapRegion* list);
95 95
96 96 public:
97 97 YoungList(G1CollectedHeap* g1h);
98 98
99 99 void push_region(HeapRegion* hr);
100 100 void add_survivor_region(HeapRegion* hr);
101 101
102 102 void empty_list();
103 103 bool is_empty() { return _length == 0; }
104 104 size_t length() { return _length; }
105 105 size_t survivor_length() { return _survivor_length; }
106 106
107 107 // Currently we do not keep track of the used byte sum for the
108 108 // young list and the survivors, and it would be quite a lot of
109 109 // work to do so. When we eventually replace the young list with
110 110 // instances of HeapRegionLinkedList we will get that for free, so
111 111 // we will report the more accurate information then.
112 112 size_t eden_used_bytes() {
113 113 assert(length() >= survivor_length(), "invariant");
114 114 return (length() - survivor_length()) * HeapRegion::GrainBytes;
115 115 }
116 116 size_t survivor_used_bytes() {
117 117 return survivor_length() * HeapRegion::GrainBytes;
118 118 }
119 119
120 120 void rs_length_sampling_init();
121 121 bool rs_length_sampling_more();
122 122 void rs_length_sampling_next();
123 123
124 124 void reset_sampled_info() {
125 125 _last_sampled_rs_lengths = 0;
126 126 }
127 127 size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }
128 128
129 129 // for development purposes
130 130 void reset_auxilary_lists();
131 131 void clear() { _head = NULL; _length = 0; }
132 132
133 133 void clear_survivors() {
134 134 _survivor_head = NULL;
135 135 _survivor_tail = NULL;
136 136 _survivor_length = 0;
137 137 }
138 138
139 139 HeapRegion* first_region() { return _head; }
140 140 HeapRegion* first_survivor_region() { return _survivor_head; }
141 141 HeapRegion* last_survivor_region() { return _survivor_tail; }
142 142
143 143 // debugging
144 144 bool check_list_well_formed();
145 145 bool check_list_empty(bool check_sample = true);
146 146 void print();
147 147 };
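The rs_length_sampling_* methods above form an init/more/next protocol. A caller sampling the remembered-set lengths of the young regions would look roughly like the following sketch (assuming the heap's young_list() accessor; illustrative only, not part of this changeset):

    YoungList* young_list = g1h->young_list();
    young_list->rs_length_sampling_init();
    while (young_list->rs_length_sampling_more()) {
      // Samples the RS length of the current region and advances.
      young_list->rs_length_sampling_next();
    }
    size_t rs_lengths = young_list->sampled_rs_lengths();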
148 148
149 149 class MutatorAllocRegion : public G1AllocRegion {
150 150 protected:
151 151 virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
152 152 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
153 153 public:
154 154 MutatorAllocRegion()
155 155 : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
156 156 };
157 157
158 +// The G1 STW is-alive closure.
159 +// An instance is embedded into the G1CH and used as the
160 +// _is_alive_non_header closure in the STW reference
161 +// processor. It is also extensively used during reference
162 +// processing in STW evacuation pauses.
163 +class G1STWIsAliveClosure: public BoolObjectClosure {
164 + G1CollectedHeap* _g1;
165 +public:
166 + G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
167 + void do_object(oop p) { assert(false, "Do not call."); }
168 + bool do_object_b(oop p);
169 +};
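The definition of do_object_b() lives in the .cpp file and is not part of this hunk. A plausible implementation, consistent with how G1 judges liveness during an evacuation pause (live means outside the collection set, or already copied), is:

    bool G1STWIsAliveClosure::do_object_b(oop p) {
      // An object is live if it is outside the collection set,
      // or is in the collection set and has been forwarded (copied).
      return !_g1->obj_in_cs(p) || p->is_forwarded();
    }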
170 +
158 171 class RefineCardTableEntryClosure;
172 +
159 173 class G1CollectedHeap : public SharedHeap {
160 174 friend class VM_G1CollectForAllocation;
161 175 friend class VM_GenCollectForPermanentAllocation;
162 176 friend class VM_G1CollectFull;
163 177 friend class VM_G1IncCollectionPause;
164 178 friend class VMStructs;
165 179 friend class MutatorAllocRegion;
166 180
167 181 // Closures used in implementation.
168 182 friend class G1ParCopyHelper;
169 183 friend class G1IsAliveClosure;
170 184 friend class G1EvacuateFollowersClosure;
171 185 friend class G1ParScanThreadState;
172 186 friend class G1ParScanClosureSuper;
173 187 friend class G1ParEvacuateFollowersClosure;
174 188 friend class G1ParTask;
175 189 friend class G1FreeGarbageRegionClosure;
176 190 friend class RefineCardTableEntryClosure;
177 191 friend class G1PrepareCompactClosure;
178 192 friend class RegionSorter;
179 193 friend class RegionResetter;
180 194 friend class CountRCClosure;
181 195 friend class EvacPopObjClosure;
182 196 friend class G1ParCleanupCTTask;
183 197
184 198 // Other related classes.
185 199 friend class G1MarkSweep;
186 200
187 201 private:
188 202 // The one and only G1CollectedHeap, so static functions can find it.
189 203 static G1CollectedHeap* _g1h;
190 204
191 205 static size_t _humongous_object_threshold_in_words;
192 206
193 207 // Storage for the G1 heap (excludes the permanent generation).
194 208 VirtualSpace _g1_storage;
195 209 MemRegion _g1_reserved;
196 210
197 211 // The part of _g1_storage that is currently committed.
198 212 MemRegion _g1_committed;
199 213
200 214 // The master free list. It will satisfy all new region allocations.
201 215 MasterFreeRegionList _free_list;
202 216
203 217 // The secondary free list which contains regions that have been
204 218 // freed up during the cleanup process. This will be appended to the
205 219 // master free list when appropriate.
206 220 SecondaryFreeRegionList _secondary_free_list;
207 221
208 222 // It keeps track of the humongous regions.
209 223 MasterHumongousRegionSet _humongous_set;
210 224
211 225 // The number of regions we could create by expansion.
212 226 size_t _expansion_regions;
213 227
214 228 // The block offset table for the G1 heap.
215 229 G1BlockOffsetSharedArray* _bot_shared;
216 230
217 231 // Move all of the regions off the free lists, then rebuild those free
218 232 // lists, before and after full GC.
219 233 void tear_down_region_lists();
220 234 void rebuild_region_lists();
221 235
222 236 // The sequence of all heap regions in the heap.
223 237 HeapRegionSeq _hrs;
224 238
225 239 // Alloc region used to satisfy mutator allocation requests.
226 240 MutatorAllocRegion _mutator_alloc_region;
227 241
228 242 // It resets the mutator alloc region before new allocations can take place.
229 243 void init_mutator_alloc_region();
230 244
231 245 // It releases the mutator alloc region.
232 246 void release_mutator_alloc_region();
233 247
234 248 void abandon_gc_alloc_regions();
235 249
236 250 // The to-space memory regions into which objects are being copied during
237 251 // a GC.
238 252 HeapRegion* _gc_alloc_regions[GCAllocPurposeCount];
239 253 size_t _gc_alloc_region_counts[GCAllocPurposeCount];
240 254 // These are the regions, one per GCAllocPurpose, that are half-full
241 255 // at the end of a collection and that we want to reuse during the
242 256 // next collection.
243 257 HeapRegion* _retained_gc_alloc_regions[GCAllocPurposeCount];
244 258 // This specifies whether we will keep the last half-full region at
245 259 // the end of a collection so that it can be reused during the next
246 260 // collection (this is specified per GCAllocPurpose)
247 261 bool _retain_gc_alloc_region[GCAllocPurposeCount];
248 262
249 263 // A list of the regions that have been set to be alloc regions in the
250 264 // current collection.
251 265 HeapRegion* _gc_alloc_region_list;
252 266
253 267 // Helper for monitoring and management support.
254 268 G1MonitoringSupport* _g1mm;
255 269
256 270 // Determines PLAB size for a particular allocation purpose.
257 271 static size_t desired_plab_sz(GCAllocPurpose purpose);
258 272
259 273 // When called by par thread, requires the FreeList_lock to be held.
260 274 void push_gc_alloc_region(HeapRegion* hr);
261 275
262 276 // This should only be called single-threaded. Undeclares all GC alloc
263 277 // regions.
264 278 void forget_alloc_region_list();
265 279
266 280 // Should be used to set an alloc region, because there's other
267 281 // associated bookkeeping.
268 282 void set_gc_alloc_region(int purpose, HeapRegion* r);
269 283
270 284 // Check well-formedness of alloc region list.
271 285 bool check_gc_alloc_regions();
272 286
273 287 // Outside of GC pauses, the number of bytes used in all regions other
274 288 // than the current allocation region.
275 289 size_t _summary_bytes_used;
276 290
277 291 // This is used for a quick test on whether a reference points into
278 292 // the collection set or not. Basically, we have an array, with one
279 293 // byte per region, and that byte denotes whether the corresponding
280 294 // region is in the collection set or not. The entry corresponding
281 295 // the bottom of the heap, i.e., region 0, is pointed to by
282 296 // _in_cset_fast_test_base. The _in_cset_fast_test field has been
283 297 // biased so that it actually points to address 0 of the address
284 298 // space, to make the test as fast as possible (we can simply shift
285 299 // the address to index into it, instead of having to subtract the
286 300 // bottom of the heap from the address before shifting it; basically
287 301 // it works in the same way the card table works).
288 302 bool* _in_cset_fast_test;
289 303
290 304 // The allocated array used for the fast test on whether a reference
291 305 // points into the collection set or not. This field is also used to
292 306 // free the array.
293 307 bool* _in_cset_fast_test_base;
294 308
295 309 // The length of the _in_cset_fast_test_base array.
296 310 size_t _in_cset_fast_test_length;
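A sketch of the biasing described above, done once at setup time so that the test can index with the shifted address directly (local names are hypothetical):

    // At initialization:
    size_t bottom_index = ((size_t) _g1_reserved.start()) >> HeapRegion::LogOfHRGrainBytes;
    _in_cset_fast_test = _in_cset_fast_test_base - bottom_index;

    // The test itself then needs no subtraction:
    bool in_cset = _in_cset_fast_test[((size_t) obj) >> HeapRegion::LogOfHRGrainBytes];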
297 311
298 312 volatile unsigned _gc_time_stamp;
299 313
300 314 size_t* _surviving_young_words;
301 315
302 316 G1HRPrinter _hr_printer;
303 317
304 318 void setup_surviving_young_words();
305 319 void update_surviving_young_words(size_t* surv_young_words);
306 320 void cleanup_surviving_young_words();
307 321
308 322 // It decides whether an explicit GC should start a concurrent cycle
309 323 // instead of doing a STW GC. Currently, a concurrent cycle is
310 324 // explicitly started if:
311 325 // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
312 326 // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
313 327 bool should_do_concurrent_full_gc(GCCause::Cause cause);
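Given rules (a) and (b), the predicate presumably reduces to a check of the cause against the two flags; a sketch:

    bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
      return ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
              (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
    }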
314 328
315 329 // Keeps track of how many "full collections" (i.e., Full GCs or
316 330 // concurrent cycles) we have completed. The number of them we have
317 331 // started is maintained in _total_full_collections in CollectedHeap.
318 332 volatile unsigned int _full_collections_completed;
319 333
320 334 // This is a non-product method that is helpful for testing. It is
321 335 // called at the end of a GC and artificially expands the heap by
322 336 // allocating a number of dead regions. This way we can induce very
323 337 // frequent marking cycles and stress the cleanup / concurrent
324 338 // cleanup code more (as all the regions that will be allocated by
325 339 // this method will be found dead by the marking cycle).
326 340 void allocate_dummy_regions() PRODUCT_RETURN;
327 341
328 342 // These are macros so that, if the assert fires, we get the correct
329 343 // line number, file, etc.
330 344
331 345 #define heap_locking_asserts_err_msg(_extra_message_) \
332 346 err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \
333 347 (_extra_message_), \
334 348 BOOL_TO_STR(Heap_lock->owned_by_self()), \
335 349 BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \
336 350 BOOL_TO_STR(Thread::current()->is_VM_thread()))
337 351
338 352 #define assert_heap_locked() \
339 353 do { \
340 354 assert(Heap_lock->owned_by_self(), \
341 355 heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
342 356 } while (0)
343 357
344 358 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_) \
345 359 do { \
346 360 assert(Heap_lock->owned_by_self() || \
347 361 (SafepointSynchronize::is_at_safepoint() && \
348 362 ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
349 363 heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
350 364 "should be at a safepoint")); \
351 365 } while (0)
352 366
353 367 #define assert_heap_locked_and_not_at_safepoint() \
354 368 do { \
355 369 assert(Heap_lock->owned_by_self() && \
356 370 !SafepointSynchronize::is_at_safepoint(), \
357 371 heap_locking_asserts_err_msg("should be holding the Heap_lock and " \
358 372 "should not be at a safepoint")); \
359 373 } while (0)
360 374
361 375 #define assert_heap_not_locked() \
362 376 do { \
363 377 assert(!Heap_lock->owned_by_self(), \
364 378 heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
365 379 } while (0)
366 380
367 381 #define assert_heap_not_locked_and_not_at_safepoint() \
368 382 do { \
369 383 assert(!Heap_lock->owned_by_self() && \
370 384 !SafepointSynchronize::is_at_safepoint(), \
371 385 heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
372 386 "should not be at a safepoint")); \
373 387 } while (0)
374 388
375 389 #define assert_at_safepoint(_should_be_vm_thread_) \
376 390 do { \
377 391 assert(SafepointSynchronize::is_at_safepoint() && \
378 392 ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
379 393 heap_locking_asserts_err_msg("should be at a safepoint")); \
380 394 } while (0)
381 395
382 396 #define assert_not_at_safepoint() \
383 397 do { \
384 398 assert(!SafepointSynchronize::is_at_safepoint(), \
385 399 heap_locking_asserts_err_msg("should not be at a safepoint")); \
386 400 } while (0)
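As an example of intended usage, a mutator-side allocation entry point would open with the matching assert; a sketch with the body elided:

    HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
                                                  unsigned int* gc_count_before_ret) {
      // Mutator allocation happens outside safepoints, without the Heap_lock.
      assert_heap_not_locked_and_not_at_safepoint();
      // ... fast-path allocation out of the mutator alloc region (elided) ...
      return NULL;
    }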
387 401
388 402 protected:
389 403
390 404 // Returns "true" iff none of the gc alloc regions have any allocations
391 405 // since the last call to "save_marks".
392 406 bool all_alloc_regions_no_allocs_since_save_marks();
393 407 // Perform finalization stuff on all allocation regions.
394 408 void retire_all_alloc_regions();
395 409
396 410 // The young region list.
397 411 YoungList* _young_list;
398 412
399 413 // The current policy object for the collector.
400 414 G1CollectorPolicy* _g1_policy;
401 415
402 416 // This is the second level of trying to allocate a new region. If
403 417 // new_region() didn't find a region on the free_list, this call will
404 418 // check whether there's anything available on the
405 419 // secondary_free_list and/or wait for more regions to appear on
406 420 // that list, if _free_regions_coming is set.
407 421 HeapRegion* new_region_try_secondary_free_list();
408 422
409 423 // Try to allocate a single non-humongous HeapRegion sufficient for
410 424 // an allocation of the given word_size. If do_expand is true,
411 425 // attempt to expand the heap if necessary to satisfy the allocation
412 426 // request.
413 427 HeapRegion* new_region(size_t word_size, bool do_expand);
414 428
415 429 // Try to allocate a new region to be used for allocation by
416 430 // a GC thread. It will try to expand the heap if no region is
417 431 // available.
418 432 HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
419 433
420 434 // Attempt to satisfy a humongous allocation request of the given
421 435 // size by finding a contiguous set of free regions of num_regions
422 436 // length and remove them from the master free list. Return the
423 437 // index of the first region or G1_NULL_HRS_INDEX if the search
424 438 // was unsuccessful.
425 439 size_t humongous_obj_allocate_find_first(size_t num_regions,
426 440 size_t word_size);
427 441
428 442 // Initialize a contiguous set of free regions of length num_regions
429 443 // and starting at index first so that they appear as a single
430 444 // humongous region.
431 445 HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
432 446 size_t num_regions,
433 447 size_t word_size);
434 448
435 449 // Attempt to allocate a humongous object of the given size. Return
436 450 // NULL if unsuccessful.
437 451 HeapWord* humongous_obj_allocate(size_t word_size);
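The two helpers above compose into humongous_obj_allocate(); its presumed shape (the num_regions rounding is an assumption based on HeapRegion::GrainWords):

    HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
      // Number of whole regions needed to cover word_size.
      size_t num_regions = round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
      size_t first = humongous_obj_allocate_find_first(num_regions, word_size);
      if (first == G1_NULL_HRS_INDEX) {
        return NULL; // no contiguous run of free regions was found
      }
      return humongous_obj_allocate_initialize_regions(first, num_regions, word_size);
    }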
438 452
439 453 // The following two methods, allocate_new_tlab() and
440 454 // mem_allocate(), are the two main entry points from the runtime
441 455 // into the G1's allocation routines. They have the following
442 456 // assumptions:
443 457 //
444 458 // * They should both be called outside safepoints.
445 459 //
446 460 // * They should both be called without holding the Heap_lock.
447 461 //
448 462 // * All allocation requests for new TLABs should go to
449 463 // allocate_new_tlab().
450 464 //
451 465 // * All non-TLAB allocation requests should go to mem_allocate().
452 466 //
453 467 // * If either call cannot satisfy the allocation request using the
454 468 // current allocating region, they will try to get a new one. If
455 469 // this fails, they will attempt to do an evacuation pause and
456 470 // retry the allocation.
457 471 //
458 472 // * If all allocation attempts fail, even after trying to schedule
459 473 // an evacuation pause, allocate_new_tlab() will return NULL,
460 474 // whereas mem_allocate() will attempt a heap expansion and/or
461 475 // schedule a Full GC.
462 476 //
463 477 // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
464 478 // should never be called with word_size being humongous. All
465 479 // humongous allocation requests should go to mem_allocate() which
466 480 // will satisfy them with a special path.
467 481
468 482 virtual HeapWord* allocate_new_tlab(size_t word_size);
469 483
470 484 virtual HeapWord* mem_allocate(size_t word_size,
471 485 bool* gc_overhead_limit_was_exceeded);
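Per the assumptions above, mem_allocate() presumably splits on humongous-ness and retries after scheduling a pause; a simplified, non-authoritative sketch (the retry loop is elided):

    HeapWord* G1CollectedHeap::mem_allocate(size_t word_size,
                                            bool* gc_overhead_limit_was_exceeded) {
      assert_heap_not_locked_and_not_at_safepoint();
      unsigned int gc_count_before;
      HeapWord* result = isHumongous(word_size)
          ? attempt_allocation_humongous(word_size, &gc_count_before)
          : attempt_allocation(word_size, &gc_count_before);
      // On failure the real code uses gc_count_before to schedule an
      // evacuation pause (or Full GC) and then retries the allocation.
      return result;
    }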
472 486
473 487 // The following three methods take a gc_count_before_ret
474 488 // parameter which is used to return the GC count if the method
475 489 // returns NULL. Given that we are required to read the GC count
476 490 // while holding the Heap_lock, and these paths will take the
477 491 // Heap_lock at some point, it's easier to get them to read the GC
478 492 // count while holding the Heap_lock before they return NULL instead
479 493 // of the caller (namely: mem_allocate()) having to also take the
480 494 // Heap_lock just to read the GC count.
481 495
482 496 // First-level mutator allocation attempt: try to allocate out of
483 497 // the mutator alloc region without taking the Heap_lock. This
484 498 // should only be used for non-humongous allocations.
485 499 inline HeapWord* attempt_allocation(size_t word_size,
486 500 unsigned int* gc_count_before_ret);
487 501
488 502 // Second-level mutator allocation attempt: take the Heap_lock and
489 503 // retry the allocation attempt, potentially scheduling a GC
490 504 // pause. This should only be used for non-humongous allocations.
491 505 HeapWord* attempt_allocation_slow(size_t word_size,
492 506 unsigned int* gc_count_before_ret);
493 507
494 508 // Takes the Heap_lock and attempts a humongous allocation. It can
495 509 // potentially schedule a GC pause.
496 510 HeapWord* attempt_allocation_humongous(size_t word_size,
497 511 unsigned int* gc_count_before_ret);
498 512
499 513 // Allocation attempt that should be called during safepoints (e.g.,
500 514 // at the end of a successful GC). expect_null_mutator_alloc_region
501 515 // specifies whether the mutator alloc region is expected to be NULL
502 516 // or not.
503 517 HeapWord* attempt_allocation_at_safepoint(size_t word_size,
504 518 bool expect_null_mutator_alloc_region);
505 519
506 520 // It dirties the cards that cover the block so that the post
507 521 // write barrier never queues anything when updating objects on this
508 522 // block. It is assumed (and in fact we assert) that the block
509 523 // belongs to a young region.
510 524 inline void dirty_young_block(HeapWord* start, size_t word_size);
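A minimal sketch of dirty_young_block(), assuming the card table's dirty(MemRegion) primitive:

    inline void G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
      assert(heap_region_containing(start)->is_young(), "block should be young");
      MemRegion mr(start, word_size);
      // Dirty every card covering the block, so the post write barrier
      // has nothing further to do for objects updated in it.
      ((CardTableModRefBS*) barrier_set())->dirty(mr);
    }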
511 525
512 526 // Allocate blocks during garbage collection. Will ensure an
513 527 // allocation region, either by picking one or expanding the
514 528 // heap, and then allocate a block of the given size. The block
515 529 // may not be humongous - it must fit into a single heap region.
516 530 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
517 531
518 532 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
519 533 HeapRegion* alloc_region,
520 534 bool par,
521 535 size_t word_size);
522 536
523 537 // Ensure that no further allocations can happen in "r", bearing in mind
524 538 // that parallel threads might be attempting allocations.
525 539 void par_allocate_remaining_space(HeapRegion* r);
526 540
527 541 // Retires an allocation region when it is full or at the end of a
528 542 // GC pause.
529 543 void retire_alloc_region(HeapRegion* alloc_region, bool par);
530 544
531 545 // These two methods are the "callbacks" from the G1AllocRegion class.
532 546
533 547 HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
534 548 void retire_mutator_alloc_region(HeapRegion* alloc_region,
535 549 size_t allocated_bytes);
536 550
537 551 // - if explicit_gc is true, the GC is for a System.gc() or a heap
538 552 // inspection request and should collect the entire heap
539 553 // - if clear_all_soft_refs is true, all soft references should be
540 554 // cleared during the GC
541 555 // - if explicit_gc is false, word_size describes the allocation that
542 556 // the GC should attempt (at least) to satisfy
543 557 // - it returns false if it is unable to do the collection due to the
544 558 // GC locker being active, true otherwise
545 559 bool do_collection(bool explicit_gc,
546 560 bool clear_all_soft_refs,
547 561 size_t word_size);
548 562
549 563 // Callback from VM_G1CollectFull operation.
550 564 // Perform a full collection.
551 565 void do_full_collection(bool clear_all_soft_refs);
552 566
553 567 // Resize the heap if necessary after a full collection. If this is
554 568 // after a collect-for allocation, "word_size" is the allocation size,
555 569 // and will be considered part of the used portion of the heap.
556 570 void resize_if_necessary_after_full_collection(size_t word_size);
557 571
558 572 // Callback from VM_G1CollectForAllocation operation.
559 573 // This function does everything necessary/possible to satisfy a
560 574 // failed allocation request (including collection, expansion, etc.)
561 575 HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
562 576
563 577 // Attempt to expand the heap sufficiently
564 578 // to support an allocation of the given "word_size". If
565 579 // successful, perform the allocation and return the address of the
566 580 // allocated block, or else "NULL".
567 581 HeapWord* expand_and_allocate(size_t word_size);
568 582
583 + // Process any reference objects discovered during
584 + // an incremental evacuation pause.
585 + void process_discovered_references();
586 +
587 + // Enqueue any remaining discovered references
588 + // after processing.
589 + void enqueue_discovered_references();
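The expected calling sequence for the two new methods, at the tail of an incremental evacuation pause (the actual call sites are in the .cpp file, not this hunk):

    // Inside the pause (sketch):
    evacuate_collection_set();        // may discover refs via the STW ref processor
    process_discovered_references();  // process what evacuation discovered
    // ... other per-pause cleanup ...
    enqueue_discovered_references();  // hand surviving refs to the pending list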
590 +
569 591 public:
570 592
571 593 G1MonitoringSupport* g1mm() { return _g1mm; }
572 594
573 595 // Expand the garbage-first heap by at least the given size (in bytes!).
574 596 // Returns true if the heap was expanded by the requested amount;
575 597 // false otherwise.
576 598 // (Rounds up to a HeapRegion boundary.)
577 599 bool expand(size_t expand_bytes);
578 600
579 601 // Do anything common to GC's.
580 602 virtual void gc_prologue(bool full);
581 603 virtual void gc_epilogue(bool full);
582 604
583 605 // We register a region with the fast "in collection set" test. We
584 606 // simply set to true the array slot corresponding to this region.
585 607 void register_region_with_in_cset_fast_test(HeapRegion* r) {
586 608 assert(_in_cset_fast_test_base != NULL, "sanity");
587 609 assert(r->in_collection_set(), "invariant");
588 610 size_t index = r->hrs_index();
589 611 assert(index < _in_cset_fast_test_length, "invariant");
590 612 assert(!_in_cset_fast_test_base[index], "invariant");
591 613 _in_cset_fast_test_base[index] = true;
592 614 }
593 615
594 616 // This is a fast test on whether a reference points into the
595 617 // collection set or not. It does not assume that the reference
596 618 // points into the heap; if it doesn't, it will return false.
597 619 bool in_cset_fast_test(oop obj) {
598 620 assert(_in_cset_fast_test != NULL, "sanity");
599 621 if (_g1_committed.contains((HeapWord*) obj)) {
600 622 // no need to subtract the bottom of the heap from obj,
601 623 // _in_cset_fast_test is biased
602 624 size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
603 625 bool ret = _in_cset_fast_test[index];
604 626 // let's make sure the result is consistent with what the slower
605 627 // test returns
606 628 assert( ret || !obj_in_cs(obj), "sanity");
607 629 assert(!ret || obj_in_cs(obj), "sanity");
608 630 return ret;
609 631 } else {
610 632 return false;
611 633 }
612 634 }
613 635
614 636 void clear_cset_fast_test() {
615 637 assert(_in_cset_fast_test_base != NULL, "sanity");
616 638 memset(_in_cset_fast_test_base, false,
617 639 _in_cset_fast_test_length * sizeof(bool));
618 640 }
619 641
620 642 // This is called at the end of either a concurrent cycle or a Full
621 643 // GC to update the number of full collections completed. Those two
622 644 // can happen in a nested fashion, i.e., we start a concurrent
623 645 // cycle, a Full GC happens half-way through it which ends first,
624 646 // and then the cycle notices that a Full GC happened and ends
625 647 // too. The concurrent parameter is a boolean to help us do a bit
626 648 // tighter consistency checking in the method. If concurrent is
627 649 // false, the caller is the inner caller in the nesting (i.e., the
628 650 // Full GC). If concurrent is true, the caller is the outer caller
629 651 // in this nesting (i.e., the concurrent cycle). Further nesting is
630 652 // not currently supported. The end of this call also notifies
631 653 // the FullGCCount_lock in case a Java thread is waiting for a full
632 654 // GC to happen (e.g., it called System.gc() with
633 655 // +ExplicitGCInvokesConcurrent).
634 656 void increment_full_collections_completed(bool concurrent);
635 657
636 658 unsigned int full_collections_completed() {
637 659 return _full_collections_completed;
638 660 }
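The FullGCCount_lock notification mentioned above supports waiters such as a System.gc() caller running with +ExplicitGCInvokesConcurrent; a hedged sketch of such a wait loop:

    unsigned int before = full_collections_completed();
    // ... initiate the concurrent cycle ...
    MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
    while (full_collections_completed() <= before) {
      FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
    }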
639 661
640 662 G1HRPrinter* hr_printer() { return &_hr_printer; }
641 663
642 664 protected:
643 665
644 666 // Shrink the garbage-first heap by at most the given size (in bytes!).
645 667 // (Rounds down to a HeapRegion boundary.)
646 668 virtual void shrink(size_t expand_bytes);
647 669 void shrink_helper(size_t expand_bytes);
648 670
649 671 #if TASKQUEUE_STATS
650 672 static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
651 673 void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
652 674 void reset_taskqueue_stats();
653 675 #endif // TASKQUEUE_STATS
654 676
655 677 // Schedule the VM operation that will do an evacuation pause to
656 678 // satisfy an allocation request of word_size. *succeeded will
657 679 // return whether the VM operation was successful (it did do an
658 680 // evacuation pause) or not (another thread beat us to it or the GC
659 681 // locker was active). Given that we should not be holding the
660 682 // Heap_lock when we enter this method, we will pass the
661 683 // gc_count_before (i.e., total_collections()) as a parameter since
662 684 // it has to be read while holding the Heap_lock. Currently, both
663 685 // methods that call do_collection_pause() release the Heap_lock
664 686 // before the call, so it's easy to read gc_count_before just before.
665 687 HeapWord* do_collection_pause(size_t word_size,
666 688 unsigned int gc_count_before,
667 689 bool* succeeded);
668 690
669 691 // The guts of the incremental collection pause, executed by the vm
670 692 // thread. It returns false if it is unable to do the collection due
671 693 // to the GC locker being active, true otherwise
672 694 bool do_collection_pause_at_safepoint(double target_pause_time_ms);
673 695
674 696 // Actually do the work of evacuating the collection set.
675 697 void evacuate_collection_set();
676 698
677 699 // The g1 remembered set of the heap.
678 700 G1RemSet* _g1_rem_set;
679 701 // And its mod ref barrier set, used to track updates for the above.
680 702 ModRefBarrierSet* _mr_bs;
681 703
682 704 // A set of cards that cover the objects for which the Rsets should be updated
683 705 // concurrently after the collection.
684 706 DirtyCardQueueSet _dirty_card_queue_set;
685 707
686 708 // The Heap Region Rem Set Iterator.
687 709 HeapRegionRemSetIterator** _rem_set_iterator;
688 710
689 711 // The closure used to refine a single card.
690 712 RefineCardTableEntryClosure* _refine_cte_cl;
691 713
692 714 // A function to check the consistency of dirty card logs.
693 715 void check_ct_logs_at_safepoint();
694 716
695 717 // A DirtyCardQueueSet that is used to hold cards that contain
696 718 // references into the current collection set. This is used to
697 719 // update the remembered sets of the regions in the collection
698 720 // set in the event of an evacuation failure.
699 721 DirtyCardQueueSet _into_cset_dirty_card_queue_set;
700 722
701 723 // After a collection pause, make the regions in the CS into free
702 724 // regions.
703 725 void free_collection_set(HeapRegion* cs_head);
704 726
705 727 // Abandon the current collection set without recording policy
706 728 // statistics or updating free lists.
707 729 void abandon_collection_set(HeapRegion* cs_head);
708 730
709 731 // Applies "scan_non_heap_roots" to roots outside the heap,
710 732 // "scan_rs" to roots inside the heap (having done "set_region" to
711 733 // indicate the region in which the root resides), and does "scan_perm"
712 734 // (setting the generation to the perm generation.) If "scan_rs" is
713 735 // NULL, then this step is skipped. The "worker_i"
714 736 // param is for use with parallel roots processing, and should be
715 737 // the "i" of the calling parallel worker thread's work(i) function.
716 738 // In the sequential case this param will be ignored.
717 739 void g1_process_strong_roots(bool collecting_perm_gen,
718 740 SharedHeap::ScanningOption so,
719 741 OopClosure* scan_non_heap_roots,
720 742 OopsInHeapRegionClosure* scan_rs,
721 743 OopsInGenClosure* scan_perm,
722 744 int worker_i);
723 745
724 746 // Apply "blk" to all the weak roots of the system. These include
725 747 // JNI weak roots, the code cache, system dictionary, symbol table,
726 748 // string table, and referents of reachable weak refs.
727 749 void g1_process_weak_roots(OopClosure* root_closure,
728 750 OopClosure* non_root_closure);
729 751
730 752 // Invoke "save_marks" on all heap regions.
731 753 void save_marks();
732 754
733 755 // Frees a non-humongous region by initializing its contents and
734 756 // adding it to the free list that's passed as a parameter (this is
735 757 // usually a local list which will be appended to the master free
736 758 // list later). The used bytes of freed regions are accumulated in
737 759 // pre_used. If par is true, the region's RSet will not be freed
738 760 // up. The assumption is that this will be done later.
739 761 void free_region(HeapRegion* hr,
740 762 size_t* pre_used,
741 763 FreeRegionList* free_list,
742 764 bool par);
743 765
744 766 // Frees a humongous region by collapsing it into individual regions
745 767 // and calling free_region() for each of them. The freed regions
746 768 // will be added to the free list that's passed as a parameter (this
747 769 // is usually a local list which will be appended to the master free
748 770 // list later). The used bytes of freed regions are accumulated in
749 771 // pre_used. If par is true, the region's RSet will not be freed
750 772 // up. The assumption is that this will be done later.
751 773 void free_humongous_region(HeapRegion* hr,
752 774 size_t* pre_used,
753 775 FreeRegionList* free_list,
754 776 HumongousRegionSet* humongous_proxy_set,
755 777 bool par);
756 778
757 779 // Notifies all the necessary spaces that the committed space has
758 780 // been updated (either expanded or shrunk). It should be called
759 781 // after _g1_storage is updated.
760 782 void update_committed_space(HeapWord* old_end, HeapWord* new_end);
761 783
762 784 // The concurrent marker (and the thread it runs in.)
763 785 ConcurrentMark* _cm;
764 786 ConcurrentMarkThread* _cmThread;
765 787 bool _mark_in_progress;
766 788
767 789 // The concurrent refiner.
768 790 ConcurrentG1Refine* _cg1r;
769 791
770 792 // The parallel task queues
771 793 RefToScanQueueSet *_task_queues;
772 794
773 795 // True iff an evacuation has failed in the current collection.
774 796 bool _evacuation_failed;
775 797
776 798 // Set the attribute indicating whether evacuation has failed in the
777 799 // current collection.
778 800 void set_evacuation_failed(bool b) { _evacuation_failed = b; }
779 801
780 802 // Failed evacuations cause some logical from-space objects to have
781 803 // forwarding pointers to themselves. Reset them.
782 804 void remove_self_forwarding_pointers();
783 805
784 806 // When one is non-null, so is the other. Together they form pairs:
785 807 // an object with a preserved mark, and that object's mark value.
786 808 GrowableArray<oop>* _objs_with_preserved_marks;
787 809 GrowableArray<markOop>* _preserved_marks_of_objs;
788 810
789 811 // Preserve the mark of "obj", if necessary, in preparation for its mark
790 812 // word being overwritten with a self-forwarding-pointer.
791 813 void preserve_mark_if_necessary(oop obj, markOop m);
792 814
793 815 // The stack of evac-failure objects left to be scanned.
794 816 GrowableArray<oop>* _evac_failure_scan_stack;
795 817 // The closure to apply to evac-failure objects.
796 818
797 819 OopsInHeapRegionClosure* _evac_failure_closure;
798 820 // Set the field above.
799 821 void
800 822 set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) {
801 823 _evac_failure_closure = evac_failure_closure;
802 824 }
803 825
804 826 // Push "obj" on the scan stack.
805 827 void push_on_evac_failure_scan_stack(oop obj);
806 828 // Process scan stack entries until the stack is empty.
807 829 void drain_evac_failure_scan_stack();
808 830 // True iff an invocation of "drain_evac_failure_scan_stack" is in
809 831 // progress; to prevent unnecessary recursion.
810 832 bool _drain_in_progress;
811 833
812 834 // Do any necessary initialization for evacuation-failure handling.
813 835 // "cl" is the closure that will be used to process evac-failure
814 836 // objects.
815 837 void init_for_evac_failure(OopsInHeapRegionClosure* cl);
816 838 // Do any necessary cleanup for evacuation-failure handling data
817 839 // structures.
818 840 void finalize_for_evac_failure();
819 841
820 842 // An attempt to evacuate "obj" has failed; take necessary steps.
821 843 oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
822 844 void handle_evacuation_failure_common(oop obj, markOop m);
823 845
824 846 // Ensure that the relevant gc_alloc regions are set.
825 847 void get_gc_alloc_regions();
826 848 // We're done with GC alloc regions. We are going to tear down the
827 849 // gc alloc list and remove the gc alloc tag from all the regions on
828 850 // that list. However, we will also retain the last (i.e., the one
829 851 // that is half-full) GC alloc region, per GCAllocPurpose, for
830 852 // possible reuse during the next collection, provided
831 853 // _retain_gc_alloc_region[] indicates that it should be the
832 854 // case. Said regions are kept in the _retained_gc_alloc_regions[]
833 855 // array. If the parameter totally is set, we will not retain any
834 856 // regions, irrespective of what _retain_gc_alloc_region[]
835 857 // indicates.
836 858 void release_gc_alloc_regions(bool totally);
837 859 #ifndef PRODUCT
838 860 // Useful for debugging.
839 861 void print_gc_alloc_regions();
840 862 #endif // !PRODUCT
841 863
842 - // Instance of the concurrent mark is_alive closure for embedding
843 - // into the reference processor as the is_alive_non_header. This
844 - // prevents unnecessary additions to the discovered lists during
845 - // concurrent discovery.
846 - G1CMIsAliveClosure _is_alive_closure;
864 + // ("Weak") Reference processing support.
865 + //
866 + // G1 has two instances of the reference processor class. One
867 + // (_ref_processor_cm) handles reference object discovery
868 + // and subsequent processing during concurrent marking cycles.
869 + //
870 + // The other (_ref_processor_stw) handles reference object
871 + // discovery and processing during full GCs and incremental
872 + // evacuation pauses.
873 + //
874 + // During an incremental pause, reference discovery will be
875 + // temporarily disabled for _ref_processor_cm and will be
876 + // enabled for _ref_processor_stw. At the end of the evacuation
877 + // pause, references discovered by _ref_processor_stw will be
878 + // processed and discovery will be disabled. The previous
879 + // discovery setting for _ref_processor_cm will then be
880 + // reinstated.
881 + //
882 + // At the start of marking:
883 + // * Discovery by the CM ref processor is verified to be inactive
884 + // and its discovered lists are empty.
885 + // * Discovery by the CM ref processor is then enabled.
886 + //
887 + // At the end of marking:
888 + // * Any references on the CM ref processor's discovered
889 + // lists are processed (possibly MT).
890 + //
891 + // At the start of full GC we:
892 + // * Disable discovery by the CM ref processor and
893 + // empty CM ref processor's discovered lists
894 + // (without processing any entries).
895 + // * Verify that the STW ref processor is inactive and its
896 + // discovered lists are empty.
897 + // * Temporarily set STW ref processor discovery as single threaded.
898 + // * Temporarily clear the STW ref processor's _is_alive_non_header
899 + // field.
900 + // * Finally enable discovery by the STW ref processor.
901 + //
902 + // The STW ref processor is used to record any discovered
903 + // references during the full GC.
904 + //
905 + // At the end of a full GC we:
906 + // * Will enqueue any non-live discovered references on the
907 + // STW ref processor's discovered lists. This makes the
908 + // STW ref processor inactive by disabling discovery.
909 + // * Verify that the CM ref processor is still inactive
910 + // and no references have been placed on its discovered
911 + // lists (also checked as a precondition during initial marking).
912 +
913 + // The (stw) reference processor...
914 + ReferenceProcessor* _ref_processor_stw;
915 +
916 + // Instance of the is_alive closure for embedding into the
917 + // STW reference processor as the _is_alive_non_header field.
918 + // The _is_alive_non_header prevents unnecessary additions to
919 + // the discovered lists during reference discovery.
920 + G1STWIsAliveClosure _is_alive_closure_stw;
847 921
848 - // ("Weak") Reference processing support
849 - ReferenceProcessor* _ref_processor;
922 + // The (concurrent marking) reference processor...
923 + ReferenceProcessor* _ref_processor_cm;
924 +
925 + // Instance of the concurrent mark is_alive closure for embedding
926 + // into the Concurrent Marking reference processor as the
927 + // _is_alive_non_header field. The _is_alive_non_header
928 + // prevents unnecessary additions to the discovered lists
929 + // during concurrent discovery.
930 + G1CMIsAliveClosure _is_alive_closure_cm;
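Putting the lifecycle above together, the hand-off around an incremental evacuation pause is expected to look roughly as follows. NoRefDiscovery is the existing RAII helper from referenceProcessor.hpp; the rest of the shape is an assumption, not the patch's literal code:

    {
      NoRefDiscovery no_cm_discovery(ref_processor_cm()); // pause CM discovery
      ref_processor_stw()->enable_discovery();            // begin STW discovery
      evacuate_collection_set();
      process_discovered_references();   // drain the STW discovered lists
      enqueue_discovered_references();   // also disables STW discovery
    }
    // CM discovery, if it was enabled, is restored when no_cm_discovery
    // goes out of scope.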
850 931
851 932 enum G1H_process_strong_roots_tasks {
852 933 G1H_PS_mark_stack_oops_do,
853 934 G1H_PS_refProcessor_oops_do,
854 935 // Leave this one last.
855 936 G1H_PS_NumElements
856 937 };
857 938
858 939 SubTasksDone* _process_strong_tasks;
859 940
860 941 volatile bool _free_regions_coming;
861 942
862 943 public:
863 944
864 945 SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
865 946
866 947 void set_refine_cte_cl_concurrency(bool concurrent);
867 948
868 949 RefToScanQueue *task_queue(int i) const;
869 950
870 951 // A set of cards where updates happened during the GC
871 952 DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
872 953
873 954 // A DirtyCardQueueSet that is used to hold cards that contain
874 955 // references into the current collection set. This is used to
875 956 // update the remembered sets of the regions in the collection
876 957 // set in the event of an evacuation failure.
877 958 DirtyCardQueueSet& into_cset_dirty_card_queue_set()
878 959 { return _into_cset_dirty_card_queue_set; }
879 960
880 961 // Create a G1CollectedHeap with the specified policy.
881 962 // Must call the initialize method afterwards.
882 963 // May not return if something goes wrong.
883 964 G1CollectedHeap(G1CollectorPolicy* policy);
884 965
885 966 // Initialize the G1CollectedHeap to have the initial and
886 967 // maximum sizes, permanent generation, and remembered and barrier sets
887 968 // specified by the policy object.
888 969 jint initialize();
889 970
971 + // Initialize weak reference processing.
890 972 virtual void ref_processing_init();
891 973
892 974 void set_par_threads(int t) {
893 975 SharedHeap::set_par_threads(t);
894 976 _process_strong_tasks->set_n_threads(t);
895 977 }
896 978
897 979 virtual CollectedHeap::Name kind() const {
898 980 return CollectedHeap::G1CollectedHeap;
899 981 }
900 982
901 983 // The current policy object for the collector.
902 984 G1CollectorPolicy* g1_policy() const { return _g1_policy; }
903 985
904 986 // Adaptive size policy. No such thing for g1.
905 987 virtual AdaptiveSizePolicy* size_policy() { return NULL; }
906 988
907 989 // The rem set and barrier set.
908 990 G1RemSet* g1_rem_set() const { return _g1_rem_set; }
909 991 ModRefBarrierSet* mr_bs() const { return _mr_bs; }
910 992
911 993 // The rem set iterator.
912 994 HeapRegionRemSetIterator* rem_set_iterator(int i) {
913 995 return _rem_set_iterator[i];
914 996 }
915 997
916 998 HeapRegionRemSetIterator* rem_set_iterator() {
917 999 return _rem_set_iterator[0];
918 1000 }
919 1001
920 1002 unsigned get_gc_time_stamp() {
921 1003 return _gc_time_stamp;
922 1004 }
923 1005
924 1006 void reset_gc_time_stamp() {
925 1007 _gc_time_stamp = 0;
926 1008 OrderAccess::fence();
927 1009 }
928 1010
929 1011 void increment_gc_time_stamp() {
930 1012 ++_gc_time_stamp;
931 1013 OrderAccess::fence();
932 1014 }
933 1015
934 1016 void iterate_dirty_card_closure(CardTableEntryClosure* cl,
935 1017 DirtyCardQueue* into_cset_dcq,
936 1018 bool concurrent, int worker_i);
937 1019
938 1020 // The shared block offset table array.
939 1021 G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
940 1022
941 - // Reference Processing accessor
942 - ReferenceProcessor* ref_processor() { return _ref_processor; }
1023 + // Reference Processing accessors
1024 +
1025 + // The STW reference processor...
1026 + ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
1027 +
1028 + // The Concurrent Marking reference processor...
1029 + ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
943 1030
944 1031 virtual size_t capacity() const;
945 1032 virtual size_t used() const;
946 1033 // This should be called when we're not holding the heap lock. The
947 1034 // result might be a bit inaccurate.
948 1035 size_t used_unlocked() const;
949 1036 size_t recalculate_used() const;
950 1037 #ifndef PRODUCT
951 1038 size_t recalculate_used_regions() const;
952 1039 #endif // PRODUCT
953 1040
954 1041 // These virtual functions do the actual allocation.
955 1042 // Some heaps may offer a contiguous region for shared non-blocking
956 1043 // allocation, via inlined code (by exporting the address of the top and
957 1044 // end fields defining the extent of the contiguous allocation region.)
958 1045 // But G1CollectedHeap doesn't yet support this.
959 1046
960 1047 // Return an estimate of the maximum allocation that could be performed
961 1048 // without triggering any collection or expansion activity. In a
962 1049 // generational collector, for example, this is probably the largest
963 1050 // allocation that could be supported (without expansion) in the youngest
964 1051 // generation. It is "unsafe" because no locks are taken; the result
965 1052 // should be treated as an approximation, not a guarantee, for use in
966 1053 // heuristic resizing decisions.
967 1054 virtual size_t unsafe_max_alloc();
968 1055
969 1056 virtual bool is_maximal_no_gc() const {
970 1057 return _g1_storage.uncommitted_size() == 0;
971 1058 }
972 1059
973 1060 // The total number of regions in the heap.
974 1061 size_t n_regions() { return _hrs.length(); }
975 1062
976 1063 // The max number of regions in the heap.
977 1064 size_t max_regions() { return _hrs.max_length(); }
978 1065
979 1066 // The number of regions that are completely free.
980 1067 size_t free_regions() { return _free_list.length(); }
981 1068
982 1069 // The number of regions that are not completely free.
983 1070 size_t used_regions() { return n_regions() - free_regions(); }
984 1071
985 1072 // The number of regions available for "regular" expansion.
986 1073 size_t expansion_regions() { return _expansion_regions; }
987 1074
988 1075 // Factory method for HeapRegion instances. It will return NULL if
989 1076 // the allocation fails.
990 1077 HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
991 1078
992 1079 void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
993 1080 void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
994 1081 void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
995 1082 void verify_dirty_young_regions() PRODUCT_RETURN;
996 1083
997 1084 // verify_region_sets() performs verification over the region
998 1085 // lists. It will be compiled in the product code to be used when
999 1086 // necessary (i.e., during heap verification).
1000 1087 void verify_region_sets();
1001 1088
1002 1089 // verify_region_sets_optional() is planted in the code for
1003 1090 // list verification in non-product builds (and it can be enabled in
1004 1091 // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
1005 1092 #if HEAP_REGION_SET_FORCE_VERIFY
1006 1093 void verify_region_sets_optional() {
1007 1094 verify_region_sets();
1008 1095 }
1009 1096 #else // HEAP_REGION_SET_FORCE_VERIFY
1010 1097 void verify_region_sets_optional() { }
1011 1098 #endif // HEAP_REGION_SET_FORCE_VERIFY
1012 1099
1013 1100 #ifdef ASSERT
1014 1101 bool is_on_master_free_list(HeapRegion* hr) {
1015 1102 return hr->containing_set() == &_free_list;
1016 1103 }
1017 1104
1018 1105 bool is_in_humongous_set(HeapRegion* hr) {
1019 1106 return hr->containing_set() == &_humongous_set;
1020 1107 }
1021 1108 #endif // ASSERT
1022 1109
1023 1110 // Wrapper for the region list operations that can be called from
1024 1111 // methods outside this class.
1025 1112
1026 1113 void secondary_free_list_add_as_tail(FreeRegionList* list) {
1027 1114 _secondary_free_list.add_as_tail(list);
1028 1115 }
1029 1116
1030 1117 void append_secondary_free_list() {
1031 1118 _free_list.add_as_head(&_secondary_free_list);
1032 1119 }
1033 1120
1034 1121 void append_secondary_free_list_if_not_empty_with_lock() {
1035 1122 // If the secondary free list looks empty there's no reason to
1036 1123 // take the lock and then try to append it.
1037 1124 if (!_secondary_free_list.is_empty()) {
1038 1125 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1039 1126 append_secondary_free_list();
1040 1127 }
1041 1128 }
1042 1129
1043 1130 void set_free_regions_coming();
1044 1131 void reset_free_regions_coming();
1045 1132 bool free_regions_coming() { return _free_regions_coming; }
1046 1133 void wait_while_free_regions_coming();
1047 1134
1048 1135 // Perform a collection of the heap; intended for use in implementing
1049 1136 // "System.gc". This probably implies as full a collection as the
1050 1137 // "CollectedHeap" supports.
1051 1138 virtual void collect(GCCause::Cause cause);
1052 1139
1053 1140 // The same as above but assume that the caller holds the Heap_lock.
1054 1141 void collect_locked(GCCause::Cause cause);
1055 1142
1056 1143 // This interface assumes that it's being called by the
1057 1144 // vm thread. It collects the heap assuming that the
1058 1145 // heap lock is already held and that we are executing in
1059 1146 // the context of the vm thread.
1060 1147 virtual void collect_as_vm_thread(GCCause::Cause cause);
1061 1148
1062 1149 // True iff an evacuation has failed in the most-recent collection.
1063 1150 bool evacuation_failed() { return _evacuation_failed; }
1064 1151
1065 1152 // It will free a region if it has allocated objects in it that are
1066 1153 // all dead. It calls either free_region() or
1067 1154 // free_humongous_region() depending on the type of the region that
1068 1155 // is passed to it.
1069 1156 void free_region_if_empty(HeapRegion* hr,
1070 1157 size_t* pre_used,
1071 1158 FreeRegionList* free_list,
1072 1159 HumongousRegionSet* humongous_proxy_set,
1073 1160 HRRSCleanupTask* hrrs_cleanup_task,
1074 1161 bool par);
1075 1162
1076 1163 // It appends the free list to the master free list and updates the
1077 1164 // master humongous list according to the contents of the proxy
1078 1165 // list. It also adjusts the total used bytes according to pre_used
1079 1166 // (if par is true, it will do so by taking the ParGCRareEvent_lock).
1080 1167 void update_sets_after_freeing_regions(size_t pre_used,
1081 1168 FreeRegionList* free_list,
1082 1169 HumongousRegionSet* humongous_proxy_set,
1083 1170 bool par);
1084 1171
1085 1172 // Returns "TRUE" iff "p" points into the allocated area of the heap.
1086 1173 virtual bool is_in(const void* p) const;
1087 1174
1088 1175 // Return "TRUE" iff the given object address is within the collection
1089 1176 // set.
1090 1177 inline bool obj_in_cs(oop obj);
1091 1178
1092 1179 // Return "TRUE" iff the given object address is in the reserved
1093 1180 // region of g1 (excluding the permanent generation).
1094 1181 bool is_in_g1_reserved(const void* p) const {
1095 1182 return _g1_reserved.contains(p);
1096 1183 }
1097 1184
1098 1185 // Returns a MemRegion that corresponds to the space that has been
1099 1186 // reserved for the heap
1100 1187 MemRegion g1_reserved() {
1101 1188 return _g1_reserved;
1102 1189 }
1103 1190
1104 1191 // Returns a MemRegion that corresponds to the space that has been
1105 1192 // committed in the heap
1106 1193 MemRegion g1_committed() {
1107 1194 return _g1_committed;
1108 1195 }
1109 1196
1110 1197 virtual bool is_in_closed_subset(const void* p) const;
1111 1198
1112 1199 // Dirty card table entries covering a list of young regions.
1113 1200 void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list);
1114 1201
1115 1202 // This resets the card table to all zeros. It is used after
1116 1203 // a collection pause which used the card table to claim cards.
1117 1204 void cleanUpCardTable();
1118 1205
1119 1206 // Iteration functions.
1120 1207
1121 1208 // Iterate over all the ref-containing fields of all objects, calling
1122 1209 // "cl.do_oop" on each.
1123 1210 virtual void oop_iterate(OopClosure* cl) {
1124 1211 oop_iterate(cl, true);
1125 1212 }
1126 1213 void oop_iterate(OopClosure* cl, bool do_perm);
1127 1214
1128 1215 // Same as above, restricted to a memory region.
1129 1216 virtual void oop_iterate(MemRegion mr, OopClosure* cl) {
1130 1217 oop_iterate(mr, cl, true);
1131 1218 }
1132 1219 void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm);
1133 1220
1134 1221 // Iterate over all objects, calling "cl.do_object" on each.
1135 1222 virtual void object_iterate(ObjectClosure* cl) {
1136 1223 object_iterate(cl, true);
1137 1224 }
1138 1225 virtual void safe_object_iterate(ObjectClosure* cl) {
1139 1226 object_iterate(cl, true);
1140 1227 }
1141 1228 void object_iterate(ObjectClosure* cl, bool do_perm);
1142 1229
1143 1230 // Iterate over all objects allocated since the last collection, calling
1144 1231 // "cl.do_object" on each. The heap must have been initialized properly
1145 1232 // to support this function, or else this call will fail.
1146 1233 virtual void object_iterate_since_last_GC(ObjectClosure* cl);
1147 1234
1148 1235 // Iterate over all spaces in use in the heap, in ascending address order.
1149 1236 virtual void space_iterate(SpaceClosure* cl);
1150 1237
1151 1238 // Iterate over heap regions, in address order, terminating the
1152 1239 // iteration early if the "doHeapRegion" method returns "true".
1153 1240 void heap_region_iterate(HeapRegionClosure* blk) const;
1154 1241
1155 1242 // Iterate over heap regions starting with r (or the first region if "r"
1156 1243 // is NULL), in address order, terminating early if the "doHeapRegion"
1157 1244 // method returns "true".
1158 1245 void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
1159 1246
1160 1247 // Return the region with the given index. It assumes the index is valid.
1161 1248 HeapRegion* region_at(size_t index) const { return _hrs.at(index); }
1162 1249
1163 1250 // Divide the heap region sequence into "chunks" of some size (the number
1164 1251 // of regions divided by the number of parallel threads times some
1165 1252 // overpartition factor, currently 4). Assumes that this will be called
1166 1253 // in parallel by ParallelGCThreads worker threads with distinct worker
1167 1254 // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1168 1255 // calls will use the same "claim_value", and that that claim value is
1169 1256 // different from the claim_value of any heap region before the start of
1170 1257 // the iteration. Applies "blk->doHeapRegion" to each of the regions, by
1171 1258 // attempting to claim the first region in each chunk, and, if
1172 1259 // successful, applying the closure to each region in the chunk (and
1173 1260 // setting the claim value of the second and subsequent regions of the
1174 1261 // chunk.) For now requires that "doHeapRegion" always returns "false",
1175 1262 // i.e., that a closure never attempt to abort a traversal.
1176 1263 void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1177 1264 int worker,
1178 1265 jint claim_value);
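  //
  // A minimal usage sketch (an editorial example, not part of this
  // interface; "g1h" is assumed to be a G1CollectedHeap* and the
  // claim value HR_CLAIM_VALUE is hypothetical):
  //
  //   class CountRegionsClosure : public HeapRegionClosure {
  //   public:
  //     size_t _count;
  //     CountRegionsClosure() : _count(0) { }
  //     bool doHeapRegion(HeapRegion* r) {
  //       _count += 1;
  //       return false;  // a chunked traversal must never abort
  //     }
  //   };
  //
  //   // In each of the ParallelGCThreads workers, with worker id i:
  //   CountRegionsClosure cl;
  //   g1h->heap_region_par_iterate_chunked(&cl, i, HR_CLAIM_VALUE);
  //   // Once all workers are done:
  //   g1h->reset_heap_region_claim_values();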
1179 1266
1180 1267 // It resets all the region claim values to the default.
1181 1268 void reset_heap_region_claim_values();
1182 1269
1183 1270 #ifdef ASSERT
1184 1271 bool check_heap_region_claim_values(jint claim_value);
1185 1272 #endif // ASSERT
1186 1273
1187 1274 // Iterate over the regions (if any) in the current collection set.
1188 1275 void collection_set_iterate(HeapRegionClosure* blk);
1189 1276
1190 1277 // As above but starting from region r
1191 1278 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1192 1279
1193 1280 // Returns the first (lowest address) compactible space in the heap.
1194 1281 virtual CompactibleSpace* first_compactible_space();
1195 1282
1196 1283 // A CollectedHeap will contain some number of spaces. This finds the
1197 1284 // space containing a given address, or else returns NULL.
1198 1285 virtual Space* space_containing(const void* addr) const;
1199 1286
1200 1287 // A G1CollectedHeap will contain some number of heap regions. This
1201 1288 // finds the region containing a given address, or else returns NULL.
1202 1289 template <class T>
1203 1290 inline HeapRegion* heap_region_containing(const T addr) const;
1204 1291
1205 1292 // Like the above, but requires "addr" to be in the heap (to avoid a
1206 1293 // null-check), and unlike the above, may return a continuing humongous
1207 1294 // region.
1208 1295 template <class T>
1209 1296 inline HeapRegion* heap_region_containing_raw(const T addr) const;
1210 1297
1211 1298 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1212 1299 // each address in the (reserved) heap is a member of exactly
1213 1300 // one block. The defining characteristic of a block is that it is
1214 1301 // possible to find its size, and thus to progress forward to the next
1215 1302 // block. (Blocks may be of different sizes.) Thus, blocks may
1216 1303 // represent Java objects, or they might be free blocks in a
1217 1304 // free-list-based heap (or subheap), as long as the two kinds are
1218 1305 // distinguishable and the size of each is determinable.
1219 1306
1220 1307 // Returns the address of the start of the "block" that contains the
1221 1308 // address "addr". We say "block" instead of "object" since some heaps
1222 1309 // may not pack objects densely; a block may be either an object or a
1223 1310 // non-object.
1224 1311 virtual HeapWord* block_start(const void* addr) const;
1225 1312
1226 1313 // Requires "addr" to be the start of a block, and returns its size.
1227 1314 // "addr + size" is required to be the start of a new block, or the end
1228 1315 // of the active area of the heap.
1229 1316 virtual size_t block_size(const HeapWord* addr) const;
1230 1317
1231 1318 // Requires "addr" to be the start of a block, and returns "TRUE" iff
1232 1319 // the block is an object.
1233 1320 virtual bool block_is_obj(const HeapWord* addr) const;
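  //
  // Together, these support a linear walk over a parseable address
  // range (an editorial sketch; "bottom" and "top" are assumed to be
  // bounds supplied by the caller):
  //
  //   HeapWord* cur = bottom;
  //   while (cur < top) {
  //     if (block_is_obj(cur)) {
  //       // "cur" is the start of an object
  //     }
  //     cur += block_size(cur);  // advance to the next block
  //   }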
1234 1321
1235 1322 // Does this heap support heap inspection? (+PrintClassHistogram)
1236 1323 virtual bool supports_heap_inspection() const { return true; }
1237 1324
1238 1325 // Section on thread-local allocation buffers (TLABs)
1239 1326 // See CollectedHeap for semantics.
1240 1327
1241 1328 virtual bool supports_tlab_allocation() const;
1242 1329 virtual size_t tlab_capacity(Thread* thr) const;
1243 1330 virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
1244 1331
1245 1332 // Can a compiler initialize a new object without store barriers?
1246 1333 // This permission only extends from the creation of a new object
1247 1334 // via a TLAB up to the first subsequent safepoint. If such permission
1248 1335 // is granted for this heap type, the compiler promises to call
1249 1336 // defer_store_barrier() below on any slow path allocation of
1250 1337 // a new object for which such initializing store barriers will
1251 1338 // have been elided. G1, like CMS, allows this, but should be
1252 1339 // ready to provide a compensating write barrier as necessary
1253 1340 // if that storage came out of a non-young region. The efficiency
1254 1341 // of this implementation depends crucially on being able to
1255 1342 // answer very efficiently in constant time whether a piece of
1256 1343 // storage in the heap comes from a young region or not.
1257 1344 // See ReduceInitialCardMarks.
1258 1345 virtual bool can_elide_tlab_store_barriers() const {
1259 1346 // 6920090: Temporarily disabled, because of lingering
1260 1347 // instabilities related to RICM with G1. In the
1261 1348 // interim, the option ReduceInitialCardMarksForG1
1262 1349 // below is left solely as a debugging device at least
1263 1350 // until 6920109 fixes the instabilities.
1264 1351 return ReduceInitialCardMarksForG1;
1265 1352 }
1266 1353
1267 1354 virtual bool card_mark_must_follow_store() const {
1268 1355 return true;
1269 1356 }
1270 1357
1271 1358 bool is_in_young(const oop obj) {
1272 1359 HeapRegion* hr = heap_region_containing(obj);
1273 1360 return hr != NULL && hr->is_young();
1274 1361 }
1275 1362
1276 1363 #ifdef ASSERT
1277 1364 virtual bool is_in_partial_collection(const void* p);
1278 1365 #endif
1279 1366
1280 1367 virtual bool is_scavengable(const void* addr);
1281 1368
1282 1369 // We don't need barriers for initializing stores to objects
1283 1370 // in the young gen: for the SATB pre-barrier, there is no
1284 1371 // pre-value that needs to be remembered; for the remembered-set
1285 1372 // update logging post-barrier, we don't maintain remembered set
1286 1373 // information for young gen objects. Note that non-generational
1287 1374 // G1 does not have any "young" objects, should not elide
1288 1375 // the rs logging barrier and so should always answer false below.
1289 1376 // However, non-generational G1 (-XX:-G1Gen) appears to have
1290 1377 // bit-rotted so was not tested below.
1291 1378 virtual bool can_elide_initializing_store_barrier(oop new_obj) {
1292 1379 // Re 6920090, 6920109 above.
1293 1380 assert(ReduceInitialCardMarksForG1, "Else cannot be here");
1294 1381 assert(G1Gen || !is_in_young(new_obj),
1295 1382 "Non-generational G1 should never return true below");
1296 1383 return is_in_young(new_obj);
1297 1384 }
1298 1385
1299 1386 // Can a compiler elide a store barrier when it writes
1300 1387 // a permanent oop into the heap? Applies when the compiler
1301 1388 // is storing x to the heap, where x->is_perm() is true.
1302 1389 virtual bool can_elide_permanent_oop_store_barriers() const {
1303 1390 // At least until perm gen collection is also G1-ified, at
1304 1391 // which point this should return false.
1305 1392 return true;
1306 1393 }
1307 1394
1308 1395 // Returns "true" iff the given word_size is "very large".
1309 1396 static bool isHumongous(size_t word_size) {
1310 1397 // Note this has to be strictly greater-than as the TLABs
1311 1398 // are capped at the humongous threshold and we want to
1312 1399 // ensure that we don't try to allocate a TLAB as
1313 1400 // humongous and that we don't allocate a humongous
1314 1401 // object in a TLAB.
1315 1402 return word_size > _humongous_object_threshold_in_words;
1316 1403 }
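  //
  // Worked example (editorial; assumes 1M regions on a 64-bit VM with
  // the threshold at half a region): a region is 128K words, so the
  // threshold is 64K words; a request of exactly 64K words can still
  // be satisfied from a TLAB, while 64K+1 words is humongous.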
1317 1404
1318 1405 // Update mod union table with the set of dirty cards.
1319 1406 void updateModUnion();
1320 1407
1321 1408 // Set the mod union bits corresponding to the given memRegion. Note
1322 1409 // that this is always a safe operation, since it doesn't clear any
1323 1410 // bits.
1324 1411 void markModUnionRange(MemRegion mr);
1325 1412
1326 1413 // Records the fact that a marking phase is no longer in progress.
1327 1414 void set_marking_complete() {
1328 1415 _mark_in_progress = false;
1329 1416 }
1330 1417 void set_marking_started() {
1331 1418 _mark_in_progress = true;
1332 1419 }
1333 1420 bool mark_in_progress() {
1334 1421 return _mark_in_progress;
1335 1422 }
1336 1423
1337 1424 // Return the maximum heap capacity.
1338 1425 virtual size_t max_capacity() const;
1339 1426
1340 1427 virtual jlong millis_since_last_gc();
1341 1428
1342 1429 // Perform any cleanup actions necessary before allowing a verification.
1343 1430 virtual void prepare_for_verify();
1344 1431
1345 1432 // Perform verification.
1346 1433
1347 1434 // vo == UsePrevMarking -> use "prev" marking information,
1348 1435 // vo == UseNextMarking -> use "next" marking information
1349 1436 // vo == UseMarkWord -> use the mark word in the object header
1350 1437 //
1351 1438 // NOTE: Only the "prev" marking information is guaranteed to be
1352 1439 // consistent most of the time, so most calls to this should use
1353 1440 // vo == UsePrevMarking.
1354 1441 // Currently, there is only one case where this is called with
1355 1442 // vo == UseNextMarking, which is to verify the "next" marking
1356 1443 // information at the end of remark.
1357 1444 // Currently there is only one place where this is called with
1358 1445 // vo == UseMarkWord, which is to verify the marking during a
1359 1446 // full GC.
1360 1447 void verify(bool allow_dirty, bool silent, VerifyOption vo);
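  //
  // Typical call (an editorial example):
  //
  //   verify(/* allow_dirty */ true, /* silent */ false,
  //          VerifyOption_G1UsePrevMarking);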
1361 1448
1362 1449 // Override; it uses the "prev" marking information
1363 1450 virtual void verify(bool allow_dirty, bool silent);
1364 1451 // Default behavior: calls print_on(tty).
1365 1452 virtual void print() const;
1366 1453 // This calls print_on(st, PrintHeapAtGCExtended).
1367 1454 virtual void print_on(outputStream* st) const;
1368 1455 // If extended is true, it will print out information for all
1369 1456 // regions in the heap by calling print_on_extended(st).
1370 1457 virtual void print_on(outputStream* st, bool extended) const;
1371 1458 virtual void print_on_extended(outputStream* st) const;
1372 1459
1373 1460 virtual void print_gc_threads_on(outputStream* st) const;
1374 1461 virtual void gc_threads_do(ThreadClosure* tc) const;
1375 1462
1376 1463 // Override
1377 1464 void print_tracing_info() const;
1378 1465
1379 1466 // The following two methods are helpful for debugging RSet issues.
1380 1467 void print_cset_rsets() PRODUCT_RETURN;
1381 1468 void print_all_rsets() PRODUCT_RETURN;
1382 1469
1383 1470 // Convenience function to be used in situations where the heap type can be
1384 1471 // asserted to be this type.
1385 1472 static G1CollectedHeap* heap();
1386 1473
1387 1474 void empty_young_list();
1388 1475
1389 1476 void set_region_short_lived_locked(HeapRegion* hr);
1390 1477 // add appropriate methods for any other surv rate groups
1391 1478
1392 1479 YoungList* young_list() { return _young_list; }
1393 1480
1394 1481 // debugging
1395 1482 bool check_young_list_well_formed() {
1396 1483 return _young_list->check_list_well_formed();
1397 1484 }
1398 1485
1399 1486 bool check_young_list_empty(bool check_heap,
1400 1487 bool check_sample = true);
1401 1488
1402 1489 // *** Stuff related to concurrent marking. It's not clear to me that so
1403 1490 // many of these need to be public.
1404 1491
1405 1492 // The functions below are helper functions that a subclass of
1406 1493 // "CollectedHeap" can use in the implementation of its virtual
1407 1494 // functions.
1408 1495 // This performs a concurrent marking of the live objects in a
1409 1496 // bitmap off to the side.
1410 1497 void doConcurrentMark();
1411 1498
1412 1499 // Do a full concurrent marking, synchronously.
1413 1500 void do_sync_mark();
1414 1501
1415 1502 bool isMarkedPrev(oop obj) const;
1416 1503 bool isMarkedNext(oop obj) const;
1417 1504
1418 1505 // vo == UsePrevMarking -> use "prev" marking information,
1419 1506 // vo == UseNextMarking -> use "next" marking information,
1420 1507 // vo == UseMarkWord -> use mark word from object header
1421 1508 bool is_obj_dead_cond(const oop obj,
1422 1509 const HeapRegion* hr,
1423 1510 const VerifyOption vo) const {
1424 1511
1425 1512 switch (vo) {
1426 1513 case VerifyOption_G1UsePrevMarking:
1427 1514 return is_obj_dead(obj, hr);
1428 1515 case VerifyOption_G1UseNextMarking:
1429 1516 return is_obj_ill(obj, hr);
1430 1517 default:
1431 1518 assert(vo == VerifyOption_G1UseMarkWord, "must be");
1432 1519 return !obj->is_gc_marked();
1433 1520 }
1434 1521 }
1435 1522
1436 1523 // Determine if an object is dead, given the object and also
1437 1524 // the region to which the object belongs. An object is dead
1438 1525 // iff a) it was not allocated since the last mark and b) it
1439 1526 // is not marked.
1440 1527
1441 1528 bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1442 1529 return
1443 1530 !hr->obj_allocated_since_prev_marking(obj) &&
1444 1531 !isMarkedPrev(obj);
1445 1532 }
1446 1533
1447 1534 // This is used when copying an object to survivor space.
1448 1535 // If the object is marked live, then we mark the copy live.
1449 1536 // If the object is allocated since the start of this mark
1450 1537 // cycle, then we mark the copy live.
1451 1538 // If the object has been around since the previous mark
1452 1539 // phase, and hasn't been marked yet during this phase,
1453 1540 // then we don't mark it, we just wait for the
1454 1541 // current marking cycle to get to it.
1455 1542
1456 1543 // This function returns true when an object has been
1457 1544 // around since the previous marking and hasn't yet
1458 1545 // been marked during this marking.
1459 1546
1460 1547 bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1461 1548 return
1462 1549 !hr->obj_allocated_since_next_marking(obj) &&
1463 1550 !isMarkedNext(obj);
1464 1551 }
1465 1552
1466 1553 // Determine if an object is dead, given only the object itself.
1467 1554 // This will find the region to which the object belongs and
1468 1555 // then call the region version of the same function.
1469 1556
1470 1557 // Additionally: if it is in the permanent gen it isn't dead,
1471 1558 // and if it is NULL it isn't dead.
1472 1559
1473 1560 // vo == UsePrevMarking -> use "prev" marking information,
1474 1561 // vo == UseNextMarking -> use "next" marking information,
1475 1562 // vo == UseMarkWord -> use mark word from object header
1476 1563 bool is_obj_dead_cond(const oop obj,
1477 1564 const VerifyOption vo) const {
1478 1565
1479 1566 switch (vo) {
1480 1567 case VerifyOption_G1UsePrevMarking:
1481 1568 return is_obj_dead(obj);
1482 1569 case VerifyOption_G1UseNextMarking:
1483 1570 return is_obj_ill(obj);
1484 1571 default:
1485 1572 assert(vo == VerifyOption_G1UseMarkWord, "must be");
1486 1573 return !obj->is_gc_marked();
1487 1574 }
1488 1575 }
1489 1576
1490 1577 bool is_obj_dead(const oop obj) const {
1491 1578 const HeapRegion* hr = heap_region_containing(obj);
1492 1579 if (hr == NULL) {
1493 1580 if (Universe::heap()->is_in_permanent(obj))
1494 1581 return false;
1495 1582 else if (obj == NULL) return false;
1496 1583 else return true;
1497 1584 }
1498 1585 else return is_obj_dead(obj, hr);
1499 1586 }
1500 1587
1501 1588 bool is_obj_ill(const oop obj) const {
1502 1589 const HeapRegion* hr = heap_region_containing(obj);
1503 1590 if (hr == NULL) {
1504 1591 if (Universe::heap()->is_in_permanent(obj))
1505 1592 return false;
1506 1593 else if (obj == NULL) return false;
1507 1594 else return true;
1508 1595 }
1509 1596 else return is_obj_ill(obj, hr);
1510 1597 }
1511 1598
1512 1599 // The following is just to alert the verification code
1513 1600 // that a full collection has occurred and that the
1514 1601 // remembered sets are no longer up to date.
1515 1602 bool _full_collection;
1516 1603 void set_full_collection() { _full_collection = true;}
1517 1604 void clear_full_collection() {_full_collection = false;}
1518 1605 bool full_collection() {return _full_collection;}
1519 1606
1520 1607 ConcurrentMark* concurrent_mark() const { return _cm; }
1521 1608 ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1522 1609
1523 1610 // The dirty cards region list is used to record a subset of regions
1524 1611 // whose cards need clearing. The list is populated during the
1525 1612 // remembered set scanning and drained during the card table
1526 1613 // cleanup. Although the methods are reentrant, population/draining
1527 1614 // phases must not overlap. For synchronization purposes the last
1528 1615 // element on the list points to itself.
1529 1616 HeapRegion* _dirty_cards_region_list;
1530 1617 void push_dirty_cards_region(HeapRegion* hr);
1531 1618 HeapRegion* pop_dirty_cards_region();
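  //
  // A draining loop would look like this (an editorial sketch; it
  // assumes pop_dirty_cards_region() returns NULL once the list is
  // empty):
  //
  //   HeapRegion* hr;
  //   while ((hr = pop_dirty_cards_region()) != NULL) {
  //     // ... clear the card table entries that cover hr ...
  //   }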
1532 1619
1533 1620 public:
1534 1621 void stop_conc_gc_threads();
1535 1622
1536 1623 // <NEW PREDICTION>
1537 1624
1538 1625 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
1539 1626 void check_if_region_is_too_expensive(double predicted_time_ms);
1540 1627 size_t pending_card_num();
1541 1628 size_t max_pending_card_num();
1542 1629 size_t cards_scanned();
1543 1630
1544 1631 // </NEW PREDICTION>
1545 1632
1546 1633 protected:
1547 1634 size_t _max_heap_capacity;
1548 1635 };
1549 1636
1550 1637 #define use_local_bitmaps 1
1551 1638 #define verify_local_bitmaps 0
1552 1639 #define oop_buffer_length 256
1553 1640
1554 1641 #ifndef PRODUCT
1555 1642 class GCLabBitMap;
1556 1643 class GCLabBitMapClosure: public BitMapClosure {
1557 1644 private:
1558 1645 ConcurrentMark* _cm;
1559 1646 GCLabBitMap* _bitmap;
1560 1647
1561 1648 public:
1562 1649 GCLabBitMapClosure(ConcurrentMark* cm,
1563 1650 GCLabBitMap* bitmap) {
1564 1651 _cm = cm;
1565 1652 _bitmap = bitmap;
1566 1653 }
1567 1654
1568 1655 virtual bool do_bit(size_t offset);
1569 1656 };
1570 1657 #endif // !PRODUCT
1571 1658
1572 1659 class GCLabBitMap: public BitMap {
1573 1660 private:
1574 1661 ConcurrentMark* _cm;
1575 1662
1576 1663 int _shifter;
1577 1664 size_t _bitmap_word_covers_words;
1578 1665
1579 1666 // beginning of the heap
1580 1667 HeapWord* _heap_start;
1581 1668
1582 1669 // this is the actual start of the GCLab
1583 1670 HeapWord* _real_start_word;
1584 1671
1585 1672 // this is the actual end of the GCLab
1586 1673 HeapWord* _real_end_word;
1587 1674
1588 1675 // this is the first word, possibly located before the actual start
1589 1676 // of the GCLab, that corresponds to the first bit of the bitmap
1590 1677 HeapWord* _start_word;
1591 1678
1592 1679 // size of a GCLab in words
1593 1680 size_t _gclab_word_size;
1594 1681
1595 1682 static int shifter() {
1596 1683 return MinObjAlignment - 1;
1597 1684 }
1598 1685
1599 1686 // How many heap words does a single bitmap word correspond to?
1600 1687 static size_t bitmap_word_covers_words() {
1601 1688 return BitsPerWord << shifter();
1602 1689 }
1603 1690
1604 1691 size_t gclab_word_size() const {
1605 1692 return _gclab_word_size;
1606 1693 }
1607 1694
1608 1695 // Calculates actual GCLab size in words
1609 1696 size_t gclab_real_word_size() const {
1610 1697 return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word))
1611 1698 / BitsPerWord;
1612 1699 }
1613 1700
1614 1701 static size_t bitmap_size_in_bits(size_t gclab_word_size) {
1615 1702 size_t bits_in_bitmap = gclab_word_size >> shifter();
1616 1703 // We are going to ensure that the beginning of a word in this
1617 1704 // bitmap also corresponds to the beginning of a word in the
1618 1705 // global marking bitmap. To handle the case where a GCLab
1619 1706 // starts from the middle of the bitmap, we need to add enough
1620 1707 // space (i.e. up to a bitmap word) to ensure that we have
1621 1708 // enough bits in the bitmap.
1622 1709 return bits_in_bitmap + BitsPerWord - 1;
1623 1710 }
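  //
  // Worked example of the sizing math (editorial; assumes a 32-bit VM
  // where MinObjAlignment == 2 and BitsPerWord == 32): shifter() is 1,
  // so each bitmap bit covers 2 heap words and each bitmap word covers
  // 32 << 1 == 64 heap words. A 4096-word GCLab then needs
  // 4096 >> 1 == 2048 bits, plus BitsPerWord - 1 == 31 bits of slack
  // to stay word-aligned with the global marking bitmap.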
1624 1711 public:
1625 1712 GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size)
1626 1713 : BitMap(bitmap_size_in_bits(gclab_word_size)),
1627 1714 _cm(G1CollectedHeap::heap()->concurrent_mark()),
1628 1715 _shifter(shifter()),
1629 1716 _bitmap_word_covers_words(bitmap_word_covers_words()),
1630 1717 _heap_start(heap_start),
1631 1718 _gclab_word_size(gclab_word_size),
1632 1719 _real_start_word(NULL),
1633 1720 _real_end_word(NULL),
1634 1721 _start_word(NULL)
1635 1722 {
1636 1723 guarantee( size_in_words() >= bitmap_size_in_words(),
1637 1724 "just making sure");
1638 1725 }
1639 1726
1640 1727 inline unsigned heapWordToOffset(HeapWord* addr) {
1641 1728 unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
1642 1729 assert(offset < size(), "offset should be within bounds");
1643 1730 return offset;
1644 1731 }
1645 1732
1646 1733 inline HeapWord* offsetToHeapWord(size_t offset) {
1647 1734 HeapWord* addr = _start_word + (offset << _shifter);
1648 1735 assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
1649 1736 return addr;
1650 1737 }
1651 1738
1652 1739 bool fields_well_formed() {
1653 1740 bool ret1 = (_real_start_word == NULL) &&
1654 1741 (_real_end_word == NULL) &&
1655 1742 (_start_word == NULL);
1656 1743 if (ret1)
1657 1744 return true;
1658 1745
1659 1746 bool ret2 = _real_start_word >= _start_word &&
1660 1747 _start_word < _real_end_word &&
1661 1748 (_real_start_word + _gclab_word_size) == _real_end_word &&
1662 1749 (_start_word + _gclab_word_size + _bitmap_word_covers_words)
1663 1750 > _real_end_word;
1664 1751 return ret2;
1665 1752 }
1666 1753
1667 1754 inline bool mark(HeapWord* addr) {
1668 1755 guarantee(use_local_bitmaps, "invariant");
1669 1756 assert(fields_well_formed(), "invariant");
1670 1757
1671 1758 if (addr >= _real_start_word && addr < _real_end_word) {
1672 1759 assert(!isMarked(addr), "should not have already been marked");
1673 1760
1674 1761 // first mark it on the bitmap
1675 1762 at_put(heapWordToOffset(addr), true);
1676 1763
1677 1764 return true;
1678 1765 } else {
1679 1766 return false;
1680 1767 }
1681 1768 }
1682 1769
1683 1770 inline bool isMarked(HeapWord* addr) {
1684 1771 guarantee(use_local_bitmaps, "invariant");
1685 1772 assert(fields_well_formed(), "invariant");
1686 1773
1687 1774 return at(heapWordToOffset(addr));
1688 1775 }
1689 1776
1690 1777 void set_buffer(HeapWord* start) {
1691 1778 guarantee(use_local_bitmaps, "invariant");
1692 1779 clear();
1693 1780
1694 1781 assert(start != NULL, "invariant");
1695 1782 _real_start_word = start;
1696 1783 _real_end_word = start + _gclab_word_size;
1697 1784
1698 1785 size_t diff =
1699 1786 pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
1700 1787 _start_word = start - diff;
1701 1788
1702 1789 assert(fields_well_formed(), "invariant");
1703 1790 }
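  //
  // Example of the alignment above (editorial; assumes
  // _bitmap_word_covers_words == 64): a buffer starting 100 words past
  // _heap_start gives diff == 100 % 64 == 36, so _start_word is moved
  // back 36 words to the previous 64-word boundary, keeping this
  // bitmap's words aligned with those of the global marking bitmap.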
1704 1791
1705 1792 #ifndef PRODUCT
1706 1793 void verify() {
1707 1794 // verify that the marks have been propagated
1708 1795 GCLabBitMapClosure cl(_cm, this);
1709 1796 iterate(&cl);
1710 1797 }
1711 1798 #endif // PRODUCT
1712 1799
1713 1800 void retire() {
1714 1801 guarantee(use_local_bitmaps, "invariant");
1715 1802 assert(fields_well_formed(), "invariant");
1716 1803
1717 1804 if (_start_word != NULL) {
1718 1805 CMBitMap* mark_bitmap = _cm->nextMarkBitMap();
1719 1806
1720 1807 // this means that the bitmap was set up for the GCLab
1721 1808 assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
1722 1809
1723 1810 mark_bitmap->mostly_disjoint_range_union(this,
1724 1811 0, // always start from the start of the bitmap
1725 1812 _start_word,
1726 1813 gclab_real_word_size());
1727 1814 _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
1728 1815
1729 1816 #ifndef PRODUCT
1730 1817 if (use_local_bitmaps && verify_local_bitmaps)
1731 1818 verify();
1732 1819 #endif // PRODUCT
1733 1820 } else {
1734 1821 assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
1735 1822 }
1736 1823 }
1737 1824
1738 1825 size_t bitmap_size_in_words() const {
1739 1826 return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord;
1740 1827 }
1741 1828
1742 1829 };
1743 1830
1744 1831 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1745 1832 private:
1746 1833 bool _retired;
1747 1834 bool _during_marking;
1748 1835 GCLabBitMap _bitmap;
1749 1836
1750 1837 public:
1751 1838 G1ParGCAllocBuffer(size_t gclab_word_size) :
1752 1839 ParGCAllocBuffer(gclab_word_size),
1753 1840 _retired(false),
1754 1841 _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
1755 1842 _bitmap(G1CollectedHeap::heap()->reserved_region().start(), gclab_word_size)
1756 1843 { }
1757 1844
1758 1845 inline bool mark(HeapWord* addr) {
1759 1846 guarantee(use_local_bitmaps, "invariant");
1760 1847 assert(_during_marking, "invariant");
1761 1848 return _bitmap.mark(addr);
1762 1849 }
1763 1850
1764 1851 inline void set_buf(HeapWord* buf) {
1765 1852 if (use_local_bitmaps && _during_marking)
1766 1853 _bitmap.set_buffer(buf);
1767 1854 ParGCAllocBuffer::set_buf(buf);
1768 1855 _retired = false;
1769 1856 }
1770 1857
1771 1858 inline void retire(bool end_of_gc, bool retain) {
1772 1859 if (_retired)
1773 1860 return;
1774 1861 if (use_local_bitmaps && _during_marking) {
1775 1862 _bitmap.retire();
1776 1863 }
1777 1864 ParGCAllocBuffer::retire(end_of_gc, retain);
1778 1865 _retired = true;
1779 1866 }
1780 1867 };
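
// Typical lifecycle of a G1ParGCAllocBuffer during an evacuation
// pause (an editorial sketch; "gclab_word_size", "space", "word_sz",
// and the "mark_in_progress" flag are hypothetical caller-supplied
// values, and mark() is only legal while marking is in progress):
//
//   G1ParGCAllocBuffer buf(gclab_word_size);
//   buf.set_buf(space);                     // also primes the bitmap
//   HeapWord* obj = buf.allocate(word_sz);  // inherited PLAB fast path
//   if (obj != NULL && mark_in_progress) {
//     buf.mark(obj);                        // record the copied object
//   }
//   buf.retire(true /* end_of_gc */, false /* retain */);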
1781 1868
1782 1869 class G1ParScanThreadState : public StackObj {
1783 1870 protected:
1784 1871 G1CollectedHeap* _g1h;
1785 1872 RefToScanQueue* _refs;
1786 1873 DirtyCardQueue _dcq;
1787 1874 CardTableModRefBS* _ct_bs;
1788 1875 G1RemSet* _g1_rem;
1789 1876
1790 1877 G1ParGCAllocBuffer _surviving_alloc_buffer;
1791 1878 G1ParGCAllocBuffer _tenured_alloc_buffer;
1792 1879 G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
1793 1880 ageTable _age_table;
1794 1881
1795 1882 size_t _alloc_buffer_waste;
1796 1883 size_t _undo_waste;
1797 1884
1798 1885 OopsInHeapRegionClosure* _evac_failure_cl;
1799 1886 G1ParScanHeapEvacClosure* _evac_cl;
1800 1887 G1ParScanPartialArrayClosure* _partial_scan_cl;
1801 1888
1802 1889 int _hash_seed;
1803 1890 int _queue_num;
1804 1891
1805 1892 size_t _term_attempts;
1806 1893
1807 1894 double _start;
1808 1895 double _start_strong_roots;
1809 1896 double _strong_roots_time;
1810 1897 double _start_term;
1811 1898 double _term_time;
1812 1899
1813 1900 // Map from young-age-index (0 == not young, 1 is youngest) to
1814 1901 // surviving words. "base" is the pointer we get back from the malloc call
1815 1902 size_t* _surviving_young_words_base;
1816 1903 // this points into the array, as we use the first few entries for padding
1817 1904 size_t* _surviving_young_words;
1818 1905
1819 1906 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
1820 1907
1821 1908 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
1822 1909
1823 1910 void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
1824 1911
1825 1912 DirtyCardQueue& dirty_card_queue() { return _dcq; }
1826 1913 CardTableModRefBS* ctbs() { return _ct_bs; }
1827 1914
1828 1915 template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
1829 1916 if (!from->is_survivor()) {
1830 1917 _g1_rem->par_write_ref(from, p, tid);
1831 1918 }
1832 1919 }
1833 1920
1834 1921 template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
1835 1922 // If the new value of the field points into the same region or
1836 1923 // into the to-space, we don't need to include it in the RSet updates.
1837 1924 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1838 1925 size_t card_index = ctbs()->index_for(p);
1839 1926 // If the card hasn't been added to the buffer, do it.
1840 1927 if (ctbs()->mark_card_deferred(card_index)) {
1841 1928 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1842 1929 }
1843 1930 }
1844 1931 }
1845 1932
1846 1933 public:
1847 1934 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num);
1848 1935
1849 1936 ~G1ParScanThreadState() {
1850 1937 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
1851 1938 }
1852 1939
1853 1940 RefToScanQueue* refs() { return _refs; }
1854 1941 ageTable* age_table() { return &_age_table; }
1855 1942
1856 1943 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1857 1944 return _alloc_buffers[purpose];
1858 1945 }
1859 1946
1860 1947 size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
1861 1948 size_t undo_waste() const { return _undo_waste; }
1862 1949
1863 1950 #ifdef ASSERT
1864 1951 bool verify_ref(narrowOop* ref) const;
1865 1952 bool verify_ref(oop* ref) const;
1866 1953 bool verify_task(StarTask ref) const;
1867 1954 #endif // ASSERT
1868 1955
1869 1956 template <class T> void push_on_queue(T* ref) {
1870 1957 assert(verify_ref(ref), "sanity");
1871 1958 refs()->push(ref);
1872 1959 }
1873 1960
1874 1961 template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
1875 1962 if (G1DeferredRSUpdate) {
1876 1963 deferred_rs_update(from, p, tid);
1877 1964 } else {
1878 1965 immediate_rs_update(from, p, tid);
1879 1966 }
1880 1967 }
1881 1968
1882 1969 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1883 1970
1884 1971 HeapWord* obj = NULL;
1885 1972 size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1886 1973 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1887 1974 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
1888 1975 assert(gclab_word_size == alloc_buf->word_sz(),
1889 1976 "dynamic resizing is not supported");
1890 1977 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
1891 1978 alloc_buf->retire(false, false);
1892 1979
1893 1980 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1894 1981 if (buf == NULL) return NULL; // Let caller handle allocation failure.
1895 1982 // Otherwise.
1896 1983 alloc_buf->set_buf(buf);
1897 1984
1898 1985 obj = alloc_buf->allocate(word_sz);
1899 1986 assert(obj != NULL, "buffer was definitely big enough...");
1900 1987 } else {
1901 1988 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
1902 1989 }
1903 1990 return obj;
1904 1991 }
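  //
  // The waste test above, worked through (editorial; assumes
  // gclab_word_size == 4096 and ParallelGCBufferWastePct == 10):
  // word_sz * 100 < 4096 * 10 holds for word_sz <= 409, so requests
  // up to ~10% of a buffer retire and refill the GCLab, while larger
  // requests are allocated directly with par_allocate_during_gc().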
1905 1992
1906 1993 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
1907 1994 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
1908 1995 if (obj != NULL) return obj;
1909 1996 return allocate_slow(purpose, word_sz);
1910 1997 }
1911 1998
1912 1999 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
1913 2000 if (alloc_buffer(purpose)->contains(obj)) {
1914 2001 assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
1915 2002 "should contain whole object");
1916 2003 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
1917 2004 } else {
1918 2005 CollectedHeap::fill_with_object(obj, word_sz);
1919 2006 add_to_undo_waste(word_sz);
1920 2007 }
1921 2008 }
1922 2009
1923 2010 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
1924 2011 _evac_failure_cl = evac_failure_cl;
1925 2012 }
1926 2013 OopsInHeapRegionClosure* evac_failure_closure() {
1927 2014 return _evac_failure_cl;
1928 2015 }
1929 2016
1930 2017 void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
1931 2018 _evac_cl = evac_cl;
1932 2019 }
1933 2020
1934 2021 void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
1935 2022 _partial_scan_cl = partial_scan_cl;
1936 2023 }
1937 2024
1938 2025 int* hash_seed() { return &_hash_seed; }
1939 2026 int queue_num() { return _queue_num; }
1940 2027
1941 2028 size_t term_attempts() const { return _term_attempts; }
1942 2029 void note_term_attempt() { _term_attempts++; }
1943 2030
1944 2031 void start_strong_roots() {
1945 2032 _start_strong_roots = os::elapsedTime();
1946 2033 }
1947 2034 void end_strong_roots() {
1948 2035 _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
1949 2036 }
1950 2037 double strong_roots_time() const { return _strong_roots_time; }
1951 2038
1952 2039 void start_term_time() {
1953 2040 note_term_attempt();
1954 2041 _start_term = os::elapsedTime();
1955 2042 }
1956 2043 void end_term_time() {
1957 2044 _term_time += (os::elapsedTime() - _start_term);
1958 2045 }
1959 2046 double term_time() const { return _term_time; }
1960 2047
1961 2048 double elapsed_time() const {
1962 2049 return os::elapsedTime() - _start;
1963 2050 }
1964 2051
1965 2052 static void
1966 2053 print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
1967 2054 void
1968 2055 print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
1969 2056
1970 2057 size_t* surviving_young_words() {
1971 2058 // We add one to hide entry 0, which accumulates surviving words for
1972 2059 // age -1 regions (i.e. non-young ones)
1973 2060 return _surviving_young_words;
1974 2061 }
1975 2062
1976 2063 void retire_alloc_buffers() {
1977 2064 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
1978 2065 size_t waste = _alloc_buffers[ap]->words_remaining();
1979 2066 add_to_alloc_buffer_waste(waste);
1980 2067 _alloc_buffers[ap]->retire(true, false);
1981 2068 }
1982 2069 }
1983 2070
1984 2071 template <class T> void deal_with_reference(T* ref_to_scan) {
1985 2072 if (has_partial_array_mask(ref_to_scan)) {
1986 2073 _partial_scan_cl->do_oop_nv(ref_to_scan);
1987 2074 } else {
1988 2075 // Note: we can use "raw" versions of "region_containing" because
1989 2076 // "obj_to_scan" is definitely in the heap, and is not in a
1990 2077 // humongous region.
1991 2078 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
1992 2079 _evac_cl->set_region(r);
1993 2080 _evac_cl->do_oop_nv(ref_to_scan);
1994 2081 }
1995 2082 }
1996 2083
1997 2084 void deal_with_reference(StarTask ref) {
1998 2085 assert(verify_task(ref), "sanity");
1999 2086 if (ref.is_narrow()) {
2000 2087 deal_with_reference((narrowOop*)ref);
2001 2088 } else {
2002 2089 deal_with_reference((oop*)ref);
2003 2090 }
2004 2091 }
2005 2092
2006 2093 public:
2007 2094 void trim_queue();
2008 2095 };
2009 2096
2010 2097 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP