rev 2896 : 6484965: G1: piggy-back liveness accounting phase on marking
Summary: Remove the separate counting phase of concurrent marking by tracking the amount of marked bytes and the cards spanned by marked objects in marking task/worker thread local data structures, which are updated as individual objects are marked.
Reviewed-by: brutisso
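For orientation, the sketch below illustrates the accounting scheme the summary describes: each marking worker keeps a thread-local byte count plus a per-card flag array, and updates them at the moment it marks an object, so no separate counting pass over the heap is needed afterwards. This is an editorial sketch only; the type and member names (WorkerLivenessCounts, add_marked_object, and so on) are hypothetical and are not the identifiers introduced by the patch.

#include <cstddef>
#include <cstring>

// Editorial sketch, not code from this changeset: hypothetical per-worker
// liveness counters, updated as individual objects are marked rather than
// in a separate counting phase after marking completes.
class WorkerLivenessCounts {
  size_t _marked_bytes;   // bytes of all objects this worker has marked
  bool*  _card_flags;     // one flag per card spanned by a marked object
  size_t _num_cards;

public:
  WorkerLivenessCounts(size_t num_cards)
    : _marked_bytes(0), _card_flags(new bool[num_cards]), _num_cards(num_cards) {
    memset(_card_flags, 0, num_cards * sizeof(bool));
  }
  ~WorkerLivenessCounts() { delete[] _card_flags; }

  // Invoked right after an object is successfully marked.
  void add_marked_object(size_t offset_words,        // start, in words from heap bottom
                         size_t size_words,          // object size in words
                         size_t bytes_per_word,
                         unsigned log_card_size_words) {
    _marked_bytes += size_words * bytes_per_word;
    size_t first_card = offset_words >> log_card_size_words;
    size_t last_card  = (offset_words + size_words - 1) >> log_card_size_words;
    for (size_t c = first_card; c <= last_card; c++) {
      _card_flags[c] = true;                         // every card the object spans
    }
  }

  size_t marked_bytes() const { return _marked_bytes; }
};
// At the end of marking, each worker's _marked_bytes and card flags would be
// folded into the global per-region liveness totals.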
--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
1 1 /*
2 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
26 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
27 27
28 28 #include "gc_implementation/g1/concurrentMark.hpp"
29 29 #include "gc_implementation/g1/g1AllocRegion.hpp"
30 30 #include "gc_implementation/g1/g1HRPrinter.hpp"
31 31 #include "gc_implementation/g1/g1RemSet.hpp"
32 32 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
33 33 #include "gc_implementation/g1/heapRegionSeq.hpp"
34 34 #include "gc_implementation/g1/heapRegionSets.hpp"
35 35 #include "gc_implementation/shared/hSpaceCounters.hpp"
36 36 #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
37 37 #include "memory/barrierSet.hpp"
38 38 #include "memory/memRegion.hpp"
39 39 #include "memory/sharedHeap.hpp"
40 40
41 41 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
42 42 // It uses the "Garbage First" heap organization and algorithm, which
43 43 // may combine concurrent marking with parallel, incremental compaction of
44 44 // heap subsets that will yield large amounts of garbage.
45 45
46 46 class HeapRegion;
47 47 class HRRSCleanupTask;
48 48 class PermanentGenerationSpec;
49 49 class GenerationSpec;
50 50 class OopsInHeapRegionClosure;
51 51 class G1ScanHeapEvacClosure;
52 52 class ObjectClosure;
53 53 class SpaceClosure;
54 54 class CompactibleSpaceClosure;
55 55 class Space;
56 56 class G1CollectorPolicy;
57 57 class GenRemSet;
58 58 class G1RemSet;
59 59 class HeapRegionRemSetIterator;
60 60 class ConcurrentMark;
61 61 class ConcurrentMarkThread;
62 62 class ConcurrentG1Refine;
63 63 class GenerationCounters;
64 64
65 65 typedef OverflowTaskQueue<StarTask> RefToScanQueue;
66 66 typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
67 67
68 68 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
69 69 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
70 70
71 71 enum GCAllocPurpose {
72 72 GCAllocForTenured,
73 73 GCAllocForSurvived,
74 74 GCAllocPurposeCount
75 75 };
76 76
77 77 class YoungList : public CHeapObj {
78 78 private:
79 79 G1CollectedHeap* _g1h;
80 80
81 81 HeapRegion* _head;
82 82
83 83 HeapRegion* _survivor_head;
84 84 HeapRegion* _survivor_tail;
85 85
86 86 HeapRegion* _curr;
87 87
88 88 size_t _length;
89 89 size_t _survivor_length;
90 90
91 91 size_t _last_sampled_rs_lengths;
92 92 size_t _sampled_rs_lengths;
93 93
94 94 void empty_list(HeapRegion* list);
95 95
96 96 public:
97 97 YoungList(G1CollectedHeap* g1h);
98 98
99 99 void push_region(HeapRegion* hr);
100 100 void add_survivor_region(HeapRegion* hr);
101 101
102 102 void empty_list();
103 103 bool is_empty() { return _length == 0; }
104 104 size_t length() { return _length; }
105 105 size_t survivor_length() { return _survivor_length; }
106 106
107 107 // Currently we do not keep track of the used byte sum for the
108 108 // young list and the survivors and it'd be quite a lot of work to
109 109 // do so. When we eventually replace the young list with
110 110 // instances of HeapRegionLinkedList we'll get that for free. So,
111 111 // we'll report the more accurate information then.
112 112 size_t eden_used_bytes() {
113 113 assert(length() >= survivor_length(), "invariant");
114 114 return (length() - survivor_length()) * HeapRegion::GrainBytes;
115 115 }
116 116 size_t survivor_used_bytes() {
117 117 return survivor_length() * HeapRegion::GrainBytes;
118 118 }
119 119
120 120 void rs_length_sampling_init();
121 121 bool rs_length_sampling_more();
122 122 void rs_length_sampling_next();
123 123
124 124 void reset_sampled_info() {
125 125 _last_sampled_rs_lengths = 0;
126 126 }
127 127 size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }
128 128
129 129 // for development purposes
130 130 void reset_auxilary_lists();
131 131 void clear() { _head = NULL; _length = 0; }
132 132
133 133 void clear_survivors() {
134 134 _survivor_head = NULL;
135 135 _survivor_tail = NULL;
136 136 _survivor_length = 0;
137 137 }
138 138
139 139 HeapRegion* first_region() { return _head; }
140 140 HeapRegion* first_survivor_region() { return _survivor_head; }
141 141 HeapRegion* last_survivor_region() { return _survivor_tail; }
142 142
143 143 // debugging
144 144 bool check_list_well_formed();
145 145 bool check_list_empty(bool check_sample = true);
146 146 void print();
147 147 };
148 148
149 149 class MutatorAllocRegion : public G1AllocRegion {
150 150 protected:
151 151 virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
152 152 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
153 153 public:
154 154 MutatorAllocRegion()
155 155 : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
156 156 };
157 157
158 158 // The G1 STW is-alive closure.
159 159 // An instance is embedded into the G1CH and used as the
160 160 // (optional) _is_alive_non_header closure in the STW
161 161 // reference processor. It is also extensively used during
162 162 // reference processing during STW evacuation pauses.
163 163 class G1STWIsAliveClosure: public BoolObjectClosure {
164 164 G1CollectedHeap* _g1;
165 165 public:
166 166 G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
167 167 void do_object(oop p) { assert(false, "Do not call."); }
168 168 bool do_object_b(oop p);
169 169 };
170 170
171 171 class SurvivorGCAllocRegion : public G1AllocRegion {
172 172 protected:
173 173 virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
174 174 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
175 175 public:
176 176 SurvivorGCAllocRegion()
177 177 : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
178 178 };
179 179
180 180 class OldGCAllocRegion : public G1AllocRegion {
181 181 protected:
182 182 virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
183 183 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
184 184 public:
185 185 OldGCAllocRegion()
186 186 : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
187 187 };
188 188
189 189 class RefineCardTableEntryClosure;
190 190
191 191 class G1CollectedHeap : public SharedHeap {
192 192 friend class VM_G1CollectForAllocation;
193 193 friend class VM_GenCollectForPermanentAllocation;
194 194 friend class VM_G1CollectFull;
195 195 friend class VM_G1IncCollectionPause;
196 196 friend class VMStructs;
197 197 friend class MutatorAllocRegion;
198 198 friend class SurvivorGCAllocRegion;
199 199 friend class OldGCAllocRegion;
200 200
201 201 // Closures used in implementation.
202 202 friend class G1ParCopyHelper;
203 203 friend class G1IsAliveClosure;
204 204 friend class G1EvacuateFollowersClosure;
205 205 friend class G1ParScanThreadState;
206 206 friend class G1ParScanClosureSuper;
207 207 friend class G1ParEvacuateFollowersClosure;
208 208 friend class G1ParTask;
209 209 friend class G1FreeGarbageRegionClosure;
210 210 friend class RefineCardTableEntryClosure;
211 211 friend class G1PrepareCompactClosure;
212 212 friend class RegionSorter;
213 213 friend class RegionResetter;
214 214 friend class CountRCClosure;
215 215 friend class EvacPopObjClosure;
216 216 friend class G1ParCleanupCTTask;
217 217
218 218 // Other related classes.
219 219 friend class G1MarkSweep;
220 220
221 221 private:
222 222 // The one and only G1CollectedHeap, so static functions can find it.
223 223 static G1CollectedHeap* _g1h;
224 224
225 225 static size_t _humongous_object_threshold_in_words;
226 226
227 227 // Storage for the G1 heap (excludes the permanent generation).
228 228 VirtualSpace _g1_storage;
229 229 MemRegion _g1_reserved;
230 230
231 231 // The part of _g1_storage that is currently committed.
232 232 MemRegion _g1_committed;
233 233
234 234 // The master free list. It will satisfy all new region allocations.
235 235 MasterFreeRegionList _free_list;
236 236
237 237 // The secondary free list which contains regions that have been
238 238 // freed up during the cleanup process. This will be appended to the
239 239 // master free list when appropriate.
240 240 SecondaryFreeRegionList _secondary_free_list;
241 241
242 242 // It keeps track of the old regions.
243 243 MasterOldRegionSet _old_set;
244 244
245 245 // It keeps track of the humongous regions.
246 246 MasterHumongousRegionSet _humongous_set;
247 247
248 248 // The number of regions we could create by expansion.
249 249 size_t _expansion_regions;
250 250
251 251 // The block offset table for the G1 heap.
252 252 G1BlockOffsetSharedArray* _bot_shared;
253 253
254 254 // Tears down the region sets / lists so that they are empty and the
255 255 // regions on the heap do not belong to a region set / list. The
256 256 // only exception is the humongous set which we leave unaltered. If
257 257 // free_list_only is true, it will only tear down the master free
258 258 // list. It is called before a Full GC (free_list_only == false) or
259 259 // before heap shrinking (free_list_only == true).
260 260 void tear_down_region_sets(bool free_list_only);
261 261
262 262 // Rebuilds the region sets / lists so that they are repopulated to
263 263 // reflect the contents of the heap. The only exception is the
264 264 // humongous set which was not torn down in the first place. If
265 265 // free_list_only is true, it will only rebuild the master free
266 266 // list. It is called after a Full GC (free_list_only == false) or
267 267 // after heap shrinking (free_list_only == true).
268 268 void rebuild_region_sets(bool free_list_only);
269 269
270 270 // The sequence of all heap regions in the heap.
271 271 HeapRegionSeq _hrs;
272 272
273 273 // Alloc region used to satisfy mutator allocation requests.
274 274 MutatorAllocRegion _mutator_alloc_region;
275 275
276 276 // Alloc region used to satisfy allocation requests by the GC for
277 277 // survivor objects.
278 278 SurvivorGCAllocRegion _survivor_gc_alloc_region;
279 279
280 280 // Alloc region used to satisfy allocation requests by the GC for
281 281 // old objects.
282 282 OldGCAllocRegion _old_gc_alloc_region;
283 283
284 284 // The last old region we allocated to during the last GC.
285 285 // Typically, it is not full so we should re-use it during the next GC.
286 286 HeapRegion* _retained_old_gc_alloc_region;
287 287
288 288 // It resets the mutator alloc region before new allocations can take place.
289 289 void init_mutator_alloc_region();
290 290
291 291 // It releases the mutator alloc region.
292 292 void release_mutator_alloc_region();
293 293
294 294 // It initializes the GC alloc regions at the start of a GC.
295 295 void init_gc_alloc_regions();
296 296
297 297 // It releases the GC alloc regions at the end of a GC.
298 298 void release_gc_alloc_regions();
299 299
300 300 // It does any cleanup that needs to be done on the GC alloc regions
301 301 // before a Full GC.
302 302 void abandon_gc_alloc_regions();
303 303
304 304 // Helper for monitoring and management support.
305 305 G1MonitoringSupport* _g1mm;
306 306
307 307 // Determines PLAB size for a particular allocation purpose.
308 308 static size_t desired_plab_sz(GCAllocPurpose purpose);
309 309
310 310 // Outside of GC pauses, the number of bytes used in all regions other
311 311 // than the current allocation region.
312 312 size_t _summary_bytes_used;
313 313
314 314 // This is used for a quick test on whether a reference points into
315 315 // the collection set or not. Basically, we have an array, with one
316 316 // byte per region, and that byte denotes whether the corresponding
317 317 // region is in the collection set or not. The entry corresponding
318 318 // the bottom of the heap, i.e., region 0, is pointed to by
319 319 // _in_cset_fast_test_base. The _in_cset_fast_test field has been
320 320 // biased so that it actually points to address 0 of the address
321 321 // space, to make the test as fast as possible (we can simply shift
322 322 // the address to address into it, instead of having to subtract the
323 323 // bottom of the heap from the address before shifting it; basically
324 324 // it works in the same way the card table works).
325 325 bool* _in_cset_fast_test;
326 326
327 327 // The allocated array used for the fast test on whether a reference
328 328 // points into the collection set or not. This field is also used to
329 329 // free the array.
330 330 bool* _in_cset_fast_test_base;
331 331
332 332 // The length of the _in_cset_fast_test_base array.
333 333 size_t _in_cset_fast_test_length;
334 334
335 335 volatile unsigned _gc_time_stamp;
336 336
337 337 size_t* _surviving_young_words;
338 338
339 339 G1HRPrinter _hr_printer;
340 340
341 341 void setup_surviving_young_words();
342 342 void update_surviving_young_words(size_t* surv_young_words);
343 343 void cleanup_surviving_young_words();
344 344
345 345 // It decides whether an explicit GC should start a concurrent cycle
346 346 // instead of doing a STW GC. Currently, a concurrent cycle is
347 347 // explicitly started if:
348 348 // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
349 349 // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
350 350 bool should_do_concurrent_full_gc(GCCause::Cause cause);
351 351
352 352 // Keeps track of how many "full collections" (i.e., Full GCs or
353 353 // concurrent cycles) we have completed. The number of them we have
354 354 // started is maintained in _total_full_collections in CollectedHeap.
355 355 volatile unsigned int _full_collections_completed;
356 356
357 357 // This is a non-product method that is helpful for testing. It is
358 358 // called at the end of a GC and artificially expands the heap by
359 359 // allocating a number of dead regions. This way we can induce very
360 360 // frequent marking cycles and stress the cleanup / concurrent
361 361 // cleanup code more (as all the regions that will be allocated by
362 362 // this method will be found dead by the marking cycle).
363 363 void allocate_dummy_regions() PRODUCT_RETURN;
364 364
365 365 // These are macros so that, if the assert fires, we get the correct
366 366 // line number, file, etc.
367 367
368 368 #define heap_locking_asserts_err_msg(_extra_message_) \
369 369 err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \
370 370 (_extra_message_), \
371 371 BOOL_TO_STR(Heap_lock->owned_by_self()), \
372 372 BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \
373 373 BOOL_TO_STR(Thread::current()->is_VM_thread()))
374 374
375 375 #define assert_heap_locked() \
376 376 do { \
377 377 assert(Heap_lock->owned_by_self(), \
378 378 heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
379 379 } while (0)
380 380
381 381 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_) \
382 382 do { \
383 383 assert(Heap_lock->owned_by_self() || \
384 384 (SafepointSynchronize::is_at_safepoint() && \
385 385 ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
386 386 heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
387 387 "should be at a safepoint")); \
388 388 } while (0)
389 389
390 390 #define assert_heap_locked_and_not_at_safepoint() \
391 391 do { \
392 392 assert(Heap_lock->owned_by_self() && \
393 393 !SafepointSynchronize::is_at_safepoint(), \
394 394 heap_locking_asserts_err_msg("should be holding the Heap_lock and " \
395 395 "should not be at a safepoint")); \
396 396 } while (0)
397 397
398 398 #define assert_heap_not_locked() \
399 399 do { \
400 400 assert(!Heap_lock->owned_by_self(), \
401 401 heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
402 402 } while (0)
403 403
404 404 #define assert_heap_not_locked_and_not_at_safepoint() \
405 405 do { \
406 406 assert(!Heap_lock->owned_by_self() && \
407 407 !SafepointSynchronize::is_at_safepoint(), \
408 408 heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
409 409 "should not be at a safepoint")); \
410 410 } while (0)
411 411
412 412 #define assert_at_safepoint(_should_be_vm_thread_) \
413 413 do { \
414 414 assert(SafepointSynchronize::is_at_safepoint() && \
415 415 ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
416 416 heap_locking_asserts_err_msg("should be at a safepoint")); \
417 417 } while (0)
418 418
419 419 #define assert_not_at_safepoint() \
420 420 do { \
421 421 assert(!SafepointSynchronize::is_at_safepoint(), \
422 422 heap_locking_asserts_err_msg("should not be at a safepoint")); \
423 423 } while (0)
424 424
425 425 protected:
426 426
427 427 // The young region list.
428 428 YoungList* _young_list;
429 429
430 430 // The current policy object for the collector.
431 431 G1CollectorPolicy* _g1_policy;
432 432
433 433 // This is the second level of trying to allocate a new region. If
434 434 // new_region() didn't find a region on the free_list, this call will
435 435 // check whether there's anything available on the
436 436 // secondary_free_list and/or wait for more regions to appear on
437 437 // that list, if _free_regions_coming is set.
438 438 HeapRegion* new_region_try_secondary_free_list();
439 439
440 440 // Try to allocate a single non-humongous HeapRegion sufficient for
441 441 // an allocation of the given word_size. If do_expand is true,
442 442 // attempt to expand the heap if necessary to satisfy the allocation
443 443 // request.
444 444 HeapRegion* new_region(size_t word_size, bool do_expand);
445 445
446 446 // Attempt to satisfy a humongous allocation request of the given
447 447 // size by finding a contiguous set of free regions of num_regions
448 448 // length and remove them from the master free list. Return the
449 449 // index of the first region or G1_NULL_HRS_INDEX if the search
450 450 // was unsuccessful.
451 451 size_t humongous_obj_allocate_find_first(size_t num_regions,
452 452 size_t word_size);
453 453
454 454 // Initialize a contiguous set of free regions of length num_regions
455 455 // and starting at index first so that they appear as a single
456 456 // humongous region.
457 457 HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
458 458 size_t num_regions,
459 459 size_t word_size);
460 460
461 461 // Attempt to allocate a humongous object of the given size. Return
462 462 // NULL if unsuccessful.
463 463 HeapWord* humongous_obj_allocate(size_t word_size);
464 464
465 465 // The following two methods, allocate_new_tlab() and
466 466 // mem_allocate(), are the two main entry points from the runtime
467 467 // into the G1's allocation routines. They have the following
468 468 // assumptions:
469 469 //
470 470 // * They should both be called outside safepoints.
471 471 //
472 472 // * They should both be called without holding the Heap_lock.
473 473 //
474 474 // * All allocation requests for new TLABs should go to
475 475 // allocate_new_tlab().
476 476 //
477 477 // * All non-TLAB allocation requests should go to mem_allocate().
478 478 //
479 479 // * If either call cannot satisfy the allocation request using the
480 480 // current allocating region, they will try to get a new one. If
481 481 // this fails, they will attempt to do an evacuation pause and
482 482 // retry the allocation.
483 483 //
484 484 // * If all allocation attempts fail, even after trying to schedule
485 485 // an evacuation pause, allocate_new_tlab() will return NULL,
486 486 // whereas mem_allocate() will attempt a heap expansion and/or
487 487 // schedule a Full GC.
488 488 //
489 489 // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
490 490 // should never be called with word_size being humongous. All
491 491 // humongous allocation requests should go to mem_allocate() which
492 492 // will satisfy them with a special path.
493 493
494 494 virtual HeapWord* allocate_new_tlab(size_t word_size);
495 495
496 496 virtual HeapWord* mem_allocate(size_t word_size,
497 497 bool* gc_overhead_limit_was_exceeded);
498 498
499 499 // The following three methods take a gc_count_before_ret
500 500 // parameter which is used to return the GC count if the method
501 501 // returns NULL. Given that we are required to read the GC count
502 502 // while holding the Heap_lock, and these paths will take the
503 503 // Heap_lock at some point, it's easier to get them to read the GC
504 504 // count while holding the Heap_lock before they return NULL instead
505 505 // of the caller (namely: mem_allocate()) having to also take the
506 506 // Heap_lock just to read the GC count.
507 507
508 508 // First-level mutator allocation attempt: try to allocate out of
509 509 // the mutator alloc region without taking the Heap_lock. This
510 510 // should only be used for non-humongous allocations.
511 511 inline HeapWord* attempt_allocation(size_t word_size,
512 512 unsigned int* gc_count_before_ret);
513 513
514 514 // Second-level mutator allocation attempt: take the Heap_lock and
515 515 // retry the allocation attempt, potentially scheduling a GC
516 516 // pause. This should only be used for non-humongous allocations.
517 517 HeapWord* attempt_allocation_slow(size_t word_size,
518 518 unsigned int* gc_count_before_ret);
519 519
520 520 // Takes the Heap_lock and attempts a humongous allocation. It can
521 521 // potentially schedule a GC pause.
522 522 HeapWord* attempt_allocation_humongous(size_t word_size,
523 523 unsigned int* gc_count_before_ret);
524 524
525 525 // Allocation attempt that should be called during safepoints (e.g.,
526 526 // at the end of a successful GC). expect_null_mutator_alloc_region
527 527 // specifies whether the mutator alloc region is expected to be NULL
528 528 // or not.
529 529 HeapWord* attempt_allocation_at_safepoint(size_t word_size,
530 530 bool expect_null_mutator_alloc_region);
531 531
532 532 // It dirties the cards that cover the block so that the post
533 533 // write barrier never queues anything when updating objects on this
534 534 // block. It is assumed (and in fact we assert) that the block
535 535 // belongs to a young region.
536 536 inline void dirty_young_block(HeapWord* start, size_t word_size);
537 537
538 538 // Allocate blocks during garbage collection. Will ensure an
539 539 // allocation region, either by picking one or expanding the
540 540 // heap, and then allocate a block of the given size. The block
541 541 // may not be humongous - it must fit into a single heap region.
542 542 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
543 543
544 544 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
545 545 HeapRegion* alloc_region,
546 546 bool par,
547 547 size_t word_size);
548 548
549 549 // Ensure that no further allocations can happen in "r", bearing in mind
550 550 // that parallel threads might be attempting allocations.
551 551 void par_allocate_remaining_space(HeapRegion* r);
552 552
553 553 // Allocation attempt during GC for a survivor object / PLAB.
554 554 inline HeapWord* survivor_attempt_allocation(size_t word_size);
555 555
556 556 // Allocation attempt during GC for an old object / PLAB.
557 557 inline HeapWord* old_attempt_allocation(size_t word_size);
558 558
559 559 // These methods are the "callbacks" from the G1AllocRegion class.
560 560
561 561 // For mutator alloc regions.
562 562 HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
563 563 void retire_mutator_alloc_region(HeapRegion* alloc_region,
564 564 size_t allocated_bytes);
565 565
566 566 // For GC alloc regions.
567 567 HeapRegion* new_gc_alloc_region(size_t word_size, size_t count,
568 568 GCAllocPurpose ap);
569 569 void retire_gc_alloc_region(HeapRegion* alloc_region,
570 570 size_t allocated_bytes, GCAllocPurpose ap);
571 571
572 572 // - if explicit_gc is true, the GC is for a System.gc() or a heap
573 573 // inspection request and should collect the entire heap
574 574 // - if clear_all_soft_refs is true, all soft references should be
575 575 // cleared during the GC
576 576 // - if explicit_gc is false, word_size describes the allocation that
577 577 // the GC should attempt (at least) to satisfy
578 578 // - it returns false if it is unable to do the collection due to the
579 579 // GC locker being active, true otherwise
580 580 bool do_collection(bool explicit_gc,
581 581 bool clear_all_soft_refs,
582 582 size_t word_size);
583 583
584 584 // Callback from VM_G1CollectFull operation.
585 585 // Perform a full collection.
586 586 void do_full_collection(bool clear_all_soft_refs);
587 587
588 588 // Resize the heap if necessary after a full collection. If this is
589 589 // after a collect-for allocation, "word_size" is the allocation size,
590 590 // and will be considered part of the used portion of the heap.
591 591 void resize_if_necessary_after_full_collection(size_t word_size);
592 592
593 593 // Callback from VM_G1CollectForAllocation operation.
594 594 // This function does everything necessary/possible to satisfy a
595 595 // failed allocation request (including collection, expansion, etc.)
596 596 HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
597 597
598 598 // Attempt to expand the heap sufficiently
599 599 // to support an allocation of the given "word_size". If
600 600 // successful, perform the allocation and return the address of the
601 601 // allocated block, or else "NULL".
602 602 HeapWord* expand_and_allocate(size_t word_size);
603 603
604 604 // Process any reference objects discovered during
605 605 // an incremental evacuation pause.
606 606 void process_discovered_references();
607 607
608 608 // Enqueue any remaining discovered references
609 609 // after processing.
610 610 void enqueue_discovered_references();
611 611
612 612 public:
613 613
614 614 G1MonitoringSupport* g1mm() {
615 615 assert(_g1mm != NULL, "should have been initialized");
616 616 return _g1mm;
617 617 }
618 618
619 619 // Expand the garbage-first heap by at least the given size (in bytes!).
620 620 // Returns true if the heap was expanded by the requested amount;
621 621 // false otherwise.
622 622 // (Rounds up to a HeapRegion boundary.)
623 623 bool expand(size_t expand_bytes);
624 624
625 625 // Do anything common to GC's.
626 626 virtual void gc_prologue(bool full);
627 627 virtual void gc_epilogue(bool full);
628 628
629 629 // We register a region with the fast "in collection set" test. We
630 630 // simply set to true the array slot corresponding to this region.
631 631 void register_region_with_in_cset_fast_test(HeapRegion* r) {
632 632 assert(_in_cset_fast_test_base != NULL, "sanity");
633 633 assert(r->in_collection_set(), "invariant");
634 634 size_t index = r->hrs_index();
635 635 assert(index < _in_cset_fast_test_length, "invariant");
636 636 assert(!_in_cset_fast_test_base[index], "invariant");
637 637 _in_cset_fast_test_base[index] = true;
638 638 }
639 639
640 640 // This is a fast test on whether a reference points into the
641 641 // collection set or not. It does not assume that the reference
642 642 // points into the heap; if it doesn't, it will return false.
643 643 bool in_cset_fast_test(oop obj) {
644 644 assert(_in_cset_fast_test != NULL, "sanity");
645 645 if (_g1_committed.contains((HeapWord*) obj)) {
646 646 // no need to subtract the bottom of the heap from obj,
647 647 // _in_cset_fast_test is biased
648 648 size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
649 649 bool ret = _in_cset_fast_test[index];
650 650 // let's make sure the result is consistent with what the slower
651 651 // test returns
652 652 assert( ret || !obj_in_cs(obj), "sanity");
653 653 assert(!ret || obj_in_cs(obj), "sanity");
654 654 return ret;
655 655 } else {
656 656 return false;
657 657 }
658 658 }
659 659
660 660 void clear_cset_fast_test() {
661 661 assert(_in_cset_fast_test_base != NULL, "sanity");
662 662 memset(_in_cset_fast_test_base, false,
663 663 _in_cset_fast_test_length * sizeof(bool));
664 664 }
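[Editorial note, not part of the file] The biased _in_cset_fast_test pointer used above lets the hot-path membership test be a single shift and load, with no subtraction of the heap bottom. The fragment below is a minimal sketch of that arithmetic with hypothetical names; it is not taken from this file or from the patch.

#include <stdint.h>

// Editorial illustration of the biasing trick (hypothetical names).
bool* bias_in_cset_array(bool* array_base,         // one entry per region
                         uintptr_t heap_bottom,    // address of region 0
                         unsigned log_region_bytes) {
  // Deliberately points below the allocated array (as the card table's biased
  // base does); it is only ever indexed with in-heap addresses.
  return array_base - (heap_bottom >> log_region_bytes);
}

bool fast_in_cset(const bool* biased_base, uintptr_t addr,
                  unsigned log_region_bytes) {
  return biased_base[addr >> log_region_bytes];    // single shift + load
}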
665 665
666 666 // This is called at the end of either a concurrent cycle or a Full
667 667 // GC to update the number of full collections completed. Those two
668 668 // can happen in a nested fashion, i.e., we start a concurrent
669 669 // cycle, a Full GC happens half-way through it which ends first,
670 670 // and then the cycle notices that a Full GC happened and ends
671 671 // too. The concurrent parameter is a boolean to help us do a bit
672 672 // tighter consistency checking in the method. If concurrent is
673 673 // false, the caller is the inner caller in the nesting (i.e., the
674 674 // Full GC). If concurrent is true, the caller is the outer caller
675 675 // in this nesting (i.e., the concurrent cycle). Further nesting is
676 676 // not currently supported. The end of this call also notifies
677 677 // the FullGCCount_lock in case a Java thread is waiting for a full
678 678 // GC to happen (e.g., it called System.gc() with
679 679 // +ExplicitGCInvokesConcurrent).
680 680 void increment_full_collections_completed(bool concurrent);
681 681
682 682 unsigned int full_collections_completed() {
683 683 return _full_collections_completed;
684 684 }
685 685
686 686 G1HRPrinter* hr_printer() { return &_hr_printer; }
687 687
688 688 protected:
689 689
690 690 // Shrink the garbage-first heap by at most the given size (in bytes!).
691 691 // (Rounds down to a HeapRegion boundary.)
692 692 virtual void shrink(size_t expand_bytes);
693 693 void shrink_helper(size_t expand_bytes);
694 694
695 695 #if TASKQUEUE_STATS
696 696 static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
697 697 void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
698 698 void reset_taskqueue_stats();
699 699 #endif // TASKQUEUE_STATS
700 700
701 701 // Schedule the VM operation that will do an evacuation pause to
702 702 // satisfy an allocation request of word_size. *succeeded will
703 703 // return whether the VM operation was successful (it did do an
704 704 // evacuation pause) or not (another thread beat us to it or the GC
705 705 // locker was active). Given that we should not be holding the
706 706 // Heap_lock when we enter this method, we will pass the
707 707 // gc_count_before (i.e., total_collections()) as a parameter since
708 708 // it has to be read while holding the Heap_lock. Currently, both
709 709 // methods that call do_collection_pause() release the Heap_lock
710 710 // before the call, so it's easy to read gc_count_before just before.
711 711 HeapWord* do_collection_pause(size_t word_size,
712 712 unsigned int gc_count_before,
713 713 bool* succeeded);
714 714
715 715 // The guts of the incremental collection pause, executed by the vm
716 716 // thread. It returns false if it is unable to do the collection due
717 717 // to the GC locker being active, true otherwise
718 718 bool do_collection_pause_at_safepoint(double target_pause_time_ms);
719 719
720 720 // Actually do the work of evacuating the collection set.
721 721 void evacuate_collection_set();
722 722
723 723 // The g1 remembered set of the heap.
724 724 G1RemSet* _g1_rem_set;
725 725 // And its mod ref barrier set, used to track updates for the above.
726 726 ModRefBarrierSet* _mr_bs;
727 727
728 728 // A set of cards that cover the objects for which the Rsets should be updated
729 729 // concurrently after the collection.
730 730 DirtyCardQueueSet _dirty_card_queue_set;
731 731
732 732 // The Heap Region Rem Set Iterator.
733 733 HeapRegionRemSetIterator** _rem_set_iterator;
734 734
735 735 // The closure used to refine a single card.
736 736 RefineCardTableEntryClosure* _refine_cte_cl;
737 737
738 738 // A function to check the consistency of dirty card logs.
739 739 void check_ct_logs_at_safepoint();
740 740
741 741 // A DirtyCardQueueSet that is used to hold cards that contain
742 742 // references into the current collection set. This is used to
743 743 // update the remembered sets of the regions in the collection
744 744 // set in the event of an evacuation failure.
745 745 DirtyCardQueueSet _into_cset_dirty_card_queue_set;
746 746
747 747 // After a collection pause, make the regions in the CS into free
748 748 // regions.
749 749 void free_collection_set(HeapRegion* cs_head);
750 750
751 751 // Abandon the current collection set without recording policy
752 752 // statistics or updating free lists.
753 753 void abandon_collection_set(HeapRegion* cs_head);
754 754
755 755 // Applies "scan_non_heap_roots" to roots outside the heap,
756 756 // "scan_rs" to roots inside the heap (having done "set_region" to
757 757 // indicate the region in which the root resides), and does "scan_perm"
758 758 // (setting the generation to the perm generation.) If "scan_rs" is
759 759 // NULL, then this step is skipped. The "worker_i"
760 760 // param is for use with parallel roots processing, and should be
761 761 // the "i" of the calling parallel worker thread's work(i) function.
762 762 // In the sequential case this param will be ignored.
763 763 void g1_process_strong_roots(bool collecting_perm_gen,
764 764 SharedHeap::ScanningOption so,
765 765 OopClosure* scan_non_heap_roots,
766 766 OopsInHeapRegionClosure* scan_rs,
767 767 OopsInGenClosure* scan_perm,
768 768 int worker_i);
769 769
770 770 // Apply "blk" to all the weak roots of the system. These include
771 771 // JNI weak roots, the code cache, system dictionary, symbol table,
772 772 // string table, and referents of reachable weak refs.
773 773 void g1_process_weak_roots(OopClosure* root_closure,
774 774 OopClosure* non_root_closure);
775 775
776 776 // Frees a non-humongous region by initializing its contents and
777 777 // adding it to the free list that's passed as a parameter (this is
778 778 // usually a local list which will be appended to the master free
779 779 // list later). The used bytes of freed regions are accumulated in
780 780 // pre_used. If par is true, the region's RSet will not be freed
781 781 // up. The assumption is that this will be done later.
782 782 void free_region(HeapRegion* hr,
783 783 size_t* pre_used,
784 784 FreeRegionList* free_list,
785 785 bool par);
786 786
787 787 // Frees a humongous region by collapsing it into individual regions
788 788 // and calling free_region() for each of them. The freed regions
789 789 // will be added to the free list that's passed as a parameter (this
790 790 // is usually a local list which will be appended to the master free
791 791 // list later). The used bytes of freed regions are accumulated in
792 792 // pre_used. If par is true, the region's RSet will not be freed
793 793 // up. The assumption is that this will be done later.
794 794 void free_humongous_region(HeapRegion* hr,
795 795 size_t* pre_used,
796 796 FreeRegionList* free_list,
797 797 HumongousRegionSet* humongous_proxy_set,
798 798 bool par);
799 799
800 800 // Notifies all the necessary spaces that the committed space has
801 801 // been updated (either expanded or shrunk). It should be called
802 802 // after _g1_storage is updated.
803 803 void update_committed_space(HeapWord* old_end, HeapWord* new_end);
804 804
805 805 // The concurrent marker (and the thread it runs in.)
806 806 ConcurrentMark* _cm;
807 807 ConcurrentMarkThread* _cmThread;
808 808 bool _mark_in_progress;
809 809
810 810 // The concurrent refiner.
811 811 ConcurrentG1Refine* _cg1r;
812 812
813 813 // The parallel task queues
814 814 RefToScanQueueSet *_task_queues;
815 815
816 816 // True iff an evacuation has failed in the current collection.
817 817 bool _evacuation_failed;
818 818
819 819 // Set the attribute indicating whether evacuation has failed in the
820 820 // current collection.
821 821 void set_evacuation_failed(bool b) { _evacuation_failed = b; }
822 822
823 823 // Failed evacuations cause some logical from-space objects to have
824 824 // forwarding pointers to themselves. Reset them.
825 825 void remove_self_forwarding_pointers();
826 826
827 827 // When one is non-null, so is the other. Together, each pair is
828 828 // an object with a preserved mark, and its mark value.
829 829 GrowableArray<oop>* _objs_with_preserved_marks;
830 830 GrowableArray<markOop>* _preserved_marks_of_objs;
831 831
832 832 // Preserve the mark of "obj", if necessary, in preparation for its mark
833 833 // word being overwritten with a self-forwarding-pointer.
834 834 void preserve_mark_if_necessary(oop obj, markOop m);
835 835
836 836 // The stack of evac-failure objects left to be scanned.
837 837 GrowableArray<oop>* _evac_failure_scan_stack;
838 838 // The closure to apply to evac-failure objects.
839 839
840 840 OopsInHeapRegionClosure* _evac_failure_closure;
841 841 // Set the field above.
842 842 void
843 843 set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) {
844 844 _evac_failure_closure = evac_failure_closure;
845 845 }
846 846
847 847 // Push "obj" on the scan stack.
848 848 void push_on_evac_failure_scan_stack(oop obj);
849 849 // Process scan stack entries until the stack is empty.
850 850 void drain_evac_failure_scan_stack();
851 851 // True iff an invocation of "drain_scan_stack" is in progress; to
852 852 // prevent unnecessary recursion.
853 853 bool _drain_in_progress;
854 854
855 855 // Do any necessary initialization for evacuation-failure handling.
856 856 // "cl" is the closure that will be used to process evac-failure
857 857 // objects.
858 858 void init_for_evac_failure(OopsInHeapRegionClosure* cl);
859 859 // Do any necessary cleanup for evacuation-failure handling data
860 860 // structures.
861 861 void finalize_for_evac_failure();
862 862
863 863 // An attempt to evacuate "obj" has failed; take necessary steps.
864 864 oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj,
865 - bool should_mark_root);
865 + bool should_mark_root,
866 + int worker_i);
866 867 void handle_evacuation_failure_common(oop obj, markOop m);
867 868
868 869 // ("Weak") Reference processing support.
869 870 //
870 871 // G1 has 2 instances of the reference processor class. One
871 872 // (_ref_processor_cm) handles reference object discovery
872 873 // and subsequent processing during concurrent marking cycles.
873 874 //
874 875 // The other (_ref_processor_stw) handles reference object
875 876 // discovery and processing during full GCs and incremental
876 877 // evacuation pauses.
877 878 //
878 879 // During an incremental pause, reference discovery will be
879 880 // temporarily disabled for _ref_processor_cm and will be
880 881 // enabled for _ref_processor_stw. At the end of the evacuation
881 882 // pause references discovered by _ref_processor_stw will be
882 883 // processed and discovery will be disabled. The previous
883 884 // setting for reference object discovery for _ref_processor_cm
884 885 // will be re-instated.
885 886 //
886 887 // At the start of marking:
887 888 // * Discovery by the CM ref processor is verified to be inactive
888 889 // and its discovered lists are empty.
889 890 // * Discovery by the CM ref processor is then enabled.
890 891 //
891 892 // At the end of marking:
892 893 // * Any references on the CM ref processor's discovered
893 894 // lists are processed (possibly MT).
894 895 //
895 896 // At the start of full GC we:
896 897 // * Disable discovery by the CM ref processor and
897 898 // empty CM ref processor's discovered lists
898 899 // (without processing any entries).
899 900 // * Verify that the STW ref processor is inactive and its
900 901 // discovered lists are empty.
901 902 // * Temporarily set STW ref processor discovery as single threaded.
902 903 // * Temporarily clear the STW ref processor's _is_alive_non_header
903 904 // field.
904 905 // * Finally enable discovery by the STW ref processor.
905 906 //
906 907 // The STW ref processor is used to record any discovered
907 908 // references during the full GC.
908 909 //
909 910 // At the end of a full GC we:
910 911 // * Enqueue any reference objects discovered by the STW ref processor
911 912 // that have non-live referents. This has the side-effect of
912 913 // making the STW ref processor inactive by disabling discovery.
913 914 // * Verify that the CM ref processor is still inactive
914 915 // and no references have been placed on its discovered
915 916 // lists (also checked as a precondition during initial marking).
916 917
917 918 // The (stw) reference processor...
918 919 ReferenceProcessor* _ref_processor_stw;
919 920
920 921 // During reference object discovery, the _is_alive_non_header
921 922 // closure (if non-null) is applied to the referent object to
922 923 // determine whether the referent is live. If so then the
923 924 // reference object does not need to be 'discovered' and can
924 925 // be treated as a regular oop. This has the benefit of reducing
925 926 // the number of 'discovered' reference objects that need to
926 927 // be processed.
927 928 //
928 929 // Instance of the is_alive closure for embedding into the
929 930 // STW reference processor as the _is_alive_non_header field.
930 931 // Supplying a value for the _is_alive_non_header field is
931 932 // optional but doing so prevents unnecessary additions to
932 933 // the discovered lists during reference discovery.
933 934 G1STWIsAliveClosure _is_alive_closure_stw;
934 935
935 936 // The (concurrent marking) reference processor...
936 937 ReferenceProcessor* _ref_processor_cm;
937 938
938 939 // Instance of the concurrent mark is_alive closure for embedding
939 940 // into the Concurrent Marking reference processor as the
940 941 // _is_alive_non_header field. Supplying a value for the
941 942 // _is_alive_non_header field is optional but doing so prevents
942 943 // unnecessary additions to the discovered lists during reference
943 944 // discovery.
944 945 G1CMIsAliveClosure _is_alive_closure_cm;
945 946
946 947 // Cache used by G1CollectedHeap::start_cset_region_for_worker().
947 948 HeapRegion** _worker_cset_start_region;
948 949
949 950 // Time stamp to validate the regions recorded in the cache
950 951 // used by G1CollectedHeap::start_cset_region_for_worker().
951 952 // The heap region entry for a given worker is valid iff
952 953 // the associated time stamp value matches the current value
953 954 // of G1CollectedHeap::_gc_time_stamp.
954 955 unsigned int* _worker_cset_start_region_time_stamp;
955 956
956 957 enum G1H_process_strong_roots_tasks {
957 958 G1H_PS_mark_stack_oops_do,
958 959 G1H_PS_refProcessor_oops_do,
959 960 // Leave this one last.
960 961 G1H_PS_NumElements
961 962 };
962 963
963 964 SubTasksDone* _process_strong_tasks;
964 965
965 966 volatile bool _free_regions_coming;
966 967
967 968 public:
968 969
969 970 SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
970 971
971 972 void set_refine_cte_cl_concurrency(bool concurrent);
972 973
973 974 RefToScanQueue *task_queue(int i) const;
974 975
975 976 // A set of cards where updates happened during the GC
976 977 DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
977 978
978 979 // A DirtyCardQueueSet that is used to hold cards that contain
979 980 // references into the current collection set. This is used to
980 981 // update the remembered sets of the regions in the collection
981 982 // set in the event of an evacuation failure.
982 983 DirtyCardQueueSet& into_cset_dirty_card_queue_set()
983 984 { return _into_cset_dirty_card_queue_set; }
984 985
985 986 // Create a G1CollectedHeap with the specified policy.
986 987 // Must call the initialize method afterwards.
987 988 // May not return if something goes wrong.
988 989 G1CollectedHeap(G1CollectorPolicy* policy);
989 990
990 991 // Initialize the G1CollectedHeap to have the initial and
991 992 // maximum sizes, permanent generation, and remembered and barrier sets
992 993 // specified by the policy object.
993 994 jint initialize();
994 995
995 996 // Initialize weak reference processing.
996 997 virtual void ref_processing_init();
997 998
998 999 void set_par_threads(int t) {
999 1000 SharedHeap::set_par_threads(t);
1000 1001 // Done in SharedHeap but oddly there are
1001 1002 // two _process_strong_tasks's in a G1CollectedHeap
1002 1003 // so do it here too.
1003 1004 _process_strong_tasks->set_n_threads(t);
1004 1005 }
1005 1006
1006 1007 // Set _n_par_threads according to a policy TBD.
1007 1008 void set_par_threads();
1008 1009
1009 1010 void set_n_termination(int t) {
1010 1011 _process_strong_tasks->set_n_threads(t);
1011 1012 }
1012 1013
1013 1014 virtual CollectedHeap::Name kind() const {
1014 1015 return CollectedHeap::G1CollectedHeap;
1015 1016 }
1016 1017
1017 1018 // The current policy object for the collector.
1018 1019 G1CollectorPolicy* g1_policy() const { return _g1_policy; }
1019 1020
1020 1021 // Adaptive size policy. No such thing for g1.
1021 1022 virtual AdaptiveSizePolicy* size_policy() { return NULL; }
1022 1023
1023 1024 // The rem set and barrier set.
1024 1025 G1RemSet* g1_rem_set() const { return _g1_rem_set; }
1025 1026 ModRefBarrierSet* mr_bs() const { return _mr_bs; }
1026 1027
1027 1028 // The rem set iterator.
1028 1029 HeapRegionRemSetIterator* rem_set_iterator(int i) {
1029 1030 return _rem_set_iterator[i];
1030 1031 }
1031 1032
1032 1033 HeapRegionRemSetIterator* rem_set_iterator() {
1033 1034 return _rem_set_iterator[0];
1034 1035 }
1035 1036
1036 1037 unsigned get_gc_time_stamp() {
1037 1038 return _gc_time_stamp;
1038 1039 }
1039 1040
1040 1041 void reset_gc_time_stamp() {
1041 1042 _gc_time_stamp = 0;
1042 1043 OrderAccess::fence();
1043 1044 // Clear the cached CSet starting regions and time stamps.
1044 1045 // Their validity is dependent on the GC timestamp.
1045 1046 clear_cset_start_regions();
1046 1047 }
1047 1048
1048 1049 void increment_gc_time_stamp() {
1049 1050 ++_gc_time_stamp;
1050 1051 OrderAccess::fence();
1051 1052 }
1052 1053
1053 1054 void iterate_dirty_card_closure(CardTableEntryClosure* cl,
1054 1055 DirtyCardQueue* into_cset_dcq,
1055 1056 bool concurrent, int worker_i);
1056 1057
1057 1058 // The shared block offset table array.
1058 1059 G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
1059 1060
1060 1061 // Reference Processing accessors
1061 1062
1062 1063 // The STW reference processor....
1063 1064 ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
1064 1065
1065 1066 // The Concurrent Marking reference processor...
1066 1067 ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
1067 1068
1068 1069 virtual size_t capacity() const;
1069 1070 virtual size_t used() const;
1070 1071 // This should be called when we're not holding the heap lock. The
1071 1072 // result might be a bit inaccurate.
1072 1073 size_t used_unlocked() const;
1073 1074 size_t recalculate_used() const;
1074 1075
1075 1076 // These virtual functions do the actual allocation.
1076 1077 // Some heaps may offer a contiguous region for shared non-blocking
1077 1078 // allocation, via inlined code (by exporting the address of the top and
1078 1079 // end fields defining the extent of the contiguous allocation region.)
1079 1080 // But G1CollectedHeap doesn't yet support this.
1080 1081
1081 1082 // Return an estimate of the maximum allocation that could be performed
1082 1083 // without triggering any collection or expansion activity. In a
1083 1084 // generational collector, for example, this is probably the largest
1084 1085 // allocation that could be supported (without expansion) in the youngest
1085 1086 // generation. It is "unsafe" because no locks are taken; the result
1086 1087 // should be treated as an approximation, not a guarantee, for use in
1087 1088 // heuristic resizing decisions.
1088 1089 virtual size_t unsafe_max_alloc();
1089 1090
1090 1091 virtual bool is_maximal_no_gc() const {
1091 1092 return _g1_storage.uncommitted_size() == 0;
1092 1093 }
1093 1094
1094 1095 // The total number of regions in the heap.
1095 1096 size_t n_regions() { return _hrs.length(); }
1096 1097
1097 1098 // The max number of regions in the heap.
1098 1099 size_t max_regions() { return _hrs.max_length(); }
1099 1100
1100 1101 // The number of regions that are completely free.
1101 1102 size_t free_regions() { return _free_list.length(); }
1102 1103
1103 1104 // The number of regions that are not completely free.
1104 1105 size_t used_regions() { return n_regions() - free_regions(); }
1105 1106
1106 1107 // The number of regions available for "regular" expansion.
1107 1108 size_t expansion_regions() { return _expansion_regions; }
1108 1109
1109 1110 // Factory method for HeapRegion instances. It will return NULL if
1110 1111 // the allocation fails.
1111 1112 HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
1112 1113
1113 1114 void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
1114 1115 void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
1115 1116 void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
1116 1117 void verify_dirty_young_regions() PRODUCT_RETURN;
1117 1118
1118 1119 // verify_region_sets() performs verification over the region
1119 1120 // lists. It will be compiled in the product code to be used when
1120 1121 // necessary (i.e., during heap verification).
1121 1122 void verify_region_sets();
1122 1123
1123 1124 // verify_region_sets_optional() is planted in the code for
1124 1125 // list verification in non-product builds (and it can be enabled in
1125 1126 // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
1126 1127 #if HEAP_REGION_SET_FORCE_VERIFY
1127 1128 void verify_region_sets_optional() {
1128 1129 verify_region_sets();
1129 1130 }
1130 1131 #else // HEAP_REGION_SET_FORCE_VERIFY
1131 1132 void verify_region_sets_optional() { }
1132 1133 #endif // HEAP_REGION_SET_FORCE_VERIFY
1133 1134
1134 1135 #ifdef ASSERT
1135 1136 bool is_on_master_free_list(HeapRegion* hr) {
1136 1137 return hr->containing_set() == &_free_list;
1137 1138 }
1138 1139
1139 1140 bool is_in_humongous_set(HeapRegion* hr) {
1140 1141 return hr->containing_set() == &_humongous_set;
1141 1142 }
1142 1143 #endif // ASSERT
1143 1144
1144 1145 // Wrapper for the region list operations that can be called from
1145 1146 // methods outside this class.
1146 1147
1147 1148 void secondary_free_list_add_as_tail(FreeRegionList* list) {
1148 1149 _secondary_free_list.add_as_tail(list);
1149 1150 }
1150 1151
1151 1152 void append_secondary_free_list() {
1152 1153 _free_list.add_as_head(&_secondary_free_list);
1153 1154 }
1154 1155
1155 1156 void append_secondary_free_list_if_not_empty_with_lock() {
1156 1157 // If the secondary free list looks empty there's no reason to
1157 1158 // take the lock and then try to append it.
1158 1159 if (!_secondary_free_list.is_empty()) {
1159 1160 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1160 1161 append_secondary_free_list();
1161 1162 }
1162 1163 }
1163 1164
1164 1165 void old_set_remove(HeapRegion* hr) {
1165 1166 _old_set.remove(hr);
1166 1167 }
1167 1168
1168 1169 void set_free_regions_coming();
1169 1170 void reset_free_regions_coming();
1170 1171 bool free_regions_coming() { return _free_regions_coming; }
1171 1172 void wait_while_free_regions_coming();
1172 1173
1173 1174 // Perform a collection of the heap; intended for use in implementing
1174 1175 // "System.gc". This probably implies as full a collection as the
1175 1176 // "CollectedHeap" supports.
1176 1177 virtual void collect(GCCause::Cause cause);
1177 1178
1178 1179 // The same as above but assume that the caller holds the Heap_lock.
1179 1180 void collect_locked(GCCause::Cause cause);
1180 1181
1181 1182 // This interface assumes that it's being called by the
1182 1183 // vm thread. It collects the heap assuming that the
1183 1184 // heap lock is already held and that we are executing in
1184 1185 // the context of the vm thread.
1185 1186 virtual void collect_as_vm_thread(GCCause::Cause cause);
1186 1187
1187 1188 // True iff an evacuation has failed in the most-recent collection.
1188 1189 bool evacuation_failed() { return _evacuation_failed; }
1189 1190
1190 1191 // It will free a region if it has allocated objects in it that are
1191 1192 // all dead. It calls either free_region() or
1192 1193 // free_humongous_region() depending on the type of the region that
1193 1194 // is passed to it.
1194 1195 void free_region_if_empty(HeapRegion* hr,
1195 1196 size_t* pre_used,
1196 1197 FreeRegionList* free_list,
1197 1198 OldRegionSet* old_proxy_set,
1198 1199 HumongousRegionSet* humongous_proxy_set,
1199 1200 HRRSCleanupTask* hrrs_cleanup_task,
1200 1201 bool par);
1201 1202
1202 1203 // It appends the free list to the master free list and updates the
1203 1204 // master humongous list according to the contents of the proxy
1204 1205 // list. It also adjusts the total used bytes according to pre_used
1205 1206 // (if par is true, it will do so by taking the ParGCRareEvent_lock).
1206 1207 void update_sets_after_freeing_regions(size_t pre_used,
1207 1208 FreeRegionList* free_list,
1208 1209 OldRegionSet* old_proxy_set,
1209 1210 HumongousRegionSet* humongous_proxy_set,
1210 1211 bool par);
1211 1212
1212 1213 // Returns "TRUE" iff "p" points into the committed areas of the heap.
1213 1214 virtual bool is_in(const void* p) const;
1214 1215
1215 1216 // Return "TRUE" iff the given object address is within the collection
1216 1217 // set.
1217 1218 inline bool obj_in_cs(oop obj);
1218 1219
1219 1220 // Return "TRUE" iff the given object address is in the reserved
1220 1221 // region of g1 (excluding the permanent generation).
1221 1222 bool is_in_g1_reserved(const void* p) const {
1222 1223 return _g1_reserved.contains(p);
1223 1224 }
1224 1225
1225 1226 // Returns a MemRegion that corresponds to the space that has been
1226 1227 // reserved for the heap
1227 1228 MemRegion g1_reserved() {
1228 1229 return _g1_reserved;
1229 1230 }
1230 1231
1231 1232 // Returns a MemRegion that corresponds to the space that has been
1232 1233 // committed in the heap
1233 1234 MemRegion g1_committed() {
1234 1235 return _g1_committed;
1235 1236 }
1236 1237
1237 1238 virtual bool is_in_closed_subset(const void* p) const;
1238 1239
1239 1240 // This resets the card table to all zeros. It is used after
1240 1241 // a collection pause which used the card table to claim cards.
1241 1242 void cleanUpCardTable();
1242 1243
1243 1244 // Iteration functions.
1244 1245
1245 1246 // Iterate over all the ref-containing fields of all objects, calling
1246 1247 // "cl.do_oop" on each.
1247 1248 virtual void oop_iterate(OopClosure* cl) {
1248 1249 oop_iterate(cl, true);
1249 1250 }
1250 1251 void oop_iterate(OopClosure* cl, bool do_perm);
1251 1252
1252 1253 // Same as above, restricted to a memory region.
1253 1254 virtual void oop_iterate(MemRegion mr, OopClosure* cl) {
1254 1255 oop_iterate(mr, cl, true);
1255 1256 }
1256 1257 void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm);
1257 1258
1258 1259 // Iterate over all objects, calling "cl.do_object" on each.
1259 1260 virtual void object_iterate(ObjectClosure* cl) {
1260 1261 object_iterate(cl, true);
1261 1262 }
1262 1263 virtual void safe_object_iterate(ObjectClosure* cl) {
1263 1264 object_iterate(cl, true);
1264 1265 }
1265 1266 void object_iterate(ObjectClosure* cl, bool do_perm);
1266 1267
1267 1268 // Iterate over all objects allocated since the last collection, calling
1268 1269 // "cl.do_object" on each. The heap must have been initialized properly
1269 1270 // to support this function, or else this call will fail.
1270 1271 virtual void object_iterate_since_last_GC(ObjectClosure* cl);
1271 1272
1272 1273 // Iterate over all spaces in use in the heap, in ascending address order.
1273 1274 virtual void space_iterate(SpaceClosure* cl);
1274 1275
1275 1276 // Iterate over heap regions, in address order, terminating the
1276 1277 // iteration early if the "doHeapRegion" method returns "true".
1277 1278 void heap_region_iterate(HeapRegionClosure* blk) const;
1278 1279
1279 1280 // Iterate over heap regions starting with r (or the first region if "r"
1280 1281 // is NULL), in address order, terminating early if the "doHeapRegion"
1281 1282 // method returns "true".
1282 1283 void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
1283 1284
1284 1285 // Return the region with the given index. It assumes the index is valid.
1285 1286 HeapRegion* region_at(size_t index) const { return _hrs.at(index); }
1286 1287
1287 1288 // Divide the heap region sequence into "chunks" of some size (the number
1288 1289 // of regions divided by the number of parallel threads times some
1289 1290 // overpartition factor, currently 4). Assumes that this will be called
1290 1291 // in parallel by ParallelGCThreads worker threads with distinct worker
1291 1292 // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1292 1293 // calls will use the same "claim_value", and that that claim value is
1293 1294 // different from the claim_value of any heap region before the start of
1294 1295 // the iteration. Applies "blk->doHeapRegion" to each of the regions, by
1295 1296 // attempting to claim the first region in each chunk, and, if
1296 1297 // successful, applying the closure to each region in the chunk (and
1297 1298 // setting the claim value of the second and subsequent regions of the
1298 1299 // chunk.) For now requires that "doHeapRegion" always returns "false",
1299 1300 // i.e., that a closure never attempt to abort a traversal.
1300 1301 void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1301 1302 int worker,
1302 1303 int no_of_par_workers,
1303 1304 jint claim_value);
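// Editor's sketch (illustration only, not part of this change): how a worker
// might drive the chunked iteration above. The doHeapRegion() contract is the
// one referenced in the comment; the counting closure, g1h, worker_i,
// n_workers and claim_value are hypothetical names.
//
//   class CountRegionsClosure : public HeapRegionClosure {
//     size_t _count;
//   public:
//     CountRegionsClosure() : _count(0) { }
//     bool doHeapRegion(HeapRegion* hr) {
//       _count += 1;
//       return false;  // never abort the traversal, as required above
//     }
//     size_t count() const { return _count; }
//   };
//
//   // In each of n_workers GC worker threads, all using the same claim value:
//   CountRegionsClosure cl;
//   g1h->heap_region_par_iterate_chunked(&cl, worker_i, n_workers, claim_value);
//   // ... and once all workers have finished:
//   g1h->reset_heap_region_claim_values();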
1304 1305
1305 1306 // It resets all the region claim values to the default.
1306 1307 void reset_heap_region_claim_values();
1307 1308
1308 1309 #ifdef ASSERT
1309 1310 bool check_heap_region_claim_values(jint claim_value);
1310 1311
1311 1312 // Same as the routine above but only checks regions in the
1312 1313 // current collection set.
1313 1314 bool check_cset_heap_region_claim_values(jint claim_value);
1314 1315 #endif // ASSERT
1315 1316
1316 1317 // Clear the cached cset start regions and (more importantly)
1317 1318 // the time stamps. Called when we reset the GC time stamp.
1318 1319 void clear_cset_start_regions();
1319 1320
1320 1321 // Given the id of a worker, obtain or calculate a suitable
1321 1322 // starting region for iterating over the current collection set.
1322 1323 HeapRegion* start_cset_region_for_worker(int worker_i);
1323 1324
1324 1325 // Iterate over the regions (if any) in the current collection set.
1325 1326 void collection_set_iterate(HeapRegionClosure* blk);
1326 1327
1327 1328 // As above but starting from region r
1328 1329 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1329 1330
1330 1331 // Returns the first (lowest address) compactible space in the heap.
1331 1332 virtual CompactibleSpace* first_compactible_space();
1332 1333
1333 1334 // A CollectedHeap will contain some number of spaces. This finds the
1334 1335 // space containing a given address, or else returns NULL.
1335 1336 virtual Space* space_containing(const void* addr) const;
1336 1337
1337 1338 // A G1CollectedHeap will contain some number of heap regions. This
1338 1339 // finds the region containing a given address, or else returns NULL.
1339 1340 template <class T>
1340 1341 inline HeapRegion* heap_region_containing(const T addr) const;
1341 1342
1342 1343 // Like the above, but requires "addr" to be in the heap (to avoid a
1343 1344 // null-check), and unlike the above, may return a continuing humongous
1344 1345 // region.
1345 1346 template <class T>
1346 1347 inline HeapRegion* heap_region_containing_raw(const T addr) const;
1347 1348
1348 1349 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1349 1350 // each address in the (reserved) heap is a member of exactly
1350 1351 // one block. The defining characteristic of a block is that it is
1351 1352 // possible to find its size, and thus to progress forward to the next
1352 1353 // block. (Blocks may be of different sizes.) Thus, blocks may
1353 1354 // represent Java objects, or they might be free blocks in a
1354 1355 // free-list-based heap (or subheap), as long as the two kinds are
1355 1356 // distinguishable and the size of each is determinable.
1356 1357
1357 1358 // Returns the address of the start of the "block" that contains the
1358 1359 // address "addr". We say "blocks" instead of "objects" since some heaps
1359 1360 // may not pack objects densely; a chunk may either be an object or a
1360 1361 // non-object.
1361 1362 virtual HeapWord* block_start(const void* addr) const;
1362 1363
1363 1364 // Requires "addr" to be the start of a chunk, and returns its size.
1364 1365 // "addr + size" is required to be the start of a new chunk, or the end
1365 1366 // of the active area of the heap.
1366 1367 virtual size_t block_size(const HeapWord* addr) const;
1367 1368
1368 1369 // Requires "addr" to be the start of a block, and returns "TRUE" iff
1369 1370 // the block is an object.
1370 1371 virtual bool block_is_obj(const HeapWord* addr) const;
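// Editor's note (illustration only, not part of this change): the three
// methods above support a linear walk over the blocks of a region. The
// bounds "bottom" and "top" below are hypothetical placeholders.
//
//   HeapWord* cur = bottom;
//   while (cur < top) {
//     size_t sz = block_size(cur);        // every block knows its own size
//     if (block_is_obj(cur)) {
//       // cur is the start of a Java object spanning sz words
//     }
//     cur += sz;                          // advance to the next block
//   }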
1371 1372
1372 1373 // Does this heap support heap inspection? (+PrintClassHistogram)
1373 1374 virtual bool supports_heap_inspection() const { return true; }
1374 1375
1375 1376 // Section on thread-local allocation buffers (TLABs)
1376 1377 // See CollectedHeap for semantics.
1377 1378
1378 1379 virtual bool supports_tlab_allocation() const;
1379 1380 virtual size_t tlab_capacity(Thread* thr) const;
1380 1381 virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
1381 1382
1382 1383 // Can a compiler initialize a new object without store barriers?
1383 1384 // This permission only extends from the creation of a new object
1384 1385 // via a TLAB up to the first subsequent safepoint. If such permission
1385 1386 // is granted for this heap type, the compiler promises to call
1386 1387 // defer_store_barrier() below on any slow path allocation of
1387 1388 // a new object for which such initializing store barriers will
1388 1389 // have been elided. G1, like CMS, allows this, but should be
1389 1390 // ready to provide a compensating write barrier as necessary
1390 1391 // if that storage came out of a non-young region. The efficiency
1391 1392 // of this implementation depends crucially on being able to
1392 1393 // answer very efficiently in constant time whether a piece of
1393 1394 // storage in the heap comes from a young region or not.
1394 1395 // See ReduceInitialCardMarks.
1395 1396 virtual bool can_elide_tlab_store_barriers() const {
1396 1397 return true;
1397 1398 }
1398 1399
1399 1400 virtual bool card_mark_must_follow_store() const {
1400 1401 return true;
1401 1402 }
1402 1403
1403 1404 bool is_in_young(const oop obj) {
1404 1405 HeapRegion* hr = heap_region_containing(obj);
1405 1406 return hr != NULL && hr->is_young();
1406 1407 }
1407 1408
1408 1409 #ifdef ASSERT
1409 1410 virtual bool is_in_partial_collection(const void* p);
1410 1411 #endif
1411 1412
1412 1413 virtual bool is_scavengable(const void* addr);
1413 1414
1414 1415 // We don't need barriers for initializing stores to objects
1415 1416 // in the young gen: for the SATB pre-barrier, there is no
1416 1417 // pre-value that needs to be remembered; for the remembered-set
1417 1418 // update logging post-barrier, we don't maintain remembered set
1418 1419 // information for young gen objects.
1419 1420 virtual bool can_elide_initializing_store_barrier(oop new_obj) {
1420 1421 return is_in_young(new_obj);
1421 1422 }
1422 1423
1423 1424 // Can a compiler elide a store barrier when it writes
1424 1425 // a permanent oop into the heap? Applies when the compiler
1425 1426 // is storing x to the heap, where x->is_perm() is true.
1426 1427 virtual bool can_elide_permanent_oop_store_barriers() const {
1427 1428 // At least until perm gen collection is also G1-ified, at
1428 1429 // which point this should return false.
1429 1430 return true;
1430 1431 }
1431 1432
1432 1433 // Returns "true" iff the given word_size is "very large".
1433 1434 static bool isHumongous(size_t word_size) {
1434 1435 // Note this has to be strictly greater-than as the TLABs
1435 1436 // are capped at the humongous threshold and we want to
1436 1437 // ensure that we don't try to allocate a TLAB as
1437 1438 // humongous and that we don't allocate a humongous
1438 1439 // object in a TLAB.
1439 1440 return word_size > _humongous_object_threshold_in_words;
1440 1441 }
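// Editor's note: the strict ">" above means a request of exactly
// _humongous_object_threshold_in_words is still a regular (TLAB-eligible)
// allocation; only strictly larger requests take the humongous path.
// Illustrative use (word_size is a hypothetical request size):
//
//   if (G1CollectedHeap::isHumongous(word_size)) {
//     // allocate in dedicated humongous region(s), never in a TLAB
//   } else {
//     // regular allocation path; may be satisfied from a TLAB
//   }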
1441 1442
1442 1443 // Update mod union table with the set of dirty cards.
1443 1444 void updateModUnion();
1444 1445
1445 1446 // Set the mod union bits corresponding to the given memRegion. Note
1446 1447 // that this is always a safe operation, since it doesn't clear any
1447 1448 // bits.
1448 1449 void markModUnionRange(MemRegion mr);
1449 1450
1450 1451 // Records the fact that a marking phase is no longer in progress.
1451 1452 void set_marking_complete() {
1452 1453 _mark_in_progress = false;
1453 1454 }
1454 1455 void set_marking_started() {
1455 1456 _mark_in_progress = true;
1456 1457 }
1457 1458 bool mark_in_progress() {
1458 1459 return _mark_in_progress;
1459 1460 }
1460 1461
1461 1462 // Print the maximum heap capacity.
1462 1463 virtual size_t max_capacity() const;
1463 1464
1464 1465 virtual jlong millis_since_last_gc();
1465 1466
1466 1467 // Perform any cleanup actions necessary before allowing a verification.
1467 1468 virtual void prepare_for_verify();
1468 1469
1469 1470 // Perform verification.
1470 1471
1471 1472 // vo == UsePrevMarking -> use "prev" marking information,
1472 1473 // vo == UseNextMarking -> use "next" marking information
1473 1474 // vo == UseMarkWord -> use the mark word in the object header
1474 1475 //
1475 1476 // NOTE: Only the "prev" marking information is guaranteed to be
1476 1477 // consistent most of the time, so most calls to this should use
1477 1478 // vo == UsePrevMarking.
1478 1479 // Currently, there is only one case where this is called with
1479 1480 // vo == UseNextMarking, which is to verify the "next" marking
1480 1481 // information at the end of remark.
1481 1482 // Currently there is only one place where this is called with
1482 1483 // vo == UseMarkWord, which is to verify the marking during a
1483 1484 // full GC.
1484 1485 void verify(bool allow_dirty, bool silent, VerifyOption vo);
1485 1486
1486 1487 // Override; it uses the "prev" marking information
1487 1488 virtual void verify(bool allow_dirty, bool silent);
1488 1489 virtual void print_on(outputStream* st) const;
1489 1490 virtual void print_extended_on(outputStream* st) const;
1490 1491
1491 1492 virtual void print_gc_threads_on(outputStream* st) const;
1492 1493 virtual void gc_threads_do(ThreadClosure* tc) const;
1493 1494
1494 1495 // Override
1495 1496 void print_tracing_info() const;
1496 1497
1497 1498 // The following two methods are helpful for debugging RSet issues.
1498 1499 void print_cset_rsets() PRODUCT_RETURN;
1499 1500 void print_all_rsets() PRODUCT_RETURN;
1500 1501
1501 1502 // Convenience function to be used in situations where the heap type can be
1502 1503 // asserted to be this type.
1503 1504 static G1CollectedHeap* heap();
1504 1505
1505 1506 void set_region_short_lived_locked(HeapRegion* hr);
1506 1507 // add appropriate methods for any other surv rate groups
1507 1508
1508 1509 YoungList* young_list() { return _young_list; }
1509 1510
1510 1511 // debugging
1511 1512 bool check_young_list_well_formed() {
1512 1513 return _young_list->check_list_well_formed();
1513 1514 }
1514 1515
1515 1516 bool check_young_list_empty(bool check_heap,
1516 1517 bool check_sample = true);
1517 1518
1518 1519 // *** Stuff related to concurrent marking. It's not clear to me that so
1519 1520 // many of these need to be public.
1520 1521
1521 1522 // The functions below are helper functions that a subclass of
1522 1523 // "CollectedHeap" can use in the implementation of its virtual
1523 1524 // functions.
1524 1525 // This performs a concurrent marking of the live objects in a
1525 1526 // bitmap off to the side.
1526 1527 void doConcurrentMark();
1527 1528
1528 1529 bool isMarkedPrev(oop obj) const;
1529 1530 bool isMarkedNext(oop obj) const;
1530 1531
1531 1532 // vo == UsePrevMarking -> use "prev" marking information,
1532 1533 // vo == UseNextMarking -> use "next" marking information,
1533 1534 // vo == UseMarkWord -> use mark word from object header
1534 1535 bool is_obj_dead_cond(const oop obj,
1535 1536 const HeapRegion* hr,
1536 1537 const VerifyOption vo) const {
1537 1538
1538 1539 switch (vo) {
1539 1540 case VerifyOption_G1UsePrevMarking:
1540 1541 return is_obj_dead(obj, hr);
1541 1542 case VerifyOption_G1UseNextMarking:
1542 1543 return is_obj_ill(obj, hr);
1543 1544 default:
1544 1545 assert(vo == VerifyOption_G1UseMarkWord, "must be");
1545 1546 return !obj->is_gc_marked();
1546 1547 }
1547 1548 }
1548 1549
1549 1550 // Determine if an object is dead, given the object and also
1550 1551 // the region to which the object belongs. An object is dead
1551 1552 // iff a) it was not allocated since the last mark and b) it
1552 1553 // is not marked.
1553 1554
1554 1555 bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1555 1556 return
1556 1557 !hr->obj_allocated_since_prev_marking(obj) &&
1557 1558 !isMarkedPrev(obj);
1558 1559 }
1559 1560
1560 1561 // This is used when copying an object to survivor space.
1561 1562 // If the object is marked live, then we mark the copy live.
1562 1563 // If the object is allocated since the start of this mark
1563 1564 // cycle, then we mark the copy live.
1564 1565 // If the object has been around since the previous mark
1565 1566 // phase, and hasn't been marked yet during this phase,
1566 1567 // then we don't mark it, we just wait for the
1567 1568 // current marking cycle to get to it.
1568 1569
1569 1570 // This function returns true when an object has been
1570 1571 // around since the previous marking and hasn't yet
1571 1572 // been marked during this marking.
1572 1573
1573 1574 bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1574 1575 return
1575 1576 !hr->obj_allocated_since_next_marking(obj) &&
1576 1577 !isMarkedNext(obj);
1577 1578 }
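// Editor's summary of the two predicates above (same conditions, shown as a
// table; "since prev" is hr->obj_allocated_since_prev_marking(obj)):
//
//   is_obj_dead: since prev?   marked prev?   -> dead?
//                    no             no             yes
//                    no             yes            no
//                    yes           (any)           no
//
// is_obj_ill follows the same pattern using the "next" marking information.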
1578 1579
1579 1580 // Determine if an object is dead, given only the object itself.
1580 1581 // This will find the region to which the object belongs and
1581 1582 // then call the region version of the same function.
1582 1583
1583 1584 // Additionally, if it is in the permanent gen it isn't dead,
1584 1585 // and if it is NULL it isn't dead.
1585 1586
1586 1587 // vo == UsePrevMarking -> use "prev" marking information,
1587 1588 // vo == UseNextMarking -> use "next" marking information,
1588 1589 // vo == UseMarkWord -> use mark word from object header
1589 1590 bool is_obj_dead_cond(const oop obj,
1590 1591 const VerifyOption vo) const {
1591 1592
1592 1593 switch (vo) {
1593 1594 case VerifyOption_G1UsePrevMarking:
1594 1595 return is_obj_dead(obj);
1595 1596 case VerifyOption_G1UseNextMarking:
1596 1597 return is_obj_ill(obj);
1597 1598 default:
1598 1599 assert(vo == VerifyOption_G1UseMarkWord, "must be");
1599 1600 return !obj->is_gc_marked();
1600 1601 }
1601 1602 }
1602 1603
1603 1604 bool is_obj_dead(const oop obj) const {
1604 1605 const HeapRegion* hr = heap_region_containing(obj);
1605 1606 if (hr == NULL) {
1606 1607 if (Universe::heap()->is_in_permanent(obj))
1607 1608 return false;
1608 1609 else if (obj == NULL) return false;
1609 1610 else return true;
1610 1611 }
1611 1612 else return is_obj_dead(obj, hr);
1612 1613 }
1613 1614
1614 1615 bool is_obj_ill(const oop obj) const {
1615 1616 const HeapRegion* hr = heap_region_containing(obj);
1616 1617 if (hr == NULL) {
1617 1618 if (Universe::heap()->is_in_permanent(obj))
1618 1619 return false;
1619 1620 else if (obj == NULL) return false;
1620 1621 else return true;
1621 1622 }
1622 1623 else return is_obj_ill(obj, hr);
1623 1624 }
1624 1625
1625 1626 // The following is just to alert the verification code
1626 1627 // that a full collection has occurred and that the
1627 1628 // remembered sets are no longer up to date.
1628 1629 bool _full_collection;
1629 1630 void set_full_collection() { _full_collection = true;}
1630 1631 void clear_full_collection() {_full_collection = false;}
1631 1632 bool full_collection() {return _full_collection;}
1632 1633
1633 1634 ConcurrentMark* concurrent_mark() const { return _cm; }
1634 1635 ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1635 1636
1636 1637 // The dirty cards region list is used to record a subset of regions
1637 1638 // whose cards need clearing. The list is populated during the
1638 1639 // remembered set scanning and drained during the card table
1639 1640 // cleanup. Although the methods are reentrant, population/draining
1640 1641 // phases must not overlap. For synchronization purposes the last
1641 1642 // element on the list points to itself.
1642 1643 HeapRegion* _dirty_cards_region_list;
1643 1644 void push_dirty_cards_region(HeapRegion* hr);
1644 1645 HeapRegion* pop_dirty_cards_region();
1645 1646
1646 1647 public:
1647 1648 void stop_conc_gc_threads();
1648 1649
1649 1650 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
1650 1651 void check_if_region_is_too_expensive(double predicted_time_ms);
1651 1652 size_t pending_card_num();
1652 1653 size_t max_pending_card_num();
1653 1654 size_t cards_scanned();
1654 1655
1655 1656 protected:
1656 1657 size_t _max_heap_capacity;
1657 1658 };
1658 1659
1659 1660 #define use_local_bitmaps 1
1660 1661 #define verify_local_bitmaps 0
1661 1662 #define oop_buffer_length 256
1662 1663
1663 1664 #ifndef PRODUCT
1664 1665 class GCLabBitMap;
1665 1666 class GCLabBitMapClosure: public BitMapClosure {
1666 1667 private:
1667 1668 ConcurrentMark* _cm;
1668 1669 GCLabBitMap* _bitmap;
1669 1670
1670 1671 public:
1671 1672 GCLabBitMapClosure(ConcurrentMark* cm,
1672 1673 GCLabBitMap* bitmap) {
1673 1674 _cm = cm;
1674 1675 _bitmap = bitmap;
1675 1676 }
1676 1677
1677 1678 virtual bool do_bit(size_t offset);
1678 1679 };
1679 1680 #endif // !PRODUCT
1680 1681
1681 1682 class GCLabBitMap: public BitMap {
1682 1683 private:
1683 1684 ConcurrentMark* _cm;
1684 1685
1685 1686 int _shifter;
1686 1687 size_t _bitmap_word_covers_words;
1687 1688
1688 1689 // beginning of the heap
1689 1690 HeapWord* _heap_start;
1690 1691
1691 1692 // this is the actual start of the GCLab
1692 1693 HeapWord* _real_start_word;
1693 1694
1694 1695 // this is the actual end of the GCLab
1695 1696 HeapWord* _real_end_word;
1696 1697
1697 1698 // this is the first word, possibly located before the actual start
1698 1699 // of the GCLab, that corresponds to the first bit of the bitmap
1699 1700 HeapWord* _start_word;
1700 1701
1701 1702 // size of a GCLab in words
1702 1703 size_t _gclab_word_size;
1703 1704
1704 1705 static int shifter() {
1705 1706 return MinObjAlignment - 1;
1706 1707 }
1707 1708
1708 1709 // how many heap words does a single bitmap word correspond to?
1709 1710 static size_t bitmap_word_covers_words() {
1710 1711 return BitsPerWord << shifter();
1711 1712 }
1712 1713
1713 1714 size_t gclab_word_size() const {
1714 1715 return _gclab_word_size;
1715 1716 }
1716 1717
1717 1718 // Calculates actual GCLab size in words
1718 1719 size_t gclab_real_word_size() const {
1719 1720 return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word))
1720 1721 / BitsPerWord;
1721 1722 }
1722 1723
1723 1724 static size_t bitmap_size_in_bits(size_t gclab_word_size) {
1724 1725 size_t bits_in_bitmap = gclab_word_size >> shifter();
1725 1726 // We are going to ensure that the beginning of a word in this
1726 1727 // bitmap also corresponds to the beginning of a word in the
1727 1728 // global marking bitmap. To handle the case where a GCLab
1728 1729 // starts from the middle of the bitmap, we need to add enough
1729 1730 // space (i.e. up to a bitmap word) to ensure that we have
1730 1731 // enough bits in the bitmap.
1731 1732 return bits_in_bitmap + BitsPerWord - 1;
1732 1733 }
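// Editor's worked example (assumed parameters, illustration only): on a
// 64-bit VM with BitsPerWord == 64 and MinObjAlignment == 1, shifter() is 0,
// so for a GCLab of 4096 heap words:
//
//   bits_in_bitmap       = 4096 >> 0         = 4096
//   bitmap_size_in_bits  = 4096 + 64 - 1     = 4159
//   bitmap_size_in_words = (4159 + 63) / 64  = 65   // one word of slack
//
// The extra word is what lets a GCLab that starts in the middle of a global
// bitmap word still be fully covered, as the comment above explains.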
1734 +
1733 1735 public:
1734 1736 GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size)
1735 1737 : BitMap(bitmap_size_in_bits(gclab_word_size)),
1736 1738 _cm(G1CollectedHeap::heap()->concurrent_mark()),
1737 1739 _shifter(shifter()),
1738 1740 _bitmap_word_covers_words(bitmap_word_covers_words()),
1739 1741 _heap_start(heap_start),
1740 1742 _gclab_word_size(gclab_word_size),
1741 1743 _real_start_word(NULL),
1742 1744 _real_end_word(NULL),
1743 1745 _start_word(NULL)
1744 1746 {
1745 1747 guarantee( size_in_words() >= bitmap_size_in_words(),
1746 1748 "just making sure");
1747 1749 }
1748 1750
1749 1751 inline unsigned heapWordToOffset(HeapWord* addr) {
1750 1752 unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
1751 1753 assert(offset < size(), "offset should be within bounds");
1752 1754 return offset;
1753 1755 }
1754 1756
1755 1757 inline HeapWord* offsetToHeapWord(size_t offset) {
1756 1758 HeapWord* addr = _start_word + (offset << _shifter);
1757 1759 assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
1758 1760 return addr;
1759 1761 }
1760 1762
1761 1763 bool fields_well_formed() {
1762 1764 bool ret1 = (_real_start_word == NULL) &&
1763 1765 (_real_end_word == NULL) &&
1764 1766 (_start_word == NULL);
1765 1767 if (ret1)
1766 1768 return true;
1767 1769
1768 1770 bool ret2 = _real_start_word >= _start_word &&
1769 1771 _start_word < _real_end_word &&
1770 1772 (_real_start_word + _gclab_word_size) == _real_end_word &&
1771 1773 (_start_word + _gclab_word_size + _bitmap_word_covers_words)
1772 1774 > _real_end_word;
1773 1775 return ret2;
1774 1776 }
1775 1777
1776 1778 inline bool mark(HeapWord* addr) {
1777 1779 guarantee(use_local_bitmaps, "invariant");
1778 1780 assert(fields_well_formed(), "invariant");
1779 1781
1780 1782 if (addr >= _real_start_word && addr < _real_end_word) {
1781 1783 assert(!isMarked(addr), "should not have already been marked");
1782 1784
1783 1785 // first mark it on the bitmap
1784 1786 at_put(heapWordToOffset(addr), true);
1785 1787
1786 1788 return true;
1787 1789 } else {
1788 1790 return false;
1789 1791 }
1790 1792 }
1791 1793
1792 1794 inline bool isMarked(HeapWord* addr) {
1793 1795 guarantee(use_local_bitmaps, "invariant");
1794 1796 assert(fields_well_formed(), "invariant");
1795 1797
1796 1798 return at(heapWordToOffset(addr));
1797 1799 }
1798 1800
1799 1801 void set_buffer(HeapWord* start) {
1800 1802 guarantee(use_local_bitmaps, "invariant");
1801 1803 clear();
1802 1804
1803 1805 assert(start != NULL, "invariant");
1804 1806 _real_start_word = start;
1805 1807 _real_end_word = start + _gclab_word_size;
1806 1808
1807 1809 size_t diff =
1808 1810 pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
1809 1811 _start_word = start - diff;
1810 1812
1811 1813 assert(fields_well_formed(), "invariant");
1812 1814 }
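// Editor's worked example for the alignment above (hypothetical numbers):
// with _bitmap_word_covers_words == 64, a buffer starting 100 heap words past
// _heap_start gives diff = 100 % 64 = 36, so _start_word = start - 36. Bit 0
// of this local bitmap then corresponds to a word boundary of the global
// marking bitmap, as required by bitmap_size_in_bits() above.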
1813 1815
1814 1816 #ifndef PRODUCT
1815 1817 void verify() {
1816 1818 // verify that the marks have been propagated
1817 1819 GCLabBitMapClosure cl(_cm, this);
1818 1820 iterate(&cl);
1819 1821 }
1820 1822 #endif // PRODUCT
1821 1823
1822 - void retire() {
1823 - guarantee(use_local_bitmaps, "invariant");
1824 - assert(fields_well_formed(), "invariant");
1825 -
1826 - if (_start_word != NULL) {
1827 - CMBitMap* mark_bitmap = _cm->nextMarkBitMap();
1828 -
1829 - // this means that the bitmap was set up for the GCLab
1830 - assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
1831 -
1832 - mark_bitmap->mostly_disjoint_range_union(this,
1833 - 0, // always start from the start of the bitmap
1834 - _start_word,
1835 - gclab_real_word_size());
1836 - _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
1837 -
1838 -#ifndef PRODUCT
1839 - if (use_local_bitmaps && verify_local_bitmaps)
1840 - verify();
1841 -#endif // PRODUCT
1842 - } else {
1843 - assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
1844 - }
1845 - }
1824 + void retire(int worker_i);
1846 1825
1847 1826 size_t bitmap_size_in_words() const {
1848 1827 return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord;
1849 1828 }
1850 1829
1851 1830 };
1852 1831
1853 1832 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1854 1833 private:
1855 1834 bool _retired;
1856 1835 bool _should_mark_objects;
1857 1836 GCLabBitMap _bitmap;
1837 + int _worker_i;
1858 1838
1859 1839 public:
1860 - G1ParGCAllocBuffer(size_t gclab_word_size);
1840 + G1ParGCAllocBuffer(size_t gclab_word_size, int worker_i);
1861 1841
1862 1842 inline bool mark(HeapWord* addr) {
1863 1843 guarantee(use_local_bitmaps, "invariant");
1864 1844 assert(_should_mark_objects, "invariant");
1865 1845 return _bitmap.mark(addr);
1866 1846 }
1867 1847
1868 1848 inline void set_buf(HeapWord* buf) {
1869 1849 if (use_local_bitmaps && _should_mark_objects) {
1870 1850 _bitmap.set_buffer(buf);
1871 1851 }
1872 1852 ParGCAllocBuffer::set_buf(buf);
1873 1853 _retired = false;
1874 1854 }
1875 1855
1876 1856 inline void retire(bool end_of_gc, bool retain) {
1877 1857 if (_retired)
1878 1858 return;
1879 1859 if (use_local_bitmaps && _should_mark_objects) {
1880 - _bitmap.retire();
1860 + _bitmap.retire(_worker_i);
1881 1861 }
1882 1862 ParGCAllocBuffer::retire(end_of_gc, retain);
1883 1863 _retired = true;
1884 1864 }
1885 1865 };
1886 1866
1887 1867 class G1ParScanThreadState : public StackObj {
1888 1868 protected:
1889 1869 G1CollectedHeap* _g1h;
1890 1870 RefToScanQueue* _refs;
1891 1871 DirtyCardQueue _dcq;
1892 1872 CardTableModRefBS* _ct_bs;
1893 1873 G1RemSet* _g1_rem;
1894 1874
1895 1875 G1ParGCAllocBuffer _surviving_alloc_buffer;
1896 1876 G1ParGCAllocBuffer _tenured_alloc_buffer;
1897 1877 G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
1898 1878 ageTable _age_table;
1899 1879
1900 1880 size_t _alloc_buffer_waste;
1901 1881 size_t _undo_waste;
1902 1882
1903 1883 OopsInHeapRegionClosure* _evac_failure_cl;
1904 1884 G1ParScanHeapEvacClosure* _evac_cl;
1905 1885 G1ParScanPartialArrayClosure* _partial_scan_cl;
1906 1886
1907 1887 int _hash_seed;
1908 1888 int _queue_num;
1909 1889
1910 1890 size_t _term_attempts;
1911 1891
1912 1892 double _start;
1913 1893 double _start_strong_roots;
1914 1894 double _strong_roots_time;
1915 1895 double _start_term;
1916 1896 double _term_time;
1917 1897
1918 1898 // Map from young-age-index (0 == not young, 1 is youngest) to
1919 1899 // surviving words. base is what we get back from the malloc call
1920 1900 size_t* _surviving_young_words_base;
1921 1901 // this points into the array, as we use the first few entries for padding
1922 1902 size_t* _surviving_young_words;
1923 1903
1924 1904 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
1925 1905
1926 1906 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
1927 1907
1928 1908 void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
1929 1909
1930 1910 DirtyCardQueue& dirty_card_queue() { return _dcq; }
1931 1911 CardTableModRefBS* ctbs() { return _ct_bs; }
1932 1912
1933 1913 template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
1934 1914 if (!from->is_survivor()) {
1935 1915 _g1_rem->par_write_ref(from, p, tid);
1936 1916 }
1937 1917 }
1938 1918
1939 1919 template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
1940 1920 // If the new value of the field points to the same region or
1941 1921 // is the to-space, we don't need to include it in the Rset updates.
1942 1922 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1943 1923 size_t card_index = ctbs()->index_for(p);
1944 1924 // If the card hasn't been added to the buffer, do it.
1945 1925 if (ctbs()->mark_card_deferred(card_index)) {
1946 1926 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1947 1927 }
1948 1928 }
1949 1929 }
1950 1930
1951 1931 public:
1952 1932 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num);
1953 1933
1954 1934 ~G1ParScanThreadState() {
1955 1935 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
1956 1936 }
1957 1937
1958 1938 RefToScanQueue* refs() { return _refs; }
1959 1939 ageTable* age_table() { return &_age_table; }
1960 1940
1961 1941 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1962 1942 return _alloc_buffers[purpose];
1963 1943 }
1964 1944
1965 1945 size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
1966 1946 size_t undo_waste() const { return _undo_waste; }
1967 1947
1968 1948 #ifdef ASSERT
1969 1949 bool verify_ref(narrowOop* ref) const;
1970 1950 bool verify_ref(oop* ref) const;
1971 1951 bool verify_task(StarTask ref) const;
1972 1952 #endif // ASSERT
1973 1953
1974 1954 template <class T> void push_on_queue(T* ref) {
1975 1955 assert(verify_ref(ref), "sanity");
1976 1956 refs()->push(ref);
1977 1957 }
1978 1958
1979 1959 template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
1980 1960 if (G1DeferredRSUpdate) {
1981 1961 deferred_rs_update(from, p, tid);
1982 1962 } else {
1983 1963 immediate_rs_update(from, p, tid);
1984 1964 }
1985 1965 }
1986 1966
1987 1967 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1988 1968
1989 1969 HeapWord* obj = NULL;
1990 1970 size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1991 1971 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1992 1972 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
1993 1973 assert(gclab_word_size == alloc_buf->word_sz(),
1994 1974 "dynamic resizing is not supported");
1995 1975 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
1996 1976 alloc_buf->retire(false, false);
1997 1977
1998 1978 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1999 1979 if (buf == NULL) return NULL; // Let caller handle allocation failure.
2000 1980 // Otherwise.
2001 1981 alloc_buf->set_buf(buf);
2002 1982
2003 1983 obj = alloc_buf->allocate(word_sz);
2004 1984 assert(obj != NULL, "buffer was definitely big enough...");
2005 1985 } else {
2006 1986 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
2007 1987 }
2008 1988 return obj;
2009 1989 }
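// Editor's note on the test above: "word_sz * 100 < gclab_word_size *
// ParallelGCBufferWastePct" retires the current PLAB only when the object is
// smaller than ParallelGCBufferWastePct percent of a full PLAB, bounding the
// space wasted per retirement. Assuming, for illustration, gclab_word_size ==
// 4096 and ParallelGCBufferWastePct == 10, objects of up to 409 words get a
// fresh PLAB; anything of 410 words or more is allocated directly in a region.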
2010 1990
2011 1991 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
2012 1992 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
2013 1993 if (obj != NULL) return obj;
2014 1994 return allocate_slow(purpose, word_sz);
2015 1995 }
2016 1996
2017 1997 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
2018 1998 if (alloc_buffer(purpose)->contains(obj)) {
2019 1999 assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
2020 2000 "should contain whole object");
2021 2001 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
2022 2002 } else {
2023 2003 CollectedHeap::fill_with_object(obj, word_sz);
2024 2004 add_to_undo_waste(word_sz);
2025 2005 }
2026 2006 }
2027 2007
2028 2008 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
2029 2009 _evac_failure_cl = evac_failure_cl;
2030 2010 }
2031 2011 OopsInHeapRegionClosure* evac_failure_closure() {
2032 2012 return _evac_failure_cl;
2033 2013 }
2034 2014
2035 2015 void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
2036 2016 _evac_cl = evac_cl;
2037 2017 }
2038 2018
2039 2019 void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
2040 2020 _partial_scan_cl = partial_scan_cl;
2041 2021 }
2042 2022
2043 2023 int* hash_seed() { return &_hash_seed; }
2044 2024 int queue_num() { return _queue_num; }
2045 2025
2046 2026 size_t term_attempts() const { return _term_attempts; }
2047 2027 void note_term_attempt() { _term_attempts++; }
2048 2028
2049 2029 void start_strong_roots() {
2050 2030 _start_strong_roots = os::elapsedTime();
2051 2031 }
2052 2032 void end_strong_roots() {
2053 2033 _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
2054 2034 }
2055 2035 double strong_roots_time() const { return _strong_roots_time; }
2056 2036
2057 2037 void start_term_time() {
2058 2038 note_term_attempt();
2059 2039 _start_term = os::elapsedTime();
2060 2040 }
2061 2041 void end_term_time() {
2062 2042 _term_time += (os::elapsedTime() - _start_term);
2063 2043 }
2064 2044 double term_time() const { return _term_time; }
2065 2045
2066 2046 double elapsed_time() const {
2067 2047 return os::elapsedTime() - _start;
2068 2048 }
2069 2049
2070 2050 static void
2071 2051 print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
2072 2052 void
2073 2053 print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
2074 2054
2075 2055 size_t* surviving_young_words() {
2076 2056 // We add on to hide entry 0 which accumulates surviving words for
2077 2057 // age -1 regions (i.e. non-young ones)
2078 2058 return _surviving_young_words;
2079 2059 }
2080 2060
2081 2061 void retire_alloc_buffers() {
2082 2062 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2083 2063 size_t waste = _alloc_buffers[ap]->words_remaining();
2084 2064 add_to_alloc_buffer_waste(waste);
2085 2065 _alloc_buffers[ap]->retire(true, false);
2086 2066 }
2087 2067 }
2088 2068
2089 2069 template <class T> void deal_with_reference(T* ref_to_scan) {
2090 2070 if (has_partial_array_mask(ref_to_scan)) {
2091 2071 _partial_scan_cl->do_oop_nv(ref_to_scan);
2092 2072 } else {
2093 2073 // Note: we can use "raw" versions of "region_containing" because
2094 2074 // "obj_to_scan" is definitely in the heap, and is not in a
2095 2075 // humongous region.
2096 2076 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
2097 2077 _evac_cl->set_region(r);
2098 2078 _evac_cl->do_oop_nv(ref_to_scan);
2099 2079 }
2100 2080 }
2101 2081
2102 2082 void deal_with_reference(StarTask ref) {
2103 2083 assert(verify_task(ref), "sanity");
2104 2084 if (ref.is_narrow()) {
2105 2085 deal_with_reference((narrowOop*)ref);
2106 2086 } else {
2107 2087 deal_with_reference((oop*)ref);
2108 2088 }
2109 2089 }
2110 2090
2111 2091 public:
2112 2092 void trim_queue();
2113 2093 };
2114 2094
2115 2095 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP